Commit

Merge pull request #182 from NREL/update-v2024.6.0
Update to v2024.6.0
kennedy-mindermann authored Aug 26, 2024
2 parents 9b159a1 + 58937a7 commit b1a6d9b
Showing 1,266 changed files with 1,477,063 additions and 4,345,466 deletions.
51 changes: 51 additions & 0 deletions .github/workflows/build-docs-external.yml
@@ -0,0 +1,51 @@
# this workflow sets up dependencies, builds the documentation, and updates the gh-pages branch
# the gh-pages branch is what's used for GitHub Pages

name: Documentation

on:
  # Triggers the workflow on push or pull request events but only for the "main" branch
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Python 3.11
        uses: actions/setup-python@v4
        with:
          python-version: "3.11"

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install sphinx==7.2.6
          pip install myst-parser==2.0.0
          pip install sphinx_rtd_theme==2.0.0
          pip install pandas
          pip install argparse
      - name: Set variables for public github repo
        run: |
          echo "BASE_URL=https://github.com/NREL/ReEDS-2.0" >> $GITHUB_ENV
          cd ${GITHUB_WORKSPACE}/docs/source/documentation_tools/
          sh generate_sources_md_file.sh
          cd ${GITHUB_WORKSPACE}
          python docs/source/documentation_tools/generate_markdown.py --githubURL "https://github.com/NREL/ReEDS-2.0/blob/main" --reedsPath "${GITHUB_WORKSPACE}"
      - name: Build Sphinx documentation
        run: sphinx-build docs/source docs/build/

      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: docs/build
103 changes: 34 additions & 69 deletions Augur.py
@@ -14,54 +14,20 @@
import pandas as pd
import gdxpds

import ReEDS_Augur.A_prep_data as A_prep_data
import ReEDS_Augur.E_capacity_credit as E_capacity_credit
import ReEDS_Augur.F_stress_periods as F_stress_periods
import ReEDS_Augur.prep_data as prep_data
import ReEDS_Augur.capacity_credit as capacity_credit
import ReEDS_Augur.stress_periods as stress_periods
import ReEDS_Augur.functions as functions


#%% Functions
def run_osprey(casedir, t, sw):
    """
    """
    print('Running Osprey')
    tic = datetime.datetime.now()
    subprocess.call(
        [
            'gams', os.path.join(casedir, 'ReEDS_Augur', 'B1_osprey'),
            'o='+os.path.join(casedir, 'lstfiles', f'osprey_{t}.lst'),
            'logOption=0',
            'logfile='+os.path.join(casedir, 'gamslog.txt'),
            'appendLog=1',
            '--solver=cplex',
            f'--prev_year={t}',
            f"--hoursperperiod={sw['hoursperperiod']:>03}",
            f"--threads={sw['threads'] if sw['threads'] > 0 else 16}",
        ] + (['license=gamslice.txt'] if int(sw['hpc']) else []),
        cwd=os.getcwd()
    )
    functions.toc(tic=tic, year=t, process='ReEDS_Augur/B1_osprey.gms')

    ### Write Osprey results to csv files
    tic = datetime.datetime.now()
    subprocess.call(
        [
            'gams', os.path.join(casedir, 'ReEDS_Augur', 'B2_gdx_dump'),
            f"--prev_year={t}"
        ] + (['license=gamslice.txt'] if int(sw['hpc']) else []),
        cwd=os.getcwd()
    )
    functions.toc(tic=tic, year=t, process='ReEDS_Augur/B2_gdx_dump.gms')


def run_pras(
    casedir, t, sw, iteration=0, recordtime=True,
    repo=False, overwrite=True, include_samples=False,
    write_flow=False, write_surplus=False, write_energy=False,
):
    """
    """
    reeds2pras_path = os.path.expanduser(sw['reeds2pras_path'])
    ### Get the PRAS settings for this solve year
    print('Running ReEDS2PRAS and PRAS')
    scriptpath = (sw['reeds_path'] if repo else casedir)
@@ -85,7 +51,6 @@ def run_pras(
f"--write_energy={int(write_energy)}",
f"--iteration={iteration}",
f"--samples={sw['pras_samples']}",
f"--reeds2praspath={reeds2pras_path}",
f"--overwrite={int(overwrite)}",
f"--include_samples={int(include_samples)}",
]
@@ -109,40 +74,39 @@ def main(t, tnext, casedir, iteration=0):
def main(t, tnext, casedir, iteration=0):

    # #%% To debug, uncomment these lines and update the run path
    # t = 2035
    # tnext = 2040
    # casedir = os.path.expanduser(
    #     '~/github/ReEDS-2.0/runs/v20240118_stressM0_Z45_SP_5yr_H2_EI')
    # t = 2020
    # tnext = 2023
    # reeds_path = os.path.expanduser('~/github2/ReEDS-2.0')
    # casedir = os.path.join(
    #     reeds_path,'runs','v20240708_tforM1_Pacific')
    # iteration = 0
    # assert tnext >= t
    # os.chdir(casedir)
    # # ## Copy reeds2pras from repo to run folder
    # # import shutil
    # # shutil.rmtree(os.path.join(casedir, 'reeds2pras'))
    # # shutil.copytree(
    # #     os.path.join(reeds_path, 'reeds2pras'),
    # #     os.path.join(casedir, 'reeds2pras'),
    # #     ignore=shutil.ignore_patterns('test'),
    # # )

    #%% Get switches from inputs_case/switches.csv and ReEDS_Augur/augur_switches.csv
    sw = functions.get_switches(casedir)
    sw['t'] = t

    #%% Prep data for Osprey and capacity credit
    print('Preparing data for Osprey, PRAS, and capacity credit calculation')
    #%% Prep data for resource adequacy
    print('Preparing data for resource adequacy calculations')
    tic = datetime.datetime.now()
    augur_gdx, augur_csv, augur_h5 = A_prep_data.main(t, casedir)
    functions.toc(tic=tic, year=t, process='ReEDS_Augur/A_prep_data.py')

    #%% Run Osprey if...
    ## the user specifies to run Osprey or...
    if int(sw['osprey']) or (
        ## if we're using stress periods (not capacity credit) and using Osprey to
        ## identify dropped-PRM periods (instead of PRAS to identify high-risk periods).
        (not int(sw.GSw_PRM_CapCredit))
        and (sw['GSw_PRM_StressModel'].lower() == 'osprey')
    ):
        run_osprey(casedir, t, sw)
    augur_csv, augur_h5 = prep_data.main(t, casedir)
    functions.toc(tic=tic, year=t, process='ReEDS_Augur/prep_data.py')

    #%% Calculate capacity credit if necessary; otherwise bypass
    print('calculating capacity credit...')
    tic = datetime.datetime.now()

    if int(sw['GSw_PRM_CapCredit']):
        cc_results = E_capacity_credit.reeds_cc(t, tnext, casedir)
        cc_results = capacity_credit.reeds_cc(t, tnext, casedir)
    else:
        cc_results = {
            'cc_mar': pd.DataFrame(columns=['i','r','ccreg','szn','t','Value']),
@@ -151,7 +115,7 @@ def main(t, tnext, casedir, iteration=0):
            'sdbin_size': pd.DataFrame(columns=['ccreg','szn','bin','t','Value']),
        }

    functions.toc(tic=tic, year=t, process='ReEDS_Augur/E_capacity_credit.py')
    functions.toc(tic=tic, year=t, process='ReEDS_Augur/capacity_credit.py')

    #%% Run PRAS if necessary
    solveyears = pd.read_csv(
@@ -162,23 +126,23 @@ def main(t, tnext, casedir, iteration=0):
        1: True if t == max(solveyears) else False,
        2: True,
    }[int(sw['pras'])]
    if pras_this_solve_year or (
        (not int(sw.GSw_PRM_CapCredit))
        and (sw['GSw_PRM_StressModel'].lower() == 'pras')
    ):
    if pras_this_solve_year or (not int(sw.GSw_PRM_CapCredit)):
        result = run_pras(
            casedir, t, sw, iteration=iteration,
            write_flow=(True if t == max(solveyears) else False),
            write_energy=True,
        )
        print(f"run_pras.jl returned code {result.returncode}")
        if result.returncode:
            raise Exception(
                f"run_pras.jl returned code {result.returncode}. Check gamslog.txt for error trace."
            )

    #%% Identify stress periods
    print('identifying new stress periods...')
    tic = datetime.datetime.now()
    if 'user' not in sw['GSw_PRM_StressModel'].lower():
        _eue_sorted_periods = F_stress_periods.main(sw=sw, t=t, iteration=iteration)
    functions.toc(tic=tic, year=t, process='ReEDS_Augur/F_stress_periods.py')
        _eue_sorted_periods = stress_periods.main(sw=sw, t=t, iteration=iteration)
    functions.toc(tic=tic, year=t, process='ReEDS_Augur/stress_periods.py')

    #%% Write gdx file explicitly to ensure that all entries
    ### (even empty dataframes) are written as parameters, not sets
@@ -195,12 +159,13 @@ def main(t, tnext, casedir, iteration=0):
        os.path.join('ReEDS_Augur', 'augur_data', f'ReEDS_Augur_{t}.gdx')
    )

    # #%% Uncomment to run G_plots (typically run from call_{}.sh script for parallelization)
    # #%% Uncomment to run diagnostic_plots
    # ### (typically run from call_{}.sh script for parallelization)
    # try:
    #     import ReEDS_Augur.G_plots as G_plots
    #     G_plots.main(sw)
    #     import ReEDS_Augur.diagnostic_plots as diagnostic_plots
    #     diagnostic_plots.main(sw)
    # except Exception as err:
    #     print('G_plots.py failed with the following exception:')
    #     print('diagnostic_plots.py failed with the following exception:')
    #     print(err)


3 changes: 1 addition & 2 deletions Project.toml
@@ -9,7 +9,7 @@ JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
LoggingExtras = "e6f89c97-d47a-5376-807f-9c37f3926c36"
PRAS = "05348d26-1c52-11e9-35e3-9d51842d34b9"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
Random123 = "74087812-796a-5b5d-8853-05524746bad3"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
TimeZones = "f269a46b-ccf7-5d73-abea-4c690281aa53"

@@ -22,5 +22,4 @@ InlineStrings = "1.4.0"
JSON = "0.21.4"
LoggingExtras = "1.0.0"
PRAS = "0.6.3"
Revise = "3.5.3"
TimeZones = "1.10.0"