
Commit

Merge pull request #186 from NREL/update-v2024.7.0
Update to v2024.7.0
kennedy-mindermann authored Oct 24, 2024
2 parents e65ed5e + 44742f2 commit 36235c2
Showing 342 changed files with 128,507 additions and 89,467 deletions.
6 changes: 4 additions & 2 deletions .github/workflows/build-docs-external.yml
@@ -26,11 +26,13 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install pandas
-          pip install argparse
           pip install sphinx==7.2.6
           pip install myst-parser==2.0.0
           pip install sphinx_rtd_theme==2.0.0
+          pip install pandas
+          pip install argparse
+          pip install sphinxcontrib-bibtex==2.6.2
       - name: Set variables for public github repo
         run: |
6 changes: 3 additions & 3 deletions Project.toml
@@ -16,10 +16,10 @@ TimeZones = "f269a46b-ccf7-5d73-abea-4c690281aa53"
 [compat]
 ArgParse = "1.1.4"
 CSV = "0.10.11"
-DataFrames = "1.5.0"
+DataFrames = "1.6.0"
 HDF5 = "0.16.15"
 InlineStrings = "1.4.0"
 JSON = "0.21.4"
-LoggingExtras = "1.0.0"
+LoggingExtras = "1.0.3"
 PRAS = "0.6.3"
-TimeZones = "1.10.0"
+TimeZones = "~1.13.0"
2 changes: 1 addition & 1 deletion README.md
@@ -3,7 +3,7 @@

 This GitHub repository contains the source code for NREL's ReEDS model. The ReEDS model source code is available at no cost from the National Renewable Energy Laboratory. The ReEDS model can be downloaded or cloned from [https://github.com/NREL/ReEDS-2.0](https://github.com/NREL/ReEDS-2.0).
 
-**For more information about the model and how to get started, see the [open source ReEDS-2.0 Documentation](https://nrel.github.io/ReEDS-2.0)**
+**For more information about the model, see the [open source ReEDS-2.0 Documentation](https://nrel.github.io/ReEDS-2.0)**
 
 A ReEDS training video (based on the 2020 version of ReEDS) is available on the NREL YouTube channel at [https://youtu.be/aGj3Jnspk9M?si=iqCRNn5MbGZc8ZIO](https://youtu.be/aGj3Jnspk9M?si=iqCRNn5MbGZc8ZIO).
 
2 changes: 1 addition & 1 deletion ReEDS_Augur/diagnostic_plots.py
@@ -191,7 +191,7 @@ def get_inputs(sw):
         os.path.join(sw['casedir'],'inputs_case','resources.csv')
     ).set_index('resource')
     resources['tech'] = (
-        resources.i.map(lambda x: x.split('_')[0])
+        resources.i.map(lambda x: x.split('|')[0])
         .map(lambda x: x if x.startswith('battery') else x.strip('_01234567890*')))
 
     resources['rb'] = resources.r
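
For reference, the change above switches the tech/region delimiter in resource labels from '_' to '|'. A minimal standalone sketch of the same parsing, using hypothetical labels not taken from the repository:

import pandas as pd

# Hypothetical pipe-delimited resource labels of the form 'tech_class|region'
resources_i = pd.Series(['upv_1|p10', 'wind-ons_3|p42', 'battery_4|p10'])
tech = resources_i.map(lambda x: x.split('|')[0])    # 'upv_1', 'wind-ons_3', 'battery_4'
region = resources_i.map(lambda x: x.split('|')[1])  # 'p10', 'p42', 'p10'
# Strip the numeric class suffix from non-battery techs, mirroring the lambda above
tech_base = tech.map(lambda x: x if x.startswith('battery') else x.strip('_01234567890*'))
print(tech_base.tolist())  # ['upv', 'wind-ons', 'battery_4']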
39 changes: 34 additions & 5 deletions ReEDS_Augur/prep_data.py
@@ -65,9 +65,8 @@ def main(t, casedir):
     resources = pd.read_csv(os.path.join(inputs_case, 'resources.csv'))
 
     recf = pd.read_hdf(os.path.join(inputs_case, 'recf.h5')).astype(np.float32)
-    recf.columns = recf.columns.map(
-        resources.set_index('resource')[['i','r']].apply(lambda row: tuple(row), axis=1)
-    ).rename(('i','r'))
+    recf.columns = pd.MultiIndex.from_tuples([tuple(x.split('|')) for x in recf.columns],
+                                             names=('i','r'))
 
     techs = gdxreeds['i_subsets'].pivot(columns='i_subtech',index='i',values='Value')

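The new MultiIndex construction above and the '|'.join calls further down are two halves of the same round trip. A minimal sketch with hypothetical column names, not taken from recf.h5:

import numpy as np
import pandas as pd

# Hypothetical pipe-joined resource columns of the form 'i|r'
df = pd.DataFrame(np.ones((3, 2)), columns=['upv_1|p10', 'wind-ons_3|p42'])

# Forward: split 'i|r' column names into an (i, r) MultiIndex
df.columns = pd.MultiIndex.from_tuples(
    [tuple(c.split('|')) for c in df.columns], names=('i', 'r'))
print(df.columns.tolist())  # [('upv_1', 'p10'), ('wind-ons_3', 'p42')]

# Reverse: flatten the MultiIndex back to pipe-joined labels
flat = ['|'.join(c) for c in df.columns]
print(flat)  # ['upv_1|p10', 'wind-ons_3|p42']
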
@@ -146,7 +145,7 @@ def _devint_storage(dfin):

     ### Store generation by (i,r) for capacity_credit.py
     vre_gen_exist = gen_vre_ir.reindex(resources[['i','r']], axis=1).fillna(0).clip(lower=0)
-    vre_gen_exist.columns = resources.resource
+    vre_gen_exist.columns = ['|'.join(c) for c in vre_gen_exist.columns]
     vre_gen_exist.index = h_dt_szn.set_index(['ccseason','year','h','hour']).index
     h5out['vre_gen_exist'] = vre_gen_exist

@@ -179,7 +178,7 @@ def intify(v):
         recf.multiply(cf_adj_i, level='i', axis=1)
         .reindex(resources[['i','r']], axis=1)
     )
-    vre_cf_marg.columns = resources.resource
+    vre_cf_marg.columns = ['|'.join(c) for c in vre_cf_marg.columns]
     vre_cf_marg.index = h_dt_szn.set_index(['ccseason','year','h','hour']).index
     h5out['vre_cf_marg'] = vre_cf_marg

@@ -256,6 +255,36 @@ def intify(v):
     csvout['energy_cap'] = energy_cap.drop(too_small_storage, errors='ignore')
     csvout['max_cap'] = max_cap.drop(too_small_storage, errors='ignore')
 
+    #%% Strip water tech suffixes from water-dependent technologies
+    ### NOTE: This must be done to make water runs compatible with PRAS, as PRAS is not set
+    ### up to ingest generation techs with water tech suffixes, as well as to ensure
+    ### PRAS is operating the same for runs with/without GSw_WaterMain on.
+
+    # For each dataframe in csvout, check if it has an 'i' index and if it does, use
+    # i_ctt_wst_link to remove the water suffixes
+    watertech_link = pd.read_csv(
+        os.path.join(casedir,'inputs_case','i_coolingtech_watersource_link.csv'),
+        usecols=['*i','ii']
+    )
+    waterupgrades_link = pd.read_csv(
+        os.path.join(casedir,'inputs_case','i_coolingtech_watersource_upgrades_link.csv'),
+        usecols=['*i','ii']
+    )
+    watertech_link = pd.concat([watertech_link, waterupgrades_link])
+    watertech_link = watertech_link.apply(lambda x: x.str.lower()).set_index('*i').squeeze(1)
+
+    for key in csvout.keys():
+        df = csvout[key]
+        if 'i' in df.index.names:
+            # Strip water tech suffixes from tech names
+            df.rename(index=watertech_link, level='i', inplace=True)
+            # Sum over i,v,t combination duplicates now that water techs have been stripped
+            indices = list(df.index.names)
+            df = df.groupby(df.index).sum()
+            # Reset the index names
+            df.index = pd.MultiIndex.from_tuples(df.index, names=indices)
+            # Rewrite data in csvout with updated data
+            csvout[key] = df.copy()
+
     #%%### Write it
     #%% .csv files
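
The groupby in the new block above collapses (i,v,t) rows that become duplicates once water-suffixed techs are mapped back to their base tech. A minimal standalone sketch of that pattern, with hypothetical tech names, vintages, and values:

import pandas as pd

# Hypothetical mapping from water-suffixed techs to base techs, standing in
# for the i_coolingtech_watersource_link table
watertech_link = pd.Series({'gas-cc_r_fsa': 'gas-cc', 'gas-cc_o_fsa': 'gas-cc'})

# Hypothetical capacity data indexed by (i, v, t)
cap = pd.DataFrame(
    {'MW': [100.0, 50.0, 25.0]},
    index=pd.MultiIndex.from_tuples(
        [('gas-cc_r_fsa', 'new1', 2030),
         ('gas-cc_o_fsa', 'new1', 2030),
         ('upv_1', 'new1', 2030)],
        names=['i', 'v', 't']))

# Map suffixed techs to their base tech, then sum the resulting duplicates
cap = cap.rename(index=watertech_link, level='i')
indices = list(cap.index.names)
cap = cap.groupby(cap.index).sum()
cap.index = pd.MultiIndex.from_tuples(cap.index, names=indices)
print(cap)  # the two gas-cc rows are combined into ('gas-cc', 'new1', 2030) with 150.0 MW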