Skip to content

Commit

Permalink
Include all declared commodities in commodity groups and always have …
Browse files Browse the repository at this point in the history
…UC description (#118)

This PR adds commodities not explicitly specified in the topology to commodity groups, and uses the user constraint name as the user constraint description when the description is missing.
Closes #98.
  • Loading branch information
olejandro authored Sep 11, 2023
1 parent baad535 commit ab4c510
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 8 deletions.
3 changes: 2 additions & 1 deletion times_reader/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,13 +77,14 @@ def convert_xl_to_times(
transforms.generate_all_regions,
transforms.capitalise_attributes,
transforms.apply_fixups,
transforms.extract_commodity_groups,
transforms.generate_commodity_groups,
transforms.fill_in_missing_pcgs,
transforms.generate_top_ire,
transforms.include_tables_source,
transforms.merge_tables,
transforms.apply_more_fixups,
transforms.process_years,
transforms.complete_commodity_groups,
transforms.process_uc_wildcards,
transforms.process_wildcards,
transforms.convert_aliases,
Expand Down
2 changes: 1 addition & 1 deletion times_reader/config/times_mapping.txt
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ SHAPE[J,AGE,VALUE] = ~FI_T(Other_Indexes,Year,VALUE,Attribute:SHAPE)
STGIN_BND[REG,DATAYEAR,PRC,COM,TSLVL,BD,VALUE] = ~FI_T(Region,Year,TechName,CommName,TimeSlice,LimType,VALUE,Attribute:STGIN_BND)
STG_EFF[REG,DATAYEAR,PRC,VALUE] = ~FI_T(Region,Year,TechName,VALUE,Attribute:STG_EFF)
STG_LOSS[REG,DATAYEAR,PRC,TSLVL,VALUE] = ~FI_T(Region,Year,TechName,TimeSlice,VALUE,Attribute:STG_LOSS)
TOP[REG,PRC,COM,IO] = COMM_GROUPS(Region,TechName,CommName,IO)
TOP[REG,PRC,COM,IO] = TOPOLOGY(Region,TechName,CommName,IO)
TOP_IRE[ALL_REG,COM,ALL_R,C,PRC] = TOP_IRE(Origin,IN,Destination,OUT,TechName)
TS_GROUP[REG,TSLVL,TS_GROUP] = TimeSlicesGroup(Region,TSLVL,TS_GROUP)
TS_MAP[REG,PARENT,TS_MAP] = TimeSliceMap(Region,Parent,TimesliceMap)
Expand Down
39 changes: 33 additions & 6 deletions times_reader/transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -1144,7 +1144,7 @@ def apply_fixups_table(table: datatypes.EmbeddedXlTable):
return [apply_fixups_table(table) for table in tables]


def extract_commodity_groups(
def generate_commodity_groups(
config: datatypes.Config,
tables: List[datatypes.EmbeddedXlTable],
) -> List[datatypes.EmbeddedXlTable]:
Expand Down Expand Up @@ -1253,7 +1253,7 @@ def name_comm_group(df):
range="",
filename="",
uc_sets={},
tag="COMM_GROUPS",
tag="TOPOLOGY",
dataframe=comm_groups,
)
)
Expand All @@ -1274,6 +1274,23 @@ def name_comm_group(df):
return tables


def complete_commodity_groups(
    config: datatypes.Config, tables: Dict[str, DataFrame]
) -> Dict[str, DataFrame]:
    """
    Complete the list of commodity groups.

    Builds the COMM_GROUPS table by combining every declared commodity
    (each treated as a commodity group of its own name) with the groups
    already referenced in the TOPOLOGY table, then de-duplicating.
    """
    # Every declared commodity, relabelled so its name doubles as a group name.
    declared = generate_topology_dictionary(tables)["commodities_by_name"]
    declared = declared.rename(columns={"commname": "commoditygroup"})

    # Groups explicitly referenced by the topology.
    referenced = tables["TOPOLOGY"]["commoditygroup"].to_frame()

    # NOTE(review): reset_index() without drop=True keeps the old index as an
    # "index" column in COMM_GROUPS — confirm downstream consumers expect that.
    combined = pd.concat([declared, referenced])
    tables["COMM_GROUPS"] = combined.drop_duplicates(keep="first").reset_index()
    return tables


def generate_top_ire(
config: datatypes.Config,
tables: List[datatypes.EmbeddedXlTable],
Expand All @@ -1285,7 +1302,7 @@ def generate_top_ire(
veda_set_ext_reg_mapping = {"IMP": "IMPEXP", "EXP": "IMPEXP", "MIN": "MINRNW"}
dummy_process_cset = [["NRG", "IMPNRGZ"], ["MAT", "IMPMATZ"], ["DEM", "IMPDEMZ"]]
veda_process_sets = utils.single_table(tables, "VedaProcessSets").dataframe
com_map = utils.single_table(tables, "COMM_GROUPS").dataframe
com_map = utils.single_table(tables, "TOPOLOGY").dataframe

ire_prc = pd.DataFrame(columns=["region", "techname"])
for table in tables:
Expand Down Expand Up @@ -1370,7 +1387,7 @@ def expand_pcg_from_suffix(df):
else:
df = table.dataframe.copy()
df["primarycg"] = df.apply(expand_pcg_from_suffix, axis=1)
default_pcgs = utils.single_table(tables, "COMM_GROUPS").dataframe.copy()
default_pcgs = utils.single_table(tables, "TOPOLOGY").dataframe.copy()
default_pcgs = default_pcgs.loc[
default_pcgs["DefaultVedaPCG"] == 1,
["region", "techname", "commoditygroup"],
Expand Down Expand Up @@ -2264,11 +2281,11 @@ def rename_cgs(


def apply_more_fixups(
config: datatypes.Config, input: Dict[str, DataFrame]
config: datatypes.Config, tables: Dict[str, DataFrame]
) -> Dict[str, DataFrame]:
output = {}
# TODO: This should only be applied to processes introduced in BASE
for table_type, df in input.items():
for table_type, df in tables.items():
if table_type == datatypes.Tag.fi_t:
index = df["attribute"] == "STOCK"
# Temporary solution to include only processes defined in BASE
Expand All @@ -2281,6 +2298,7 @@ def apply_more_fixups(
i_reg_prc = i_reg & (df["techname"] == process)
if any(i_reg_prc):
extra_rows.append(["NCAP_BND", region, process, "UP", 0, 2])
# TODO: TIMES already handles this. Drop?
if len(df[i_reg_prc]["year"].unique()) == 1:
year = df[i_reg_prc]["year"].unique()[0]
i_attr = (
Expand Down Expand Up @@ -2312,6 +2330,15 @@ def apply_more_fixups(
),
]
)
# TODO: Handle defaults in a general way.
# Use uc_n value if uc_desc is missing
elif table_type == datatypes.Tag.uc_t:
for uc_n in df["uc_n"].unique():
index = df["uc_n"] == uc_n
if all(df["uc_desc"][index].isna()):
# Populate the first row only
if any(index):
df.at[list(index).index(True), "uc_desc"] = uc_n

output[table_type] = df

Expand Down

0 comments on commit ab4c510

Please sign in to comment.