Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/mc' into mc
Browse files Browse the repository at this point in the history
  • Loading branch information
romainsacchi committed Jul 25, 2024
2 parents 02a0130 + 58e80a2 commit 572a47e
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 19 deletions.
10 changes: 3 additions & 7 deletions dev/timing.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,17 +8,13 @@
methods=[
"EF v3.1 EN15804 - climate change - global warming potential (GWP100)",
"EF v3.1 EN15804 - ecotoxicity: freshwater - comparative toxic unit for ecosystems (CTUe)",
] + [m for m in p.lcia_methods if "relics" in m.lower()][-3:],
]
+ [m for m in p.lcia_methods if "relics" in m.lower()][-3:],
regions=[
"CH",
],
scenarios=p.scenarios.pathway.values.tolist(),
years=[
2020,
2030,
2040,
2050
],
years=[2020, 2030, 2040, 2050],
variables=vars,
use_distributions=100,
subshares=True,
Expand Down
29 changes: 17 additions & 12 deletions pathways/stats.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,8 @@ def log_double_accounting(


def log_subshares(
shares: dict, region: str,
shares: dict,
region: str,
) -> pd.DataFrame:
"""
Create a pandas DataFrame where the keys of shares are the columns
Expand All @@ -113,9 +114,9 @@ def log_subshares(


def log_uncertainty_values(
region: str,
uncertainty_indices: np.array,
uncertainty_values: np.array,
region: str,
uncertainty_indices: np.array,
uncertainty_values: np.array,
) -> pd.DataFrame:
"""
Create a pandas DataFrame with the region and uncertainty indices as columns,
Expand Down Expand Up @@ -160,21 +161,21 @@ def log_results(
return df[["iteration", "region"] + methods]


def create_mapping_sheet(indices: dict) -> pd.DataFrame:
    """
    Create a mapping sheet for the activities with uncertainties.

    :param indices: mapping whose keys are 4-tuples of
        (name, product, unit, region) and whose values identify each activity.
    :return: DataFrame with columns Value, Name, Product, Unit, Region.
    """

    # Turn the dict items into a two-column frame: the tuple key and its value.
    mapping = pd.DataFrame(indices.items(), columns=["Index", "Value"])

    # Expand each 4-tuple key into its own labelled columns.
    expanded = pd.DataFrame(mapping["Index"].tolist(), index=mapping.index)
    mapping[["Name", "Product", "Unit", "Region"]] = expanded

    # The composite key is now redundant.
    mapping.drop(columns=["Index"], inplace=True)

    return mapping

Expand Down Expand Up @@ -275,11 +276,15 @@ def run_GSA_delta(
# based on "iteration" and "region" columns

if len(technology_shares) > 0:
df_parameters = uncertainty_values.merge(technology_shares, on=["iteration", "region"])
df_parameters = uncertainty_values.merge(
technology_shares, on=["iteration", "region"]
)
else:
df_parameters = uncertainty_values

parameters = [param for param in df_parameters.columns if param not in ["iteration", "region"]]
parameters = [
param for param in df_parameters.columns if param not in ["iteration", "region"]
]

problem = {
"num_vars": len(parameters),
Expand Down

0 comments on commit 572a47e

Please sign in to comment.