diff --git a/dev/timing.py b/dev/timing.py
index cc526d1..6cd2cbd 100644
--- a/dev/timing.py
+++ b/dev/timing.py
@@ -4,21 +4,25 @@
 
 vars = [v for v in p.scenarios.coords["variables"].values if v.startswith("FE")]
 
-
 p.calculate(
     methods=[
         "EF v3.1 EN15804 - climate change - global warming potential (GWP100)",
         "EF v3.1 EN15804 - ecotoxicity: freshwater - comparative toxic unit for ecosystems (CTUe)",
-    ],
+    ] + [m for m in p.lcia_methods if "relics" in m.lower()][-3:],
     regions=[
         "CH",
     ],
     scenarios=p.scenarios.pathway.values.tolist(),
     years=[
-        2035,
+        2020,
+        2030,
+        2040,
+        2050
     ],
     variables=vars,
-    use_distributions=3,
+    use_distributions=500,
     subshares=True,
-    multiprocessing=False,
+    multiprocessing=True,
 )
+
+p.export_results()
diff --git a/pathways/lca.py b/pathways/lca.py
index a886e1e..f138bb1 100644
--- a/pathways/lca.py
+++ b/pathways/lca.py
@@ -356,6 +356,7 @@ def process_region(data: Tuple) -> dict[str, ndarray[Any, dtype[Any]] | list[int
     id_uncertainty_indices = None
     id_uncertainty_values = None
     id_technosphere_indices = None
+    id_iter_results_array = None
 
     if use_distributions == 0:
         # Regular LCA calculations
@@ -374,7 +375,6 @@ def process_region(data: Tuple) -> dict[str, ndarray[Any, dtype[Any]] | list[int
         iter_results, iter_param_vals = [], []
         with CustomFilter("(almost) singular matrix"):
             for iteration in range(use_distributions):
-                print(f"------ Iteration {iteration + 1}/{use_distributions}...")
                 next(lca)
                 lca.lci()
 
@@ -395,6 +395,12 @@ def process_region(data: Tuple) -> dict[str, ndarray[Any, dtype[Any]] | list[int
         lci_results = np.array(iter_results)
         lci_results = np.quantile(lci_results, [0.05, 0.5, 0.95], axis=0)
 
+        total_results = np.array(iter_results).sum(-1).sum(1)
+
+        # Save the iterations results to disk
+        id_iter_results_array = uuid.uuid4()
+        np.save(file=DIR_CACHED_DB / f"{id_iter_results_array}.npy", arr=total_results)
+
         # Save the uncertainty indices and values to disk
         id_uncertainty_indices = uuid.uuid4()
         np.save(
@@ -430,6 +436,7 @@ def process_region(data: Tuple) -> dict[str, ndarray[Any, dtype[Any]] | list[int
     d["uncertainty_params"] = id_uncertainty_indices
     d["uncertainty_vals"] = id_uncertainty_values
     d["technosphere_indices"] = id_technosphere_indices
+    d["iterations_results"] = id_iter_results_array
 
     return d
 
diff --git a/pathways/pathways.py b/pathways/pathways.py
index a4ce35d..8353bba 100644
--- a/pathways/pathways.py
+++ b/pathways/pathways.py
@@ -391,8 +391,6 @@
     def _fill_in_result_array(
         self, results: dict, use_distributions: int, shares: [None, dict], methods: list
     ) -> None:
-        # Assuming DIR_CACHED_DB, results, and self.lca_results are already defined
-
         # Pre-loading data from disk if possible
         cached_data = {
             data["id_array"]: load_numpy_array_from_disk(
@@ -484,6 +482,15 @@
             if region != "other"
         }
 
+        iteration_results = {
+            data["iterations_results"]: load_numpy_array_from_disk(
+                DIR_CACHED_DB / f"{data['iterations_results']}.npy",
+            )
+            for coord, result in results.items()
+            for region, data in result.items()
+            if region != "other"
+        }
+
         for coord, result in results.items():
             model, scenario, year = coord
 
@@ -505,8 +512,7 @@
                 if region == "other":
                     continue
 
-                id_array = data["id_array"]
-                total_impacts = np.squeeze(cached_data[id_array]).sum(-1).sum(1)
+                total_impacts = iteration_results[data["iterations_results"]]
 
                 df_sum_impacts = pd.concat(
                     [
@@ -568,7 +574,7 @@
             )
             df_GSA.to_excel(writer, sheet_name="Global Sensitivity Analysis", index=False)
 
-        print(f"Statistical \nanalysis: {export_path.resolve()}")
+        print(f"Statistical analysis: {export_path.resolve()}")
 
     def display_results(self, cutoff: float = 0.001) -> xr.DataArray:
         return display_results(self.lca_results, cutoff=cutoff)
diff --git a/pathways/stats.py b/pathways/stats.py
index 704026e..3dccd0f 100644
--- a/pathways/stats.py
+++ b/pathways/stats.py
@@ -274,7 +274,11 @@ def run_GSA_delta(
 
     # merge uncertainty_values and technology_shares
    # based on "iteration" and "region" columns
-    df_parameters = uncertainty_values.merge(technology_shares, on=["iteration", "region"])
+    if len(technology_shares) > 0:
+        df_parameters = uncertainty_values.merge(technology_shares, on=["iteration", "region"])
+    else:
+        df_parameters = uncertainty_values
+
     parameters = [param for param in df_parameters.columns if param not in ["iteration", "region"]]
 
     problem = {
@@ -286,14 +290,12 @@
         ],
     }
 
-    print(problem)
-
     methods = [m for m in total_impacts.columns if m not in ["iteration", "region"]]
 
     results = []
 
     for method in methods:
-        param_values = df_parameters[params].values
+        param_values = df_parameters[parameters].values
 
         # total impacts for the method
         Y = total_impacts[method].values
@@ -302,7 +304,7 @@
         results.append([f"Delta Moment-Independent Measure for {method}"])
         results.append(["Parameter", "Delta", "Delta Conf", "S1", "S1 Conf"])
 
-        for i, param in enumerate(params):
+        for i, param in enumerate(parameters):
             results.append(
                 [
                     param,
diff --git a/requirements.txt b/requirements.txt
index 6ec2c15..4f2c78a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-numpy==1.24.0
+numpy==1.24.4
 pathlib
 pandas
 scipy
@@ -6,7 +6,7 @@ xarray
 premise
 pyyaml
 bw_processing
-bw2calc >= 2.0.dev17
+bw2calc >= 2.0.dev18
 scikit-umfpack
 datapackage
 pyprind
diff --git a/setup.py b/setup.py
index 009e1ee..bc5a833 100644
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@ def package_files(directory):
     # Might need to change the directory name as well.
     include_package_data=True,
     install_requires=[
-        "numpy==1.24.0",
+        "numpy==1.24.4",
         "pathlib",
         "pandas",
         "xarray",
@@ -49,7 +49,7 @@
         "premise",
         "pyyaml",
         "bw_processing",
-        "bw2calc>=2.0.dev17",
+        "bw2calc>=2.0.dev18",
         "datapackage",
         "pyprind",
         "platformdirs",
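Note on the cache handoff the lca.py and pathways.py hunks introduce: process_region() now pre-aggregates the Monte Carlo iterations into per-iteration totals and saves them under a UUID key, which _fill_in_result_array() loads back instead of re-summing the full cached arrays. A minimal standalone sketch of that pattern follows; the `.sum(-1).sum(1)` aggregation, the uuid-named `.npy` files, and the `iterations_results` key mirror the diff, while the function names, the `cache` path, and the array shapes are hypothetical stand-ins.

    import uuid
    from pathlib import Path

    import numpy as np

    # Stand-in for the package's cache directory (DIR_CACHED_DB in the diff).
    DIR_CACHED_DB = Path("cache")
    DIR_CACHED_DB.mkdir(exist_ok=True)


    def save_iteration_totals(iter_results: list) -> uuid.UUID:
        # Collapse each iteration's array to a vector of totals, as
        # process_region() now does before caching, so the consumer no
        # longer has to re-aggregate the much larger cached array.
        total_results = np.array(iter_results).sum(-1).sum(1)
        key = uuid.uuid4()
        np.save(file=DIR_CACHED_DB / f"{key}.npy", arr=total_results)
        return key


    def load_iteration_totals(key: uuid.UUID) -> np.ndarray:
        # _fill_in_result_array() retrieves the file again via the
        # "iterations_results" id stored in the per-region result dict.
        return np.load(DIR_CACHED_DB / f"{key}.npy")


    # Round trip with three fake iterations (shapes illustrative only).
    rng = np.random.default_rng(0)
    key = save_iteration_totals([rng.random((2, 4, 5)) for _ in range(3)])
    print(load_iteration_totals(key).shape)  # one row of totals per iteration

Writing the totals once at computation time also keeps the worker-to-parent interface unchanged: only a UUID crosses the process boundary, which matters now that dev/timing.py switches multiprocessing back on.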