From 003b809206b066b460a918a84507fa5abd605cd3 Mon Sep 17 00:00:00 2001 From: "Thom R. Edwards" Date: Tue, 28 May 2024 10:36:51 -0600 Subject: [PATCH 1/8] updated warning text to be clearer --- gtep/gtep_solution.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py index 7774482..e211a7b 100644 --- a/gtep/gtep_solution.py +++ b/gtep/gtep_solution.py @@ -12,14 +12,6 @@ import json from pathlib import Path - -import matplotlib as mpl -import matplotlib.pyplot as plt -import pandas as pd -import numpy as np - -logger = logging.getLogger(__name__) - import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator @@ -121,7 +113,6 @@ def _to_dict(self): # split the name to figure out depth split_name = val[0].name.split(".") - # start at the bottom and nest accordingly tmp_dict = { "name": val[0].name, @@ -191,7 +182,6 @@ def discover_level_relationships(self, dispatch_level_dict): return matching_groups_dict - def _level_dict_to_df_workhorse( self, level_key, timeseries_dict, keys_of_interest, vars_of_interest ): @@ -397,7 +387,7 @@ def _plot_workhose_binaries(self, fig.align_labels() fig.suptitle(f"{parent_key_string}") - fig.savefig(f"{save_dir}{parent_key_string}_{pretty_title.replace(" ", "_")}.png") + fig.savefig(f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}.png") plt.close() @@ -469,6 +459,7 @@ def _level_plot_workhorse( # check if it has a bracketed relationship, and if it does go ahead, otherwise skip try: # print(this_primal) + primal_category = this_primal.split("[")[0] primal_name = this_primal.split("[")[1].split("]")[0] @@ -482,7 +473,7 @@ def _level_plot_workhorse( this_key ][this_primal] except IndexError as iEx: - print(f"[WARNING] _level_plot_workhorse has encountered an error: Attempted to split out {this_key}, failed with error {iEx}. Skipping.") + print(f"[WARNING] _level_plot_workhorse has encountered an error: Attempted to split out {this_primal} from {this_key}, failed with error {iEx}. Skipping.") level_period_dict["primals_by_category"] = primals_by_category level_period_dict["primals_by_name"] = primals_by_name level_timeseries.append(level_period_dict) @@ -608,3 +599,4 @@ # "spinningReserve[4_STEAM]", # ] + pass
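A note on the savefig fix in the patch above: before PEP 701 landed in Python 3.12, an f-string expression could not reuse the f-string's own quote character, so nesting pretty_title.replace(" ", "_") inside a double-quoted f-string was a SyntaxError on older interpreters. A minimal sketch of the portable form; the title and directory values here are made up for illustration:

    # On Python < 3.12, reusing the outer quote character inside an f-string
    # expression is a SyntaxError, so the inner quotes must differ.
    pretty_title = "Generator State"   # hypothetical title
    save_dir = "./plots/"              # hypothetical output directory
    filename = f"{save_dir}{pretty_title.replace(' ', '_')}.png"
    print(filename)  # ./plots/Generator_State.png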
Edwards" Date: Fri, 31 May 2024 15:58:59 -0600 Subject: [PATCH 2/8] implemented expression saving and plotting prototype --- gtep/gtep_solution.py | 268 +++++++++++++++++++++++++++++++----------- 1 file changed, 198 insertions(+), 70 deletions(-) diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py index e211a7b..28e2833 100644 --- a/gtep/gtep_solution.py +++ b/gtep/gtep_solution.py @@ -49,7 +49,9 @@ def load_from_model(self, gtep_model): self.len_reps = gtep_model.len_reps # int self.num_commit = gtep_model.num_commit # int self.num_dispatch = gtep_model.num_dispatch # int + self.expressions = {expr.name: value(expr) for expr in gtep_model.model.component_data_objects(Expression) if ("Commitment" in expr.name) or ("Investment" in expr.name)} # add in + pass def read_json(self, filepath): # read a json file and recover a solution primals @@ -72,6 +74,7 @@ def _to_dict(self): "best_feasible_objective": self.results.best_feasible_objective, "best_objective_bound": self.results.best_objective_bound, "wallclock_time": self.results.wallclock_time, + "expressions": self.expressions, } # "best_feasible_objective", "best_objective_bound", and "wallclock_time" are all numbers, dont need subhandlers @@ -108,6 +111,7 @@ def _to_dict(self): # things are either vars (which have some sort of signifier in [] brackets) or are an attribute, which dont # the name variable will give it away results_dict["primals_tree"] = {} + results_dict["expressions_tree"] = {} for key, val in self.results.solution_loader.get_primals()._dict.items(): # split the name to figure out depth @@ -143,17 +147,38 @@ def nested_set(this_dict, key, val): this_dict[key[0]] = val nested_set(results_dict["primals_tree"], split_name, tmp_dict) + + pass + + for key, val in self.expressions.items(): + # split the name to figure out depth + split_name = key.split(".") + + # start at the bottom and nest accordingly + tmp_dict = { + "value": val, + } + + # allocate the nested dictionary + def nested_set(this_dict, key, val): + if len(key) > 1: + # check if it's a binary var and pull up one layer + this_dict.setdefault(key[0], {}) + nested_set(this_dict[key[0]], key[1:], val) + else: + this_dict[key[0]] = val + + nested_set(results_dict["expressions_tree"], split_name, tmp_dict) + # split out expressions + self.expressions_tree = results_dict["expressions_tree"] + + # mint the final dictionary to save out_dict = {"data": self.data.data, "results": results_dict} self.primals_tree = results_dict["primals_tree"] return out_dict - def plot_investment_level(self): - - # can grab bus names from - pass - def discover_level_relationships(self, dispatch_level_dict): list_of_keys = list(dispatch_level_dict.keys()) @@ -169,7 +194,8 @@ def discover_level_relationships(self, dispatch_level_dict): relationships_dict[primal_name].add(primal_category) except IndexError as iEx: - print(f"[WARNING] discover_level_relationships has encountered an error: Attempted to split out {this_key}, failed with error: \"{iEx}\". Skipping.") + print(f"[WARNING] discover_level_relationships has encountered an error: Attempted to split out {this_key}, failed with error: \"{iEx}\". 
Assigning as axuilary.") + # convert sets to frozensets to be hashable for this_key in relationships_dict: relationships_dict[this_key] = frozenset(relationships_dict[this_key]) @@ -182,7 +208,7 @@ def discover_level_relationships(self, dispatch_level_dict): return matching_groups_dict - def _level_dict_to_df_workhorse( + def _level_relationship_dict_to_df_workhorse( self, level_key, timeseries_dict, keys_of_interest, vars_of_interest ): df_data_dict = {} @@ -229,7 +255,7 @@ def _level_dict_to_df_workhorse( data_df = data_df.fillna(value=np.nan) return data_df except ValueError as vEx: - print(f"[WARNING] _level_dict_to_df_workhorse attempted to create dataframe and failed: {vEx}") + print(f"[WARNING] _level_relationship_dict_to_df_workhorse attempted to create dataframe and failed: {vEx}") return pd.DataFrame() def _plot_workhorse_relational(self, @@ -391,7 +417,8 @@ def _plot_workhose_binaries(self, plt.close() - def _level_df_to_plot( + + def _level_relationship_df_to_plot( self, level_key, df, @@ -430,6 +457,110 @@ def _level_df_to_plot( plot_bounds, save_dir) + def _expressions_plot_workhorse( + self, + level_key, + upper_level_dict, + parent_key_string, + save_dir="./", + plot_bounds=False, + ): + # go through a commitment period and parse out the dispatch periods + # slice out all keys pertaining to dispatchPeriod + level_period_keys = [this_key for this_key in upper_level_dict.keys() if (level_key in this_key) ] + + # scan level period for keys that have values associated + keys_of_vals_of_interest = [] + for this_key in upper_level_dict[level_period_keys[0]]: + if 'value' in upper_level_dict[level_period_keys[0]][this_key]: + keys_of_vals_of_interest.append(this_key) + + print(keys_of_vals_of_interest) + + # if we found things that have values, make a dictionary and then plot + # why do we need to do it by level first and then pivot the dict? 
Because we don't actually know what the "periods" numbers are, they might be arbitrary and in an arbitrary order + if len(keys_of_vals_of_interest) > 0: + # check all the periods and sort them out + vals_dict = {} + + for this_key in upper_level_dict: + level_period_number = int(re.split(r'\[|\]', this_key.split(level_key)[1])[1]) + vals_dict.setdefault(level_period_number, {}) + for this_val_key in keys_of_vals_of_interest: + vals_dict[level_period_number][this_val_key] = upper_level_dict[this_key][this_val_key]['value'] + + + print(vals_dict) + + # now pivot the dictionary to make a dataframe + # make a dictionary where the keys are the top layer + df_dict = {key:[] for key in keys_of_vals_of_interest} + sorted_vals_period = sorted(vals_dict) + df_dict['period_number'] = sorted_vals_period + for this_val_period in sorted_vals_period: + for this_key in keys_of_vals_of_interest: + df_dict[this_key].append(vals_dict[this_val_period][this_key]) + + print(df_dict) + expression_level_df = pd.DataFrame(df_dict) + print(expression_level_df) + + # plot the DF + # figure out how big the plot needs to be + gridspec_height = 2*len(keys_of_vals_of_interest) + gridspec_width = 2 + fig_width_padding = 0 + fig_height_padding = 0 + max_figheight = 48 + total_periods = len(expression_level_df) + key_gridspec_div = floor(gridspec_height/len(keys_of_vals_of_interest)) # number of gridspec heights a key plot can be + + # to make things look nice, we don't want height or width to be more than twice the other + fig_width = (total_periods*gridspec_width*4)+fig_width_padding + fig_width = min(max_figheight, fig_width) + fig_height = (2*gridspec_height)+fig_height_padding + if fig_width/fig_height > 2: + fig_height = floor(fig_width/2) + elif fig_height/fig_height > 2: + fig_height = floor(fig_height/2) + + # set up plot + fig = plt.figure(figsize=(fig_width, + fig_height), + tight_layout=False) # (32, 16) works well for 4 plots tall and about 6 periods wide per plot + gs = fig.add_gridspec(gridspec_height, gridspec_width) + # plot out the keys of interest + + pretty_title = "Expression" + + + ax_koi_list = [] + for ix_koi, this_koi in enumerate(keys_of_vals_of_interest): + ax_koi = fig.add_subplot(gs[(ix_koi*key_gridspec_div):((ix_koi+1)*key_gridspec_div), :]) + ax_koi_list.append(ax_koi) + + ax_koi.plot( + expression_level_df['period_number'], + expression_level_df[this_koi], + label=f"{this_koi}", + marker="o", + ) + ax_koi.set_ylabel("Value $[n]$") + ax_koi.xaxis.set_major_locator(MaxNLocator(integer=True)) + ax_koi.legend() + + # label axes + ax_koi_list[-1].set_xlabel(f"{level_key} $[n]$") + ax_koi_list[0].set_title(f"{pretty_title} by Type") + + + fig.align_labels() + fig.suptitle(f"{parent_key_string}") + fig.savefig(f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}.png") + plt.close() + + + def _level_plot_workhorse( self, level_key, @@ -442,6 +573,7 @@ def _level_plot_workhorse( level_timeseries = [] # slice out all keys pertaining to dispatchPeriod level_period_keys = [this_key for this_key in upper_level_dict.keys() if (level_key in this_key) ] + # aux_var_dict = {} for this_key in level_period_keys: level_period_dict = {} # cut out which dispatch period this is @@ -457,27 +589,39 @@ def _level_plot_workhorse( # split key on brackets to get title for this_primal in upper_level_dict[this_key]: # check if it has a bracketed relationship, and if it does go ahead, otherwise skip + tmp_save_primal = upper_level_dict[this_key][this_primal] try: - # print(this_primal) - primal_category =
this_primal.split("[")[0] - primal_name = this_primal.split("[")[1].split("]")[0] + # check if this_primal is splitable on brackets + # based on the name of the primal, split out the "category" and the "name" + # Example: commitmentPeriod[1] + # - category: "commitmentPeriod" + # - name: "1" + primal_category = this_primal.split("[")[0] + primal_name = this_primal.split("[")[1].split("]")[0] + + # create one view that shares the categories, and one the shares the names primals_by_category.setdefault(primal_category, {}) primals_by_name.setdefault(primal_name, {}) primals_by_category[primal_category][primal_name] = ( - upper_level_dict[this_key][this_primal] + tmp_save_primal ) - primals_by_name[primal_name][primal_category] = upper_level_dict[ - this_key - ][this_primal] + primals_by_name[primal_name][primal_category] = tmp_save_primal + except IndexError as iEx: print(f"[WARNING] _level_plot_workhorse has encountered an error: Attempted to split out {this_primal} from {this_key}, failed with error {iEx}. Skipping.") + # aux_var_dict.setdefault(this_primal, []) + # aux_var_dict[this_primal].append(this_key) level_period_dict["primals_by_category"] = primals_by_category level_period_dict["primals_by_name"] = primals_by_name level_timeseries.append(level_period_dict) + # # clean up the aux vars based on what we found + # for aux_var_primal_name, aux_var_category_name in aux_var_dict.items(): + # print(aux_var_primal_name, aux_var_category_name) + # sort by the dispatch period number level_timeseries = sorted( level_timeseries, key=lambda x: x["period_number"] @@ -487,6 +631,7 @@ def _level_plot_workhorse( # ASSUMES that all the dispatch levels have the exact same underlying variables and relationships level_relationships = self.discover_level_relationships(upper_level_dict[level_period_keys[0]]) + # plot relationships for vars_of_interest, keys_of_interest in level_relationships.items(): # sort the vars and keys for consistency @@ -494,7 +639,7 @@ def _level_plot_workhorse( tmp_koi = sorted(keys_of_interest) # make a df for debug and also easy tabularness for plots - this_df_of_interest = self._level_dict_to_df_workhorse( + this_df_of_interest = self._level_relationship_dict_to_df_workhorse( level_key, level_timeseries, tmp_koi, tmp_voi ) @@ -505,7 +650,7 @@ def _level_plot_workhorse( this_pretty_title = ", ".join(tmp_voi) # plot it - self._level_df_to_plot( + self._level_relationship_df_to_plot( level_key, this_df_of_interest, tmp_koi, @@ -514,11 +659,11 @@ def _level_plot_workhorse( pretty_title=this_pretty_title, save_dir=save_dir, plot_bounds=plot_bounds) - + def plot_levels(self, save_dir="."): - # plot or represent some dispatch periods or something I don't know + # plot or represent primals trees for this_root_level_key in self.primals_tree: if "investmentStage" in this_root_level_key: investment_level_cut = self.primals_tree[this_root_level_key] @@ -548,55 +693,38 @@ def plot_levels(self, save_dir="."): self._level_plot_workhorse( "dispatchPeriod",commitment_level_cut, parent_key_string, save_dir ) - - pass - # things to cut on - # renewableGeneration - # renewableCurtailment - # powerFlow - # busAngle - # thermalGeneration - # spinningReserve - # quickstartReserve - - # [ - # "renewableGeneration[10_PV]", - # "renewableCurtailment[10_PV]", - # "renewableGeneration[2_RTPV]", - # "renewableCurtailment[2_RTPV]", - # "renewableGeneration[1_HYDRO]", - # "renewableCurtailment[1_HYDRO]", - # "renewableGeneration[4_WIND]", - # "renewableCurtailment[4_WIND]", - # "powerFlow[branch_2_3]", - # 
"busAngle[bus3]", - # "busAngle[bus2]", - # "powerFlow[branch_1_10]", - # "busAngle[bus10]", - # "busAngle[bus1]", - # "powerFlow[branch_3_4_1]", - # "busAngle[bus4]", - # "powerFlow[branch_4_10]", - # "powerFlow[branch_1_4]", - # "powerFlow[branch_1_2]", - # "powerFlow[branch_3_4_0]", - # "loadShed[bus1]", - # "thermalGeneration[4_CC]", - # "thermalGeneration[4_STEAM]", - # "loadShed[bus4]", - # "thermalGeneration[10_STEAM]", - # "loadShed[bus10]", - # "loadShed[bus2]", - # "thermalGeneration[3_CT]", - # "loadShed[bus3]", - # "quickstartReserve[3_CT]", - # "quickstartReserve[10_STEAM]", - # "quickstartReserve[4_CC]", - # "quickstartReserve[4_STEAM]", - # "spinningReserve[3_CT]", - # "spinningReserve[10_STEAM]", - # "spinningReserve[4_CC]", - # "spinningReserve[4_STEAM]", - # ] + # plot or represent expressions + self._expressions_plot_workhorse( + "investmentStage", self.expressions_tree, 'investmentStage', save_dir + ) + for this_root_level_key in self.expressions_tree: + if "investmentStage" in this_root_level_key: + investment_level_cut = self.expressions_tree[this_root_level_key] + parent_key_string = f"{this_root_level_key}" + self._expressions_plot_workhorse( + "representativePeriod", investment_level_cut, parent_key_string, save_dir + ) + + for this_inv_level_key in self.expressions_tree[this_root_level_key].keys(): + if "representativePeriod" in this_inv_level_key: + representative_level_cut = self.expressions_tree[this_root_level_key][this_inv_level_key] + parent_key_string = f"{this_root_level_key}_{this_inv_level_key}" + self._expressions_plot_workhorse( + "commitmentPeriod", representative_level_cut, parent_key_string, save_dir + ) + + for this_rep_level_key in self.expressions_tree[ + this_root_level_key + ][this_inv_level_key].keys(): + if "commitmentPeriod" in this_rep_level_key: + commitment_level_cut = self.expressions_tree[ + this_root_level_key + ][this_inv_level_key][this_rep_level_key] + + parent_key_string = f"{this_root_level_key}_{this_inv_level_key}_{this_rep_level_key}" + + self._expressions_plot_workhorse( + "dispatchPeriod",commitment_level_cut, parent_key_string, save_dir + ) pass From 1ae23e7348a5cad1cc1cbd6e7c53444beed5746a Mon Sep 17 00:00:00 2001 From: "Thom R. Edwards" Date: Wed, 3 Jul 2024 09:16:37 -0600 Subject: [PATCH 3/8] copied over bool/int fix and updated gitignore --- .gitignore | 6 +++++- gtep/gtep_solution.py | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 0da3c14..df971ad 100644 --- a/.gitignore +++ b/.gitignore @@ -168,4 +168,8 @@ cython_debug/ *.json # ignore .DS_Store -*.DS_Store \ No newline at end of file +*.DS_Store + +# ignore "data" +*.csv +*.xlsx \ No newline at end of file diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py index 28e2833..7d8b292 100644 --- a/gtep/gtep_solution.py +++ b/gtep/gtep_solution.py @@ -231,7 +231,7 @@ def _level_relationship_dict_to_df_workhorse( if 'is_binary' in period_dict["primals_by_name"][this_koi][this_voi]: if period_dict["primals_by_name"][this_koi][this_voi]['is_binary']: df_data_dict[f"{this_koi}_{this_voi}_value"].append( - bool(int(period_dict["primals_by_name"][this_koi][this_voi]["value"])) # have to cast to int because there are floating point errors + bool(round(period_dict["primals_by_name"][this_koi][this_voi]["value"])) # have to cast to int because there are floating point errors ) else: df_data_dict[f"{this_koi}_{this_voi}_value"].append( From 3d6b715eb1793c474e127d362175088dac4e6e9f Mon Sep 17 00:00:00 2001 From: "Thom R. 
Edwards" Date: Wed, 3 Jul 2024 09:16:54 -0600 Subject: [PATCH 4/8] stub for mapping bus locations to "data" --- gtep/map_bus_to_vars.py | 255 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 255 insertions(+) create mode 100644 gtep/map_bus_to_vars.py diff --git a/gtep/map_bus_to_vars.py b/gtep/map_bus_to_vars.py new file mode 100644 index 0000000..7d1bd03 --- /dev/null +++ b/gtep/map_bus_to_vars.py @@ -0,0 +1,255 @@ +import pandas as pd +from pathlib import Path + +from geopy.geocoders import Nominatim + +# read bus data +bus_data_df = pd.read_csv("Bus_data.csv") + +# read pricing data +pricing_data_book_names = [ + "Land-Based Wind", + "Solar - CSP", + "Natural Gas_FE", + "Coal_FE", + "Biopower", + "Nuclear", + "Geothermal", + "Solar - Utility PV", +] +pricing_data_sheet = "./gtep/2022 v3 Annual Technology Baseline Workbook Mid-year update 2-15-2023 Clean.xlsx" +pricing_data_sheet_path = Path(pricing_data_sheet) +pricing_dict = {} +for this_book_name in pricing_data_book_names: + pricing_dict[this_book_name] = pd.read_excel( + pricing_data_sheet_path, this_book_name + ) + +geolocator = Nominatim(user_agent="GetLoc") +county_lookup = [] +for this_lat, this_lon in zip( + bus_data_df["Bus latitude"], bus_data_df["Bus longitude"] +): + tmp_county = geolocator.reverse([this_lat, this_lon]) + county_lookup.append(tmp_county.raw["address"]["county"]) + +bus_data_df["County"] = county_lookup +bus_data_df.to_csv("Bus_data_extended.csv", index=False) + +# tech_baseline_to_suitability_map + +# Wind || SolarThermal || NatGasCT || NatGasCC || Coal || Biomass || Nuclear || GeoThermal || SolarPV +# Land-Based Wind || Solar - CSP || Natural Gas_FE || Natural Gas_FE || Coal_FE || Biopower || Nuclear || Geothermal || Solar - Utility PV + +# Net Capacity Factor || CAPEX || Construction Financing Cost || Overnight Capital Cost || fixed operation etc. || variable operation etc. || fuel costs($/MWh) (fuel costs won't exist for all types, can be assumed 0 in that case) +# Summart_CF || Summary_CAPEX || Scrape || Scrape || Scrape || Scrape || Summary_Fuel + + +# Table A.1 Gives us SOME hints on how we can map things, or how things from C.1 and C.2 depend on each other. +# Everything outside of Solar PV requires "Low Urban Density", so we're just going to completely ignore that entirely. +# Wind needs to map to Wind Zones +# Solar Thermal need Solar Resource and Water Vailability +# NG CT needs NG Supply, and Non-attainment is requird if replacing higher-emission generation +# NG CC needs NG Supply, and Water Availability +# Coal needs Rail, Can't be in Non-Attainment Area, and Water Availability +# Biomass needs Rail and Biomass +# Nuclear needs Water Availability +# Geothermal needs Geothermal Potential +# Solar PV needs Solar Resource + +# mapping Table Table C.1: County Ratings Based on Resource Type -------> Table C.2: Suitability of County for Technology Type* +# +# Resource C.1 Label C.1 Values C.2 Label C.2 Values +# Wind Wind Condition 1, 2, 3, 4 Wind NA (one Avg), Avg (one NA), Good, Good +# SolarThermal SolarThermalCond. 5.5, 5.75, 6, 6.5, 6.75, 7 SolarThermal NA, NA, NA, NA or Avg, NA or Good, NA +# NatGasCT GasPipelineDensityCapacity H, M, L, VL NatGasCT (Mixed between Good, Avg, RO, and NA; mostly Good), Avg. (one Good, one NA), NA, NA +# NatGasCC GasPipelineDensityCapacity H, M, L, VL NatGasCC (Mixed between Good, Avg, RO, and NA; mostly Good), Avg. (one Good, some NA), NA, NA +# NatGasCC Surface Water H, M, L, VL NatGasCC (Mixed between Good, Avg, RO, and NA; mostly Good), Avg. 
(one Good, some NA), NA, NA +# Coal (Railroad Density) RailroadDensity H, M, L, VL Coal (Mix of Good and NA), (Mix of Good and NA), NA, NA +# Biomass Biomass H, M, L Biomass (Mix of Good and NA), NA, NA +# Geothermal Geothermal H, M, NA Geothermal Good (one NA), NA (one Good), NA +# SolarPV SolarPV VH, H, M SolarPV Good, Good, Avg + +# Workbook identified variables +# Generation Type (Workbook) Generation Type (Table C.1) Generation Type (Table C.2) Resource Classification (Workbook) Resource Rating (Table C.1) Technology Rating (Table C.2) +# Land-based Wind Wind Cond. Wind Class 1-10 (lower is better) Poor (1) - Very Good (7) Good, Avg., NA +# Solar-CSP Solar Thermal Condition Solar Thermal Class 2, 3, or 7 (lower is better) 5.5, 5.75, 6, 6.5, 6.75, 7 Good, Avg., NA +# Natural Gas_FE Gas Pipeline Density/Capacity NatGasCT F-FrameCT H, M, L, VL Good, Avg., RO, NA +# Natural Gas_FE Gas Pipeline Density/Capacity NatGasCC G-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, H-FrameCCMaxCCS H, M, L, VL Good, Avg., RO, NA +# Natural Gas_FE Surface Water NatGasCC G-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, H-FrameCCMaxCCS H, M, L, VL Good, Avg., RO, NA +# Coal_FE Railroad Density Coal new, newTo2ndGen, newToTT, CCS95, CCS95To2ndGen, CCS95ToTT, MaxCCS, MaxCCSTo2ndGen, MaxCCSToTT, IGCC H, M, L, VL Good, NA +# Biopower Biomass Biomass Dedicated H, M, L Good, NA +# Nuclear Surface Water Nuclear AP1000, SmallModularReactor H, M, L, VL Good, NA +# Geothermal Geothermal GeoThermal HydroFlash, HydroBinary, NFEGSFlash, NFEGSBinary, DeepEGSFlash, DeepEGSBinary H, M, NA Good, NA +# Solar - Utility PV Solar PV SolarPV Class 1-10 (lower is better) VH, H, M Good, Avg. + +# need to combine ratings to say is the RESOURCE AVAILABLE and is the TECHNOLOGY SUITABLE and then map that to the PRICE of THINGS for that County +# It's clear from the PDF that C.2 should be used if possible: "Table C.2 shows a summary of [A.1 constraints and C.1 favorable counties] classification and is used in determining potential locations for generator siting." + +# --- Land-based Wind --- +# Resource Classification (Workbook) have a measurement of wind speed, which helps. Workbook: 1.7 m/s (Class10) to >9.0 m/s (Class1) +# Resource Rating also has a class rating, BUT it's reversed: "zone 1" is the worst, "zone 7" is the best. Table C.1: Condition Rating (CR) 1-7 +# Mapping the counties together we can align the Condition Rating with the Technology Rating and get NA = 1, "Avg" == 2, "Good" == 3 or 4. +# For sake of argument, Let's just assume that CR1 --> Class10, CR2 --> Class9, etc. + +# --- Solar-CSP --- +# The Resource Rating here is actually decent, and is measured in kWh/m^2/day. This is actually useful. Praise be. +# - Good = 6.5-7 kWh/m^2/day +# - Average = 6-6.5 kWh/m^2/day +# - Below Average = 5.5-6 kWh/m^2/day +# - Poor = <=5.5 kWh/m^2/day +# The Workbook only has 3 options, Class 2, 3 or 7 (lower is better) AND THEY HAVE UNITS. 
+# - Class 7 = 6.16 kWh/m^2/day +# - Class 3 = 7.1 kWh/m^2/day +# - Class 2 = 7.46 kWh/m^2/day +# which means we can map these: +# - Good = Class 3 or Class 2 +# - Average = Class 7 +# - Below Average = NONE +# - Poor = NONE +# but this isn't super useful, so I recommend we do this: +# *************************************************************************************************************************************************** +# - C.1 Rating Workbook Classification +# - Good Class 2 +# - Average Class 3 +# - Below Average Class 7 +# - Poor None +# *************************************************************************************************************************************************** + +# --- NatGasCT --- +# The difference between NatGasCT and NatGasCC: +# NG CT needs NG Supply, and Non-attainment is required if replacing higher-emission generation +# NG CC needs NG Supply, and Water Availability +# NatGasCT is easy: there is only one mapping you can do. +# *************************************************************************************************************************************************** +# - C.2 Rating Workbook Classification +# - Good/Avg/RO F-FrameCT +# - NA None +# *************************************************************************************************************************************************** + +# --- NatGasCC --- +# NatGasCC is hell. We're in hell. +# In order to solve the mapping we need to know how levels of Post Combustion Carbon Capture (CCS) maps to Surface Water and Gas Pipeline Density/Capacity. +# There is something between F-Frame (old) and H-Frame (new) that matters, but I don't know what requires what. It sounds like a style. +# CCS evolves upwards, either it's "normal" or it's 95% or it's "Max" (97%). +# How any of that maps to either Surface Water or to Pipeline Density is completely lost on me. +# Not solving now. Placeholder mapping +# F-FrameCC, H-FrameCC, F-FrameCC95CCS, H-FrameCC95CCS, F-FrameCCMaxCCS, H-FrameCCMaxCCS +# *************************************************************************************************************************************************** +# - C.2 Rating Workbook Classification +# - Good H-FrameCCMaxCCS +# - Avg H-FrameCC95CCS +# - RO None +# - NA None +# *************************************************************************************************************************************************** + +# --- Coal_FE --- +# Similar to NatGas CC, there's so many possible mappings, we need a lot more information. +# Not solving now. Placeholder mapping +# Heck coal, everything NA? +# Coal isn't real, it can't hurt you +# *************************************************************************************************************************************************** +# - C.2 Rating Workbook Classification +# - Good None (ignore new, newTo2ndGen, newToTT, CCS95, CCS95To2ndGen, CCS95ToTT, MaxCCS, MaxCCSTo2ndGen, MaxCCSToTT, IGCC) +# - NA None +# *************************************************************************************************************************************************** + +# --- Biopower --- +# Biopower is also easy, there's only one mapping. Either it's there or not. 
+# *************************************************************************************************************************************************** +# - C.2 Rating Workbook Classification +# - Good Dedicated +# - NA None +# *************************************************************************************************************************************************** + +# --- Nuclear --- +# Nuclear also needs more information to be useful, but we can at least pick one. +# *************************************************************************************************************************************************** +# - C.2 Rating Workbook Classification +# - Good SmallModularReactor (pretend AP1000 isn't a thing) +# - NA None +# *************************************************************************************************************************************************** + +# --- Geothermal --- +# Once again, we need a lot more info, but there's some things to note: +# Deep EGS (DeepEGSFlash and DeepEGSBinary) both have no possible sites. We can probably safely ignore those. +# Beyond that, we have Hydrothermal and NF-EGS. Among those, there are Flash (>=200C) and Binary (<200C) +# The PDF has a map showing three different zones for Geothermal: Hydrothermal, Geopressure, and Hot Dry Rock. +# Hydrothermal is probably Hydrothermal in both cases. That only really makes sense for Binary, because the map says that the highest temperature is +# ~160F (71C), which is well below the 200C needed for Flash. Geopressure can get high enough temperature, but is deep. +# Geopressure is very deep (13000 ft ~= 3.9 km), so this likely applies to the Deep EGS (3-6 km). Let's ignore it for now. +# Hot Dry Rock is very explicity "little or no water", so let's seperate Hydro from NFEGS +# C.2 only gives us High, Medium and NA. Everything is pain. +# Looking at the workbook, Flash and Binary appear to be identical in the data and we can ignore Hydro and NFEGS, until you get to costs. +# If it was the other way around, we could instead map "Medium" and "High" to production. +# However, one las thing: comparing C.1 and C.2 shows us that only "High" get "Good" ratings outside of one, and "Medium" get "NA" ratings outside of +# one. So let's just assign "Good" to something and NA to everything None +# *************************************************************************************************************************************************** +# - C.2 Rating Workbook Classification +# - High NFEGSBinary (Ignore HydroFlash, HydroBinary, NFEGSFlash) +# - NA None +# *************************************************************************************************************************************************** + +# --- Solar - Utility PV --- +# For some reason, we can't assign any actual units here, even though we did so on the other solar classificaitons. Cool. Thanks. Appreciate that. +# Instead we get "Very High", "High", and "Medium" from C.1 and "Good" or "Avg." from C.2. +# Mapping C.1 to C.2 we get that Very High = Good, High = Good and Medium = Avg. So let's use C.1 because it's more detailed. +# The workbook gives us Class 1-10. Lower is better. Everything is pain. +# BUT I FOUND A REFERENCE: +# https://atb.nrel.gov/electricity/2022/utility-scale_pv +# Resource Class GHI Bin (kWh/m2/day) Mean AC Capacity Factor Area (sq. 
km) + # 1 >5.75 32.80% 216,551 + # 2 5.5–5.75 31.80% 349,894 + # 3 5.25–5.5 30.30% 372,764 + # 4 5–5.25 28.70% 497,444 + # 5 4.75–5 26.80% 779,720 + # 6 4.5–4.75 25.80% 870,218 + # 7 4.25–4.5 24.60% 727,918 + # 8 4–4.25 23.40% 828,438 + # 9 3.75–4 22.30% 794,496 + # 10 <3.75 20.40% 163,120 + # + # Notably, not close to the other Solar classification with actual units, so good thing I didn't use that *teehee swings legs* + # I also found an image: + # https://atb.nrel.gov/img/electricity/2021/p19/v1/solar-annual-ghi-2018-usa-scale-01.png + # So this shows that Texas actually spans 5 different Classes. I will not be back deriving them here. Importantly, it spans Class 1-5, going west to + # east, which means we can probably just figure it out. Using the weather zones: + # + # - FarWest gets Very High and High ratings. --> Class 1 + # - West is all High --> Class 2 + # - NorthCentral is all High --> Class 2 + # - North is all High --> Class 2 + # - East has mostly High and some Medium --> Class 3 + # - SouthCentral has mostly High with some Medium --> Class 3 + # - South has mostly Medium with some Highs --> Class 4 + # - Coast is all Medium --> Class 5 + # + # Clearly the mapping above won't actually work (they don't even correspond to the image I made them from) but instead we can use it to try to fake it. + # So let's just throw some classifications out there and hope. + # *************************************************************************************************************************************************** + # - C.1 Rating Workbook Classification + # - Very High Class 1 + # - High Class 2 + # - Medium Class 4 + # *************************************************************************************************************************************************** + + + # Table A.1 Generation Technologies and Resource Limitations from PDF: + # Resource Type Rating Resource Limitations + # Wind Good Low Urban Density; Wind Zone 3-4 + # Wind Average Low Urban Density; Wind Zone 2 + # Solar Thermal Good Low Urban Density; Good Direct Solar Resource; High Water Availability + # Solar Thermal Average Low Urban Density; Average to Good Direct Solar Resource; Medium to High Water Availability + # NG CT Good Low Urban Density; High Availability of Natural Gas Supply; Can Only Build in Non-Attainment Area if Replacing Higher-Emission Generation + # NG CT Average Low Urban Density; Medium Availability of Natural Gas Supply; Can Only Build in Non-Attainment Area if Replacing Higher-Emission Generation + # NG CC Good Low Urban Density; High Availability of Natural Gas Supply; Can Only Build in Non-Attainment Area if Replacing Higher-Emission Generation; Medium to High Water Availability + # NG CC Average Low Urban Density; Medium Availability of Natural Gas Supply; Can Only Build in Non-Attainment Area if Replacing Higher-Emission Generation; Medium to High Water Availability + # Coal Good Low Urban Density; Medium to High Availability of Rail Transportation; Cannot Build in Non-Attainment Area; Medium to High Water Availability + # Biomass Good Low Urban Density; Low to High Availability of Rail Transportation; High Biomass Availability + # Nuclear Good Low Urban Density; High Water Availability + # Geothermal Good Low Urban Density; High Geothermal Potential + # Solar PV Good High to Very High Total Solar Resource + # Solar PV Average Medium Total Solar Resource + + +pass
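A practical caveat on the Nominatim loop in the patch above: the public Nominatim service expects roughly one request per second, and reverse() can return a result whose address has no "county" key (or no result at all), which would crash the lookup into raw["address"]["county"]. A hedged sketch of a more defensive variant, using geopy's bundled RateLimiter; the county_for name and the "unknown" fallback are illustrative assumptions, not part of the patch:

    from geopy.geocoders import Nominatim
    from geopy.extra.rate_limiter import RateLimiter

    geolocator = Nominatim(user_agent="GetLoc")
    # RateLimiter spaces out calls to respect Nominatim's ~1 request/second policy
    reverse = RateLimiter(geolocator.reverse, min_delay_seconds=1)

    def county_for(lat, lon):
        # return the county name, or "unknown" if the point doesn't resolve
        location = reverse((lat, lon))
        if location is None:
            return "unknown"
        return location.raw.get("address", {}).get("county", "unknown")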
Edwards" Date: Wed, 3 Jul 2024 10:49:46 -0600 Subject: [PATCH 5/8] hard override for gen state order and pulled out investment layer plots --- gtep/gtep_solution.py | 91 ++++++++++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 35 deletions(-) diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py index 7d8b292..b76c9b3 100644 --- a/gtep/gtep_solution.py +++ b/gtep/gtep_solution.py @@ -384,7 +384,6 @@ def _plot_workhose_binaries(self, for axline_ix in range(len(df[level_key])): ax_bins.axvline(axline_ix+0.5, color='grey', linewidth=3, linestyle='dotted', alpha=0.5) # draw a seperator line between each level - for ix_key, this_koi in enumerate(keys): # make a dummy line to steal the color cycler and make a single item for the legend line, = ax_bins.plot( @@ -428,8 +427,12 @@ def _level_relationship_df_to_plot( pretty_title="Selected Data", plot_bounds=False, save_dir=".", + config={}, ): + # [HACK] hard coding the generator state order, to be fixed later + config['order_gen_state'] = ['genOff', 'genShutdown', 'genStartup', 'genOn'] + # check if ALL the possible things to look at are binaries all_binaries = True for ix, this_voi in enumerate(vars): @@ -439,6 +442,17 @@ def _level_relationship_df_to_plot( break if all_binaries: + # check the config to see if we have any overrides + if 'order_gen_state' in config: + # check that everything can be mapped over + matched_config_override = True + for item in vars: + if not item in config['order_gen_state']: + matched_config_override = False + break + if matched_config_override: + vars = config['order_gen_state'] + self._plot_workhose_binaries(level_key, df, keys, @@ -666,6 +680,13 @@ def plot_levels(self, save_dir="."): # plot or represent primals trees for this_root_level_key in self.primals_tree: if "investmentStage" in this_root_level_key: + # run the toplevel keys + parent_key_string = f"{this_root_level_key}" + self._level_plot_workhorse( + "investmentStage", self.primals_tree, "", save_dir + ) + + # run the representative period subkeys investment_level_cut = self.primals_tree[this_root_level_key] parent_key_string = f"{this_root_level_key}" self._level_plot_workhorse( @@ -694,37 +715,37 @@ def plot_levels(self, save_dir="."): "dispatchPeriod",commitment_level_cut, parent_key_string, save_dir ) - # plot or represent expressions - self._expressions_plot_workhorse( - "investmentStage", self.expressions_tree, 'investmentStage', save_dir - ) - for this_root_level_key in self.expressions_tree: - if "investmentStage" in this_root_level_key: - investment_level_cut = self.expressions_tree[this_root_level_key] - parent_key_string = f"{this_root_level_key}" - self._expressions_plot_workhorse( - "representativePeriod", investment_level_cut, parent_key_string, save_dir - ) - - for this_inv_level_key in self.expressions_tree[this_root_level_key].keys(): - if "representativePeriod" in this_inv_level_key: - representative_level_cut = self.expressions_tree[this_root_level_key][this_inv_level_key] - parent_key_string = f"{this_root_level_key}_{this_inv_level_key}" - self._expressions_plot_workhorse( - "commitmentPeriod", representative_level_cut, parent_key_string, save_dir - ) - - for this_rep_level_key in self.expressions_tree[ - this_root_level_key - ][this_inv_level_key].keys(): - if "commitmentPeriod" in this_rep_level_key: - commitment_level_cut = self.expressions_tree[ - this_root_level_key - ][this_inv_level_key][this_rep_level_key] - - parent_key_string = f"{this_root_level_key}_{this_inv_level_key}_{this_rep_level_key}" - - 
From a0a3e2cd70699fa84bac7e455d3c23d9ae9375e0 Mon Sep 17 00:00:00 2001 From: "Thom R. Edwards" Date: Wed, 3 Jul 2024 13:19:40 -0600 Subject: [PATCH 6/8] edge directional plotting prototyping --- gtep/gtep_solution.py | 209 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 173 insertions(+), 36 deletions(-) diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py index b76c9b3..63e221e 100644 --- a/gtep/gtep_solution.py +++ b/gtep/gtep_solution.py @@ -12,9 +12,9 @@ import json from pathlib import Path -import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator +import networkx as nx import pandas as pd import numpy as np import re @@ -50,8 +50,9 @@ def load_from_model(self, gtep_model): self.num_commit = gtep_model.num_commit # int self.num_dispatch = gtep_model.num_dispatch # int self.expressions = {expr.name: value(expr) for expr in gtep_model.model.component_data_objects(Expression) if ("Commitment" in expr.name) or ("Investment" in expr.name)} - # add in - pass + + def import_data_object(self, data_obj): + self.data = data_obj.md def read_json(self, filepath): # read a json file and recover a solution primals @@ -673,47 +674,183 @@ def _level_plot_workhorse( pretty_title=this_pretty_title, save_dir=save_dir, plot_bounds=plot_bounds) - + def _plot_graph_workhorse(self): + # testing networkx plots + + # construct graph object + G = nx.Graph() + + for item in self.data.data['elements']['branch']: + print(self.data.data['elements']['branch'][item]['from_bus'], '--->', self.data.data['elements']['branch'][item]['to_bus']) + + + + # fig = plt.figure(tight_layout=False) + # ax_graph = fig.add_subplot() + G = nx.Graph() + labels = {} + # add nodes + for item in self.data.data['elements']['bus']: + G.add_node(item) + labels[item] = item + + # do edges manually later + + + # set up plot + fig = plt.figure(figsize=(16, 8), tight_layout=False) # (32, 16) works well for 4 plots tall and about 6 periods wide per plot + ax_graph =
fig.add_subplot() + ax_graph.grid(False) + + # # add edges + # for item in self.data.data['elements']['branch']: + # G.add_edge(self.data.data['elements']['branch'][item]['from_bus'], self.data.data['elements']['branch'][item]['to_bus']) + + + + # G = nx.path_graph(5) + graph_node_position_dict = nx.kamada_kawai_layout(G) + nx.drawing.draw_networkx_nodes(G, graph_node_position_dict, ax=ax_graph) + + + # make some blocks + num_blocks = 4 + rand_weights = (np.random.randn(4)+1)/2. + from matplotlib.patches import Rectangle, RegularPolygon + from matplotlib.collections import PatchCollection + import matplotlib.cm as cm + from matplotlib.transforms import Affine2D + flow_glyphs = [] + spacing = 0.05 # take 10% of the rectangle and use it to create space between rectangles + kind = 'triangle' + # kind = 'rectangle' + for this_block_ix in range(num_blocks): + # normalizing this patch to 1 + + #### + # rectangle version + #### + if kind == 'rectangle': + # anchor for rectangles are set to bottom left + glyph_anchor_coord = [this_block_ix/float(num_blocks), -.5] + # height is y, width is x + consistent_width = 1./float(num_blocks) + # apply scaling + x_nudge = consistent_width*(1-spacing) + # nudge the start forward a bit (by the nudge factor) + glyph_anchor_coord[0]+=x_nudge + patch_width = consistent_width-x_nudge + patch_height = 1 + flow_glyphs.append(Rectangle(glyph_anchor_coord, + patch_width, + patch_height, + color=cm.hot(rand_weights[this_block_ix]))) + + #### + # triangle version + #### + if kind == 'triangle': + # triangles need to be in the center and then given a size + glyph_anchor_coord = [(this_block_ix+1)/float(num_blocks+1), 0] + glyph_verts = 3 + glyph_radius = (1./float(num_blocks))/2 + # apply nudges + glyph_radius *= (1-(spacing/2.)) + flow_glyphs.append(RegularPolygon(glyph_anchor_coord, + glyph_verts, + radius=glyph_radius, + orientation=-(np.pi/2.))) + + + + facecolors = cm.rainbow(rand_weights) + pc = PatchCollection(flow_glyphs, facecolors=facecolors) + + # attempt to rotate + start_key = self.data.data['elements']['branch']['branch_2_3']['from_bus'] + end_key = self.data.data['elements']['branch']['branch_2_3']['to_bus'] + start_pos = graph_node_position_dict[start_key] + end_pos = graph_node_position_dict[end_key] + node_distance = np.linalg.norm(end_pos-start_pos) + rot_angle_rad = np.arctan2((end_pos[1]-start_pos[1]),(end_pos[0]-start_pos[0])) + + # set up transformations + # stretch to the distance between target nodes + length_transform = Affine2D().scale(sx=node_distance, sy=1) + # squish + scale_transform = Affine2D().scale(sx=1, sy=.5) + # rotate + rot_transform = Affine2D().rotate_deg(np.rad2deg(rot_angle_rad)) + # translate to the node start + translate_transform = Affine2D().translate(*start_pos) + length_transform = Affine2D().scale(sx=node_distance, sy=1) + t2 = length_transform + scale_transform + rot_transform + translate_transform + ax_graph.transData + pc.set_transform(t2) + + # add collection + ax_graph.add_collection(pc) + + # # add edges + # for item in self.data.data['elements']['branch']: + # # grab the keys we care about + # start_key = self.data.data['elements']['branch'][item]['from_bus'] + # end_key = self.data.data['elements']['branch'][item]['to_bus'] + # start_pos = graph_node_position_dict[start_key] + # end_pos = graph_node_position_dict[end_key] + + # # forward arrow + # ax_graph.arrow(start_pos[0], start_pos[1], (end_pos[0]-start_pos[0]), (end_pos[1]-start_pos[1]), color='black') + # # backward arrow + # ax_graph.arrow(end_pos[0], 
end_pos[1], (start_pos[0]-end_pos[0]), (start_pos[1]-end_pos[1]), color='black') + + # graph_plot_obj = nx.draw_kamada_kawai(G, labels=labels) + pass + fig.savefig('test.png') + pass def plot_levels(self, save_dir="."): - # plot or represent primals trees - for this_root_level_key in self.primals_tree: - if "investmentStage" in this_root_level_key: - # run the toplevel keys - parent_key_string = f"{this_root_level_key}" - self._level_plot_workhorse( - "investmentStage", self.primals_tree, "", save_dir - ) + self._plot_graph_workhorse() - # run the representative period subkeys - investment_level_cut = self.primals_tree[this_root_level_key] - parent_key_string = f"{this_root_level_key}" - self._level_plot_workhorse( - "representativePeriod", investment_level_cut, parent_key_string, save_dir - ) - for this_inv_level_key in self.primals_tree[this_root_level_key].keys(): - if "representativePeriod" in this_inv_level_key: - representative_level_cut = self.primals_tree[this_root_level_key][this_inv_level_key] - parent_key_string = f"{this_root_level_key}_{this_inv_level_key}" - self._level_plot_workhorse( - "commitmentPeriod", representative_level_cut, parent_key_string, save_dir - ) + # # plot or represent primals trees + # for this_root_level_key in self.primals_tree: + # if "investmentStage" in this_root_level_key: + # # run the toplevel keys + # parent_key_string = f"{this_root_level_key}" + # self._level_plot_workhorse( + # "investmentStage", self.primals_tree, "", save_dir + # ) - for this_rep_level_key in self.primals_tree[ - this_root_level_key - ][this_inv_level_key].keys(): - if "commitmentPeriod" in this_rep_level_key: - commitment_level_cut = self.primals_tree[ - this_root_level_key - ][this_inv_level_key][this_rep_level_key] + # # run the representative period subkeys + # investment_level_cut = self.primals_tree[this_root_level_key] + # parent_key_string = f"{this_root_level_key}" + # self._level_plot_workhorse( + # "representativePeriod", investment_level_cut, parent_key_string, save_dir + # ) - parent_key_string = f"{this_root_level_key}_{this_inv_level_key}_{this_rep_level_key}" + # for this_inv_level_key in self.primals_tree[this_root_level_key].keys(): + # if "representativePeriod" in this_inv_level_key: + # representative_level_cut = self.primals_tree[this_root_level_key][this_inv_level_key] + # parent_key_string = f"{this_root_level_key}_{this_inv_level_key}" + # self._level_plot_workhorse( + # "commitmentPeriod", representative_level_cut, parent_key_string, save_dir + # ) - self._level_plot_workhorse( - "dispatchPeriod",commitment_level_cut, parent_key_string, save_dir - ) + # for this_rep_level_key in self.primals_tree[ + # this_root_level_key + # ][this_inv_level_key].keys(): + # if "commitmentPeriod" in this_rep_level_key: + # commitment_level_cut = self.primals_tree[ + # this_root_level_key + # ][this_inv_level_key][this_rep_level_key] + + # parent_key_string = f"{this_root_level_key}_{this_inv_level_key}_{this_rep_level_key}" + + # self._level_plot_workhorse( + # "dispatchPeriod",commitment_level_cut, parent_key_string, save_dir + # ) # # plot or represent expressions # self._expressions_plot_workhorse( From c5bea11b1d40751f806694dfbc97e229278423f5 Mon Sep 17 00:00:00 2001 From: "Thom R. 
Edwards" Date: Wed, 3 Jul 2024 15:56:14 -0600 Subject: [PATCH 7/8] iteration on bus viz dev --- gtep/driver.py | 16 ++- gtep/gtep_solution.py | 318 +++++++++++++++++++++++++----------------- 2 files changed, 201 insertions(+), 133 deletions(-) diff --git a/gtep/driver.py b/gtep/driver.py index e9517a2..20e398f 100644 --- a/gtep/driver.py +++ b/gtep/driver.py @@ -7,9 +7,11 @@ from pyomo.core import TransformationFactory from pyomo.contrib.appsi.solvers.highs import Highs -# data_path = "./gtep/data/5bus" -# data_object = ExpansionPlanningData() -# data_object.load_prescient(data_path) +data_path = "./gtep/data/5bus" +data_path = "./gtep/data/WECC_Reduced_USAEE" +data_object = ExpansionPlanningData() +data_object.load_prescient(data_path) + # mod_object = ExpansionPlanningModel( # data=data_object.md, num_reps=2, len_reps=3, num_commit=4, num_dispatch=5 # ) @@ -26,8 +28,14 @@ # sol_object.load_from_model(mod_object) # sol_object.dump_json() + +sol_object.import_data_object(data_object) + +# sol_object.read_json("./gtep_lots_of_buses_solution.json") # "./gtep/data/WECC_USAEE" +sol_object.read_json("./gtep_11bus_solution.json") # "./gtep/data/WECC_Reduced_USAEE" # sol_object.read_json("./gtep_solution.json") -sol_object.read_json("./bigger_longer_wigglier_gtep_solution.json") +# sol_object.read_json("./updated_gtep_solution_test.json") +# sol_object.read_json("./gtep_wiggles.json") sol_object.plot_levels(save_dir="./plots/") pass diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py index 63e221e..de2cb45 100644 --- a/gtep/gtep_solution.py +++ b/gtep/gtep_solution.py @@ -267,7 +267,8 @@ def _plot_workhorse_relational(self, parent_key_string, pretty_title="Selected Data", plot_bounds=False, - save_dir=".",): + save_dir=".", + aspect_ratio=1): @@ -285,10 +286,10 @@ def _plot_workhorse_relational(self, fig_width = (total_periods*gridspec_width*4)+fig_width_padding fig_width = min(max_figheight, fig_width) fig_height = (2*gridspec_height)+fig_height_padding - if fig_width/fig_height > 2: - fig_height = floor(fig_width/2) - elif fig_height/fig_height > 2: - fig_height = floor(fig_height/2) + if fig_width/fig_height > aspect_ratio: + fig_height = floor(fig_width/aspect_ratio) + elif fig_height/fig_width > aspect_ratio: + fig_width = floor(fig_height/aspect_ratio) # set up plot fig = plt.figure(figsize=(fig_width, @@ -536,8 +537,8 @@ def _expressions_plot_workhorse( fig_height = (2*gridspec_height)+fig_height_padding if fig_width/fig_height > 2: fig_height = floor(fig_width/2) - elif fig_height/fig_height > 2: - fig_height = floor(fig_height/2) + elif fig_height/fig_width > 2: + fig_width = floor(fig_height/2) # set up plot fig = plt.figure(figsize=(fig_width, @@ -675,7 +676,7 @@ def _level_plot_workhorse( save_dir=save_dir, plot_bounds=plot_bounds) - def _plot_graph_workhorse(self): + def _plot_graph_workhorse(self, what_is_a_bus_called='dc_branch'): # testing networkx plots # construct graph object @@ -711,100 +712,159 @@ def _plot_graph_workhorse(self): # G = nx.path_graph(5) graph_node_position_dict = nx.kamada_kawai_layout(G) + # graph_node_position_dict = nx.planar_layout(G) + # graph_node_position_dict = nx.spectral_layout(G) nx.drawing.draw_networkx_nodes(G, graph_node_position_dict, ax=ax_graph) + def draw_single_edge_flow(item, ax_graph, glyph_type='triangle'): - # make some blocks - num_blocks = 4 - rand_weights = (np.random.randn(4)+1)/2. 
- from matplotlib.patches import Rectangle, RegularPolygon - from matplotlib.collections import PatchCollection - import matplotlib.cm as cm - from matplotlib.transforms import Affine2D - flow_glyphs = [] - spacing = 0.05 # take 10% of the rectangle and use it to create space between rectangles - kind = 'triangle' - # kind = 'rectangle' - for this_block_ix in range(num_blocks): - # normalizing this patch to 1 - - #### - # rectangle version - #### - if kind == 'rectangle': - # anchor for rectangles are set to bottom left - glyph_anchor_coord = [this_block_ix/float(num_blocks), -.5] - # height is y, width is x - consistent_width = 1./float(num_blocks) - # apply scaling - x_nudge = consistent_width*(1-spacing) - # nudge the start forward a bit (by the nudge factor) - glyph_anchor_coord[0]+=x_nudge - patch_width = consistent_width-x_nudge - patch_height = 1 - flow_glyphs.append(Rectangle(glyph_anchor_coord, - patch_width, - patch_height, - color=cm.hot(rand_weights[this_block_ix]))) - - #### - # triangle version - #### - if kind == 'triangle': - # triangles need to be in the center and then given a size - glyph_anchor_coord = [(this_block_ix+1)/float(num_blocks+1), 0] - glyph_verts = 3 - glyph_radius = (1./float(num_blocks))/2 - # apply nudges - glyph_radius *= (1-(spacing/2.)) - flow_glyphs.append(RegularPolygon(glyph_anchor_coord, - glyph_verts, - radius=glyph_radius, - orientation=-(np.pi/2.))) - - + from matplotlib.patches import Rectangle, RegularPolygon + from matplotlib.collections import PatchCollection + import matplotlib.cm as cm + from matplotlib.transforms import Affine2D - facecolors = cm.rainbow(rand_weights) - pc = PatchCollection(flow_glyphs, facecolors=facecolors) - - # attempt to rotate - start_key = self.data.data['elements']['branch']['branch_2_3']['from_bus'] - end_key = self.data.data['elements']['branch']['branch_2_3']['to_bus'] - start_pos = graph_node_position_dict[start_key] - end_pos = graph_node_position_dict[end_key] - node_distance = np.linalg.norm(end_pos-start_pos) - rot_angle_rad = np.arctan2((end_pos[1]-start_pos[1]),(end_pos[0]-start_pos[0])) - - # set up transformations - # stretch to the distance between target nodes - length_transform = Affine2D().scale(sx=node_distance, sy=1) - # squish - scale_transform = Affine2D().scale(sx=1, sy=.5) - # rotate - rot_transform = Affine2D().rotate_deg(np.rad2deg(rot_angle_rad)) - # translate to the node start - translate_transform = Affine2D().translate(*start_pos) - length_transform = Affine2D().scale(sx=node_distance, sy=1) - t2 = length_transform + scale_transform + rot_transform + translate_transform + ax_graph.transData - pc.set_transform(t2) - - # add collection - ax_graph.add_collection(pc) - - # # add edges - # for item in self.data.data['elements']['branch']: - # # grab the keys we care about - # start_key = self.data.data['elements']['branch'][item]['from_bus'] - # end_key = self.data.data['elements']['branch'][item]['to_bus'] - # start_pos = graph_node_position_dict[start_key] - # end_pos = graph_node_position_dict[end_key] - - # # forward arrow - # ax_graph.arrow(start_pos[0], start_pos[1], (end_pos[0]-start_pos[0]), (end_pos[1]-start_pos[1]), color='black') - # # backward arrow - # ax_graph.arrow(end_pos[0], end_pos[1], (start_pos[0]-end_pos[0]), (start_pos[1]-end_pos[1]), color='black') + def generate_flow_glyphs(num_glyphs, + spacing=0.05, + glyph_type='triangle', + glyph_rotation=-(np.pi/2.), + verts=3, + alpha=0.5): + + flow_glyphs = [] + for this_block_ix in range(num_glyphs): + # normalizing this patch 
to 1 + + #### + # rectangle version + #### + if glyph_type == 'rectangle': + # anchor for rectangles are set to bottom left + glyph_anchor_coord = [this_block_ix/float(num_glyphs), -.5] + # height is y, width is x + consistent_width = 1./float(num_glyphs) + # apply scaling + x_nudge = consistent_width*(spacing) + # nudge the start forward a bit (by the nudge factor) + glyph_anchor_coord[0]+=x_nudge + patch_width = consistent_width-x_nudge + patch_height = 1 + flow_glyphs.append(Rectangle(glyph_anchor_coord, + patch_width, + patch_height, + alpha=alpha)) + + #### + # triangle version + #### + + if glyph_type == 'triangle': + # triangles need to be in the center and then given a size + glyph_anchor_coord = [(this_block_ix+.5)/float(num_glyphs), 0] + glyph_verts = 3 + glyph_radius = (1./float(num_glyphs))/2. + # apply nudges + glyph_radius *= (1-(spacing/2.)) + flow_glyphs.append(RegularPolygon(glyph_anchor_coord, + glyph_verts, + radius=glyph_radius, + orientation=glyph_rotation, + alpha=alpha)) + + yscale_transform = Affine2D().scale(sx=1, sy=0.5/glyph_radius) + # rescale y to make it fit in a 1x1 box + flow_glyphs[-1].set_transform(yscale_transform) + + if glyph_type == 'n-gon': + # triangles need to be in the center and then given a size + glyph_anchor_coord = [(this_block_ix+.5)/float(num_glyphs), 0] + glyph_verts = verts + glyph_radius = (1./float(num_glyphs))/2. + # apply nudges + glyph_radius *= (1-(spacing)) + flow_glyphs.append(RegularPolygon(glyph_anchor_coord, + glyph_verts, + radius=glyph_radius, + orientation=glyph_rotation, + alpha=alpha)) + + yscale_transform = Affine2D().scale(sx=1, sy=0.5/glyph_radius) + # rescale y to make it fit in a 1x1 box + flow_glyphs[-1].set_transform(yscale_transform) + + return flow_glyphs + + # make some blocks + num_blocks = 4 + # rand_weights_top = (np.random.randn(4)+1)/2. + # rand_weights_bot = (np.random.randn(4)+1)/2. 
+
+            # make some blocks
+            num_blocks = 4
+            # rand_weights_top = (np.random.randn(4)+1)/2.
+            # rand_weights_bot = (np.random.randn(4)+1)/2.
+            rand_weights_top = np.array(range(num_blocks))/(num_blocks*2)
+            rand_weights_bot = (np.array(range(num_blocks))+num_blocks)/(num_blocks*2)
+
+            top_flow_glyphs = generate_flow_glyphs(len(rand_weights_top), glyph_type=glyph_type)
+            top_facecolors = cm.rainbow(rand_weights_top)
+            top_flow_collection = PatchCollection(top_flow_glyphs, facecolors=top_facecolors, alpha=0.5)
+            bot_flow_glyphs = generate_flow_glyphs(len(rand_weights_bot), glyph_type=glyph_type, glyph_rotation=(np.pi/2.))
+            bot_flow_glyphs = reversed(bot_flow_glyphs)
+            bot_facecolors = cm.rainbow(rand_weights_bot)
+            bot_flow_collection = PatchCollection(bot_flow_glyphs, facecolors=bot_facecolors, alpha=0.5)
+
+            # scale and move top and bottom collections
+            top_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, 0.5) #+ ax_graph.transData
+            top_flow_collection.set_transform(top_base_transform)
+            bot_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, -0.5)# + ax_graph.transData
+            # bot_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, -0.5) + ax_graph.transData
+            bot_flow_collection.set_transform(bot_base_transform)
+
+
+            # combine collections and move to edge between nodes
+
+            # attempt to rotate
+            start_key = self.data.data['elements'][what_is_a_bus_called][item]['from_bus']
+            end_key = self.data.data['elements'][what_is_a_bus_called][item]['to_bus']
+            start_pos = graph_node_position_dict[start_key]
+            end_pos = graph_node_position_dict[end_key]
+            node_distance = np.linalg.norm(end_pos-start_pos)
+            rot_angle_rad = np.arctan2((end_pos[1]-start_pos[1]),(end_pos[0]-start_pos[0]))
+
+            along_edge_scale = 0.5
+
+            # set up transformations
+            # stretch to the distance between target nodes
+            length_transform = Affine2D().scale(sx=node_distance*along_edge_scale, sy=1)
+            # squish
+            scale_transform = Affine2D().scale(sx=1, sy=.1)
+            # rotate
+            rot_transform = Affine2D().rotate_deg(np.rad2deg(rot_angle_rad))
+            # translate to the node start, then push it along the edge until it's approximately centered and scaled nicely
+            translate_transform = Affine2D().translate(start_pos[0]+(np.cos(rot_angle_rad)*node_distance*.5*(1-along_edge_scale)),
+                                                       start_pos[1]+(np.sin(rot_angle_rad)*node_distance*.5*(1-along_edge_scale)))
+            t2 = length_transform + scale_transform + rot_transform + translate_transform + ax_graph.transData
+
+            top_flow_collection.set_transform(top_flow_collection.get_transform() + t2)
+            bot_flow_collection.set_transform(bot_flow_collection.get_transform() + t2)
+
+            # add collection
+            ax_graph.add_collection(top_flow_collection)
+            ax_graph.add_collection(bot_flow_collection)
+
+        # add edges
+        for item in self.data.data['elements'][what_is_a_bus_called]:
+
+            kind = 'triangle'
+            kind = 'rectangle'
+            draw_single_edge_flow(item, ax_graph, glyph_type=kind)
+
+            # grab the keys we care about
+            start_key = self.data.data['elements'][what_is_a_bus_called][item]['from_bus']
+            end_key = self.data.data['elements'][what_is_a_bus_called][item]['to_bus']
+            start_pos = graph_node_position_dict[start_key]
+            end_pos = graph_node_position_dict[end_key]
+
+            # forward arrow
+            ax_graph.arrow(start_pos[0], start_pos[1], (end_pos[0]-start_pos[0]), (end_pos[1]-start_pos[1]), color='black')
+            # backward arrow
+            ax_graph.arrow(end_pos[0], end_pos[1], (start_pos[0]-end_pos[0]), (start_pos[1]-end_pos[1]), color='black')
-        # graph_plot_obj = nx.draw_kamada_kawai(G, labels=labels)
         pass
         fig.savefig('test.png')
         pass
@@ -814,43 +874,43 @@ def plot_levels(self, save_dir="."):
 
         self._plot_graph_workhorse()
 
-        # # plot or represent primals trees
-        # for this_root_level_key in self.primals_tree:
-        #     if "investmentStage" in this_root_level_key:
-        #         # run the toplevel keys
-        #         parent_key_string = f"{this_root_level_key}"
-        #         self._level_plot_workhorse(
-        #             "investmentStage", self.primals_tree, "", save_dir
-        #         )
+        # plot or represent primals trees
+        for this_root_level_key in self.primals_tree:
+            if "investmentStage" in this_root_level_key:
+                # run the toplevel keys
+                parent_key_string = f"{this_root_level_key}"
+                self._level_plot_workhorse(
+                    "investmentStage", self.primals_tree, this_root_level_key, save_dir
+                )
 
-        #         # run the representative period subkeys
-        #         investment_level_cut = self.primals_tree[this_root_level_key]
-        #         parent_key_string = f"{this_root_level_key}"
-        #         self._level_plot_workhorse(
-        #             "representativePeriod", investment_level_cut, parent_key_string, save_dir
-        #         )
+                # run the representative period subkeys
+                investment_level_cut = self.primals_tree[this_root_level_key]
+                parent_key_string = f"{this_root_level_key}"
+                self._level_plot_workhorse(
+                    "representativePeriod", investment_level_cut, parent_key_string, save_dir
+                )
 
-        #         for this_inv_level_key in self.primals_tree[this_root_level_key].keys():
-        #             if "representativePeriod" in this_inv_level_key:
-        #                 representative_level_cut = self.primals_tree[this_root_level_key][this_inv_level_key]
-        #                 parent_key_string = f"{this_root_level_key}_{this_inv_level_key}"
-        #                 self._level_plot_workhorse(
-        #                     "commitmentPeriod", representative_level_cut, parent_key_string, save_dir
-        #                 )
+                for this_inv_level_key in self.primals_tree[this_root_level_key].keys():
+                    if "representativePeriod" in this_inv_level_key:
+                        representative_level_cut = self.primals_tree[this_root_level_key][this_inv_level_key]
+                        parent_key_string = f"{this_root_level_key}_{this_inv_level_key}"
+                        self._level_plot_workhorse(
+                            "commitmentPeriod", representative_level_cut, parent_key_string, save_dir
+                        )
 
-        #                 for this_rep_level_key in self.primals_tree[
-        #                     this_root_level_key
-        #                 ][this_inv_level_key].keys():
-        #                     if "commitmentPeriod" in this_rep_level_key:
-        #                         commitment_level_cut = self.primals_tree[
-        #                             this_root_level_key
-        #                         ][this_inv_level_key][this_rep_level_key]
+                        for this_rep_level_key in self.primals_tree[
+                            this_root_level_key
+                        ][this_inv_level_key].keys():
+                            if "commitmentPeriod" in this_rep_level_key:
+                                commitment_level_cut = self.primals_tree[
+                                    this_root_level_key
+                                ][this_inv_level_key][this_rep_level_key]
 
-        #                         parent_key_string = f"{this_root_level_key}_{this_inv_level_key}_{this_rep_level_key}"
+                                parent_key_string = f"{this_root_level_key}_{this_inv_level_key}_{this_rep_level_key}"
 
-        #                         self._level_plot_workhorse(
-        #                             "dispatchPeriod",commitment_level_cut, parent_key_string, save_dir
-        #                         )
+                                self._level_plot_workhorse(
+                                    "dispatchPeriod", commitment_level_cut, parent_key_string, save_dir
+                                )
 
         # # plot or represent expressions
         # self._expressions_plot_workhorse(

From 70b021dea2ffbc841938f67ece1dba596ffe4b49 Mon Sep 17 00:00:00 2001
From: "Thom R. 
Edwards" Date: Wed, 3 Jul 2024 17:55:48 -0600 Subject: [PATCH 8/8] power flow graph dev iteration (NEEDS CLEANUP)

---
 gtep/gtep_solution.py | 212 +++++++++++++++++++++++++++++-------------
 1 file changed, 147 insertions(+), 65 deletions(-)

diff --git a/gtep/gtep_solution.py b/gtep/gtep_solution.py
index de2cb45..b265fcc 100644
--- a/gtep/gtep_solution.py
+++ b/gtep/gtep_solution.py
@@ -19,6 +19,14 @@
 import numpy as np
 import re
+
+from matplotlib.patches import Rectangle, RegularPolygon, PathPatch
+from matplotlib.collections import PatchCollection
+import matplotlib.cm as cm
+from matplotlib.transforms import Affine2D
+from matplotlib.colors import Normalize
+import matplotlib.path as mpath
+
 logger = logging.getLogger(__name__)
 
 # [TODO] inject units into plots
@@ -213,6 +221,7 @@ def _level_relationship_dict_to_df_workhorse(
         self, level_key, timeseries_dict, keys_of_interest, vars_of_interest
     ):
         df_data_dict = {}
+        units_dict = {}
         # set our defaults
         df_data_dict.setdefault(level_key, [])
         for this_koi in keys_of_interest:
@@ -234,30 +243,35 @@
                         df_data_dict[f"{this_koi}_{this_voi}_value"].append(
                             bool(round(period_dict["primals_by_name"][this_koi][this_voi]["value"]))  # have to cast to int because there are floating point errors
                         )
+                        units_dict.setdefault(f"{this_koi}_{this_voi}_value", period_dict["primals_by_name"][this_koi][this_voi]['units'])
                     else:
                         df_data_dict[f"{this_koi}_{this_voi}_value"].append(
                             period_dict["primals_by_name"][this_koi][this_voi]["value"]
                         )
+                        units_dict.setdefault(f"{this_koi}_{this_voi}_value", period_dict["primals_by_name"][this_koi][this_voi]['units'])
                 else:
                     df_data_dict[f"{this_koi}_{this_voi}_value"].append(
                         period_dict["primals_by_name"][this_koi][this_voi]["value"]
                     )
+                    units_dict.setdefault(f"{this_koi}_{this_voi}_value", period_dict["primals_by_name"][this_koi][this_voi]['units'])
                 df_data_dict[f"{this_koi}_{this_voi}_lower_bound"].append(
                     period_dict["primals_by_name"][this_koi][this_voi]["bounds"][0]
                 )
+                units_dict.setdefault(f"{this_koi}_{this_voi}_lower_bound", period_dict["primals_by_name"][this_koi][this_voi]['units'])
                 df_data_dict[f"{this_koi}_{this_voi}_upper_bound"].append(
                     period_dict["primals_by_name"][this_koi][this_voi]["bounds"][1]
                 )
+                units_dict.setdefault(f"{this_koi}_{this_voi}_upper_bound", period_dict["primals_by_name"][this_koi][this_voi]['units'])
 
         # try to make a DF, and if not just pass back an empty
        try:
             data_df = pd.DataFrame(df_data_dict)
             # fix any Nones and make them NaNs
             data_df = data_df.fillna(value=np.nan)
-            return data_df
+            return data_df, units_dict
         except ValueError as vEx:
             print(f"[WARNING] _level_relationship_dict_to_df_workhorse attempted to create dataframe and failed: {vEx}")
-            return pd.DataFrame()
+            return pd.DataFrame(), {}
 
     def _plot_workhorse_relational(self,
                                    level_key,
@@ -655,7 +669,7 @@ def _level_plot_workhorse(
         tmp_koi = sorted(keys_of_interest)
 
         # make a df for debug and also easy tabularness for plots
-        this_df_of_interest = self._level_relationship_dict_to_df_workhorse(
+        this_df_of_interest, this_df_units = self._level_relationship_dict_to_df_workhorse(
             level_key, level_timeseries, tmp_koi, tmp_voi
         )
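(Hedged usage sketch of the revised contract above: the workhorse now returns the frame plus a parallel units lookup keyed by column name; the surrounding variable names are from the patch, the filtering expression here is illustrative.)

    df, units = self._level_relationship_dict_to_df_workhorse(level_key, level_timeseries, tmp_koi, tmp_voi)
    if not df.empty:
        value_cols = [c for c in df.columns if c.endswith("_value")]
        # e.g. build an axis or colorbar label from the first value column
        label = f"{value_cols[0]} [{units[value_cols[0]]}]"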
@@ -665,30 +679,50 @@
 
         # just ram the variable names together, that'll be fine, right?
         this_pretty_title = ", ".join(tmp_voi)
 
-        # plot it
-        self._level_relationship_df_to_plot(
-            level_key,
-            this_df_of_interest,
-            tmp_koi,
-            tmp_voi,
-            parent_key_string,
-            pretty_title=this_pretty_title,
-            save_dir=save_dir,
-            plot_bounds=plot_bounds)
+        # [HACK]
+        # if we find powerflow, plot it as a network
+        if 'powerFlow' in tmp_voi:
+            self._plot_graph_workhorse(this_df_of_interest,
+                                       'powerFlow',
+                                       parent_key_string,
+                                       units=this_df_units,
+                                       pretty_title=this_pretty_title,
+                                       save_dir=save_dir,)
 
-    def _plot_graph_workhorse(self, what_is_a_bus_called='dc_branch'):
+        # [HACK] put this back one indent level when done
+        # plot it
+        self._level_relationship_df_to_plot(
+            level_key,
+            this_df_of_interest,
+            tmp_koi,
+            tmp_voi,
+            parent_key_string,
+            pretty_title=this_pretty_title,
+            save_dir=save_dir,
+            plot_bounds=plot_bounds)
+
+
+    def _plot_graph_workhorse(self,
+                              df,
+                              value_key,
+                              parent_key_string,
+                              what_is_a_bus_called='dc_branch',
+                              units=None,
+                              pretty_title="Selected Data",
+                              save_dir=".",):
         # testing networkx plots
 
-        # construct graph object
-        G = nx.Graph()
-
-        for item in self.data.data['elements']['branch']:
-            print(self.data.data['elements']['branch'][item]['from_bus'], '--->', self.data.data['elements']['branch'][item]['to_bus'])
-
+        # preslice out data of interest
+        cols_of_interest = [col for col in df.columns if f"{value_key}_value" in col]
+        df_of_interest = df[cols_of_interest]
+        df_max = df_of_interest.to_numpy().max()
+        df_min = df_of_interest.to_numpy().min()
+        # assume all the units are the same, and pull the first one
+        units_str = ""
+        if units[cols_of_interest[0]] is not None:
+            units_str = f" [{units[cols_of_interest[0]]}]"
 
-
-        # fig = plt.figure(tight_layout=False)
-        # ax_graph = fig.add_subplot()
+        # construct graph object
         G = nx.Graph()
         labels = {}
         # add nodes
@@ -698,7 +732,6 @@
 
         # do edges manually later
 
-
         # set up plot
         fig = plt.figure(figsize=(16, 8), tight_layout=False)  # (32, 16) works well for 4 plots tall and about 6 periods wide per plot
         ax_graph = fig.add_subplot()
@@ -708,27 +741,26 @@
 
         # for item in self.data.data['elements']['branch']:
         #     G.add_edge(self.data.data['elements']['branch'][item]['from_bus'], self.data.data['elements']['branch'][item]['to_bus'])
-
-        # G = nx.path_graph(5)
         graph_node_position_dict = nx.kamada_kawai_layout(G)
         # graph_node_position_dict = nx.planar_layout(G)
         # graph_node_position_dict = nx.spectral_layout(G)
-        nx.drawing.draw_networkx_nodes(G, graph_node_position_dict, ax=ax_graph)
+        nx.drawing.draw_networkx_nodes(G, graph_node_position_dict, node_size=1000, ax=ax_graph)
+        nx.draw_networkx_labels(G, graph_node_position_dict, labels, font_size=18, font_color="whitesmoke", ax=ax_graph)
 
-        def draw_single_edge_flow(item, ax_graph, glyph_type='triangle'):
+        def draw_single_edge_flow(item,
+                                  glyph_values_slice,
+                                  ax_graph,
+                                  cmap=cm.rainbow,
+                                  norm=Normalize(vmin=None, vmax=None),
+                                  glyph_type='custom'):
 
-            from matplotlib.patches import Rectangle, RegularPolygon
-            from matplotlib.collections import PatchCollection
-            import matplotlib.cm as cm
-            from matplotlib.transforms import Affine2D
 
             def generate_flow_glyphs(num_glyphs,
                                      spacing=0.05,
                                      glyph_type='triangle',
-                                     glyph_rotation=-(np.pi/2.),
-                                     verts=3,
-                                     alpha=0.5):
+                                     glyph_rotation=0.,
+                                     verts=3):
 
                 flow_glyphs = []
                 for this_block_ix in range(num_glyphs):
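(Stepping outside the diff for a moment: the graph scaffolding introduced above boils down to this standalone sketch; the three bus names are toy placeholders, not model data.)

    import networkx as nx
    import matplotlib.pyplot as plt

    G = nx.Graph()
    G.add_nodes_from(["bus1", "bus2", "bus3"])
    G.add_edges_from([("bus1", "bus2"), ("bus2", "bus3")])
    pos = nx.kamada_kawai_layout(G)  # maps each node to an (x, y) position
    fig, ax = plt.subplots()
    nx.draw_networkx_nodes(G, pos, node_size=1000, ax=ax)
    nx.draw_networkx_labels(G, pos, {n: n for n in G.nodes}, font_size=18, font_color="whitesmoke", ax=ax)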
@@ -750,13 +782,11 @@ def generate_flow_glyphs(num_glyphs,
                         patch_height = 1
                         flow_glyphs.append(Rectangle(glyph_anchor_coord,
                                                      patch_width,
-                                                     patch_height,
-                                                     alpha=alpha))
+                                                     patch_height))
 
                     ####
                     # triangle version
                     ####
-
                     if glyph_type == 'triangle':
                         # triangles need to be in the center and then given a size
                         glyph_anchor_coord = [(this_block_ix+.5)/float(num_glyphs), 0]
                         glyph_verts = 3
                         glyph_radius = (1./float(num_glyphs))/2.
                         # apply nudges
                         glyph_radius *= (1-(spacing/2.))
                         flow_glyphs.append(RegularPolygon(glyph_anchor_coord,
                                                           glyph_verts,
                                                           radius=glyph_radius,
-                                                          orientation=glyph_rotation,
-                                                          alpha=alpha))
+                                                          orientation=glyph_rotation))
 
                         yscale_transform = Affine2D().scale(sx=1, sy=0.5/glyph_radius)
                         # rescale y to make it fit in a 1x1 box
                         flow_glyphs[-1].set_transform(yscale_transform)
 
+                    ####
+                    # n-gon version
+                    ####
                     if glyph_type == 'n-gon':
                         # n-gons need to be in the center and then given a size
                         glyph_anchor_coord = [(this_block_ix+.5)/float(num_glyphs), 0]
                         glyph_verts = verts
                         glyph_radius = (1./float(num_glyphs))/2.
                         # apply nudges
                         glyph_radius *= (1-(spacing))
                         flow_glyphs.append(RegularPolygon(glyph_anchor_coord,
                                                           glyph_verts,
                                                           radius=glyph_radius,
-                                                          orientation=glyph_rotation,
-                                                          alpha=alpha))
+                                                          orientation=glyph_rotation))
 
                         yscale_transform = Affine2D().scale(sx=1, sy=0.5/glyph_radius)
                         # rescale y to make it fit in a 1x1 box
                         flow_glyphs[-1].set_transform(yscale_transform)
 
+                    ####
+                    # custom_flow
+                    ####
+                    if glyph_type == 'custom':
+                        # like the rectangle version, the anchor is set to the bottom left
+                        glyph_anchor_coord = [this_block_ix/float(num_glyphs), -.5]
+                        # height is y, width is x
+                        consistent_width = 1./float(num_glyphs)
+                        # apply scaling
+                        x_nudge = consistent_width*(spacing)
+                        patch_width = consistent_width-x_nudge
+                        patch_height = 1
+                        codes, verts = zip(*[
+                            (mpath.Path.MOVETO, glyph_anchor_coord),
+                            (mpath.Path.LINETO, [glyph_anchor_coord[0], glyph_anchor_coord[1]+patch_height]),
+                            (mpath.Path.LINETO, [glyph_anchor_coord[0]+patch_width*.7, glyph_anchor_coord[1]+patch_height]),  # go 70% of the width along the top
+                            (mpath.Path.LINETO, [glyph_anchor_coord[0]+patch_width, glyph_anchor_coord[1]+patch_height*.5]),  # go the rest of the width and meet in the center
+                            (mpath.Path.LINETO, [glyph_anchor_coord[0]+patch_width*.7, glyph_anchor_coord[1]]),  # go back a bit and to the bottom to finish the wedge
+                            (mpath.Path.LINETO, glyph_anchor_coord)])  # go to home
+
+                        flow_glyphs.append(PathPatch(mpath.Path(verts, codes), ec="none"),)
+
+                        rotation_transform = Affine2D().rotate_around(glyph_anchor_coord[0]+patch_width*.5,
+                                                                      glyph_anchor_coord[1]+patch_height*.5,
+                                                                      glyph_rotation)
+                        # rotate the wedge around its own center
+                        flow_glyphs[-1].set_transform(rotation_transform)
+
                 return flow_glyphs
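(A self-contained sketch of the custom wedge path defined above, drawn on a unit square anchored at the origin; the 0.7 knee is the patch's 70% taper, the coordinates are otherwise illustrative.)

    import matplotlib.path as mpath
    from matplotlib.patches import PathPatch

    codes, verts = zip(*[
        (mpath.Path.MOVETO, (0.0, 0.0)),
        (mpath.Path.LINETO, (0.0, 1.0)),   # up the left edge
        (mpath.Path.LINETO, (0.7, 1.0)),   # 70% of the way along the top
        (mpath.Path.LINETO, (1.0, 0.5)),   # taper to a point at mid-height
        (mpath.Path.LINETO, (0.7, 0.0)),   # back down to the bottom edge
        (mpath.Path.LINETO, (0.0, 0.0)),   # and home
    ])
    wedge = PathPatch(mpath.Path(verts, codes), ec="none")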
 
             # make some blocks
-            num_blocks = 4
-            # rand_weights_top = (np.random.randn(4)+1)/2.
-            # rand_weights_bot = (np.random.randn(4)+1)/2.
+            # weights_top = (np.random.randn(4)+1)/2.
+            # weights_bot = (np.random.randn(4)+1)/2.
+            # weights_top = np.array(range(num_blocks))/(num_blocks*2)
+            # weights_bot = (np.array(range(num_blocks))+num_blocks)/(num_blocks*2)
+            weights_top = glyph_values_slice
+            weights_bot = glyph_values_slice
+
+            top_flow_glyphs = generate_flow_glyphs(len(weights_top), glyph_type=glyph_type)
+            top_facecolors = cmap(norm(weights_top))
+            top_flow_collection = PatchCollection(top_flow_glyphs, facecolors=top_facecolors, edgecolors='grey', alpha=0.5)
+            # bot_flow_glyphs = generate_flow_glyphs(len(weights_bot), glyph_type=glyph_type, glyph_rotation=(np.pi/2.))  # for squares
+            bot_flow_glyphs = generate_flow_glyphs(len(weights_bot), glyph_type=glyph_type, glyph_rotation=(np.pi))  # for custom
             bot_flow_glyphs = reversed(bot_flow_glyphs)
-            bot_facecolors = cm.rainbow(rand_weights_bot)
-            bot_flow_collection = PatchCollection(bot_flow_glyphs, facecolors=bot_facecolors, alpha=0.5)
+            bot_facecolors = cmap(norm(weights_bot))
+            bot_flow_collection = PatchCollection(bot_flow_glyphs, facecolors=bot_facecolors, edgecolors='grey', alpha=0.5)
 
             # scale and move top and bottom collections
             top_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, 0.5) #+ ax_graph.transData
@@ -815,7 +876,6 @@ def generate_flow_glyphs(num_glyphs,
             # bot_base_transform = Affine2D().scale(sx=1, sy=0.9) + Affine2D().translate(0, -0.5) + ax_graph.transData
             bot_flow_collection.set_transform(bot_base_transform)
 
-
             # combine collections and move to edge between nodes
 
             # attempt to rotate
@@ -827,12 +887,12 @@ def generate_flow_glyphs(num_glyphs,
             rot_angle_rad = np.arctan2((end_pos[1]-start_pos[1]),(end_pos[0]-start_pos[0]))
 
             along_edge_scale = 0.5
-
+            away_from_edge_scale = 0.05
             # set up transformations
             # stretch to the distance between target nodes
             length_transform = Affine2D().scale(sx=node_distance*along_edge_scale, sy=1)
             # squish
-            scale_transform = Affine2D().scale(sx=1, sy=.1)
+            scale_transform = Affine2D().scale(sx=1, sy=away_from_edge_scale)
             # rotate
             rot_transform = Affine2D().rotate_deg(np.rad2deg(rot_angle_rad))
             # translate to the node start, then push it along the edge until it's approximately centered and scaled nicely
@@ -848,30 +908,52 @@ def generate_flow_glyphs(num_glyphs,
             ax_graph.add_collection(bot_flow_collection)
 
         # add edges
+        # define edge colorbar
+        cmap = cm.rainbow
+        normalize = Normalize(vmin=df_min, vmax=df_max)
+        cmappable = cm.ScalarMappable(norm=normalize, cmap=cmap)
+
         for item in self.data.data['elements'][what_is_a_bus_called]:
-
-            kind = 'triangle'
-            kind = 'rectangle'
-            draw_single_edge_flow(item, ax_graph, glyph_type=kind)
 
             # grab the keys we care about
             start_key = self.data.data['elements'][what_is_a_bus_called][item]['from_bus']
             end_key = self.data.data['elements'][what_is_a_bus_called][item]['to_bus']
             start_pos = graph_node_position_dict[start_key]
             end_pos = graph_node_position_dict[end_key]
+            edge_key = f"branch_{start_key}_{end_key}_{value_key}_value"
+            alt_edge_key = f"branch_{end_key}_{start_key}_{value_key}_value"
+
+            # kind = 'triangle'
+            # kind = 'rectangle'
+            kind = 'custom'
+            glyph_values_slice = None
+            try:
+                glyph_values_slice = df[edge_key].values
+            except KeyError as kex:
+                try:
+                    glyph_values_slice = df[alt_edge_key].values
+                except KeyError as kex_second_attempt:
+                    print(f"[WARNING] Attempted to slice DF in network twice using {edge_key} and {alt_edge_key}, failed both. Skipping flow glyphs for this edge.")
+            if glyph_values_slice is not None:
+                draw_single_edge_flow(item, glyph_values_slice, ax_graph, cmap=cmap, norm=normalize, glyph_type=kind)
 
             # forward arrow
             ax_graph.arrow(start_pos[0], start_pos[1], (end_pos[0]-start_pos[0]), (end_pos[1]-start_pos[1]),
color='black')
             # backward arrow
             ax_graph.arrow(end_pos[0], end_pos[1], (start_pos[0]-end_pos[0]), (start_pos[1]-end_pos[1]), color='black')
-
-        pass
-        fig.savefig('test.png')
+
+
+        # insert colorbar
+        fig.colorbar(cmappable, ax=ax_graph, label=f"{value_key}{units_str}")
+        # make some titles
+        fig.suptitle(f"{parent_key_string}_{value_key}")
+
+        # save
+        fig.savefig(f"{save_dir}{parent_key_string}_{pretty_title.replace(' ', '_')}_graph.png")
         pass
 
     def plot_levels(self, save_dir="."):
 
-        self._plot_graph_workhorse()
+        # self._plot_graph_workhorse()
 
         # plot or represent primals trees
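(Hedged sketch of the shared-colorbar pattern used above: a single Normalize over the sliced data range drives both the glyph facecolors and the figure's colorbar; the data, limits, label, and filename here are stand-ins, with the real limits coming from df_min/df_max and the units suffix from units_dict.)

    import matplotlib.cm as cm
    import matplotlib.pyplot as plt
    from matplotlib.colors import Normalize

    norm = Normalize(vmin=0.0, vmax=10.0)  # stand-ins for df_min / df_max
    cmappable = cm.ScalarMappable(norm=norm, cmap=cm.rainbow)
    fig, ax = plt.subplots()
    values = [2.0, 5.0, 8.0]
    # same value-to-color mapping the flow glyphs use
    ax.scatter([0, 1, 2], [0, 1, 2], c=values, cmap=cm.rainbow, norm=norm)
    fig.colorbar(cmappable, ax=ax, label="powerFlow")  # units suffix omitted here
    fig.savefig("colorbar_demo.png")  # illustrative filename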