
Feature/integrate tsam seg objwei reviewed #1259

Triggered via pull request October 31, 2023 15:54
Status Failure
Total duration 2m 42s

tox_checks.yml

on: pull_request
Matrix: lint

Annotations

38 errors and 3 warnings
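All of the errors below are formatting-check failures from the lint matrix: each annotation quotes the unified diff the formatter wants to apply to the named file (double quotes, wrapped arguments, trailing commas, blank lines). Judging by the diff style these come from black; assuming the usual oemof-solph tox setup, they should be reproducible locally with something like

    python -m black --check --diff src examples tests

(The exact paths and line length come from the repository's tox/black configuration, which is not part of this log.)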
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_invest_optimize_all_technologies_using_mp_and_tsam.py#L304
    # initialise the operational model
    om = solph.Model(energysystem)

    # if tee_switch is true solver messages will be displayed
    logging.info("Solve the optimization problem")
-    om.write('my_model.lp', io_options={'symbolic_solver_labels': True})
+    om.write("my_model.lp", io_options={"symbolic_solver_labels": True})
    om.solve(solver="cbc", solve_kwargs={"tee": True})

    ##########################################################################
    # Check and plot the results
    ##########################################################################
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L88
from oemof.tools import economics
from oemof.tools import logger
from oemof.solph import views
from oemof import solph

-def check_equal_timesteps_after_aggregation(hours_per_period : int,
-                                            hours_of_input_time_series: int,
-                                            periods_total_occurrence: list):
-
+
+
+def check_equal_timesteps_after_aggregation(
+    hours_per_period: int,
+    hours_of_input_time_series: int,
+    periods_total_occurrence: list,
+):
    if not sum(periods_total_occurrence) * hours_per_period == 8760:
-        #todo: prints can be deleted in future
-        print("aggregated timeseries has: " + str(int(sum(periods_total_occurrence) * hours_per_period)) + " timesteps")
-        print("unaggregated timeseries has: " + str(hours_of_input_time_series) + " timesteps")
-        print("therefore the occurrence of the typical periods for the objective weighting will be customized")
-        customize_factor = hours_of_input_time_series / int(sum(periods_total_occurrence) * hours_per_period)
-        result_list = [float(occurrence) * customize_factor for occurrence in periods_total_occurrence]
+        # todo: prints can be deleted in future
+        print(
+            "aggregated timeseries has: "
+            + str(int(sum(periods_total_occurrence) * hours_per_period))
+            + " timesteps"
+        )
+        print(
+            "unaggregated timeseries has: "
+            + str(hours_of_input_time_series)
+            + " timesteps"
+        )
+        print(
+            "therefore the occurrence of the typical periods for the objective weighting will be customized"
+        )
+        customize_factor = hours_of_input_time_series / int(
+            sum(periods_total_occurrence) * hours_per_period
+        )
+        result_list = [
+            float(occurrence) * customize_factor
+            for occurrence in periods_total_occurrence
+        ]
        periods_total_occurrence = result_list
        return periods_total_occurrence
    else:
        return periods_total_occurrence

-def set_aggregated_timeseries_and_objective_weighting(segmentation,
-                                                      periods_total_occurrence,
-                                                      aggregated_period_dict,
-                                                      first_time_stamp):
+
+def set_aggregated_timeseries_and_objective_weighting(
+    segmentation,
+    periods_total_occurrence,
+    aggregated_period_dict,
+    first_time_stamp,
+):
    previous_period = 0
    objective_weighting = []
    aggregated_time_series = []
-    current_timestamp=first_time_stamp
+    current_timestamp = first_time_stamp
    if segmentation:
-        for period, timestep, segmented_timestep in aggregated_period_dict.index:
+        for (
+            period,
+            timestep,
+            segmented_timestep,
+        ) in aggregated_period_dict.index:
            if previous_period == period:
                aggregated_time_series.append(current_timestamp)
            else:
                aggregated_time_series.append(current_timestamp)
                previous_period = period
-            objective_weighting.append(periods_total_occurrence[period] * segmented_timestep)
+            objective_weighting.append(
+                periods_total_occurrence[period] * segmented_timestep
+            )
            current_timestamp += pd.Timedelta(minutes=60 * segmented_timestep)
    else:
        for period, timestep in aggregated_period_dict.index:
            if previous_period == period:
                aggregated_time_series.append(current_timestamp)
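Note: beyond the reformatting, check_equal_timesteps_after_aggregation implements a simple rescaling: if the typical periods do not cover exactly 8760 hours, every occurrence count is scaled by input length / covered length, so the objective weights still account for the full input horizon. A minimal sketch of that arithmetic with made-up numbers (not taken from this PR):

    # toy values: 3 typical periods of 24 h covering 8640 h instead of 8760 h
    hours_per_period = 24
    hours_of_input_time_series = 8760
    periods_total_occurrence = [120, 150, 90]

    covered_hours = sum(periods_total_occurrence) * hours_per_period  # 8640
    if covered_hours != hours_of_input_time_series:
        factor = hours_of_input_time_series / covered_hours
        periods_total_occurrence = [
            occurrence * factor for occurrence in periods_total_occurrence
        ]

    # the rescaled occurrences account for the full horizon again
    assert abs(sum(periods_total_occurrence) * hours_per_period - 8760) < 1e-6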
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L133
            objective_weighting.append(periods_total_occurrence[period])
            current_timestamp += pd.Timedelta(minutes=60)
    aggregated_time_series = pd.DatetimeIndex(aggregated_time_series)
    return aggregated_time_series, objective_weighting

+
def main():
    # Read data file
    filename = os.path.join(os.getcwd(), "storage_investment.csv")

    try:
        data = pd.read_csv(filename)
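Note: set_aggregated_timeseries_and_objective_weighting, completed above, produces one weight per aggregated timestep: each timestep of a typical period is weighted with that period's occurrence count (multiplied by the segment length when segmentation is active), and the timestamp axis is rebuilt by stepping forward from the first timestamp. A compact sketch of the unsegmented case with a toy index (not from this PR):

    import pandas as pd

    # toy stand-in for pd.DataFrame.from_dict(aggregation.clusterPeriodDict).index:
    # (period, timestep) pairs for two typical periods of three hours each
    index = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
    periods_total_occurrence = [3, 2]  # period 0 occurs thrice, period 1 twice

    timestamps = []
    objective_weighting = []
    current = pd.Timestamp("2022-01-01")
    for period, timestep in index:
        timestamps.append(current)
        objective_weighting.append(periods_total_occurrence[period])
        current += pd.Timedelta(minutes=60)

    print(objective_weighting)  # [3, 3, 3, 2, 2, 2]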
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L170
    typical_periods = 40
    hours_per_period = 24
    segmentation = False
    if segmentation:
        print("segmentation hasn't been added so far")
-
    else:
        aggregation1 = tsam.TimeSeriesAggregation(
            timeSeries=data.iloc[:8760],
            noTypicalPeriods=typical_periods,
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L201
    aggregation1.createTypicalPeriods()
    aggregation2.createTypicalPeriods()

    periods_total_occurrence1 = [
-        (aggregation1.clusterOrder == typical_period_name).sum() for typical_period_name in
-        aggregation1.clusterPeriodIdx]
+        (aggregation1.clusterOrder == typical_period_name).sum()
+        for typical_period_name in aggregation1.clusterPeriodIdx
+    ]
    periods_total_occurrence2 = [
-        (aggregation2.clusterOrder == typical_period_name).sum() for typical_period_name in
-        aggregation2.clusterPeriodIdx]
-    periods_total_occurrence1 = check_equal_timesteps_after_aggregation(hours_per_period=hours_per_period,
-                                                                        hours_of_input_time_series=t1.__len__(),
-                                                                        periods_total_occurrence=periods_total_occurrence1
-                                                                        )
-    periods_total_occurrence2 = check_equal_timesteps_after_aggregation(hours_per_period = hours_per_period,
-                                                                        hours_of_input_time_series = t2.__len__(),
-                                                                        periods_total_occurrence=periods_total_occurrence2
-                                                                        )
-    #before timeseries generation was based on freq="H" (hourly), now you have to set the number of minutes of one timestep
-    t1_agg, objective_weighting1 = set_aggregated_timeseries_and_objective_weighting(segmentation=segmentation,
-                                                                                     periods_total_occurrence = periods_total_occurrence1,
-                                                                                     aggregated_period_dict=pd.DataFrame.from_dict(aggregation1.clusterPeriodDict),
-                                                                                     first_time_stamp=pd.to_datetime(t1[0])
-                                                                                     )
-    t2_agg, objective_weighting2 = set_aggregated_timeseries_and_objective_weighting(segmentation=segmentation,
-                                                                                     periods_total_occurrence = periods_total_occurrence2,
-                                                                                     aggregated_period_dict=pd.DataFrame.from_dict(aggregation2.clusterPeriodDict),
-                                                                                     first_time_stamp=pd.to_datetime(t2[0])
-                                                                                     )
+        (aggregation2.clusterOrder == typical_period_name).sum()
+        for typical_period_name in aggregation2.clusterPeriodIdx
+    ]
+    periods_total_occurrence1 = check_equal_timesteps_after_aggregation(
+        hours_per_period=hours_per_period,
+        hours_of_input_time_series=t1.__len__(),
+        periods_total_occurrence=periods_total_occurrence1,
+    )
+    periods_total_occurrence2 = check_equal_timesteps_after_aggregation(
+        hours_per_period=hours_per_period,
+        hours_of_input_time_series=t2.__len__(),
+        periods_total_occurrence=periods_total_occurrence2,
+    )
+    # before timeseries generation was based on freq="H" (hourly), now you have to set the number of minutes of one timestep
+    (
+        t1_agg,
+        objective_weighting1,
+    ) = set_aggregated_timeseries_and_objective_weighting(
+        segmentation=segmentation,
+        periods_total_occurrence=periods_total_occurrence1,
+        aggregated_period_dict=pd.DataFrame.from_dict(
+            aggregation1.clusterPeriodDict
+        ),
+        first_time_stamp=pd.to_datetime(t1[0]),
+    )
+    (
+        t2_agg,
+        objective_weighting2,
+    ) = set_aggregated_timeseries_and_objective_weighting(
+        segmentation=segmentation,
+        periods_total_occurrence=periods_total_occurrence2,
+        aggregated_period_dict=pd.DataFrame.from_dict(
+            aggregation2.clusterPeriodDict
+        ),
+        first_time_stamp=pd.to_datetime(t2[0]),
+    )
    objective_weighting = objective_weighting1 + objective_weighting2
-    t2_agg = t2_agg.append(pd.DatetimeIndex([t2_agg[-1] + pd.DateOffset(hours=1)]))
+    t2_agg = t2_agg.append(
+        pd.DatetimeIndex([t2_agg[-1] + pd.DateOffset(hours=1)])
+    )
    tindex_agg = t1_agg.append(t2_agg)

    energysystem = solph.EnergySystem(
        timeindex=tindex_agg,
        periods=[t1_agg, t2_agg],
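Note: the periods_total_occurrence lists reformatted above simply count how often each typical period appears in tsam's clusterOrder. The same counting as a standalone sketch with a synthetic order (not from this run):

    import numpy as np

    # one entry per original period, giving the representing typical period
    cluster_order = np.array([0, 1, 0, 2, 1, 0])
    cluster_period_idx = [0, 1, 2]  # stand-in for aggregation.clusterPeriodIdx

    periods_total_occurrence = [
        int((cluster_order == typical_period_name).sum())
        for typical_period_name in cluster_period_idx
    ]
    print(periods_total_occurrence)  # [3, 2, 1]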
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L373
    ##########################################################################

    logging.info("Optimise the energy system")

    # initialise the operational model
-    om = solph.Model(energysystem,
-                     objective_weighting= objective_weighting
-                     )
+    om = solph.Model(energysystem, objective_weighting=objective_weighting)

    # if tee_switch is true solver messages will be displayed
    logging.info("Solve the optimization problem")
    om.solve(solver="cbc", solve_kwargs={"tee": True})
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L402
    meta_results = solph.processing.meta_results(om)
    pp.pprint(meta_results)

    fig, ax = plt.subplots(figsize=(10, 5))
-    storage_results = results[(storage, None)]["sequences"] / storage.nominal_storage_capacity
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results = (
+        results[(storage, None)]["sequences"]
+        / storage.nominal_storage_capacity
+    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
    plt.show()

    my_results = electricity_bus["period_scalars"]

    # installed capacity of storage in GWh
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L110
    data["pv"].iloc[8760 - 24 : 8760] = 0
    data["pv"].iloc[8760 * 2 - 24 : 8760] = 0
    # add a season without electricity production to simulate the possible advantage using a seasonal storages
    # for the first perido
-    data["wind"].iloc[2920: 2 * 2920 + 1] = 0
-    data["pv"].iloc[2920:2 * 2920 + 1] = 0
+    data["wind"].iloc[2920 : 2 * 2920 + 1] = 0
+    data["pv"].iloc[2920 : 2 * 2920 + 1] = 0

    ##########################################################################
    # Initialize the energy system and read/calculate necessary parameters
    ##########################################################################
    logger.define_logging()
    logging.info("Initialize the energy system")
-    #todo: right now, tsam only determines the timeincrement right, when you pick the
-    #first periods last timestamp next to the second periods first timestep
-    #2022-31-12-23:00 --> 2023-01-01-00:00 , than timeincrement in between is equal to 1
-    #todo add initial storage level in new periods is equal to zero?
+    # todo: right now, tsam only determines the timeincrement right, when you pick the
+    # first periods last timestamp next to the second periods first timestep
+    # 2022-31-12-23:00 --> 2023-01-01-00:00 , than timeincrement in between is equal to 1
+    # todo add initial storage level in new periods is equal to zero?
    t1 = pd.date_range("2022-01-01", periods=8760, freq="H")
    t2 = pd.date_range("2023-01-01", periods=8760, freq="H")
    tindex = t1.append(t2)
    data.index = tindex
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L135
    typical_periods = 10
    hours_per_period = 24
    segmentation = False
    if segmentation:
        print("segmentation hasn't been added so far")
-
    else:
        aggregation1 = tsam.TimeSeriesAggregation(
            timeSeries=data.iloc[:8760],
            noTypicalPeriods=typical_periods,
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L177
    # Create oemof objects
    ##########################################################################

    logging.info("Create oemof objects")
-
    # create electricity bus
    bel = solph.Bus(label="electricity")
-    energysystem.add( bel)
+    energysystem.add(bel)

    # create excess component for the electricity bus to allow overproduction
    excess = solph.components.Sink(
        label="excess_bel", inputs={bel: solph.Flow()}
    )

    # create source object representing the gas commodity (annual limit)
    elect_resource = solph.components.Source(
-        label="electricity_source", outputs={bel: solph.Flow(variable_costs=electricity_price)}
+        label="electricity_source",
+        outputs={bel: solph.Flow(variable_costs=electricity_price)},
    )

    wind_profile = pd.concat(
        [
            aggregation1.typicalPeriods["wind"],
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L205
    wind_profile.iloc[-24:] = 0

    # create fixed source object representing wind power plants
    wind = solph.components.Source(
        label="wind",
-        outputs={
-            bel: solph.Flow(
-                fix=wind_profile,
-                nominal_value=1500000
-            )
-        },
+        outputs={bel: solph.Flow(fix=wind_profile, nominal_value=1500000)},
    )

    pv_profile = pd.concat(
-        [aggregation1.typicalPeriods["pv"],
-         aggregation2.typicalPeriods["pv"]
-         ],
+        [aggregation1.typicalPeriods["pv"], aggregation2.typicalPeriods["pv"]],
        ignore_index=True,
    )
    pv_profile.iloc[-24:] = 0

    # create fixed source object representing pv power plants
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L234
                        aggregation1.typicalPeriods["pv"],
                        aggregation2.typicalPeriods["pv"],
                    ],
                    ignore_index=True,
                ),
-                nominal_value=900000
+                nominal_value=900000,
            )
        },
    )

    # create simple sink object representing the electrical demand
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L279
    ##########################################################################

    logging.info("Optimise the energy system")

    # initialise the operational model
-    om = solph.Model(energysystem
-                     )
+    om = solph.Model(energysystem)

    # if tee_switch is true solver messages will be displayed
    logging.info("Solve the optimization problem")
    om.solve(solver="cbc", solve_kwargs={"tee": True})
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L307
    meta_results = solph.processing.meta_results(om)
    pp.pprint(meta_results)

    fig, ax = plt.subplots(figsize=(10, 5))
-    storage_results = results[(storage, None)]["sequences"] / storage.nominal_storage_capacity
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results = (
+        results[(storage, None)]["sequences"]
+        / storage.nominal_storage_capacity
+    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
    plt.show()

    fig, ax = plt.subplots(figsize=(10, 5))
    storage_results = results[(wind, bel)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
    ax.set_title("Elect. from Wind")
    plt.show()

    if False:
        fig, ax = plt.subplots(figsize=(10, 5))
        storage_results = results[(pv, bel)]["sequences"]
-        storage_results .plot(
-            ax=ax, kind="line", drawstyle="steps-post"
-        )
+        storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
        ax.set_title("Elect. from PV")
        plt.show()

        fig, ax = plt.subplots(figsize=(10, 5))
        storage_results = results[(bel, demand)]["sequences"]
-        storage_results .plot(
-            ax=ax, kind="line", drawstyle="steps-post"
-        )
+        storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
        ax.set_title("Demand")
        plt.show()

        fig, ax = plt.subplots(figsize=(10, 5))
        storage_results = results[(elect_resource, bel)]["sequences"]
-        storage_results .plot(
-            ax=ax, kind="line", drawstyle="steps-post"
-        )
+        storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
        ax.set_title("Elect. from Grid")
        plt.show()

    my_results = electricity_bus["period_scalars"]
-    pp.pprint(my_results)


if __name__ == "__main__":
    main()
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L88
from oemof.tools import economics
from oemof.tools import logger
from oemof.solph import views
from oemof import solph

-def check_equal_timesteps_after_aggregation(hours_per_period : int,
-                                            hours_of_input_time_series: int,
-                                            periods_total_occurrence: list):
-
+
+
+def check_equal_timesteps_after_aggregation(
+    hours_per_period: int,
+    hours_of_input_time_series: int,
+    periods_total_occurrence: list,
+):
    if not sum(periods_total_occurrence) * hours_per_period == 8760:
-        #todo: prints can be deleted in future
-        print("aggregated timeseries has: " + str(int(sum(periods_total_occurrence) * hours_per_period)) + " timesteps")
-        print("unaggregated timeseries has: " + str(hours_of_input_time_series) + " timesteps")
-        print("therefore the occurrence of the typical periods for the objective weighting will be customized")
-        customize_factor = hours_of_input_time_series / int(sum(periods_total_occurrence) * hours_per_period)
-        result_list = [float(occurrence) * customize_factor for occurrence in periods_total_occurrence]
+        # todo: prints can be deleted in future
+        print(
+            "aggregated timeseries has: "
+            + str(int(sum(periods_total_occurrence) * hours_per_period))
+            + " timesteps"
+        )
+        print(
+            "unaggregated timeseries has: "
+            + str(hours_of_input_time_series)
+            + " timesteps"
+        )
+        print(
+            "therefore the occurrence of the typical periods for the objective weighting will be customized"
+        )
+        customize_factor = hours_of_input_time_series / int(
+            sum(periods_total_occurrence) * hours_per_period
+        )
+        result_list = [
+            float(occurrence) * customize_factor
+            for occurrence in periods_total_occurrence
+        ]
        periods_total_occurrence = result_list
        return periods_total_occurrence
    else:
        return periods_total_occurrence

-def set_aggregated_timeseries_and_objective_weighting(segmentation,
-                                                      periods_total_occurrence,
-                                                      aggregated_period_dict,
-                                                      first_time_stamp):
+
+def set_aggregated_timeseries_and_objective_weighting(
+    segmentation,
+    periods_total_occurrence,
+    aggregated_period_dict,
+    first_time_stamp,
+):
    previous_period = 0
    objective_weighting = []
    aggregated_time_series = []
-    current_timestamp=first_time_stamp
+    current_timestamp = first_time_stamp
    if segmentation:
-        for period, timestep, segmented_timestep in aggregated_period_dict.index:
+        for (
+            period,
+            timestep,
+            segmented_timestep,
+        ) in aggregated_period_dict.index:
            if previous_period == period:
                aggregated_time_series.append(current_timestamp)
            else:
                aggregated_time_series.append(current_timestamp)
                previous_period = period
-            objective_weighting.append(periods_total_occurrence[period] * segmented_timestep)
+            objective_weighting.append(
+                periods_total_occurrence[period] * segmented_timestep
+            )
            current_timestamp += pd.Timedelta(minutes=60 * segmented_timestep)
    else:
        for period, timestep in aggregated_period_dict.index:
            if previous_period == period:
                aggregated_time_series.append(current_timestamp)
            else:
                aggregated_time_series.append(current_timestamp)
                previous_period = period
            objective_weighting.append(periods_total_occurrence[period])
            current_timestamp += pd.Timedelta(minutes=60)
-    #time series have to be extended by one, to fit into form of energysystem iput
+    # time series have to be extended by one, to fit into form of energysystem iput
    aggregated_time_series.append(current_timestamp)
    aggregated_time_series = pd.DatetimeIndex(aggregated_time_series)
    return aggregated_time_series, objective_weighting

+
def main():
    # Read data file
    filename = os.path.join(os.getcwd(), "storage_investment.csv")

    try:
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L154
    data["wind"].iloc[8760 * 2 - 24 : 8760] = 0
    data["pv"].iloc[8760 - 24 : 8760] = 0
    data["pv"].iloc[8760 * 2 - 24 : 8760] = 0
    # add a season without electricity production to simulate the possible advantage using a seasonal storages
-    data["wind"].iloc[2920 * 4:5 * 2920 + 1] = 0
-    data["wind"].iloc[2920: 2 * 2920 + 1] = 0
-    data["pv"].iloc[2920:2 * 2920 + 1] = 0
-    data["pv"].iloc[2920 * 4:5 * 2920 + 1] = 0
+    data["wind"].iloc[2920 * 4 : 5 * 2920 + 1] = 0
+    data["wind"].iloc[2920 : 2 * 2920 + 1] = 0
+    data["pv"].iloc[2920 : 2 * 2920 + 1] = 0
+    data["pv"].iloc[2920 * 4 : 5 * 2920 + 1] = 0

    ##########################################################################
    # Initialize the energy system and read/calculate necessary parameters
    ##########################################################################
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L178
    typical_periods = 60
    hours_per_period = 24
    segmentation = False
    if segmentation:
        print("segmentation hasn't been added so far")
-
    else:
        aggregation1 = tsam.TimeSeriesAggregation(
            timeSeries=data.iloc[:8760],
            noTypicalPeriods=typical_periods,
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L205
        extremePeriodMethod="replace_cluster_center",
        addPeakMin=["wind", "pv"],
        representationMethod="durationRepresentation",
    )
-
    aggregation1.createTypicalPeriods()
    aggregation2.createTypicalPeriods()
    if False:
        periods_total_occurrence1 = [
-            (aggregation1.clusterOrder == typical_period_name).sum() for typical_period_name in
-            aggregation1.clusterPeriodIdx]
+            (aggregation1.clusterOrder == typical_period_name).sum()
+            for typical_period_name in aggregation1.clusterPeriodIdx
+        ]
        periods_total_occurrence2 = [
-            (aggregation2.clusterOrder == typical_period_name).sum() for typical_period_name in
-            aggregation2.clusterPeriodIdx]
+            (aggregation2.clusterOrder == typical_period_name).sum()
+            for typical_period_name in aggregation2.clusterPeriodIdx
+        ]
    else:
        periods_total_occurrence1 = aggregation1.clusterPeriodNoOccur
        periods_total_occurrence2 = aggregation1.clusterPeriodNoOccur
-    periods_total_occurrence1 = check_equal_timesteps_after_aggregation(hours_per_period=hours_per_period,
-                                                                        hours_of_input_time_series=t1.__len__(),
-                                                                        periods_total_occurrence=periods_total_occurrence1
-                                                                        )
-    periods_total_occurrence2 = check_equal_timesteps_after_aggregation(hours_per_period = hours_per_period,
-                                                                        hours_of_input_time_series = t2.__len__(),
-                                                                        periods_total_occurrence=periods_total_occurrence2
-                                                                        )
-    #before timeseries generation was based on freq="H" (hourly), now you have to set the number of minutes of one timestep
-    t1_agg, objective_weighting1 = set_aggregated_timeseries_and_objective_weighting(segmentation=segmentation,
-                                                                                     periods_total_occurrence = periods_total_occurrence1,
-                                                                                     aggregated_period_dict=pd.DataFrame.from_dict(aggregation1.clusterPeriodDict),
-                                                                                     first_time_stamp=pd.to_datetime(t1[0])
-                                                                                     )
-    t2_agg, objective_weighting2 = set_aggregated_timeseries_and_objective_weighting(segmentation=segmentation,
-                                                                                     periods_total_occurrence = periods_total_occurrence2,
-                                                                                     aggregated_period_dict=pd.DataFrame.from_dict(aggregation2.clusterPeriodDict),
-                                                                                     first_time_stamp=pd.to_datetime(t1[0])
-                                                                                     )
-    #objective_weighting = objective_weighting1 + objective_weighting2
+    periods_total_occurrence1 = check_equal_timesteps_after_aggregation(
+        hours_per_period=hours_per_period,
+        hours_of_input_time_series=t1.__len__(),
+        periods_total_occurrence=periods_total_occurrence1,
+    )
+    periods_total_occurrence2 = check_equal_timesteps_after_aggregation(
+        hours_per_period=hours_per_period,
+        hours_of_input_time_series=t2.__len__(),
+        periods_total_occurrence=periods_total_occurrence2,
+    )
+    # before timeseries generation was based on freq="H" (hourly), now you have to set the number of minutes of one timestep
+    (
+        t1_agg,
+        objective_weighting1,
+    ) = set_aggregated_timeseries_and_objective_weighting(
+        segmentation=segmentation,
+        periods_total_occurrence=periods_total_occurrence1,
+        aggregated_period_dict=pd.DataFrame.from_dict(
+            aggregation1.clusterPeriodDict
+        ),
+        first_time_stamp=pd.to_datetime(t1[0]),
+    )
+    (
+        t2_agg,
+        objective_weighting2,
+    ) = set_aggregated_timeseries_and_objective_weighting(
+        segmentation=segmentation,
+        periods_total_occurrence=periods_total_occurrence2,
+        aggregated_period_dict=pd.DataFrame.from_dict(
+            aggregation2.clusterPeriodDict
+        ),
+        first_time_stamp=pd.to_datetime(t1[0]),
+    )
+    # objective_weighting = objective_weighting1 + objective_weighting2
    objective_weighting = objective_weighting1
-    #tindex_agg = t1_agg.append(t2_agg)
+    # tindex_agg = t1_agg.append(t2_agg)
    tindex_agg = t1_agg
-    #todo aggregation1.clusterPeriodNoOccur besser zum objective weighting nutzen
+    # todo aggregation1.clusterPeriodNoOccur besser zum objective weighting nutzen
    energysystem = solph.EnergySystem(
        timeindex=tindex_agg,
-        #timeincrement=[1] * len(tindex_agg),
-        periods=[t1_agg,
-                 #t2_agg
-                 ],
+        # timeincrement=[1] * len(tindex_agg),
+        periods=[
+            t1_agg,
+            # t2_agg
+        ],
        tsa_parameters=[
            {
                "timesteps_per_period": aggregation1.hoursPerPeriod,
                "order": aggregation1.clusterOrder,
                "occurrences": aggregation1.clusterPeriodNoOccur,
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L269
    # Create oemof objects
    ##########################################################################

    logging.info("Create oemof objects")
-
    # create electricity bus
    bel = solph.Bus(label="electricity")
-    energysystem.add( bel)
+    energysystem.add(bel)

    # create excess component for the electricity bus to allow overproduction
    excess = solph.components.Sink(
        label="excess_bel", inputs={bel: solph.Flow()}
    )

    # create source object representing the gas commodity (annual limit)
    elect_resource = solph.components.Source(
-        label="electricity_source", outputs={bel: solph.Flow(variable_costs=electricity_price)}
+        label="electricity_source",
+        outputs={bel: solph.Flow(variable_costs=electricity_price)},
    )

    wind_profile = pd.concat(
        [
            aggregation1.typicalPeriods["wind"],
-            #aggregation2.typicalPeriods["wind"],
+            # aggregation2.typicalPeriods["wind"],
        ],
        ignore_index=True,
    )
    wind_profile.iloc[-24:] = 0

    # create fixed source object representing wind power plants
    wind = solph.components.Source(
        label="wind",
-        outputs={
-            bel: solph.Flow(
-                fix=wind_profile,
-                nominal_value=1500000
-            )
-        },
+        outputs={bel: solph.Flow(fix=wind_profile, nominal_value=1500000)},
    )

    pv_profile = pd.concat(
-        [aggregation1.typicalPeriods["pv"],
-         #aggregation2.typicalPeriods["pv"]
-         ],
+        [
+            aggregation1.typicalPeriods["pv"],
+            # aggregation2.typicalPeriods["pv"]
+        ],
        ignore_index=True,
    )
    pv_profile.iloc[-24:] = 0

    # create fixed source object representing pv power plants
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L321
        outputs={
            bel: solph.Flow(
                fix=pd.concat(
                    [
                        aggregation1.typicalPeriods["pv"],
-                        #aggregation2.typicalPeriods["pv"],
+                        # aggregation2.typicalPeriods["pv"],
                    ],
                    ignore_index=True,
                ),
-                nominal_value=900000
+                nominal_value=900000,
            )
        },
    )

    # create simple sink object representing the electrical demand
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L338
        inputs={
            bel: solph.Flow(
                fix=pd.concat(
                    [
                        aggregation1.typicalPeriods["demand_el"],
-                        #aggregation2.typicalPeriods["demand_el"],
+                        # aggregation2.typicalPeriods["demand_el"],
                    ],
                    ignore_index=True,
                ),
                nominal_value=0.05,
            )
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L368
    ##########################################################################

    logging.info("Optimise the energy system")

    # initialise the operational model
-    om = solph.Model(energysystem
-                     )
+    om = solph.Model(energysystem)

    # if tee_switch is true solver messages will be displayed
    logging.info("Solve the optimization problem")
    om.solve(solver="cbc", solve_kwargs={"tee": True})
/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L396
    meta_results = solph.processing.meta_results(om)
    pp.pprint(meta_results)

    fig, ax = plt.subplots(figsize=(10, 5))
-    storage_results = results[(storage, None)]["sequences"] / storage.nominal_storage_capacity
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results = (
+        results[(storage, None)]["sequences"]
+        / storage.nominal_storage_capacity
+    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
    plt.show()

    fig, ax = plt.subplots(figsize=(10, 5))
    storage_results = results[(wind, bel)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
    plt.show()

    fig, ax = plt.subplots(figsize=(10, 5))
    storage_results = results[(pv, bel)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
    plt.show()

    fig, ax = plt.subplots(figsize=(10, 5))
    storage_results = results[(bel, demand)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
    plt.show()

    my_results = electricity_bus["period_scalars"]

    # installed capacity of storage in GWh
    my_results["storage_invest_GWh"] = (
/home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py#L64
from oemof.solph import helpers
from oemof.solph import processing
from oemof.solph import views
from oemof.solph._helpers import aggregate_time_series

+
def main():
    # *************************************************************************
    # ********** PART 1 - Define and optimise the energy system ***************
    # *************************************************************************
/home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py#L82
    data = pd.DataFrame({"pv": [0.3], "wind": [0.6], "demand_el": [500]})

    # First there need to be a pre-processing, because our aggregate_time_series function only
    # works with timeseries.
    df = pd.DataFrame(data)
-    start_time = pd.Timestamp.now().replace(year=2023, month=1, day=1,hour=0, minute=0, second=0, microsecond=0)
+    start_time = pd.Timestamp.now().replace(
+        year=2023, month=1, day=1, hour=0, minute=0, second=0, microsecond=0
+    )
    df["datetime"] = start_time + pd.to_timedelta(df["timestep"] - 1, unit="H")
    df.set_index("datetime", inplace=True)

-    data_dict_to_aggregate = {"wind" : {"timeseries" : df["wind"], "weighted_factor" : 1},
-                              "pv" : {"timeseries" : df["pv"], "weighted_factor" : 1},
-                              "demand_el" : {"timeseries" : df["demand_el"], "weighted_factor" : 1},
-                              }
-
-    number_of_time_steps_per_periods =24
-    number_of_segments_per_period = 24*4
+    data_dict_to_aggregate = {
+        "wind": {"timeseries": df["wind"], "weighted_factor": 1},
+        "pv": {"timeseries": df["pv"], "weighted_factor": 1},
+        "demand_el": {"timeseries": df["demand_el"], "weighted_factor": 1},
+    }
+
+    number_of_time_steps_per_periods = 24
+    number_of_segments_per_period = 24 * 4
    number_of_typical_periods = 12
    segmentation = False
    if False:
-        determine_aggregation_parameters(data_dict_to_aggregate = data_dict_to_aggregate,
-                                         number_of_time_steps_per_periods = number_of_time_steps_per_periods,
-                                         segmentation = False)
+        determine_aggregation_parameters(
+            data_dict_to_aggregate=data_dict_to_aggregate,
+            number_of_time_steps_per_periods=number_of_time_steps_per_periods,
+            segmentation=False,
+        )
    if segmentation:
-        data_dict_aggregated, objective_weighting, clusterClass = aggregate_time_series(data_dict_to_aggregate = data_dict_to_aggregate,
-                                                                                        number_of_typical_periods = number_of_typical_periods,
-                                                                                        number_of_time_steps_per_periods = number_of_time_steps_per_periods,
-                                                                                        number_of_segments_per_period= number_of_segments_per_period,
-                                                                                        segmentation = segmentation
+        (
+            data_dict_aggregated,
+            objective_weighting,
+            clusterClass,
+        ) = aggregate_time_series(
+            data_dict_to_aggregate=data_dict_to_aggregate,
+            number_of_typical_periods=number_of_typical_periods,
+            number_of_time_steps_per_periods=number_of_time_steps_per_periods,
+            number_of_segments_per_period=number_of_segments_per_period,
+            segmentation=segmentation,
        )
    else:
-        data_dict_aggregated, objective_weighting, clusterClass = aggregate_time_series(data_dict_to_aggregate = data_dict_to_aggregate,
-                                                                                        number_of_typical_periods = number_of_typical_periods,
-                                                                                        number_of_time_steps_per_periods = number_of_time_steps_per_periods,
-                                                                                        segmentation = segmentation
+        (
+            data_dict_aggregated,
+            objective_weighting,
+            clusterClass,
+        ) = aggregate_time_series(
+            data_dict_to_aggregate=data_dict_to_aggregate,
+            number_of_typical_periods=number_of_typical_periods,
+            number_of_time_steps_per_periods=number_of_time_steps_per_periods,
+            segmentation=segmentation,
        )

    solver = "cbc"  # 'glpk', 'gurobi',....
    debug = False  # Set number_of_timesteps to 3 to get a readable lp-file.
    solver_verbose = False  # show/hide solver output
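Note: the call sites above fix the input shape of aggregate_time_series: a dict mapping series names to {"timeseries": pd.Series, "weighted_factor": number} entries that all share one DatetimeIndex. A minimal valid input as a sketch (synthetic data):

    import pandas as pd

    index = pd.date_range("2023-01-01", periods=48, freq="H")
    data_dict_to_aggregate = {
        "wind": {
            "timeseries": pd.Series(0.5, index=index),
            "weighted_factor": 1,
        },
        "demand_el": {
            "timeseries": pd.Series(300.0, index=index),
            "weighted_factor": 1,
        },
    }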
/home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py#L130
    date_time_index = data_dict_aggregated["demand_el"]["timeseries"].index
    energysystem = EnergySystem(
        timeindex=date_time_index,
        infer_last_interval=False,
-
    )

    ##########################################################################
    # Create oemof object
    ##########################################################################
/home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py#L166
    # create fixed source object representing wind power plants
    energysystem.add(
        cmp.Source(
            label="wind",
-            outputs={bel: flows.Flow(fix=data_dict_aggregated["wind"]["timeseries"],
-
-                                     nominal_value=1000000)},
+            outputs={
+                bel: flows.Flow(
+                    fix=data_dict_aggregated["wind"]["timeseries"],
+                    nominal_value=1000000,
+                )
+            },
        )
    )

    # create fixed source object representing pv power plants
    energysystem.add(
        cmp.Source(
            label="pv",
-            outputs={bel: flows.Flow(fix=data_dict_aggregated["pv"]["timeseries"], nominal_value=582000)},
+            outputs={
+                bel: flows.Flow(
+                    fix=data_dict_aggregated["pv"]["timeseries"],
+                    nominal_value=582000,
+                )
+            },
        )
    )

    # create simple sink object representing the electrical demand
    energysystem.add(
        cmp.Sink(
            label="demand",
-            inputs={bel: flows.Flow(fix=data_dict_aggregated["demand_el"]["timeseries"], nominal_value=1)},
+            inputs={
+                bel: flows.Flow(
+                    fix=data_dict_aggregated["demand_el"]["timeseries"],
+                    nominal_value=1,
+                )
+            },
        )
    )

    # create simple transformer object representing a gas power plant
    energysystem.add(
/home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py#L205
    ##########################################################################

    logging.info("Optimise the energy system")

    # initialise the operational model
-    model = Model(energysystem, objective_weighting= objective_weighting)
+    model = Model(energysystem, objective_weighting=objective_weighting)

    # This is for debugging only. It is not(!) necessary to solve the problem
    # and should be set to False to save time and disc space in normal use. For
    # debugging the timesteps should be set to 3, to increase the readability
    # of the lp-file.
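Note: this line is the integration point the PR title refers to: the weighting produced by the aggregation is handed to Model, so each aggregated timestep's cost term counts as often as the original timesteps it represents. Conceptually (a sketch of the weighting idea only, not solph's actual objective code):

    # illustrative placeholders, not solph internals
    flow_values = [10.0, 12.0, 8.0]  # one flow value per aggregated timestep
    variable_costs = 25.0
    objective_weighting = [3, 3, 2]  # occurrences of each typical timestep

    weighted_cost = sum(
        weight * variable_costs * flow
        for weight, flow in zip(objective_weighting, flow_values)
    )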
/home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py#L263
    # print the sums of the flows around the electricity bus
    print("********* Main results *********")
    print(electricity_bus["sequences"].sum(axis=0))

+
if __name__ == "__main__":
    main()
-
-
/home/runner/work/oemof-solph/oemof-solph/src/oemof/solph/_helpers.py#L23
from tsam.hyperparametertuning import HyperTunedAggregations
from typing import Dict, List
import copy
import pandas as pd

+
+
def check_node_object_for_missing_attribute(obj, attribute):
    """Raises a predefined warning if object does not have attribute.

    Arguments
    ----------
/home/runner/work/oemof-solph/oemof-solph/src/oemof/solph/_helpers.py#L49
        warn(
            msg.format(attribute, obj.label, type(obj)),
            debugging.SuspiciousUsageWarning,
        )

+
def aggregate_time_series(
    data_dict_to_aggregate: Dict[str, Dict[str, any]],
    number_of_typical_periods: int = 7,
-    number_of_time_steps_per_periods : int = 24,
-    number_of_segments_per_period :int = 16,
-    segmentation : bool = False,
-    cluster_method : str = 'hierarchical',
-    sort_values : bool = True,
-    minutes_per_time_step : int = 60,
-    ):
+    number_of_time_steps_per_periods: int = 24,
+    number_of_segments_per_period: int = 16,
+    segmentation: bool = False,
+    cluster_method: str = "hierarchical",
+    sort_values: bool = True,
+    minutes_per_time_step: int = 60,
+):
    """Aggregate timeseries with the tsam-package.

    Firstly controls format of input data.
    Secondly does time series aggregation with tsam.
    Thirdly changes datetime index of dataframe of results.
/home/runner/work/oemof-solph/oemof-solph/src/oemof/solph/_helpers.py#L88
    sort_values : (bool)
        #todo understand what this variable does
    """
    entry_format_timeseries = None
    for entry_name, entry_data in data_dict_to_aggregate.items():
        if not isinstance(entry_data, dict):
-            raise ValueError(f"Entry '{entry_name}' should have a dictionary as its value.")
+            raise ValueError(
+                f"Entry '{entry_name}' should have a dictionary as its value."
+            )

        required_keys = ["timeseries", "weighted_factor"]
        missing_keys = [key for key in required_keys if key not in entry_data]
        if entry_format_timeseries is None:
            entry_format_timeseries = entry_data["timeseries"].index
        else:
-            if not entry_format_timeseries.equals(entry_data["timeseries"].index):
-                raise ValueError(f"TimeSeries Format of at least'{entry_name}' is unequal to: {previous_entry_name}")
+            if not entry_format_timeseries.equals(
+                entry_data["timeseries"].index
+            ):
+                raise ValueError(
+                    f"TimeSeries Format of at least'{entry_name}' is unequal to: {previous_entry_name}"
+                )
        previous_entry_name = entry_name
        if missing_keys:
-            raise ValueError(f"Entry '{entry_name}' is missing the following keys: {', '.join(missing_keys)}")
+            raise ValueError(
+                f"Entry '{entry_name}' is missing the following keys: {', '.join(missing_keys)}"
+            )

        if not isinstance(entry_data["timeseries"], pd.Series):
-            raise ValueError(f"Timeseries for entry '{entry_name}' should be a pd.Series.")
-
-        if not all(isinstance(timestamp, (int, float)) for timestamp in entry_data["timeseries"]):
-            raise ValueError(f"Timeseries for entry '{entry_name}' should contain only numeric timestamps.")
+            raise ValueError(
+                f"Timeseries for entry '{entry_name}' should be a pd.Series."
+            )
+
+        if not all(
+            isinstance(timestamp, (int, float))
+            for timestamp in entry_data["timeseries"]
+        ):
+            raise ValueError(
+                f"Timeseries for entry '{entry_name}' should contain only numeric timestamps."
+            )

        if not isinstance(entry_data["weighted_factor"], (float, int)):
-            raise ValueError(f"Weighted factor for entry '{entry_name}' should be a float.")
+            raise ValueError(
+                f"Weighted factor for entry '{entry_name}' should be a float."
+            )
    if segmentation:
        if number_of_segments_per_period > number_of_time_steps_per_periods:
-            ValueError(f"Number of segments per period equal to'{number_of_segments_per_period}' has to be smaller than number of time steps per periods equal to {number_of_time_steps_per_periods}")
-
-    hours_per_period = number_of_time_steps_per_periods * minutes_per_time_step / 60
+            ValueError(
+                f"Number of segments per period equal to'{number_of_segments_per_period}' has to be smaller than number of time steps per periods equal to {number_of_time_steps_per_periods}"
+            )
+
+    hours_per_period = (
+        number_of_time_steps_per_periods * minutes_per_time_step / 60
+    )
    time_series_data = pd.DataFrame()
    weighted_factors_dict = {}
    for key, value in data_dict_to_aggregate.items():
-        if 'timeseries' in value:
-            time_series_data[key] = value['timeseries']
-        if 'weighted_factor' in value:
-            weighted_factors_dict[key] = value['weighted_factor']
+        if "timeseries" in value:
+            time_series_data[key] = value["timeseries"]
+        if "weighted_factor" in value:
+            weighted_factors_dict[key] = value["weighted_factor"]
    if segmentation:
        clusterClass = TimeSeriesAggregation(
            timeSeries=time_series_data,
            noTypicalPeriods=number_of_typical_periods,
            segmentation=segmentation,
            noSegments=number_of_segments_per_period,
            hoursPerPeriod=hours_per_period,
            clusterMethod=cluster_method,
            sortValues=sort_values,
-            weightDict=weighted_factors_dict
+            weightDict=weighted_factors_dict,
        )
        data = pd.DataFrame.from_dict(clusterClass.clusterPeriodDict)
    else:
        clusterClass = TimeSeriesAggregation(
            timeSeries=time_series_data,
/home/runner/work/oemof-solph/oemof-solph/src/oemof/solph/_helpers.py#L150
    hours_of_time_series = entry_format_timeseries.__len__()
    periods_name = clusterClass.clusterPeriodIdx
    periods_order = clusterClass.clusterOrder
    periods_total_occurrence = [
-        (periods_order == typical_period_name).sum() for typical_period_name in periods_name
+        (periods_order == typical_period_name).sum()
+        for typical_period_name in periods_name
    ]
    start_date = entry_format_timeseries[0]
    if not sum(periods_total_occurrence) * hours_per_period == 8760:
-
-        print("aggregated timeseries has: " +str(int(sum(periods_total_occurrence) * hours_per_period))+ " timesteps")
-        print("unaggregated timeseries has: " +str(hours_of_time_series)+ " timesteps")
-        print("therefore the occurrence of the typical periods for the objective weighting will be customized")
-        customize_factor = hours_of_time_series / int(sum(periods_total_occurrence) * hours_per_period)
-        result_list = [float(occurrence) * customize_factor for occurrence in periods_total_occurrence]
+        print(
+            "aggregated timeseries has: "
+            + str(int(sum(periods_total_occurrence) * hours_per_period))
+            + " timesteps"
+        )
+        print(
+            "unaggregated timeseries has: "
+            + str(hours_of_time_series)
+            + " timesteps"
+        )
+        print(
+            "therefore the occurrence of the typical periods for the objective weighting will be customized"
+        )
+        customize_factor = hours_of_time_series / int(
+            sum(periods_total_occurrence) * hours_per_period
+        )
+        result_list = [
+            float(occurrence) * customize_factor
+            for occurrence in periods_total_occurrence
+        ]
        periods_total_occurrence = result_list
-
    current_timestamp = pd.to_datetime(start_date)
    previous_period = 0
    objective_weighting = []
    extended_time_series = []
/home/runner/work/oemof-solph/oemof-solph/src/oemof/solph/_helpers.py#L175
            if previous_period == period:
                extended_time_series.append(current_timestamp)
            else:
                extended_time_series.append(current_timestamp)
                previous_period = period
-            objective_weighting.append(periods_total_occurrence[period] * segmented_timestep)
-            current_timestamp += pd.Timedelta(minutes=minute_resolution_of_one_hour * segmented_timestep)
+            objective_weighting.append(
+                periods_total_occurrence[period] * segmented_timestep
+            )
+            current_timestamp += pd.Timedelta(
+                minutes=minute_resolution_of_one_hour * segmented_timestep
+            )
    else:
        for period, timestep in data.index:
            if previous_period == period:
                extended_time_series.append(current_timestamp)
            else:
                extended_time_series.append(current_timestamp)
                previous_period = period
            objective_weighting.append(periods_total_occurrence[period])
-            current_timestamp += pd.Timedelta(minutes=minute_resolution_of_one_hour)
+            current_timestamp += pd.Timedelta(
+                minutes=minute_resolution_of_one_hour
+            )
    data.index = extended_time_series
    data_dict_aggregated = {}
    for name in data:
        if len(data[name]) == len(objective_weighting):
-            data_dict_aggregated[name] = { "timeseries" : data[name]}
+            data_dict_aggregated[name] = {"timeseries": data[name]}
        else:
-            raise ValueError(f"Aggregated timeseries for: '{data[name]}' has a different length as "
-                             f"objective weighting list")
+            raise ValueError(
+                f"Aggregated timeseries for: '{data[name]}' has a different length as "
+                f"objective weighting list"
+            )
    return data_dict_aggregated, objective_weighting, clusterClass
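Note: as completed above, aggregate_time_series returns the aggregated dict (same keys, each holding a reindexed "timeseries"), the per-timestep objective weighting, and the tsam aggregation object. A usage sketch mirroring basic_tsam_example.py (argument values are illustrative, and the helper is assumed importable as in that example):

    import pandas as pd

    from oemof.solph._helpers import aggregate_time_series

    index = pd.date_range("2023-01-01", periods=8760, freq="H")
    data_dict_to_aggregate = {
        "wind": {
            "timeseries": pd.Series(0.5, index=index),
            "weighted_factor": 1,
        },
    }
    data_dict_aggregated, objective_weighting, cluster = aggregate_time_series(
        data_dict_to_aggregate=data_dict_to_aggregate,
        number_of_typical_periods=12,
        number_of_time_steps_per_periods=24,
        segmentation=False,
    )
    # each aggregated series lines up with one weight per timestep
    assert len(data_dict_aggregated["wind"]["timeseries"]) == len(
        objective_weighting
    )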
/home/runner/work/oemof-solph/oemof-solph/tests/test_scripts/test_solph/test_tsam/test_storage_tsam_integration.py#L145
last_output = (100 * 1 / 0.8) / 0.99
init_soc = (first_input - last_output) / (1 / 0.99 + 0.99)


def test_storage_input():
-    assert flows["electricity-storage"][0] == pytest.approx((first_input - 0.99 * init_soc) / 0.9)
+    assert flows["electricity-storage"][0] == pytest.approx(
+        (first_input - 0.99 * init_soc) / 0.9
+    )
    assert flows["electricity-storage"][1] == 0
    assert flows["electricity-storage"][2] == 0
    assert flows["electricity-storage"][3] == 0
    assert flows["electricity-storage"][4] == 0
    assert flows["electricity-storage"][5] == 0
/home/runner/work/oemof-solph/oemof-solph/tests/test_scripts/test_solph/test_tsam/test_storage_invest_tsam_integration.py#L148
init_soc = 0


def test_storage_investment():
    """Make sure that max SOC investment equals max load"""
-    assert results[storage, None]["period_scalars"]["invest"].iloc[0] == pytest.approx(first_input)
+    assert results[storage, None]["period_scalars"]["invest"].iloc[
+        0
+    ] == pytest.approx(first_input)


def test_storage_input():
-    assert flows["electricity-storage"][0] == pytest.approx((first_input - 0.99 * init_soc) / 0.9)
+    assert flows["electricity-storage"][0] == pytest.approx(
+        (first_input - 0.99 * init_soc) / 0.9
+    )
    assert flows["electricity-storage"][1] == 0
    assert flows["electricity-storage"][2] == 0
    assert flows["electricity-storage"][3] == 0
    assert flows["electricity-storage"][4] == 0
    assert flows["electricity-storage"][5] == 0
docs
Process completed with exit code 2.
check
The operation was canceled.
clean
The following actions uses node12 which is deprecated and will be forced to run on node16: actions/checkout@v2, actions/setup-python@v2, actions/cache@v2. For more info: https://github.blog/changelog/2023-06-13-github-actions-all-actions-will-run-on-node16-instead-of-node12-by-default/
docs
The following actions uses node12 which is deprecated and will be forced to run on node16: actions/checkout@v2, actions/setup-python@v2, actions/cache@v2. For more info: https://github.blog/changelog/2023-06-13-github-actions-all-actions-will-run-on-node16-instead-of-node12-by-default/
check
The following actions uses node12 which is deprecated and will be forced to run on node16: actions/checkout@v2, actions/setup-python@v2, actions/cache@v2. For more info: https://github.blog/changelog/2023-06-13-github-actions-all-actions-will-run-on-node16-instead-of-node12-by-default/