Feature/integrate tsam seg objwei reviewed #1259

GitHub Actions / Black failed Oct 31, 2023 in 0s

Black found 36 errors

Annotations

Check failure on line 315 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_invest_optimize_all_technologies_using_mp_and_tsam.py#L304-L315

     # initialise the operational model
     om = solph.Model(energysystem)
 
     # if tee_switch is true solver messages will be displayed
     logging.info("Solve the optimization problem")
-    om.write('my_model.lp', io_options={'symbolic_solver_labels': True})
+    om.write("my_model.lp", io_options={"symbolic_solver_labels": True})
     om.solve(solver="cbc", solve_kwargs={"tee": True})
 
     ##########################################################################
     # Check and plot the results
     ##########################################################################
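
These hunks are Black's own suggested rewrites, so running Black over the examples directory (for instance python -m black examples/) applies them in place. As a minimal sketch, the quote normalization in the hunk above can be reproduced with Black's Python API, assuming the black package is installed and a line length of 79 (inferred from how the diffs wrap):

    import black

    source = (
        "om.write('my_model.lp', io_options={'symbolic_solver_labels': True})\n"
    )

    # Black normalizes the single quotes to double quotes; the line already
    # fits within 79 characters, so nothing else changes.
    print(black.format_str(source, mode=black.Mode(line_length=79)))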

Check failure on line 130 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L88-L130

 from oemof.tools import economics
 from oemof.tools import logger
 from oemof.solph import views
 
 from oemof import solph
-def check_equal_timesteps_after_aggregation(hours_per_period : int,
-                                            hours_of_input_time_series: int,
-                                            periods_total_occurrence: list):
-
+
+
+def check_equal_timesteps_after_aggregation(
+    hours_per_period: int,
+    hours_of_input_time_series: int,
+    periods_total_occurrence: list,
+):
     if not sum(periods_total_occurrence) * hours_per_period == 8760:
-        #todo: prints can be deleted in future
-        print("aggregated timeseries has: " + str(int(sum(periods_total_occurrence) * hours_per_period)) + " timesteps")
-        print("unaggregated timeseries has: " + str(hours_of_input_time_series) + " timesteps")
-        print("therefore the occurrence of the typical periods for the objective weighting will be customized")
-        customize_factor = hours_of_input_time_series / int(sum(periods_total_occurrence) * hours_per_period)
-        result_list = [float(occurrence) * customize_factor for occurrence in periods_total_occurrence]
+        # todo: prints can be deleted in future
+        print(
+            "aggregated timeseries has: "
+            + str(int(sum(periods_total_occurrence) * hours_per_period))
+            + " timesteps"
+        )
+        print(
+            "unaggregated timeseries has: "
+            + str(hours_of_input_time_series)
+            + " timesteps"
+        )
+        print(
+            "therefore the occurrence of the typical periods for the objective weighting will be customized"
+        )
+        customize_factor = hours_of_input_time_series / int(
+            sum(periods_total_occurrence) * hours_per_period
+        )
+        result_list = [
+            float(occurrence) * customize_factor
+            for occurrence in periods_total_occurrence
+        ]
         periods_total_occurrence = result_list
         return periods_total_occurrence
     else:
         return periods_total_occurrence
 
-def set_aggregated_timeseries_and_objective_weighting(segmentation,
-                                                      periods_total_occurrence,
-                                                      aggregated_period_dict,
-                                                      first_time_stamp):
+
+def set_aggregated_timeseries_and_objective_weighting(
+    segmentation,
+    periods_total_occurrence,
+    aggregated_period_dict,
+    first_time_stamp,
+):
     previous_period = 0
     objective_weighting = []
     aggregated_time_series = []
-    current_timestamp=first_time_stamp
+    current_timestamp = first_time_stamp
     if segmentation:
-        for period, timestep, segmented_timestep in aggregated_period_dict.index:
+        for (
+            period,
+            timestep,
+            segmented_timestep,
+        ) in aggregated_period_dict.index:
             if previous_period == period:
                 aggregated_time_series.append(current_timestamp)
             else:
                 aggregated_time_series.append(current_timestamp)
                 previous_period = period
-            objective_weighting.append(periods_total_occurrence[period] * segmented_timestep)
+            objective_weighting.append(
+                periods_total_occurrence[period] * segmented_timestep
+            )
             current_timestamp += pd.Timedelta(minutes=60 * segmented_timestep)
     else:
         for period, timestep in aggregated_period_dict.index:
             if previous_period == period:
                 aggregated_time_series.append(current_timestamp)
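
Besides the formatting changes, check_equal_timesteps_after_aggregation above rescales the typical-period occurrences whenever the aggregated series does not cover the full 8760 h of the input year. A small numeric sketch of that rescaling (the values are illustrative, not taken from this run):

    hours_per_period = 24
    periods_total_occurrence = [200, 100, 64]   # 364 periods -> 8736 h covered
    hours_of_input_time_series = 8760

    covered_hours = sum(periods_total_occurrence) * hours_per_period  # 8736
    factor = hours_of_input_time_series / covered_hours               # ~1.0027
    rescaled = [occ * factor for occ in periods_total_occurrence]
    assert round(sum(rescaled) * hours_per_period) == 8760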

Check failure on line 143 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L133-L143

             objective_weighting.append(periods_total_occurrence[period])
             current_timestamp += pd.Timedelta(minutes=60)
     aggregated_time_series = pd.DatetimeIndex(aggregated_time_series)
     return aggregated_time_series, objective_weighting
 
+
 def main():
     # Read data file
     filename = os.path.join(os.getcwd(), "storage_investment.csv")
     try:
         data = pd.read_csv(filename)

Check failure on line 181 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L170-L181

     typical_periods = 40
     hours_per_period = 24
     segmentation = False
     if segmentation:
         print("segmentation hasn't been added so far")
-
 
     else:
         aggregation1 = tsam.TimeSeriesAggregation(
             timeSeries=data.iloc[:8760],
             noTypicalPeriods=typical_periods,

Check failure on line 238 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L201-L238

 
     aggregation1.createTypicalPeriods()
     aggregation2.createTypicalPeriods()
 
     periods_total_occurrence1 = [
-        (aggregation1.clusterOrder == typical_period_name).sum() for typical_period_name in
-        aggregation1.clusterPeriodIdx]
+        (aggregation1.clusterOrder == typical_period_name).sum()
+        for typical_period_name in aggregation1.clusterPeriodIdx
+    ]
     periods_total_occurrence2 = [
-        (aggregation2.clusterOrder == typical_period_name).sum() for typical_period_name in
-        aggregation2.clusterPeriodIdx]
-    periods_total_occurrence1 = check_equal_timesteps_after_aggregation(hours_per_period=hours_per_period,
-                                            hours_of_input_time_series=t1.__len__(),
-                                            periods_total_occurrence=periods_total_occurrence1
-                                                                        )
-    periods_total_occurrence2 = check_equal_timesteps_after_aggregation(hours_per_period = hours_per_period,
-                                            hours_of_input_time_series = t2.__len__(),
-                                            periods_total_occurrence=periods_total_occurrence2
-                                                                        )
-    #before timeseries generation was based on freq="H" (hourly), now you have to set the number of minutes of one timestep
-    t1_agg, objective_weighting1 = set_aggregated_timeseries_and_objective_weighting(segmentation=segmentation,
-                                                      periods_total_occurrence = periods_total_occurrence1,
-                                                      aggregated_period_dict=pd.DataFrame.from_dict(aggregation1.clusterPeriodDict),
-                                                      first_time_stamp=pd.to_datetime(t1[0])
-                                                                                     )
-    t2_agg, objective_weighting2 = set_aggregated_timeseries_and_objective_weighting(segmentation=segmentation,
-                                                      periods_total_occurrence = periods_total_occurrence2,
-                                                      aggregated_period_dict=pd.DataFrame.from_dict(aggregation2.clusterPeriodDict),
-                                                      first_time_stamp=pd.to_datetime(t2[0])
-                                                                                     )
+        (aggregation2.clusterOrder == typical_period_name).sum()
+        for typical_period_name in aggregation2.clusterPeriodIdx
+    ]
+    periods_total_occurrence1 = check_equal_timesteps_after_aggregation(
+        hours_per_period=hours_per_period,
+        hours_of_input_time_series=t1.__len__(),
+        periods_total_occurrence=periods_total_occurrence1,
+    )
+    periods_total_occurrence2 = check_equal_timesteps_after_aggregation(
+        hours_per_period=hours_per_period,
+        hours_of_input_time_series=t2.__len__(),
+        periods_total_occurrence=periods_total_occurrence2,
+    )
+    # before timeseries generation was based on freq="H" (hourly), now you have to set the number of minutes of one timestep
+    (
+        t1_agg,
+        objective_weighting1,
+    ) = set_aggregated_timeseries_and_objective_weighting(
+        segmentation=segmentation,
+        periods_total_occurrence=periods_total_occurrence1,
+        aggregated_period_dict=pd.DataFrame.from_dict(
+            aggregation1.clusterPeriodDict
+        ),
+        first_time_stamp=pd.to_datetime(t1[0]),
+    )
+    (
+        t2_agg,
+        objective_weighting2,
+    ) = set_aggregated_timeseries_and_objective_weighting(
+        segmentation=segmentation,
+        periods_total_occurrence=periods_total_occurrence2,
+        aggregated_period_dict=pd.DataFrame.from_dict(
+            aggregation2.clusterPeriodDict
+        ),
+        first_time_stamp=pd.to_datetime(t2[0]),
+    )
     objective_weighting = objective_weighting1 + objective_weighting2
 
-    t2_agg = t2_agg.append(pd.DatetimeIndex([t2_agg[-1] + pd.DateOffset(hours=1)]))
+    t2_agg = t2_agg.append(
+        pd.DatetimeIndex([t2_agg[-1] + pd.DateOffset(hours=1)])
+    )
     tindex_agg = t1_agg.append(t2_agg)
 
     energysystem = solph.EnergySystem(
         timeindex=tindex_agg,
         periods=[t1_agg, t2_agg],
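
The list comprehensions above count how often each typical period shows up in tsam's cluster order; tsam also reports these counts directly through clusterPeriodNoOccur (the v6 example further down uses that attribute instead). A hedged sketch of the equivalence, assuming tsam.timeseriesaggregation is imported as tsam as in these examples and using dummy data in place of storage_investment.csv:

    import numpy as np
    import pandas as pd
    import tsam.timeseriesaggregation as tsam

    # Dummy hourly input standing in for the CSV data (assumption).
    rng = np.random.default_rng(0)
    data = pd.DataFrame(
        {"wind": rng.random(8760), "pv": rng.random(8760)},
        index=pd.date_range("2022-01-01", periods=8760, freq="H"),
    )

    aggregation = tsam.TimeSeriesAggregation(
        timeSeries=data, noTypicalPeriods=40, hoursPerPeriod=24
    )
    aggregation.createTypicalPeriods()

    # Counting matches of each typical period in the cluster order ...
    occurrences = [
        (aggregation.clusterOrder == idx).sum()
        for idx in aggregation.clusterPeriodIdx
    ]
    # ... should agree with tsam's own bookkeeping, a dict mapping each
    # typical-period index to its number of occurrences.
    print(occurrences)
    print(aggregation.clusterPeriodNoOccur)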

Check failure on line 386 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L373-L386

     ##########################################################################
 
     logging.info("Optimise the energy system")
 
     # initialise the operational model
-    om = solph.Model(energysystem,
-                     objective_weighting= objective_weighting
-                     )
+    om = solph.Model(energysystem, objective_weighting=objective_weighting)
 
     # if tee_switch is true solver messages will be displayed
     logging.info("Solve the optimization problem")
     om.solve(solver="cbc", solve_kwargs={"tee": True})
 

Check failure on line 416 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v5_invest_optimize_all_technologies_but_storage_using_mp_and_tsam.py#L402-L416

 
     meta_results = solph.processing.meta_results(om)
     pp.pprint(meta_results)
 
     fig, ax = plt.subplots(figsize=(10, 5))
-    storage_results = results[(storage, None)]["sequences"] / storage.nominal_storage_capacity
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results = (
+        results[(storage, None)]["sequences"]
+        / storage.nominal_storage_capacity
+    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     plt.show()
 
     my_results = electricity_bus["period_scalars"]
 
     # installed capacity of storage in GWh
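
For orientation, the results and electricity_bus objects used above come from solph's standard post-processing; a hedged sketch of the surrounding calls (the names match the diffs, and "period_scalars" is the key these multi-period examples read, next to the usual "sequences"):

    # Hedged sketch of the post-processing around the hunk above.
    results = solph.processing.results(om)
    electricity_bus = solph.views.node(results, "electricity")

    # Multi-period node views expose period-wise scalars in addition to the
    # time series; the examples read the former for the investment results.
    my_results = electricity_bus["period_scalars"]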

Check failure on line 133 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L110-L133

     data["pv"].iloc[8760 - 24 : 8760] = 0
     data["pv"].iloc[8760 * 2 - 24 : 8760] = 0
 
     # add a season without electricity production to simulate the possible advantage using a seasonal storages
     # for the first perido
-    data["wind"].iloc[2920: 2 * 2920 + 1] = 0
-    data["pv"].iloc[2920:2 * 2920 + 1] = 0
+    data["wind"].iloc[2920 : 2 * 2920 + 1] = 0
+    data["pv"].iloc[2920 : 2 * 2920 + 1] = 0
 
     ##########################################################################
     # Initialize the energy system and read/calculate necessary parameters
     ##########################################################################
 
     logger.define_logging()
     logging.info("Initialize the energy system")
-    #todo: right now, tsam only determines the timeincrement right, when you pick the
-    #first periods last timestamp next to the second periods first timestep
-    #2022-31-12-23:00 --> 2023-01-01-00:00 , than timeincrement in between is equal to 1
-    #todo add initial storage level in new periods is equal to zero?
+    # todo: right now, tsam only determines the timeincrement right, when you pick the
+    # first periods last timestamp next to the second periods first timestep
+    # 2022-31-12-23:00 --> 2023-01-01-00:00 , than timeincrement in between is equal to 1
+    # todo add initial storage level in new periods is equal to zero?
     t1 = pd.date_range("2022-01-01", periods=8760, freq="H")
     t2 = pd.date_range("2023-01-01", periods=8760, freq="H")
     tindex = t1.append(t2)
 
     data.index = tindex
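
The todo above hinges on the two yearly indexes lining up back to back; a small sketch of why the increment across the period boundary is exactly one hour:

    import pandas as pd

    t1 = pd.date_range("2022-01-01", periods=8760, freq="H")
    t2 = pd.date_range("2023-01-01", periods=8760, freq="H")
    tindex = t1.append(t2)

    # t1 ends at 2022-12-31 23:00 and t2 starts at 2023-01-01 00:00, so the
    # step across the boundary is the same one hour as everywhere else.
    assert t2[0] - t1[-1] == pd.Timedelta(hours=1)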

Check failure on line 146 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L135-L146

     typical_periods = 10
     hours_per_period = 24
     segmentation = False
     if segmentation:
         print("segmentation hasn't been added so far")
-
 
     else:
         aggregation1 = tsam.TimeSeriesAggregation(
             timeSeries=data.iloc[:8760],
             noTypicalPeriods=typical_periods,

Check failure on line 201 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L177-L201

     # Create oemof objects
     ##########################################################################
 
     logging.info("Create oemof objects")
 
-
     # create electricity bus
     bel = solph.Bus(label="electricity")
 
-    energysystem.add( bel)
+    energysystem.add(bel)
 
     # create excess component for the electricity bus to allow overproduction
     excess = solph.components.Sink(
         label="excess_bel", inputs={bel: solph.Flow()}
     )
 
     # create source object representing the gas commodity (annual limit)
     elect_resource = solph.components.Source(
-        label="electricity_source", outputs={bel: solph.Flow(variable_costs=electricity_price)}
+        label="electricity_source",
+        outputs={bel: solph.Flow(variable_costs=electricity_price)},
     )
 
     wind_profile = pd.concat(
         [
             aggregation1.typicalPeriods["wind"],

Check failure on line 227 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L205-L227

     wind_profile.iloc[-24:] = 0
 
     # create fixed source object representing wind power plants
     wind = solph.components.Source(
         label="wind",
-        outputs={
-            bel: solph.Flow(
-                fix=wind_profile,
-                nominal_value=1500000
-            )
-        },
+        outputs={bel: solph.Flow(fix=wind_profile, nominal_value=1500000)},
     )
 
     pv_profile = pd.concat(
-        [aggregation1.typicalPeriods["pv"],
-         aggregation2.typicalPeriods["pv"]
-         ],
+        [aggregation1.typicalPeriods["pv"], aggregation2.typicalPeriods["pv"]],
         ignore_index=True,
     )
     pv_profile.iloc[-24:] = 0
 
     # create fixed source object representing pv power plants

Check failure on line 245 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L234-L245

                             aggregation1.typicalPeriods["pv"],
                             aggregation2.typicalPeriods["pv"],
                         ],
                         ignore_index=True,
                     ),
-                    nominal_value=900000
+                    nominal_value=900000,
                 )
             },
         )
 
     # create simple sink object representing the electrical demand

Check failure on line 291 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L279-L291

     ##########################################################################
 
     logging.info("Optimise the energy system")
 
     # initialise the operational model
-    om = solph.Model(energysystem
-                     )
+    om = solph.Model(energysystem)
 
     # if tee_switch is true solver messages will be displayed
     logging.info("Solve the optimization problem")
     om.solve(solver="cbc", solve_kwargs={"tee": True})
 

Check failure on line 357 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v7_no_invest_tsam_integrated_into_energy_system.py#L307-L357

 
     meta_results = solph.processing.meta_results(om)
     pp.pprint(meta_results)
 
     fig, ax = plt.subplots(figsize=(10, 5))
-    storage_results = results[(storage, None)]["sequences"] / storage.nominal_storage_capacity
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results = (
+        results[(storage, None)]["sequences"]
+        / storage.nominal_storage_capacity
+    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     plt.show()
 
     fig, ax = plt.subplots(figsize=(10, 5))
     storage_results = results[(wind, bel)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     ax.set_title("Elect. from Wind")
     plt.show()
     if False:
         fig, ax = plt.subplots(figsize=(10, 5))
         storage_results = results[(pv, bel)]["sequences"]
-        storage_results .plot(
-            ax=ax, kind="line", drawstyle="steps-post"
-        )
+        storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
         ax.set_title("Elect. from PV")
         plt.show()
 
     fig, ax = plt.subplots(figsize=(10, 5))
     storage_results = results[(bel, demand)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     ax.set_title("Demand")
     plt.show()
 
     fig, ax = plt.subplots(figsize=(10, 5))
     storage_results = results[(elect_resource, bel)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     ax.set_title("Elect. from Grid")
     plt.show()
     my_results = electricity_bus["period_scalars"]
 
-
     pp.pprint(my_results)
 
 
 if __name__ == "__main__":
     main()

Check failure on line 144 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L88-L144

 from oemof.tools import economics
 from oemof.tools import logger
 from oemof.solph import views
 
 from oemof import solph
-def check_equal_timesteps_after_aggregation(hours_per_period : int,
-                                            hours_of_input_time_series: int,
-                                            periods_total_occurrence: list):
-
+
+
+def check_equal_timesteps_after_aggregation(
+    hours_per_period: int,
+    hours_of_input_time_series: int,
+    periods_total_occurrence: list,
+):
     if not sum(periods_total_occurrence) * hours_per_period == 8760:
-        #todo: prints can be deleted in future
-        print("aggregated timeseries has: " + str(int(sum(periods_total_occurrence) * hours_per_period)) + " timesteps")
-        print("unaggregated timeseries has: " + str(hours_of_input_time_series) + " timesteps")
-        print("therefore the occurrence of the typical periods for the objective weighting will be customized")
-        customize_factor = hours_of_input_time_series / int(sum(periods_total_occurrence) * hours_per_period)
-        result_list = [float(occurrence) * customize_factor for occurrence in periods_total_occurrence]
+        # todo: prints can be deleted in future
+        print(
+            "aggregated timeseries has: "
+            + str(int(sum(periods_total_occurrence) * hours_per_period))
+            + " timesteps"
+        )
+        print(
+            "unaggregated timeseries has: "
+            + str(hours_of_input_time_series)
+            + " timesteps"
+        )
+        print(
+            "therefore the occurrence of the typical periods for the objective weighting will be customized"
+        )
+        customize_factor = hours_of_input_time_series / int(
+            sum(periods_total_occurrence) * hours_per_period
+        )
+        result_list = [
+            float(occurrence) * customize_factor
+            for occurrence in periods_total_occurrence
+        ]
         periods_total_occurrence = result_list
         return periods_total_occurrence
     else:
         return periods_total_occurrence
 
-def set_aggregated_timeseries_and_objective_weighting(segmentation,
-                                                      periods_total_occurrence,
-                                                      aggregated_period_dict,
-                                                      first_time_stamp):
+
+def set_aggregated_timeseries_and_objective_weighting(
+    segmentation,
+    periods_total_occurrence,
+    aggregated_period_dict,
+    first_time_stamp,
+):
     previous_period = 0
     objective_weighting = []
     aggregated_time_series = []
-    current_timestamp=first_time_stamp
+    current_timestamp = first_time_stamp
     if segmentation:
-        for period, timestep, segmented_timestep in aggregated_period_dict.index:
+        for (
+            period,
+            timestep,
+            segmented_timestep,
+        ) in aggregated_period_dict.index:
             if previous_period == period:
                 aggregated_time_series.append(current_timestamp)
             else:
                 aggregated_time_series.append(current_timestamp)
                 previous_period = period
-            objective_weighting.append(periods_total_occurrence[period] * segmented_timestep)
+            objective_weighting.append(
+                periods_total_occurrence[period] * segmented_timestep
+            )
             current_timestamp += pd.Timedelta(minutes=60 * segmented_timestep)
     else:
         for period, timestep in aggregated_period_dict.index:
             if previous_period == period:
                 aggregated_time_series.append(current_timestamp)
             else:
                 aggregated_time_series.append(current_timestamp)
                 previous_period = period
             objective_weighting.append(periods_total_occurrence[period])
             current_timestamp += pd.Timedelta(minutes=60)
-    #time series have to be extended by one, to fit into form of energysystem iput
+    # time series have to be extended by one, to fit into form of energysystem iput
     aggregated_time_series.append(current_timestamp)
     aggregated_time_series = pd.DatetimeIndex(aggregated_time_series)
     return aggregated_time_series, objective_weighting
+
 
 def main():
     # Read data file
     filename = os.path.join(os.getcwd(), "storage_investment.csv")
     try:
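
Two details in the hunk above are easy to miss: with segmentation enabled, each segment's objective weight is its typical period's occurrence multiplied by the segment length in hours, and the aggregated index is extended by one final timestamp so it can serve as the right edge of the last interval. A small illustrative sketch with made-up numbers:

    # A typical period that occurs 10 times, split into segments of 1 h, 3 h and 20 h.
    occurrence = 10
    segment_hours = [1, 3, 20]
    weights = [occurrence * h for h in segment_hours]  # [10, 30, 200]
    assert sum(weights) == occurrence * 24  # the whole day stays accounted for

    # An index with N + 1 timestamps defines N increments, hence the extra
    # append of current_timestamp at the end of the function.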

Check failure on line 168 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L154-L168

     data["wind"].iloc[8760 * 2 - 24 : 8760] = 0
     data["pv"].iloc[8760 - 24 : 8760] = 0
     data["pv"].iloc[8760 * 2 - 24 : 8760] = 0
 
     # add a season without electricity production to simulate the possible advantage using a seasonal storages
-    data["wind"].iloc[2920 * 4:5 * 2920 + 1] = 0
-    data["wind"].iloc[2920: 2 * 2920 + 1] = 0
-    data["pv"].iloc[2920:2 * 2920 + 1] = 0
-    data["pv"].iloc[2920 * 4:5 * 2920 + 1] = 0
+    data["wind"].iloc[2920 * 4 : 5 * 2920 + 1] = 0
+    data["wind"].iloc[2920 : 2 * 2920 + 1] = 0
+    data["pv"].iloc[2920 : 2 * 2920 + 1] = 0
+    data["pv"].iloc[2920 * 4 : 5 * 2920 + 1] = 0
 
     ##########################################################################
     # Initialize the energy system and read/calculate necessary parameters
     ##########################################################################
 

Check failure on line 189 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L178-L189

     typical_periods = 60
     hours_per_period = 24
     segmentation = False
     if segmentation:
         print("segmentation hasn't been added so far")
-
 
     else:
         aggregation1 = tsam.TimeSeriesAggregation(
             timeSeries=data.iloc[:8760],
             noTypicalPeriods=typical_periods,

Check failure on line 260 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L205-L260

             extremePeriodMethod="replace_cluster_center",
             addPeakMin=["wind", "pv"],
             representationMethod="durationRepresentation",
         )
 
-
     aggregation1.createTypicalPeriods()
     aggregation2.createTypicalPeriods()
     if False:
         periods_total_occurrence1 = [
-            (aggregation1.clusterOrder == typical_period_name).sum() for typical_period_name in
-            aggregation1.clusterPeriodIdx]
+            (aggregation1.clusterOrder == typical_period_name).sum()
+            for typical_period_name in aggregation1.clusterPeriodIdx
+        ]
         periods_total_occurrence2 = [
-            (aggregation2.clusterOrder == typical_period_name).sum() for typical_period_name in
-            aggregation2.clusterPeriodIdx]
+            (aggregation2.clusterOrder == typical_period_name).sum()
+            for typical_period_name in aggregation2.clusterPeriodIdx
+        ]
     else:
         periods_total_occurrence1 = aggregation1.clusterPeriodNoOccur
         periods_total_occurrence2 = aggregation1.clusterPeriodNoOccur
-    periods_total_occurrence1 = check_equal_timesteps_after_aggregation(hours_per_period=hours_per_period,
-                                            hours_of_input_time_series=t1.__len__(),
-                                            periods_total_occurrence=periods_total_occurrence1
-                                                                        )
-    periods_total_occurrence2 = check_equal_timesteps_after_aggregation(hours_per_period = hours_per_period,
-                                            hours_of_input_time_series = t2.__len__(),
-                                            periods_total_occurrence=periods_total_occurrence2
-                                                                        )
-    #before timeseries generation was based on freq="H" (hourly), now you have to set the number of minutes of one timestep
-    t1_agg, objective_weighting1 = set_aggregated_timeseries_and_objective_weighting(segmentation=segmentation,
-                                                      periods_total_occurrence = periods_total_occurrence1,
-                                                      aggregated_period_dict=pd.DataFrame.from_dict(aggregation1.clusterPeriodDict),
-                                                      first_time_stamp=pd.to_datetime(t1[0])
-                                                                                     )
-    t2_agg, objective_weighting2 = set_aggregated_timeseries_and_objective_weighting(segmentation=segmentation,
-                                                      periods_total_occurrence = periods_total_occurrence2,
-                                                      aggregated_period_dict=pd.DataFrame.from_dict(aggregation2.clusterPeriodDict),
-                                                      first_time_stamp=pd.to_datetime(t1[0])
-                                                                                     )
-    #objective_weighting = objective_weighting1 + objective_weighting2
+    periods_total_occurrence1 = check_equal_timesteps_after_aggregation(
+        hours_per_period=hours_per_period,
+        hours_of_input_time_series=t1.__len__(),
+        periods_total_occurrence=periods_total_occurrence1,
+    )
+    periods_total_occurrence2 = check_equal_timesteps_after_aggregation(
+        hours_per_period=hours_per_period,
+        hours_of_input_time_series=t2.__len__(),
+        periods_total_occurrence=periods_total_occurrence2,
+    )
+    # before timeseries generation was based on freq="H" (hourly), now you have to set the number of minutes of one timestep
+    (
+        t1_agg,
+        objective_weighting1,
+    ) = set_aggregated_timeseries_and_objective_weighting(
+        segmentation=segmentation,
+        periods_total_occurrence=periods_total_occurrence1,
+        aggregated_period_dict=pd.DataFrame.from_dict(
+            aggregation1.clusterPeriodDict
+        ),
+        first_time_stamp=pd.to_datetime(t1[0]),
+    )
+    (
+        t2_agg,
+        objective_weighting2,
+    ) = set_aggregated_timeseries_and_objective_weighting(
+        segmentation=segmentation,
+        periods_total_occurrence=periods_total_occurrence2,
+        aggregated_period_dict=pd.DataFrame.from_dict(
+            aggregation2.clusterPeriodDict
+        ),
+        first_time_stamp=pd.to_datetime(t1[0]),
+    )
+    # objective_weighting = objective_weighting1 + objective_weighting2
     objective_weighting = objective_weighting1
 
-    #tindex_agg = t1_agg.append(t2_agg)
+    # tindex_agg = t1_agg.append(t2_agg)
     tindex_agg = t1_agg
 
-    #todo aggregation1.clusterPeriodNoOccur besser zum objective weighting nutzen
+    # todo aggregation1.clusterPeriodNoOccur besser zum objective weighting nutzen
     energysystem = solph.EnergySystem(
         timeindex=tindex_agg,
-        #timeincrement=[1] * len(tindex_agg),
-        periods=[t1_agg,
-                 #t2_agg
-                 ],
+        # timeincrement=[1] * len(tindex_agg),
+        periods=[
+            t1_agg,
+            # t2_agg
+        ],
         tsa_parameters=[
             {
                 "timesteps_per_period": aggregation1.hoursPerPeriod,
                 "order": aggregation1.clusterOrder,
                 "occurrences": aggregation1.clusterPeriodNoOccur,

Check failure on line 319 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L269-L319

     # Create oemof objects
     ##########################################################################
 
     logging.info("Create oemof objects")
 
-
     # create electricity bus
     bel = solph.Bus(label="electricity")
 
-    energysystem.add( bel)
+    energysystem.add(bel)
 
     # create excess component for the electricity bus to allow overproduction
     excess = solph.components.Sink(
         label="excess_bel", inputs={bel: solph.Flow()}
     )
 
     # create source object representing the gas commodity (annual limit)
     elect_resource = solph.components.Source(
-        label="electricity_source", outputs={bel: solph.Flow(variable_costs=electricity_price)}
+        label="electricity_source",
+        outputs={bel: solph.Flow(variable_costs=electricity_price)},
     )
 
     wind_profile = pd.concat(
         [
             aggregation1.typicalPeriods["wind"],
-            #aggregation2.typicalPeriods["wind"],
+            # aggregation2.typicalPeriods["wind"],
         ],
         ignore_index=True,
     )
     wind_profile.iloc[-24:] = 0
 
     # create fixed source object representing wind power plants
     wind = solph.components.Source(
         label="wind",
-        outputs={
-            bel: solph.Flow(
-                fix=wind_profile,
-                nominal_value=1500000
-            )
-        },
+        outputs={bel: solph.Flow(fix=wind_profile, nominal_value=1500000)},
     )
 
     pv_profile = pd.concat(
-        [aggregation1.typicalPeriods["pv"],
-         #aggregation2.typicalPeriods["pv"]
-         ],
+        [
+            aggregation1.typicalPeriods["pv"],
+            # aggregation2.typicalPeriods["pv"]
+        ],
         ignore_index=True,
     )
     pv_profile.iloc[-24:] = 0
 
     # create fixed source object representing pv power plants

Check failure on line 336 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L321-L336

         outputs={
             bel: solph.Flow(
                 fix=pd.concat(
                     [
                         aggregation1.typicalPeriods["pv"],
-                        #aggregation2.typicalPeriods["pv"],
+                        # aggregation2.typicalPeriods["pv"],
                     ],
                     ignore_index=True,
                 ),
-                nominal_value=900000
+                nominal_value=900000,
             )
         },
     )
 
     # create simple sink object representing the electrical demand

Check failure on line 349 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L338-L349

         inputs={
             bel: solph.Flow(
                 fix=pd.concat(
                     [
                         aggregation1.typicalPeriods["demand_el"],
-                        #aggregation2.typicalPeriods["demand_el"],
+                        # aggregation2.typicalPeriods["demand_el"],
                     ],
                     ignore_index=True,
                 ),
                 nominal_value=0.05,
             )

Check failure on line 380 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L368-L380

     ##########################################################################
 
     logging.info("Optimise the energy system")
 
     # initialise the operational model
-    om = solph.Model(energysystem
-                     )
+    om = solph.Model(energysystem)
 
     # if tee_switch is true solver messages will be displayed
     logging.info("Solve the optimization problem")
     om.solve(solver="cbc", solve_kwargs={"tee": True})
 

Check failure on line 428 in /home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py

/home/runner/work/oemof-solph/oemof-solph/examples/storage_investment/v6_no_invest_optimize_all_technologies_using_mp_and_tsam.py#L396-L428

 
     meta_results = solph.processing.meta_results(om)
     pp.pprint(meta_results)
 
     fig, ax = plt.subplots(figsize=(10, 5))
-    storage_results = results[(storage, None)]["sequences"] / storage.nominal_storage_capacity
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results = (
+        results[(storage, None)]["sequences"]
+        / storage.nominal_storage_capacity
+    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     plt.show()
     fig, ax = plt.subplots(figsize=(10, 5))
     storage_results = results[(wind, bel)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     plt.show()
     fig, ax = plt.subplots(figsize=(10, 5))
     storage_results = results[(pv, bel)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     plt.show()
     fig, ax = plt.subplots(figsize=(10, 5))
     storage_results = results[(bel, demand)]["sequences"]
-    storage_results .plot(
-        ax=ax, kind="line", drawstyle="steps-post"
-    )
+    storage_results.plot(ax=ax, kind="line", drawstyle="steps-post")
     plt.show()
     my_results = electricity_bus["period_scalars"]
 
     # installed capacity of storage in GWh
     my_results["storage_invest_GWh"] = (

Check failure on line 74 in /home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py

/home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py#L64-L74

 from oemof.solph import helpers
 from oemof.solph import processing
 from oemof.solph import views
 from oemof.solph._helpers import aggregate_time_series
 
+
 def main():
     # *************************************************************************
     # ********** PART 1 - Define and optimise the energy system ***************
     # *************************************************************************
 

Check failure on line 121 in /home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py

/home/runner/work/oemof-solph/oemof-solph/examples/tsam/basic_tsam_example.py#L82-L121

         data = pd.DataFrame({"pv": [0.3], "wind": [0.6], "demand_el": [500]})
 
     # First there need to be a pre-processing, because our aggregate_time_series function only
     # works with timeseries.
     df = pd.DataFrame(data)
-    start_time = pd.Timestamp.now().replace(year=2023, month=1, day=1,hour=0, minute=0, second=0, microsecond=0)
+    start_time = pd.Timestamp.now().replace(
+        year=2023, month=1, day=1, hour=0, minute=0, second=0, microsecond=0
+    )
     df["datetime"] = start_time + pd.to_timedelta(df["timestep"] - 1, unit="H")
     df.set_index("datetime", inplace=True)
 
-    data_dict_to_aggregate = {"wind" : {"timeseries" : df["wind"], "weighted_factor" : 1},
-                              "pv" : {"timeseries" : df["pv"], "weighted_factor" : 1},
-                              "demand_el" : {"timeseries" : df["demand_el"], "weighted_factor" : 1},
-                              }
-
-    number_of_time_steps_per_periods =24
-    number_of_segments_per_period = 24*4
+    data_dict_to_aggregate = {
+        "wind": {"timeseries": df["wind"], "weighted_factor": 1},
+        "pv": {"timeseries": df["pv"], "weighted_factor": 1},
+        "demand_el": {"timeseries": df["demand_el"], "weighted_factor": 1},
+    }
+
+    number_of_time_steps_per_periods = 24
+    number_of_segments_per_period = 24 * 4
     number_of_typical_periods = 12
     segmentation = False
     if False:
-        determine_aggregation_parameters(data_dict_to_aggregate = data_dict_to_aggregate,
-                              number_of_time_steps_per_periods = number_of_time_steps_per_periods,
-                              segmentation = False)
+        determine_aggregation_parameters(
+            data_dict_to_aggregate=data_dict_to_aggregate,
+            number_of_time_steps_per_periods=number_of_time_steps_per_periods,
+            segmentation=False,
+        )
     if segmentation:
-        data_dict_aggregated, objective_weighting, clusterClass = aggregate_time_series(data_dict_to_aggregate = data_dict_to_aggregate,
-                              number_of_typical_periods = number_of_typical_periods,
-                              number_of_time_steps_per_periods = number_of_time_steps_per_periods,
-                              number_of_segments_per_period= number_of_segments_per_period,
-                              segmentation = segmentation
+        (
+            data_dict_aggregated,
+            objective_weighting,
+            clusterClass,
+        ) = aggregate_time_series(
+            data_dict_to_aggregate=data_dict_to_aggregate,
+            number_of_typical_periods=number_of_typical_periods,
+            number_of_time_steps_per_periods=number_of_time_steps_per_periods,
+            number_of_segments_per_period=number_of_segments_per_period,
+            segmentation=segmentation,
         )
     else:
-        data_dict_aggregated, objective_weighting, clusterClass = aggregate_time_series(data_dict_to_aggregate = data_dict_to_aggregate,
-                              number_of_typical_periods = number_of_typical_periods,
-                              number_of_time_steps_per_periods = number_of_time_steps_per_periods,
-                              segmentation = segmentation
+        (
+            data_dict_aggregated,
+            objective_weighting,
+            clusterClass,
+        ) = aggregate_time_series(
+            data_dict_to_aggregate=data_dict_to_aggregate,
+            number_of_typical_periods=number_of_typical_periods,
+            number_of_time_steps_per_periods=number_of_time_steps_per_periods,
+            segmentation=segmentation,
         )
 
     solver = "cbc"  # 'glpk', 'gurobi',....
     debug = False  # Set number_of_timesteps to 3 to get a readable lp-file.
     solver_verbose = False  # show/hide solver output