diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml
new file mode 100644
index 0000000000..453925c3f9
--- /dev/null
+++ b/.JuliaFormatter.toml
@@ -0,0 +1 @@
+style = "sciml"
\ No newline at end of file
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..c228d15181
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,5 @@
+# .git-blame-ignore-revs
+# Standardize code formatting across project (#673)
+ee3f08756584ba16a57bb701492270a7bf129b4d
+# Update code formatting
+730f91df23447e94177c3a9c3d4e553cb502e2bf
\ No newline at end of file
diff --git a/.github/workflows/format_suggestions.yml b/.github/workflows/format_suggestions.yml
new file mode 100644
index 0000000000..dbd307846d
--- /dev/null
+++ b/.github/workflows/format_suggestions.yml
@@ -0,0 +1,15 @@
+name: Format suggestions
+on:
+  pull_request:
+
+jobs:
+  code-style:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: julia-actions/julia-format@v2
+        id: julia-format
+        continue-on-error: true
+      - name: Check on failures
+        if: steps.julia-format.outcome != 'success'
+        run: echo "There are formatting errors. Please check the logs above."
+        shell: bash
\ No newline at end of file
diff --git a/.zenodo.json b/.zenodo.json
index 0b4d814792..97590da304 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -52,7 +52,8 @@
},
{
"name": "Morris, Jack",
- "affiliation": "MITRE"
+ "affiliation": "MITRE",
+ "orcid": "0000-0002-1471-9708"
},
{
"name": "Patankar, Neha",
@@ -87,6 +88,10 @@
"name": "Xu, Qingyu",
"affiliation": "Tsinghua University",
"orcid": "0000-0003-2692-5135"
+ },
+ {
+ "name": "Zhou, Justin",
+ "affiliation": "Massachusetts Institute of Technology"
}
]
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b0fdf495d9..8b4478add3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,15 +7,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## Unreleased
+### Added
+- Add objective scaler for addressing problem ill-conditioning (#667)
+
## [0.4.0] - 2024-03-18
### Added
- Feature CO2 and fuel module (#536)
- Adds a fuel module which enables modeling of fuel usage via (1) a constant heat rate and (2)
- piecewise-linear approximation of heat rate curves.
- Adds a CO2 module that determines the CO2 emissions based on fuel consumption, CO2 capture
+ Adds a fuel module which enables modeling of fuel usage via (1) a constant heat rate and (2)
+ piecewise-linear approximation of heat rate curves.
+ Adds a CO2 module that determines the CO2 emissions based on fuel consumption, CO2 capture
fraction, and whether the feedstock is biomass.
-- Enable thermal power plants to burn multiple fuels (#586)
+- Enable thermal power plants to burn multiple fuels (#586)
- Feature electrolysis basic (#525)
Adds hydrogen electrolyzer model which enables the addition of hydrogen electrolyzer
demands along with optional clean supply constraints.
@@ -25,18 +28,33 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Add PR template (#516)
- Validation ensures that resource flags (THERM, HYDRO, LDS etc) are self-consistent (#513).
- Maintenance formulation for thermal-commit plants (#556).
-- Add new tests for GenX: three-zone, multi-stage, electrolyzer, VRE+storage,
+- Add new tests for GenX: three-zone, multi-stage, electrolyzer, VRE+storage,
piecewise_fuel+CO2, and TDR (#563 and #578).
- Added a DC OPF method (#543) to calculate power flows across all lines
- Added write_operating_reserve_price_revenue.jl to compute annual operating reserve and regulation revenue.
Added the operating reserve and regulation revenue to net revenue (PR # 611)
- Add functions to compute conflicting constraints when model is infeasible if supported by the solver (#624).
-- New settings parameter, VirtualChargeDischargeCost to test script and VREStor example case. The PR 608 attempts to
- introduce this parameter as cost of virtual charging and discharging to avoid unusual results (#608).
+- New settings parameter, VirtualChargeDischargeCost to test script and VREStor example case. The PR 608 attempts to
+ introduce this parameter as cost of virtual charging and discharging to avoid unusual results (#608).
- New settings parameter, StorageVirtualDischarge, to turn storage virtual charging and discharging off if desired by the user (#638).
- Add module to retrofit existing resources with new technologies (#600).
+- Formatted the code and added a format check to the CI pipeline (#673).
+- Add check when capital recovery period is zero and investment costs are
+ non-zero in multi-stage GenX (#666)
+- Added condition number scaling to the objective function (#667)
+- Added versioned doc-pages for v0.3.6 and v0.4.0
+
+- Added a warning message in write_costs_multistage mentioning the approximate value of costs currently.
### Fixed
+- Add constraint in mga to compute total capacity in each zone from a given technology type (#681)
+- New settings parameter MGAAnnualGeneration to switch between different MGA formulations (#681)
+- Add validation for `Can_Retire` column in multi-stage GenX since the current implementation
+ does not allow a resource to switch from can_retire = 0 to can_retire = 1 between stages. (#683)
+- Add tutorials for running GenX (#637 and #685)
+- Add writing of multistage stats during optimization with foresight (#687)
+- Fix docstring in operational_reserves.jl (#690)
+- Fix docstring in energy_share_requirement.jl (#692)
- Set MUST_RUN=1 for RealSystemExample/small_hydro plants (#517).
Previously these plants had no resource flag set, and so they did not contribute to the power balance.
As these plants are now useful, the objective in these cases is slightly lower.
@@ -54,6 +72,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fix modeling of hydro reservoir with long duration storage (#572).
- Fix update of starting transmission capacity in multistage GenX
- Fix write_status with UCommit = WriteShadowPrices = 1 (#645)
+- Fixed outputting capital recovery cost to 0 if the remaining number of years is 0 (#666)
+- Updated the docstring for the initialize_cost_to_go function and adjusted the formula for the discount factor to reflect the code implementation (#672).
+- Fix write_multi_stage_cost.jl: add discount with OPEX multipliers to cUnmetPolicyPenalty (#679)
+- Fix DF calculation in DDP to make it more generic for variable length stages (#680)
+- Fix write_power_balance.jl: add additional two columns ("VRE_Storage_Discharge" and "VRE_Storage_Charge") for VRE_STOR
### Changed
- Use add_to_expression! instead of the += and -= operators for memory performance improvements (#498).
@@ -73,14 +96,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
This mitigates but does not fully fix (#576).
- Expressions of virtual charging and discharging costs in storage_all.jl and vre_stor.jl
- The input file `Generators_data.csv` has been split into different files, one for each type of generator.
- The new files are: `Thermal.csv`, `Hydro.csv`, `Vre.csv`, `Storage.csv`, `Flex_demand.csv`, `Must_run.csv`,
- `Electrolyzer.csv`, and `Vre_stor.csv`. The examples have been updated, and new tests have been added to
+ The new files are: `Thermal.csv`, `Hydro.csv`, `Vre.csv`, `Storage.csv`, `Flex_demand.csv`, `Must_run.csv`,
+ `Electrolyzer.csv`, and `Vre_stor.csv`. The examples have been updated, and new tests have been added to
check the new data format (#612).
-- The settings parameter `Reserves` has been renamed to `OperationalReserves`, `Reserves.csv` to
+- The settings parameter `Reserves` has been renamed to `OperationalReserves`, `Reserves.csv` to
`Operational_reserves.csv`, and the `.jl` files contain the word `reserves` have been renamed to
`operational_reserves` (#641).
- New folder structure for a GenX case. The input files are now organized in the following folders: `settings`,
`policies`, `resources` and `system`. The examples and tests have been updated to reflect this change.
+- New folder structure implemented for `example_system`. This folder now consists of nine separate folders each pertaining to a different case study example,
+ ranging from the ISONE three zones, with singlestage, multistage, electrolyzers, all the way to the 9 bus IEEE case for running DC-OPF.
+- Pruned HiGHS solver settings to the necessary minimum (#668)
+- Changed deploydoc URL to GenX.jl (#662)
### Deprecated
- The above `load` keys, which generally refer to electrical demand, are being deprecated.
@@ -135,7 +162,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Removed
-- The settings key `OperationsWrapping`. Its functionality has now been folded into the
+- The settings key `OperationsWrapping`. Its functionality has now been folded into the
`TimeDomainReduction` setting. Using the key now will print a gentle warning (#426).
## [0.3.4] - 2023-04-28
diff --git a/CITATION.cff b/CITATION.cff
index 648457d1ef..752cd4a74e 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -1,30 +1,60 @@
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
+- family-names: "Bonaldo"
+ given-names: "Luca"
+ orcid: "https://orcid.org/0009-0000-0650-0266"
+- family-names: "Chakrabarti"
+ given-names: "Sambuddha"
+ orcid: "https://orcid.org/0000-0002-8916-5076"
+- family-names: "Cheng"
+ given-names: "Fangwei"
+ orcid: "https://orcid.org/0000-0001-6589-2749"
+- family-names: "Ding"
+ given-names: "Yifu"
+ orcid: "https://orcid.org/0000-0001-7128-8847"
- family-names: "Jenkins"
given-names: "Jesse"
-- family-names: "Sepulveda"
- given-names: "Nestor"
+ orcid: "https://orcid.org/0000-0002-9670-7793"
+- family-names: "Luo"
+ given-names: "Qian"
+ orcid: "https://orcid.org/0000-0003-3894-4093"
+- family-names: "Macdonald"
+ given-names: "Ruaridh"
+ orcid: "https://orcid.org/0000-0001-9034-6635"
- family-names: "Mallapragada"
given-names: "Dharik"
+ orcid: "https://orcid.org/0000-0002-0330-0063"
+- family-names: "Manocha"
+ given-names: "Aneesha"
+ orcid: "https://orcid.org/0000-0002-7190-4782"
+- family-names: "Mantegna"
+ given-names: "Gabe"
+ orcid: "https://orcid.org/0000-0002-7707-0221"
+- family-names: "Morris"
+ given-names: "Jack"
- family-names: "Patankar"
given-names: "Neha"
+ orcid: "https://orcid.org/0000-0001-7288-0391"
+- family-names: "Pecci"
+ given-names: "Filippo"
+ orcid: "https://orcid.org/0000-0003-3200-0892"
- family-names: "Schwartz"
given-names: "Aaron"
- family-names: "Schwartz"
given-names: "Jacob"
orcid: "https://orcid.org/0000-0001-9636-8181"
-- family-names: "Chakrabarti"
- given-names: "Sambuddha"
- orcid: "https://orcid.org/0000-0002-8916-5076"
-- family-names: "Xu"
- given-names: "Qingyu"
-- family-names: "Morris"
- given-names: "Jack"
+- family-names: "Schivley"
+ given-names: "Greg"
+ orcid: "https://orcid.org/0000-0002-8947-694X"
- family-names: "Sepulveda"
given-names: "Nestor"
+ orcid: "https://orcid.org/0000-0003-2735-8769"
+- family-names: "Xu"
+ given-names: "Qingyu"
+ orcid: "https://orcid.org/0000-0003-2692-5135"
title: "GenX"
-version: 0.3.0
-doi: 10.5281/zenodo.6229425
-date-released: 2022-04-26
-url: "https://github.com/GenXProject/GenX"
+version: 0.4.0
+doi: 10.5281/zenodo.10846070
+date-released: 2024-04-26
+url: "https://github.com/GenXProject/GenX.jl"
diff --git a/Project.toml b/Project.toml
index 9694c26432..6a092d4996 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,6 +1,6 @@
name = "GenX"
uuid = "5d317b1e-30ec-4ed6-a8ce-8d2d88d7cfac"
-authors = ["Bonaldo, Luca", "Chakrabarti, Sambuddha", "Cheng, Fangwei", "Ding, Yifu", "Jenkins, Jesse D.", "Luo, Qian", "Macdonald, Ruaridh", "Mallapragada, Dharik", "Manocha, Aneesha", "Mantegna, Gabe ", "Morris, Jack", "Patankar, Neha", "Pecci, Filippo", "Schwartz, Aaron", "Schwartz, Jacob", "Schivley, Greg", "Sepulveda, Nestor", "Xu, Qingyu"]
+authors = ["Bonaldo, Luca", "Chakrabarti, Sambuddha", "Cheng, Fangwei", "Ding, Yifu", "Jenkins, Jesse D.", "Luo, Qian", "Macdonald, Ruaridh", "Mallapragada, Dharik", "Manocha, Aneesha", "Mantegna, Gabe ", "Morris, Jack", "Patankar, Neha", "Pecci, Filippo", "Schwartz, Aaron", "Schwartz, Jacob", "Schivley, Greg", "Sepulveda, Nestor", "Xu, Qingyu", "Zhou, Justin"]
version = "0.4.0"
[deps]
diff --git a/README.md b/README.md
index f4eb78ce22..b7f513b34e 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,10 @@
-[![CI](https://github.com/GenXProject/GenX/actions/workflows/ci.yml/badge.svg)](https://github.com/GenXProject/GenX/actions/workflows/ci.yml)
-[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://genxproject.github.io/GenX.jl/dev)
-[![DOI](https://zenodo.org/badge/368957308.svg)](https://zenodo.org/doi/10.5281/zenodo.10846069)
-[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
+| **Documentation** | **DOI** |
+|:-------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------:|
+[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://genxproject.github.io/GenX.jl/stable/) [![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://genxproject.github.io/GenX.jl/dev) | [![DOI](https://zenodo.org/badge/368957308.svg)](https://zenodo.org/doi/10.5281/zenodo.10846069)
+
+[![CI](https://github.com/GenXProject/GenX/actions/workflows/ci.yml/badge.svg)](https://github.com/GenXProject/GenX/actions/workflows/ci.yml) [![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle) [![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
## Overview
GenX is a highly-configurable, [open source](https://github.com/GenXProject/GenX/blob/main/LICENSE) electricity resource capacity expansion model
@@ -51,7 +52,7 @@ It is currently setup to use one of the following open-source freely available s
- [Cbc](https://github.com/jump-dev/Cbc.jl) for mixed integer linear programming (MILP) problems
We also provide the option to use one of these two commercial solvers:
- [Gurobi](https://www.gurobi.com), or
-- [CPLEX](https://www.ibm.com/analytics/cplex-optimizer).
+- [CPLEX](https://www.ibm.com/docs/en/icos/22.1.1?topic=documentation-orientation-guide).
Note that using Gurobi and CPLEX requires a valid license on the host machine.
There are two ways to run GenX with either type of solver options (open-source free or, licensed commercial) as detailed in the section, `Getting Started`.
diff --git a/docs/make.jl b/docs/make.jl
index eeaa501c5b..5fd26c6e0f 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -2,7 +2,7 @@ using Documenter
using GenX
import DataStructures: OrderedDict
-DocMeta.setdocmeta!(GenX, :DocTestSetup, :(using GenX); recursive=true)
+DocMeta.setdocmeta!(GenX, :DocTestSetup, :(using GenX); recursive = true)
pages = OrderedDict(
"Welcome Page" => [
@@ -13,7 +13,7 @@ pages = OrderedDict(
],
"Getting Started" => [
"Running GenX" => "Getting_Started/examples_casestudies.md",
- "Commertial solvers" => "Getting_Started/commercial_solvers.md",
+ "Commertial solvers" => "Getting_Started/commercial_solvers.md"
],
"Tutorials" => [
"Tutorials Overview" => "Tutorials/Tutorials_intro.md",
@@ -22,7 +22,9 @@ pages = OrderedDict(
"Tutorial 3: K-Means and Time Domain Reduction" => "Tutorials/Tutorial_3_K-means_time_domain_reduction.md",
"Tutorial 4: Model Generation" => "Tutorials/Tutorial_4_model_generation.md",
"Tutorial 5: Solving the Model" => "Tutorials/Tutorial_5_solve_model.md",
- "Tutorial 6: Post Processing" => "Tutorials/Tutorial_6_solver_settings.md",
+ "Tutorial 6: Solver Settings" => "Tutorials/Tutorial_6_solver_settings.md",
+ "Tutorial 7: Policy Constraints" => "Tutorials/Tutorial_7_setup.md",
+ "Tutorial 8: Outputs" => "Tutorials/Tutorial_8_outputs.md"
],
"User Guide" => [
"Overall workflow" => "User_Guide/workflow.md",
@@ -36,13 +38,13 @@ pages = OrderedDict(
"Slack Variables for Policies" => "User_Guide/slack_variables_overview.md",
"Method of Morris Inputs" => "User_Guide/methodofmorris_input.md",
"Running the Model" => "User_Guide/running_model.md",
- "Model Outputs" => "User_Guide/model_output.md",
+ "Model Outputs" => "User_Guide/model_output.md"
],
"Model Concept and Overview" => [
"Model Introduction" => "Model_Concept_Overview/model_introduction.md",
"Notation" => "Model_Concept_Overview/model_notation.md",
"Objective Function" => "Model_Concept_Overview/objective_function.md",
- "Power Balance" => "Model_Concept_Overview/power_balance.md",
+ "Power Balance" => "Model_Concept_Overview/power_balance.md"
],
"Model Reference" => [
"Core" => "Model_Reference/core.md",
@@ -71,7 +73,6 @@ pages = OrderedDict(
"Thermal No Commit" => "Model_Reference/Resources/thermal_no_commit.md"
],
"Hydrogen Electrolyzers" => "Model_Reference/Resources/electrolyzers.md",
- "Retrofit" => "Model_Reference/Resources/retrofit.md",
"Scheduled maintenance for various resources" => "Model_Reference/Resources/maintenance.md",
"Resource types" => "Model_Reference/Resources/resource.md"
],
@@ -91,43 +92,41 @@ pages = OrderedDict(
"Endogenous Retirement" => "Model_Reference/Multi_Stage/endogenous_retirement.md"
],
"Method of Morris" => "Model_Reference/methodofmorris.md",
- "Utility Functions" => "Model_Reference/utility_functions.md",
+ "Utility Functions" => "Model_Reference/utility_functions.md"
],
"Public API Reference" => [
- "Public API" => "Public_API/public_api.md",
-
- ],
+ "Public API" => "Public_API/public_api.md"],
"Third Party Extensions" => "additional_third_party_extensions.md",
- "Developer Docs" => "developer_guide.md",
+ "Developer Docs" => "developer_guide.md"
)
# Build documentation.
# ====================
makedocs(;
- modules=[GenX],
- authors="Jesse Jenkins, Nestor Sepulveda, Dharik Mallapragada, Aaron Schwartz, Neha Patankar, Qingyu Xu, Jack Morris, Sambuddha Chakrabarti",
- sitename="GenX.jl",
- format=Documenter.HTML(;
- prettyurls=get(ENV, "CI", "false") == "true",
- canonical="https://genxproject.github.io/GenX.jl/stable",
+ modules = [GenX],
+ authors = "Jesse Jenkins, Nestor Sepulveda, Dharik Mallapragada, Aaron Schwartz, Neha Patankar, Qingyu Xu, Jack Morris, Sambuddha Chakrabarti",
+ sitename = "GenX.jl",
+ format = Documenter.HTML(;
+ prettyurls = get(ENV, "CI", "false") == "true",
+ canonical = "https://genxproject.github.io/GenX.jl/stable",
assets = ["assets/genx_style.css"],
- sidebar_sitename=false,
- collapselevel=1
+ sidebar_sitename = false,
+ collapselevel = 1
),
- pages=[p for p in pages]
+ pages = [p for p in pages]
)
# Deploy built documentation.
# ===========================
deploydocs(;
- repo="github.com/GenXProject/GenX.jl.git",
+ repo = "github.com/GenXProject/GenX.jl.git",
target = "build",
branch = "gh-pages",
devbranch = "main",
devurl = "dev",
- push_preview=true,
+ push_preview = true,
versions = ["stable" => "v^", "v#.#.#", "dev" => "dev"],
- forcepush = false,
+ forcepush = false
)
diff --git a/docs/src/Model_Concept_Overview/objective_function.md b/docs/src/Model_Concept_Overview/objective_function.md
index 94308431f8..40f90e7d4a 100644
--- a/docs/src/Model_Concept_Overview/objective_function.md
+++ b/docs/src/Model_Concept_Overview/objective_function.md
@@ -32,7 +32,7 @@ The objective function of GenX minimizes total annual electricity system costs o
& \sum_{y \in \mathcal{VS}^{sym,dc} \cup \mathcal{VS}^{asym,dc,dis}} \sum_{z \in \mathcal{Z}} \sum_{t \in \mathcal{T}} \left( \omega_{t}\times\pi^{VOM,dc,dis}_{y,z} \times\eta^{inverter}_{y,z} \times \Theta^{dc}_{y,z,t}\right) + \\
& \sum_{y \in \mathcal{VS}^{sym,dc} \cup \mathcal{VS}^{asym,dc,cha}} \sum_{z \in \mathcal{Z}} \sum_{t \in \mathcal{T}} \left( \omega_{t}\times\pi^{VOM,dc,cha}_{y,z} \times \frac{\Pi^{dc}_{y,z,t}}{\eta^{inverter}_{y,z}}\right) + \\
& \sum_{y \in \mathcal{VS}^{sym,ac} \cup \mathcal{VS}^{asym,ac,dis}} \sum_{z \in \mathcal{Z}} \sum_{t \in \mathcal{T}} \left( \omega_{t}\times\pi^{VOM,ac,dis}_{y,z} \times \Theta^{ac}_{y,z,t}\right) + \\
- & \sum_{y \in \mathcal{VS}^{sym,ac} \cup \mathcal{VS}^{asym,ac,cha}} \sum_{z \in \mathcal{Z}} \sum_{t \in \mathcal{T}} \left( \omega_{t}\times\pi^{VOM,ac,cha}_{y,z} \times \Pi^{ac}_{y,z,t}\right)
+ & \sum_{y \in \mathcal{VS}^{sym,ac} \cup \mathcal{VS}^{asym,ac,cha}} \sum_{z \in \mathcal{Z}} \sum_{t \in \mathcal{T}} \left( \omega_{t}\times\pi^{VOM,ac,cha}_{y,z} \times \Pi^{ac}_{y,z,t}\right)
\end{aligned}
```
@@ -56,7 +56,7 @@ The seventh summation represents the total cost of not meeting hourly operating
The eighth summation corresponds to the startup costs incurred by technologies to which unit commitment decisions apply (e.g. $y \in \mathcal{UC}$), equal to the cost of start-up, $\pi^{START}_{y,z}$, times the number of startup events, $\chi_{y,z,t}$, for the cluster of units in each zone and time step (weighted by $\omega_t$).
The ninth term corresponds to the transmission reinforcement or construction costs, for each transmission line (if modeled).
-Transmission reinforcement costs are equal to the sum across all lines of the product between the transmission reinforcement/construction cost, $pi^{TCAP}_{l}$, times the additional transmission capacity variable, $\bigtriangleup\varphi^{max}_{l}$. Note that fixed O&M and replacement capital costs (depreciation) for existing transmission capacity is treated as a sunk cost and not included explicitly in the GenX objective function.
+Transmission reinforcement costs are equal to the sum across all lines of the product between the transmission reinforcement/construction cost, $\pi^{TCAP}_{l}$, times the additional transmission capacity variable, $\bigtriangleup\varphi^{max}_{l}$. Note that fixed O&M and replacement capital costs (depreciation) for existing transmission capacity is treated as a sunk cost and not included explicitly in the GenX objective function.
The tenth term onwards specifically relates to the breakdown investment, fixed O&M, and variable O&M costs associated with each configurable component of a co-located VRE and storage resource.
The tenth term represents to the fixed cost of installed inverter capacity and is summed over only the co-located resources with an inverter component ($y \in \mathcal{VS}^{inv}$).
@@ -80,7 +80,7 @@ The eighteenth summation represents the variable O&M cost, $\pi^{VOM,wind}_{y,z}
The nineteenth summation represents the variable O&M cost, $\pi^{VOM,dc,dis}_{y,z}$, times the energy discharge by storage DC components ($y\in\mathcal{VS}^{sym,dc} \cup \mathcal{VS}^{asym,dc,dis}$) in time step $t$, $\Theta^{dc}_{y,z,t}$, the inverter efficiency, $\eta^{inverter}_{y,z}$, and the weight of each time step $t$, $\omega_t$.
The twentieth summation represents the variable O&M cost, $\pi^{VOM,dc,cha}_{y,z}$, times the energy withdrawn by storage DC components ($y\in\mathcal{VS}^{sym,dc} \cup \mathcal{VS}^{asym,dc,cha}$) in time step $t$, $\Pi^{dc}_{y,z,t}$, and the weight of each time step $t$, $\omega_t$, and divided by the inverter efficiency, $\eta^{inverter}_{y,z}$.
The twenty-first summation represents the variable O&M cost, $\pi^{VOM,ac,dis}_{y,z}$, times the energy discharge by storage AC components ($y\in\mathcal{VS}^{sym,ac} \cup \mathcal{VS}^{asym,ac,dis}$) in time step $t$, $\Theta^{ac}_{y,z,t}$, and the weight of each time step $t$, $\omega_t$.
-The twenty-second summation represents the variable O&M cost, $\pi^{VOM,ac,cha}_{y,z}$, times the energy withdrawn by storage AC components ($y\in\mathcal{VS}^{sym,ac} \cup \mathcal{VS}^{asym,ac,cha}$) in time step $t$, $\Pi^{ac}_{y,z,t}$, and the weight of each time step $t$, $\omega_t$.
+The twenty-second summation represents the variable O&M cost, $\pi^{VOM,ac,cha}_{y,z}$, times the energy withdrawn by storage AC components ($y\in\mathcal{VS}^{sym,ac} \cup \mathcal{VS}^{asym,ac,cha}$) in time step $t$, $\Pi^{ac}_{y,z,t}$, and the weight of each time step $t$, $\omega_t$.
In summary, the objective function can be understood as the minimization of costs associated with five sets of different decisions:
1. where and how to invest on capacity,
@@ -92,3 +92,6 @@ In summary, the objective function can be understood as the minimization of cost
Note however that each of these components are considered jointly and the optimization is performed over the whole problem at once as a monolithic co-optimization problem.
While the objective function is formulated as a cost minimization problem, it is also equivalent to a social welfare maximization problem, with the bulk of demand treated as inelastic and always served, and the utility of consumption for price-elastic consumers represented as a segment-wise approximation, as per the cost of unserved demand summation above.
+
+# Objective Scaling
+Sometimes the model can become ill-conditioned if some objective terms are very large or very small. To alleviate this problem, a scaling factor can be applied to the objective function during solving while leaving all other expressions untouched. The default ```ObjScale``` is set to 1, which has no effect on the objective. If you want to scale the objective, set ```ObjScale``` to an appropriate value in ```genx_settings.yml```. The objective function will be multiplied by the ```ObjScale``` value during the solving process.
\ No newline at end of file
diff --git a/docs/src/Model_Reference/write_outputs.md b/docs/src/Model_Reference/write_outputs.md
index 2b9ae15b3d..4bb02f563f 100644
--- a/docs/src/Model_Reference/write_outputs.md
+++ b/docs/src/Model_Reference/write_outputs.md
@@ -38,7 +38,7 @@ Modules = [GenX]
Pages = ["write_capacity.jl"]
```
-## Write Capacity Value # TODO: add it
+## Write Capacity Value
```@autodocs
Modules = [GenX]
Pages = ["write_capacity_value.jl"]
@@ -142,13 +142,20 @@ GenX.write_vre_stor_discharge
```
## Write Multi-stage files
+```@autodocs
+Modules = [GenX]
+Pages = ["write_multi_stage_outputs.jl"]
+```
```@docs
GenX.write_multi_stage_costs
GenX.write_multi_stage_stats
GenX.write_multi_stage_settings
GenX.write_multi_stage_network_expansion
+GenX.write_multi_stage_capacities_discharge
GenX.write_multi_stage_capacities_charge
GenX.write_multi_stage_capacities_energy
+GenX.create_multi_stage_stats_file
+GenX.update_multi_stage_stats_file
```
## Write maintenance files
@@ -162,8 +169,7 @@ Pages = ["write_maintenance.jl"]
GenX.write_angles
```
-## Write Settings files
-```@autodocs
-Modules = [GenX]
-Pages = ["write_settings.jl"]
+## Write Settings Files
+```@docs
+GenX.write_settings_file
```
diff --git a/docs/src/Tutorials/Tutorial_1_configuring_settings.md b/docs/src/Tutorials/Tutorial_1_configuring_settings.md
index 8ba1968665..a25b85a41d 100644
--- a/docs/src/Tutorials/Tutorial_1_configuring_settings.md
+++ b/docs/src/Tutorials/Tutorial_1_configuring_settings.md
@@ -17,15 +17,11 @@ To see how changing the settings affects the outputs, see Tutorials 3 and 7.
Below is the settings file for `example_systems/1_three_zones`:
-```@raw html
-
-```
+![png](./files/genxsettings.png)
-All `genx_settings.yml` files in `Example_Systems` specify most parameters. When configuring your own settings, however, it is not necessary to input all parameters as defaults are specified for each one in `configure_settings.jl`.
+All `genx_settings.yml` files in `example_systems` specify most parameters. When configuring your own settings, however, it is not necessary to input all parameters as defaults are specified for each one in `configure_settings.jl`.
-```@raw html
-
-```
+![png](./files/default_settings.png)
To open `genx_settings.yml` in Jupyter, use the function `YAML.load(open(...))` and navigate to file in the desired directory:
@@ -69,11 +65,9 @@ YAML.write_file("example_systems/1_three_zones/settings/genx_settings.yml", new_
The empty file will look like this:
-```@raw html
-
-```
+![png](./files/genx_settings_none.png)
-Now, we run GenX and output the file `capacity.csv` from the `Results` folder. To do this, we use the function `include`, which takes a .jl file and runs it in jupyter notebook:
+Now, we run GenX and output the file `capacity.csv` from the `results` folder. To do this, we use the function `include`, which takes a .jl file and runs it in jupyter notebook:
```julia
diff --git a/docs/src/Tutorials/Tutorial_2_network_visualization.md b/docs/src/Tutorials/Tutorial_2_network_visualization.md
index 67624d285f..9c598962d7 100644
--- a/docs/src/Tutorials/Tutorial_2_network_visualization.md
+++ b/docs/src/Tutorials/Tutorial_2_network_visualization.md
@@ -24,10 +24,9 @@ network = CSV.read("example_systems/1_three_zones/system/Network.csv",DataFrame,
MA, CT, and ME are the abbreviations for states Massachusetts, Connecticut, and Maine. However, since the US region of New England contains other states as well, MA in this case is also used to refer to those states.
-Columns `Start_Zone` and `End_Zone` specify the network of the three regions. In this case, there are only two network lines, specified in the `Network_Lines` columns. The `Start_Zone` column indicates that the first node, MA, is the source of both lines as both rows have value 1. Rows `z1` and `z2` have values of 2 and 3 in `End_Zone`, which means both nodes CT and ME recieve energy from node MA. This is also indicated in the column `transmission_path_name'.
+Columns `Start_Zone` and `End_Zone` specify the network of the three regions. In this case, there are only two network lines, specified in the `Network_Lines` columns. The `Start_Zone` column indicates that the first node, MA, is the source of both lines as both rows have value 1. Rows `z1` and `z2` have values of 2 and 3 in `End_Zone`, which means both nodes CT and ME receive energy from node MA. This is also indicated in the column `transmission path name`.
Below is a visualization of the network:
-```@raw html
-
-```
+![png](./files/new_england.png)
+
diff --git a/docs/src/Tutorials/Tutorial_3_K-means_time_domain_reduction.md b/docs/src/Tutorials/Tutorial_3_K-means_time_domain_reduction.md
index 4e9a49a6e6..5648c1eadc 100644
--- a/docs/src/Tutorials/Tutorial_3_K-means_time_domain_reduction.md
+++ b/docs/src/Tutorials/Tutorial_3_K-means_time_domain_reduction.md
@@ -3,19 +3,19 @@
[Interactive Notebook of the tutorial](https://github.com/GenXProject/GenX-Tutorials/blob/main/Tutorials/Tutorial_3_K-means_Time_Domain_Reduction.ipynb)
-A good tool to reduce computation time of GenX is to use [Time-domain reduction](@ref). Time Domain Reduction is a method that selects a smaller set of time steps from the data in a way that reduces computation time while still capturing the main information of the model. In this tutorial, we go over how TDR works in GenX and how it uses K-means clustering to choose the optimal time steps. For more information on TDR in capacity expansion models, see [Mallapragada et al](https://www.sciencedirect.com/science/article/pii/S0360544218315238).
+A good tool to reduce computation time of GenX is to use [Time-domain reduction](@ref). Time-domain Reduction is a method that selects a smaller set of time steps from the data in a way that reduces computation time while still capturing the main information of the model. In this tutorial, we go over how TDR works in GenX and how it uses K-means clustering to choose the optimal time steps. For more information on TDR in capacity expansion models, see [Mallapragada et al](https://www.sciencedirect.com/science/article/pii/S0360544218315238).
### Table of Contents
* [Time Domain Reduction](#TDR)
* [K-Means Clustering](#Kmeans)
* [Results of Time Domain Reduction](#TDRResults)
- * [Reconstruction](#Reconstruction)
+* [Reconstruction](#Reconstruction)
* [Extreme Periods](#ExtPeriods)
* [Objective Values and Representative Periods](#ObjVals)
### Time Domain Reduction
-To see how Time Domain Reduction works, let's look at the `Doad_data` in `example_systems/1_three_zones`:
+To see how Time Domain Reduction works, let's look at the `Demand_data` in `example_systems/1_three_zones`:
```julia
@@ -29,9 +29,6 @@ using Plots
using Clustering
using ScikitLearn
@sk_import datasets: (make_blobs)
-```
- WARNING: redefinition of constant make_blobs. This may fail, cause incorrect answers, or produce other errors.
- PyObject
```julia
case = joinpath("example_systems/1_three_zones");
@@ -88,7 +85,7 @@ loads |>
width=600,height=400,linewidth=.01)
```
-![svg](./files/output_14_0.svg)
+![svg](./files/t3_demand.svg)
As in [Tutorial 1: Configuring Settings](@ref), we can open the `genx_settings.yml` file for `1_three_zones` to see how `TimeDomainReduction` is set. If it's set to 1, this means TDR is being used.
@@ -118,7 +115,7 @@ genx_settings_TZ = YAML.load(open((joinpath(case,"settings/genx_settings.yml")))
"WriteShadowPrices" => 1
```
-To visualize how TDR decreases computation time, let's start by running `SmallNewEngland/OneZone` without TDR. In the third section of this tutorial, we'll run the example again using TDR.
+To visualize how TDR decreases computation time, let's start by running `example_systems/1_three_zones` without TDR. In the third section of this tutorial, we'll run the example again using TDR.
To run GenX without TDR, we start by editing the settings to set `TimeDomainReduction` to 0:
@@ -158,14 +155,16 @@ And run it using `include`. (Note: this process will take a few minutes):
```julia
@time include("example_systems/1_three_zones/Run.jl")
```
+Time elapsed for writing is
+ 142.404724 seconds
This took a little while to run, and would take even longer for larger systems. Let's see how we can get the run time down using Time Domain Reduction. The next sections go over how K-means clustering is used to perform TDR, and how to interpret the resulting files in GenX.
### K-means clustering
-Let's go over how TDR works. To perform TDR, GenX uses __K-means clustering__. K-means is an optimization method that clusters data into several groups based on their proximity to "centers" determined by the algorithm.
+Let's go over how TDR works. To perform TDR, GenX uses __K-means clustering__. _K_-means is an optimization method that clusters data into several groups based on their proximity to "centers" determined by the algorithm.
-K-means finds a set number of groups such that the variance between the distance of each point in the group to the mean of the group is minimized.
+_K_-means finds a set number of groups such that the variance between the distance of each point in the group to the mean of the group is minimized.
```math
\begin{align*}
@@ -173,7 +172,7 @@ K-means finds a set number of groups such that the variance between the distance
\end{align*}
```
-Where $\mathbf{S} = \{S_1, ... , S_k\}$ are the clusters, with $x$ denoting the elements of the clusters, and $\mu_i$ the mean of each cluster, i.e. the mean of the distances from each point to the center of the cluster. By taking the argmin over $\mathbf{S}$, the points $x$ are clustered into groups where their distance to the center is the smallest. For more information on how k-means works, see the [Wikipedia](https://en.wikipedia.org/wiki/K-means_clustering).
+Where $\mathbf{S} = \{S_1, ... , S_k\}$ are the clusters, with $x$ denoting the elements of the clusters, and $\mu_i$ the mean of each cluster, i.e. the mean of the distances from each point to the center of the cluster. By taking the argmin over $\mathbf{S}$, the points $x$ are clustered into groups where their distance to the center is the smallest. For more information on how _k_-means works, see the [Wikipedia](https://en.wikipedia.org/wiki/K-means_clustering).
GenX uses the package `Clustering.jl`, with documentation [here](https://juliastats.org/Clustering.jl/dev/kmeans.html#K-means). As an example, using the package `ScikitLearn.jl`, let's generate data that can cluster easily.
@@ -184,7 +183,7 @@ X, y = make_blobs(n_samples=50,centers=centers); # From scikit-learn
b = DataFrame(X,:auto)
```
-Note that clustering works for data without obvious groupings, but using blobs as an example makes k-means easier to visualize.
+Note that clustering works for data without obvious groupings, but using blobs as an example makes _k_-means easier to visualize.
```julia
@@ -192,6 +191,8 @@ plotly()
Plots.scatter(b[!,"x1"],b[!,"x2"],legend=false,title="Before K-means Clustering")
```
+![png](./files/t3_nokmeans.png)
+
Now we use the function `kmeans`, which is also used in `src/time_domain_reduction` in GenX.
@@ -217,11 +218,13 @@ plotly()
Plots.scatter(b[!,"x1"],b[!,"x2"],legend=false,marker_z=R.assignments,c=:lightrainbow,title="After K-means Clustering")
```
+![png](./files/t3_kmeans.png)
+
In GenX, the representative periods are the centers of the clusters, each representing one week of the year. In the above example that would mean there are 52 data points gathered into 11 clusters (to see this for yourself, change `make_blobs` to have 52 data points and 11 clusters.)
### Results of Time Domain Reduction
-To visualize the results of TDR, we'll set TDR = 1 back in the `genx_settings.yml` file in `Example_Systems_Tutorials/SmallNewEngland/OneZone/`:
+To visualize the results of TDR, we'll set TDR = 1 back in the `genx_settings.yml` file in `example_systems/1_three_zones`:
```julia
@@ -245,7 +248,7 @@ And run GenX again with TDR:
@time include("example_systems/1_three_zones/Run.jl")
```
-Csv files with the results of TDR are generated automatically in a folder called `TDR_results` found within the same folder containing the input csv files, in this case `Example_Systems_Tutorials/SmallNewEngland/OneZone`. The csv files in this folder show the files used in `Run.jl` that have been pared down from the initial input files.
+Csv files with the results of TDR are generated automatically in a folder called `TDR_results` found within the same folder containing the input csv files, in this case `example_systems/1_three_zones`. The csv files in this folder show the files used in `Run.jl` that have been pared down from the initial input files.
As an example, consider the input file `Fuels_data.csv`:
@@ -268,7 +271,7 @@ As you can see, the original has all 8,760 hours, while the TDR version only has
loads_TDR = CSV.read(joinpath(case,"TDR_Results/Demand_data.csv"),DataFrame,missingstring="NA")
```
-The 1,848 hours are divided into 11 sections of 168 hours, with each section representing one week of the original data. The number of hours per representative period is set in `time_domain_reduction_settings.yml`. Also specified in the file are the minimum and maximum number of clusters we would like to have (in this case 8 and 11). The k-means algorithm will then select the number of clusters that should be sufficient to capture the GenX model in fewer time steps (in this case 11).
+The 1,848 hours are divided into 11 sections of 168 hours, with each section representing one week of the original data. The number of hours per representative period is set in `time_domain_reduction_settings.yml`. Also specified in the file are the minimum and maximum number of clusters we would like to have (in this case 8 and 11). The _k_-means algorithm will then select the number of clusters that should be sufficient to capture the GenX model in fewer time steps (in this case 11).
```julia
@@ -286,7 +289,9 @@ Below, we create arrays out of the representative weeks and plot them on the sam
```julia
Period_map = CSV.read(joinpath(case,"TDR_Results/Period_map.csv"),DataFrame,missingstring="NA")
```
-
+```@raw html
+
52×3 DataFrame
27 rows omitted
Row
Period_Index
Rep_Period
Rep_Period_Index
Int64
Int64
Int64
1
1
4
1
2
2
4
1
3
3
4
1
4
4
4
1
5
5
8
2
6
6
8
2
7
7
8
2
8
8
8
2
9
9
8
2
10
10
12
3
11
11
12
3
12
12
12
3
13
13
12
3
⋮
⋮
⋮
⋮
41
41
20
5
42
42
20
5
43
43
23
6
44
44
17
4
45
45
48
10
46
46
48
10
47
47
48
10
48
48
48
10
49
49
49
11
50
50
49
11
51
51
8
2
52
52
8
2
+```
```julia
# Find array of unique representative periods
@@ -317,7 +322,7 @@ loads_with_TDR |>
color={"Week:n", scale={scheme="paired"},sort="decsending"}, title="MW Load per hour with TDR Representative Weeks",
width=845,height=400)
```
-![svg](./files/output_58_0.svg)
+![svg](./files/t3_TDR_demand.svg)
TDR is performed for four total data sets: demand (found in Demand.csv), wind and solar (found in Generators_variability.csv), and fuel prices (found in Fuels.csv). Above is just the demand load for one of the three total nodes in the example system, which is why the data may not appear to "represent" all 52 weeks (notice there are fewer representative periods in the fall). Instead, the periods more accurately represent all the data time series combined, including some other parts of the data not seen in this particular plot.
@@ -384,7 +389,7 @@ myscheme = ["#a6cee3","#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c
width=845,height=300)
```
-![svg](./files/output_65_0.svg)
+![svg](./files/t3_ext_periods.svg)
The first plot (with Extreme Periods off) may not have the week with the highest peak highlighted. If the week with the highest demand is highlighted, try re-running the cell with Extreme Periods Off plotting the results.
@@ -396,7 +401,7 @@ YAML.write_file(joinpath(case,"settings/time_domain_reduction_settings.yml"), ti
rm(joinpath(case,"TDR_results"), recursive=true)
```
-#### Reconstruction
+### Reconstruction
Below is a plot of a reconstruction of the data using only the weeks isolated as representative periods. This is what GenX reads when it runs the solver with TDR on.
@@ -443,6 +448,8 @@ G2 = Plots.plot(recon[!,:hour], recon[!,:MW], linewidth=1.7,
Plots.plot(G1,G2,layout=(2,1))
```
+![svg](./files/t3_recon.svg)
+
Each color represents one of the representative weeks.
The range of 8-11 representative periods was chosen by the developers because it was deemed to be the smallest set that still matches the optimal value of the data well. The next section of this Tutorial goes over how the optimal values of the data change as the number of representative periods changes.
@@ -617,6 +624,8 @@ scatter!(twinx(),obj_val_plot[:,1],times,color=:red,markeralpha=.5,label=:"Time"
ygrid!(:on, :dashdot, 0.1)
```
+![svg](./files/t3_obj_val.svg)
+
Here, we can see that while having very few representative periods produces an objective value that differs greatly from the orignal, once we reach around 12 representative periods the difference begins to taper out. Therefore, the original choice of 11 maximum periods in `1_three_zones` decreases the run time of GenX significantly while while maintaining an objective value close to the original.
diff --git a/docs/src/Tutorials/Tutorial_4_model_generation.md b/docs/src/Tutorials/Tutorial_4_model_generation.md
index 116b76fe2d..39031fb630 100644
--- a/docs/src/Tutorials/Tutorial_4_model_generation.md
+++ b/docs/src/Tutorials/Tutorial_4_model_generation.md
@@ -13,9 +13,7 @@ We'll start by explaining JuMP, the optimization package that GenX uses to gener
* [Run generate_model](#Run)
-```@raw html
-
-```
+![png](./files/jump_logo.png)
JuMP is a modeling language for Julia. It allows users to create models for optimization problems, define variables and constraints, and apply a variety of solvers for the model.
@@ -122,16 +120,30 @@ When `Run.jl` is called, the model for GenX is constructed in a similar way, but
The basic structure of the way `Run.jl` generates and solves the model is as follows:
-```@raw html
-
-```
+![png](./files/LatexHierarchy.png)
The function `run_genx_case(case)` takes the "case" as its input. The case is all of the input files and settings found in the same folder as `Run.jl`. For example, in `example_systems/1_three_zones`, the case is:
-```@raw html
-
+```julia
+cd(readdir,"example_systems/1_three_zones")
```
+
+
+
+ 9-element Vector{String}:
+ ".DS_Store"
+ "README.md"
+ "Run.jl"
+ "TDR_results"
+ "policies"
+ "resources"
+ "results"
+ "settings"
+ "system"
+
+
+
`Run_genx_case` defines the __setup__, which are the settings in `genx_settings.yml`. From there, either `run_genx_case_simple(case, mysetup)` or`run_genx_case_multistage(case, mysetup)` is called. Both of these define the __inputs__ and __optimizer__. The optimizer is the solver as specified in `genx_settings.yml`, and the inputs are a variety of parameters specified by the settings and csv files found in the folder. Both of these functions then call `generate_model(mysetup, myinputs, OPTIMIZER)`, which is the main subject of this tutorial.
As in the above example, `generate_model` utilizes the JuMP functions `Model()`, `@expression`, `@variable`, and `@constraints` to form a model. This section goes through `generate_model` and explains how the expressions are formed to create the model.
@@ -260,7 +272,7 @@ typeof(OPTIMIZER)
MathOptInterface.OptimizerWithAttributes
```
-The "inputs" argument is generated by the function `load_inputs` from the case in `run_genx_case_simple` (or multistage). If TDR is set to 1 in the settings file, then `load_inputs` will draw some of the files from the `TDR_Results` folder. `TDR_Results` is produced when the case is run.
+The "inputs" argument is generated by the function `load_inputs` from the case in `run_genx_case_simple` (or multistage). If TDR is set to 1 in the settings file, then `load_inputs` will draw some of the files from the `TDR_results` folder. `TDR_results` is produced when the case is run.
```julia
diff --git a/docs/src/Tutorials/Tutorial_7_setup.md b/docs/src/Tutorials/Tutorial_7_setup.md
new file mode 100644
index 0000000000..0d93604c83
--- /dev/null
+++ b/docs/src/Tutorials/Tutorial_7_setup.md
@@ -0,0 +1,2872 @@
+# Tutorial 7: Policy Constraints
+
+[Interactive Notebook of the tutorial](https://github.com/GenXProject/GenX-Tutorials/blob/main/Tutorials/Tutorial_7_Setup.ipynb)
+
+As shown in previous tutorials, the settings file can be changed to adapt to a number of different systems. In Tutorial 3, we discussed how the setting Time Domain Reduction can reduce the computation time of the solver. Other settings, however, directly impact the values of the solution itself. This tutorial goes over the policy attributes in the settings and shows how implementing certain policies affects the optimal solution in GenX. To demonstrate these effects, we will be using `example_systems/1_three_zones`.
+
+## Table of Contents
+* [No Policy Constraints](#NoPolicies)
+* [CO2 Cap](#CO2Cap)
+ * [Mass Cap](#MassCap)
+ * [Tolerance](#Tolerance)
+ * [CO2 Slack](#CO2Slack)
+ * [Load-based Cap](#LoadCap)
+ * [Generator-based Cap](#GenerateCap)
+* [Energy Share Requirement](#ESR)
+* [Capacity Reserve Margin](#CRM)
+* [Minimum Capacity Requirement](#MCR)
+* [All Together](#All)
+
+
+## No Policy Constraints
+
+
+```julia
+using JuMP
+using HiGHS
+using GenX
+using CSV
+using DataFrames
+using Plots
+using StatsPlots
+```
+
+
+```julia
+case = joinpath("example_systems/1_three_zones")
+
+genx_settings = GenX.get_settings_path(case, "genx_settings.yml");
+writeoutput_settings = GenX.get_settings_path(case, "output_settings.yml")
+setup = GenX.configure_settings(genx_settings,writeoutput_settings)
+```
+
+ Configuring Settings
+
+
+
+
+
+ Dict{Any, Any} with 33 entries:
+ "HydrogenHourlyMatching" => 0
+ "NetworkExpansion" => 1
+ "TimeDomainReductionFolder" => "TDR_results"
+ "WriteOutputs" => "full"
+ "SystemFolder" => "system"
+ "EnableJuMPStringNames" => 1
+ "Trans_Loss_Segments" => 1
+ "ModelingtoGenerateAlternativeSlack" => 0.1
+ "PoliciesFolder" => "policies"
+ "MultiStage" => 0
+ "ComputeConflicts" => 1
+ "OverwriteResults" => 0
+ "ModelingToGenerateAlternatives" => 0
+ "MaxCapReq" => 0
+ "MinCapReq" => 1
+ "CO2Cap" => 2
+ "WriteShadowPrices" => 1
+ "OperationalReserves" => 0
+ "ParameterScale" => 1
+ "EnergyShareRequirement" => 0
+ "PrintModel" => 0
+ "TimeDomainReduction" => 1
+ "DC_OPF" => 0
+ "CapacityReserveMargin" => 0
+ "MethodofMorris" => 0
+ ⋮ => ⋮
+
+
+
+The settings we'll focus on here are `CO2Cap`, `EnergyShareRequirement`, `CapacityReserveMargin`, and `MinCapReq`. Each of these ensures that the environmental impact of the model is taken into account, and they are therefore referred to as __policy settings__. For more information on what each one does, see the documentation on [Emission mitigation policies].
+
+
+```julia
+println("MaxCapReq: ", setup["MaxCapReq"])
+println("MinCapReq: ", setup["MinCapReq"])
+println("CO2Cap: ", setup["CO2Cap"])
+println("EnergyShareRequirement: ", setup["EnergyShareRequirement"])
+println("CapacityReserveMargin: ", setup["CapacityReserveMargin"])
+```
+
+ MaxCapReq: 0
+ MinCapReq: 1
+ CO2Cap: 2
+ EnergyShareRequirement: 0
+ CapacityReserveMargin: 0
+
+
+`1_three_zones` uses `MinCapReq` and `CO2Cap`. For the purpose of this tutorial, we're going to set these back to zero to start.
+
+
+```julia
+setup["MinCapReq"] = 0
+setup["CO2Cap"] = 0;
+```
+
+Now, we'll generate and solve the model using these results:
+
+
+```julia
+## Delete Previous TDR Results
+if "TDR_results" in cd(readdir,case)
+ rm(joinpath(case,"TDR_results"), recursive=true)
+end
+
+### Create TDR_Results
+TDRpath = joinpath(case, setup["TimeDomainReductionFolder"])
+system_path = joinpath(case, setup["SystemFolder"])
+settings_path = GenX.get_settings_path(case)
+
+if setup["TimeDomainReduction"] == 1
+ GenX.prevent_doubled_timedomainreduction(system_path)
+ if !GenX.time_domain_reduced_files_exist(TDRpath)
+ println("Clustering Time Series Data (Grouped)...")
+ GenX.cluster_inputs(case, settings_path, setup)
+ else
+ println("Time Series Data Already Clustered.")
+ end
+end
+
+```
+
+ Clustering Time Series Data (Grouped)...
+ Reading Input CSV Files
+ Network.csv Successfully Read!
+ Demand (load) data Successfully Read!
+ Fuels_data.csv Successfully Read!
+
+
+ Thermal.csv Successfully Read.
+ Vre.csv Successfully Read.
+ Storage.csv Successfully Read.
+ Resource_energy_share_requirement.csv Successfully Read.
+ Resource_capacity_reserve_margin.csv Successfully Read.
+ Resource_minimum_capacity_requirement.csv Successfully Read.
+
+
+ Summary of resources loaded into the model:
+ -------------------------------------------------------
+ Resource type Number of resources
+ =======================================================
+ Thermal 3
+ VRE 4
+ Storage 3
+ =======================================================
+ Total number of resources: 10
+ -------------------------------------------------------
+ Generators_variability.csv Successfully Read!
+ Validating time basis
+ CSV Files Successfully Read In From example_systems/1_three_zones
+ Error: Geography Key 1 is invalid. Select `System' or `Zone'.
+
+
+
+
+
+ Dict{String, Any} with 9 entries:
+ "RMSE" => Dict("ME_NG"=>0.210014, "ME_onshore_wind_z3"=>0.310986, "D…
+ "OutputDF" => 1848×19 DataFrame
+ "ColToZoneMap" => Dict("Demand_MW_z3"=>3, "CT_battery_z2"=>2, "MA_natural_ga…
+ "ClusterObject" => KmeansResult{Matrix{Float64}, Float64, Int64}([-0.734116 2…
+ "TDRsetup" => Dict{Any, Any}("IterativelyAddPeriods"=>1, "ExtremePeriods…
+ "Assignments" => [1, 1, 1, 1, 2, 2, 2, 2, 2, 3 … 6, 9, 3, 10, 10, 10, 11,…
+ "InputDF" => 1680×52 DataFrame
+ "Weights" => [673.846, 1179.23, 842.308, 673.846, 1010.77, 1347.69, 134…
+ "Centers" => Any[4, 8, 12, 15, 20, 23, 27, 30, 44, 48, 49]
+
+
+
+
+```julia
+OPTIMIZER = GenX.configure_solver(settings_path,HiGHS.Optimizer);
+inputs = GenX.load_inputs(setup, case)
+```
+
+ Reading Input CSV Files
+ Network.csv Successfully Read!
+ Demand (load) data Successfully Read!
+ Fuels_data.csv Successfully Read!
+
+ Summary of resources loaded into the model:
+ -------------------------------------------------------
+ Resource type Number of resources
+ =======================================================
+ Thermal 3
+ VRE 4
+ Storage 3
+ =======================================================
+ Total number of resources: 10
+ -------------------------------------------------------
+ Generators_variability.csv Successfully Read!
+ Validating time basis
+ CSV Files Successfully Read In From example_systems/1_three_zones
+
+
+ Thermal.csv Successfully Read.
+ Vre.csv Successfully Read.
+ Storage.csv Successfully Read.
+ Resource_energy_share_requirement.csv Successfully Read.
+ Resource_capacity_reserve_margin.csv Successfully Read.
+ Resource_minimum_capacity_requirement.csv Successfully Read.
+
+
+
+
+ Dict{Any, Any} with 67 entries:
+ "Z" => 3
+ "LOSS_LINES" => [1, 2]
+ "STOR_HYDRO_SHORT_DURATION" => Int64[]
+ "RET_CAP_CHARGE" => Set{Int64}()
+ "pC_D_Curtail" => [50.0, 45.0, 27.5, 10.0]
+ "pTrans_Max_Possible" => [5.9, 4.0]
+ "pNet_Map" => [1.0 -1.0 0.0; 1.0 0.0 -1.0]
+ "omega" => 4.01099, 4.01099, 4.01099, 4.01099, 4.01099, …
+ "pMax_Line_Reinforcement" => [2.95, 2.0]
+ "RET_CAP_ENERGY" => Int64[]
+ "RESOURCES" => AbstractResource
+ "COMMIT" => [1, 2, 3]
+ "pMax_D_Curtail" => [1.0, 0.04, 0.024, 0.003]
+ "STOR_ALL" => [8, 9, 10]
+ "THERM_ALL" => [1, 2, 3]
+ "REP_PERIOD" => 11
+ "PWFU_Num_Segments" => 0
+ "STOR_LONG_DURATION" => Int64[]
+ "THERM_COMMIT_PWFU" => Int64[]
+ "STOR_SYMMETRIC" => [8, 9, 10]
+ "VRE" => [4, 5, 6, 7]
+ "RETRO" => Int64[]
+ "THERM_COMMIT" => [1, 2, 3]
+ "TRANS_LOSS_SEGS" => 1
+ "H" => 168
+ ⋮ => ⋮
+
+
+
+
+```julia
+EP = GenX.generate_model(setup,inputs,OPTIMIZER)
+```
+
+ Discharge Module
+ Non-served Energy Module
+ Investment Discharge Module
+ Unit Commitment Module
+ Fuel Module
+ CO2 Module
+ Investment Transmission Module
+ Transmission Module
+ Dispatchable Resources Module
+ Storage Resources Module
+ Storage Investment Module
+ Storage Core Resources Module
+ Storage Resources with Symmetric Charge/Discharge Capacity Module
+ Thermal (Unit Commitment) Resources Module
+
+
+
+
+
+ A JuMP Model
+ Minimization problem with:
+ Variables: 120136
+ Objective function type: AffExpr
+ `AffExpr`-in-`MathOptInterface.EqualTo{Float64}`: 35112 constraints
+ `AffExpr`-in-`MathOptInterface.GreaterThan{Float64}`: 20331 constraints
+ `AffExpr`-in-`MathOptInterface.LessThan{Float64}`: 97949 constraints
+ `VariableRef`-in-`MathOptInterface.EqualTo{Float64}`: 1 constraint
+ `VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 116439 constraints
+ Model mode: AUTOMATIC
+ CachingOptimizer state: EMPTY_OPTIMIZER
+ Solver name: HiGHS
+ Names registered in the model: FuelCalculationCommit_single, cFuelCalculation_single, cMaxCap, cMaxCapEnergy, cMaxCapEnergyDuration, cMaxFlow_in, cMaxFlow_out, cMaxLineReinforcement, cMaxNSE, cMaxRetCommit, cMaxRetEnergy, cMaxRetNoCommit, cMinCap, cMinCapEnergy, cMinCapEnergyDuration, cNSEPerSeg, cPowerBalance, cSoCBalInterior, cSoCBalStart, cStartFuel_single, cTAuxLimit, cTAuxSum, cTLoss, eAvail_Trans_Cap, eCFix, eCFixEnergy, eCFuelOut, eCFuelStart, eCNSE, eCStart, eCVar_in, eCVar_out, eELOSS, eELOSSByZone, eEmissionsByPlant, eEmissionsByZone, eExistingCap, eExistingCapEnergy, eFuelConsumption, eFuelConsumptionYear, eFuelConsumption_single, eGenerationByThermAll, eGenerationByVRE, eGenerationByZone, eLosses_By_Zone, eNet_Export_Flows, eObj, ePlantCFuelOut, ePlantCFuelStart, ePlantFuel_generation, ePlantFuel_start, ePowerBalance, ePowerBalanceDisp, ePowerBalanceLossesByZone, ePowerBalanceNetExportFlows, ePowerBalanceNse, ePowerBalanceStor, ePowerBalanceThermCommit, eStartFuel, eTotalCFix, eTotalCFixEnergy, eTotalCFuelOut, eTotalCFuelStart, eTotalCNSE, eTotalCNSET, eTotalCNSETS, eTotalCNetworkExp, eTotalCStart, eTotalCStartT, eTotalCVarIn, eTotalCVarInT, eTotalCVarOut, eTotalCVarOutT, eTotalCap, eTotalCapEnergy, eTransMax, eZonalCFuelOut, eZonalCFuelStart, vCAP, vCAPENERGY, vCHARGE, vCOMMIT, vFLOW, vFuel, vNEW_TRANS_CAP, vNSE, vP, vRETCAP, vRETCAPENERGY, vS, vSHUT, vSTART, vStartFuel, vTAUX_NEG, vTAUX_POS, vTLOSS, vZERO
+
+
+
+
+```julia
+GenX.solve_model(EP,setup)
+```
+
+ Running HiGHS 1.6.0: Copyright (c) 2023 HiGHS under MIT licence terms
+ Presolving model
+ 118155 rows, 81204 cols, 422835 nonzeros
+ 110998 rows, 74047 cols, 423349 nonzeros
+ Presolve : Reductions: rows 110998(-42394); columns 74047(-46089); elements 423349(-47782)
+ Solving the presolved LP
+ IPX model has 110998 rows, 74047 columns and 423349 nonzeros
+ Input
+ Number of variables: 74047
+ Number of free variables: 3696
+ Number of constraints: 110998
+ Number of equality constraints: 16867
+ Number of matrix entries: 423349
+ Matrix range: [4e-07, 1e+01]
+ RHS range: [7e-01, 2e+01]
+ Objective range: [1e-04, 4e+02]
+ Bounds range: [2e-03, 2e+01]
+ Preprocessing
+ Dualized model: no
+ Number of dense columns: 15
+ Range of scaling factors: [5.00e-01, 1.00e+00]
+ IPX version 1.0
+ Interior Point Solve
+ Iter P.res D.res P.obj D.obj mu Time
+ 0 8.62e+00 3.81e+02 3.30336414e+06 -5.31617580e+06 3.30e+03 0s
+ 1 4.09e+00 1.06e+02 2.34353411e+05 -5.13796175e+06 1.43e+03 0s
+ 2 3.78e+00 7.03e+01 1.87013341e+05 -1.15199236e+07 1.34e+03 0s
+ 3 1.33e+00 4.12e+01 -3.76464137e+05 -1.37088411e+07 7.85e+02 1s
+ Constructing starting basis...
+ 4 4.13e-01 1.08e+01 2.66640168e+05 -8.48314805e+06 2.43e+02 3s
+ 5 1.12e-01 5.62e+00 3.71879810e+05 -5.58576107e+06 1.28e+02 4s
+ 6 7.53e-03 1.62e+00 2.30531116e+05 -1.92720962e+06 3.67e+01 5s
+ 7 9.27e-04 1.77e-01 1.30486918e+05 -3.83901614e+05 5.66e+00 6s
+ 8 1.14e-04 4.38e-02 5.27259057e+04 -1.00386376e+05 1.48e+00 7s
+ 9 1.52e-05 6.88e-03 2.76584248e+04 -2.19746140e+04 3.52e-01 8s
+ 10 5.20e-06 2.59e-03 1.39025442e+04 -7.44814138e+03 1.35e-01 8s
+ 11 2.36e-06 1.09e-03 1.02345396e+04 -1.80403130e+03 7.13e-02 10s
+ 12 1.03e-06 3.59e-04 7.72508848e+03 1.35005473e+03 3.59e-02 12s
+ 13 6.67e-07 1.53e-04 6.83171406e+03 2.57204744e+03 2.35e-02 15s
+ 14 5.06e-07 7.64e-05 6.41494456e+03 3.13597410e+03 1.79e-02 18s
+ 15 3.52e-07 4.64e-05 5.95636098e+03 3.44861286e+03 1.36e-02 21s
+ 16 1.69e-07 2.49e-05 5.33436713e+03 3.75261594e+03 8.58e-03 24s
+ 17 1.38e-07 2.05e-05 5.23488752e+03 3.81660239e+03 7.68e-03 26s
+ 18 1.21e-07 1.95e-05 5.23765885e+03 3.83998603e+03 7.57e-03 28s
+ 19 7.35e-08 1.60e-05 5.05272742e+03 3.91685812e+03 6.15e-03 30s
+ 20 6.04e-08 1.49e-05 5.02221768e+03 3.93278446e+03 5.90e-03 33s
+ 21 2.85e-08 1.13e-05 4.88298181e+03 4.01774654e+03 4.68e-03 35s
+ 22 2.29e-08 6.29e-06 4.83470832e+03 4.00260600e+03 4.48e-03 37s
+ 23 1.31e-08 3.63e-06 4.72358228e+03 4.16712858e+03 3.00e-03 38s
+ 24 9.10e-09 2.96e-06 4.70648899e+03 4.19085995e+03 2.77e-03 40s
+ 25 4.45e-09 1.89e-06 4.63836996e+03 4.26416898e+03 2.01e-03 41s
+ 26 3.49e-09 1.84e-06 4.63722238e+03 4.26628187e+03 1.99e-03 42s
+ 27 2.15e-09 1.23e-06 4.59679916e+03 4.32342389e+03 1.47e-03 43s
+ 28 1.87e-09 1.07e-06 4.59259771e+03 4.33476451e+03 1.39e-03 46s
+ 29 1.58e-09 1.05e-06 4.59058841e+03 4.33567604e+03 1.37e-03 48s
+ 30 1.18e-09 8.27e-07 4.58014487e+03 4.35414040e+03 1.21e-03 49s
+ 31 7.95e-10 4.22e-07 4.56632970e+03 4.39355672e+03 9.27e-04 51s
+ 32 4.34e-10 3.79e-07 4.55792749e+03 4.39835388e+03 8.56e-04 51s
+ 33 2.37e-10 2.16e-07 4.54724964e+03 4.42097386e+03 6.77e-04 52s
+ 34 2.10e-10 2.06e-07 4.54686537e+03 4.42339252e+03 6.62e-04 53s
+ 35 7.73e-11 1.26e-07 4.53250796e+03 4.44139125e+03 4.89e-04 54s
+ 36 1.55e-11 1.06e-07 4.52064756e+03 4.44667475e+03 3.97e-04 55s
+ 37 1.50e-11 1.03e-07 4.52062467e+03 4.44749533e+03 3.92e-04 55s
+ 38 9.79e-12 1.00e-07 4.52199331e+03 4.44806728e+03 3.96e-04 56s
+ 39 8.92e-12 6.93e-08 4.52041735e+03 4.46101031e+03 3.19e-04 56s
+ 40 7.30e-12 5.17e-08 4.52001352e+03 4.46367984e+03 3.02e-04 57s
+ 41 4.70e-12 3.05e-08 4.51328477e+03 4.47536516e+03 2.03e-04 57s
+ 42 3.83e-12 2.70e-08 4.51240381e+03 4.47626904e+03 1.94e-04 58s
+ 43 2.85e-12 2.63e-08 4.51136761e+03 4.47647929e+03 1.87e-04 59s
+ 44 2.10e-12 1.31e-08 4.50946086e+03 4.48203628e+03 1.47e-04 59s
+ 45 1.31e-12 4.15e-09 4.50665353e+03 4.48821996e+03 9.88e-05 60s
+ 46 9.09e-13 3.31e-09 4.50579733e+03 4.48871202e+03 9.15e-05 61s
+ 47 9.09e-13 3.26e-09 4.50576718e+03 4.48852664e+03 9.24e-05 61s
+ 48 7.53e-13 2.25e-09 4.50502226e+03 4.49018328e+03 7.95e-05 62s
+ 49 7.53e-13 2.21e-09 4.50492035e+03 4.48999997e+03 7.99e-05 62s
+ 50 5.83e-13 1.52e-09 4.50350728e+03 4.49199895e+03 6.17e-05 63s
+ 51 1.99e-13 1.32e-09 4.50092854e+03 4.49240806e+03 4.57e-05 63s
+ 52 1.78e-13 1.23e-09 4.50080001e+03 4.49253227e+03 4.43e-05 64s
+ 53 9.24e-14 7.11e-10 4.49993325e+03 4.49367022e+03 3.36e-05 64s
+ 54 7.11e-14 5.59e-10 4.49966942e+03 4.49408670e+03 2.99e-05 65s
+ 55 7.11e-14 5.54e-10 4.49963721e+03 4.49407202e+03 2.98e-05 65s
+ 56 7.11e-14 5.25e-10 4.49964358e+03 4.49413082e+03 2.95e-05 66s
+ 57 4.97e-14 3.86e-10 4.49901737e+03 4.49483758e+03 2.24e-05 66s
+ 58 4.97e-14 3.77e-10 4.49900976e+03 4.49485417e+03 2.23e-05 66s
+ 59 4.97e-14 3.61e-10 4.49894867e+03 4.49494477e+03 2.15e-05 67s
+ 60 2.84e-14 2.16e-10 4.49842279e+03 4.49567956e+03 1.47e-05 67s
+ 61 2.84e-14 1.25e-10 4.49813093e+03 4.49622420e+03 1.02e-05 67s
+ 62 2.13e-14 9.05e-11 4.49785493e+03 4.49641576e+03 7.71e-06 68s
+ 63 2.13e-14 2.46e-11 4.49758346e+03 4.49698685e+03 3.20e-06 68s
+ 64 2.13e-14 1.77e-11 4.49754878e+03 4.49704995e+03 2.67e-06 68s
+ 65 2.13e-14 3.18e-12 4.49747123e+03 4.49722789e+03 1.30e-06 69s
+ 66 2.13e-14 1.36e-12 4.49740334e+03 4.49723907e+03 8.80e-07 69s
+ 67 2.13e-14 1.36e-12 4.49734663e+03 4.49727135e+03 4.03e-07 70s
+ 68 2.13e-14 9.66e-13 4.49734376e+03 4.49727731e+03 3.56e-07 70s
+ 69 2.13e-14 5.68e-13 4.49732633e+03 4.49728530e+03 2.20e-07 70s
+ 70 2.13e-14 4.55e-13 4.49731802e+03 4.49730841e+03 5.15e-08 71s
+ 71 2.13e-14 4.83e-13 4.49731683e+03 4.49730985e+03 3.74e-08 71s
+ 72 2.13e-14 1.92e-12 4.49731281e+03 4.49731141e+03 7.53e-09 72s
+ 73 2.13e-14 5.35e-12 4.49731227e+03 4.49731210e+03 9.13e-10 72s
+ 74* 2.13e-14 2.73e-12 4.49731219e+03 4.49731218e+03 7.21e-11 72s
+ 75* 2.84e-14 7.65e-12 4.49731219e+03 4.49731218e+03 4.94e-12 73s
+ 76* 3.55e-14 4.81e-12 4.49731219e+03 4.49731219e+03 4.09e-13 73s
+ 77* 3.55e-14 7.19e-12 4.49731219e+03 4.49731219e+03 2.31e-15 73s
+ Running crossover as requested
+ Primal residual before push phase: 1.40e-09
+ Dual residual before push phase: 5.05e-10
+ Number of dual pushes required: 41031
+ Number of primal pushes required: 722
+ Summary
+ Runtime: 73.32s
+ Status interior point solve: optimal
+ Status crossover: optimal
+ objective value: 4.49731219e+03
+ interior solution primal residual (abs/rel): 5.48e-11 / 3.35e-12
+ interior solution dual residual (abs/rel): 7.19e-12 / 1.79e-14
+ interior solution objective gap (abs/rel): -5.22e-10 / -1.16e-13
+ basic solution primal infeasibility: 2.78e-17
+ basic solution dual infeasibility: 5.41e-16
+ Ipx: IPM optimal
+ Ipx: Crossover optimal
+ Solving the original LP from the solution after postsolve
+ Model status : Optimal
+ IPM iterations: 77
+ Crossover iterations: 4712
+ Objective value : 4.4973121850e+03
+ HiGHS run time : 73.55
+ LP solved for primal
+
+
+
+
+
+ (A JuMP Model
+ Minimization problem with:
+ Variables: 120136
+ Objective function type: AffExpr
+ `AffExpr`-in-`MathOptInterface.EqualTo{Float64}`: 35112 constraints
+ `AffExpr`-in-`MathOptInterface.GreaterThan{Float64}`: 20331 constraints
+ `AffExpr`-in-`MathOptInterface.LessThan{Float64}`: 97949 constraints
+ `VariableRef`-in-`MathOptInterface.EqualTo{Float64}`: 1 constraint
+ `VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 116439 constraints
+ Model mode: AUTOMATIC
+ CachingOptimizer state: ATTACHED_OPTIMIZER
+ Solver name: HiGHS
+ Names registered in the model: FuelCalculationCommit_single, cFuelCalculation_single, cMaxCap, cMaxCapEnergy, cMaxCapEnergyDuration, cMaxFlow_in, cMaxFlow_out, cMaxLineReinforcement, cMaxNSE, cMaxRetCommit, cMaxRetEnergy, cMaxRetNoCommit, cMinCap, cMinCapEnergy, cMinCapEnergyDuration, cNSEPerSeg, cPowerBalance, cSoCBalInterior, cSoCBalStart, cStartFuel_single, cTAuxLimit, cTAuxSum, cTLoss, eAvail_Trans_Cap, eCFix, eCFixEnergy, eCFuelOut, eCFuelStart, eCNSE, eCStart, eCVar_in, eCVar_out, eELOSS, eELOSSByZone, eEmissionsByPlant, eEmissionsByZone, eExistingCap, eExistingCapEnergy, eFuelConsumption, eFuelConsumptionYear, eFuelConsumption_single, eGenerationByThermAll, eGenerationByVRE, eGenerationByZone, eLosses_By_Zone, eNet_Export_Flows, eObj, ePlantCFuelOut, ePlantCFuelStart, ePlantFuel_generation, ePlantFuel_start, ePowerBalance, ePowerBalanceDisp, ePowerBalanceLossesByZone, ePowerBalanceNetExportFlows, ePowerBalanceNse, ePowerBalanceStor, ePowerBalanceThermCommit, eStartFuel, eTotalCFix, eTotalCFixEnergy, eTotalCFuelOut, eTotalCFuelStart, eTotalCNSE, eTotalCNSET, eTotalCNSETS, eTotalCNetworkExp, eTotalCStart, eTotalCStartT, eTotalCVarIn, eTotalCVarInT, eTotalCVarOut, eTotalCVarOutT, eTotalCap, eTotalCapEnergy, eTransMax, eZonalCFuelOut, eZonalCFuelStart, vCAP, vCAPENERGY, vCHARGE, vCOMMIT, vFLOW, vFuel, vNEW_TRANS_CAP, vNSE, vP, vRETCAP, vRETCAPENERGY, vS, vSHUT, vSTART, vStartFuel, vTAUX_NEG, vTAUX_POS, vTLOSS, vZERO, 73.97517013549805)
+
+
+
+Using `value.()`, we can see what the total capacity is of the optimized model:
+
+
+```julia
+totCap_base = value.(EP[:eTotalCap])
+```
+
+
+
+
+ 10-element Vector{Float64}:
+ 10.41532872265646
+ 10.085613331810192
+ 0.0
+ 0.0
+ 0.0
+ 0.0
+ 2.026239619715743
+ 0.0
+ 0.0
+ 0.16552558225070782
+
+
+
+Each element corresponds to the MW value of the node in the grid. In `1_three_zones`, there are ten nodes, each of which is either a natural gas, wind, solar, or battery plant. We can see which is which using `RESOURCE_NAMES` in the inputs dictionary:
+
+
+```julia
+RT = inputs["RESOURCE_NAMES"];
+DataFrame([RT totCap_base],["Resource","Total Capacity"])
+```
+
+
+
+```@raw html
+
+```
+
+
+
+```julia
+G2 = groupedbar(transpose(totCapB2), bar_position = :stack, bar_width=0.1,size=(100,450),
+ labels=["Natural Gas" "Solar" "Wind" "Battery"],legend = false,title="CO2 Mass Cap \n Obj Val: $(round(objective_value(EP2),digits=6))",
+xticks=[ ],ylabel="GW",color=colors)
+plot(G2,G1,size=(900,450),titlefontsize=8)
+
+```
+![svg](./files/t7_2p_mass_none.svg)
+
+
+
+The model favors solar power now, but natural gas and wind are also used. One thing to note is that the objective value of this system is much higher than it was without emissions constraints. The amount of CO$_2$ allowed is determined by the input file CO2_cap.csv:
+
+
+```julia
+CO2Cap = CSV.read(joinpath(case,"policies/CO2_cap.csv"),DataFrame,missingstring="NA")
+```
+
+
+
+```@raw html
+
3×11 DataFrame
Row
Column1
Network_zones
CO_2_Cap_Zone_1
CO_2_Cap_Zone_2
CO_2_Cap_Zone_3
CO_2_Max_tons_MWh_1
CO_2_Max_tons_MWh_2
CO_2_Max_tons_MWh_3
CO_2_Max_Mtons_1
CO_2_Max_Mtons_2
CO_2_Max_Mtons_3
String3
String3
Int64
Int64
Int64
Float64
Float64
Float64
Float64
Float64
Float64
1
MA
z1
1
0
0
0.05
0.0
0.0
0.018
0.0
0.0
2
CT
z2
0
1
0
0.0
0.05
0.0
0.0
0.025
0.0
3
ME
z3
0
0
1
0.0
0.0
0.05
0.0
0.0
0.025
+```
+
+
+#### Tolerance
+
+Let's try setting the CO$_2$ emissions tolerance to 0 for all nodes:
+
+
+```julia
+CO2Cap2 = copy(CO2Cap); # Save old tolerances
+```
+
+
+```julia
+CO2Cap2[!,"CO_2_Max_tons_MWh_1"] = [0.0;0.0;0.0];
+CO2Cap2[!,"CO_2_Max_tons_MWh_2"] = [0.0;0.0;0.0];
+CO2Cap2[!,"CO_2_Max_tons_MWh_3"] = [0.0;0.0;0.0];
+CO2Cap2[!,"CO_2_Max_Mtons_1"] = [0.0;0.0;0.0];
+CO2Cap2[!,"CO_2_Max_Mtons_2"] = [0.0;0.0;0.0];
+CO2Cap2[!,"CO_2_Max_Mtons_3"] = [0.0;0.0;0.0];
+```
+
+
+```julia
+CSV.write(joinpath(case,"policies/CO2_cap.csv"),CO2Cap2)
+```
+
+
+
+
+ "example_systems/1_three_zones/policies/CO2_cap.csv"
+
+
+
+
+```julia
+inputs = GenX.load_inputs(setup, case)
+EP3 = GenX.generate_model(setup,inputs,OPTIMIZER)
+GenX.solve_model(EP3,setup)
+```
+
+ Reading Input CSV Files
+ Network.csv Successfully Read!
+ Demand (load) data Successfully Read!
+ Fuels_data.csv Successfully Read!
+
+ Summary of resources loaded into the model:
+ -------------------------------------------------------
+ Resource type Number of resources
+ =======================================================
+ Thermal 3
+ VRE 4
+ Storage 3
+ =======================================================
+ Total number of resources: 10
+ -------------------------------------------------------
+ Generators_variability.csv Successfully Read!
+ Validating time basis
+ CO2_cap.csv Successfully Read!
+ CSV Files Successfully Read In From example_systems/1_three_zones
+ Discharge Module
+
+
+ Thermal.csv Successfully Read.
+ Vre.csv Successfully Read.
+ Storage.csv Successfully Read.
+ Resource_energy_share_requirement.csv Successfully Read.
+ Resource_capacity_reserve_margin.csv Successfully Read.
+ Resource_minimum_capacity_requirement.csv Successfully Read.
+
+ Non-served Energy Module
+ Investment Discharge Module
+ Unit Commitment Module
+ Fuel Module
+ CO2 Module
+ Investment Transmission Module
+ Transmission Module
+ Dispatchable Resources Module
+ Storage Resources Module
+ Storage Investment Module
+ Storage Core Resources Module
+ Storage Resources with Symmetric Charge/Discharge Capacity Module
+ Thermal (Unit Commitment) Resources Module
+ CO2 Policies Module
+ Running HiGHS 1.6.0: Copyright (c) 2023 HiGHS under MIT licence terms
+ Presolving model
+ 62715 rows, 59025 cols, 206619 nonzeros
+ 55750 rows, 52060 cols, 206345 nonzeros
+ Presolve : Reductions: rows 55750(-97645); columns 52060(-68079); elements 206345(-275877)
+ Solving the presolved LP
+ IPX model has 55750 rows, 52060 columns and 206345 nonzeros
+ Input
+ Number of variables: 52060
+ Number of free variables: 3696
+ Number of constraints: 55750
+ Number of equality constraints: 11515
+ Number of matrix entries: 206345
+ Matrix range: [4e-07, 1e+01]
+ RHS range: [7e-01, 2e+01]
+ Objective range: [1e-04, 4e+02]
+ Bounds range: [2e-03, 2e+01]
+ Preprocessing
+ Dualized model: no
+ Number of dense columns: 12
+ Range of scaling factors: [5.00e-01, 1.00e+00]
+ IPX version 1.0
+ Interior Point Solve
+ Iter P.res D.res P.obj D.obj mu Time
+ 0 8.66e+00 3.74e+02 3.32626622e+06 -5.13181178e+06 3.28e+03 0s
+ 1 4.14e+00 1.16e+02 8.23203603e+05 -4.36728593e+06 1.47e+03 0s
+ 2 3.82e+00 7.84e+01 7.99936610e+05 -9.74711636e+06 1.47e+03 0s
+ 3 2.29e+00 4.73e+01 6.28103564e+05 -1.28672691e+07 1.11e+03 0s
+ Constructing starting basis...
+ 4 3.38e-01 1.80e+01 1.09797314e+06 -8.08041855e+06 2.88e+02 1s
+ 5 1.83e-01 6.66e+00 7.53792907e+05 -4.07570830e+06 1.33e+02 2s
+ 6 8.56e-02 2.91e+00 4.53047274e+05 -2.12656299e+06 6.27e+01 2s
+ 7 4.32e-02 1.06e+00 2.95761273e+05 -9.82158558e+05 2.74e+01 3s
+ 8 2.45e-02 4.65e-01 2.07805891e+05 -5.33311956e+05 1.42e+01 3s
+ 9 1.41e-02 2.62e-01 1.50001050e+05 -3.47096559e+05 8.53e+00 3s
+ 10 1.09e-02 2.01e-01 1.34054733e+05 -3.01418233e+05 7.08e+00 3s
+ 11 3.02e-03 1.70e-01 9.84009565e+04 -2.78142992e+05 5.84e+00 4s
+ 12 6.80e-04 1.29e-01 8.22624601e+04 -2.40603504e+05 4.88e+00 4s
+ 13 5.19e-04 8.27e-02 7.53145221e+04 -1.68983726e+05 3.45e+00 4s
+ 14 2.11e-04 5.30e-02 6.28457097e+04 -1.25280563e+05 2.49e+00 4s
+ 15 8.42e-05 2.34e-02 5.29276919e+04 -7.04624346e+04 1.44e+00 5s
+ 16 4.88e-05 1.67e-02 4.48734610e+04 -4.78632217e+04 1.05e+00 5s
+ 17 3.17e-05 1.36e-02 4.18106416e+04 -3.79473404e+04 8.85e-01 5s
+ 18 1.54e-05 7.88e-03 3.47578913e+04 -1.29683476e+04 5.12e-01 6s
+ 19 5.88e-06 3.32e-03 3.06596888e+04 3.42643300e+03 2.74e-01 6s
+ 20 3.16e-06 1.13e-03 2.91816414e+04 1.11818829e+04 1.69e-01 6s
+ 21 2.01e-06 7.37e-04 2.80927086e+04 1.35130431e+04 1.35e-01 7s
+ 22 1.72e-06 6.41e-04 2.77751797e+04 1.42624417e+04 1.24e-01 7s
+ 23 1.64e-06 5.49e-04 2.77386064e+04 1.47199954e+04 1.19e-01 7s
+ 24 7.12e-07 4.66e-04 2.64331519e+04 1.56194992e+04 9.88e-02 8s
+ 25 2.77e-07 2.78e-04 2.60769883e+04 1.74334827e+04 7.80e-02 8s
+ 26 2.04e-07 1.45e-04 2.59822889e+04 1.87097084e+04 6.48e-02 8s
+ 27 7.83e-08 8.86e-05 2.50321932e+04 2.02590181e+04 4.24e-02 9s
+ 28 6.65e-08 7.45e-05 2.50172059e+04 2.04898905e+04 4.01e-02 9s
+ 29 5.27e-08 5.73e-05 2.49264089e+04 2.09172601e+04 3.55e-02 9s
+ 30 3.63e-08 4.90e-05 2.48125703e+04 2.11310315e+04 3.25e-02 10s
+ 31 2.24e-08 2.37e-05 2.46270269e+04 2.20176436e+04 2.29e-02 10s
+ 32 1.61e-08 1.80e-05 2.44783423e+04 2.23689799e+04 1.85e-02 10s
+ 33 1.48e-08 1.67e-05 2.44673758e+04 2.24042775e+04 1.81e-02 11s
+ 34 1.34e-08 1.10e-05 2.44514558e+04 2.26133491e+04 1.61e-02 11s
+ 35 5.16e-09 8.66e-06 2.42796001e+04 2.27599671e+04 1.33e-02 11s
+ 36 3.11e-09 3.47e-06 2.42120824e+04 2.31931323e+04 8.91e-03 11s
+ 37 5.68e-14 2.68e-06 2.40481768e+04 2.32862443e+04 6.66e-03 12s
+ 38 5.68e-14 1.15e-06 2.39818085e+04 2.36142391e+04 3.21e-03 12s
+ 39 5.68e-14 1.13e-06 2.39814115e+04 2.36145091e+04 3.21e-03 12s
+ 40 5.68e-14 9.70e-07 2.39782559e+04 2.36292988e+04 3.05e-03 13s
+ 41 6.39e-14 9.03e-07 2.39740539e+04 2.36364177e+04 2.95e-03 13s
+ 42 8.53e-14 8.87e-07 2.39736934e+04 2.36389906e+04 2.92e-03 13s
+ 43 8.53e-14 4.51e-07 2.39668070e+04 2.37015134e+04 2.32e-03 14s
+ 44 5.68e-14 3.63e-07 2.39530038e+04 2.37286287e+04 1.96e-03 14s
+ 45 6.39e-14 2.62e-07 2.39399533e+04 2.37645087e+04 1.53e-03 15s
+ 46 7.11e-14 2.35e-07 2.39403195e+04 2.37681261e+04 1.50e-03 15s
+ 47 5.68e-14 1.07e-07 2.39354909e+04 2.38220443e+04 9.89e-04 15s
+ 48 5.83e-14 8.32e-08 2.39319570e+04 2.38298216e+04 8.91e-04 16s
+ 49 6.34e-14 8.23e-08 2.39320866e+04 2.38307615e+04 8.84e-04 16s
+ 50 5.68e-14 2.73e-08 2.39222759e+04 2.38676200e+04 4.76e-04 16s
+ 51 5.68e-14 1.81e-08 2.39090601e+04 2.38790981e+04 2.61e-04 17s
+ 52 5.68e-14 2.13e-09 2.39087205e+04 2.38918872e+04 1.47e-04 17s
+ 53 5.68e-14 6.69e-10 2.39073686e+04 2.38988570e+04 7.42e-05 17s
+ 54 5.68e-14 1.71e-10 2.39057985e+04 2.39011287e+04 4.07e-05 17s
+ 55 5.68e-14 4.37e-11 2.39047907e+04 2.39026178e+04 1.89e-05 18s
+ 56 5.68e-14 2.50e-11 2.39045148e+04 2.39028929e+04 1.41e-05 18s
+ 57 5.68e-14 1.27e-11 2.39040896e+04 2.39032417e+04 7.39e-06 18s
+ 58 5.68e-14 2.27e-12 2.39039185e+04 2.39036010e+04 2.77e-06 18s
+ 59 5.68e-14 4.55e-13 2.39038268e+04 2.39036756e+04 1.32e-06 18s
+ 60 5.68e-14 9.09e-13 2.39037810e+04 2.39037207e+04 5.26e-07 18s
+ 61 5.68e-14 2.27e-13 2.39037547e+04 2.39037335e+04 1.85e-07 19s
+ 62 5.68e-14 2.27e-13 2.39037481e+04 2.39037351e+04 1.13e-07 19s
+ 63 5.68e-14 4.55e-13 2.39037407e+04 2.39037385e+04 1.93e-08 19s
+ 64 5.68e-14 1.14e-13 2.39037395e+04 2.39037392e+04 2.44e-09 19s
+ 65* 5.68e-14 2.27e-13 2.39037393e+04 2.39037393e+04 1.93e-10 19s
+ 66* 8.53e-14 2.27e-13 2.39037393e+04 2.39037393e+04 1.22e-11 19s
+ 67* 5.68e-14 9.09e-13 2.39037393e+04 2.39037393e+04 1.62e-12 19s
+ Running crossover as requested
+ Primal residual before push phase: 7.05e-08
+ Dual residual before push phase: 9.91e-07
+ Number of dual pushes required: 2532
+ Number of primal pushes required: 3866
+ Summary
+ Runtime: 19.36s
+ Status interior point solve: optimal
+ Status crossover: optimal
+ objective value: 2.39037393e+04
+ interior solution primal residual (abs/rel): 2.26e-10 / 1.38e-11
+ interior solution dual residual (abs/rel): 2.64e-09 / 6.57e-12
+ interior solution objective gap (abs/rel): 2.04e-07 / 8.51e-12
+ basic solution primal infeasibility: 4.44e-15
+ basic solution dual infeasibility: 2.08e-15
+ Ipx: IPM optimal
+ Ipx: Crossover optimal
+ Solving the original LP from the solution after postsolve
+ Model status : Optimal
+ IPM iterations: 67
+ Crossover iterations: 1428
+ Objective value : 2.3903739324e+04
+ HiGHS run time : 19.55
+ LP solved for primal
+
+
+
+
+
+ (A JuMP Model
+ Minimization problem with:
+ Variables: 120139
+ Objective function type: AffExpr
+ `AffExpr`-in-`MathOptInterface.EqualTo{Float64}`: 35112 constraints
+ `AffExpr`-in-`MathOptInterface.GreaterThan{Float64}`: 20331 constraints
+ `AffExpr`-in-`MathOptInterface.LessThan{Float64}`: 97952 constraints
+ `VariableRef`-in-`MathOptInterface.EqualTo{Float64}`: 4 constraints
+ `VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 116439 constraints
+ Model mode: AUTOMATIC
+ CachingOptimizer state: ATTACHED_OPTIMIZER
+ Solver name: HiGHS
+ Names registered in the model: FuelCalculationCommit_single, cCO2Emissions_systemwide, cFuelCalculation_single, cMaxCap, cMaxCapEnergy, cMaxCapEnergyDuration, cMaxFlow_in, cMaxFlow_out, cMaxLineReinforcement, cMaxNSE, cMaxRetCommit, cMaxRetEnergy, cMaxRetNoCommit, cMinCap, cMinCapEnergy, cMinCapEnergyDuration, cNSEPerSeg, cPowerBalance, cSoCBalInterior, cSoCBalStart, cStartFuel_single, cTAuxLimit, cTAuxSum, cTLoss, eAvail_Trans_Cap, eCFix, eCFixEnergy, eCFuelOut, eCFuelStart, eCNSE, eCStart, eCVar_in, eCVar_out, eELOSS, eELOSSByZone, eEmissionsByPlant, eEmissionsByZone, eExistingCap, eExistingCapEnergy, eFuelConsumption, eFuelConsumptionYear, eFuelConsumption_single, eGenerationByThermAll, eGenerationByVRE, eGenerationByZone, eLosses_By_Zone, eNet_Export_Flows, eObj, ePlantCFuelOut, ePlantCFuelStart, ePlantFuel_generation, ePlantFuel_start, ePowerBalance, ePowerBalanceDisp, ePowerBalanceLossesByZone, ePowerBalanceNetExportFlows, ePowerBalanceNse, ePowerBalanceStor, ePowerBalanceThermCommit, eStartFuel, eTotalCFix, eTotalCFixEnergy, eTotalCFuelOut, eTotalCFuelStart, eTotalCNSE, eTotalCNSET, eTotalCNSETS, eTotalCNetworkExp, eTotalCStart, eTotalCStartT, eTotalCVarIn, eTotalCVarInT, eTotalCVarOut, eTotalCVarOutT, eTotalCap, eTotalCapEnergy, eTransMax, eZonalCFuelOut, eZonalCFuelStart, vCAP, vCAPENERGY, vCHARGE, vCO2Cap_slack, vCOMMIT, vFLOW, vFuel, vNEW_TRANS_CAP, vNSE, vP, vRETCAP, vRETCAPENERGY, vS, vSHUT, vSTART, vStartFuel, vTAUX_NEG, vTAUX_POS, vTLOSS, vZERO, 19.870718002319336)
+
+
+
+
+```julia
+totCap3 = value.(EP3[:eTotalCap])
+
+totCapB3 = [totCap3[1] + totCap3[2] + totCap3[3], totCap3[4] + totCap3[6],
+ totCap3[5] + totCap3[7], totCap3[8] + totCap3[9] + totCap3[10]]
+
+println(DataFrame([RT totCap3],["Resource Type","Total Capacity"]))
+println(" ")
+
+println("Objective Value: ", objective_value(EP3))
+
+G3 = groupedbar(transpose(totCapB3), bar_position = :stack, bar_width=0.1,size=(400,450), xticks=[ ],ylabel="GW",
+ labels=["Natural Gas" "Solar" "Wind" "Battery"],color=colors,
+ title="CO2 Mass Cap, Zero Tolerance \n Obj Val: $(round(objective_value(EP3),digits=6))")
+
+plot(G3,G2,size=(800,450),titlefontsize=8)
+```
+
+ 10×2 DataFrame
+ Row│Resource Type Total Capacity
+ │Any Any
+ ─────┼───────────────────────────────────────────────
+ 1 │ MA_natural_gas_combined_cycle 0.0
+ 2 │ CT_natural_gas_combined_cycle 0.0
+ 3 │ ME_natural_gas_combined_cycle 0.0
+ 4 │ MA_solar_pv 44.2331
+ 5 │ CT_onshore_wind 0.0
+ 6 │ CT_solar_pv 71.8741
+ 7 │ ME_onshore_wind 5.55301
+ 8 │ MA_battery 15.1583
+ 9 │ CT_battery 30.3461
+ 10 │ ME_battery 2.16509
+
+ Objective Value: 23903.739324217397
+
+
+![svg](./files/t7_2p_mass_zero.svg)
+
+
+As you can see, the use of natural gas has been eliminated completely. Note that the objective value increases here as well, since renewable energy tends to cost more than natural gas.
+
+#### CO2 Slack
+
+Another thing we can do is, instead of demanding that the model meet the CO$_2$ cap exactly, add a penalty for violating the cap. This lets the system allow some CO$_2$ emissions if the cost of the grid with some emissions is low enough to offset the cost from the penalty variable. GenX will automatically incorporate this feature if a file named "CO2_cap_slack.csv" is in the policies folder of the directory. For more information on other types of policy slack variables in GenX, see the documentation on [Policy Slack Variables].
+
+Here, the CO$_2$ slack cap models a [carbon tax](https://en.wikipedia.org/wiki/Carbon_tax) of \$250 per ton of emissions.
+
+
+```julia
+CO2Cap_slack = DataFrame(["CO_2_Cap_Zone_1" 250; "CO_2_Cap_Zone_2" 250; "CO_2_Cap_Zone_3" 250],["CO2_Cap_Constraint","PriceCap"])
+```
+
+
+
+```@raw html
+
+```
+
+
+Below is a visualization of the production over the first 168 hours, with the load demand curve from all three zones plotted on top:
+
+
+```julia
+# Pre-processing
+tstart = 3
+tend = 170
+names_power = ["Solar","Natural_Gas","Battery","Wind"]
+
+power_tot = DataFrame([power[!,5]+power[!,7] power[!,2]+power[!,3]+power[!,4] power[!,9]+power[!,10]+power[!,11] power[!,6]+power[!,8]],
+ ["Solar","Natural_Gas","Battery","Wind"])
+
+power_plot = DataFrame([collect(1:length(power_tot[tstart:tend,1])) power_tot[tstart:tend,1] repeat([names_power[1]],length(power_tot[tstart:tend,1]))],
+ ["Hour","MW", "Resource_Type"]);
+
+for i in range(2,4)
+ power_plot_temp = DataFrame([collect(1:length(power_tot[tstart:tend,i])) power_tot[tstart:tend,i] repeat([names_power[i]],length(power_tot[tstart:tend,i]))],["Hour","MW", "Resource_Type"])
+ power_plot = [power_plot; power_plot_temp]
+end
+
+loads = CSV.read(joinpath(case,"system/Demand_data.csv"),DataFrame,missingstring="NA")
+loads_tot = loads[!,"Demand_MW_z1"]+loads[!,"Demand_MW_z2"]+loads[!,"Demand_MW_z3"]
+power_plot[!,"Demand_Total"] = repeat(loads_tot[tstart:tend],4);
+```
+
+
+```julia
+power_plot |>
+@vlplot()+
+@vlplot(mark={:area},
+ x={:Hour,title="Time Step (hours)",labels="Resource_Type:n",axis={values=0:12:168}}, y={:MW,title="Load (MW)",type="quantitative"},
+ color={"Resource_Type:n",scale={scheme="accent"},sort="descending"},order={field="Resource_Type:n"},width=845,height=400)+
+@vlplot(mark=:line,x=:Hour,y=:Demand_Total,labels="Demand",color={datum="Demand",legend={title=nothing}},title="Resource Capacity per Hour with Load Demand Curve, all Zones")
+```
+
+![svg](./files/t8_cap.svg)
+
+
+We can separate it by zone in the following plot:
+
+
+```julia
+Zone1 = [power[2,2] power[2,5] 0 power[2,9]]
+Zone2 = [power[2,3] power[2,7] power[2,6] power[2,10]]
+Zone3 = [power[2,4] 0 power[2,8] power[2,11]]
+
+colors=[:silver :yellow :deepskyblue :violetred3]
+
+groupedbar(["Zone 1", "Zone 2", "Zone 3"],[Zone1; Zone2; Zone3], bar_position = :stack, bar_width=0.5,size=(400,450),
+ labels=["Natural Gas" "Solar" "Wind" "Battery"],
+ title="Resource Allocation in MW Per Zone",ylabel="MW",color=colors, titlefontsize=10)
+```
+
+![svg](./files/t8_resource_allocation.svg)
+
+Below is a heatmap for the natural gas plant in Massachusetts. It is normalized by the end capacity in `capacity.csv`. To change which plant the heat map plots, change the DataFrame column in `power` when defining `power_cap` below, and the corresponding capacity.
+
+
+```julia
+capacity = CSV.read(joinpath(case,"results/capacity.csv"),DataFrame,missingstring="NA")
+Period_map = CSV.read(joinpath(case,"TDR_results/Period_map.csv"),DataFrame,missingstring="NA")
+
+# Take the EndCap and power of MA_natural_gas_combined_cycle
+cap = capacity[1,"EndCap"]
+power_cap = power[3:end,"MA_natural_gas_combined_cycle"]/cap;
+
+# Reconstruction of all hours of the year from TDR
+recon = []
+for i in range(1,52)
+ index = Period_map[i,"Rep_Period_Index"]
+ recon_temp = power_cap[(168*index-167):(168*index)]
+ recon = [recon; recon_temp]
+end
+
+# Convert to matrix format
+heat = recon[1:24]
+for i in range(1,364)
+ heat = [heat recon[(i*24-23):(i*24)]]
+end
+
+```
+
+
+```julia
+Plots.heatmap(heat,yticks=0:4:24,xticks=([15:30:364;],
+ ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sept","Oct","Nov","Dec"]),
+ size=(900,200),c=:lajolla)
+```
+
+![svg](./files/t8_heatmap.svg)
+
+
+
+### Cost and Revenue
+
+The basic cost of each power plant and the revenue it generates can be found in the files `costs.csv`, `NetRevenue.csv`, and `EnergyRevenue.csv`. `NetRevenue.csv` breaks down each specific cost per node in each zone, which is useful to visualize where the cost is coming from.
+
+
+```julia
+netrevenue = CSV.read(joinpath(case,"results/NetRevenue.csv"),DataFrame,missingstring="NA")
+```
+
+
+
+``` @raw html
+
10×28 DataFrame
Row
region
Resource
zone
Cluster
R_ID
Inv_cost_MW
Inv_cost_MWh
Inv_cost_charge_MW
Fixed_OM_cost_MW
Fixed_OM_cost_MWh
Fixed_OM_cost_charge_MW
Var_OM_cost_out
Fuel_cost
Var_OM_cost_in
StartCost
Charge_cost
CO2SequestrationCost
EnergyRevenue
SubsidyRevenue
OperatingReserveRevenue
OperatingRegulationRevenue
ReserveMarginRevenue
ESRRevenue
EmissionsCost
RegSubsidyRevenue
Revenue
Cost
Profit
String3
String31
Int64
Int64
Int64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
Float64
1
MA
MA_natural_gas_combined_cycle
1
1
1
5.54734e8
0.0
0.0
8.72561e7
0.0
0.0
3.69253e7
2.10416e8
0.0
3.84832e7
0.0
0.0
2.77103e9
0.0
0.0
0.0
0.0
0.0
1.84321e9
0.0
2.77103e9
2.77103e9
1.43051e-6
2
CT
CT_natural_gas_combined_cycle
2
1
2
1.42906e8
0.0
0.0
2.11911e7
0.0
0.0
1.22258e7
4.97792e7
0.0
7.75292e6
0.0
0.0
8.4423e8
0.0
0.0
0.0
0.0
0.0
6.10375e8
0.0
8.4423e8
8.4423e8
1.19209e-7
3
ME
ME_natural_gas_combined_cycle
3
1
3
3.52336e7
0.0
0.0
8.77661e6
0.0
0.0
4.02739e6
2.26505e7
0.0
3.33663e6
0.0
0.0
2.19267e8
0.0
0.0
0.0
0.0
0.0
1.45243e8
0.0
2.19267e8
2.19267e8
0.0
4
MA
MA_solar_pv
1
1
4
1.27007e9
0.0
0.0
2.79327e8
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
1.5494e9
0.0
0.0
0.0
0.0
0.0
0.0
0.0
1.5494e9
1.5494e9
-2.86102e-6
5
CT
CT_onshore_wind
2
1
5
1.40748e9
0.0
0.0
6.25617e8
0.0
0.0
2.90683e6
0.0
0.0
0.0
0.0
0.0
2.036e9
0.0
0.0
0.0
0.0
0.0
0.0
0.0
2.036e9
2.036e9
-5.00679e-6
6
CT
CT_solar_pv
2
1
6
1.35108e9
0.0
0.0
2.97142e8
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
1.64822e9
0.0
0.0
0.0
0.0
0.0
0.0
0.0
1.64822e9
1.64822e9
9.53674e-7
7
ME
ME_onshore_wind
3
1
7
1.03673e9
0.0
0.0
4.60821e8
0.0
0.0
2.625e6
0.0
0.0
0.0
0.0
0.0
1.50017e9
0.0
0.0
0.0
0.0
0.0
0.0
0.0
1.50017e9
1.50017e9
2.38419e-6
8
MA
MA_battery
1
0
8
4.29792e7
2.23673e8
0.0
1.07426e7
5.59033e7
0.0
7.59532e5
0.0
8.97367e5
0.0
1.3432e8
0.0
4.48833e8
0.0
0.0
0.0
0.0
0.0
0.0
0.0
4.48833e8
4.69275e8
-2.0442e7
9
CT
CT_battery
2
0
9
1.08405e8
5.73615e8
0.0
2.70957e7
1.43365e8
0.0
2.1875e6
0.0
2.58447e6
0.0
5.24177e8
0.0
1.31941e9
0.0
0.0
0.0
0.0
0.0
0.0
0.0
1.31941e9
1.38143e9
-6.20165e7
10
ME
ME_battery
3
0
10
3.58043e7
1.03994e8
0.0
8.94925e6
2.59915e7
0.0
7.35552e5
0.0
8.69036e5
0.0
3.81057e7
0.0
2.03732e8
0.0
0.0
0.0
0.0
0.0
0.0
0.0
2.03732e8
2.14449e8
-1.0717e7
+```
+
+
+
+```julia
+xnames = netrevenue[!,2]
+names1 = ["Investment cost" "Fixed OM cost" "Variable OM cost" "Fuel cost" "Start Cost" "Battery charge cost" "CO2 Sequestration Cost" "Revenue"]
+
+netrev = [netrevenue[!,6]+netrevenue[!,7]+netrevenue[!,8] netrevenue[!,9]+netrevenue[!,10]+netrevenue[!,11] netrevenue[!,12]+netrevenue[!,14] netrevenue[!,13] netrevenue[!,15] netrevenue[!,16] netrevenue[!,17]]
+
+groupedbar(xnames,netrev, bar_position = :stack, bar_width=0.9,size=(850,800),
+ labels=names1,title="Cost Allocation per Node with Revenue",xlabel="Node",ylabel="Cost (Dollars)",
+ titlefontsize=10,legend=:outerright,ylims=[0,maximum(netrevenue[!,"Revenue"])+1e8],xrotation = 90)
+StatsPlots.scatter!(xnames,netrevenue[!,"Revenue"],label="Revenue",color="black")
+
+```
+
+![svg](./files/t8_cost.svg)
+
+
+
+### Emissions
+
+The file `emissions.csv` gives the total CO2 emissions per zone for each hour GenX runs. The first three rows give the marginal CO2 abatement cost in $/ton CO2.
+
+
+```julia
+emm1 = CSV.read(joinpath(case,"results/emissions.csv"),DataFrame)
+```
+
+
+
+``` @raw html
+
1852×5 DataFrame
1827 rows omitted
Row
Zone
1
2
3
Total
String15
Float64
Float64
Float64
Float64
1
CO2_Price_1
444.921
0.0
0.0
0.0
2
CO2_Price_2
0.0
468.668
0.0
0.0
3
CO2_Price_3
0.0
0.0
240.86
0.0
4
AnnualSum
4.14279e6
1.30236e6
6.03017e5
6.04816e6
5
t1
0.0
0.0
0.0
0.0
6
t2
0.0
0.0
0.0
0.0
7
t3
0.0
0.0
0.0
0.0
8
t4
0.0
0.0
0.0
0.0
9
t5
0.0
0.0
0.0
0.0
10
t6
0.0
0.0
0.0
0.0
11
t7
0.0
0.0
0.0
0.0
12
t8
0.0
0.0
0.0
0.0
13
t9
0.0
0.0
0.0
0.0
⋮
⋮
⋮
⋮
⋮
⋮
1841
t1837
0.0
0.0
0.0
0.0
1842
t1838
0.0
0.0
0.0
0.0
1843
t1839
0.0
0.0
0.0
0.0
1844
t1840
0.0
0.0
0.0
0.0
1845
t1841
0.0
0.0
0.0
0.0
1846
t1842
0.0
0.0
0.0
0.0
1847
t1843
0.0
0.0
0.0
0.0
1848
t1844
0.0
0.0
0.0
0.0
1849
t1845
0.0
0.0
0.0
0.0
1850
t1846
0.0
0.0
0.0
0.0
1851
t1847
0.0
0.0
0.0
0.0
1852
t1848
0.0
0.0
0.0
0.0
+```
+
+
+
+```julia
+# Pre-processing
+tstart = 470
+tend = 1500
+names_emm = ["Zone 1","Zone 2","Zone 3"]
+
+emm_tot = DataFrame([emm1[3:end,2] emm1[3:end,3] emm1[3:end,4]],
+ ["Zone 1","Zone 2","Zone 3"])
+
+
+emm_plot = DataFrame([collect((tstart-3):(tend-3)) emm_tot[tstart:tend,1] repeat([names_emm[1]],(tend-tstart+1))],
+ ["Hour","MW","Zone"]);
+
+for i in range(2,3)
+ emm_plot_temp = DataFrame([collect((tstart-3):(tend-3)) emm_tot[tstart:tend,i] repeat([names_emm[i]],(tend-tstart+1))],["Hour","MW","Zone"])
+ emm_plot = [emm_plot; emm_plot_temp]
+end
+
+```
+
+
+```julia
+emm_plot |>
+@vlplot(mark={:line},
+    x={:Hour,title="Time Step (hours)",labels="Zone:n",axis={values=tstart:24:tend}}, y={:MW,title="Emissions (Tons)",type="quantitative"},
+    color={"Zone:n"},width=845,height=400,title="Emissions per Time Step by Zone")
+```
+
+![svg](./files/t8_emm1.svg)
+
+
+
+
+Let's try changing the CO2 cap, as in Tutorial 7, and plotting the resulting emissions.
+
+
+```julia
+genx_settings_TZ = YAML.load(open((joinpath(case,"settings/genx_settings.yml"))))
+genx_settings_TZ["CO2Cap"] = 0
+YAML.write_file((joinpath(case,"settings/genx_settings.yml")), genx_settings_TZ)
+
+include("example_systems/1_three_zones/Run.jl")
+
+# run outside of notebook
+```
+
+ Configuring Settings
+ Time Series Data Already Clustered.
+ Configuring Solver
+ Loading Inputs
+ Reading Input CSV Files
+ Network.csv Successfully Read!
+ Demand (load) data Successfully Read!
+ Fuels_data.csv Successfully Read!
+
+ Summary of resources loaded into the model:
+ -------------------------------------------------------
+ Resource type Number of resources
+ =======================================================
+ Thermal 3
+ VRE 4
+ Storage 3
+ =======================================================
+ Total number of resources: 10
+ -------------------------------------------------------
+
+
+ Thermal.csv Successfully Read.
+ Vre.csv Successfully Read.
+ Storage.csv Successfully Read.
+ Resource_energy_share_requirement.csv Successfully Read.
+ Resource_capacity_reserve_margin.csv Successfully Read.
+ Resource_minimum_capacity_requirement.csv Successfully Read.
+
+
+ Generators_variability.csv Successfully Read!
+ Validating time basis
+ Minimum_capacity_requirement.csv Successfully Read!
+ CSV Files Successfully Read In From /Users/mayamutic/Desktop/GenX-Tutorials/Tutorials/example_systems/1_three_zones
+ Generating the Optimization Model
+ Discharge Module
+ Non-served Energy Module
+ Investment Discharge Module
+ Unit Commitment Module
+ Fuel Module
+ CO2 Module
+ Investment Transmission Module
+ Transmission Module
+ Dispatchable Resources Module
+ Storage Resources Module
+ Storage Investment Module
+ Storage Core Resources Module
+ Storage Resources with Symmetric Charge/Discharge Capacity Module
+ Thermal (Unit Commitment) Resources Module
+ Minimum Capacity Requirement Module
+ Time elapsed for model building is
+ 0.531860834
+ Solving Model
+ Running HiGHS 1.6.0: Copyright (c) 2023 HiGHS under MIT licence terms
+ Presolving model
+ 118035 rows, 81083 cols, 422475 nonzeros
+ 110878 rows, 73926 cols, 422989 nonzeros
+ Presolve : Reductions: rows 110878(-42517); columns 73926(-46210); elements 422989(-48026)
+ Solving the presolved LP
+ IPX model has 110878 rows, 73926 columns and 422989 nonzeros
+ Input
+ Number of variables: 73926
+ Number of free variables: 3696
+ Number of constraints: 110878
+ Number of equality constraints: 16867
+ Number of matrix entries: 422989
+ Matrix range: [4e-07, 1e+01]
+ RHS range: [8e-01, 2e+01]
+ Objective range: [1e-04, 7e+02]
+ Bounds range: [2e-03, 2e+01]
+ Preprocessing
+ Dualized model: no
+ Number of dense columns: 15
+ Range of scaling factors: [5.00e-01, 1.00e+00]
+ IPX version 1.0
+ Interior Point Solve
+ Iter P.res D.res P.obj D.obj mu Time
+ 0 2.34e+01 6.62e+02 3.28242911e+06 -1.30284671e+07 1.55e+04 0s
+ 1 1.39e+01 1.95e+02 -2.79051574e+06 -1.70869614e+07 8.32e+03 0s
+ 2 1.34e+01 1.41e+02 -2.86489620e+06 -3.99200815e+07 8.76e+03 0s
+ 3 4.75e+00 7.73e+01 -3.58904115e+06 -4.55608455e+07 4.46e+03 1s
+ Constructing starting basis...
+ 4 2.62e+00 2.77e+01 -1.46128616e+06 -3.92821768e+07 2.06e+03 3s
+ 5 2.29e+00 2.23e+01 -1.07522739e+06 -3.64123392e+07 1.79e+03 4s
+ 6 1.30e+00 6.60e+00 5.76572112e+04 -2.35071885e+07 8.03e+02 6s
+ 7 5.52e-02 1.21e+00 9.07716904e+05 -1.09217119e+07 1.39e+02 7s
+ 8 3.19e-03 1.35e-01 4.98206547e+05 -1.86042062e+06 2.08e+01 7s
+ 9 1.88e-04 3.20e-02 1.94049580e+05 -4.73698668e+05 5.30e+00 8s
+ 10 5.02e-05 7.56e-03 1.21122260e+05 -1.44306243e+05 1.78e+00 9s
+ 11 1.41e-05 1.14e-03 4.93526445e+04 -2.41004370e+04 4.23e-01 9s
+ 12 5.61e-06 1.68e-04 3.67745870e+04 -1.32012445e+04 2.72e-01 10s
+ 13 1.95e-06 1.01e-05 2.77016719e+04 -6.88123837e+03 1.86e-01 11s
+ 14 9.38e-07 4.53e-06 1.71337276e+04 -1.48902435e+03 1.00e-01 13s
+ 15 4.55e-07 2.12e-06 1.18334304e+04 1.03786061e+03 5.79e-02 14s
+ 16 2.04e-07 1.21e-06 9.18918668e+03 2.04003217e+03 3.84e-02 15s
+ 17 1.10e-07 6.34e-07 7.84163830e+03 3.03187846e+03 2.58e-02 17s
+ 18 5.85e-08 3.55e-07 7.07336591e+03 3.60947669e+03 1.86e-02 19s
+ 19 4.19e-08 1.93e-07 6.81537596e+03 4.04962353e+03 1.48e-02 22s
+ 20 2.17e-08 1.22e-07 6.38250114e+03 4.36184309e+03 1.08e-02 26s
+ 21 1.46e-08 8.65e-08 6.15373845e+03 4.59489784e+03 8.36e-03 28s
+ 22 1.45e-08 8.60e-08 6.21987475e+03 4.64840404e+03 8.43e-03 31s
+ 23 1.10e-08 6.52e-08 6.17121693e+03 4.72787295e+03 7.74e-03 33s
+ 24 8.82e-09 4.21e-08 6.08867860e+03 4.94663843e+03 6.13e-03 35s
+ 25 7.42e-09 1.59e-08 6.06378830e+03 5.01156108e+03 5.64e-03 37s
+ 26 7.08e-09 2.46e-09 6.05642307e+03 5.09371090e+03 5.16e-03 38s
+ 27 3.57e-09 1.59e-09 5.87880189e+03 5.21058424e+03 3.58e-03 40s
+ 28 1.95e-09 1.11e-09 5.81293790e+03 5.25218415e+03 3.01e-03 41s
+ 29 1.42e-09 7.21e-10 5.77482634e+03 5.32239130e+03 2.43e-03 43s
+ 30 1.35e-09 6.49e-10 5.77061907e+03 5.32860331e+03 2.37e-03 45s
+ 31 1.26e-09 5.90e-10 5.76739631e+03 5.33020034e+03 2.35e-03 46s
+ 32 1.08e-09 4.91e-10 5.75363003e+03 5.35203400e+03 2.15e-03 47s
+ 33 2.49e-14 4.26e-10 5.68794026e+03 5.36071156e+03 1.76e-03 47s
+ 34 2.13e-14 2.53e-10 5.66831172e+03 5.41753142e+03 1.35e-03 48s
+ 35 2.13e-14 1.06e-10 5.63886596e+03 5.49300645e+03 7.82e-04 49s
+ 36 2.13e-14 5.55e-11 5.61729546e+03 5.52199336e+03 5.11e-04 51s
+ 37 2.13e-14 2.59e-11 5.60778510e+03 5.54931828e+03 3.14e-04 52s
+ 38 2.13e-14 1.75e-11 5.60173021e+03 5.55566214e+03 2.47e-04 53s
+ 39 2.13e-14 1.18e-11 5.59813889e+03 5.56260835e+03 1.91e-04 54s
+ 40 2.13e-14 1.01e-11 5.59718690e+03 5.56442962e+03 1.76e-04 55s
+ 41 2.13e-14 1.00e-11 5.59698222e+03 5.56447950e+03 1.74e-04 55s
+ 42 2.13e-14 4.04e-12 5.59428165e+03 5.57215354e+03 1.19e-04 56s
+ 43 2.13e-14 2.50e-12 5.59133373e+03 5.57571709e+03 8.38e-05 56s
+ 44 2.13e-14 1.48e-12 5.59035970e+03 5.57874298e+03 6.23e-05 56s
+ 45 2.13e-14 1.22e-12 5.58936152e+03 5.57965257e+03 5.21e-05 57s
+ 46 2.13e-14 1.25e-12 5.58736745e+03 5.58061357e+03 3.62e-05 57s
+ 47 2.13e-14 5.68e-13 5.58697892e+03 5.58214126e+03 2.60e-05 57s
+ 48 2.13e-14 5.36e-13 5.58691900e+03 5.58233212e+03 2.46e-05 58s
+ 49 2.13e-14 3.73e-13 5.58656054e+03 5.58365417e+03 1.56e-05 58s
+ 50 2.13e-14 3.55e-13 5.58656104e+03 5.58367145e+03 1.55e-05 58s
+ 51 2.13e-14 2.31e-13 5.58641950e+03 5.58394090e+03 1.33e-05 59s
+ 52 2.13e-14 2.56e-13 5.58608647e+03 5.58430397e+03 9.56e-06 59s
+ 53 2.13e-14 1.43e-13 5.58604712e+03 5.58455329e+03 8.01e-06 59s
+ 54 2.13e-14 3.13e-13 5.58604145e+03 5.58455679e+03 7.96e-06 59s
+ 55 2.13e-14 1.99e-13 5.58598248e+03 5.58506295e+03 4.93e-06 60s
+ 56 2.13e-14 2.56e-13 5.58593821e+03 5.58507236e+03 4.64e-06 60s
+ 57 2.13e-14 1.99e-13 5.58578478e+03 5.58540690e+03 2.03e-06 60s
+ 58 2.84e-14 2.91e-13 5.58578450e+03 5.58540754e+03 2.02e-06 61s
+ 59 2.13e-14 2.56e-13 5.58572083e+03 5.58541744e+03 1.63e-06 61s
+ 60 2.84e-14 2.56e-13 5.58571491e+03 5.58541894e+03 1.59e-06 61s
+ 61 2.13e-14 1.63e-13 5.58565078e+03 5.58546281e+03 1.01e-06 61s
+ 62 2.13e-14 3.41e-13 5.58557843e+03 5.58548803e+03 4.85e-07 62s
+ 63 2.13e-14 3.98e-13 5.58557613e+03 5.58548563e+03 4.85e-07 62s
+ 64 2.13e-14 3.69e-13 5.58556537e+03 5.58552541e+03 2.14e-07 62s
+ 65 2.13e-14 3.13e-13 5.58556537e+03 5.58552559e+03 2.13e-07 62s
+ 66 2.13e-14 1.42e-13 5.58555314e+03 5.58553125e+03 1.17e-07 63s
+ 67 2.13e-14 1.74e-13 5.58555081e+03 5.58553284e+03 9.64e-08 63s
+ 68 2.13e-14 2.13e-13 5.58554989e+03 5.58553484e+03 8.07e-08 63s
+ 69 2.13e-14 5.68e-13 5.58554752e+03 5.58553671e+03 5.80e-08 63s
+ 70 2.13e-14 4.83e-13 5.58554607e+03 5.58553831e+03 4.16e-08 64s
+ 71 2.13e-14 2.13e-13 5.58554582e+03 5.58554198e+03 2.06e-08 64s
+ 72 2.13e-14 8.92e-13 5.58554574e+03 5.58554196e+03 2.03e-08 64s
+ 73 2.13e-14 1.09e-12 5.58554539e+03 5.58554200e+03 1.82e-08 64s
+ 74 2.13e-14 3.23e-12 5.58554405e+03 5.58554312e+03 4.99e-09 65s
+ 75 2.13e-14 5.31e-12 5.58554382e+03 5.58554334e+03 2.58e-09 65s
+ 76 2.13e-14 7.04e-12 5.58554366e+03 5.58554353e+03 7.22e-10 65s
+ 77* 2.84e-14 1.57e-12 5.58554362e+03 5.58554357e+03 2.79e-10 65s
+ 78* 3.55e-14 4.18e-12 5.58554360e+03 5.58554358e+03 1.16e-10 65s
+ 79* 3.55e-14 5.29e-12 5.58554360e+03 5.58554360e+03 2.36e-11 66s
+ 80* 3.55e-14 5.19e-12 5.58554360e+03 5.58554360e+03 3.40e-12 66s
+ 81* 3.55e-14 1.16e-11 5.58554360e+03 5.58554360e+03 3.27e-13 66s
+ 82* 3.55e-14 9.05e-12 5.58554360e+03 5.58554360e+03 2.97e-14 66s
+ Running crossover as requested
+ Primal residual before push phase: 9.82e-08
+ Dual residual before push phase: 1.24e-07
+ Number of dual pushes required: 18968
+ Number of primal pushes required: 2204
+ Summary
+ Runtime: 66.29s
+ Status interior point solve: optimal
+ Status crossover: optimal
+ objective value: 5.58554360e+03
+ interior solution primal residual (abs/rel): 1.51e-10 / 8.54e-12
+ interior solution dual residual (abs/rel): 8.46e-10 / 1.20e-12
+ interior solution objective gap (abs/rel): 2.29e-09 / 4.10e-13
+ basic solution primal infeasibility: 1.43e-14
+ basic solution dual infeasibility: 6.89e-16
+ Ipx: IPM optimal
+ Ipx: Crossover optimal
+ Solving the original LP from the solution after postsolve
+ Model status : Optimal
+ IPM iterations: 82
+ Crossover iterations: 1447
+ Objective value : 5.5855435982e+03
+ HiGHS run time : 66.51
+ LP solved for primal
+ Writing Output
+ Time elapsed for writing costs is
+ 0.099885792
+ Time elapsed for writing capacity is
+ 0.000646583
+ Time elapsed for writing power is
+ 0.021790625
+ Time elapsed for writing charge is
+ 0.0167645
+ Time elapsed for writing capacity factor is
+ 0.021259458
+ Time elapsed for writing storage is
+ 0.009532667
+ Time elapsed for writing curtailment is
+ 0.019054083
+ Time elapsed for writing nse is
+ 0.0452305
+ Time elapsed for writing power balance is
+ 0.053504209
+ Time elapsed for writing transmission flows is
+ 0.004709417
+ Time elapsed for writing transmission losses is
+ 0.013975458
+ Time elapsed for writing network expansion is
+ 0.000157
+ Time elapsed for writing emissions is
+ 0.050411042
+ Time elapsed for writing reliability is
+ 0.005842667
+ Time elapsed for writing storage duals is
+ 0.024307708
+ Time elapsed for writing commitment is
+ 0.006124458
+ Time elapsed for writing startup is
+ 0.012590917
+ Time elapsed for writing shutdown is
+ 0.012514292
+ Time elapsed for writing fuel consumption is
+ 0.054159667
+ Time elapsed for writing co2 is
+ 0.019371417
+ Time elapsed for writing price is
+ 0.005712875
+ Time elapsed for writing energy revenue is
+ 0.010585041
+ Time elapsed for writing charging cost is
+ 0.005354792
+ Time elapsed for writing subsidy is
+ 0.000396208
+ Time elapsed for writing time weights is
+ 0.000497875
+ Time elapsed for writing minimum capacity requirement is
+ 0.000146875
+ Time elapsed for writing net revenue is
+ 0.011134208
+ Wrote outputs to /Users/mayamutic/Desktop/GenX-Tutorials/Tutorials/example_systems/1_three_zones/results_1
+ Time elapsed for writing is
+ 0.530491792
+
+
+
+```julia
+emm2 = CSV.read(joinpath(case,"results_1/emissions.csv"),DataFrame)
+```
+
+
+
+``` @raw html
+
1849×5 DataFrame
1824 rows omitted
Row
Zone
1
2
3
Total
String15
Float64
Float64
Float64
Float64
1
AnnualSum
1.68155e7
1.41088e7
4310.21
3.09286e7
2
t1
997.169
0.0
0.0
997.169
3
t2
997.169
0.0
0.0
997.169
4
t3
997.169
0.0
0.0
997.169
5
t4
997.169
0.0
0.0
997.169
6
t5
997.169
0.0
0.0
997.169
7
t6
997.169
0.0
0.0
997.169
8
t7
997.169
0.0
0.0
997.169
9
t8
997.169
0.0
0.0
997.169
10
t9
997.169
0.0
0.0
997.169
11
t10
1471.46
0.0
0.0
1471.46
12
t11
997.169
0.0
0.0
997.169
13
t12
1115.81
0.0
0.0
1115.81
⋮
⋮
⋮
⋮
⋮
⋮
1838
t1837
2789.35
1012.99
0.0
3802.34
1839
t1838
2835.21
1012.99
0.0
3848.2
1840
t1839
2520.57
1012.99
0.0
3533.56
1841
t1840
1496.47
445.85
0.0
1942.32
1842
t1841
2571.26
1012.99
0.0
3584.25
1843
t1842
2835.21
1012.99
0.0
3848.2
1844
t1843
2835.21
1012.99
0.0
3848.2
1845
t1844
2625.42
960.184
0.0
3585.6
1846
t1845
2506.32
342.391
0.0
2848.71
1847
t1846
2277.59
342.391
0.0
2619.98
1848
t1847
1960.08
524.526
0.0
2484.6
1849
t1848
1566.77
342.391
0.0
1909.16
+```
+
+
+
+```julia
+# Pre-processing
+tstart = 470
+tend = 1500
+names_emm = ["Zone 1","Zone 2","Zone 3"]
+
+emm_tot2 = DataFrame([emm2[3:end,2] emm2[3:end,3] emm2[3:end,4]],
+ ["Zone 1","Zone 2","Zone 3"])
+
+
+emm_plot2 = DataFrame([collect((tstart-3):(tend-3)) emm_tot2[tstart:tend,1] repeat([names_emm[1]],(tend-tstart+1))],
+ ["Hour","MW","Zone"]);
+
+for i in range(2,3)
+ emm_plot_temp = DataFrame([collect((tstart-3):(tend-3)) emm_tot2[tstart:tend,i] repeat([names_emm[i]],(tend-tstart+1))],["Hour","MW","Zone"])
+ emm_plot2 = [emm_plot2; emm_plot_temp]
+end
+```
+
+
+```julia
+emm_plot2 |>
+@vlplot(mark={:line},
+ x={:Hour,title="Time Step (hours)",labels="Zone:n",axis={values=tstart:24:tend}}, y={:MW,title="Emissions (Tons)",type="quantitative"},
+ color={"Zone:n"},width=845,height=400,title="Emissions per Time Step by Zone")
+```
+
+![svg](./files/t8_emm2.svg)
+
+
+
+
+We can see how the emissions, summed over all zones, compare in the following plot:
+
+
+```julia
+emm1sum = sum(eachcol(emm_tot));
+emm2sum = sum(eachcol(emm_tot2));
+
+Plots.plot(collect((tstart-3):(tend-3)),emm1sum[tstart:tend],size=(800,400),label="Load Based CO2 Cap",
+ xlabel="Time Step (Hours)",ylabel="Emissions (Tons)",thickness_scaling = 1.1,linewidth = 1.5,
+ title="Emissions per Time Step",xticks=tstart:72:tend)
+Plots.plot!(collect((tstart-3):(tend-3)),emm2sum[tstart:tend],label="No CO2 Cap",linewidth = 1.5)
+```
+![svg](./files/t8_emm_comp.svg)
+
+
+
+Finally, set the CO2 Cap back to 2:
+
+
+```julia
+genx_settings_TZ["CO2Cap"] = 2
+YAML.write_file((joinpath(case,"settings/genx_settings.yml")), genx_settings_TZ)
+```
+
+
+```julia
+
+```
diff --git a/docs/src/Tutorials/Tutorials_intro.md b/docs/src/Tutorials/Tutorials_intro.md
index 014e215b39..62e59083c9 100644
--- a/docs/src/Tutorials/Tutorials_intro.md
+++ b/docs/src/Tutorials/Tutorials_intro.md
@@ -10,5 +10,7 @@ Here is a list of the tutorials:
4. [Tutorial 4: Model Generation](@ref)
5. [Tutorial 5: Solving the Model](@ref)
6. [Tutorial 6: Solver Settings](@ref)
+7. [Tutorial 7: Policy Constraints](@ref)
+8. [Tutorial 8: Outputs](@ref)
diff --git a/docs/src/Tutorials/files/Julia.png b/docs/src/Tutorials/files/Julia.png
new file mode 100644
index 0000000000..e881b2fa4b
Binary files /dev/null and b/docs/src/Tutorials/files/Julia.png differ
diff --git a/docs/src/Tutorials/files/LatexHierarchy.png b/docs/src/Tutorials/files/LatexHierarchy.png
index 05981f99c0..9e7c7295f9 100644
Binary files a/docs/src/Tutorials/files/LatexHierarchy.png and b/docs/src/Tutorials/files/LatexHierarchy.png differ
diff --git a/docs/src/Tutorials/files/OneZoneCase.png b/docs/src/Tutorials/files/OneZoneCase.png
deleted file mode 100644
index f394e93853..0000000000
Binary files a/docs/src/Tutorials/files/OneZoneCase.png and /dev/null differ
diff --git a/docs/src/Tutorials/files/addGenX.png b/docs/src/Tutorials/files/addGenX.png
new file mode 100644
index 0000000000..e101cc2509
Binary files /dev/null and b/docs/src/Tutorials/files/addGenX.png differ
diff --git a/docs/src/Tutorials/files/addIJulia.png b/docs/src/Tutorials/files/addIJulia.png
new file mode 100644
index 0000000000..3967abf71e
Binary files /dev/null and b/docs/src/Tutorials/files/addIJulia.png differ
diff --git a/docs/src/Tutorials/files/default_settings.png b/docs/src/Tutorials/files/default_settings.png
index 254fedc8b6..db05272964 100644
Binary files a/docs/src/Tutorials/files/default_settings.png and b/docs/src/Tutorials/files/default_settings.png differ
diff --git a/docs/src/Tutorials/files/genx_settings_none.png b/docs/src/Tutorials/files/genx_settings_none.png
index 9626204fdb..8e1d47783f 100644
Binary files a/docs/src/Tutorials/files/genx_settings_none.png and b/docs/src/Tutorials/files/genx_settings_none.png differ
diff --git a/docs/src/Tutorials/files/genxsettings.png b/docs/src/Tutorials/files/genxsettings.png
index f78f91c41e..82d6913e86 100644
Binary files a/docs/src/Tutorials/files/genxsettings.png and b/docs/src/Tutorials/files/genxsettings.png differ
diff --git a/docs/src/Tutorials/files/highs_defaults.png b/docs/src/Tutorials/files/highs_defaults.png
index 11e5471172..f337cb2ef6 100644
Binary files a/docs/src/Tutorials/files/highs_defaults.png and b/docs/src/Tutorials/files/highs_defaults.png differ
diff --git a/docs/src/Tutorials/files/jump_logo.png b/docs/src/Tutorials/files/jump_logo.png
index 634a648513..f21aa5f951 100644
Binary files a/docs/src/Tutorials/files/jump_logo.png and b/docs/src/Tutorials/files/jump_logo.png differ
diff --git a/docs/src/Tutorials/files/jupyter_screen.png b/docs/src/Tutorials/files/jupyter_screen.png
new file mode 100644
index 0000000000..6f5980b462
Binary files /dev/null and b/docs/src/Tutorials/files/jupyter_screen.png differ
diff --git a/docs/src/Tutorials/files/new_england.png b/docs/src/Tutorials/files/new_england.png
index b3076941c7..22df814aea 100644
Binary files a/docs/src/Tutorials/files/new_england.png and b/docs/src/Tutorials/files/new_england.png differ
diff --git a/docs/src/Tutorials/files/opennotebook.png b/docs/src/Tutorials/files/opennotebook.png
new file mode 100644
index 0000000000..4d87a20dae
Binary files /dev/null and b/docs/src/Tutorials/files/opennotebook.png differ
diff --git a/docs/src/Tutorials/files/output_58_0.svg b/docs/src/Tutorials/files/output_58_0.svg
deleted file mode 100644
index 41748d9d21..0000000000
--- a/docs/src/Tutorials/files/output_58_0.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/output_65_0.svg b/docs/src/Tutorials/files/output_65_0.svg
deleted file mode 100644
index c66dee379a..0000000000
--- a/docs/src/Tutorials/files/output_65_0.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/runcase.png b/docs/src/Tutorials/files/runcase.png
index c7c80ed0ae..d92fe4068c 100644
Binary files a/docs/src/Tutorials/files/runcase.png and b/docs/src/Tutorials/files/runcase.png differ
diff --git a/docs/src/Tutorials/files/statenames.csv b/docs/src/Tutorials/files/statenames.csv
deleted file mode 100644
index ac48dd9087..0000000000
--- a/docs/src/Tutorials/files/statenames.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-
-Alabama Arkansas Arizona California Colorado Connecticut District of Columbia Delaware Florida Georgia Iowa Idaho Illinois Indiana Kansas Kentucky Louisiana Massachusetts Maryland Maine Michigan Minnesota Missouri Mississippi Montana North Carolina North Dakota Nebraska New Hampshire New Jersey New Mexico Nevada New York Ohio Oklahoma Oregon Pennsylvania Rhode Island South Carolina South Dakota Tennessee Texas Utah Virginia Vermont Washington Wisconsin West Virginia Wyoming
diff --git a/docs/src/Tutorials/files/states.csv b/docs/src/Tutorials/files/states.csv
deleted file mode 100644
index 6d499af7bf..0000000000
--- a/docs/src/Tutorials/files/states.csv
+++ /dev/null
@@ -1,50 +0,0 @@
-State Longitude Latitude
-Alabama -86.902298 32.318231
-Arkansas -91.831833 35.20105
-Arizona -111.093731 34.048928
-California -119.417932 36.778261
-Colorado -105.782067 39.550051
-Connecticut -73.087749 41.603221
-District of Columbia -77.033418 38.905985
-Delaware -75.52767 38.910832
-Florida -81.515754 27.664827
-Georgia -82.907123 32.157435
-Iowa -93.097702 41.78003
-Idaho -114.742041 44.068202
-Illinois -89.398528 40.622125
-Indiana -85.602364 40.551217
-Kansas -98.484246 39.011902
-Kentucky -84.270018 37.839333
-Louisiana -92.145024 31.244823
-Massachusetts -71.382437 42.407211
-Maryland -76.641271 39.045755
-Maine -69.445469 45.2573783
-Michigan -85.602364 44.314844
-Minnesota -94.6859 46.729553
-Missouri -91.831833 37.9642553
-Mississippi -89.398528 32.354668
-Montana -110.362566 46.879682
-North Carolina -79.0193 35.759573
-North Dakota -101.002012 47.551493
-Nebraska -99.901813 41.492537
-New Hampshire -71.572395 43.193852
-New Jersey -74.405661 40.058324
-New Mexico -105.032363 34.97273
-Nevada -116.419389 38.80261
-New York -74.217933 43.299428
-Ohio -82.907123 40.417287
-Oklahoma -97.092877 35.007752
-Oregon -120.554201 43.804133
-Pennsylvania -77.194525 41.203322
-Rhode Island -71.477429 41.580095
-South Carolina -81.163725 33.836081
-South Dakota -99.901813 43.969515
-Tennessee -86.580447 35.517491
-Texas -99.901813 31.968599
-Utah -111.093731 39.32098
-Virginia -78.656894 37.431573
-Vermont -72.577841 44.558803
-Washington -120.740139 47.751074
-Wisconsin -88.787868 43.78444
-West Virginia -80.454903 38.597626
-Wyoming -107.290284 43.075968
diff --git a/docs/src/Tutorials/files/t3_TDR_demand.svg b/docs/src/Tutorials/files/t3_TDR_demand.svg
new file mode 100644
index 0000000000..061d38acd0
--- /dev/null
+++ b/docs/src/Tutorials/files/t3_TDR_demand.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/output_14_0.svg b/docs/src/Tutorials/files/t3_demand.svg
similarity index 99%
rename from docs/src/Tutorials/files/output_14_0.svg
rename to docs/src/Tutorials/files/t3_demand.svg
index 155924914a..fa35647fba 100644
--- a/docs/src/Tutorials/files/output_14_0.svg
+++ b/docs/src/Tutorials/files/t3_demand.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/t3_ext_periods.svg b/docs/src/Tutorials/files/t3_ext_periods.svg
new file mode 100644
index 0000000000..6a81ac5765
--- /dev/null
+++ b/docs/src/Tutorials/files/t3_ext_periods.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/t3_kmeans.png b/docs/src/Tutorials/files/t3_kmeans.png
new file mode 100644
index 0000000000..6c4be18e35
Binary files /dev/null and b/docs/src/Tutorials/files/t3_kmeans.png differ
diff --git a/docs/src/Tutorials/files/t3_nokmeans.png b/docs/src/Tutorials/files/t3_nokmeans.png
new file mode 100644
index 0000000000..8d609d6be4
Binary files /dev/null and b/docs/src/Tutorials/files/t3_nokmeans.png differ
diff --git a/docs/src/Tutorials/files/t3_obj_vals.svg b/docs/src/Tutorials/files/t3_obj_vals.svg
new file mode 100644
index 0000000000..ebb2e84541
--- /dev/null
+++ b/docs/src/Tutorials/files/t3_obj_vals.svg
@@ -0,0 +1,82 @@
+
+
diff --git a/docs/src/Tutorials/files/t3_recon.svg b/docs/src/Tutorials/files/t3_recon.svg
new file mode 100644
index 0000000000..3293cdb9bc
--- /dev/null
+++ b/docs/src/Tutorials/files/t3_recon.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/t7_1p_none.svg b/docs/src/Tutorials/files/t7_1p_none.svg
new file mode 100644
index 0000000000..fbfcdd2077
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_1p_none.svg
@@ -0,0 +1,54 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_2p_load_mass.svg b/docs/src/Tutorials/files/t7_2p_load_mass.svg
new file mode 100644
index 0000000000..875d0bd8d2
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_2p_load_mass.svg
@@ -0,0 +1,87 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_2p_mass_none.svg b/docs/src/Tutorials/files/t7_2p_mass_none.svg
new file mode 100644
index 0000000000..e125229143
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_2p_mass_none.svg
@@ -0,0 +1,83 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_2p_mass_zero.svg b/docs/src/Tutorials/files/t7_2p_mass_zero.svg
new file mode 100644
index 0000000000..a15ad2af9d
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_2p_mass_zero.svg
@@ -0,0 +1,83 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_3p_csm_esr_mass.svg b/docs/src/Tutorials/files/t7_3p_csm_esr_mass.svg
new file mode 100644
index 0000000000..e4357ce94a
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_3p_csm_esr_mass.svg
@@ -0,0 +1,113 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_3p_mass_load_gen.svg b/docs/src/Tutorials/files/t7_3p_mass_load_gen.svg
new file mode 100644
index 0000000000..94c4f16fbe
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_3p_mass_load_gen.svg
@@ -0,0 +1,122 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_3p_slack.svg b/docs/src/Tutorials/files/t7_3p_slack.svg
new file mode 100644
index 0000000000..4f19315cb7
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_3p_slack.svg
@@ -0,0 +1,114 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_4p_esr_mass_load_gen.svg b/docs/src/Tutorials/files/t7_4p_esr_mass_load_gen.svg
new file mode 100644
index 0000000000..591c86cc81
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_4p_esr_mass_load_gen.svg
@@ -0,0 +1,154 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_4p_mcr_csm_esr_mass.svg b/docs/src/Tutorials/files/t7_4p_mcr_csm_esr_mass.svg
new file mode 100644
index 0000000000..f921c1b881
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_4p_mcr_csm_esr_mass.svg
@@ -0,0 +1,142 @@
+
+
diff --git a/docs/src/Tutorials/files/t7_mcr_esr_csm_load.svg b/docs/src/Tutorials/files/t7_mcr_esr_csm_load.svg
new file mode 100644
index 0000000000..948fe7b44d
--- /dev/null
+++ b/docs/src/Tutorials/files/t7_mcr_esr_csm_load.svg
@@ -0,0 +1,52 @@
+
+
diff --git a/docs/src/Tutorials/files/t8_cap.svg b/docs/src/Tutorials/files/t8_cap.svg
new file mode 100644
index 0000000000..a31c29aa48
--- /dev/null
+++ b/docs/src/Tutorials/files/t8_cap.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/t8_cost.svg b/docs/src/Tutorials/files/t8_cost.svg
new file mode 100644
index 0000000000..9ef410f7a0
--- /dev/null
+++ b/docs/src/Tutorials/files/t8_cost.svg
@@ -0,0 +1,291 @@
+
+
diff --git a/docs/src/Tutorials/files/t8_emm1.svg b/docs/src/Tutorials/files/t8_emm1.svg
new file mode 100644
index 0000000000..55cbae0e1d
--- /dev/null
+++ b/docs/src/Tutorials/files/t8_emm1.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/t8_emm2.svg b/docs/src/Tutorials/files/t8_emm2.svg
new file mode 100644
index 0000000000..025757e6c3
--- /dev/null
+++ b/docs/src/Tutorials/files/t8_emm2.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/src/Tutorials/files/t8_emm_comp.svg b/docs/src/Tutorials/files/t8_emm_comp.svg
new file mode 100644
index 0000000000..2fc9cc22b6
--- /dev/null
+++ b/docs/src/Tutorials/files/t8_emm_comp.svg
@@ -0,0 +1,66 @@
+
+
diff --git a/docs/src/Tutorials/files/t8_heatmap.svg b/docs/src/Tutorials/files/t8_heatmap.svg
new file mode 100644
index 0000000000..491da7bfcc
--- /dev/null
+++ b/docs/src/Tutorials/files/t8_heatmap.svg
@@ -0,0 +1,371 @@
+
+
diff --git a/docs/src/Tutorials/files/t8_resource_allocation.svg b/docs/src/Tutorials/files/t8_resource_allocation.svg
new file mode 100644
index 0000000000..b449044824
--- /dev/null
+++ b/docs/src/Tutorials/files/t8_resource_allocation.svg
@@ -0,0 +1,82 @@
+
+
diff --git a/docs/src/Tutorials/files/usingIJulia.png b/docs/src/Tutorials/files/usingIJulia.png
new file mode 100644
index 0000000000..e33b5da3de
Binary files /dev/null and b/docs/src/Tutorials/files/usingIJulia.png differ
diff --git a/docs/src/User_Guide/generate_alternatives.md b/docs/src/User_Guide/generate_alternatives.md
index 580629af75..f0fe836362 100644
--- a/docs/src/User_Guide/generate_alternatives.md
+++ b/docs/src/User_Guide/generate_alternatives.md
@@ -4,12 +4,10 @@ GenX includes a modeling to generate alternatives (MGA) package that can be used
1. Add a `Resource_Type` column in all the resource `.csv` files denoting the type of each technology.
2. Add a `MGA` column in all the resource `.csv` files denoting the availability of the technology.
-3. Set the `ModelingToGenerateAlternatives` flag in the `GenX_Settings.yml` file to 1.
-4. Set the `ModelingtoGenerateAlternativeSlack` flag in the `GenX_Settings.yml` file to the desirable level of slack.
-5. Create a `Rand_mga_objective_coefficients.csv` file to provide random objective function coefficients for each MGA iteration.
+3. Set the `ModelingToGenerateAlternatives` flag in the `genx_settings.yml` file to 1.
+4. Set the `ModelingtoGenerateAlternativeSlack` flag in the `genx_settings.yml` file to the desirable level of slack.
+5. Set the `ModelingToGenerateAlternativesIterations` flag to half the total number of desired solutions, as each iteration provides 2 solutions.
+6. Set the `MGAAnnualGeneration` flag in the `genx_settings.yml` file to the desired MGA formulation.
+7. Solve the model using the `Run.jl` file.
-For each iteration, number of rows in the `Rand_mga_objective_coefficients`.csv file represents the number of distinct technology types while number of columns represent the number of model zones.
-
-Solve the model using `Run.jl` file.
-
-Results from the MGA algorithm would be saved in MGA_max and MGA_min folders in the `Example_Systems/` folder.
\ No newline at end of file
+Results from the MGA algorithm will be saved in the `MGA_max` and `MGA_min` folders in the case folder.
\ No newline at end of file
diff --git a/docs/src/User_Guide/model_input.md b/docs/src/User_Guide/model_input.md
index af198ad780..c4812683b7 100644
--- a/docs/src/User_Guide/model_input.md
+++ b/docs/src/User_Guide/model_input.md
@@ -1,12 +1,12 @@
# GenX Inputs
-All input files are in CSV format. Running the GenX model requires a minimum of five **mandatory input files**:
+All input files are in CSV format. Running the GenX model requires a minimum of four **mandatory input files** and one folder, which consists of CSV files for generating resources:
1. Fuels\_data.csv: specify fuel type, CO2 emissions intensity, and time-series of fuel prices.
2. Network.csv: specify network topology, transmission fixed costs, capacity and loss parameters.
3. Demand\_data.csv: specify time-series of demand profiles for each model zone, weights for each time step, demand shedding costs, and optional time domain reduction parameters.
4. Generators\_variability.csv: specify time-series of capacity factor/availability for each resource.
-5. Generators\_data.csv: specify cost and performance data for generation, storage and demand flexibility resources.
+5. Resources folder: specify cost and performance data for generation, storage and demand flexibility resources.
Additionally, the user may need to specify eight more **settings-specific** input files based on model configuration and type of scenarios of interest:
1. Operational\_reserves.csv: specify operational reserve requirements as a function of demand and renewables generation and penalty for not meeting these requirements.
@@ -202,9 +202,9 @@ Each file contains cost and performance parameters for various generators and ot
|PWFU\_Heat\_Rate\_MMBTU\_per\_MWh\_*i| The slope of fuel usage function of the segment i.|
|PWFU\_Load\_Point\_MW\_*i| The end of segment i (MW).|
|**Multi-fuel parameters**|
-|MULTI_FUELS | {0, 1}, Flag to indicate membership in set of thermal resources that can burn multiple fuels at the same time (e.g., natural gas combined cycle cofiring with hydrogen, coal power plant cofiring with natural gas.|
-||MULTI_FUELS = 0: Not part of set (default) |
-||MULTI_FUELS = 1: Resources that can use fuel blending. |
+|MULTI\_FUELS | {0, 1}, Flag to indicate membership in set of thermal resources that can burn multiple fuels at the same time (e.g., natural gas combined cycle cofiring with hydrogen, coal power plant cofiring with natural gas).|
+||MULTI\_FUELS = 0: Not part of set (default) |
+||MULTI\_FUELS = 1: Resources that can use fuel blending. |
|Num\_Fuels |Number of fuels that a multi-fuel generator (MULTI_FUELS = 1) can use at the same time. The length of ['Fuel1', 'Fuel2', ...] should be equal to 'Num\_Fuels'. Each fuel will requires its corresponding heat rate, min cofire level, and max cofire level. |
|Fuel1 |Frist fuel needed for a mulit-fuel generator (MULTI_FUELS = 1). The names should match with the ones in the `Fuels_data.csv`. |
|Fuel2 |Second fuel needed for a mulit-fuel generator (MULTI_FUELS = 1). The names should match with the ones in the `Fuels_data.csv`. |
@@ -329,9 +329,9 @@ Each file contains cost and performance parameters for various generators and ot
---
|**Column Name** | **Description**|
| :------------ | :-----------|
-|Hydrogen_MWh_Per_Tonne| Electrolyzer efficiency in megawatt-hours (MWh) of electricity per metric tonne of hydrogen produced (MWh/t)|
-|Electrolyzer_Min_kt| Minimum annual quantity of hydrogen that must be produced by electrolyzer in kilotonnes (kt)|
-|Hydrogen_Price_Per_Tonne| Price (or value) of hydrogen per metric tonne ($/t)|
+|Hydrogen\_MWh\_Per\_Tonne| Electrolyzer efficiency in megawatt-hours (MWh) of electricity per metric tonne of hydrogen produced (MWh/t)|
+|Electrolyzer\_Min\_kt| Minimum annual quantity of hydrogen that must be produced by electrolyzer in kilotonnes (kt)|
+|Hydrogen\_Price\_Per\_Tonne| Price (or value) of hydrogen per metric tonne ($/t)|
|Min\_Power |[0,1], The minimum generation level for a unit as a fraction of total capacity. This value cannot be higher than the smallest time-dependent CF value for a resource in `Generators_variability.csv`.|
|Ramp\_Up\_Percentage |[0,1], Maximum increase in power output from between two periods (typically hours), reported as a fraction of nameplate capacity.|
|Ramp\_Dn\_Percentage |[0,1], Maximum decrease in power output from between two periods (typically hours), reported as a fraction of nameplate capacity.|
@@ -350,25 +350,25 @@ Each co-located VRE and storage resource can be easily configured to contain eit
|WIND | {0, 1}, Flag to indicate membership in the set of co-located VRE-storage resources with a wind component.|
||WIND = 0: Not part of set (default) |
||WIND = 1: If the co-located VRE-storage resource can produce wind energy. ||
-|STOR_DC_DISCHARGE | {0, 1, 2}, Flag to indicate membership in set of co-located VRE-storage resources that discharge behind the meter and through the inverter (DC).|
-||STOR_DC_DISCHARGE = 0: Not part of set (default) |
-||STOR_DC_DISCHARGE = 1: If the co-located VRE-storage resource contains symmetric charge/discharge power capacity with charging capacity equal to discharging capacity (e.g. lithium-ion battery storage). Note that if STOR_DC_DISCHARGE = 1, STOR_DC_CHARGE = 1.|
-||STOR_DC_DISCHARGE = 2: If the co-located VRE-storage resource has asymmetric discharge capacities using distinct processes (e.g. hydrogen electrolysis, storage, and conversion to power using fuel cell or combustion turbine).|
-|STOR_DC_CHARGE | {0, 1, 2}, Flag to indicate membership in set of co-located VRE-storage resources that charge through the inverter (DC).|
-||STOR_DC_CHARGE = 0: Not part of set (default) |
-||STOR_DC_CHARGE = 1: If the co-located VRE-storage resource contains symmetric charge/discharge power capacity with charging capacity equal to discharging capacity (e.g. lithium-ion battery storage). Note that if STOR_DC_CHARGE = 1, STOR_DC_DISCHARGE = 1.|
-||STOR_DC_CHARGE = 2: If the co-located VRE-storage resource has asymmetric charge capacities using distinct processes (e.g. hydrogen electrolysis, storage, and conversion to power using fuel cell or combustion turbine).|
-|STOR_AC_DISCHARGE | {0, 1, 2}, Flag to indicate membership in set of co-located VRE-storage resources that discharges AC.|
-||STOR_AC_DISCHARGE = 0: Not part of set (default) |
-||STOR_AC_DISCHARGE = 1: If the co-located VRE-storage resource contains symmetric charge/discharge power capacity with charging capacity equal to discharging capacity (e.g. lithium-ion battery storage). Note that if STOR_AC_DISCHARGE = 1, STOR_AC_CHARGE = 1.|
-||STOR_AC_DISCHARGE = 2: If the co-located VRE-storage resource has asymmetric discharge capacities using distinct processes (e.g. hydrogen electrolysis, storage, and conversion to power using fuel cell or combustion turbine).|
-|STOR_AC_CHARGE | {0, 1, 2}, Flag to indicate membership in set of co-located VRE-storage resources that charge AC.|
-||STOR_AC_CHARGE = 0: Not part of set (default) |
-||STOR_AC_CHARGE = 1: If the co-located VRE-storage resource contains symmetric charge/discharge power capacity with charging capacity equal to discharging capacity (e.g. lithium-ion battery storage). Note that if STOR_AC_CHARGE = 1, STOR_AC_DISCHARGE = 1.|
-||STOR_AC_CHARGE = 2: If the co-located VRE-storage resource has asymmetric charge capacities using distinct processes (e.g. hydrogen electrolysis, storage, and conversion to power using fuel cell or combustion turbine).|
-|LDS_VRE_STOR | {0, 1}, Flag to indicate the co-located VRE-storage resources eligible for long duration storage constraints with inter period linkage (e.g., reservoir hydro, hydrogen storage). |
-||LDS_VRE_STOR = 0: Not part of set (default) |
-||LDS_VRE_STOR = 1: Long duration storage resources|
+|STOR\_DC\_DISCHARGE | {0, 1, 2}, Flag to indicate membership in set of co-located VRE-storage resources that discharge behind the meter and through the inverter (DC).|
+||STOR\_DC\_DISCHARGE = 0: Not part of set (default) |
+||STOR\_DC\_DISCHARGE = 1: If the co-located VRE-storage resource contains symmetric charge/discharge power capacity with charging capacity equal to discharging capacity (e.g. lithium-ion battery storage). Note that if STOR\_DC\_DISCHARGE = 1, STOR\_DC\_CHARGE = 1.|
+||STOR\_DC\_DISCHARGE = 2: If the co-located VRE-storage resource has asymmetric discharge capacities using distinct processes (e.g. hydrogen electrolysis, storage, and conversion to power using fuel cell or combustion turbine).|
+|STOR\_DC\_CHARGE | {0, 1, 2}, Flag to indicate membership in set of co-located VRE-storage resources that charge through the inverter (DC).|
+||STOR\_DC\_CHARGE = 0: Not part of set (default) |
+||STOR\_DC\_CHARGE = 1: If the co-located VRE-storage resource contains symmetric charge/discharge power capacity with charging capacity equal to discharging capacity (e.g. lithium-ion battery storage). Note that if STOR\_DC\_CHARGE = 1, STOR\_DC\_DISCHARGE = 1.|
+||STOR\_DC\_CHARGE = 2: If the co-located VRE-storage resource has asymmetric charge capacities using distinct processes (e.g. hydrogen electrolysis, storage, and conversion to power using fuel cell or combustion turbine).|
+|STOR\_AC\_DISCHARGE | {0, 1, 2}, Flag to indicate membership in set of co-located VRE-storage resources that discharges AC.|
+||STOR\_AC\_DISCHARGE = 0: Not part of set (default) |
+||STOR\_AC\_DISCHARGE = 1: If the co-located VRE-storage resource contains symmetric charge/discharge power capacity with charging capacity equal to discharging capacity (e.g. lithium-ion battery storage). Note that if STOR\_AC\_DISCHARGE = 1, STOR\_AC\_CHARGE = 1.|
+||STOR\_AC\_DISCHARGE = 2: If the co-located VRE-storage resource has asymmetric discharge capacities using distinct processes (e.g. hydrogen electrolysis, storage, and conversion to power using fuel cell or combustion turbine).|
+|STOR\_AC\_CHARGE | {0, 1, 2}, Flag to indicate membership in set of co-located VRE-storage resources that charge AC.|
+||STOR\_AC\_CHARGE = 0: Not part of set (default) |
+||STOR\_AC\_CHARGE = 1: If the co-located VRE-storage resource contains symmetric charge/discharge power capacity with charging capacity equal to discharging capacity (e.g. lithium-ion battery storage). Note that if STOR\_AC\_CHARGE = 1, STOR\_AC\_DISCHARGE = 1.|
+||STOR\_AC\_CHARGE = 2: If the co-located VRE-storage resource has asymmetric charge capacities using distinct processes (e.g. hydrogen electrolysis, storage, and conversion to power using fuel cell or combustion turbine).|
+|LDS\_VRE\_STOR | {0, 1}, Flag to indicate the co-located VRE-storage resources eligible for long duration storage constraints with inter period linkage (e.g., reservoir hydro, hydrogen storage). |
+||LDS\_VRE\_STOR = 0: Not part of set (default) |
+||LDS\_VRE\_STOR = 1: Long duration storage resources|
|**Existing technology capacity**|
|Existing\_Cap\_MW |The existing AC grid connection capacity in MW. |
|Existing\_Cap\_MWh |The existing capacity of storage in MWh. |
@@ -426,8 +426,8 @@ Each co-located VRE and storage resource can be easily configured to contain eit
|**Technical performance parameters**|
|Self\_Disch |[0,1], The power loss of storage component of each resource per hour (fraction loss per hour). |
|EtaInverter |[0,1], Inverter efficiency representing losses from converting DC to AC power and vice versa for each technology |
-|Inverter_Ratio_Solar |-1 (default) - no required ratio between solar PV capacity built to inverter capacity built. If non-negative, represents the ratio of solar PV capacity built to inverter capacity built.|
-|Inverter_Ratio_Wind |-1 (default) - no required ratio between wind capacity built to grid connection capacity built. If non-negative, represents the ratio of wind capacity built to grid connection capacity built.|
+|Inverter\_Ratio_Solar |-1 (default) - no required ratio between solar PV capacity built to inverter capacity built. If non-negative, represents the ratio of solar PV capacity built to inverter capacity built.|
+|Inverter\_Ratio_Wind |-1 (default) - no required ratio between wind capacity built to grid connection capacity built. If non-negative, represents the ratio of wind capacity built to grid connection capacity built.|
|Power\_to\_Energy\_AC |The power to energy conversion for the storage component for AC discharging/charging of symmetric storage resources.|
|Power\_to\_Energy\_DC |The power to energy conversion for the storage component for DC discharging/charging of symmetric storage resources.|
|Eff\_Up\_DC |[0,1], Efficiency of DC charging storage – applies to storage technologies (all STOR types). |
@@ -463,7 +463,7 @@ The following table describes the columns in each of these four files.
This policy is applied when if `EnergyShareRequirement > 0` in the settings file. \* corresponds to the ith row of the file `Energy_share_requirement.csv`.
-##### Table 12: Energy share requirement policy parameters
+##### Table 12: Energy share requirement policy parameters in Resource_energy_share_requirement.csv
---
|**Column Name** | **Description**|
| :------------ | :-----------|
@@ -478,44 +478,43 @@ This policy is applied when if `EnergyShareRequirement > 0` in the settings file
This policy is applied when if `MinCapReq = 1` in the settings file. \* corresponds to the ith row of the file `Minimum_capacity_requirement.csv`.
-##### Table 13: Minimum capacity requirement policy parameters
+##### Table 13: Minimum capacity requirement policy parameters in Resource_minimum_capacity_requirement.csv
---
|**Column Name** | **Description**|
| :------------ | :-----------|
|Resource| Resource name corresponding to a resource in one of the resource data files described above.|
-|Min_Cap\_*| Flag to indicate which resources are considered for the Minimum Capacity Requirement constraint.|
+|Min\_Cap\_*| Flag to indicate which resources are considered for the Minimum Capacity Requirement constraint.|
|**co-located VRE-STOR resources only**|
-|Min_Cap_Solar\_*| Eligibility of resources with a solar PV component (multiplied by the inverter efficiency for AC terms) to participate in Minimum Technology Carveout constraint.|
-|Min_Cap_Wind\_*| Eligibility of resources with a wind component to participate in Minimum Technology Carveout constraint (AC terms).|
-|Min_Cap_Stor\_*| Eligibility of resources with a storage component to participate in Minimum Technology Carveout constraint (discharge capacity in AC terms).|
+|Min\_Cap\_Solar\_*| Eligibility of resources with a solar PV component (multiplied by the inverter efficiency for AC terms) to participate in Minimum Technology Carveout constraint.|
+|Min\_Cap\_Wind\_*| Eligibility of resources with a wind component to participate in Minimum Technology Carveout constraint (AC terms).|
+|Min\_Cap\_Stor\_*| Eligibility of resources with a storage component to participate in Minimum Technology Carveout constraint (discharge capacity in AC terms).|
This policy is applied when if `MaxCapReq = 1` in the settings file. \* corresponds to the ith row of the file `Maximum_capacity_requirement.csv`.
-##### Table 14: Maximum capacity requirement policy parameters
+##### Table 14: Maximum capacity requirement policy parameters in Resource_maximum_capacity_requirement.csv
---
|**Column Name** | **Description**|
| :------------ | :-----------|
|Resource| Resource name corresponding to a resource in one of the resource data files described above.|
-|Max_Cap\_*| Flag to indicate which resources are considered for the Maximum Capacity Requirement constraint.|
+|Max\_Cap\_*| Flag to indicate which resources are considered for the Maximum Capacity Requirement constraint.|
|**co-located VRE-STOR resources only**|
-|Max_Cap_Solar\_*| Eligibility of resources with a solar PV component (multiplied by the inverter efficiency for AC terms) to participate in Maximum Technology Carveout constraint.
-|Max_Cap_Wind\_*| Eligibility of resources with a wind component to participate in Maximum Technology Carveout constraint (AC terms).
-|Max_Cap_Stor\_*| Eligibility of resources with a storage component to participate in Maximum Technology Carveout constraint (discharge capacity in AC terms).|
+|Max\_Cap\_Solar\_*| Eligibility of resources with a solar PV component (multiplied by the inverter efficiency for AC terms) to participate in Maximum Technology Carveout constraint.|
+|Max\_Cap\_Wind\_*| Eligibility of resources with a wind component to participate in Maximum Technology Carveout constraint (AC terms).|
+|Max\_Cap\_Stor\_*| Eligibility of resources with a storage component to participate in Maximum Technology Carveout constraint (discharge capacity in AC terms).|
This policy is applied when if `CapacityReserveMargin > 0` in the settings file. \* corresponds to the ith row of the file `Capacity_reserve_margin.csv`.
-##### Table 15: Capacity reserve margin policy parameters
+##### Table 15: Capacity reserve margin policy parameters in Resource_capacity_reserve_margin.csv
---
|**Column Name** | **Description**|
| :------------ | :-----------|
|Resource| Resource name corresponding to a resource in one of the resource data files described above.|
-|Eligible_Cap_Res\_*| Fraction of the resource capacity eligible for contributing to the capacity reserve margin constraint (e.g. derate factor).|
+|Derating\_Factor\_*| Fraction of the resource capacity eligible for contributing to the capacity reserve margin constraint (e.g. derate factor).|
##### Additional module-related columns for all resources
In addition to the files described above, the `resources` folder can contain additional files that are used to specify attributes for specific resources and modules. Currently, the following files are supported:
-1) `Resource_multistage_data.csv`: mandatory if `MultiStage = 1` in the settings file
-
+`Resource_multistage_data.csv`: mandatory if `MultiStage = 1` in the settings file
!!! warning
The first column of each additional module file must contain the resource name corresponding to a resource in one of the resource data files described above. Note that the order of resources in these files is not important.
@@ -560,17 +559,20 @@ In addition to the files described above, the `resources` folder can contain add
This file contains the time-series of capacity factors / availability of each resource included in the resource `.csv` file in the `resources` folder for each time step (e.g. hour) modeled.
-• First column: The first column contains the time index of each row (starting in the second row) from 1 to N.
-
-• Second column onwards: Resources are listed from the second column onward with headers matching each resource name in the resource `.csv` file in the `resources` folder in any order. The availability for each resource at each time step is defined as a fraction of installed capacity and should be between 0 and 1. Note that for this reason, resource names specified in the resource `.csv` file must be unique. Note that for Hydro reservoir resources (i.e. `Hydro.csv`), values in this file correspond to inflows (in MWhs) to the hydro reservoir as a fraction of installed power capacity, rather than hourly capacity factor. Note that for co-located VRE and storage resources, solar PV and wind resource profiles should not be located in this file but rather in separate variability files (these variabilities can be in the `Generators_variability.csv` if time domain reduction functionalities will be utilized because the time domain reduction functionalities will separate the files after the clustering is completed).
+1) First column: The first column contains the time index of each row (starting in the second row) from 1 to N.
+2) Second column onwards: Resources are listed from the second column onward with headers matching each resource name in the resource `.csv` file in the `resources` folder in any order. The availability for each resource at each time step is defined as a fraction of installed capacity and should be between 0 and 1. Note that for this reason, resource names specified in the resource `.csv` file must be unique. Note that for Hydro reservoir resources (i.e. `Hydro.csv`), values in this file correspond to inflows (in MWhs) to the hydro reservoir as a fraction of installed power capacity, rather than hourly capacity factor. Note that for co-located VRE and storage resources, solar PV and wind resource profiles should not be located in this file but rather in separate variability files (these variabilities can be in the `Generators_variability.csv` if time domain reduction functionalities will be utilized because the time domain reduction functionalities will separate the files after the clustering is completed).
+###### Table 17: Structure of the Generators\_variability.csv file
+---
+|**Column Name** | **Description**|
+| :------------ | :-----------|
+|Resource| Resource name corresponding to a resource in one of the resource data files described above.|
|Self\_Disch |[0,1], The power loss of storage technologies per hour (fraction loss per hour)- only applies to storage techs. Note that for co-located VRE-STOR resources, this value applies to the storage component of each resource.|
|Min\_Power |[0,1], The minimum generation level for a unit as a fraction of total capacity. This value cannot be higher than the smallest time-dependent CF value for a resource in `Generators_variability.csv`. Applies to thermal plants, and reservoir hydro resource (`HYDRO = 1`).|
|Ramp\_Up\_Percentage |[0,1], Maximum increase in power output from between two periods (typically hours), reported as a fraction of nameplate capacity. Applies to thermal plants, and reservoir hydro resource (`HYDRO = 1`).|
|Ramp\_Dn\_Percentage |[0,1], Maximum decrease in power output from between two periods (typically hours), reported as a fraction of nameplate capacity. Applies to thermal plants, and reservoir hydro resource (`HYDRO = 1`).|
|Eff\_Up |[0,1], Efficiency of charging storage – applies to storage technologies (all STOR types except co-located storage resources).|
|Eff\_Down |[0,1], Efficiency of discharging storage – applies to storage technologies (all STOR types except co-located storage resources). |
-
|Min\_Duration |Specifies the minimum ratio of installed energy to discharged power capacity that can be installed. Applies to STOR types 1 and 2 (hours). Note that for co-located VRE-STOR resources, this value does not apply. |
|Max\_Duration |Specifies the maximum ratio of installed energy to discharged power capacity that can be installed. Applies to STOR types 1 and 2 (hours). Note that for co-located VRE-STOR resources, this value does not apply. |
|Max\_Flexible\_Demand\_Delay |Maximum number of hours that demand can be deferred or delayed. Applies to resources with FLEX type 1 (hours). |
@@ -599,7 +601,7 @@ This file contains the time-series of capacity factors / availability of the win
This file includes parameter inputs needed to model time-dependent procurement of regulation and spinning reserves. This file is needed if `OperationalReserves` flag is activated in the YAML file `genx_settings.yml`.
-###### Table 7: Structure of the Operational_reserves.csv file
+###### Table 18: Structure of the Operational_reserves.csv file
---
|**Column Name** | **Description**|
| :------------ | :-----------|
@@ -621,7 +623,7 @@ This file contains inputs specifying minimum energy share requirement policies,
Note: this file should use the same region name as specified in the the resource `.csv` file (inside the `Resource`).
-###### Table 8: Structure of the Energy\_share\_requirement.csv file
+###### Table 19: Structure of the Energy\_share\_requirement.csv file
---
|**Column Name** | **Description**|
| :------------ | :-----------|
@@ -635,7 +637,7 @@ Note: this file should use the same region name as specified in the the resource
This file contains inputs specifying CO2 emission limits policies (e.g. emissions cap and permit trading programs). This file is needed if `CO2Cap` flag is activated in the YAML file `genx_settings.yml`. `CO2Cap` flag set to 1 represents mass-based (tCO2 ) emission target. `CO2Cap` flag set to 2 is specified when emission target is given in terms of rate (tCO2/MWh) and is based on total demand met. `CO2Cap` flag set to 3 is specified when emission target is given in terms of rate (tCO2 /MWh) and is based on total generation.
-###### Table 9: Structure of the CO2\_cap.csv file
+###### Table 20: Structure of the CO2\_cap.csv file
---
|**Column Name** | **Description**|
| :------------ | :-----------|
@@ -652,7 +654,7 @@ This file contains the regional capacity reserve margin requirements. This file
Note: this file should use the same region name as specified in the resource `.csv` file (inside the `Resource`).
-###### Table 10: Structure of the Capacity\_reserve\_margin.csv file
+###### Table 21: Structure of the Capacity\_reserve\_margin.csv file
---
|**Column Name** | **Description**|
| :------------ | :-----------|
@@ -666,7 +668,7 @@ Note: this file should use the same region name as specified in the resource `.c
This file contains the minimum capacity carve-out requirement to be imposed (e.g. a storage capacity mandate or offshore wind capacity mandate). This file is needed if the `MinCapReq` flag has a non-zero value in the YAML file `genx_settings.yml`.
-###### Table 11: Structure of the Minimum\_capacity\_requirement.csv file
+###### Table 22: Structure of the Minimum\_capacity\_requirement.csv file
---
|**Column Name** | **Description**|
| :------------ | :-----------|
@@ -682,7 +684,7 @@ Some of the columns specified in the input files in Section 2.2 and 2.1 are not
This contains the maximum capacity limits to be imposed (e.g. limits on total deployment of solar, wind, or batteries in the system as a whole or in certain collections of zones).
It is required if the `MaxCapReq` flag has a non-zero value in `genx_settings.yml`.
-###### Table 12: Structure of the Maximum\_capacity\_requirement.csv file
+###### Table 23: Structure of the Maximum\_capacity\_requirement.csv file
---
|**Column Name** | **Description**|
| :------------ | :-----------|
@@ -697,7 +699,7 @@ Some of the columns specified in the input files in Section 2.2 and 2.1 are not
This file contains the settings parameters required to run the Method of Morris algorithm in GenX. This file is needed if the `MethodofMorris` flag is ON in the YAML file `genx_settings.yml`.
-###### Table 13: Structure of the Method\_of\_morris\_range.csv file
+###### Table 24: Structure of the Method\_of\_morris\_range.csv file
---
|**Column Name** | **Description**|
| :------------ | :-----------|
diff --git a/docs/src/developer_guide.md b/docs/src/developer_guide.md
index 584a60458b..bca5d37293 100644
--- a/docs/src/developer_guide.md
+++ b/docs/src/developer_guide.md
@@ -12,6 +12,19 @@ GenX is an open-source project, and we welcome contributions from the community.
The following sections describe in more detail how to work with GenX resources and how to add a new resource to GenX.
+## Style guide
+GenX project follows the [SciML Style Guide](https://github.com/SciML/SciMLStyle). We encourage contributors to follow this style guide when submitting code changes to GenX. Before submitting a new PR, please run the following command to format a file or a directory:
+```julia
+julia> using JuliaFormatter
+julia> format("path_to_directory", SciMLStyle(), verbose=true)
+```
+or
+```julia
+julia> using JuliaFormatter
+julia> format("path_to_file.jl", SciMLStyle(), verbose=true)
+```
+The GitHub repository of GenX is configured to verify the code style of each PR and will automatically provide comments to assist you in formatting the code according to the style guide.
+
## GenX resources
In GenX, a resource is defined as an instance of a `GenX resource type`, a subtype of an `AbstractResource`. This allows the code to use multiple dispatch and define a common interface (behavior) for all resources in the code.
diff --git a/example_systems/1_three_zones/Run.jl b/example_systems/1_three_zones/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/1_three_zones/Run.jl
+++ b/example_systems/1_three_zones/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/1_three_zones/resources/Vre.csv b/example_systems/1_three_zones/resources/Vre.csv
index c5b8c5ea95..0183631d73 100644
--- a/example_systems/1_three_zones/resources/Vre.csv
+++ b/example_systems/1_three_zones/resources/Vre.csv
@@ -1,5 +1,5 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Fuel,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
-MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,9.13,None,0,0,0,0,MA,1
-CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,9.12,None,0,0,0,0,CT,1
-CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,9.16,None,0,0,0,0,CT,1
-ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,9.12,None,0,0,0,0,ME,1
\ No newline at end of file
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
+MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,MA,1
+CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,CT,1
+CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,CT,1
+ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/2_three_zones_w_electrolyzer/Run.jl b/example_systems/2_three_zones_w_electrolyzer/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/2_three_zones_w_electrolyzer/Run.jl
+++ b/example_systems/2_three_zones_w_electrolyzer/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/2_three_zones_w_electrolyzer/resources/Vre.csv b/example_systems/2_three_zones_w_electrolyzer/resources/Vre.csv
index f53a8ad0c1..1371042441 100644
--- a/example_systems/2_three_zones_w_electrolyzer/resources/Vre.csv
+++ b/example_systems/2_three_zones_w_electrolyzer/resources/Vre.csv
@@ -1,5 +1,5 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Existing_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Qualified_Hydrogen_Supply,region,cluster
-MA_solar_pv,1,1,1,0,0,85300,18760,0,9.13,1,MA,1
-CT_onshore_wind,2,1,1,0,0,97200,43205,0.1,9.12,1,CT,1
-CT_solar_pv,2,1,1,0,0,85300,18760,0,9.16,1,CT,1
-ME_onshore_wind,3,1,1,0,0,97200,43205,0.1,9.12,1,ME,1
\ No newline at end of file
+Resource,Zone,Num_VRE_Bins,New_Build,Existing_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Qualified_Hydrogen_Supply,region,cluster
+MA_solar_pv,1,1,1,0,0,85300,18760,0,1,MA,1
+CT_onshore_wind,2,1,1,0,0,97200,43205,0.1,1,CT,1
+CT_solar_pv,2,1,1,0,0,85300,18760,0,1,CT,1
+ME_onshore_wind,3,1,1,0,0,97200,43205,0.1,1,ME,1
\ No newline at end of file
diff --git a/example_systems/3_three_zone_w_co2_capture/Run.jl b/example_systems/3_three_zone_w_co2_capture/Run.jl
deleted file mode 100644
index 7ce1891834..0000000000
--- a/example_systems/3_three_zone_w_co2_capture/Run.jl
+++ /dev/null
@@ -1,3 +0,0 @@
-using GenX
-
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
diff --git a/example_systems/3_three_zone_w_co2_capture/resources/Vre.csv b/example_systems/3_three_zone_w_co2_capture/resources/Vre.csv
deleted file mode 100644
index b053e3eb8a..0000000000
--- a/example_systems/3_three_zone_w_co2_capture/resources/Vre.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
-MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,9.13,0,0,0,0,MA,1
-CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,CT,1
-CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,9.16,0,0,0,0,CT,1
-ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/3_three_zone_w_co2_capture/README.md b/example_systems/3_three_zones_w_co2_capture/README.md
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/README.md
rename to example_systems/3_three_zones_w_co2_capture/README.md
diff --git a/example_systems/8_three_zone_w_retrofit/Run.jl b/example_systems/3_three_zones_w_co2_capture/Run.jl
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/Run.jl
rename to example_systems/3_three_zones_w_co2_capture/Run.jl
diff --git a/example_systems/3_three_zone_w_co2_capture/policies/CO2_cap.csv b/example_systems/3_three_zones_w_co2_capture/policies/CO2_cap.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/policies/CO2_cap.csv
rename to example_systems/3_three_zones_w_co2_capture/policies/CO2_cap.csv
diff --git a/example_systems/3_three_zone_w_co2_capture/policies/Minimum_capacity_requirement.csv b/example_systems/3_three_zones_w_co2_capture/policies/Minimum_capacity_requirement.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/policies/Minimum_capacity_requirement.csv
rename to example_systems/3_three_zones_w_co2_capture/policies/Minimum_capacity_requirement.csv
diff --git a/example_systems/3_three_zone_w_co2_capture/resources/Storage.csv b/example_systems/3_three_zones_w_co2_capture/resources/Storage.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/resources/Storage.csv
rename to example_systems/3_three_zones_w_co2_capture/resources/Storage.csv
diff --git a/example_systems/3_three_zone_w_co2_capture/resources/Thermal.csv b/example_systems/3_three_zones_w_co2_capture/resources/Thermal.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/resources/Thermal.csv
rename to example_systems/3_three_zones_w_co2_capture/resources/Thermal.csv
diff --git a/example_systems/3_three_zones_w_co2_capture/resources/Vre.csv b/example_systems/3_three_zones_w_co2_capture/resources/Vre.csv
new file mode 100644
index 0000000000..0183631d73
--- /dev/null
+++ b/example_systems/3_three_zones_w_co2_capture/resources/Vre.csv
@@ -0,0 +1,5 @@
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
+MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,MA,1
+CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,CT,1
+CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,CT,1
+ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/3_three_zone_w_co2_capture/resources/policy_assignments/Resource_minimum_capacity_requirement.csv b/example_systems/3_three_zones_w_co2_capture/resources/policy_assignments/Resource_minimum_capacity_requirement.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/resources/policy_assignments/Resource_minimum_capacity_requirement.csv
rename to example_systems/3_three_zones_w_co2_capture/resources/policy_assignments/Resource_minimum_capacity_requirement.csv
diff --git a/example_systems/3_three_zone_w_co2_capture/settings/clp_settings.yml b/example_systems/3_three_zones_w_co2_capture/settings/clp_settings.yml
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/settings/clp_settings.yml
rename to example_systems/3_three_zones_w_co2_capture/settings/clp_settings.yml
diff --git a/example_systems/3_three_zone_w_co2_capture/settings/cplex_settings.yml b/example_systems/3_three_zones_w_co2_capture/settings/cplex_settings.yml
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/settings/cplex_settings.yml
rename to example_systems/3_three_zones_w_co2_capture/settings/cplex_settings.yml
diff --git a/example_systems/3_three_zone_w_co2_capture/settings/genx_settings.yml b/example_systems/3_three_zones_w_co2_capture/settings/genx_settings.yml
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/settings/genx_settings.yml
rename to example_systems/3_three_zones_w_co2_capture/settings/genx_settings.yml
diff --git a/example_systems/3_three_zone_w_co2_capture/settings/gurobi_settings.yml b/example_systems/3_three_zones_w_co2_capture/settings/gurobi_settings.yml
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/settings/gurobi_settings.yml
rename to example_systems/3_three_zones_w_co2_capture/settings/gurobi_settings.yml
diff --git a/example_systems/3_three_zone_w_co2_capture/settings/highs_settings.yml b/example_systems/3_three_zones_w_co2_capture/settings/highs_settings.yml
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/settings/highs_settings.yml
rename to example_systems/3_three_zones_w_co2_capture/settings/highs_settings.yml
diff --git a/example_systems/3_three_zone_w_co2_capture/settings/time_domain_reduction_settings.yml b/example_systems/3_three_zones_w_co2_capture/settings/time_domain_reduction_settings.yml
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/settings/time_domain_reduction_settings.yml
rename to example_systems/3_three_zones_w_co2_capture/settings/time_domain_reduction_settings.yml
diff --git a/example_systems/3_three_zone_w_co2_capture/system/Demand_data.csv b/example_systems/3_three_zones_w_co2_capture/system/Demand_data.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/system/Demand_data.csv
rename to example_systems/3_three_zones_w_co2_capture/system/Demand_data.csv
diff --git a/example_systems/3_three_zone_w_co2_capture/system/Fuels_data.csv b/example_systems/3_three_zones_w_co2_capture/system/Fuels_data.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/system/Fuels_data.csv
rename to example_systems/3_three_zones_w_co2_capture/system/Fuels_data.csv
diff --git a/example_systems/3_three_zone_w_co2_capture/system/Generators_variability.csv b/example_systems/3_three_zones_w_co2_capture/system/Generators_variability.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/system/Generators_variability.csv
rename to example_systems/3_three_zones_w_co2_capture/system/Generators_variability.csv
diff --git a/example_systems/3_three_zone_w_co2_capture/system/Network.csv b/example_systems/3_three_zones_w_co2_capture/system/Network.csv
similarity index 100%
rename from example_systems/3_three_zone_w_co2_capture/system/Network.csv
rename to example_systems/3_three_zones_w_co2_capture/system/Network.csv
diff --git a/example_systems/4_three_zones_w_policies_slack/Run.jl b/example_systems/4_three_zones_w_policies_slack/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/4_three_zones_w_policies_slack/Run.jl
+++ b/example_systems/4_three_zones_w_policies_slack/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/4_three_zones_w_policies_slack/resources/Vre.csv b/example_systems/4_three_zones_w_policies_slack/resources/Vre.csv
index b053e3eb8a..0183631d73 100644
--- a/example_systems/4_three_zones_w_policies_slack/resources/Vre.csv
+++ b/example_systems/4_three_zones_w_policies_slack/resources/Vre.csv
@@ -1,5 +1,5 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
-MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,9.13,0,0,0,0,MA,1
-CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,CT,1
-CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,9.16,0,0,0,0,CT,1
-ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,ME,1
\ No newline at end of file
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
+MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,MA,1
+CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,CT,1
+CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,CT,1
+ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/5_three_zones_w_piecewise_fuel/resources/Vre.csv b/example_systems/5_three_zones_w_piecewise_fuel/resources/Vre.csv
index b053e3eb8a..0183631d73 100644
--- a/example_systems/5_three_zones_w_piecewise_fuel/resources/Vre.csv
+++ b/example_systems/5_three_zones_w_piecewise_fuel/resources/Vre.csv
@@ -1,5 +1,5 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
-MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,9.13,0,0,0,0,MA,1
-CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,CT,1
-CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,9.16,0,0,0,0,CT,1
-ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,ME,1
\ No newline at end of file
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
+MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,MA,1
+CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,CT,1
+CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,CT,1
+ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/6_three_zones_w_multistage/Run.jl b/example_systems/6_three_zones_w_multistage/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/6_three_zones_w_multistage/Run.jl
+++ b/example_systems/6_three_zones_w_multistage/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/6_three_zones_w_multistage/inputs/inputs_p1/resources/Vre.csv b/example_systems/6_three_zones_w_multistage/inputs/inputs_p1/resources/Vre.csv
index 7aba14a14a..12d317ff1e 100644
--- a/example_systems/6_three_zones_w_multistage/inputs/inputs_p1/resources/Vre.csv
+++ b/example_systems/6_three_zones_w_multistage/inputs/inputs_p1/resources/Vre.csv
@@ -1,5 +1,5 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
-MA_solar_pv,1,1,1,1,0,-1,0,85300,18760,0,9.13,0,0,0,0,MA,1
-CT_onshore_wind,2,1,1,1,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,CT,1
-CT_solar_pv,2,1,1,1,0,-1,0,85300,18760,0,9.16,0,0,0,0,CT,1
-ME_onshore_wind,3,1,1,1,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,ME,1
\ No newline at end of file
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
+MA_solar_pv,1,1,1,1,0,-1,0,85300,18760,0,0,0,0,0,MA,1
+CT_onshore_wind,2,1,1,1,0,-1,0,97200,43205,0.1,0,0,0,0,CT,1
+CT_solar_pv,2,1,1,1,0,-1,0,85300,18760,0,0,0,0,0,CT,1
+ME_onshore_wind,3,1,1,1,0,-1,0,97200,43205,0.1,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/6_three_zones_w_multistage/inputs/inputs_p2/resources/Vre.csv b/example_systems/6_three_zones_w_multistage/inputs/inputs_p2/resources/Vre.csv
index 7aba14a14a..12d317ff1e 100644
--- a/example_systems/6_three_zones_w_multistage/inputs/inputs_p2/resources/Vre.csv
+++ b/example_systems/6_three_zones_w_multistage/inputs/inputs_p2/resources/Vre.csv
@@ -1,5 +1,5 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
-MA_solar_pv,1,1,1,1,0,-1,0,85300,18760,0,9.13,0,0,0,0,MA,1
-CT_onshore_wind,2,1,1,1,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,CT,1
-CT_solar_pv,2,1,1,1,0,-1,0,85300,18760,0,9.16,0,0,0,0,CT,1
-ME_onshore_wind,3,1,1,1,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,ME,1
\ No newline at end of file
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
+MA_solar_pv,1,1,1,1,0,-1,0,85300,18760,0,0,0,0,0,MA,1
+CT_onshore_wind,2,1,1,1,0,-1,0,97200,43205,0.1,0,0,0,0,CT,1
+CT_solar_pv,2,1,1,1,0,-1,0,85300,18760,0,0,0,0,0,CT,1
+ME_onshore_wind,3,1,1,1,0,-1,0,97200,43205,0.1,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/6_three_zones_w_multistage/inputs/inputs_p3/resources/Vre.csv b/example_systems/6_three_zones_w_multistage/inputs/inputs_p3/resources/Vre.csv
index 7aba14a14a..12d317ff1e 100644
--- a/example_systems/6_three_zones_w_multistage/inputs/inputs_p3/resources/Vre.csv
+++ b/example_systems/6_three_zones_w_multistage/inputs/inputs_p3/resources/Vre.csv
@@ -1,5 +1,5 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
-MA_solar_pv,1,1,1,1,0,-1,0,85300,18760,0,9.13,0,0,0,0,MA,1
-CT_onshore_wind,2,1,1,1,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,CT,1
-CT_solar_pv,2,1,1,1,0,-1,0,85300,18760,0,9.16,0,0,0,0,CT,1
-ME_onshore_wind,3,1,1,1,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,ME,1
\ No newline at end of file
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
+MA_solar_pv,1,1,1,1,0,-1,0,85300,18760,0,0,0,0,0,MA,1
+CT_onshore_wind,2,1,1,1,0,-1,0,97200,43205,0.1,0,0,0,0,CT,1
+CT_solar_pv,2,1,1,1,0,-1,0,85300,18760,0,0,0,0,0,CT,1
+ME_onshore_wind,3,1,1,1,0,-1,0,97200,43205,0.1,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/7_three_zones_w_colocated_VRE_storage/Run.jl b/example_systems/7_three_zones_w_colocated_VRE_storage/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/7_three_zones_w_colocated_VRE_storage/Run.jl
+++ b/example_systems/7_three_zones_w_colocated_VRE_storage/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/7_three_zones_w_colocated_VRE_storage/resources/Vre.csv b/example_systems/7_three_zones_w_colocated_VRE_storage/resources/Vre.csv
index b053e3eb8a..0183631d73 100644
--- a/example_systems/7_three_zones_w_colocated_VRE_storage/resources/Vre.csv
+++ b/example_systems/7_three_zones_w_colocated_VRE_storage/resources/Vre.csv
@@ -1,5 +1,5 @@
-Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
-MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,9.13,0,0,0,0,MA,1
-CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,CT,1
-CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,9.16,0,0,0,0,CT,1
-ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,9.12,0,0,0,0,ME,1
\ No newline at end of file
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,region,cluster
+MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,MA,1
+CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,CT,1
+CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,CT,1
+ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,ME,1
\ No newline at end of file
diff --git a/example_systems/8_three_zone_w_retrofit/resources/Storage.csv b/example_systems/8_three_zone_w_retrofit/resources/Storage.csv
deleted file mode 100644
index 31d9d93a89..0000000000
--- a/example_systems/8_three_zone_w_retrofit/resources/Storage.csv
+++ /dev/null
@@ -1,4 +0,0 @@
-Resource,Zone,Model,LDS,New_Build,Can_Retire,Existing_Cap_MW,Existing_Cap_MWh,Existing_Charge_Cap_MW,Max_Cap_MW,Max_Cap_MWh,Max_Charge_Cap_MW,Min_Cap_MW,Min_Cap_MWh,Min_Charge_Cap_MW,Inv_Cost_per_MWyr,Inv_Cost_per_MWhyr,Inv_Cost_Charge_per_MWyr,Fixed_OM_Cost_per_MWyr,Fixed_OM_Cost_per_MWhyr,Fixed_OM_Cost_Charge_per_MWyr,Var_OM_Cost_per_MWh,Var_OM_Cost_per_MWh_In,Heat_Rate_MMBTU_per_MWh,Fuel,Cap_Size,Start_Cost_per_MW,Start_Fuel_MMBTU_per_MW,Ramp_Up_Percentage,Ramp_Dn_Percentage,Hydro_Energy_to_Power_Ratio,Min_Power,Self_Disch,Eff_Up,Eff_Down,Min_Duration,Max_Duration,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,MGA,Resource_Type,region,cluster
-MA_battery,1,1,0,1,0,0,0,0,-1,-1,-1,0,0,0,19584,22494,0,4895,5622,0,0.15,0.15,0.0,None,0,0,0,1.0,1.0,0,0.0,0,0.92,0.92,1,10,0.0,0.0,0,0,0,battery_mid,MA,0
-CT_battery,2,1,0,1,0,0,0,0,-1,-1,-1,0,0,0,19584,22494,0,4895,5622,0,0.15,0.15,0.0,None,0,0,0,1.0,1.0,0,0.0,0,0.92,0.92,1,10,0.0,0.0,0,0,0,battery_mid,CT,0
-ME_battery,3,1,0,1,0,0,0,0,-1,-1,-1,0,0,0,19584,22494,0,4895,5622,0,0.15,0.15,0.0,None,0,0,0,1.0,1.0,0,0.0,0,0.92,0.92,1,10,0.0,0.0,0,0,0,battery_mid,ME,0
diff --git a/example_systems/8_three_zone_w_retrofit/resources/Vre.csv b/example_systems/8_three_zone_w_retrofit/resources/Vre.csv
deleted file mode 100644
index 7b80e308d7..0000000000
--- a/example_systems/8_three_zone_w_retrofit/resources/Vre.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-Resource,Zone,LDS,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Heat_Rate_MMBTU_per_MWh,Fuel,Cap_Size,Start_Cost_per_MW,Start_Fuel_MMBTU_per_MW,Ramp_Up_Percentage,Ramp_Dn_Percentage,Hydro_Energy_to_Power_Ratio,Min_Power,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,MGA,Resource_Type,region,cluster
-MA_solar_pv,1,0,1,1,0,0,-1,0,85300,18760,0.0,9.13,None,0,0,0,1.0,1.0,0,0.0,0.0,0.0,0,0,1,solar_photovoltaic,MA,1
-CT_onshore_wind,2,0,1,1,0,0,-1,0,97200,43205,0.1,9.12,None,0,0,0,1.0,1.0,0,0.0,0.0,0.0,0,0,1,onshore_wind_turbine,CT,1
-CT_solar_pv,2,0,1,1,0,0,-1,0,85300,18760,0.0,9.16,None,0,0,0,1.0,1.0,0,0.0,0.0,0.0,0,0,1,solar_photovoltaic,CT,1
-ME_onshore_wind,3,0,1,1,0,0,-1,0,97200,43205,0.1,9.12,None,0,0,0,1.0,1.0,0,0.0,0.0,0.0,0,0,1,onshore_wind_turbine,ME,1
diff --git a/example_systems/8_three_zone_w_retrofit/README.md b/example_systems/8_three_zones_w_retrofit/README.md
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/README.md
rename to example_systems/8_three_zones_w_retrofit/README.md
diff --git a/example_systems/8_three_zones_w_retrofit/Run.jl b/example_systems/8_three_zones_w_retrofit/Run.jl
new file mode 100644
index 0000000000..b44ca23ec1
--- /dev/null
+++ b/example_systems/8_three_zones_w_retrofit/Run.jl
@@ -0,0 +1,3 @@
+using GenX
+
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/8_three_zone_w_retrofit/policies/CO2_cap.csv b/example_systems/8_three_zones_w_retrofit/policies/CO2_cap.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/policies/CO2_cap.csv
rename to example_systems/8_three_zones_w_retrofit/policies/CO2_cap.csv
diff --git a/example_systems/8_three_zone_w_retrofit/policies/Capacity_reserve_margin.csv b/example_systems/8_three_zones_w_retrofit/policies/Capacity_reserve_margin.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/policies/Capacity_reserve_margin.csv
rename to example_systems/8_three_zones_w_retrofit/policies/Capacity_reserve_margin.csv
diff --git a/example_systems/8_three_zone_w_retrofit/policies/Energy_share_requirement.csv b/example_systems/8_three_zones_w_retrofit/policies/Energy_share_requirement.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/policies/Energy_share_requirement.csv
rename to example_systems/8_three_zones_w_retrofit/policies/Energy_share_requirement.csv
diff --git a/example_systems/8_three_zone_w_retrofit/policies/Minimum_capacity_requirement.csv b/example_systems/8_three_zones_w_retrofit/policies/Minimum_capacity_requirement.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/policies/Minimum_capacity_requirement.csv
rename to example_systems/8_three_zones_w_retrofit/policies/Minimum_capacity_requirement.csv
diff --git a/example_systems/8_three_zones_w_retrofit/resources/Storage.csv b/example_systems/8_three_zones_w_retrofit/resources/Storage.csv
new file mode 100644
index 0000000000..0e776f93a9
--- /dev/null
+++ b/example_systems/8_three_zones_w_retrofit/resources/Storage.csv
@@ -0,0 +1,4 @@
+Resource,Zone,Model,New_Build,Can_Retire,Existing_Cap_MW,Existing_Cap_MWh,Max_Cap_MW,Max_Cap_MWh,Min_Cap_MW,Min_Cap_MWh,Inv_Cost_per_MWyr,Inv_Cost_per_MWhyr,Fixed_OM_Cost_per_MWyr,Fixed_OM_Cost_per_MWhyr,Var_OM_Cost_per_MWh,Var_OM_Cost_per_MWh_In,Cap_Size,Start_Cost_per_MW,Ramp_Up_Percentage,Ramp_Dn_Percentage,Min_Power,Self_Disch,Eff_Up,Eff_Down,Min_Duration,Max_Duration,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,MGA,Resource_Type,region,cluster
+MA_battery,1,1,1,0,0,0,-1,-1,0,0,19584,22494,4895,5622,0.15,0.15,0,0,1,1,0,0,0.92,0.92,1,10,0,0,0,0,0,battery_mid,MA,0
+CT_battery,2,1,1,0,0,0,-1,-1,0,0,19584,22494,4895,5622,0.15,0.15,0,0,1,1,0,0,0.92,0.92,1,10,0,0,0,0,0,battery_mid,CT,0
+ME_battery,3,1,1,0,0,0,-1,-1,0,0,19584,22494,4895,5622,0.15,0.15,0,0,1,1,0,0,0.92,0.92,1,10,0,0,0,0,0,battery_mid,ME,0
\ No newline at end of file
diff --git a/example_systems/8_three_zone_w_retrofit/resources/Thermal.csv b/example_systems/8_three_zones_w_retrofit/resources/Thermal.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/resources/Thermal.csv
rename to example_systems/8_three_zones_w_retrofit/resources/Thermal.csv
diff --git a/example_systems/8_three_zones_w_retrofit/resources/Vre.csv b/example_systems/8_three_zones_w_retrofit/resources/Vre.csv
new file mode 100644
index 0000000000..deb34b92f9
--- /dev/null
+++ b/example_systems/8_three_zones_w_retrofit/resources/Vre.csv
@@ -0,0 +1,5 @@
+Resource,Zone,Num_VRE_Bins,New_Build,Can_Retire,Existing_Cap_MW,Max_Cap_MW,Min_Cap_MW,Inv_Cost_per_MWyr,Fixed_OM_Cost_per_MWyr,Var_OM_Cost_per_MWh,Reg_Max,Rsv_Max,Reg_Cost,Rsv_Cost,MGA,Resource_Type,region,cluster
+MA_solar_pv,1,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,1,solar_photovoltaic,MA,1
+CT_onshore_wind,2,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,1,onshore_wind_turbine,CT,1
+CT_solar_pv,2,1,1,0,0,-1,0,85300,18760,0,0,0,0,0,1,solar_photovoltaic,CT,1
+ME_onshore_wind,3,1,1,0,0,-1,0,97200,43205,0.1,0,0,0,0,1,onshore_wind_turbine,ME,1
\ No newline at end of file
diff --git a/example_systems/8_three_zone_w_retrofit/resources/policy_assignments/Resource_minimum_capacity_requirement.csv b/example_systems/8_three_zones_w_retrofit/resources/policy_assignments/Resource_minimum_capacity_requirement.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/resources/policy_assignments/Resource_minimum_capacity_requirement.csv
rename to example_systems/8_three_zones_w_retrofit/resources/policy_assignments/Resource_minimum_capacity_requirement.csv
diff --git a/example_systems/8_three_zone_w_retrofit/settings/clp_settings.yml b/example_systems/8_three_zones_w_retrofit/settings/clp_settings.yml
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/settings/clp_settings.yml
rename to example_systems/8_three_zones_w_retrofit/settings/clp_settings.yml
diff --git a/example_systems/8_three_zone_w_retrofit/settings/cplex_settings.yml b/example_systems/8_three_zones_w_retrofit/settings/cplex_settings.yml
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/settings/cplex_settings.yml
rename to example_systems/8_three_zones_w_retrofit/settings/cplex_settings.yml
diff --git a/example_systems/8_three_zone_w_retrofit/settings/genx_settings.yml b/example_systems/8_three_zones_w_retrofit/settings/genx_settings.yml
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/settings/genx_settings.yml
rename to example_systems/8_three_zones_w_retrofit/settings/genx_settings.yml
diff --git a/example_systems/8_three_zone_w_retrofit/settings/gurobi_settings.yml b/example_systems/8_three_zones_w_retrofit/settings/gurobi_settings.yml
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/settings/gurobi_settings.yml
rename to example_systems/8_three_zones_w_retrofit/settings/gurobi_settings.yml
diff --git a/example_systems/8_three_zone_w_retrofit/settings/highs_settings.yml b/example_systems/8_three_zones_w_retrofit/settings/highs_settings.yml
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/settings/highs_settings.yml
rename to example_systems/8_three_zones_w_retrofit/settings/highs_settings.yml
diff --git a/example_systems/8_three_zone_w_retrofit/settings/time_domain_reduction_settings.yml b/example_systems/8_three_zones_w_retrofit/settings/time_domain_reduction_settings.yml
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/settings/time_domain_reduction_settings.yml
rename to example_systems/8_three_zones_w_retrofit/settings/time_domain_reduction_settings.yml
diff --git a/example_systems/8_three_zone_w_retrofit/system/Demand_data.csv b/example_systems/8_three_zones_w_retrofit/system/Demand_data.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/system/Demand_data.csv
rename to example_systems/8_three_zones_w_retrofit/system/Demand_data.csv
diff --git a/example_systems/8_three_zone_w_retrofit/system/Fuels_data.csv b/example_systems/8_three_zones_w_retrofit/system/Fuels_data.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/system/Fuels_data.csv
rename to example_systems/8_three_zones_w_retrofit/system/Fuels_data.csv
diff --git a/example_systems/8_three_zone_w_retrofit/system/Generators_variability.csv b/example_systems/8_three_zones_w_retrofit/system/Generators_variability.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/system/Generators_variability.csv
rename to example_systems/8_three_zones_w_retrofit/system/Generators_variability.csv
diff --git a/example_systems/8_three_zone_w_retrofit/system/Network.csv b/example_systems/8_three_zones_w_retrofit/system/Network.csv
similarity index 100%
rename from example_systems/8_three_zone_w_retrofit/system/Network.csv
rename to example_systems/8_three_zones_w_retrofit/system/Network.csv
diff --git a/example_systems/IEEE_9_bus_DC_OPF/README.md b/example_systems/9_IEEE_9_bus_DC_OPF/README.md
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/README.md
rename to example_systems/9_IEEE_9_bus_DC_OPF/README.md
diff --git a/example_systems/9_IEEE_9_bus_DC_OPF/Run.jl b/example_systems/9_IEEE_9_bus_DC_OPF/Run.jl
new file mode 100644
index 0000000000..b44ca23ec1
--- /dev/null
+++ b/example_systems/9_IEEE_9_bus_DC_OPF/Run.jl
@@ -0,0 +1,3 @@
+using GenX
+
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/IEEE_9_bus_DC_OPF/resources/Thermal.csv b/example_systems/9_IEEE_9_bus_DC_OPF/resources/Thermal.csv
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/resources/Thermal.csv
rename to example_systems/9_IEEE_9_bus_DC_OPF/resources/Thermal.csv
diff --git a/example_systems/IEEE_9_bus_DC_OPF/settings/cplex_settings.yml b/example_systems/9_IEEE_9_bus_DC_OPF/settings/cplex_settings.yml
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/settings/cplex_settings.yml
rename to example_systems/9_IEEE_9_bus_DC_OPF/settings/cplex_settings.yml
diff --git a/example_systems/IEEE_9_bus_DC_OPF/settings/genx_settings.yml b/example_systems/9_IEEE_9_bus_DC_OPF/settings/genx_settings.yml
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/settings/genx_settings.yml
rename to example_systems/9_IEEE_9_bus_DC_OPF/settings/genx_settings.yml
diff --git a/example_systems/IEEE_9_bus_DC_OPF/settings/gurobi_settings.yml b/example_systems/9_IEEE_9_bus_DC_OPF/settings/gurobi_settings.yml
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/settings/gurobi_settings.yml
rename to example_systems/9_IEEE_9_bus_DC_OPF/settings/gurobi_settings.yml
diff --git a/example_systems/IEEE_9_bus_DC_OPF/settings/highs_settings.yml b/example_systems/9_IEEE_9_bus_DC_OPF/settings/highs_settings.yml
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/settings/highs_settings.yml
rename to example_systems/9_IEEE_9_bus_DC_OPF/settings/highs_settings.yml
diff --git a/example_systems/IEEE_9_bus_DC_OPF/settings/time_domain_reduction_settings.yml b/example_systems/9_IEEE_9_bus_DC_OPF/settings/time_domain_reduction_settings.yml
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/settings/time_domain_reduction_settings.yml
rename to example_systems/9_IEEE_9_bus_DC_OPF/settings/time_domain_reduction_settings.yml
diff --git a/example_systems/IEEE_9_bus_DC_OPF/system/Demand_data.csv b/example_systems/9_IEEE_9_bus_DC_OPF/system/Demand_data.csv
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/system/Demand_data.csv
rename to example_systems/9_IEEE_9_bus_DC_OPF/system/Demand_data.csv
diff --git a/example_systems/IEEE_9_bus_DC_OPF/system/Fuels_data.csv b/example_systems/9_IEEE_9_bus_DC_OPF/system/Fuels_data.csv
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/system/Fuels_data.csv
rename to example_systems/9_IEEE_9_bus_DC_OPF/system/Fuels_data.csv
diff --git a/example_systems/IEEE_9_bus_DC_OPF/system/Generators_variability.csv b/example_systems/9_IEEE_9_bus_DC_OPF/system/Generators_variability.csv
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/system/Generators_variability.csv
rename to example_systems/9_IEEE_9_bus_DC_OPF/system/Generators_variability.csv
diff --git a/example_systems/IEEE_9_bus_DC_OPF/system/Network.csv b/example_systems/9_IEEE_9_bus_DC_OPF/system/Network.csv
similarity index 100%
rename from example_systems/IEEE_9_bus_DC_OPF/system/Network.csv
rename to example_systems/9_IEEE_9_bus_DC_OPF/system/Network.csv
diff --git a/example_systems/IEEE_9_bus_DC_OPF/Run.jl b/example_systems/IEEE_9_bus_DC_OPF/Run.jl
deleted file mode 100644
index 7ce1891834..0000000000
--- a/example_systems/IEEE_9_bus_DC_OPF/Run.jl
+++ /dev/null
@@ -1,3 +0,0 @@
-using GenX
-
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
diff --git a/src/additional_tools/method_of_morris.jl b/src/additional_tools/method_of_morris.jl
index 0938c8022e..ed5cd5ddfa 100644
--- a/src/additional_tools/method_of_morris.jl
+++ b/src/additional_tools/method_of_morris.jl
@@ -1,28 +1,28 @@
const SEED = 1234
-
@doc raw"""
morris(EP::Model, path::AbstractString, setup::Dict, inputs::Dict, outpath::AbstractString, OPTIMIZER)
We apply the Method of Morris developed by [Morris, M., 1991](https://www.jstor.org/stable/1269043) in order to identify the input parameters that produce the largest change on total system cost. Method of Morris falls under the simplest class of one-factor-at-a-time (OAT) screening techniques. It assumes l levels per input factor and generates a set of trajectories through the input space. As such, the Method of Morris generates a grid of uncertain model input parameters, $x_i, i=1, ..., k$, where the range $[x_i^{-}, x_i^{+}]$ of each uncertain input parameter i is split into l intervals of equal length. Each trajectory starts at different realizations of input parameters chosen at random and are built by successively selecting one of the inputs randomly and moving it to an adjacent level. These trajectories are used to estimate the mean and the standard deviation of each input parameter on total system cost. A high estimated mean indicates that the input parameter is important; a high estimated standard deviation indicates important interactions between that input parameter and other inputs.
"""
-struct MatSpread{T1,T2}
+struct MatSpread{T1, T2}
mat::T1
spread::T2
end
-struct MorrisResult{T1,T2}
+struct MorrisResult{T1, T2}
means::T1
means_star::T1
variances::T1
elementary_effects::T2
end
-function generate_design_matrix(p_range, p_steps, rng;len_design_mat,groups)
- ps = [range(p_range[i][1], stop=p_range[i][2], length=p_steps[i]) for i in 1:length(p_range)]
+function generate_design_matrix(p_range, p_steps, rng; len_design_mat, groups)
+ ps = [range(p_range[i][1], stop = p_range[i][2], length = p_steps[i])
+ for i in 1:length(p_range)]
indices = [rand(rng, 1:i) for i in p_steps]
- all_idxs_original = Vector{typeof(indices)}(undef,len_design_mat)
-
+ all_idxs_original = Vector{typeof(indices)}(undef, len_design_mat)
+
for i in 1:len_design_mat
j = rand(rng, 1:length(p_range))
indices[j] += (rand(rng) < 0.5 ? -1 : 1)
@@ -34,20 +34,20 @@ function generate_design_matrix(p_range, p_steps, rng;len_design_mat,groups)
all_idxs_original[i] = copy(indices)
end
- df_all_idx_original = DataFrame(all_idxs_original,:auto)
+ df_all_idx_original = DataFrame(all_idxs_original, :auto)
println(df_all_idx_original)
all_idxs = similar(df_all_idx_original)
for g in unique(groups)
- temp = findall(x->x==g, groups)
+ temp = findall(x -> x == g, groups)
for k in temp
- all_idxs[k,:] = df_all_idx_original[temp[1],:]
+ all_idxs[k, :] = df_all_idx_original[temp[1], :]
end
end
println(all_idxs)
- B = Array{Array{Float64}}(undef,len_design_mat)
+ B = Array{Array{Float64}}(undef, len_design_mat)
for j in 1:len_design_mat
- cur_p = [ps[u][(all_idxs[:,j][u])] for u in 1:length(p_range)]
+ cur_p = [ps[u][(all_idxs[:, j][u])] for u in 1:length(p_range)]
B[j] = cur_p
end
reduce(hcat, B)
@@ -55,45 +55,67 @@ end
function calculate_spread(matrix)
spread = 0.0
- for i in 2:size(matrix,2)
- spread += sqrt(sum(abs2.(matrix[:,i] - matrix[:,i-1])))
+ for i in 2:size(matrix, 2)
+ spread += sqrt(sum(abs2.(matrix[:, i] - matrix[:, i - 1])))
end
spread
end
-function sample_matrices(p_range,p_steps, rng;num_trajectory,total_num_trajectory,len_design_mat,groups)
+function sample_matrices(p_range,
+ p_steps,
+ rng;
+ num_trajectory,
+ total_num_trajectory,
+ len_design_mat,
+ groups)
matrix_array = []
println(num_trajectory)
println(total_num_trajectory)
- if total_num_trajectory x.spread,rev=true)
+ sort!(matrix_array, by = x -> x.spread, rev = true)
matrices = [i.mat for i in matrix_array[1:num_trajectory]]
- reduce(hcat,matrices)
+ reduce(hcat, matrices)
end
-function my_gsa(f, p_steps, num_trajectory, total_num_trajectory, p_range::AbstractVector, len_design_mat, groups, random)
+function my_gsa(f,
+ p_steps,
+ num_trajectory,
+ total_num_trajectory,
+ p_range::AbstractVector,
+ len_design_mat,
+ groups,
+ random)
rng = Random.default_rng()
- if !random; Random.seed!(SEED); end
- design_matrices_original = sample_matrices(p_range, p_steps, rng;num_trajectory,
- total_num_trajectory,len_design_mat,groups)
+ if !random
+ Random.seed!(SEED)
+ end
+ design_matrices_original = sample_matrices(p_range, p_steps, rng; num_trajectory,
+ total_num_trajectory, len_design_mat, groups)
println(design_matrices_original)
- L = DataFrame(design_matrices_original,:auto)
+ L = DataFrame(design_matrices_original, :auto)
println(L)
- distinct_trajectories = Array{Int64}(undef,num_trajectory)
- design_matrices = Matrix(DataFrame(unique(last, pairs(eachcol(L[!,1:len_design_mat])))))
- distinct_trajectories[1] = length(design_matrices[1,:])
+ distinct_trajectories = Array{Int64}(undef, num_trajectory)
+ design_matrices = Matrix(DataFrame(unique(last,
+ pairs(eachcol(L[!, 1:len_design_mat])))))
+ distinct_trajectories[1] = length(design_matrices[1, :])
if num_trajectory > 1
for i in 2:num_trajectory
- design_matrices = hcat(design_matrices, Matrix(DataFrame(unique(last, pairs(eachcol(L[!,(i-1)*len_design_mat+1:i*len_design_mat]))))))
- distinct_trajectories[i] = length(Matrix(DataFrame(unique(last, pairs(eachcol(L[!,(i-1)*len_design_mat+1:i*len_design_mat])))))[1,:])
+ design_matrices = hcat(design_matrices,
+ Matrix(DataFrame(unique(last,
+ pairs(eachcol(L[!,
+ ((i - 1) * len_design_mat + 1):(i * len_design_mat)]))))))
+ distinct_trajectories[i] = length(Matrix(DataFrame(unique(last,
+ pairs(eachcol(L[!, ((i - 1) * len_design_mat + 1):(i * len_design_mat)])))))[
+ 1,
+ :])
end
end
println(distinct_trajectories)
@@ -102,26 +124,27 @@ function my_gsa(f, p_steps, num_trajectory, total_num_trajectory, p_range::Abstr
multioutput = false
desol = false
local y_size
-
- _y = [f(design_matrices[:,i]) for i in 1:size(design_matrices,2)]
+
+ _y = [f(design_matrices[:, i]) for i in 1:size(design_matrices, 2)]
multioutput = !(eltype(_y) <: Number)
if eltype(_y) <: RecursiveArrayTools.AbstractVectorOfArray
y_size = size(_y[1])
_y = vec.(_y)
desol = true
end
- all_y = multioutput ? reduce(hcat,_y) : _y
+ all_y = multioutput ? reduce(hcat, _y) : _y
println(all_y)
effects = []
- while(length(effects) < length(groups))
- push!(effects,Vector{Float64}[])
+ while (length(effects) < length(groups))
+ push!(effects, Vector{Float64}[])
end
for i in 1:num_trajectory
len_design_mat = distinct_trajectories[i]
- y1 = multioutput ? all_y[:,(i-1)*len_design_mat+1] : all_y[(i-1)*len_design_mat+1]
- for j in (i-1)*len_design_mat+1:(i*len_design_mat)-1
+ y1 = multioutput ? all_y[:, (i - 1) * len_design_mat + 1] :
+ all_y[(i - 1) * len_design_mat + 1]
+ for j in ((i - 1) * len_design_mat + 1):((i * len_design_mat) - 1)
y2 = y1
- del = design_matrices[:,j+1] - design_matrices[:,j]
+ del = design_matrices[:, j + 1] - design_matrices[:, j]
change_index = 0
for k in 1:length(del)
if abs(del[k]) > 0
@@ -130,14 +153,14 @@ function my_gsa(f, p_steps, num_trajectory, total_num_trajectory, p_range::Abstr
end
end
del = sum(del)
- y1 = multioutput ? all_y[:,j+1] : all_y[j+1]
- effect = @. (y1-y2)/(del)
+ y1 = multioutput ? all_y[:, j + 1] : all_y[j + 1]
+ effect = @. (y1 - y2) / (del)
elem_effect = typeof(y1) <: Number ? effect : mean(effect, dims = 2)
- temp_g_index = findall(x->x==groups[change_index], groups)
+ temp_g_index = findall(x -> x == groups[change_index], groups)
for g in temp_g_index
println(effects)
println(elem_effect)
- push!(effects[g],elem_effect)
+ push!(effects[g], elem_effect)
end
end
end
@@ -156,23 +179,32 @@ function my_gsa(f, p_steps, num_trajectory, total_num_trajectory, p_range::Abstr
end
end
if desol
- f_shape = x -> [reshape(x[:,i],y_size) for i in 1:size(x,2)]
- means = map(f_shape,means)
- means_star = map(f_shape,means_star)
- variances = map(f_shape,variances)
+ f_shape = x -> [reshape(x[:, i], y_size) for i in 1:size(x, 2)]
+ means = map(f_shape, means)
+ means_star = map(f_shape, means_star)
+ variances = map(f_shape, variances)
end
- MorrisResult(reduce(hcat, means),reduce(hcat, means_star),reduce(hcat, variances),effects)
+ MorrisResult(reduce(hcat, means),
+ reduce(hcat, means_star),
+ reduce(hcat, variances),
+ effects)
end
-function morris(EP::Model, path::AbstractString, setup::Dict, inputs::Dict, outpath::AbstractString, OPTIMIZER; random=true)
+function morris(EP::Model,
+ path::AbstractString,
+ setup::Dict,
+ inputs::Dict,
+ outpath::AbstractString,
+ OPTIMIZER;
+ random = true)
# Reading the input parameters
Morris_range = load_dataframe(joinpath(path, "Method_of_morris_range.csv"))
- groups = Morris_range[!,:Group]
- p_steps = Morris_range[!,:p_steps]
- total_num_trajectory = Morris_range[!,:total_num_trajectory][1]
- num_trajectory = Morris_range[!,:num_trajectory][1]
- len_design_mat = Morris_range[!,:len_design_mat][1]
- uncertain_columns = unique(Morris_range[!,:Parameter])
+ groups = Morris_range[!, :Group]
+ p_steps = Morris_range[!, :p_steps]
+ total_num_trajectory = Morris_range[!, :total_num_trajectory][1]
+ num_trajectory = Morris_range[!, :num_trajectory][1]
+ len_design_mat = Morris_range[!, :len_design_mat][1]
+ uncertain_columns = unique(Morris_range[!, :Parameter])
#save_parameters = zeros(length(Morris_range[!,:Parameter]))
gen = inputs["RESOURCES"]
@@ -181,40 +213,54 @@ function morris(EP::Model, path::AbstractString, setup::Dict, inputs::Dict, outp
for column in uncertain_columns
col_sym = Symbol(lowercase(column))
# column_f is the function to get the value "column" for each generator
- column_f = isdefined(GenX, col_sym) ? getfield(GenX, col_sym) : r -> getproperty(r, col_sym)
- sigma = [sigma; [column_f.(gen) .* (1 .+ Morris_range[Morris_range[!,:Parameter] .== column, :Lower_bound] ./100) column_f.(gen) .* (1 .+ Morris_range[Morris_range[!,:Parameter] .== column, :Upper_bound] ./100)]]
+ column_f = isdefined(GenX, col_sym) ? getfield(GenX, col_sym) :
+ r -> getproperty(r, col_sym)
+ sigma = [sigma;
+ [column_f.(gen) .* (1 .+
+ Morris_range[Morris_range[!, :Parameter] .== column, :Lower_bound] ./
+ 100) column_f.(gen) .*
+ (1 .+
+ Morris_range[Morris_range[!, :Parameter] .== column,
+ :Upper_bound] ./ 100)]]
end
- sigma = sigma[2:end,:]
+ sigma = sigma[2:end, :]
- p_range = mapslices(x->[x], sigma, dims=2)[:]
+ p_range = mapslices(x -> [x], sigma, dims = 2)[:]
# Creating a function for iteratively solving the model with different sets of input parameters
- f1 = function(sigma)
+ f1 = function (sigma)
#print(sigma)
print("\n")
#save_parameters = hcat(save_parameters, sigma)
for column in uncertain_columns
- index = findall(s -> s == column, Morris_range[!,:Parameter])
+ index = findall(s -> s == column, Morris_range[!, :Parameter])
attr_to_set = Symbol(lowercase(column))
gen[attr_to_set] = sigma[first(index):last(index)]
end
EP = generate_model(setup, inputs, OPTIMIZER)
#EP, solve_time = solve_model(EP, setup)
- redirect_stdout((()->optimize!(EP)),open("/dev/null", "w"))
+ redirect_stdout((() -> optimize!(EP)), open("/dev/null", "w"))
[objective_value(EP)]
end
# Perform the method of morris analysis
- m = my_gsa(f1,p_steps,num_trajectory,total_num_trajectory,p_range,len_design_mat,groups,random)
+ m = my_gsa(f1,
+ p_steps,
+ num_trajectory,
+ total_num_trajectory,
+ p_range,
+ len_design_mat,
+ groups,
+ random)
println(m.means)
println(DataFrame(m.means', :auto))
    #save the mean effect of each uncertain variable on the objective function
- Morris_range[!,:mean] = DataFrame(m.means', :auto)[!,:x1]
+ Morris_range[!, :mean] = DataFrame(m.means', :auto)[!, :x1]
println(DataFrame(m.variances', :auto))
#save the variance of effect of each uncertain variable on the objective function
- Morris_range[!,:variance] = DataFrame(m.variances', :auto)[!,:x1]
+ Morris_range[!, :variance] = DataFrame(m.variances', :auto)[!, :x1]
CSV.write(joinpath(outpath, "morris.csv"), Morris_range)
return Morris_range
diff --git a/src/additional_tools/modeling_to_generate_alternatives.jl b/src/additional_tools/modeling_to_generate_alternatives.jl
index 97400b54b1..0fe990c7be 100644
--- a/src/additional_tools/modeling_to_generate_alternatives.jl
+++ b/src/additional_tools/modeling_to_generate_alternatives.jl
@@ -3,111 +3,173 @@
We have implemented an updated Modeling to Generate Alternatives (MGA) Algorithm proposed by [Berntsen and Trutnevyte (2017)](https://www.sciencedirect.com/science/article/pii/S0360544217304097) to generate a set of feasible, near cost-optimal technology portfolios. This algorithm was developed by [Brill Jr, E. D., 1979](https://pubsonline.informs.org/doi/abs/10.1287/mnsc.25.5.413) and introduced to energy system planning by [DeCarolia, J. F., 2011](https://www.sciencedirect.com/science/article/pii/S0140988310000721).
-To create the MGA formulation, we replace the cost-minimizing objective function of GenX with a new objective function that creates multiple generation portfolios by zone. We further add a new budget constraint based on the optimal objective function value $f^*$ of the least-cost model and the user-specified value of slack $\delta$. After adding the slack constraint, the resulting MGA formulation is given as:
+To create the MGA formulation, we replace the cost-minimizing objective function of GenX with a new objective function that creates multiple generation portfolios by zone. We further add a new budget constraint based on the optimal objective function value $f^*$ of the least-cost model and the user-specified value of slack $\delta$. After adding the slack constraint, the resulting MGA formulation is given as (`MGAAnnualGeneration = 0` in the genx_settings.yml file, or not set):
```math
\begin{aligned}
\text{max/min} \quad
&\sum_{z \in \mathcal{Z}}\sum_{r \in \mathcal{R}} \beta_{z,r}^{k}P_{z,r}\\
\text{s.t.} \quad
- &P_{zr} = \sum_{y \in \mathcal{G}}\sum_{t \in \mathcal{T}} \omega_{t} \Theta_{y,t,z,r} \\
+ &P_{z,r} = \sum_{y \in \mathcal{G}}C_{y,z,r} \\
& f \leq f^* + \delta \\
&Ax = b
\end{aligned}
```
-where, $\beta_{zr}$ is a random objective fucntion coefficient betwen $[0,100]$ for MGA iteration $k$. $\Theta_{y,t,z,r}$ is a generation of technology $y$ in zone $z$ in time period $t$ that belongs to a resource type $r$. We aggregate $\Theta_{y,t,z,r}$ into a new variable $P_{z,r}$ that represents total generation from technology type $r$ in a zone $z$. In the second constraint above, $\delta$ denote the increase in budget from the least-cost solution and $f$ represents the expression for the total system cost. The constraint $Ax = b$ represents all other constraints in the power system model. We then solve the formulation with minimization and maximization objective function to explore near optimal solution space.
+where, $\beta_{z,r}$ is a random objective function coefficient betwen $[0,1]$ for MGA iteration $k$. We aggregate capacity into a new variable $P_{z,r}$ that represents total capacity from technology type $r$ in a zone $z$.
+
+If the users set `MGAAnnualGeneration = 1` in the genx_settings.yml file, the MGA formulation is given as:
+```math
+\begin{aligned}
+\text{max/min} \quad
+ &\sum_{z \in \mathcal{Z}}\sum_{r \in \mathcal{R}} \beta_{z,r}^{k}P_{z,r}\\
+ \text{s.t.} \quad
+ &P_{z,r} = \sum_{y \in \mathcal{G}}\sum_{t \in \mathcal{T}} \omega_{t} \Theta_{y,t,z,r} \\
+ & f \leq f^* + \delta \\
+ &Ax = b
+\end{aligned}
+```
+where, $\beta_{z,r}$ is a random objective function coefficient betwen $[0,1]$ for MGA iteration $k$. $\Theta_{y,t,z,r}$ is a generation of technology $y$ in zone $z$ in time period $t$ that belongs to a resource type $r$. We aggregate $\Theta_{y,t,z,r}$ into a new variable $P_{z,r}$ that represents total generation from technology type $r$ in a zone $z$.
+
+In the second constraint in both the above formulations, $\delta$ denote the increase in budget from the least-cost solution and $f$ represents the expression for the total system cost. The constraint $Ax = b$ represents all other constraints in the power system model. We then solve the formulation with minimization and maximization objective function to explore near optimal solution space.
"""
function mga(EP::Model, path::AbstractString, setup::Dict, inputs::Dict)
-
- if setup["ModelingToGenerateAlternatives"]==1
+ if setup["ModelingToGenerateAlternatives"] == 1
# Start MGA Algorithm
- println("MGA Module")
-
- # Objective function value of the least cost problem
- Least_System_Cost = objective_value(EP)
+ println("MGA Module")
- # Read sets
- gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zonests
- zones = unique(inputs["R_ZONES"])
+ # Objective function value of the least cost problem
+ Least_System_Cost = objective_value(EP)
- # Create a set of unique technology types
- resources_with_mga = gen[ids_with_mga(gen)]
- TechTypes = unique(resource_type_mga.(resources_with_mga))
+ # Read sets
+ gen = inputs["RESOURCES"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zonests
+ zones = unique(inputs["R_ZONES"])
- # Read slack parameter representing desired increase in budget from the least cost solution
- slack = setup["ModelingtoGenerateAlternativeSlack"]
+ # Create a set of unique technology types
+ resources_with_mga = gen[ids_with_mga(gen)]
+ TechTypes = unique(resource_type_mga.(resources_with_mga))
- ### Variables ###
+ # Read slack parameter representing desired increase in budget from the least cost solution
+ slack = setup["ModelingtoGenerateAlternativeSlack"]
- @variable(EP, vSumvP[TechTypes = 1:length(TechTypes), z = 1:Z] >= 0) # Variable denoting total generation from eligible technology of a given type
+ ### Constraints ###
- ### End Variables ###
+ # Constraint to set budget for MGA iterations
+ @constraint(EP, budget, EP[:eObj]<=Least_System_Cost * (1 + slack))
+ ### End Constraints ###
- ### Constraints ###
+ ### Create Results Directory for MGA iterations
+ outpath_max = joinpath(path, "MGAResults_max")
+ if !(isdir(outpath_max))
+ mkdir(outpath_max)
+ end
+ outpath_min = joinpath(path, "MGAResults_min")
+ if !(isdir(outpath_min))
+ mkdir(outpath_min)
+ end
- # Constraint to set budget for MGA iterations
- @constraint(EP, budget, EP[:eObj] <= Least_System_Cost * (1 + slack) )
+ ### Begin MGA iterations for maximization and minimization objective ###
+ mga_start_time = time()
- # Constraint to compute total generation in each zone from a given Technology Type
- function resource_in_zone_with_TechType(tt::Int64, z::Int64)
- condition::BitVector = (resource_type_mga.(gen) .== TechTypes[tt]) .& (zone_id.(gen) .== z)
- return resource_id.(gen[condition])
- end
- @constraint(EP,cGeneration[tt = 1:length(TechTypes), z = 1:Z], vSumvP[tt,z] == sum(EP[:vP][y,t] * inputs["omega"][t] for y in resource_in_zone_with_TechType(tt,z), t in 1:T))
+ print("Starting the first MGA iteration")
- ### End Constraints ###
+ for i in 1:setup["ModelingToGenerateAlternativeIterations"]
- ### Create Results Directory for MGA iterations
- outpath_max = joinpath(path, "MGAResults_max")
- if !(isdir(outpath_max))
- mkdir(outpath_max)
- end
- outpath_min = joinpath(path, "MGAResults_min")
- if !(isdir(outpath_min))
- mkdir(outpath_min)
- end
+ # Create random coefficients for the generators that we want to include in the MGA run for the given budget
+ pRand = rand(length(TechTypes), length(zones))
- ### Begin MGA iterations for maximization and minimization objective ###
- mga_start_time = time()
+ ### Maximization objective
+ @objective(EP,
+ Max,
+ sum(pRand[tt, z] * EP[:vMGA][tt, z]
+ for tt in 1:length(TechTypes), z in 1:Z))
- print("Starting the first MGA iteration")
+ # Solve Model Iteration
+ status = optimize!(EP)
- for i in 1:setup["ModelingToGenerateAlternativeIterations"]
+ # Create path for saving MGA iterations
+ mgaoutpath_max = joinpath(outpath_max, string("MGA", "_", slack, "_", i))
- # Create random coefficients for the generators that we want to include in the MGA run for the given budget
- pRand = rand(length(TechTypes),length(zones))
+ # Write results
+ write_outputs(EP, mgaoutpath_max, setup, inputs)
- ### Maximization objective
- @objective(EP, Max, sum(pRand[tt,z] * vSumvP[tt,z] for tt in 1:length(TechTypes), z in 1:Z ))
+ ### Minimization objective
+ @objective(EP,
+ Min,
+ sum(pRand[tt, z] * EP[:vMGA][tt, z]
+ for tt in 1:length(TechTypes), z in 1:Z))
- # Solve Model Iteration
- status = optimize!(EP)
+ # Solve Model Iteration
+ status = optimize!(EP)
# Create path for saving MGA iterations
- mgaoutpath_max = joinpath(outpath_max, string("MGA", "_", slack,"_", i))
+ mgaoutpath_min = joinpath(outpath_min, string("MGA", "_", slack, "_", i))
- # Write results
- write_outputs(EP, mgaoutpath_max, setup, inputs)
+ # Write results
+ write_outputs(EP, mgaoutpath_min, setup, inputs)
+ end
- ### Minimization objective
- @objective(EP, Min, sum(pRand[tt,z] * vSumvP[tt,z] for tt in 1:length(TechTypes), z in 1:Z ))
+ total_time = time() - mga_start_time
+ ### End MGA Iterations ###
+ end
+end
- # Solve Model Iteration
- status = optimize!(EP)
+@doc raw"""
+ mga!(EP::Model, inputs::Dict, setup::Dict)
- # Create path for saving MGA iterations
- mgaoutpath_min = joinpath(outpath_min, string("MGA", "_", slack,"_", i))
+This function reads the input data, collect the resources with MGA flag on and creates a set of unique technology types.
+The function then adds a constraint to the model to compute total capacity in each zone from a given Technology Type.
- # Write results
- write_outputs(EP, mgaoutpath_min, setup, inputs)
+If the user set `MGAAnnualGeneration = 0` in the genx_settings.yml file, the constraint has the following form:
+```math
+P_{z,r} = \sum_{y \in \mathcal{G}}C_{y,z,r}
+```
+where, the aggregated capacity $P_{z,r}$ represents total capacity from technology type $r$ in a zone $z$.
- end
+If the user set `MGAAnnualGeneration = 1` in the genx_settings.yml file, the constraint has the following form:
+```math
+P_{z,r} = \sum_{y \in \mathcal{G}}\sum_{t \in \mathcal{T}} \omega_{t} \Theta_{y,t,z,r}
+```
+where $\Theta_{y,t,z,r}$ is a generation of technology $y$ in zone $z$ in time period $t$ that belongs to a resource type $r$. $\Theta_{y,t,z,r}$ is aggregated into a new variable $P_{z,r}$ that represents total generation from technology type $r$ in a zone $z$.
- total_time = time() - mga_start_time
- ### End MGA Iterations ###
- end
+# Arguments
+- `EP::Model`: GenX model object
+- `inputs::Dict`: Dictionary containing input data
+# Returns
+- This function updates the model object `EP` with the MGA variables and constraints in-place.
+"""
+function mga!(EP::Model, inputs::Dict, setup::Dict)
+ println("MGA Module")
+
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ gen = inputs["RESOURCES"] # Resources data
+ # Read set of MGA variables
+ annual_gen = setup["MGAAnnualGeneration"] ### Choose setting in genx_settings.yaml: MGAAnnualGeneration: 1 = annual generation, otherwise, sum of capacity
+ # Create a set of unique technology types
+ resources_with_mga_on = gen[ids_with_mga(gen)]
+ TechTypes = unique(resource_type_mga.(resources_with_mga_on))
+
+ function resource_in_zone_same_TechType(tt::Int64, z::Int64)
+ condition::BitVector = (resource_type_mga.(gen) .== TechTypes[tt]) .&
+ (zone_id.(gen) .== z)
+ return resource_id.(gen[condition])
+ end
+ # Constraint to compute total generation in each zone from a given Technology Type
+ ### Variables ###
+ @variable(EP, vMGA[TechTypes = 1:length(TechTypes), z = 1:Z]>=0)
+
+ ### Constraint ###
+ if annual_gen == 1 # annual generation
+ @constraint(EP, cGeneration[tt = 1:length(TechTypes), z = 1:Z],
+ vMGA[tt,z]==sum(EP[:vP][y, t] * inputs["omega"][t]
+ for y in resource_in_zone_same_TechType(tt, z), t in 1:T))
+ else
+ @constraint(EP, cCapEquiv[tt = 1:length(TechTypes), z = 1:Z],
+ vMGA[tt,z]==sum(EP[:eTotalCap][y]
+ for y in resource_in_zone_same_TechType(tt, z)))
+ end
end
diff --git a/src/case_runners/case_runner.jl b/src/case_runners/case_runner.jl
index bbdaae4f53..9cbcb076c3 100644
--- a/src/case_runners/case_runner.jl
+++ b/src/case_runners/case_runner.jl
@@ -28,7 +28,7 @@ run_genx_case!("path/to/case", HiGHS.Optimizer)
run_genx_case!("path/to/case", Gurobi.Optimizer)
```
"""
-function run_genx_case!(case::AbstractString, optimizer::Any=HiGHS.Optimizer)
+function run_genx_case!(case::AbstractString, optimizer::Any = HiGHS.Optimizer)
genx_settings = get_settings_path(case, "genx_settings.yml") # Settings YAML file path
writeoutput_settings = get_settings_path(case, "output_settings.yml") # Write-output settings YAML file path
mysetup = configure_settings(genx_settings, writeoutput_settings) # mysetup dictionary stores settings and GenX-specific parameters
@@ -86,12 +86,15 @@ function run_genx_case_simple!(case::AbstractString, mysetup::Dict, optimizer::A
if has_values(EP)
println("Writing Output")
outputs_path = get_default_output_folder(case)
- elapsed_time = @elapsed outputs_path = write_outputs(EP, outputs_path, mysetup, myinputs)
+ elapsed_time = @elapsed outputs_path = write_outputs(EP,
+ outputs_path,
+ mysetup,
+ myinputs)
println("Time elapsed for writing is")
println(elapsed_time)
if mysetup["ModelingToGenerateAlternatives"] == 1
println("Starting Model to Generate Alternatives (MGA) Iterations")
- mga(EP, case, mysetup, myinputs, outputs_path)
+ mga(EP, case, mysetup, myinputs)
end
if mysetup["MethodofMorris"] == 1
@@ -101,7 +104,6 @@ function run_genx_case_simple!(case::AbstractString, mysetup::Dict, optimizer::A
end
end
-
function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimizer::Any)
settings_path = get_settings_path(case)
multistage_settings = get_settings_path(case, "multi_stage_settings.yml") # Multi stage settings YAML file path
@@ -111,13 +113,14 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
if mysetup["TimeDomainReduction"] == 1
tdr_settings = get_settings_path(case, "time_domain_reduction_settings.yml") # Multi stage settings YAML file path
TDRSettingsDict = YAML.load(open(tdr_settings))
-
+
first_stage_path = joinpath(case, "inputs", "inputs_p1")
TDRpath = joinpath(first_stage_path, mysetup["TimeDomainReductionFolder"])
system_path = joinpath(first_stage_path, mysetup["SystemFolder"])
prevent_doubled_timedomainreduction(system_path)
if !time_domain_reduced_files_exist(TDRpath)
- if (mysetup["MultiStage"] == 1) && (TDRSettingsDict["MultiStageConcatenate"] == 0)
+ if (mysetup["MultiStage"] == 1) &&
+ (TDRSettingsDict["MultiStageConcatenate"] == 0)
println("Clustering Time Series Data (Individually)...")
for stage_id in 1:mysetup["MultiStageSettingsDict"]["NumStages"]
cluster_inputs(case, settings_path, mysetup, stage_id)
@@ -135,8 +138,8 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
println("Configuring Solver")
OPTIMIZER = configure_solver(settings_path, optimizer)
- model_dict=Dict()
- inputs_dict=Dict()
+ model_dict = Dict()
+ inputs_dict = Dict()
for t in 1:mysetup["MultiStageSettingsDict"]["NumStages"]
@@ -144,26 +147,26 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
mysetup["MultiStageSettingsDict"]["CurStage"] = t
# Step 1) Load Inputs
- inpath_sub = joinpath(case, "inputs", string("inputs_p",t))
+ inpath_sub = joinpath(case, "inputs", string("inputs_p", t))
inputs_dict[t] = load_inputs(mysetup, inpath_sub)
- inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],mysetup["MultiStageSettingsDict"],mysetup["NetworkExpansion"])
+ inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],
+ mysetup["MultiStageSettingsDict"],
+ mysetup["NetworkExpansion"])
- compute_cumulative_min_retirements!(inputs_dict,t)
+ compute_cumulative_min_retirements!(inputs_dict, t)
# Step 2) Generate model
model_dict[t] = generate_model(mysetup, inputs_dict[t], OPTIMIZER)
end
+ # check that resources do not switch from can_retire = 0 to can_retire = 1 between stages
+ validate_can_retire_multistage(
+ inputs_dict, mysetup["MultiStageSettingsDict"]["NumStages"])
### Solve model
println("Solving Model")
- # Step 3) Run DDP Algorithm
- ## Solve Model
- model_dict, mystats_d, inputs_dict = run_ddp(model_dict, mysetup, inputs_dict)
-
- # Step 4) Write final outputs from each stage
-
+ # Prepare folder for results
outpath = get_default_output_folder(case)
if mysetup["OverwriteResults"] == 1
@@ -178,6 +181,11 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
mkdir(outpath)
end
+ # Step 3) Run DDP Algorithm
+ ## Solve Model
+ model_dict, mystats_d, inputs_dict = run_ddp(outpath, model_dict, mysetup, inputs_dict)
+
+ # Step 4) Write final outputs from each stage
for p in 1:mysetup["MultiStageSettingsDict"]["NumStages"]
outpath_cur = joinpath(outpath, "results_p$p")
write_outputs(model_dict[p], outpath_cur, mysetup, inputs_dict[p])
@@ -187,4 +195,3 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
write_multi_stage_outputs(mystats_d, outpath, mysetup, inputs_dict)
end
-
diff --git a/src/configure_settings/configure_settings.jl b/src/configure_settings/configure_settings.jl
index 5d59a8a91b..26e240cce6 100644
--- a/src/configure_settings/configure_settings.jl
+++ b/src/configure_settings/configure_settings.jl
@@ -1,6 +1,5 @@
function default_settings()
- Dict{Any,Any}(
- "PrintModel" => 0,
+ Dict{Any, Any}("PrintModel" => 0,
"OverwriteResults" => 0,
"NetworkExpansion" => 0,
"Trans_Loss_Segments" => 1,
@@ -19,6 +18,7 @@ function default_settings()
"TimeDomainReductionFolder" => "TDR_results",
"ModelingToGenerateAlternatives" => 0,
"ModelingtoGenerateAlternativeSlack" => 0.1,
+ "MGAAnnualGeneration" => 0,
"MultiStage" => 0,
"MethodofMorris" => 0,
"IncludeLossesInESR" => 0,
@@ -32,7 +32,7 @@ function default_settings()
"ResourcePoliciesFolder" => "policy_assignments",
"SystemFolder" => "system",
"PoliciesFolder" => "policies",
- )
+ "ObjScale" => 1)
end
@doc raw"""
@@ -63,10 +63,10 @@ function configure_settings(settings_path::String, output_settings_path::String)
return settings
end
-function validate_settings!(settings::Dict{Any,Any})
+function validate_settings!(settings::Dict{Any, Any})
# Check for any settings combinations that are not allowed.
# If we find any then make a response and issue a note to the user.
-
+
# make WriteOutputs setting lowercase and check for valid value
settings["WriteOutputs"] = lowercase(settings["WriteOutputs"])
@assert settings["WriteOutputs"] ∈ ["annual", "full"]
@@ -80,20 +80,19 @@ function validate_settings!(settings::Dict{Any,Any})
if haskey(settings, "Reserves")
Base.depwarn("""The Reserves setting has been deprecated. Please use the
- OperationalReserves setting instead.""", :validate_settings!, force=true)
+ OperationalReserves setting instead.""",
+ :validate_settings!, force = true)
settings["OperationalReserves"] = settings["Reserves"]
delete!(settings, "Reserves")
end
- if settings["EnableJuMPStringNames"]==0 && settings["ComputeConflicts"]==1
- settings["EnableJuMPStringNames"]=1;
+ if settings["EnableJuMPStringNames"] == 0 && settings["ComputeConflicts"] == 1
+ settings["EnableJuMPStringNames"] = 1
end
-
end
function default_writeoutput()
- Dict{String,Bool}(
- "WriteCosts" => true,
+ Dict{String, Bool}("WriteCosts" => true,
"WriteCapacity" => true,
"WriteCapacityValue" => true,
"WriteCapacityFactor" => true,
@@ -139,12 +138,10 @@ function default_writeoutput()
"WriteTransmissionLosses" => true,
"WriteVirtualDischarge" => true,
"WriteVREStor" => true,
- "WriteAngles" => true
- )
+ "WriteAngles" => true)
end
function configure_writeoutput(output_settings_path::String, settings::Dict)
-
writeoutput = default_writeoutput()
# don't write files with hourly data if settings["WriteOutputs"] == "annual"
@@ -168,4 +165,4 @@ function configure_writeoutput(output_settings_path::String, settings::Dict)
merge!(writeoutput, model_writeoutput)
end
return writeoutput
-end
\ No newline at end of file
+end
diff --git a/src/configure_solver/configure_cbc.jl b/src/configure_solver/configure_cbc.jl
index afa3f41727..0379fbd43c 100644
--- a/src/configure_solver/configure_cbc.jl
+++ b/src/configure_solver/configure_cbc.jl
@@ -17,26 +17,23 @@ The Cbc optimizer instance is configured with the following default parameters i
"""
function configure_cbc(solver_settings_path::String, optimizer::Any)
-
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
default_settings = Dict("TimeLimit" => 1e-6,
- "logLevel" => 1e-6,
- "maxSolutions" => -1,
- "maxNodes" => -1,
- "allowableGap" => -1,
- "ratioGap" => Inf,
- "threads" => 1,
- )
+ "logLevel" => 1e-6,
+ "maxSolutions" => -1,
+ "maxNodes" => -1,
+ "allowableGap" => -1,
+ "ratioGap" => Inf,
+ "threads" => 1)
attributes = merge(default_settings, solver_settings)
- key_replacement = Dict("TimeLimit" => "seconds",
- )
+ key_replacement = Dict("TimeLimit" => "seconds")
attributes = rename_keys(attributes, key_replacement)
attributes::Dict{String, Any}
- return optimizer_with_attributes(optimizer, attributes...)
+ return optimizer_with_attributes(optimizer, attributes...)
end
diff --git a/src/configure_solver/configure_clp.jl b/src/configure_solver/configure_clp.jl
index 2da048e989..cd5af6e42d 100644
--- a/src/configure_solver/configure_clp.jl
+++ b/src/configure_solver/configure_clp.jl
@@ -21,12 +21,10 @@ The Clp optimizer instance is configured with the following default parameters i
"""
function configure_clp(solver_settings_path::String, optimizer::Any)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
-
- default_settings = Dict{String,Any}(
- "Feasib_Tol" => 1e-7,
+ default_settings = Dict{String, Any}("Feasib_Tol" => 1e-7,
"DualObjectiveLimit" => 1e308,
"MaximumIterations" => 2147483647,
"TimeLimit" => -1.0,
@@ -35,16 +33,14 @@ function configure_clp(solver_settings_path::String, optimizer::Any)
"Method" => 5,
"InfeasibleReturn" => 0,
"Scaling" => 3,
- "Perturbation" => 100,
- )
+ "Perturbation" => 100)
attributes = merge(default_settings, solver_settings)
key_replacement = Dict("Feasib_Tol" => "PrimalTolerance",
- "TimeLimit" => "MaximumSeconds",
- "Pre_Solve" => "PresolveType",
- "Method" => "SolveType",
- )
+ "TimeLimit" => "MaximumSeconds",
+ "Pre_Solve" => "PresolveType",
+ "Method" => "SolveType")
attributes = rename_keys(attributes, key_replacement)
@@ -53,5 +49,5 @@ function configure_clp(solver_settings_path::String, optimizer::Any)
attributes["DualTolerance"] = attributes["PrimalTolerance"]
attributes::Dict{String, Any}
- return optimizer_with_attributes(optimizer, attributes...)
+ return optimizer_with_attributes(optimizer, attributes...)
end
diff --git a/src/configure_solver/configure_cplex.jl b/src/configure_solver/configure_cplex.jl
index fe860fb67d..b320857217 100644
--- a/src/configure_solver/configure_cplex.jl
+++ b/src/configure_solver/configure_cplex.jl
@@ -78,40 +78,35 @@ The optimizer instance is configured with the following default parameters if a
Any other attributes in the settings file (which typically start with `CPX_PARAM_`) will also be passed to the solver.
"""
function configure_cplex(solver_settings_path::String, optimizer::Any)
-
solver_settings = YAML.load(open(solver_settings_path))
solver_settings = convert(Dict{String, Any}, solver_settings)
default_settings = Dict("Feasib_Tol" => 1e-6,
- "Optimal_Tol" => 1e-4,
- "AggFill" => 10,
- "PreDual" => 0,
- "TimeLimit" => 1e+75,
- "MIPGap" => 1e-3,
- "Method" => 0,
- "BarConvTol" => 1e-8,
- "NumericFocus" => 0,
- "BarObjRng" => 1e+75,
- "SolutionType" => 2,
- )
-
+ "Optimal_Tol" => 1e-4,
+ "AggFill" => 10,
+ "PreDual" => 0,
+ "TimeLimit" => 1e+75,
+ "MIPGap" => 1e-3,
+ "Method" => 0,
+ "BarConvTol" => 1e-8,
+ "NumericFocus" => 0,
+ "BarObjRng" => 1e+75,
+ "SolutionType" => 2)
attributes = merge(default_settings, solver_settings)
- key_replacement = Dict(
- "Feasib_Tol" => "CPX_PARAM_EPRHS",
- "Optimal_Tol" => "CPX_PARAM_EPOPT",
- "AggFill" => "CPX_PARAM_AGGFILL",
- "PreDual" => "CPX_PARAM_PREDUAL",
- "TimeLimit" => "CPX_PARAM_TILIM",
- "MIPGap" => "CPX_PARAM_EPGAP",
- "Method" => "CPX_PARAM_LPMETHOD",
- "Pre_Solve" => "CPX_PARAM_PREIND", # https://www.ibm.com/docs/en/icos/12.8.0.0?topic=parameters-presolve-switch
- "BarConvTol" => "CPX_PARAM_BAREPCOMP",
- "NumericFocus" => "CPX_PARAM_NUMERICALEMPHASIS",
- "BarObjRng" => "CPX_PARAM_BAROBJRNG",
- "SolutionType" => "CPX_PARAM_SOLUTIONTYPE",
- )
+ key_replacement = Dict("Feasib_Tol" => "CPX_PARAM_EPRHS",
+ "Optimal_Tol" => "CPX_PARAM_EPOPT",
+ "AggFill" => "CPX_PARAM_AGGFILL",
+ "PreDual" => "CPX_PARAM_PREDUAL",
+ "TimeLimit" => "CPX_PARAM_TILIM",
+ "MIPGap" => "CPX_PARAM_EPGAP",
+ "Method" => "CPX_PARAM_LPMETHOD",
+ "Pre_Solve" => "CPX_PARAM_PREIND", # https://www.ibm.com/docs/en/icos/12.8.0.0?topic=parameters-presolve-switch
+ "BarConvTol" => "CPX_PARAM_BAREPCOMP",
+ "NumericFocus" => "CPX_PARAM_NUMERICALEMPHASIS",
+ "BarObjRng" => "CPX_PARAM_BAROBJRNG",
+ "SolutionType" => "CPX_PARAM_SOLUTIONTYPE")
attributes = rename_keys(attributes, key_replacement)
attributes::Dict{String, Any}
diff --git a/src/configure_solver/configure_gurobi.jl b/src/configure_solver/configure_gurobi.jl
index 2e5c8b7d39..00f132f34f 100644
--- a/src/configure_solver/configure_gurobi.jl
+++ b/src/configure_solver/configure_gurobi.jl
@@ -21,33 +21,30 @@ The Gurobi optimizer instance is configured with the following default parameter
"""
function configure_gurobi(solver_settings_path::String, optimizer::Any)
-
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
default_settings = Dict("Feasib_Tol" => 1e-6,
- "Optimal_Tol" => 1e-4,
- "Pre_Solve" => -1,
- "AggFill" => -1,
- "PreDual" => -1,
- "TimeLimit" => Inf,
- "MIPGap" => 1e-3,
- "Crossover" => -1,
- "Method" => -1,
- "BarConvTol" => 1e-8,
- "NumericFocus" => 0,
- "OutputFlag" => 1
- )
+ "Optimal_Tol" => 1e-4,
+ "Pre_Solve" => -1,
+ "AggFill" => -1,
+ "PreDual" => -1,
+ "TimeLimit" => Inf,
+ "MIPGap" => 1e-3,
+ "Crossover" => -1,
+ "Method" => -1,
+ "BarConvTol" => 1e-8,
+ "NumericFocus" => 0,
+ "OutputFlag" => 1)
attributes = merge(default_settings, solver_settings)
key_replacement = Dict("Feasib_Tol" => "FeasibilityTol",
- "Optimal_Tol" => "OptimalityTol",
- "Pre_Solve" => "Presolve",
- )
+ "Optimal_Tol" => "OptimalityTol",
+ "Pre_Solve" => "Presolve")
attributes = rename_keys(attributes, key_replacement)
attributes::Dict{String, Any}
- return optimizer_with_attributes(optimizer, attributes...)
+ return optimizer_with_attributes(optimizer, attributes...)
end
diff --git a/src/configure_solver/configure_highs.jl b/src/configure_solver/configure_highs.jl
index 3f5e04d811..549395d8dc 100644
--- a/src/configure_solver/configure_highs.jl
+++ b/src/configure_solver/configure_highs.jl
@@ -16,183 +16,14 @@ The HiGHS optimizer instance is configured with the following default parameters
Pre_Solve: choose # Presolve option: "off", "choose" or "on" # [type: string, advanced: false, default: "choose"]
Method: ipm #choose #HiGHS-specific solver settings # Solver option: "simplex", "choose" or "ipm" # [type: string, advanced: false, default: "choose"] In order to run a case when the UCommit is set to 1, i.e. MILP instance, set the Method to choose
- #HiGHS-specific solver settings
- # Parallel option: "off", "choose" or "on"
- # [type: string, advanced: false, default: "choose"]
- parallel: choose
-
- # Compute cost, bound, RHS and basic solution ranging: "off" or "on"
- # [type: string, advanced: false, default: "off"]
- ranging: off
-
- # Limit on cost coefficient: values larger than this will be treated as infinite
- # [type: double, advanced: false, range: [1e+15, inf], default: 1e+20]
- infinite_cost: 1e+20
-
- # Limit on |constraint bound|: values larger than this will be treated as infinite
- # [type: double, advanced: false, range: [1e+15, inf], default: 1e+20]
- infinite_bound: 1e+20
-
- # Lower limit on |matrix entries|: values smaller than this will be treated as zero
- # [type: double, advanced: false, range: [1e-12, inf], default: 1e-09]
- small_matrix_value: 1e-09
-
- # Upper limit on |matrix entries|: values larger than this will be treated as infinite
- # [type: double, advanced: false, range: [1, inf], default: 1e+15]
- large_matrix_value: 1e+15
-
# IPM optimality tolerance
# [type: double, advanced: false, range: [1e-12, inf], default: 1e-08]
ipm_optimality_tolerance: 1e-08
-
- # Objective bound for termination
- # [type: double, advanced: false, range: [-inf, inf], default: inf]
- objective_bound: Inf
-
- # Objective target for termination
- # [type: double, advanced: false, range: [-inf, inf], default: -inf]
- objective_target: -Inf
-
- # random seed used in HiGHS
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 0]
- random_seed: 0
-
- # number of threads used by HiGHS (0: automatic)
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 0]
- threads: 0
-
- # Debugging level in HiGHS
- # [type: HighsInt, advanced: false, range: {0, 3}, default: 0]
- highs_debug_level: 0
-
- # Analysis level in HiGHS
- # [type: HighsInt, advanced: false, range: {0, 63}, default: 0]
- highs_analysis_level: 0
-
- # Strategy for simplex solver 0 => Choose; 1 => Dual (serial); 2 => Dual (PAMI); 3 => Dual (SIP); 4 => Primal
- # [type: HighsInt, advanced: false, range: {0, 4}, default: 1]
- simplex_strategy: 1
-
- # Simplex scaling strategy: off / choose / equilibration / forced equilibration / max value 0 / max value 1 (0/1/2/3/4/5)
- # [type: HighsInt, advanced: false, range: {0, 5}, default: 1]
- simplex_scale_strategy: 1
-
- # Strategy for simplex crash: off / LTSSF / Bixby (0/1/2)
- # [type: HighsInt, advanced: false, range: {0, 9}, default: 0]
- simplex_crash_strategy: 0
-
- # Strategy for simplex dual edge weights: Choose / Dantzig / Devex / Steepest Edge (-1/0/1/2)
- # [type: HighsInt, advanced: false, range: {-1, 2}, default: -1]
- simplex_dual_edge_weight_strategy: -1
-
- # Strategy for simplex primal edge weights: Choose / Dantzig / Devex / Steepest Edge (-1/0/1/2)
- # [type: HighsInt, advanced: false, range: {-1, 2}, default: -1]
- simplex_primal_edge_weight_strategy: -1
-
- # Iteration limit for simplex solver
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 2147483647]
- simplex_iteration_limit: 2147483647
-
- # Limit on the number of simplex UPDATE operations
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 5000]
- simplex_update_limit: 5000
-
- # Iteration limit for IPM solver
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 2147483647]
- ipm_iteration_limit: 2147483647
-
- # Minimum level of concurrency in parallel simplex
- # [type: HighsInt, advanced: false, range: {1, 8}, default: 1]
- simplex_min_concurrency: 1
-
- # Maximum level of concurrency in parallel simplex
- # [type: HighsInt, advanced: false, range: {1, 8}, default: 8]
- simplex_max_concurrency: 8
-
- # Enables or disables solver output
- # [type: bool, advanced: false, range: {false, true}, default: true]
- output_flag: true
-
- # Enables or disables console logging
- # [type: bool, advanced: false, range: {false, true}, default: true]
- log_to_console: true
-
- # Solution file
- # [type: string, advanced: false, default: ""]
- solution_file: ""
-
- # Log file
- # [type: string, advanced: false, default: ""]
- log_file: ""
-
- # Write the primal and dual solution to a file
- # [type: bool, advanced: false, range: {false, true}, default: false]
- write_solution_to_file: false
-
- # Write the solution in style: 0=>Raw (computer-readable); 1=>Pretty (human-readable)
- # [type: HighsInt, advanced: false, range: {0, 2}, default: 0]
- write_solution_style: 0
-
- # Write model file
- # [type: string, advanced: false, default: ""]
- write_model_file: ""
-
- # Write the model to a file
- # [type: bool, advanced: false, range: {false, true}, default: false]
- write_model_to_file: false
-
- # Whether symmetry should be detected
- # [type: bool, advanced: false, range: {false, true}, default: true]
- mip_detect_symmetry: true
-
- # MIP solver max number of nodes
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 2147483647]
- mip_max_nodes: 2147483647
-
- # MIP solver max number of nodes where estimate is above cutoff bound
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 2147483647]
- mip_max_stall_nodes: 2147483647
-
- # MIP solver max number of leave nodes
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 2147483647]
- mip_max_leaves: 2147483647
-
- # limit on the number of improving solutions found to stop the MIP solver prematurely
- # [type: HighsInt, advanced: false, range: {1, 2147483647}, default: 2147483647]
- mip_max_improving_sols: 2147483647
-
- # maximal age of dynamic LP rows before they are removed from the LP relaxation
- # [type: HighsInt, advanced: false, range: {0, 32767}, default: 10]
- mip_lp_age_limit: 10
-
- # maximal age of rows in the cutpool before they are deleted
- # [type: HighsInt, advanced: false, range: {0, 1000}, default: 30]
- mip_pool_age_limit: 30
-
- # soft limit on the number of rows in the cutpool for dynamic age adjustment
- # [type: HighsInt, advanced: false, range: {1, 2147483647}, default: 10000]
- mip_pool_soft_limit: 10000
-
- # minimal number of observations before pseudo costs are considered reliable
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 8]
- mip_pscost_minreliable: 8
-
- # minimal number of entries in the cliquetable before neighborhood queries of the conflict graph use parallel processing
- # [type: HighsInt, advanced: false, range: {0, 2147483647}, default: 100000]
- mip_min_cliquetable_entries_for_parallelism: 100000
-
- # MIP solver reporting level
- # [type: HighsInt, advanced: false, range: {0, 2}, default: 1]
- mip_report_level: 1
-
- # MIP feasibility tolerance
- # [type: double, advanced: false, range: [1e-10, inf], default: 1e-06]
- mip_feasibility_tolerance: 1e-06
-
- # effort spent for MIP heuristics
- # [type: double, advanced: false, range: [0, 1], default: 0.05]
- mip_heuristic_effort: 0.05
-
+
+ # Run the crossover routine for IPX
+ # [type: string, advanced: "on", range: {"off", "on"}, default: "off"]
+ run_crossover: "off"
+
# tolerance on relative gap, |ub-lb|/|ub|, to determine whether optimality has been reached for a MIP instance
# [type: double, advanced: false, range: [0, inf], default: 0.0001]
mip_rel_gap: 0.0001
@@ -200,236 +31,28 @@ The HiGHS optimizer instance is configured with the following default parameters
# tolerance on absolute gap of MIP, |ub-lb|, to determine whether optimality has been reached for a MIP instance
# [type: double, advanced: false, range: [0, inf], default: 1e-06]
mip_abs_gap: 1e-06
-
- # Output development messages: 0 => none; 1 => info; 2 => verbose
- # [type: HighsInt, advanced: true, range: {0, 3}, default: 0]
- log_dev_level: 0
-
- # Run the crossover routine for IPX
- # [type: string, advanced: "on", range: {"off", "on"}, default: "off"]
- run_crossover: "off"
-
- # Allow ModelStatus::kUnboundedOrInfeasible
- # [type: bool, advanced: true, range: {false, true}, default: false]
- allow_unbounded_or_infeasible: false
-
- # Use relaxed implied bounds from presolve
- # [type: bool, advanced: true, range: {false, true}, default: false]
- use_implied_bounds_from_presolve: false
-
- # Prevents LP presolve steps for which postsolve cannot maintain a basis
- # [type: bool, advanced: true, range: {false, true}, default: true]
- lp_presolve_requires_basis_postsolve: true
-
- # Use the free format MPS file reader
- # [type: bool, advanced: true, range: {false, true}, default: true]
- mps_parser_type_free: true
-
- # For multiple N-rows in MPS files: delete rows / delete entries / keep rows (-1/0/1)
- # [type: HighsInt, advanced: true, range: {-1, 1}, default: -1]
- keep_n_rows: -1
-
- # Scaling factor for costs
- # [type: HighsInt, advanced: true, range: {-20, 20}, default: 0]
- cost_scale_factor: 0
-
- # Largest power-of-two factor permitted when scaling the constraint matrix
- # [type: HighsInt, advanced: true, range: {0, 30}, default: 20]
- allowed_matrix_scale_factor: 20
-
- # Largest power-of-two factor permitted when scaling the costs
- # [type: HighsInt, advanced: true, range: {0, 20}, default: 0]
- allowed_cost_scale_factor: 0
-
- # Strategy for permuting before simplex
- # [type: HighsInt, advanced: true, range: {-1, 1}, default: -1]
- simplex_permute_strategy: -1
-
- # Max level of dual simplex cleanup
- # [type: HighsInt, advanced: true, range: {0, 2147483647}, default: 1]
- max_dual_simplex_cleanup_level: 1
-
- # Max level of dual simplex phase 1 cleanup
- # [type: HighsInt, advanced: true, range: {0, 2147483647}, default: 2]
- max_dual_simplex_phase1_cleanup_level: 2
-
- # Strategy for PRICE in simplex
- # [type: HighsInt, advanced: true, range: {0, 3}, default: 3]
- simplex_price_strategy: 3
-
- Strategy for solving unscaled LP in simplex
- [type: HighsInt, advanced: true, range: {0, 2}, default: 1]
- simplex_unscaled_solution_strategy: 1
-
- Perform initial basis condition check in simplex
- [type: bool, advanced: true, range: {false, true}, default: true]
- simplex_initial_condition_check: true
-
- No unnecessary refactorization on simplex rebuild
- [type: bool, advanced: true, range: {false, true}, default: true]
- no_unnecessary_rebuild_refactor: true
-
- Tolerance on initial basis condition in simplex
- [type: double, advanced: true, range: [1, inf], default: 1e+14]
- simplex_initial_condition_tolerance: 1e+14
-
- Tolerance on solution error when considering refactorization on simplex rebuild
- [type: double, advanced: true, range: [-inf, inf], default: 1e-08]
- rebuild_refactor_solution_error_tolerance: 1e-08
-
- Tolerance on dual steepest edge weight errors
- [type: double, advanced: true, range: [0, inf], default: inf]
- dual_steepest_edge_weight_error_tolerance: Inf
-
- Threshold on dual steepest edge weight errors for Devex switch
- [type: double, advanced: true, range: [1, inf], default: 10]
- dual_steepest_edge_weight_log_error_threshold: 10.0
-
- Dual simplex cost perturbation multiplier: 0 => no perturbation
- [type: double, advanced: true, range: [0, inf], default: 1]
- dual_simplex_cost_perturbation_multiplier: 1.0
-
- Primal simplex bound perturbation multiplier: 0 => no perturbation
- [type: double, advanced: true, range: [0, inf], default: 1]
- primal_simplex_bound_perturbation_multiplier: 1.0
-
- Dual simplex pivot growth tolerance
- [type: double, advanced: true, range: [1e-12, inf], default: 1e-09]
- dual_simplex_pivot_growth_tolerance: 1e-09
-
- Matrix factorization pivot threshold for substitutions in presolve
- [type: double, advanced: true, range: [0.0008, 0.5], default: 0.01]
- presolve_pivot_threshold: 0.01
-
- Maximal fillin allowed for substitutions in presolve
- [type: HighsInt, advanced: true, range: {0, 2147483647}, default: 10]
- presolve_substitution_maxfillin: 10
-
- Matrix factorization pivot threshold
- [type: double, advanced: true, range: [0.0008, 0.5], default: 0.1]
- factor_pivot_threshold: 0.1
-
- Matrix factorization pivot tolerance
- [type: double, advanced: true, range: [0, 1], default: 1e-10]
- factor_pivot_tolerance: 1e-10
-
- Tolerance to be satisfied before IPM crossover will start
- [type: double, advanced: true, range: [1e-12, inf], default: 1e-08]
- start_crossover_tolerance: 1e-08
-
- Use original HFactor logic for sparse vs hyper-sparse TRANs
- [type: bool, advanced: true, range: {false, true}, default: true]
- use_original_HFactor_logic: true
-
- Check whether LP is candidate for LiDSE
- [type: bool, advanced: true, range: {false, true}, default: true]
- less_infeasible_DSE_check: true
-
- Use LiDSE if LP has right properties
- [type: bool, advanced: true, range: {false, true}, default: true]
- less_infeasible_DSE_choose_row: true
-
-
"""
function configure_highs(solver_settings_path::String, optimizer::Any)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
-
- default_settings = Dict{String,Any}(
- "Feasib_Tol" => 1e-6,
+ default_settings = Dict{String, Any}("Feasib_Tol" => 1e-6,
"Optimal_Tol" => 1e-4,
"Pre_Solve" => "choose",
"TimeLimit" => Inf,
"Method" => "ipm",
- "parallel" => "choose",
- "ranging" => "off",
- "infinite_cost" => 1e+20,
- "infinite_bound" => 1e+20,
- "small_matrix_value" => 1e-09,
- "large_matrix_value" => 1e+15,
"ipm_optimality_tolerance" => 1e-08,
- "objective_bound" => Inf,
- "objective_target" => -Inf,
- "random_seed" => 0,
- "threads" => 0,
- "highs_debug_level" => 0,
- "highs_analysis_level" => 0,
- "simplex_strategy" => 1,
- "simplex_scale_strategy" => 1,
- "simplex_crash_strategy" => 0,
- "simplex_dual_edge_weight_strategy" => -1,
- "simplex_primal_edge_weight_strategy" => -1,
- "simplex_iteration_limit" => 2147483647,
- "simplex_update_limit" => 5000,
- "ipm_iteration_limit" => 2147483647,
- "simplex_min_concurrency" => 1,
- "simplex_max_concurrency" => 8,
- "output_flag" => true,
- "log_to_console" => true,
- "solution_file" => "",
- "log_file" => "",
- "write_solution_to_file" => false,
- "write_solution_style" => 0,
- "write_model_file" => "",
- "write_model_to_file" => false,
- "mip_detect_symmetry" => true,
- "mip_max_nodes" => 2147483647,
- "mip_max_stall_nodes" => 2147483647,
- "mip_max_leaves" => 2147483647,
- "mip_max_improving_sols" => 2147483647,
- "mip_lp_age_limit" => 10,
- "mip_pool_age_limit" => 30,
- "mip_pool_soft_limit" => 10000,
- "mip_pscost_minreliable" => 8,
- "mip_min_cliquetable_entries_for_parallelism" => 100000,
- "mip_report_level" => 1,
- "mip_feasibility_tolerance" => 1e-06,
- "mip_heuristic_effort" => 0.05,
- "mip_rel_gap" => 0.001,
- "mip_abs_gap" => 1e-06,
- "log_dev_level" => 0,
"run_crossover" => "off",
- "allow_unbounded_or_infeasible" => false,
- "use_implied_bounds_from_presolve" => false,
- "lp_presolve_requires_basis_postsolve" => true,
- "mps_parser_type_free" => true,
- "keep_n_rows" => -1,
- "cost_scale_factor" => 0,
- "allowed_matrix_scale_factor" => 20,
- "allowed_cost_scale_factor" => 0,
- "simplex_permute_strategy" => -1,
- "max_dual_simplex_cleanup_level" => 1,
- "max_dual_simplex_phase1_cleanup_level" => 2,
- "simplex_price_strategy" => 3,
- "simplex_unscaled_solution_strategy" => 1,
- "simplex_initial_condition_check" => true,
- "no_unnecessary_rebuild_refactor" => true,
- "simplex_initial_condition_tolerance" => 1e+14,
- "rebuild_refactor_solution_error_tolerance" => 1e-08,
- "dual_steepest_edge_weight_error_tolerance" => Inf,
- "dual_steepest_edge_weight_log_error_threshold" => 10.0,
- "dual_simplex_cost_perturbation_multiplier" => 1.0,
- "primal_simplex_bound_perturbation_multiplier" => 1.0,
- "dual_simplex_pivot_growth_tolerance" => 1e-09,
- "presolve_pivot_threshold" => 0.01,
- "presolve_substitution_maxfillin" => 10,
- "factor_pivot_threshold" => 0.1,
- "factor_pivot_tolerance" => 1e-10,
- "start_crossover_tolerance" => 1e-08,
- "use_original_HFactor_logic" => true,
- "less_infeasible_DSE_check" => true,
- "less_infeasible_DSE_choose_row" => true,
- )
+ "mip_rel_gap" => 0.001,
+ "mip_abs_gap" => 1e-06)
attributes = merge(default_settings, solver_settings)
key_replacement = Dict("Feasib_Tol" => "primal_feasibility_tolerance",
- "Optimal_Tol" => "dual_feasibility_tolerance",
- "TimeLimit" => "time_limit",
- "Pre_Solve" => "presolve",
- "Method" => "solver",
- )
+ "Optimal_Tol" => "dual_feasibility_tolerance",
+ "TimeLimit" => "time_limit",
+ "Pre_Solve" => "presolve",
+ "Method" => "solver")
attributes = rename_keys(attributes, key_replacement)
diff --git a/src/configure_solver/configure_scip.jl b/src/configure_solver/configure_scip.jl
index 3609657d66..591d36eeb7 100644
--- a/src/configure_solver/configure_scip.jl
+++ b/src/configure_solver/configure_scip.jl
@@ -12,21 +12,18 @@ The SCIP optimizer instance is configured with the following default parameters
"""
function configure_scip(solver_settings_path::String, optimizer::Any)
-
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
default_settings = Dict("Dispverblevel" => 0,
- "limitsgap" => 0.05,
- )
+ "limitsgap" => 0.05)
attributes = merge(default_settings, solver_settings)
key_replacement = Dict("Dispverblevel" => "display_verblevel",
- "limitsgap" => "limits_gap",
- )
+ "limitsgap" => "limits_gap")
attributes = rename_keys(attributes, key_replacement)
attributes::Dict{String, Any}
- return optimizer_with_attributes(optimizer, attributes...)
+ return optimizer_with_attributes(optimizer, attributes...)
end
diff --git a/src/configure_solver/configure_solver.jl b/src/configure_solver/configure_solver.jl
index 96a8bb2e02..76cf01cca7 100644
--- a/src/configure_solver/configure_solver.jl
+++ b/src/configure_solver/configure_solver.jl
@@ -6,7 +6,6 @@ function infer_solver(optimizer::Any)
return lowercase(string(parentmodule(optimizer)))
end
-
@doc raw"""
configure_solver(solver_settings_path::String, optimizer::Any)
@@ -24,15 +23,13 @@ function configure_solver(solver_settings_path::String, optimizer::Any)
solver_name = infer_solver(optimizer)
path = joinpath(solver_settings_path, solver_name * "_settings.yml")
- configure_functions = Dict(
- "highs" => configure_highs,
+ configure_functions = Dict("highs" => configure_highs,
"gurobi" => configure_gurobi,
"cplex" => configure_cplex,
"clp" => configure_clp,
"cbc" => configure_cbc,
- "scip" => configure_scip,
- )
-
+ "scip" => configure_scip)
+
return configure_functions[solver_name](path, optimizer)
end
@@ -50,7 +47,8 @@ function rename_keys(attributes::Dict, new_key_names::Dict)
else
new_key = new_key_names[old_key]
if haskey(attributes, new_key)
- @error "Colliding keys: '$old_key' needs to be renamed to '$new_key' but '$new_key' already exists in", attributes
+ @error "Colliding keys: '$old_key' needs to be renamed to '$new_key' but '$new_key' already exists in",
+ attributes
end
end
updated_attributes[new_key] = value
diff --git a/src/load_inputs/load_cap_reserve_margin.jl b/src/load_inputs/load_cap_reserve_margin.jl
index 646385d078..0a652bc78f 100644
--- a/src/load_inputs/load_cap_reserve_margin.jl
+++ b/src/load_inputs/load_cap_reserve_margin.jl
@@ -5,12 +5,12 @@ Read input parameters related to planning reserve margin constraints
"""
function load_cap_reserve_margin!(setup::Dict, path::AbstractString, inputs::Dict)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
-
+
filename = "Capacity_reserve_margin_slack.csv"
if isfile(joinpath(path, filename))
df = load_dataframe(joinpath(path, filename))
inputs["dfCapRes_slack"] = df
- inputs["dfCapRes_slack"][!,:PriceCap] ./= scale_factor # Million $/GW if scaled, $/MW if not scaled
+ inputs["dfCapRes_slack"][!, :PriceCap] ./= scale_factor # Million $/GW if scaled, $/MW if not scaled
end
filename = "Capacity_reserve_margin.csv"
diff --git a/src/load_inputs/load_co2_cap.jl b/src/load_inputs/load_co2_cap.jl
index 0c93f3c199..08e6802a0a 100644
--- a/src/load_inputs/load_co2_cap.jl
+++ b/src/load_inputs/load_co2_cap.jl
@@ -5,14 +5,14 @@ Read input parameters related to CO$_2$ emissions cap constraints
"""
function load_co2_cap!(setup::Dict, path::AbstractString, inputs::Dict)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
-
+
filename = "CO2_cap_slack.csv"
if isfile(joinpath(path, filename))
df = load_dataframe(joinpath(path, filename))
inputs["dfCO2Cap_slack"] = df
- inputs["dfCO2Cap_slack"][!,:PriceCap] ./= scale_factor # Million $/kton if scaled, $/ton if not scaled
- end
-
+ inputs["dfCO2Cap_slack"][!, :PriceCap] ./= scale_factor # Million $/kton if scaled, $/ton if not scaled
+ end
+
filename = "CO2_cap.csv"
df = load_dataframe(joinpath(path, filename))
@@ -21,7 +21,6 @@ function load_co2_cap!(setup::Dict, path::AbstractString, inputs::Dict)
inputs["dfCO2CapZones"] = mat
inputs["NCO2Cap"] = size(mat, 2)
-
# Emission limits
if setup["CO2Cap"] == 1
# CO2 emissions cap in mass
diff --git a/src/load_inputs/load_dataframe.jl b/src/load_inputs/load_dataframe.jl
index b6ca5ef552..bd212b75ef 100644
--- a/src/load_inputs/load_dataframe.jl
+++ b/src/load_inputs/load_dataframe.jl
@@ -64,7 +64,10 @@ function load_dataframe(dir::AbstractString, basenames::Vector{String})::DataFra
target = look_for_file_with_alternate_case(dir, base)
# admonish
if target != FILENOTFOUND
- Base.depwarn("""The filename '$target' is deprecated. '$best_basename' is preferred.""", :load_dataframe, force=true)
+ Base.depwarn(
+ """The filename '$target' is deprecated. '$best_basename' is preferred.""",
+ :load_dataframe,
+ force = true)
return load_dataframe_from_file(joinpath(dir, target))
end
end
@@ -107,7 +110,7 @@ end
function keep_duplicated_entries!(s, uniques)
for u in uniques
- deleteat!(s, first(findall(x->x==u, s)))
+ deleteat!(s, first(findall(x -> x == u, s)))
end
return s
end
@@ -126,23 +129,23 @@ end
function load_dataframe_from_file(path)::DataFrame
check_for_duplicate_keys(path)
- CSV.read(path, DataFrame, header=1)
+ CSV.read(path, DataFrame, header = 1)
end
function find_matrix_columns_in_dataframe(df::DataFrame,
columnprefix::AbstractString;
- prefixseparator='_')::Vector{Int}
+ prefixseparator = '_')::Vector{Int}
all_columns = names(df)
# 2 is the length of the '_' connector plus one for indexing
- get_integer_part(c) = tryparse(Int, c[length(columnprefix)+2:end])
+ get_integer_part(c) = tryparse(Int, c[(length(columnprefix) + 2):end])
# if prefix is "ESR", the column name should be like "ESR_1"
function is_of_this_column_type(c)
startswith(c, columnprefix) &&
- length(c) >= length(columnprefix) + 2 &&
- c[length(columnprefix) + 1] == prefixseparator &&
- !isnothing(get_integer_part(c))
+ length(c) >= length(columnprefix) + 2 &&
+ c[length(columnprefix) + 1] == prefixseparator &&
+ !isnothing(get_integer_part(c))
end
columns = filter(is_of_this_column_type, all_columns)
@@ -164,11 +167,13 @@ ESR_1, other_thing, ESR_3, ESR_2,
0.4, 2, 0.6, 0.5,
```
"""
-function extract_matrix_from_dataframe(df::DataFrame, columnprefix::AbstractString; prefixseparator='_')
+function extract_matrix_from_dataframe(df::DataFrame,
+ columnprefix::AbstractString;
+ prefixseparator = '_')
all_columns = names(df)
columnnumbers = find_matrix_columns_in_dataframe(df,
- columnprefix,
- prefixseparator=prefixseparator)
+ columnprefix,
+ prefixseparator = prefixseparator)
if length(columnnumbers) == 0
msg = """an input dataframe with columns $all_columns was searched for
@@ -188,10 +193,14 @@ function extract_matrix_from_dataframe(df::DataFrame, columnprefix::AbstractStri
Matrix(dropmissing(df[:, sorted_columns]))
end
-function extract_matrix_from_resources(rs::Vector{T}, columnprefix::AbstractString, default=0.0) where T<:AbstractResource
+function extract_matrix_from_resources(rs::Vector{T},
+ columnprefix::AbstractString,
+ default = 0.0) where {T <: AbstractResource}
# attributes starting with columnprefix with a numeric suffix
- attributes_n = [attr for attr in string.(attributes(rs[1])) if startswith(attr, columnprefix)]
+ attributes_n = [attr
+ for attr in string.(attributes(rs[1]))
+ if startswith(attr, columnprefix)]
# sort the attributes by the numeric suffix
sort!(attributes_n, by = x -> parse(Int, split(x, "_")[end]))
@@ -216,7 +225,7 @@ Check that the dataframe has all the required columns.
- `df_name::AbstractString`: the name of the dataframe, for error messages
- `required_cols::Vector{AbstractString}`: the names of the required columns
"""
-function validate_df_cols(df::DataFrame, df_name::AbstractString, required_cols)
+function validate_df_cols(df::DataFrame, df_name::AbstractString, required_cols)
for col in required_cols
if col ∉ names(df)
error("$df_name data file is missing column $col")
diff --git a/src/load_inputs/load_demand_data.jl b/src/load_inputs/load_demand_data.jl
index 509d0216bb..52c5bd7bf2 100644
--- a/src/load_inputs/load_demand_data.jl
+++ b/src/load_inputs/load_demand_data.jl
@@ -3,14 +3,17 @@ function get_demand_dataframe(path)
deprecated_synonym = "Load_data.csv"
df = load_dataframe(path, [filename, deprecated_synonym])
# update column names
- old_columns = find_matrix_columns_in_dataframe(df, DEMAND_COLUMN_PREFIX_DEPRECATED()[1:end-1],
- prefixseparator='z')
- old_column_symbols = Symbol.(DEMAND_COLUMN_PREFIX_DEPRECATED()*string(i) for i in old_columns)
+ old_columns = find_matrix_columns_in_dataframe(df,
+ DEMAND_COLUMN_PREFIX_DEPRECATED()[1:(end - 1)],
+ prefixseparator = 'z')
+ old_column_symbols = Symbol.(DEMAND_COLUMN_PREFIX_DEPRECATED() * string(i)
+ for i in old_columns)
if length(old_column_symbols) > 0
pref_prefix = DEMAND_COLUMN_PREFIX()
dep_prefix = DEMAND_COLUMN_PREFIX_DEPRECATED()
@info "$dep_prefix is deprecated. Use $pref_prefix."
- new_column_symbols = Symbol.(DEMAND_COLUMN_PREFIX()*string(i) for i in old_columns)
+ new_column_symbols = Symbol.(DEMAND_COLUMN_PREFIX() * string(i)
+ for i in old_columns)
rename!(df, Dict(old_column_symbols .=> new_column_symbols))
end
return df
@@ -26,7 +29,7 @@ Read input parameters related to electricity demand (load)
"""
function load_demand_data!(setup::Dict, path::AbstractString, inputs::Dict)
- # Load related inputs
+ # Load related inputs
TDR_directory = joinpath(path, setup["TimeDomainReductionFolder"])
# if TDR is used, my_dir = TDR_directory, else my_dir = "system"
my_dir = get_systemfiles_path(setup, TDR_directory, path)
@@ -35,17 +38,17 @@ function load_demand_data!(setup::Dict, path::AbstractString, inputs::Dict)
as_vector(col::Symbol) = collect(skipmissing(demand_in[!, col]))
- # Number of time steps (periods)
+ # Number of time steps (periods)
T = length(as_vector(:Time_Index))
- # Number of demand curtailment/lost load segments
+ # Number of demand curtailment/lost load segments
SEG = length(as_vector(:Demand_Segment))
- ## Set indices for internal use
+ ## Set indices for internal use
inputs["T"] = T
inputs["SEG"] = SEG
- Z = inputs["Z"] # Number of zones
+ Z = inputs["Z"] # Number of zones
- inputs["omega"] = zeros(Float64, T) # weights associated with operational sub-period in the model - sum of weight = 8760
+ inputs["omega"] = zeros(Float64, T) # weights associated with operational sub-period in the model - sum of weight = 8760
# Weights for each period - assumed same weights for each sub-period within a period
inputs["Weights"] = as_vector(:Sub_Weights) # Weights each period
@@ -56,30 +59,31 @@ function load_demand_data!(setup::Dict, path::AbstractString, inputs::Dict)
# Creating sub-period weights from weekly weights
for w in 1:inputs["REP_PERIOD"]
for h in 1:inputs["H"]
- t = inputs["H"]*(w-1)+h
- inputs["omega"][t] = inputs["Weights"][w]/inputs["H"]
+ t = inputs["H"] * (w - 1) + h
+ inputs["omega"][t] = inputs["Weights"][w] / inputs["H"]
end
end
- # Create time set steps indicies
- inputs["hours_per_subperiod"] = div.(T,inputs["REP_PERIOD"]) # total number of hours per subperiod
- hours_per_subperiod = inputs["hours_per_subperiod"] # set value for internal use
+ # Create time set steps indicies
+ inputs["hours_per_subperiod"] = div.(T, inputs["REP_PERIOD"]) # total number of hours per subperiod
+ hours_per_subperiod = inputs["hours_per_subperiod"] # set value for internal use
- inputs["START_SUBPERIODS"] = 1:hours_per_subperiod:T # set of indexes for all time periods that start a subperiod (e.g. sample day/week)
- inputs["INTERIOR_SUBPERIODS"] = setdiff(1:T, inputs["START_SUBPERIODS"]) # set of indexes for all time periods that do not start a subperiod
+ inputs["START_SUBPERIODS"] = 1:hours_per_subperiod:T # set of indexes for all time periods that start a subperiod (e.g. sample day/week)
+ inputs["INTERIOR_SUBPERIODS"] = setdiff(1:T, inputs["START_SUBPERIODS"]) # set of indexes for all time periods that do not start a subperiod
- # Demand in MW for each zone
+ # Demand in MW for each zone
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
# Max value of non-served energy
inputs["Voll"] = as_vector(:Voll) / scale_factor # convert from $/MWh $ million/GWh (assuming objective is divided by 1000)
# Demand in MW
inputs["pD"] = extract_matrix_from_dataframe(demand_in,
- DEMAND_COLUMN_PREFIX()[1:end-1],
- prefixseparator='z') / scale_factor
+ DEMAND_COLUMN_PREFIX()[1:(end - 1)],
+ prefixseparator = 'z') / scale_factor
- # Cost of non-served energy/demand curtailment
+ # Cost of non-served energy/demand curtailment
# Cost of each segment reported as a fraction of value of non-served energy - scaled implicitly
- inputs["pC_D_Curtail"] = as_vector(:Cost_of_Demand_Curtailment_per_MW) * inputs["Voll"][1]
+ inputs["pC_D_Curtail"] = as_vector(:Cost_of_Demand_Curtailment_per_MW) *
+ inputs["Voll"][1]
# Maximum hourly demand curtailable as % of the max demand (for each segment)
inputs["pMax_D_Curtail"] = as_vector(:Max_Demand_Curtailment)
@@ -106,13 +110,13 @@ function validatetimebasis(inputs::Dict)
expected_length_2 = H * number_of_representative_periods
check_equal = [T,
- demand_length,
- generators_variability_length,
- fuel_costs_length,
- expected_length_1,
- expected_length_2]
+ demand_length,
+ generators_variability_length,
+ fuel_costs_length,
+ expected_length_1,
+ expected_length_2]
- allequal(x) = all(y->y==x[1], x)
+ allequal(x) = all(y -> y == x[1], x)
ok = allequal(check_equal)
if ~ok
@@ -160,7 +164,6 @@ This function prevents TimeDomainReduction from running on a case which
already has more than one Representative Period or has more than one Sub_Weight specified.
"""
function prevent_doubled_timedomainreduction(path::AbstractString)
-
demand_in = get_demand_dataframe(path)
as_vector(col::Symbol) = collect(skipmissing(demand_in[!, col]))
representative_periods = convert(Int16, as_vector(:Rep_Periods)[1])
@@ -174,5 +177,4 @@ function prevent_doubled_timedomainreduction(path::AbstractString)
and the number of subperiod weight entries (:Sub_Weights) is ($num_sub_weights).
Each of these must be 1: only a single period can have TimeDomainReduction applied.""")
end
-
end
diff --git a/src/load_inputs/load_energy_share_requirement.jl b/src/load_inputs/load_energy_share_requirement.jl
index af6ef9b786..02b96fe7e7 100644
--- a/src/load_inputs/load_energy_share_requirement.jl
+++ b/src/load_inputs/load_energy_share_requirement.jl
@@ -11,9 +11,9 @@ function load_energy_share_requirement!(setup::Dict, path::AbstractString, input
if isfile(joinpath(path, filename))
df = load_dataframe(joinpath(path, filename))
inputs["dfESR_slack"] = df
- inputs["dfESR_slack"][!,:PriceCap] ./= scale_factor # million $/GWh if scaled, $/MWh if not scaled
- end
-
+ inputs["dfESR_slack"][!, :PriceCap] ./= scale_factor # million $/GWh if scaled, $/MWh if not scaled
+ end
+
filename = "Energy_share_requirement.csv"
df = load_dataframe(joinpath(path, filename))
mat = extract_matrix_from_dataframe(df, "ESR")
diff --git a/src/load_inputs/load_fuels_data.jl b/src/load_inputs/load_fuels_data.jl
index aa64ff43fa..61b0ff2f0f 100644
--- a/src/load_inputs/load_fuels_data.jl
+++ b/src/load_inputs/load_fuels_data.jl
@@ -9,7 +9,7 @@ function load_fuels_data!(setup::Dict, path::AbstractString, inputs::Dict)
TDR_directory = joinpath(path, setup["TimeDomainReductionFolder"])
# if TDR is used, my_dir = TDR_directory, else my_dir = "system"
my_dir = get_systemfiles_path(setup, TDR_directory, path)
-
+
filename = "Fuels_data.csv"
fuels_in = load_dataframe(joinpath(my_dir, filename))
@@ -26,11 +26,11 @@ function load_fuels_data!(setup::Dict, path::AbstractString, inputs::Dict)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- for i = 1:length(fuels)
- # fuel cost is in $/MMBTU w/o scaling, $/Billon BTU w/ scaling
- fuel_costs[fuels[i]] = costs[:,i] / scale_factor
- # No need to scale fuel_CO2, fuel_CO2 is ton/MMBTU or kton/Billion BTU
- fuel_CO2[fuels[i]] = CO2_content[i]
+ for i in 1:length(fuels)
+ # fuel cost is in $/MMBTU w/o scaling, $/Billon BTU w/ scaling
+ fuel_costs[fuels[i]] = costs[:, i] / scale_factor
+ # No need to scale fuel_CO2, fuel_CO2 is ton/MMBTU or kton/Billion BTU
+ fuel_CO2[fuels[i]] = CO2_content[i]
end
inputs["fuels"] = fuels
diff --git a/src/load_inputs/load_generators_variability.jl b/src/load_inputs/load_generators_variability.jl
index 1ca02162ec..99294bffed 100644
--- a/src/load_inputs/load_generators_variability.jl
+++ b/src/load_inputs/load_generators_variability.jl
@@ -5,11 +5,11 @@ Read input parameters related to hourly maximum capacity factors for generators,
"""
function load_generators_variability!(setup::Dict, path::AbstractString, inputs::Dict)
- # Hourly capacity factors
+ # Hourly capacity factors
TDR_directory = joinpath(path, setup["TimeDomainReductionFolder"])
# if TDR is used, my_dir = TDR_directory, else my_dir = "system"
my_dir = get_systemfiles_path(setup, TDR_directory, path)
-
+
filename = "Generators_variability.csv"
gen_var = load_dataframe(joinpath(my_dir, filename))
@@ -23,11 +23,12 @@ function load_generators_variability!(setup::Dict, path::AbstractString, inputs:
end
end
- # Reorder DataFrame to R_ID order
- select!(gen_var, [:Time_Index; Symbol.(all_resources) ])
+ # Reorder DataFrame to R_ID order
+ select!(gen_var, [:Time_Index; Symbol.(all_resources)])
- # Maximum power output and variability of each energy resource
- inputs["pP_Max"] = transpose(Matrix{Float64}(gen_var[1:inputs["T"],2:(inputs["G"]+1)]))
+ # Maximum power output and variability of each energy resource
+ inputs["pP_Max"] = transpose(Matrix{Float64}(gen_var[1:inputs["T"],
+ 2:(inputs["G"] + 1)]))
- println(filename * " Successfully Read!")
+ println(filename * " Successfully Read!")
end
diff --git a/src/load_inputs/load_inputs.jl b/src/load_inputs/load_inputs.jl
index 9ef747a0ed..e17a390ced 100644
--- a/src/load_inputs/load_inputs.jl
+++ b/src/load_inputs/load_inputs.jl
@@ -9,94 +9,95 @@ path - string path to working directory
returns: Dict (dictionary) object containing all data inputs
"""
-function load_inputs(setup::Dict,path::AbstractString)
-
- ## Read input files
- println("Reading Input CSV Files")
- ## input paths
- system_path = joinpath(path, setup["SystemFolder"])
- resources_path = joinpath(path, setup["ResourcesFolder"])
- policies_path = joinpath(path, setup["PoliciesFolder"])
- ## Declare Dict (dictionary) object used to store parameters
- inputs = Dict()
- # Read input data about power network topology, operating and expansion attributes
- if isfile(joinpath(system_path,"Network.csv"))
- network_var = load_network_data!(setup, system_path, inputs)
- else
- inputs["Z"] = 1
- inputs["L"] = 0
- end
-
- # Read temporal-resolved load data, and clustering information if relevant
- load_demand_data!(setup, path, inputs)
- # Read fuel cost data, including time-varying fuel costs
- load_fuels_data!(setup, path, inputs)
- # Read in generator/resource related inputs
- load_resources_data!(inputs, setup, path, resources_path)
- # Read in generator/resource availability profiles
- load_generators_variability!(setup, path, inputs)
+function load_inputs(setup::Dict, path::AbstractString)
+
+ ## Read input files
+ println("Reading Input CSV Files")
+ ## input paths
+ system_path = joinpath(path, setup["SystemFolder"])
+ resources_path = joinpath(path, setup["ResourcesFolder"])
+ policies_path = joinpath(path, setup["PoliciesFolder"])
+ ## Declare Dict (dictionary) object used to store parameters
+ inputs = Dict()
+ # Read input data about power network topology, operating and expansion attributes
+ if isfile(joinpath(system_path, "Network.csv"))
+ network_var = load_network_data!(setup, system_path, inputs)
+ else
+ inputs["Z"] = 1
+ inputs["L"] = 0
+ end
+
+ # Read temporal-resolved load data, and clustering information if relevant
+ load_demand_data!(setup, path, inputs)
+ # Read fuel cost data, including time-varying fuel costs
+ load_fuels_data!(setup, path, inputs)
+ # Read in generator/resource related inputs
+ load_resources_data!(inputs, setup, path, resources_path)
+ # Read in generator/resource availability profiles
+ load_generators_variability!(setup, path, inputs)
validatetimebasis(inputs)
- if setup["CapacityReserveMargin"]==1
- load_cap_reserve_margin!(setup, policies_path, inputs)
- if inputs["Z"] >1
- load_cap_reserve_margin_trans!(setup, inputs, network_var)
- end
- end
+ if setup["CapacityReserveMargin"] == 1
+ load_cap_reserve_margin!(setup, policies_path, inputs)
+ if inputs["Z"] > 1
+ load_cap_reserve_margin_trans!(setup, inputs, network_var)
+ end
+ end
- # Read in general configuration parameters for operational reserves (resource-specific reserve parameters are read in load_resources_data)
- if setup["OperationalReserves"]==1
- load_operational_reserves!(setup, system_path, inputs)
- end
+ # Read in general configuration parameters for operational reserves (resource-specific reserve parameters are read in load_resources_data)
+ if setup["OperationalReserves"] == 1
+ load_operational_reserves!(setup, system_path, inputs)
+ end
- if setup["MinCapReq"] == 1
- load_minimum_capacity_requirement!(policies_path, inputs, setup)
- end
+ if setup["MinCapReq"] == 1
+ load_minimum_capacity_requirement!(policies_path, inputs, setup)
+ end
- if setup["MaxCapReq"] == 1
- load_maximum_capacity_requirement!(policies_path, inputs, setup)
- end
+ if setup["MaxCapReq"] == 1
+ load_maximum_capacity_requirement!(policies_path, inputs, setup)
+ end
- if setup["EnergyShareRequirement"]==1
- load_energy_share_requirement!(setup, policies_path, inputs)
- end
+ if setup["EnergyShareRequirement"] == 1
+ load_energy_share_requirement!(setup, policies_path, inputs)
+ end
- if setup["CO2Cap"] >= 1
- load_co2_cap!(setup, policies_path, inputs)
- end
+ if setup["CO2Cap"] >= 1
+ load_co2_cap!(setup, policies_path, inputs)
+ end
- if !isempty(inputs["VRE_STOR"])
- load_vre_stor_variability!(setup, path, inputs)
- end
+ if !isempty(inputs["VRE_STOR"])
+ load_vre_stor_variability!(setup, path, inputs)
+ end
- # Read in mapping of modeled periods to representative periods
- if is_period_map_necessary(inputs) && is_period_map_exist(setup, path)
- load_period_map!(setup, path, inputs)
- end
+ # Read in mapping of modeled periods to representative periods
+ if is_period_map_necessary(inputs) && is_period_map_exist(setup, path)
+ load_period_map!(setup, path, inputs)
+ end
- # Virtual charge discharge cost
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- inputs["VirtualChargeDischargeCost"] = setup["VirtualChargeDischargeCost"] / scale_factor
+ # Virtual charge discharge cost
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ inputs["VirtualChargeDischargeCost"] = setup["VirtualChargeDischargeCost"] /
+ scale_factor
- println("CSV Files Successfully Read In From $path")
+ println("CSV Files Successfully Read In From $path")
- return inputs
+ return inputs
end
function is_period_map_necessary(inputs::Dict)
- multiple_rep_periods = inputs["REP_PERIOD"] > 1
- has_stor_lds = !isempty(inputs["STOR_LONG_DURATION"])
- has_hydro_lds = !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
- has_vre_stor_lds = !isempty(inputs["VRE_STOR"]) && !isempty(inputs["VS_LDS"])
+ multiple_rep_periods = inputs["REP_PERIOD"] > 1
+ has_stor_lds = !isempty(inputs["STOR_LONG_DURATION"])
+ has_hydro_lds = !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
+ has_vre_stor_lds = !isempty(inputs["VRE_STOR"]) && !isempty(inputs["VS_LDS"])
multiple_rep_periods && (has_stor_lds || has_hydro_lds || has_vre_stor_lds)
end
function is_period_map_exist(setup::Dict, path::AbstractString)
- filename = "Period_map.csv"
- is_in_system_dir = isfile(joinpath(path, setup["SystemFolder"], filename))
- is_in_TDR_dir = isfile(joinpath(path, setup["TimeDomainReductionFolder"], filename))
- is_in_system_dir || is_in_TDR_dir
+ filename = "Period_map.csv"
+ is_in_system_dir = isfile(joinpath(path, setup["SystemFolder"], filename))
+ is_in_TDR_dir = isfile(joinpath(path, setup["TimeDomainReductionFolder"], filename))
+ is_in_system_dir || is_in_TDR_dir
end
"""
@@ -115,17 +116,21 @@ Parameters:
Returns:
- String: The directory path based on the setup parameters.
"""
-function get_systemfiles_path(setup::Dict, TDR_directory::AbstractString, path::AbstractString)
+function get_systemfiles_path(setup::Dict,
+ TDR_directory::AbstractString,
+ path::AbstractString)
if setup["TimeDomainReduction"] == 1 && time_domain_reduced_files_exist(TDR_directory)
return TDR_directory
else
- # If TDR is not used, then use the "system" directory specified in the setup
+ # If TDR is not used, then use the "system" directory specified in the setup
return joinpath(path, setup["SystemFolder"])
end
end
abstract type AbstractLogMsg end
-struct ErrorMsg <: AbstractLogMsg msg::String end
-struct WarnMsg <: AbstractLogMsg msg::String end
-
-
+struct ErrorMsg <: AbstractLogMsg
+ msg::String
+end
+struct WarnMsg <: AbstractLogMsg
+ msg::String
+end
diff --git a/src/load_inputs/load_minimum_capacity_requirement.jl b/src/load_inputs/load_minimum_capacity_requirement.jl
index fad1fbd165..d30f2d6425 100644
--- a/src/load_inputs/load_minimum_capacity_requirement.jl
+++ b/src/load_inputs/load_minimum_capacity_requirement.jl
@@ -6,14 +6,14 @@ Read input parameters related to minimum capacity requirement constraints (e.g.
function load_minimum_capacity_requirement!(path::AbstractString, inputs::Dict, setup::Dict)
filename = "Minimum_capacity_requirement.csv"
df = load_dataframe(joinpath(path, filename))
- NumberOfMinCapReqs = length(df[!,:MinCapReqConstraint])
+ NumberOfMinCapReqs = length(df[!, :MinCapReqConstraint])
inputs["NumberOfMinCapReqs"] = NumberOfMinCapReqs
- inputs["MinCapReq"] = df[!,:Min_MW]
+ inputs["MinCapReq"] = df[!, :Min_MW]
if setup["ParameterScale"] == 1
inputs["MinCapReq"] /= ModelScalingFactor # Convert to GW
end
if "PriceCap" in names(df)
- inputs["MinCapPriceCap"] = df[!,:PriceCap]
+ inputs["MinCapPriceCap"] = df[!, :PriceCap]
if setup["ParameterScale"] == 1
inputs["MinCapPriceCap"] /= ModelScalingFactor # Convert to million $/GW
end
diff --git a/src/load_inputs/load_multistage_data.jl b/src/load_inputs/load_multistage_data.jl
index edd5021839..2a51aaf2e6 100644
--- a/src/load_inputs/load_multistage_data.jl
+++ b/src/load_inputs/load_multistage_data.jl
@@ -15,7 +15,7 @@ end
function validate_multistage_data!(multistage_df::DataFrame)
# cols that the user must provide
- required_cols = ("lifetime","capital_recovery_period")
+ required_cols = ("lifetime", "capital_recovery_period")
# check that all required columns are present
for col in required_cols
if col ∉ names(multistage_df)
@@ -26,17 +26,16 @@ end
function scale_multistage_data!(multistage_in::DataFrame, scale_factor::Float64)
columns_to_scale = [:min_retired_cap_mw, # to GW
- :min_retired_charge_cap_mw, # to GW
- :min_retired_energy_cap_mw, # to GW
-
- :min_retired_cap_inverter_mw,
- :min_retired_cap_solar_mw,
- :min_retired_cap_wind_mw,
- :min_retired_cap_charge_dc_mw,
- :min_retired_cap_charge_ac_mw,
- :min_retired_cap_discharge_dc_mw,
- :min_retired_cap_discharge_ac_mw,
- ]
+ :min_retired_charge_cap_mw, # to GW
+ :min_retired_energy_cap_mw, # to GW
+ :min_retired_cap_inverter_mw,
+ :min_retired_cap_solar_mw,
+ :min_retired_cap_wind_mw,
+ :min_retired_cap_charge_dc_mw,
+ :min_retired_cap_charge_ac_mw,
+ :min_retired_cap_discharge_dc_mw,
+ :min_retired_cap_discharge_ac_mw
+ ]
scale_columns!(multistage_in, columns_to_scale, scale_factor)
return nothing
-end
\ No newline at end of file
+end
diff --git a/src/load_inputs/load_network_data.jl b/src/load_inputs/load_network_data.jl
index 8116eaf02f..ac7f2b1c8c 100644
--- a/src/load_inputs/load_network_data.jl
+++ b/src/load_inputs/load_network_data.jl
@@ -4,7 +4,6 @@
Function for reading input parameters related to the electricity transmission network
"""
function load_network_data!(setup::Dict, path::AbstractString, inputs_nw::Dict)
-
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
filename = "Network.csv"
@@ -40,42 +39,46 @@ function load_network_data!(setup::Dict, path::AbstractString, inputs_nw::Dict)
if setup["DC_OPF"] == 1
if setup["NetworkExpansion"] == 1
@warn("Because the DC_OPF flag is active, GenX will not allow any transmission capacity expansion. Set the DC_OPF flag to 0 if you want to optimize tranmission capacity expansion.")
- setup["NetworkExpansion"] = 0;
+ setup["NetworkExpansion"] = 0
end
println("Reading DC-OPF values...")
# Transmission line voltage (in kV)
line_voltage_kV = to_floats(:Line_Voltage_kV)
# Transmission line reactance (in Ohms)
- line_reactance_Ohms = to_floats(:Line_Reactance_Ohms)
+ line_reactance_Ohms = to_floats(:Line_Reactance_Ohms)
# Line angle limit (in radians)
inputs_nw["Line_Angle_Limit"] = to_floats(:Angle_Limit_Rad)
# DC-OPF coefficient for each line (in MW when not scaled, in GW when scaled)
# MW = (kV)^2/Ohms
- inputs_nw["pDC_OPF_coeff"] = ((line_voltage_kV.^2)./line_reactance_Ohms)/scale_factor
+ inputs_nw["pDC_OPF_coeff"] = ((line_voltage_kV .^ 2) ./ line_reactance_Ohms) /
+ scale_factor
end
# Maximum possible flow after reinforcement for use in linear segments of piecewise approximation
inputs_nw["pTrans_Max_Possible"] = inputs_nw["pTrans_Max"]
- if setup["NetworkExpansion"]==1
+ if setup["NetworkExpansion"] == 1
# Read between zone network reinforcement costs per peak MW of capacity added
- inputs_nw["pC_Line_Reinforcement"] = to_floats(:Line_Reinforcement_Cost_per_MWyr) / scale_factor # convert to million $/GW/yr with objective function in millions
+ inputs_nw["pC_Line_Reinforcement"] = to_floats(:Line_Reinforcement_Cost_per_MWyr) /
+ scale_factor # convert to million $/GW/yr with objective function in millions
# Maximum reinforcement allowed in MW
#NOTE: values <0 indicate no expansion possible
- inputs_nw["pMax_Line_Reinforcement"] = map(x->max(0, x), to_floats(:Line_Max_Reinforcement_MW)) / scale_factor # convert to GW
+ inputs_nw["pMax_Line_Reinforcement"] = map(x -> max(0, x),
+ to_floats(:Line_Max_Reinforcement_MW)) / scale_factor # convert to GW
inputs_nw["pTrans_Max_Possible"] += inputs_nw["pMax_Line_Reinforcement"]
end
# Multi-Stage
if setup["MultiStage"] == 1
# Weighted Average Cost of Capital for Transmission Expansion
- if setup["NetworkExpansion"]>=1
- inputs_nw["transmission_WACC"]= to_floats(:WACC)
- inputs_nw["Capital_Recovery_Period_Trans"]= to_floats(:Capital_Recovery_Period)
+ if setup["NetworkExpansion"] >= 1
+ inputs_nw["transmission_WACC"] = to_floats(:WACC)
+ inputs_nw["Capital_Recovery_Period_Trans"] = to_floats(:Capital_Recovery_Period)
end
# Max Flow Possible on Each Line
- inputs_nw["pLine_Max_Flow_Possible_MW"] = to_floats(:Line_Max_Flow_Possible_MW) / scale_factor # Convert to GW
+ inputs_nw["pLine_Max_Flow_Possible_MW"] = to_floats(:Line_Max_Flow_Possible_MW) /
+ scale_factor # Convert to GW
end
# Transmission line (between zone) loss coefficient (resistance/voltage^2)
@@ -84,17 +87,18 @@ function load_network_data!(setup::Dict, path::AbstractString, inputs_nw::Dict)
inputs_nw["pTrans_Loss_Coef"] = inputs_nw["pPercent_Loss"]
elseif setup["Trans_Loss_Segments"] >= 2
# If zones are connected, loss coefficient is R/V^2 where R is resistance in Ohms and V is voltage in Volts
- inputs_nw["pTrans_Loss_Coef"] = (inputs_nw["Ohms"]/10^6)./(inputs_nw["kV"]/10^3)^2 * scale_factor # 1/GW ***
+ inputs_nw["pTrans_Loss_Coef"] = (inputs_nw["Ohms"] / 10^6) ./
+ (inputs_nw["kV"] / 10^3)^2 * scale_factor # 1/GW ***
end
## Sets and indices for transmission losses and expansion
inputs_nw["TRANS_LOSS_SEGS"] = setup["Trans_Loss_Segments"] # Number of segments used in piecewise linear approximations quadratic loss functions
- inputs_nw["LOSS_LINES"] = findall(inputs_nw["pTrans_Loss_Coef"].!=0) # Lines for which loss coefficients apply (are non-zero);
+ inputs_nw["LOSS_LINES"] = findall(inputs_nw["pTrans_Loss_Coef"] .!= 0) # Lines for which loss coefficients apply (are non-zero);
if setup["NetworkExpansion"] == 1
# Network lines and zones that are expandable have non-negative maximum reinforcement inputs
- inputs_nw["EXPANSION_LINES"] = findall(inputs_nw["pMax_Line_Reinforcement"].>=0)
- inputs_nw["NO_EXPANSION_LINES"] = findall(inputs_nw["pMax_Line_Reinforcement"].<0)
+ inputs_nw["EXPANSION_LINES"] = findall(inputs_nw["pMax_Line_Reinforcement"] .>= 0)
+ inputs_nw["NO_EXPANSION_LINES"] = findall(inputs_nw["pMax_Line_Reinforcement"] .< 0)
end
println(filename * " Successfully Read!")
@@ -138,9 +142,9 @@ starting zone of the line and the zone with entry -1 is the ending zone of the l
"""
function load_network_map_from_matrix(network_var::DataFrame, Z, L)
# Topology of the network source-sink matrix
- network_map_matrix_format_deprecation_warning()
+ network_map_matrix_format_deprecation_warning()
col = findall(s -> s == "z1", names(network_var))[1]
- mat = Matrix{Float64}(network_var[1:L, col:col+Z-1])
+ mat = Matrix{Float64}(network_var[1:L, col:(col + Z - 1)])
end
function load_network_map(network_var::DataFrame, Z, L)
@@ -150,7 +154,7 @@ function load_network_map(network_var::DataFrame, Z, L)
has_network_list = all([c in columns for c in list_columns])
zones_as_strings = ["z" * string(i) for i in 1:Z]
- has_network_matrix = all([c in columns for c in zones_as_strings])
+ has_network_matrix = all([c in columns for c in zones_as_strings])
instructions = """The transmission network should be specified in the form of a matrix
(with columns z1, z2, ... zN) or in the form of lists (with Start_Zone, End_Zone),
@@ -168,12 +172,12 @@ function load_network_map(network_var::DataFrame, Z, L)
end
function network_map_matrix_format_deprecation_warning()
- @warn """Specifying the network map as a matrix is deprecated as of v0.4
-and will be removed in v0.5. Instead, use the more compact list-style format.
-
-..., Network_Lines, Start_Zone, End_Zone, ...
- 1, 1, 2,
- 2, 1, 3,
- 3, 2, 3,
-""" maxlog=1
+ @warn """Specifying the network map as a matrix is deprecated as of v0.4
+ and will be removed in v0.5. Instead, use the more compact list-style format.
+
+ ..., Network_Lines, Start_Zone, End_Zone, ...
+ 1, 1, 2,
+ 2, 1, 3,
+ 3, 2, 3,
+ """ maxlog=1
end
diff --git a/src/load_inputs/load_operational_reserves.jl b/src/load_inputs/load_operational_reserves.jl
index 35508e9f5f..6b6d67cb78 100644
--- a/src/load_inputs/load_operational_reserves.jl
+++ b/src/load_inputs/load_operational_reserves.jl
@@ -5,10 +5,10 @@ Read input parameters related to frequency regulation and operating reserve requ
"""
function load_operational_reserves!(setup::Dict, path::AbstractString, inputs::Dict)
filename = "Operational_reserves.csv"
- deprecated_synonym = "Reserves.csv"
+ deprecated_synonym = "Reserves.csv"
res_in = load_dataframe(path, [filename, deprecated_synonym])
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
function load_field_with_deprecated_symbol(df::DataFrame, columns::Vector{Symbol})
best = popfirst!(columns)
@@ -19,49 +19,53 @@ function load_operational_reserves!(setup::Dict, path::AbstractString, inputs::D
end
for col in columns
if col in all_columns
- Base.depwarn("The column name $col in file $filename is deprecated; prefer $best", :load_operational_reserves, force=true)
+ Base.depwarn(
+ "The column name $col in file $filename is deprecated; prefer $best",
+ :load_operational_reserves,
+ force = true)
return float(df[firstrow, col])
end
end
error("None of the columns $columns were found in the file $filename")
end
- # Regulation requirement as a percent of hourly demand; here demand is the total across all model zones
- inputs["pReg_Req_Demand"] = load_field_with_deprecated_symbol(res_in,
- [:Reg_Req_Percent_Demand,
- :Reg_Req_Percent_Load])
+ # Regulation requirement as a percent of hourly demand; here demand is the total across all model zones
+ inputs["pReg_Req_Demand"] = load_field_with_deprecated_symbol(res_in,
+ [:Reg_Req_Percent_Demand,
+ :Reg_Req_Percent_Load])
- # Regulation requirement as a percent of hourly wind and solar generation (summed across all model zones)
- inputs["pReg_Req_VRE"] = float(res_in[1,:Reg_Req_Percent_VRE])
- # Spinning up reserve requirement as a percent of hourly demand (which is summed across all zones)
- inputs["pRsv_Req_Demand"] = load_field_with_deprecated_symbol(res_in,
- [:Rsv_Req_Percent_Demand,
- :Rsv_Req_Percent_Load])
- # Spinning up reserve requirement as a percent of hourly wind and solar generation (which is summed across all zones)
- inputs["pRsv_Req_VRE"] = float(res_in[1,:Rsv_Req_Percent_VRE])
+ # Regulation requirement as a percent of hourly wind and solar generation (summed across all model zones)
+ inputs["pReg_Req_VRE"] = float(res_in[1, :Reg_Req_Percent_VRE])
+ # Spinning up reserve requirement as a percent of hourly demand (which is summed across all zones)
+ inputs["pRsv_Req_Demand"] = load_field_with_deprecated_symbol(res_in,
+ [:Rsv_Req_Percent_Demand,
+ :Rsv_Req_Percent_Load])
+ # Spinning up reserve requirement as a percent of hourly wind and solar generation (which is summed across all zones)
+ inputs["pRsv_Req_VRE"] = float(res_in[1, :Rsv_Req_Percent_VRE])
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
# Penalty for not meeting hourly spinning reserve requirement
- inputs["pC_Rsv_Penalty"] = float(res_in[1,:Unmet_Rsv_Penalty_Dollar_per_MW]) / scale_factor # convert to million $/GW with objective function in millions
- inputs["pStatic_Contingency"] = float(res_in[1,:Static_Contingency_MW]) / scale_factor # convert to GW
+ inputs["pC_Rsv_Penalty"] = float(res_in[1, :Unmet_Rsv_Penalty_Dollar_per_MW]) /
+ scale_factor # convert to million $/GW with objective function in millions
+ inputs["pStatic_Contingency"] = float(res_in[1, :Static_Contingency_MW]) / scale_factor # convert to GW
- if setup["UCommit"] >= 1
- inputs["pDynamic_Contingency"] = convert(Int8, res_in[1,:Dynamic_Contingency] )
- # Set BigM value used for dynamic contingencies cases to be largest possible cluster size
- # Note: this BigM value is only relevant for units in the COMMIT set. See operational_reserves.jl for details on implementation of dynamic contingencies
- if inputs["pDynamic_Contingency"] > 0
- inputs["pContingency_BigM"] = zeros(Float64, inputs["G"])
- for y in inputs["COMMIT"]
- inputs["pContingency_BigM"][y] = max_cap_mw(gen[y])
- # When Max_Cap_MW == -1, there is no limit on capacity size
- if inputs["pContingency_BigM"][y] < 0
- # NOTE: this effectively acts as a maximum cluster size when not otherwise specified, adjust accordingly
- inputs["pContingency_BigM"][y] = 5000 * cap_size(gen[y])
- end
- end
- end
- end
+ if setup["UCommit"] >= 1
+ inputs["pDynamic_Contingency"] = convert(Int8, res_in[1, :Dynamic_Contingency])
+ # Set BigM value used for dynamic contingencies cases to be largest possible cluster size
+ # Note: this BigM value is only relevant for units in the COMMIT set. See operational_reserves.jl for details on implementation of dynamic contingencies
+ if inputs["pDynamic_Contingency"] > 0
+ inputs["pContingency_BigM"] = zeros(Float64, inputs["G"])
+ for y in inputs["COMMIT"]
+ inputs["pContingency_BigM"][y] = max_cap_mw(gen[y])
+ # When Max_Cap_MW == -1, there is no limit on capacity size
+ if inputs["pContingency_BigM"][y] < 0
+ # NOTE: this effectively acts as a maximum cluster size when not otherwise specified, adjust accordingly
+ inputs["pContingency_BigM"][y] = 5000 * cap_size(gen[y])
+ end
+ end
+ end
+ end
- println(filename * " Successfully Read!")
+ println(filename * " Successfully Read!")
end
diff --git a/src/load_inputs/load_period_map.jl b/src/load_inputs/load_period_map.jl
index dee0b1ccd2..3966ea03b3 100644
--- a/src/load_inputs/load_period_map.jl
+++ b/src/load_inputs/load_period_map.jl
@@ -4,16 +4,16 @@
Read input parameters related to mapping of representative time periods to full chronological time series
"""
function load_period_map!(setup::Dict, path::AbstractString, inputs::Dict)
- period_map = "Period_map.csv"
- data_directory = joinpath(path, setup["TimeDomainReductionFolder"])
- if setup["TimeDomainReduction"] == 1 && isfile(joinpath(data_directory, period_map)) # Use Time Domain Reduced data for GenX
- my_dir = data_directory
- else
+ period_map = "Period_map.csv"
+ data_directory = joinpath(path, setup["TimeDomainReductionFolder"])
+ if setup["TimeDomainReduction"] == 1 && isfile(joinpath(data_directory, period_map)) # Use Time Domain Reduced data for GenX
+ my_dir = data_directory
+ else
# If TDR is not used, then use the "system" directory specified in the setup
my_dir = joinpath(path, setup["SystemFolder"])
- end
- file_path = joinpath(my_dir, period_map)
+ end
+ file_path = joinpath(my_dir, period_map)
inputs["Period_Map"] = load_dataframe(file_path)
- println(period_map * " Successfully Read!")
+ println(period_map * " Successfully Read!")
end
diff --git a/src/load_inputs/load_resources_data.jl b/src/load_inputs/load_resources_data.jl
index c5a37486a1..31c3d9d1c3 100644
--- a/src/load_inputs/load_resources_data.jl
+++ b/src/load_inputs/load_resources_data.jl
@@ -8,16 +8,14 @@ Internal function to get resource information (filename and GenX type) for each
"""
function _get_resource_info()
- resource_info = (
- hydro = (filename="Hydro.csv", type=Hydro),
- thermal = (filename="Thermal.csv", type=Thermal),
- vre = (filename="Vre.csv", type=Vre),
- storage = (filename="Storage.csv", type=Storage),
- flex_demand = (filename="Flex_demand.csv", type=FlexDemand),
- must_run = (filename="Must_run.csv", type=MustRun),
- electrolyzer = (filename="Electrolyzer.csv", type=Electrolyzer),
- vre_stor = (filename="Vre_stor.csv", type=VreStorage)
- )
+ resource_info = (hydro = (filename = "Hydro.csv", type = Hydro),
+ thermal = (filename = "Thermal.csv", type = Thermal),
+ vre = (filename = "Vre.csv", type = Vre),
+ storage = (filename = "Storage.csv", type = Storage),
+ flex_demand = (filename = "Flex_demand.csv", type = FlexDemand),
+ must_run = (filename = "Must_run.csv", type = MustRun),
+ electrolyzer = (filename = "Electrolyzer.csv", type = Electrolyzer),
+ vre_stor = (filename = "Vre_stor.csv", type = VreStorage))
return resource_info
end
@@ -38,11 +36,11 @@ function _get_policyfile_info()
max_cap_filenames = ["Resource_maximum_capacity_requirement.csv"]
policyfile_info = (
- esr = (filenames=esr_filenames, setup_param="EnergyShareRequirement"),
- cap_res = (filenames=cap_res_filenames, setup_param="CapacityReserveMargin"),
- min_cap = (filenames=min_cap_filenames, setup_param="MinCapReq"),
- max_cap = (filenames=max_cap_filenames, setup_param="MaxCapReq"),
- )
+ esr = (filenames = esr_filenames,
+ setup_param = "EnergyShareRequirement"),
+ cap_res = (filenames = cap_res_filenames, setup_param = "CapacityReserveMargin"),
+ min_cap = (filenames = min_cap_filenames, setup_param = "MinCapReq"),
+ max_cap = (filenames = max_cap_filenames, setup_param = "MaxCapReq"))
return policyfile_info
end
@@ -52,18 +50,16 @@ end
Internal function to get a map of GenX resource type their corresponding names in the summary table.
"""
function _get_summary_map()
- names_map = Dict{Symbol,String}(
- :Electrolyzer => "Electrolyzer",
+ names_map = Dict{Symbol, String}(:Electrolyzer => "Electrolyzer",
:FlexDemand => "Flexible_demand",
:Hydro => "Hydro",
:Storage => "Storage",
:Thermal => "Thermal",
:Vre => "VRE",
:MustRun => "Must_run",
- :VreStorage => "VRE_and_storage",
- )
+ :VreStorage => "VRE_and_storage")
max_length = maximum(length.(values(names_map)))
- for (k,v) in names_map
+ for (k, v) in names_map
names_map[k] = v * repeat(" ", max_length - length(v))
end
return names_map
@@ -82,43 +78,31 @@ See documentation for descriptions of each column being scaled.
"""
function scale_resources_data!(resource_in::DataFrame, scale_factor::Float64)
columns_to_scale = [:existing_charge_cap_mw, # to GW
- :existing_cap_mwh, # to GWh
- :existing_cap_mw, # to GW
-
- :cap_size, # to GW
-
- :min_cap_mw, # to GW
- :min_cap_mwh, # to GWh
- :min_charge_cap_mw, # to GWh
-
- :max_cap_mw, # to GW
- :max_cap_mwh, # to GWh
- :max_charge_cap_mw, # to GW
-
- :inv_cost_per_mwyr, # to $M/GW/yr
- :inv_cost_per_mwhyr, # to $M/GWh/yr
- :inv_cost_charge_per_mwyr, # to $M/GW/yr
-
- :fixed_om_cost_per_mwyr, # to $M/GW/yr
- :fixed_om_cost_per_mwhyr, # to $M/GWh/yr
- :fixed_om_cost_charge_per_mwyr, # to $M/GW/yr
-
- :var_om_cost_per_mwh, # to $M/GWh
- :var_om_cost_per_mwh_in, # to $M/GWh
-
- :reg_cost, # to $M/GW
- :rsv_cost, # to $M/GW
-
- :min_retired_cap_mw, # to GW
- :min_retired_charge_cap_mw, # to GW
- :min_retired_energy_cap_mw, # to GW
-
- :start_cost_per_mw, # to $M/GW
-
- :ccs_disposal_cost_per_metric_ton,
-
- :hydrogen_mwh_per_tonne # to GWh/t
- ]
+ :existing_cap_mwh, # to GWh
+ :existing_cap_mw, # to GW
+ :cap_size, # to GW
+ :min_cap_mw, # to GW
+ :min_cap_mwh, # to GWh
+ :min_charge_cap_mw, # to GWh
+ :max_cap_mw, # to GW
+ :max_cap_mwh, # to GWh
+ :max_charge_cap_mw, # to GW
+ :inv_cost_per_mwyr, # to $M/GW/yr
+ :inv_cost_per_mwhyr, # to $M/GWh/yr
+ :inv_cost_charge_per_mwyr, # to $M/GW/yr
+ :fixed_om_cost_per_mwyr, # to $M/GW/yr
+ :fixed_om_cost_per_mwhyr, # to $M/GWh/yr
+ :fixed_om_cost_charge_per_mwyr, # to $M/GW/yr
+ :var_om_cost_per_mwh, # to $M/GWh
+ :var_om_cost_per_mwh_in, # to $M/GWh
+ :reg_cost, # to $M/GW
+ :rsv_cost, # to $M/GW
+ :min_retired_cap_mw, # to GW
+ :min_retired_charge_cap_mw, # to GW
+ :min_retired_energy_cap_mw, # to GW
+ :start_cost_per_mw, # to $M/GW
+ :ccs_disposal_cost_per_metric_ton, :hydrogen_mwh_per_tonne # to GWh/t
+ ]
scale_columns!(resource_in, columns_to_scale, scale_factor)
return nothing
@@ -137,53 +121,53 @@ See documentation for descriptions of each column being scaled.
"""
function scale_vre_stor_data!(vre_stor_in::DataFrame, scale_factor::Float64)
columns_to_scale = [:existing_cap_inverter_mw,
- :existing_cap_solar_mw,
- :existing_cap_wind_mw,
- :existing_cap_charge_dc_mw,
- :existing_cap_charge_ac_mw,
- :existing_cap_discharge_dc_mw,
- :existing_cap_discharge_ac_mw,
- :min_cap_inverter_mw,
- :max_cap_inverter_mw,
- :min_cap_solar_mw,
- :max_cap_solar_mw,
- :min_cap_wind_mw,
- :max_cap_wind_mw,
- :min_cap_charge_ac_mw,
- :max_cap_charge_ac_mw,
- :min_cap_charge_dc_mw,
- :max_cap_charge_dc_mw,
- :min_cap_discharge_ac_mw,
- :max_cap_discharge_ac_mw,
- :min_cap_discharge_dc_mw,
- :max_cap_discharge_dc_mw,
- :inv_cost_inverter_per_mwyr,
- :fixed_om_inverter_cost_per_mwyr,
- :inv_cost_solar_per_mwyr,
- :fixed_om_solar_cost_per_mwyr,
- :inv_cost_wind_per_mwyr,
- :fixed_om_wind_cost_per_mwyr,
- :inv_cost_discharge_dc_per_mwyr,
- :fixed_om_cost_discharge_dc_per_mwyr,
- :inv_cost_charge_dc_per_mwyr,
- :fixed_om_cost_charge_dc_per_mwyr,
- :inv_cost_discharge_ac_per_mwyr,
- :fixed_om_cost_discharge_ac_per_mwyr,
- :inv_cost_charge_ac_per_mwyr,
- :fixed_om_cost_charge_ac_per_mwyr,
- :var_om_cost_per_mwh_solar,
- :var_om_cost_per_mwh_wind,
- :var_om_cost_per_mwh_charge_dc,
- :var_om_cost_per_mwh_discharge_dc,
- :var_om_cost_per_mwh_charge_ac,
- :var_om_cost_per_mwh_discharge_ac,
- :min_retired_cap_inverter_mw,
- :min_retired_cap_solar_mw,
- :min_retired_cap_wind_mw,
- :min_retired_cap_charge_dc_mw,
- :min_retired_cap_charge_ac_mw,
- :min_retired_cap_discharge_dc_mw,
- :min_retired_cap_discharge_ac_mw]
+ :existing_cap_solar_mw,
+ :existing_cap_wind_mw,
+ :existing_cap_charge_dc_mw,
+ :existing_cap_charge_ac_mw,
+ :existing_cap_discharge_dc_mw,
+ :existing_cap_discharge_ac_mw,
+ :min_cap_inverter_mw,
+ :max_cap_inverter_mw,
+ :min_cap_solar_mw,
+ :max_cap_solar_mw,
+ :min_cap_wind_mw,
+ :max_cap_wind_mw,
+ :min_cap_charge_ac_mw,
+ :max_cap_charge_ac_mw,
+ :min_cap_charge_dc_mw,
+ :max_cap_charge_dc_mw,
+ :min_cap_discharge_ac_mw,
+ :max_cap_discharge_ac_mw,
+ :min_cap_discharge_dc_mw,
+ :max_cap_discharge_dc_mw,
+ :inv_cost_inverter_per_mwyr,
+ :fixed_om_inverter_cost_per_mwyr,
+ :inv_cost_solar_per_mwyr,
+ :fixed_om_solar_cost_per_mwyr,
+ :inv_cost_wind_per_mwyr,
+ :fixed_om_wind_cost_per_mwyr,
+ :inv_cost_discharge_dc_per_mwyr,
+ :fixed_om_cost_discharge_dc_per_mwyr,
+ :inv_cost_charge_dc_per_mwyr,
+ :fixed_om_cost_charge_dc_per_mwyr,
+ :inv_cost_discharge_ac_per_mwyr,
+ :fixed_om_cost_discharge_ac_per_mwyr,
+ :inv_cost_charge_ac_per_mwyr,
+ :fixed_om_cost_charge_ac_per_mwyr,
+ :var_om_cost_per_mwh_solar,
+ :var_om_cost_per_mwh_wind,
+ :var_om_cost_per_mwh_charge_dc,
+ :var_om_cost_per_mwh_discharge_dc,
+ :var_om_cost_per_mwh_charge_ac,
+ :var_om_cost_per_mwh_discharge_ac,
+ :min_retired_cap_inverter_mw,
+ :min_retired_cap_solar_mw,
+ :min_retired_cap_wind_mw,
+ :min_retired_cap_charge_dc_mw,
+ :min_retired_cap_charge_ac_mw,
+ :min_retired_cap_discharge_dc_mw,
+ :min_retired_cap_discharge_ac_mw]
scale_columns!(vre_stor_in, columns_to_scale, scale_factor)
return nothing
end
@@ -199,7 +183,9 @@ Scales in-place the columns in `columns_to_scale` of a dataframe `df` by a `scal
- `scale_factor` (Float64): A scaling factor for energy and currency units.
"""
-function scale_columns!(df::DataFrame, columns_to_scale::Vector{Symbol}, scale_factor::Float64)
+function scale_columns!(df::DataFrame,
+ columns_to_scale::Vector{Symbol},
+ scale_factor::Float64)
for column in columns_to_scale
if string(column) in names(df)
df[!, column] /= scale_factor
@@ -246,7 +232,7 @@ Computes the indices for the resources loaded from a single dataframe by shiftin
"""
function compute_resource_indices(resources_in::DataFrame, offset::Int64)
- range = (1,nrow(resources_in)) .+ offset
+ range = (1, nrow(resources_in)) .+ offset
return UnitRange{Int64}(range...)
end
@@ -314,7 +300,9 @@ Construct the array of resources from multiple files of different types located
- `Error`: If no resources data is found. Check the data path or the configuration file "genx_settings.yml" inside Settings.
"""
-function create_resource_array(resource_folder::AbstractString, resources_info::NamedTuple, scale_factor::Float64=1.0)
+function create_resource_array(resource_folder::AbstractString,
+ resources_info::NamedTuple,
+ scale_factor::Float64 = 1.0)
resource_id_offset = 0
resources = []
# loop over available types and load all resources in resource_folder
@@ -333,7 +321,8 @@ function create_resource_array(resource_folder::AbstractString, resources_info::
@info filename * " Successfully Read."
end
end
- isempty(resources) && error("No resources data found. Check data path or configuration file \"genx_settings.yml\" inside Settings.")
+ isempty(resources) &&
+ error("No resources data found. Check data path or configuration file \"genx_settings.yml\" inside Settings.")
return reduce(vcat, resources)
end
@@ -353,15 +342,17 @@ function check_mustrun_reserve_contribution(r::AbstractResource)
reg_max_r = reg_max(r)
if reg_max_r != 0
- e = string("Resource ", resource_name(r), " is of MUST_RUN type but :Reg_Max = ", reg_max_r, ".\n",
- "MUST_RUN units must have Reg_Max = 0 since they cannot contribute to reserves.")
+ e = string("Resource ", resource_name(r), " is of MUST_RUN type but :Reg_Max = ",
+ reg_max_r, ".\n",
+ "MUST_RUN units must have Reg_Max = 0 since they cannot contribute to reserves.")
push!(error_strings, e)
end
-
+
rsv_max_r = rsv_max(r)
if rsv_max_r != 0
- e = string("Resource ", resource_name(r), " is of MUST_RUN type but :Rsv_Max = ", rsv_max_r, ".\n",
- "MUST_RUN units must have Rsv_Max = 0 since they cannot contribute to reserves.")
+ e = string("Resource ", resource_name(r), " is of MUST_RUN type but :Rsv_Max = ",
+ rsv_max_r, ".\n",
+ "MUST_RUN units must have Rsv_Max = 0 since they cannot contribute to reserves.")
push!(error_strings, e)
end
return ErrorMsg.(error_strings)
@@ -377,7 +368,7 @@ function check_LDS_applicability(r::AbstractResource)
# LDS is available only for Hydro and Storage
if !isa(r, applicable_resources) && lds_value > 0
e = string("Resource ", resource_name(r), " has :lds = ", lds_value, ".\n",
- "This setting is valid only for resources where the type is one of $applicable_resources.")
+ "This setting is valid only for resources where the type is one of $applicable_resources.")
push!(error_strings, e)
end
return ErrorMsg.(error_strings)
@@ -388,9 +379,9 @@ function check_maintenance_applicability(r::AbstractResource)
not_set = default_zero
maint_value = get(r, :maint, not_set)
-
+
error_strings = String[]
-
+
if maint_value == not_set
# not MAINT so the rest is not applicable
return error_strings
@@ -399,13 +390,13 @@ function check_maintenance_applicability(r::AbstractResource)
# MAINT is available only for Thermal
if !isa(r, applicable_resources) && maint_value > 0
e = string("Resource ", resource_name(r), " has :maint = ", maint_value, ".\n",
- "This setting is valid only for resources where the type is one of $applicable_resources.")
+ "This setting is valid only for resources where the type is one of $applicable_resources.")
push!(error_strings, e)
end
if get(r, :model, not_set) == 2
e = string("Resource ", resource_name(r), " has :maint = ", maint_value, ".\n",
- "This is valid only for resources with unit commitment (:model = 1);\n",
- "this has :model = 2.")
+ "This is valid only for resources with unit commitment (:model = 1);\n",
+ "this has :model = 2.")
push!(error_strings, e)
end
return ErrorMsg.(error_strings)
@@ -416,27 +407,29 @@ function check_retrofit_resource(r::AbstractResource)
# check that retrofit_id is set only for retrofitting units and not for new builds or units that can retire
if can_retrofit(r) == true && can_retire(r) == false
- e = string("Resource ", resource_name(r), " has :can_retrofit = ", can_retrofit(r), " but :can_retire = ", can_retire(r), ".\n",
- "A unit that can be retrofitted must also be eligible for retirement (:can_retire = 1)")
+ e = string("Resource ", resource_name(r), " has :can_retrofit = ", can_retrofit(r),
+ " but :can_retire = ", can_retire(r), ".\n",
+ "A unit that can be retrofitted must also be eligible for retirement (:can_retire = 1)")
push!(error_strings, e)
elseif is_retrofit_option(r) == true && new_build(r) == false
- e = string("Resource ", resource_name(r), " has :retrofit = ", is_retrofit_option(r), " but :new_build = ", new_build(r), ".\n",
- "This setting is valid only for resources that have :new_build = 1")
+ e = string("Resource ", resource_name(r), " has :retrofit = ",
+ is_retrofit_option(r), " but :new_build = ", new_build(r), ".\n",
+ "This setting is valid only for resources that have :new_build = 1")
push!(error_strings, e)
end
return ErrorMsg.(error_strings)
-end
+end
function check_resource(r::AbstractResource)
e = []
e = [e; check_LDS_applicability(r)]
- e = [e; check_maintenance_applicability(r)]
+ e = [e; check_maintenance_applicability(r)]
e = [e; check_mustrun_reserve_contribution(r)]
e = [e; check_retrofit_resource(r)]
return e
end
-function check_retrofit_id(rs::Vector{T}) where T <: AbstractResource
+function check_retrofit_id(rs::Vector{T}) where {T <: AbstractResource}
warning_strings = String[]
units_can_retrofit = ids_can_retrofit(rs)
@@ -445,7 +438,7 @@ function check_retrofit_id(rs::Vector{T}) where T <: AbstractResource
# check that all retrofit_ids for resources that can retrofit and retrofit options match
if Set(rs[units_can_retrofit].retrofit_id) != Set(rs[retrofit_options].retrofit_id)
msg = string("Retrofit IDs for resources that \"can retrofit\" and \"retrofit options\" do not match.\n" *
- "All retrofitting units must be associated with a retrofit option.")
+ "All retrofitting units must be associated with a retrofit option.")
push!(warning_strings, msg)
end
@@ -458,7 +451,7 @@ end
Validate the consistency of a vector of GenX resources
Reports any errors/warnings as a vector of messages.
"""
-function check_resource(resources::Vector{T}) where T <: AbstractResource
+function check_resource(resources::Vector{T}) where {T <: AbstractResource}
e = []
for r in resources
e = [e; check_resource(r)]
@@ -488,7 +481,7 @@ function announce_errors_and_halt(e::Vector)
return nothing
end
-function validate_resources(resources::Vector{T}) where T <: AbstractResource
+function validate_resources(resources::Vector{T}) where {T <: AbstractResource}
e = check_resource(resources)
if length(e) > 0
announce_errors_and_halt(e)
@@ -510,7 +503,7 @@ Function that loads and scales resources data from folder specified in resources
"""
function create_resource_array(setup::Dict, resources_path::AbstractString)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1.0
-
+
# get filename and GenX type for each type of resources available in GenX
resources_info = _get_resource_info()
@@ -521,7 +514,6 @@ function create_resource_array(setup::Dict, resources_path::AbstractString)
return resources
end
-
"""
validate_policy_files(resource_policies_path::AbstractString, setup::Dict)
@@ -538,8 +530,14 @@ Validate the policy files by checking if they exist in the specified folder and
function validate_policy_files(resource_policies_path::AbstractString, setup::Dict)
policyfile_info = _get_policyfile_info()
for (filenames, setup_param) in values(policyfile_info)
- if setup[setup_param] == 1 && any(!isfile(joinpath(resource_policies_path, filename)) for filename in filenames)
- msg = string(setup_param, " is set to 1 in settings but the required file(s) ", filenames, " was (were) not found in ", resource_policies_path)
+ if setup[setup_param] == 1 &&
+ any(!isfile(joinpath(resource_policies_path, filename))
+ for filename in filenames)
+ msg = string(setup_param,
+ " is set to 1 in settings but the required file(s) ",
+ filenames,
+ " was (were) not found in ",
+ resource_policies_path)
@warn(msg)
end
end
@@ -564,15 +562,16 @@ function validate_policy_dataframe!(filename::AbstractString, policy_in::DataFra
error(msg)
end
# if the single column attribute does not have a tag number, add a tag number of 1
- if n_cols == 2 && cols[2][end-1:end] != "_1"
+ if n_cols == 2 && cols[2][(end - 1):end] != "_1"
rename!(policy_in, Symbol.(cols[2]) => Symbol.(cols[2], "_1"))
end
# get policy column names
cols = lowercase.(names(policy_in))
- filter!(col -> col ≠ "resource",cols)
-
+ filter!(col -> col ≠ "resource", cols)
+
accepted_cols = ["derating_factor", "esr", "esr_vrestor",
- [string(cap, type) for cap in ["min_cap", "max_cap"] for type in ("", "_stor", "_solar", "_wind")]...]
+ [string(cap, type) for cap in ["min_cap", "max_cap"]
+ for type in ("", "_stor", "_solar", "_wind")]...]
# Check that all policy columns have names in accepted_cols
if !all(x -> replace(x, r"(_*|_*\d*)$" => "") in accepted_cols, cols)
@@ -581,7 +580,8 @@ function validate_policy_dataframe!(filename::AbstractString, policy_in::DataFra
error(msg)
end
# Check that all policy columns have names with format "[policy_name]_[tagnum]"
- if !all(any([occursin(Regex("$(y)")*r"_\d", col) for y in accepted_cols]) for col in cols)
+ if !all(any([occursin(Regex("$(y)") * r"_\d", col) for y in accepted_cols])
+ for col in cols)
msg = "Columns in policy file $filename must have names with format \"[policy_name]_[tagnum]\", case insensitive. (e.g., ESR_1, Min_Cap_1, Max_Cap_2, etc.)."
error(msg)
end
@@ -599,14 +599,16 @@ Adds a set of new attributes (names and corresponding values) to a resource. The
- `new_values::DataFrameRow`: DataFrameRow containing the values of the new attributes.
"""
-function add_attributes_to_resource!(resource::AbstractResource, new_symbols::Vector{Symbol}, new_values::T) where T <: DataFrameRow
+function add_attributes_to_resource!(resource::AbstractResource,
+ new_symbols::Vector{Symbol},
+ new_values::T) where {T <: DataFrameRow}
# loop over new attributes
for (sym, value) in zip(new_symbols, new_values)
# add attribute to resource
setproperty!(resource, sym, value)
end
return nothing
-end
+end
"""
add_df_to_resources!(resources::Vector{<:AbstractResource}, module_in::DataFrame)
@@ -642,7 +644,9 @@ Loads a single policy file and adds the columns as new attributes to resources i
- `path::AbstractString`: The path to the policy file.
- `filename::AbstractString`: The name of the policy file.
"""
-function add_policy_to_resources!(resources::Vector{<:AbstractResource}, path::AbstractString, filename::AbstractString)
+function add_policy_to_resources!(resources::Vector{<:AbstractResource},
+ path::AbstractString,
+ filename::AbstractString)
policy_in = load_dataframe(path)
# check if policy file has any attributes, validate column names
validate_policy_dataframe!(filename, policy_in)
@@ -660,15 +664,16 @@ Reads policy files and adds policies-related attributes to resources in the mode
- `resources::Vector{<:AbstractResource}`: Vector of resources in the model.
- `resources_path::AbstractString`: The path to the resources folder.
"""
-function add_policies_to_resources!(resources::Vector{<:AbstractResource}, resource_policy_path::AbstractString)
+function add_policies_to_resources!(resources::Vector{<:AbstractResource},
+ resource_policy_path::AbstractString)
# get filename for each type of policy available in GenX
policies_info = _get_policyfile_info()
# loop over policy files
- for (filenames,_) in values(policies_info)
+ for (filenames, _) in values(policies_info)
for filename in filenames
path = joinpath(resource_policy_path, filename)
# if file exists, add policy to resources
- if isfile(path)
+ if isfile(path)
add_policy_to_resources!(resources, path, filename)
@info filename * " Successfully Read."
end
@@ -686,7 +691,8 @@ Reads module dataframe and adds columns as new attributes to the resources in th
- `resources::Vector{<:AbstractResource}`: A vector of resources.
- `module_in::DataFrame`: The dataframe with the columns to add to the resources.
"""
-function add_module_to_resources!(resources::Vector{<:AbstractResource}, module_in::DataFrame)
+function add_module_to_resources!(resources::Vector{<:AbstractResource},
+ module_in::DataFrame)
# add module columns to resources as new attributes
add_df_to_resources!(resources, module_in)
return nothing
@@ -702,7 +708,9 @@ Reads module dataframes, loops over files and adds columns as new attributes to
- `setup (Dict)`: A dictionary containing GenX settings.
- `resources_path::AbstractString`: The path to the resources folder.
"""
-function add_modules_to_resources!(resources::Vector{<:AbstractResource}, setup::Dict, resources_path::AbstractString)
+function add_modules_to_resources!(resources::Vector{<:AbstractResource},
+ setup::Dict,
+ resources_path::AbstractString)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1.0
modules = Vector{DataFrame}()
@@ -715,7 +723,7 @@ function add_modules_to_resources!(resources::Vector{<:AbstractResource}, setup:
push!(modules, multistage_in)
@info "Multistage data successfully read."
end
-
+
## Loop over modules and add attributes to resources
add_module_to_resources!.(Ref(resources), modules)
@@ -723,32 +731,32 @@ function add_modules_to_resources!(resources::Vector{<:AbstractResource}, setup:
end
function validate_piecewisefuelusage(heat_rate_mat, load_point_mat)
- # it's possible to construct piecewise fuel consumption with n of heat rate and n-1 of load point.
- # if a user feed n of heat rate and more than n of load point, throw a error message, and then use
- # n of heat rate and n-1 load point to construct the piecewise fuel usage fuction
- if size(heat_rate_mat)[2] < size(load_point_mat)[2]
- @error """ The numbers of heatrate data are less than load points, we found $(size(heat_rate_mat)[2]) of heat rate,
- and $(size(load_point_mat)[2]) of load points. We will just use $(size(heat_rate_mat)[2]) of heat rate, and $(size(heat_rate_mat)[2]-1)
- load point to create piecewise fuel usage
- """
- end
-
- # check if values for piecewise fuel consumption make sense. Negative heat rate or load point are not allowed
- if any(heat_rate_mat .< 0) | any(load_point_mat .< 0)
- @error """ Neither heat rate nor load point can be negative
- """
- error("Invalid inputs detected for piecewise fuel usage")
- end
- # for non-zero values, heat rates and load points should follow an increasing trend
- if any([any(diff(filter(!=(0), row)) .< 0) for row in eachrow(heat_rate_mat)])
- @error """ Heat rates should follow an increasing trend
- """
- error("Invalid inputs detected for piecewise fuel usage")
- elseif any([any(diff(filter(!=(0), row)) .< 0) for row in eachrow(load_point_mat)])
- @error """load points should follow an increasing trend
- """
- error("Invalid inputs detected for piecewise fuel usage")
- end
+ # it's possible to construct piecewise fuel consumption with n of heat rate and n-1 of load point.
+ # if a user feed n of heat rate and more than n of load point, throw a error message, and then use
+ # n of heat rate and n-1 load point to construct the piecewise fuel usage fuction
+ if size(heat_rate_mat)[2] < size(load_point_mat)[2]
+ @error """ The numbers of heatrate data are less than load points, we found $(size(heat_rate_mat)[2]) of heat rate,
+ and $(size(load_point_mat)[2]) of load points. We will just use $(size(heat_rate_mat)[2]) of heat rate, and $(size(heat_rate_mat)[2]-1)
+ load point to create piecewise fuel usage
+ """
+ end
+
+ # check if values for piecewise fuel consumption make sense. Negative heat rate or load point are not allowed
+ if any(heat_rate_mat .< 0) | any(load_point_mat .< 0)
+ @error """ Neither heat rate nor load point can be negative
+ """
+ error("Invalid inputs detected for piecewise fuel usage")
+ end
+ # for non-zero values, heat rates and load points should follow an increasing trend
+ if any([any(diff(filter(!=(0), row)) .< 0) for row in eachrow(heat_rate_mat)])
+ @error """ Heat rates should follow an increasing trend
+ """
+ error("Invalid inputs detected for piecewise fuel usage")
+ elseif any([any(diff(filter(!=(0), row)) .< 0) for row in eachrow(load_point_mat)])
+ @error """load points should follow an increasing trend
+ """
+ error("Invalid inputs detected for piecewise fuel usage")
+ end
end
"""
@@ -762,20 +770,23 @@ Reads piecewise fuel usage data from the vector of generators, create a PWFU_dat
- `gen::Vector{<:AbstractResource}`: The vector of generators in the model
- `inputs::Dict`: The dictionary containing the input data
"""
-function process_piecewisefuelusage!(setup::Dict, gen::Vector{<:AbstractResource}, inputs::Dict)
+function process_piecewisefuelusage!(setup::Dict,
+ gen::Vector{<:AbstractResource},
+ inputs::Dict)
inputs["PWFU_Num_Segments"] = 0
inputs["THERM_COMMIT_PWFU"] = Int64[]
-
- if any(haskey.(gen, :pwfu_fuel_usage_zero_load_mmbtu_per_h))
+ if any(haskey.(gen, :pwfu_fuel_usage_zero_load_mmbtu_per_h))
thermal_gen = gen.Thermal
has_pwfu = haskey.(thermal_gen, :pwfu_fuel_usage_zero_load_mmbtu_per_h)
@assert all(has_pwfu) "Piecewise fuel usage data is not consistent across thermal generators"
- heat_rate_mat_therm = extract_matrix_from_resources(thermal_gen, "pwfu_heat_rate_mmbtu_per_mwh")
- load_point_mat_therm = extract_matrix_from_resources(thermal_gen, "pwfu_load_point_mw")
-
- num_segments = size(heat_rate_mat_therm)[2]
+ heat_rate_mat_therm = extract_matrix_from_resources(thermal_gen,
+ "pwfu_heat_rate_mmbtu_per_mwh")
+ load_point_mat_therm = extract_matrix_from_resources(thermal_gen,
+ "pwfu_load_point_mw")
+
+ num_segments = size(heat_rate_mat_therm)[2]
# create a matrix to store the heat rate and load point for each generator in the model
heat_rate_mat = zeros(length(gen), num_segments)
@@ -784,74 +795,79 @@ function process_piecewisefuelusage!(setup::Dict, gen::Vector{<:AbstractResource
heat_rate_mat[THERM, :] = heat_rate_mat_therm
load_point_mat[THERM, :] = load_point_mat_therm
- # check data input
- validate_piecewisefuelusage(heat_rate_mat, load_point_mat)
+ # check data input
+ validate_piecewisefuelusage(heat_rate_mat, load_point_mat)
# determine if a generator contains piecewise fuel usage segment based on non-zero heatrate
- nonzero_rows = any(heat_rate_mat .!= 0 , dims = 2)[:]
- HAS_PWFU = resource_id.(gen[nonzero_rows])
+ nonzero_rows = any(heat_rate_mat .!= 0, dims = 2)[:]
+ HAS_PWFU = resource_id.(gen[nonzero_rows])
- # translate the inital fuel usage, heat rate, and load points into intercept for each segment
+ # translate the inital fuel usage, heat rate, and load points into intercept for each segment
fuel_usage_zero_load = zeros(length(gen))
- fuel_usage_zero_load[THERM] = pwfu_fuel_usage_zero_load_mmbtu_per_h.(thermal_gen)
- # construct a matrix for intercept
- intercept_mat = zeros(size(heat_rate_mat))
- # PWFU_Fuel_Usage_MMBTU_per_h is always the intercept of the first segment
- intercept_mat[:,1] = fuel_usage_zero_load
-
- # create a function to compute intercept if we have more than one segment
- function calculate_intercepts(slope, intercept_1, load_point)
- m, n = size(slope)
- # Initialize the intercepts matrix with zeros
- intercepts = zeros(m, n)
- # The first segment's intercepts should be intercept_1 vector
- intercepts[:, 1] = intercept_1
- # Calculate intercepts for the other segments using the load points (i.e., intersection points)
- for j in 1:n-1
- for i in 1:m
- current_slope = slope[i, j+1]
- previous_slope = slope[i, j]
- # If the current slope is 0, then skip the calculation and return 0
- if current_slope == 0
- intercepts[i, j+1] = 0.0
- else
- # y = a*x + b; => b = y - ax
- # Calculate y-coordinate of the intersection
- y = previous_slope * load_point[i, j] + intercepts[i, j]
- # determine the new intercept
- b = y - current_slope * load_point[i, j]
- intercepts[i, j+1] = b
- end
- end
- end
- return intercepts
- end
-
- if num_segments > 1
- # determine the intercept for the rest of segment if num_segments > 1
- intercept_mat = calculate_intercepts(heat_rate_mat, fuel_usage_zero_load, load_point_mat)
- end
-
- # create a PWFU_data that contain processed intercept and slope (i.e., heat rate)
- intercept_cols = [Symbol("pwfu_intercept_", i) for i in 1:num_segments]
- intercept_df = DataFrame(intercept_mat, Symbol.(intercept_cols))
- slope_cols = Symbol.(filter(colname -> startswith(string(colname),"pwfu_heat_rate_mmbtu_per_mwh"), collect(attributes(thermal_gen[1]))))
+ fuel_usage_zero_load[THERM] = pwfu_fuel_usage_zero_load_mmbtu_per_h.(thermal_gen)
+ # construct a matrix for intercept
+ intercept_mat = zeros(size(heat_rate_mat))
+ # PWFU_Fuel_Usage_MMBTU_per_h is always the intercept of the first segment
+ intercept_mat[:, 1] = fuel_usage_zero_load
+
+ # create a function to compute intercept if we have more than one segment
+ function calculate_intercepts(slope, intercept_1, load_point)
+ m, n = size(slope)
+ # Initialize the intercepts matrix with zeros
+ intercepts = zeros(m, n)
+ # The first segment's intercepts should be intercept_1 vector
+ intercepts[:, 1] = intercept_1
+ # Calculate intercepts for the other segments using the load points (i.e., intersection points)
+ for j in 1:(n - 1)
+ for i in 1:m
+ current_slope = slope[i, j + 1]
+ previous_slope = slope[i, j]
+ # If the current slope is 0, then skip the calculation and return 0
+ if current_slope == 0
+ intercepts[i, j + 1] = 0.0
+ else
+ # y = a*x + b; => b = y - ax
+ # Calculate y-coordinate of the intersection
+ y = previous_slope * load_point[i, j] + intercepts[i, j]
+ # determine the new intercept
+ b = y - current_slope * load_point[i, j]
+ intercepts[i, j + 1] = b
+ end
+ end
+ end
+ return intercepts
+ end
+
+ if num_segments > 1
+ # determine the intercept for the rest of segment if num_segments > 1
+ intercept_mat = calculate_intercepts(heat_rate_mat,
+ fuel_usage_zero_load,
+ load_point_mat)
+ end
+
+ # create a PWFU_data that contain processed intercept and slope (i.e., heat rate)
+ intercept_cols = [Symbol("pwfu_intercept_", i) for i in 1:num_segments]
+ intercept_df = DataFrame(intercept_mat, Symbol.(intercept_cols))
+ slope_cols = Symbol.(filter(
+ colname -> startswith(string(colname),
+ "pwfu_heat_rate_mmbtu_per_mwh"),
+ collect(attributes(thermal_gen[1]))))
sort!(slope_cols, by = x -> parse(Int, split(string(x), "_")[end]))
- slope_df = DataFrame(heat_rate_mat, Symbol.(slope_cols))
- PWFU_data = hcat(slope_df, intercept_df)
- # no need to scale sclope, but intercept should be scaled when parameterscale is on (MMBTU -> billion BTU)
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- PWFU_data[!, intercept_cols] ./= scale_factor
-
- inputs["slope_cols"] = slope_cols
- inputs["intercept_cols"] = intercept_cols
- inputs["PWFU_data"] = PWFU_data
- inputs["PWFU_Num_Segments"] = num_segments
- inputs["THERM_COMMIT_PWFU"] = intersect(ids_with_unit_commitment(gen), HAS_PWFU)
-
- @info "Piecewise fuel usage data successfully read!"
- end
- return nothing
+ slope_df = DataFrame(heat_rate_mat, Symbol.(slope_cols))
+ PWFU_data = hcat(slope_df, intercept_df)
+ # no need to scale sclope, but intercept should be scaled when parameterscale is on (MMBTU -> billion BTU)
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ PWFU_data[!, intercept_cols] ./= scale_factor
+
+ inputs["slope_cols"] = slope_cols
+ inputs["intercept_cols"] = intercept_cols
+ inputs["PWFU_data"] = PWFU_data
+ inputs["PWFU_Num_Segments"] = num_segments
+ inputs["THERM_COMMIT_PWFU"] = intersect(ids_with_unit_commitment(gen), HAS_PWFU)
+
+ @info "Piecewise fuel usage data successfully read!"
+ end
+ return nothing
end
@doc raw"""
@@ -863,59 +879,61 @@ For co-located VRE-storage resources, this function returns the storage type
"""
function split_storage_resources!(inputs::Dict, gen::Vector{<:AbstractResource})
- # All Storage Resources
- inputs["VS_STOR"] = union(storage_dc_charge(gen), storage_dc_discharge(gen),
- storage_ac_charge(gen), storage_ac_discharge(gen))
-
- STOR = inputs["VS_STOR"]
+ # All Storage Resources
+ inputs["VS_STOR"] = union(storage_dc_charge(gen), storage_dc_discharge(gen),
+ storage_ac_charge(gen), storage_ac_discharge(gen))
+
+ STOR = inputs["VS_STOR"]
- # Storage DC Discharge Resources
- inputs["VS_STOR_DC_DISCHARGE"] = storage_dc_discharge(gen)
- inputs["VS_SYM_DC_DISCHARGE"] = storage_sym_dc_discharge(gen)
- inputs["VS_ASYM_DC_DISCHARGE"] = storage_asym_dc_discharge(gen)
+ # Storage DC Discharge Resources
+ inputs["VS_STOR_DC_DISCHARGE"] = storage_dc_discharge(gen)
+ inputs["VS_SYM_DC_DISCHARGE"] = storage_sym_dc_discharge(gen)
+ inputs["VS_ASYM_DC_DISCHARGE"] = storage_asym_dc_discharge(gen)
- # Storage DC Charge Resources
- inputs["VS_STOR_DC_CHARGE"] = storage_dc_charge(gen)
- inputs["VS_SYM_DC_CHARGE"] = storage_sym_dc_charge(gen)
+ # Storage DC Charge Resources
+ inputs["VS_STOR_DC_CHARGE"] = storage_dc_charge(gen)
+ inputs["VS_SYM_DC_CHARGE"] = storage_sym_dc_charge(gen)
inputs["VS_ASYM_DC_CHARGE"] = storage_asym_dc_charge(gen)
- # Storage AC Discharge Resources
- inputs["VS_STOR_AC_DISCHARGE"] = storage_ac_discharge(gen)
- inputs["VS_SYM_AC_DISCHARGE"] = storage_sym_ac_discharge(gen)
- inputs["VS_ASYM_AC_DISCHARGE"] = storage_asym_ac_discharge(gen)
+ # Storage AC Discharge Resources
+ inputs["VS_STOR_AC_DISCHARGE"] = storage_ac_discharge(gen)
+ inputs["VS_SYM_AC_DISCHARGE"] = storage_sym_ac_discharge(gen)
+ inputs["VS_ASYM_AC_DISCHARGE"] = storage_asym_ac_discharge(gen)
- # Storage AC Charge Resources
- inputs["VS_STOR_AC_CHARGE"] = storage_ac_charge(gen)
- inputs["VS_SYM_AC_CHARGE"] = storage_sym_ac_charge(gen)
- inputs["VS_ASYM_AC_CHARGE"] = storage_asym_ac_charge(gen)
+ # Storage AC Charge Resources
+ inputs["VS_STOR_AC_CHARGE"] = storage_ac_charge(gen)
+ inputs["VS_SYM_AC_CHARGE"] = storage_sym_ac_charge(gen)
+ inputs["VS_ASYM_AC_CHARGE"] = storage_asym_ac_charge(gen)
- # Storage LDS & Non-LDS Resources
- inputs["VS_LDS"] = is_LDS_VRE_STOR(gen)
- inputs["VS_nonLDS"] = setdiff(STOR, inputs["VS_LDS"])
+ # Storage LDS & Non-LDS Resources
+ inputs["VS_LDS"] = is_LDS_VRE_STOR(gen)
+ inputs["VS_nonLDS"] = setdiff(STOR, inputs["VS_LDS"])
# Symmetric and asymmetric storage resources
- inputs["VS_ASYM"] = union(inputs["VS_ASYM_DC_CHARGE"], inputs["VS_ASYM_DC_DISCHARGE"],
- inputs["VS_ASYM_AC_DISCHARGE"], inputs["VS_ASYM_AC_CHARGE"])
- inputs["VS_SYM_DC"] = intersect(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"])
- inputs["VS_SYM_AC"] = intersect(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])
+ inputs["VS_ASYM"] = union(inputs["VS_ASYM_DC_CHARGE"], inputs["VS_ASYM_DC_DISCHARGE"],
+ inputs["VS_ASYM_AC_DISCHARGE"], inputs["VS_ASYM_AC_CHARGE"])
+ inputs["VS_SYM_DC"] = intersect(inputs["VS_SYM_DC_CHARGE"],
+ inputs["VS_SYM_DC_DISCHARGE"])
+ inputs["VS_SYM_AC"] = intersect(inputs["VS_SYM_AC_CHARGE"],
+ inputs["VS_SYM_AC_DISCHARGE"])
# Send warnings for symmetric/asymmetric resources
- if (!isempty(setdiff(inputs["VS_SYM_DC_DISCHARGE"], inputs["VS_SYM_DC_CHARGE"]))
- || !isempty(setdiff(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"]))
- || !isempty(setdiff(inputs["VS_SYM_AC_DISCHARGE"], inputs["VS_SYM_AC_CHARGE"]))
- || !isempty(setdiff(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])))
+ if (!isempty(setdiff(inputs["VS_SYM_DC_DISCHARGE"], inputs["VS_SYM_DC_CHARGE"]))
+ || !isempty(setdiff(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"]))
+ || !isempty(setdiff(inputs["VS_SYM_AC_DISCHARGE"], inputs["VS_SYM_AC_CHARGE"]))
+ || !isempty(setdiff(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])))
@warn("Symmetric capacities must both be DC or AC.")
end
- # Send warnings for battery resources discharging
- if !isempty(intersect(inputs["VS_STOR_DC_DISCHARGE"], inputs["VS_STOR_AC_DISCHARGE"]))
- @warn("Both AC and DC discharging functionalities are turned on.")
- end
+ # Send warnings for battery resources discharging
+ if !isempty(intersect(inputs["VS_STOR_DC_DISCHARGE"], inputs["VS_STOR_AC_DISCHARGE"]))
+ @warn("Both AC and DC discharging functionalities are turned on.")
+ end
- # Send warnings for battery resources charging
- if !isempty(intersect(inputs["VS_STOR_DC_CHARGE"], inputs["VS_STOR_AC_CHARGE"]))
- @warn("Both AC and DC charging functionalities are turned on.")
- end
+ # Send warnings for battery resources charging
+ if !isempty(intersect(inputs["VS_STOR_DC_CHARGE"], inputs["VS_STOR_AC_CHARGE"]))
+ @warn("Both AC and DC charging functionalities are turned on.")
+ end
end
"""
@@ -926,7 +944,7 @@ Updates the retrofit_id of a resource that can be retrofit or is a retrofit opti
# Arguments
- `r::AbstractResource`: The resource to update.
"""
-function update_retrofit_id(r::AbstractResource)
+function update_retrofit_id(r::AbstractResource)
if haskey(r, :retrofit_id) && (can_retrofit(r) == true || is_retrofit_option(r) == true)
r.retrofit_id = string(r.retrofit_id, "_", region(r))
else
@@ -946,21 +964,25 @@ Adds resources to the `inputs` `Dict` with the key "RESOURCES" together with sev
- `gen (Vector{<:AbstractResource})`: Array of GenX resources.
"""
-function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::AbstractString, gen::Vector{<:AbstractResource})
-
+function add_resources_to_input_data!(inputs::Dict,
+ setup::Dict,
+ case_path::AbstractString,
+ gen::Vector{<:AbstractResource})
+
# Number of resources
G = length(gen)
inputs["G"] = G
# Number of time steps (periods)
T = inputs["T"]
-
+
## HYDRO
# Set of all reservoir hydro resources
inputs["HYDRO_RES"] = hydro(gen)
# Set of hydro resources modeled with known reservoir energy capacity
if !isempty(inputs["HYDRO_RES"])
- inputs["HYDRO_RES_KNOWN_CAP"] = intersect(inputs["HYDRO_RES"], ids_with_positive(gen, hydro_energy_to_power_ratio))
+ inputs["HYDRO_RES_KNOWN_CAP"] = intersect(inputs["HYDRO_RES"],
+ ids_with_positive(gen, hydro_energy_to_power_ratio))
end
## STORAGE
@@ -969,12 +991,12 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
# Set of storage resources with asymmetric (separte) charge/discharge capacity components
inputs["STOR_ASYMMETRIC"] = asymmetric_storage(gen)
# Set of all storage resources
- inputs["STOR_ALL"] = union(inputs["STOR_SYMMETRIC"],inputs["STOR_ASYMMETRIC"])
+ inputs["STOR_ALL"] = union(inputs["STOR_SYMMETRIC"], inputs["STOR_ASYMMETRIC"])
# Set of storage resources with long duration storage capabilitites
inputs["STOR_HYDRO_LONG_DURATION"] = intersect(inputs["HYDRO_RES"], is_LDS(gen))
inputs["STOR_HYDRO_SHORT_DURATION"] = intersect(inputs["HYDRO_RES"], is_SDS(gen))
- inputs["STOR_LONG_DURATION"] = intersect(inputs["STOR_ALL"], is_LDS(gen))
+ inputs["STOR_LONG_DURATION"] = intersect(inputs["STOR_ALL"], is_LDS(gen))
inputs["STOR_SHORT_DURATION"] = intersect(inputs["STOR_ALL"], is_SDS(gen))
## VRE
@@ -1011,10 +1033,10 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
# Set of thermal resources without unit commitment
inputs["THERM_NO_COMMIT"] = no_unit_commitment(gen)
# Start-up cost is sum of fixed cost per start startup
- inputs["C_Start"] = zeros(Float64, G, T)
+ inputs["C_Start"] = zeros(Float64, G, T)
for g in inputs["THERM_COMMIT"]
start_up_cost = start_cost_per_mw(gen[g]) * cap_size(gen[g])
- inputs["C_Start"][g,:] .= start_up_cost
+ inputs["C_Start"][g, :] .= start_up_cost
end
# Piecewise fuel usage option
process_piecewisefuelusage!(setup, gen, inputs)
@@ -1027,27 +1049,28 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
# For now, the only resources eligible for UC are themal resources
inputs["COMMIT"] = inputs["THERM_COMMIT"]
- # Set of CCS resources (optional set):
+ # Set of CCS resources (optional set):
inputs["CCS"] = ids_with_positive(gen, co2_capture_fraction)
# Single-fuel resources
- inputs["SINGLE_FUEL"] = ids_with_singlefuel(gen)
- # Multi-fuel resources
- inputs["MULTI_FUELS"] = ids_with_multifuels(gen)
- if !isempty(inputs["MULTI_FUELS"]) # If there are any resources using multi fuels, read relevant data
- load_multi_fuels_data!(inputs, gen, setup, case_path)
- end
+ inputs["SINGLE_FUEL"] = ids_with_singlefuel(gen)
+ # Multi-fuel resources
+ inputs["MULTI_FUELS"] = ids_with_multifuels(gen)
+ if !isempty(inputs["MULTI_FUELS"]) # If there are any resources using multi fuels, read relevant data
+ load_multi_fuels_data!(inputs, gen, setup, case_path)
+ end
buildable = is_buildable(gen)
retirable = is_retirable(gen)
units_can_retrofit = ids_can_retrofit(gen)
-
+
# Set of all resources eligible for new capacity
inputs["NEW_CAP"] = intersect(buildable, ids_with(gen, max_cap_mw))
# Set of all resources eligible for capacity retirements
inputs["RET_CAP"] = intersect(retirable, ids_with_nonneg(gen, existing_cap_mw))
# Set of all resources eligible for capacity retrofitting (by Yifu, same with retirement)
- inputs["RETROFIT_CAP"] = intersect(units_can_retrofit, ids_with_nonneg(gen, existing_cap_mw))
+ inputs["RETROFIT_CAP"] = intersect(units_can_retrofit,
+ ids_with_nonneg(gen, existing_cap_mw))
inputs["RETROFIT_OPTIONS"] = ids_retrofit_options(gen)
# Retrofit
@@ -1060,14 +1083,15 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
# in the same cluster either all have Contribute_Min_Retirement set to 1 or none of them do
if setup["MultiStage"] == 1
for retrofit_res in inputs["RETROFIT_CAP"]
- if !has_all_options_contributing(gen[retrofit_res], gen) && !has_all_options_not_contributing(gen[retrofit_res], gen)
+ if !has_all_options_contributing(gen[retrofit_res], gen) &&
+ !has_all_options_not_contributing(gen[retrofit_res], gen)
msg = "Retrofit options in the same cluster either all have Contribute_Min_Retirement set to 1 or none of them do. \n" *
- "Check column Contribute_Min_Retirement in the \"Resource_multistage_data.csv\" file for resource $(resource_name(gen[retrofit_res]))."
+ "Check column Contribute_Min_Retirement in the \"Resource_multistage_data.csv\" file for resource $(resource_name(gen[retrofit_res]))."
@error msg
error("Invalid input detected for Contribute_Min_Retirement.")
-
end
- if has_all_options_not_contributing(gen[retrofit_res], gen) && setup["MultiStageSettingsDict"]["Myopic"]==1
+ if has_all_options_not_contributing(gen[retrofit_res], gen) &&
+ setup["MultiStageSettingsDict"]["Myopic"] == 1
@error "When performing myopic multistage expansion all retrofit options need to have Contribute_Min_Retirement set to 1 to avoid model infeasibilities."
error("Invalid input detected for Contribute_Min_Retirement.")
end
@@ -1079,34 +1103,44 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
ret_cap_energy = Set{Int64}()
if !isempty(inputs["STOR_ALL"])
# Set of all storage resources eligible for new energy capacity
- new_cap_energy = intersect(buildable, ids_with(gen, max_cap_mwh), inputs["STOR_ALL"])
+ new_cap_energy = intersect(buildable,
+ ids_with(gen, max_cap_mwh),
+ inputs["STOR_ALL"])
# Set of all storage resources eligible for energy capacity retirements
- ret_cap_energy = intersect(retirable, ids_with_nonneg(gen, existing_cap_mwh), inputs["STOR_ALL"])
+ ret_cap_energy = intersect(retirable,
+ ids_with_nonneg(gen, existing_cap_mwh),
+ inputs["STOR_ALL"])
end
inputs["NEW_CAP_ENERGY"] = new_cap_energy
inputs["RET_CAP_ENERGY"] = ret_cap_energy
- new_cap_charge = Set{Int64}()
- ret_cap_charge = Set{Int64}()
- if !isempty(inputs["STOR_ASYMMETRIC"])
- # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
- new_cap_charge = intersect(buildable, ids_with(gen, max_charge_cap_mw), inputs["STOR_ASYMMETRIC"])
- # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
- ret_cap_charge = intersect(retirable, ids_with_nonneg(gen, existing_charge_cap_mw), inputs["STOR_ASYMMETRIC"])
- end
- inputs["NEW_CAP_CHARGE"] = new_cap_charge
- inputs["RET_CAP_CHARGE"] = ret_cap_charge
+ new_cap_charge = Set{Int64}()
+ ret_cap_charge = Set{Int64}()
+ if !isempty(inputs["STOR_ASYMMETRIC"])
+ # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
+ new_cap_charge = intersect(buildable,
+ ids_with(gen, max_charge_cap_mw),
+ inputs["STOR_ASYMMETRIC"])
+ # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
+ ret_cap_charge = intersect(retirable,
+ ids_with_nonneg(gen, existing_charge_cap_mw),
+ inputs["STOR_ASYMMETRIC"])
+ end
+ inputs["NEW_CAP_CHARGE"] = new_cap_charge
+ inputs["RET_CAP_CHARGE"] = ret_cap_charge
## Co-located resources
# VRE and storage
inputs["VRE_STOR"] = vre_stor(gen)
# Check if VRE-STOR resources exist
- if !isempty(inputs["VRE_STOR"])
+ if !isempty(inputs["VRE_STOR"])
# Solar PV Resources
inputs["VS_SOLAR"] = solar(gen)
# DC Resources
- inputs["VS_DC"] = union(storage_dc_discharge(gen), storage_dc_charge(gen), solar(gen))
+ inputs["VS_DC"] = union(storage_dc_discharge(gen),
+ storage_dc_charge(gen),
+ solar(gen))
# Wind Resources
inputs["VS_WIND"] = wind(gen)
@@ -1116,39 +1150,71 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
gen_VRE_STOR = gen.VreStorage
# Set of all VRE-STOR resources eligible for new solar capacity
- inputs["NEW_CAP_SOLAR"] = intersect(buildable, solar(gen), ids_with(gen_VRE_STOR, max_cap_solar_mw))
+ inputs["NEW_CAP_SOLAR"] = intersect(buildable,
+ solar(gen),
+ ids_with(gen_VRE_STOR, max_cap_solar_mw))
# Set of all VRE_STOR resources eligible for solar capacity retirements
- inputs["RET_CAP_SOLAR"] = intersect(retirable, solar(gen), ids_with_nonneg(gen_VRE_STOR, existing_cap_solar_mw))
+ inputs["RET_CAP_SOLAR"] = intersect(retirable,
+ solar(gen),
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_solar_mw))
# Set of all VRE-STOR resources eligible for new wind capacity
- inputs["NEW_CAP_WIND"] = intersect(buildable, wind(gen), ids_with(gen_VRE_STOR, max_cap_wind_mw))
+ inputs["NEW_CAP_WIND"] = intersect(buildable,
+ wind(gen),
+ ids_with(gen_VRE_STOR, max_cap_wind_mw))
# Set of all VRE_STOR resources eligible for wind capacity retirements
- inputs["RET_CAP_WIND"] = intersect(retirable, wind(gen), ids_with_nonneg(gen_VRE_STOR, existing_cap_wind_mw))
+ inputs["RET_CAP_WIND"] = intersect(retirable,
+ wind(gen),
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_wind_mw))
# Set of all VRE-STOR resources eligible for new inverter capacity
- inputs["NEW_CAP_DC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_inverter_mw), inputs["VS_DC"])
+ inputs["NEW_CAP_DC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_inverter_mw),
+ inputs["VS_DC"])
# Set of all VRE_STOR resources eligible for inverter capacity retirements
- inputs["RET_CAP_DC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_inverter_mw), inputs["VS_DC"])
+ inputs["RET_CAP_DC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_inverter_mw),
+ inputs["VS_DC"])
# Set of all storage resources eligible for new energy capacity
- inputs["NEW_CAP_STOR"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_mwh), inputs["VS_STOR"])
+ inputs["NEW_CAP_STOR"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_mwh),
+ inputs["VS_STOR"])
# Set of all storage resources eligible for energy capacity retirements
- inputs["RET_CAP_STOR"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_mwh), inputs["VS_STOR"])
+ inputs["RET_CAP_STOR"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_mwh),
+ inputs["VS_STOR"])
if !isempty(inputs["VS_ASYM"])
# Set of asymmetric charge DC storage resources eligible for new charge capacity
- inputs["NEW_CAP_CHARGE_DC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_charge_dc_mw), inputs["VS_ASYM_DC_CHARGE"])
+ inputs["NEW_CAP_CHARGE_DC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_charge_dc_mw),
+ inputs["VS_ASYM_DC_CHARGE"])
# Set of asymmetric charge DC storage resources eligible for charge capacity retirements
- inputs["RET_CAP_CHARGE_DC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_charge_dc_mw), inputs["VS_ASYM_DC_CHARGE"])
+ inputs["RET_CAP_CHARGE_DC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_charge_dc_mw),
+ inputs["VS_ASYM_DC_CHARGE"])
# Set of asymmetric discharge DC storage resources eligible for new discharge capacity
- inputs["NEW_CAP_DISCHARGE_DC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_discharge_dc_mw), inputs["VS_ASYM_DC_DISCHARGE"])
+ inputs["NEW_CAP_DISCHARGE_DC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_discharge_dc_mw),
+ inputs["VS_ASYM_DC_DISCHARGE"])
# Set of asymmetric discharge DC storage resources eligible for discharge capacity retirements
- inputs["RET_CAP_DISCHARGE_DC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_discharge_dc_mw), inputs["VS_ASYM_DC_DISCHARGE"])
+ inputs["RET_CAP_DISCHARGE_DC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_discharge_dc_mw),
+ inputs["VS_ASYM_DC_DISCHARGE"])
# Set of asymmetric charge AC storage resources eligible for new charge capacity
- inputs["NEW_CAP_CHARGE_AC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_charge_ac_mw), inputs["VS_ASYM_AC_CHARGE"])
+ inputs["NEW_CAP_CHARGE_AC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_charge_ac_mw),
+ inputs["VS_ASYM_AC_CHARGE"])
# Set of asymmetric charge AC storage resources eligible for charge capacity retirements
- inputs["RET_CAP_CHARGE_AC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_charge_ac_mw), inputs["VS_ASYM_AC_CHARGE"])
+ inputs["RET_CAP_CHARGE_AC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_charge_ac_mw),
+ inputs["VS_ASYM_AC_CHARGE"])
# Set of asymmetric discharge AC storage resources eligible for new discharge capacity
- inputs["NEW_CAP_DISCHARGE_AC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_discharge_ac_mw), inputs["VS_ASYM_AC_DISCHARGE"])
+ inputs["NEW_CAP_DISCHARGE_AC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_discharge_ac_mw),
+ inputs["VS_ASYM_AC_DISCHARGE"])
# Set of asymmetric discharge AC storage resources eligible for discharge capacity retirements
- inputs["RET_CAP_DISCHARGE_AC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_discharge_ac_mw), inputs["VS_ASYM_AC_DISCHARGE"])
- end
+ inputs["RET_CAP_DISCHARGE_AC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_discharge_ac_mw),
+ inputs["VS_ASYM_AC_DISCHARGE"])
+ end
# Names for systemwide resources
inputs["RESOURCE_NAMES_VRE_STOR"] = resource_name(gen_VRE_STOR)
@@ -1174,7 +1240,7 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
# Zones resources are located in
zones = zone_id(gen)
-
+
# Resource identifiers by zone (just zones in resource order + resource and zone concatenated)
inputs["R_ZONES"] = zones
inputs["RESOURCE_ZONES"] = inputs["RESOURCE_NAMES"] .* "_z" .* string.(zones)
@@ -1185,7 +1251,7 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
inputs["HAS_FUEL"] = union(inputs["HAS_FUEL"], inputs["MULTI_FUELS"])
sort!(inputs["HAS_FUEL"])
end
-
+
inputs["RESOURCES"] = gen
return nothing
end
@@ -1205,10 +1271,11 @@ function summary(rs::Vector{<:AbstractResource})
println(repeat("-", line_width))
println("\tResource type \t\tNumber of resources")
println(repeat("=", line_width))
- for r_type ∈ resource_types
+ for r_type in resource_types
num_rs = length(rs[nameof.(typeof.(rs)) .== r_type])
if num_rs > 0
- r_type ∉ keys(rs_summary_names) && error("Resource type $r_type not found in summary map. Please add it to the map.")
+ r_type ∉ keys(rs_summary_names) &&
+ error("Resource type $r_type not found in summary map. Please add it to the map.")
println("\t", rs_summary_names[r_type], "\t\t", num_rs)
end
end
@@ -1232,11 +1299,14 @@ This function loads resources data from the resources_path folder and create the
Raises:
DeprecationWarning: If the `Generators_data.csv` file is found, a deprecation warning is issued, together with an error message.
"""
-function load_resources_data!(inputs::Dict, setup::Dict, case_path::AbstractString, resources_path::AbstractString)
+function load_resources_data!(inputs::Dict,
+ setup::Dict,
+ case_path::AbstractString,
+ resources_path::AbstractString)
if isfile(joinpath(case_path, "Generators_data.csv"))
msg = "The `Generators_data.csv` file was deprecated in release v0.4. " *
- "Please use the new interface for generators creation, and see the documentation for additional details."
- Base.depwarn(msg, :load_resources_data!, force=true)
+ "Please use the new interface for generators creation, and see the documentation for additional details."
+ Base.depwarn(msg, :load_resources_data!, force = true)
error("Exiting GenX...")
end
# create vector of resources from dataframes
@@ -1249,7 +1319,7 @@ function load_resources_data!(inputs::Dict, setup::Dict, case_path::AbstractStri
# read module files add module-related attributes to resource dataframe
add_modules_to_resources!(resources, setup, resources_path)
-
+
# add resources information to inputs dict
add_resources_to_input_data!(inputs, setup, case_path, resources)
@@ -1264,36 +1334,38 @@ end
Function for reading input parameters related to multi fuels
"""
-function load_multi_fuels_data!(inputs::Dict, gen::Vector{<:AbstractResource}, setup::Dict, path::AbstractString)
-
- inputs["NUM_FUELS"] = num_fuels.(gen) # Number of fuels that this resource can use
- max_fuels = maximum(inputs["NUM_FUELS"])
- inputs["FUEL_COLS"] = [ Symbol(string("Fuel",f)) for f in 1:max_fuels ]
- fuel_types = [fuel_cols.(gen, tag=f) for f in 1:max_fuels]
- heat_rates = [heat_rate_cols.(gen, tag=f) for f in 1:max_fuels]
- max_cofire = [max_cofire_cols.(gen, tag=f) for f in 1:max_fuels]
- min_cofire = [min_cofire_cols.(gen, tag=f) for f in 1:max_fuels]
- max_cofire_start = [max_cofire_start_cols.(gen, tag=f) for f in 1:max_fuels]
- min_cofire_start = [min_cofire_start_cols.(gen, tag=f) for f in 1:max_fuels]
- inputs["HEAT_RATES"] = heat_rates
- inputs["MAX_COFIRE"] = max_cofire
- inputs["MIN_COFIRE"] = min_cofire
- inputs["MAX_COFIRE_START"] = max_cofire_start
- inputs["MIN_COFIRE_START"] = min_cofire_start
- inputs["FUEL_TYPES"] = fuel_types
- inputs["MAX_NUM_FUELS"] = max_fuels
+function load_multi_fuels_data!(inputs::Dict,
+ gen::Vector{<:AbstractResource},
+ setup::Dict,
+ path::AbstractString)
+ inputs["NUM_FUELS"] = num_fuels.(gen) # Number of fuels that this resource can use
+ max_fuels = maximum(inputs["NUM_FUELS"])
+ inputs["FUEL_COLS"] = [Symbol(string("Fuel", f)) for f in 1:max_fuels]
+ fuel_types = [fuel_cols.(gen, tag = f) for f in 1:max_fuels]
+ heat_rates = [heat_rate_cols.(gen, tag = f) for f in 1:max_fuels]
+ max_cofire = [max_cofire_cols.(gen, tag = f) for f in 1:max_fuels]
+ min_cofire = [min_cofire_cols.(gen, tag = f) for f in 1:max_fuels]
+ max_cofire_start = [max_cofire_start_cols.(gen, tag = f) for f in 1:max_fuels]
+ min_cofire_start = [min_cofire_start_cols.(gen, tag = f) for f in 1:max_fuels]
+ inputs["HEAT_RATES"] = heat_rates
+ inputs["MAX_COFIRE"] = max_cofire
+ inputs["MIN_COFIRE"] = min_cofire
+ inputs["MAX_COFIRE_START"] = max_cofire_start
+ inputs["MIN_COFIRE_START"] = min_cofire_start
+ inputs["FUEL_TYPES"] = fuel_types
+ inputs["MAX_NUM_FUELS"] = max_fuels
inputs["MAX_NUM_FUELS"] = max_fuels
- # check whether non-zero heat rates are used for resources that only use a single fuel
- for f in 1:max_fuels
- for hr in heat_rates[f][inputs["SINGLE_FUEL"]]
- if hr > 0
- error("Heat rates for multi fuels must be zero when only one fuel is used")
- end
- end
- end
- # do not allow the multi-fuel option when piece-wise heat rates are used
+ # check whether non-zero heat rates are used for resources that only use a single fuel
+ for f in 1:max_fuels
+ for hr in heat_rates[f][inputs["SINGLE_FUEL"]]
+ if hr > 0
+ error("Heat rates for multi fuels must be zero when only one fuel is used")
+ end
+ end
+ end
+ # do not allow the multi-fuel option when piece-wise heat rates are used
if haskey(inputs, "THERM_COMMIT_PWFU") && !isempty(inputs["THERM_COMMIT_PWFU"])
- error("Multi-fuel option is not available when piece-wise heat rates are used. Please remove multi fuels to avoid this error.")
- end
+ error("Multi-fuel option is not available when piece-wise heat rates are used. Please remove multi fuels to avoid this error.")
+ end
end
diff --git a/src/load_inputs/load_vre_stor_variability.jl b/src/load_inputs/load_vre_stor_variability.jl
index 591d2d9876..188780c6ec 100644
--- a/src/load_inputs/load_vre_stor_variability.jl
+++ b/src/load_inputs/load_vre_stor_variability.jl
@@ -7,39 +7,41 @@ Read input parameters related to hourly maximum capacity factors for the solar P
"""
function load_vre_stor_variability!(setup::Dict, path::AbstractString, inputs::Dict)
- # Hourly capacity factors
+ # Hourly capacity factors
TDR_directory = joinpath(path, setup["TimeDomainReductionFolder"])
# if TDR is used, my_dir = TDR_directory, else my_dir = "system"
my_dir = get_systemfiles_path(setup, TDR_directory, path)
-
- filename1 = "Vre_and_stor_solar_variability.csv"
- vre_stor_solar = load_dataframe(joinpath(my_dir, filename1))
- filename2 = "Vre_and_stor_wind_variability.csv"
- vre_stor_wind = load_dataframe(joinpath(my_dir, filename2))
+ filename1 = "Vre_and_stor_solar_variability.csv"
+ vre_stor_solar = load_dataframe(joinpath(my_dir, filename1))
- all_resources = inputs["RESOURCE_NAMES"]
+ filename2 = "Vre_and_stor_wind_variability.csv"
+ vre_stor_wind = load_dataframe(joinpath(my_dir, filename2))
- function ensure_column_zeros!(vre_stor_df, all_resources)
- existing_variability = names(vre_stor_df)
- for r in all_resources
- if r ∉ existing_variability
- ensure_column!(vre_stor_df, r, 0.0)
- end
- end
- end
+ all_resources = inputs["RESOURCE_NAMES"]
- ensure_column_zeros!(vre_stor_solar, all_resources)
- ensure_column_zeros!(vre_stor_wind, all_resources)
+ function ensure_column_zeros!(vre_stor_df, all_resources)
+ existing_variability = names(vre_stor_df)
+ for r in all_resources
+ if r ∉ existing_variability
+ ensure_column!(vre_stor_df, r, 0.0)
+ end
+ end
+ end
- # Reorder DataFrame to R_ID order (order provided in Vre_and_stor_data.csv)
- select!(vre_stor_solar, [:Time_Index; Symbol.(all_resources) ])
- select!(vre_stor_wind, [:Time_Index; Symbol.(all_resources) ])
+ ensure_column_zeros!(vre_stor_solar, all_resources)
+ ensure_column_zeros!(vre_stor_wind, all_resources)
- # Maximum power output and variability of each energy resource
- inputs["pP_Max_Solar"] = transpose(Matrix{Float64}(vre_stor_solar[1:inputs["T"],2:(inputs["G"]+1)]))
- inputs["pP_Max_Wind"] = transpose(Matrix{Float64}(vre_stor_wind[1:inputs["T"],2:(inputs["G"]+1)]))
+ # Reorder DataFrame to R_ID order (order provided in Vre_and_stor_data.csv)
+ select!(vre_stor_solar, [:Time_Index; Symbol.(all_resources)])
+ select!(vre_stor_wind, [:Time_Index; Symbol.(all_resources)])
- println(filename1 * " Successfully Read!")
- println(filename2 * " Successfully Read!")
+ # Maximum power output and variability of each energy resource
+ inputs["pP_Max_Solar"] = transpose(Matrix{Float64}(vre_stor_solar[1:inputs["T"],
+ 2:(inputs["G"] + 1)]))
+ inputs["pP_Max_Wind"] = transpose(Matrix{Float64}(vre_stor_wind[1:inputs["T"],
+ 2:(inputs["G"] + 1)]))
+
+ println(filename1 * " Successfully Read!")
+ println(filename2 * " Successfully Read!")
end
diff --git a/src/model/core/co2.jl b/src/model/core/co2.jl
index 0b4f861bba..7b03d7920a 100644
--- a/src/model/core/co2.jl
+++ b/src/model/core/co2.jl
@@ -51,7 +51,6 @@ eEmissionsCaptureByPlant_{g,t} = CO2\_Capture\_Fraction_y * vFuel_{y,t} * CO2_{
"""
function co2!(EP::Model, inputs::Dict)
-
println("CO2 Module")
gen = inputs["RESOURCES"]
@@ -66,65 +65,76 @@ function co2!(EP::Model, inputs::Dict)
omega = inputs["omega"]
if !isempty(MULTI_FUELS)
max_fuels = inputs["MAX_NUM_FUELS"]
- end
+ end
### Expressions ###
# CO2 emissions from power plants in "Generators_data.csv"
# If all the CO2 capture fractions from Generators_data are zeros, the CO2 emissions from thermal generators are determined by fuel consumption times CO2 content per MMBTU
if isempty(CCS)
- @expression(EP, eEmissionsByPlant[y=1:G, t=1:T],
+ @expression(EP, eEmissionsByPlant[y = 1:G, t = 1:T],
if y in SINGLE_FUEL
- ((1-biomass(gen[y])) *(EP[:vFuel][y, t] + EP[:vStartFuel][y, t]) * fuel_CO2[fuel(gen[y])])
+ ((1 - biomass(gen[y])) * (EP[:vFuel][y, t] + EP[:vStartFuel][y, t]) *
+ fuel_CO2[fuel(gen[y])])
else
- sum(((1-biomass(gen[y])) *(EP[:vMulFuels][y, i, t] + EP[:vMulStartFuels][y, i, t]) * fuel_CO2[fuel_cols(gen[y], tag=i)]) for i = 1:max_fuels)
- end)
- else
+ sum(((1 - biomass(gen[y])) *
+ (EP[:vMulFuels][y, i, t] + EP[:vMulStartFuels][y, i, t]) *
+ fuel_CO2[fuel_cols(gen[y], tag = i)]) for i in 1:max_fuels)
+ end)
+ else
@info "Using the CO2 module to determine the CO2 emissions of CCS-equipped plants"
# CO2_Capture_Fraction refers to the CO2 capture rate of CCS equiped power plants at a steady state
# CO2_Capture_Fraction_Startup refers to the CO2 capture rate of CCS equiped power plants during startup events
- @expression(EP, eEmissionsByPlant[y=1:G, t=1:T],
+ @expression(EP, eEmissionsByPlant[y = 1:G, t = 1:T],
if y in SINGLE_FUEL
- (1-biomass(gen[y]) - co2_capture_fraction(gen[y])) * EP[:vFuel][y, t] * fuel_CO2[fuel(gen[y])]+
- (1-biomass(gen[y]) - co2_capture_fraction_startup(gen[y])) * EP[:eStartFuel][y, t] * fuel_CO2[fuel(gen[y])]
+ (1 - biomass(gen[y]) - co2_capture_fraction(gen[y])) * EP[:vFuel][y, t] *
+ fuel_CO2[fuel(gen[y])] +
+ (1 - biomass(gen[y]) - co2_capture_fraction_startup(gen[y])) *
+ EP[:eStartFuel][y, t] * fuel_CO2[fuel(gen[y])]
else
- sum((1-biomass(gen[y]) - co2_capture_fraction(gen[y])) * EP[:vMulFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag=i)] for i = 1:max_fuels)+
- sum((1-biomass(gen[y]) - co2_capture_fraction_startup(gen[y])) * EP[:vMulStartFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag=i)] for i = 1:max_fuels)
+ sum((1 - biomass(gen[y]) - co2_capture_fraction(gen[y])) *
+ EP[:vMulFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag = i)]
+ for i in 1:max_fuels) +
+ sum((1 - biomass(gen[y]) - co2_capture_fraction_startup(gen[y])) *
+ EP[:vMulStartFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag = i)]
+ for i in 1:max_fuels)
end)
# CO2 captured from power plants in "Generators_data.csv"
- @expression(EP, eEmissionsCaptureByPlant[y in CCS, t=1:T],
+ @expression(EP, eEmissionsCaptureByPlant[y in CCS, t = 1:T],
if y in SINGLE_FUEL
- co2_capture_fraction(gen[y]) * EP[:vFuel][y, t] * fuel_CO2[fuel(gen[y])]+
- co2_capture_fraction_startup(gen[y]) * EP[:eStartFuel][y, t] * fuel_CO2[fuel(gen[y])]
+ co2_capture_fraction(gen[y]) * EP[:vFuel][y, t] * fuel_CO2[fuel(gen[y])] +
+ co2_capture_fraction_startup(gen[y]) * EP[:eStartFuel][y, t] *
+ fuel_CO2[fuel(gen[y])]
else
- sum(co2_capture_fraction(gen[y]) * EP[:vMulFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag=i)] for i = 1:max_fuels)+
- sum(co2_capture_fraction_startup(gen[y]) * EP[:vMulStartFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag=i)] for i = 1:max_fuels)
+ sum(co2_capture_fraction(gen[y]) * EP[:vMulFuels][y, i, t] *
+ fuel_CO2[fuel_cols(gen[y], tag = i)] for i in 1:max_fuels) +
+ sum(co2_capture_fraction_startup(gen[y]) * EP[:vMulStartFuels][y, i, t] *
+ fuel_CO2[fuel_cols(gen[y], tag = i)] for i in 1:max_fuels)
end)
- @expression(EP, eEmissionsCaptureByPlantYear[y in CCS],
- sum(omega[t] * eEmissionsCaptureByPlant[y, t]
- for t in 1:T))
+ @expression(EP, eEmissionsCaptureByPlantYear[y in CCS],
+ sum(omega[t] * eEmissionsCaptureByPlant[y, t]
+ for t in 1:T))
# add CO2 sequestration cost to objective function
# when scale factor is on tCO2/MWh = > kt CO2/GWh
- @expression(EP, ePlantCCO2Sequestration[y in CCS],
- sum(omega[t] * eEmissionsCaptureByPlant[y, t] *
+ @expression(EP, ePlantCCO2Sequestration[y in CCS],
+ sum(omega[t] * eEmissionsCaptureByPlant[y, t] *
ccs_disposal_cost_per_metric_ton(gen[y]) for t in 1:T))
-
- @expression(EP, eZonalCCO2Sequestration[z=1:Z],
- sum(ePlantCCO2Sequestration[y]
- for y in intersect(resources_in_zone_by_rid(gen,z), CCS)))
-
- @expression(EP, eTotaleCCO2Sequestration,
+
+ @expression(EP, eZonalCCO2Sequestration[z = 1:Z],
+ sum(ePlantCCO2Sequestration[y]
+ for y in intersect(resources_in_zone_by_rid(gen, z), CCS)))
+
+ @expression(EP, eTotaleCCO2Sequestration,
sum(eZonalCCO2Sequestration[z] for z in 1:Z))
-
+
add_to_expression!(EP[:eObj], EP[:eTotaleCCO2Sequestration])
end
# emissions by zone
- @expression(EP, eEmissionsByZone[z = 1:Z, t = 1:T],
- sum(eEmissionsByPlant[y, t] for y in resources_in_zone_by_rid(gen,z)))
+ @expression(EP, eEmissionsByZone[z = 1:Z, t = 1:T],
+ sum(eEmissionsByPlant[y, t] for y in resources_in_zone_by_rid(gen, z)))
return EP
-
end
diff --git a/src/model/core/discharge/discharge.jl b/src/model/core/discharge/discharge.jl
index 6955bfffb1..17bc2187dd 100644
--- a/src/model/core/discharge/discharge.jl
+++ b/src/model/core/discharge/discharge.jl
@@ -11,40 +11,40 @@ This module additionally defines contributions to the objective function from va
```
"""
function discharge!(EP::Model, inputs::Dict, setup::Dict)
+ println("Discharge Module")
- println("Discharge Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps
+ ### Variables ###
- ### Variables ###
+ # Energy injected into the grid by resource "y" at hour "t"
+ @variable(EP, vP[y = 1:G, t = 1:T]>=0)
- # Energy injected into the grid by resource "y" at hour "t"
- @variable(EP, vP[y=1:G,t=1:T] >=0);
+ ### Expressions ###
- ### Expressions ###
+ ## Objective Function Expressions ##
- ## Objective Function Expressions ##
+ # Variable costs of "generation" for resource "y" during hour "t" = variable O&M
+ @expression(EP,
+ eCVar_out[y = 1:G, t = 1:T],
+ (inputs["omega"][t]*(var_om_cost_per_mwh(gen[y]) * vP[y, t])))
+ # Sum individual resource contributions to variable discharging costs to get total variable discharging costs
+ @expression(EP, eTotalCVarOutT[t = 1:T], sum(eCVar_out[y, t] for y in 1:G))
+ @expression(EP, eTotalCVarOut, sum(eTotalCVarOutT[t] for t in 1:T))
- # Variable costs of "generation" for resource "y" during hour "t" = variable O&M
- @expression(EP, eCVar_out[y=1:G,t=1:T], (inputs["omega"][t]*(var_om_cost_per_mwh(gen[y])*vP[y,t])))
- # Sum individual resource contributions to variable discharging costs to get total variable discharging costs
- @expression(EP, eTotalCVarOutT[t=1:T], sum(eCVar_out[y,t] for y in 1:G))
- @expression(EP, eTotalCVarOut, sum(eTotalCVarOutT[t] for t in 1:T))
-
- # Add total variable discharging cost contribution to the objective function
- add_to_expression!(EP[:eObj], eTotalCVarOut)
-
- # ESR Policy
- if setup["EnergyShareRequirement"] >= 1
-
- @expression(EP, eESRDischarge[ESR=1:inputs["nESR"]],
- + sum(inputs["omega"][t] * esr(gen[y],tag=ESR) * EP[:vP][y,t] for y=ids_with_policy(gen, esr, tag=ESR), t=1:T)
- - sum(inputs["dfESR"][z,ESR]*inputs["omega"][t]*inputs["pD"][t,z] for t=1:T, z=findall(x->x>0,inputs["dfESR"][:,ESR]))
- )
- add_similar_to_expression!(EP[:eESR], eESRDischarge)
- end
+ # Add total variable discharging cost contribution to the objective function
+ add_to_expression!(EP[:eObj], eTotalCVarOut)
+ # ESR Policy
+ if setup["EnergyShareRequirement"] >= 1
+ @expression(EP, eESRDischarge[ESR = 1:inputs["nESR"]],
+ +sum(inputs["omega"][t] * esr(gen[y], tag = ESR) * EP[:vP][y, t]
+ for y in ids_with_policy(gen, esr, tag = ESR), t in 1:T)
+ -sum(inputs["dfESR"][z, ESR] * inputs["omega"][t] * inputs["pD"][t, z]
+ for t in 1:T, z in findall(x -> x > 0, inputs["dfESR"][:, ESR])))
+ add_similar_to_expression!(EP[:eESR], eESRDischarge)
+ end
end
diff --git a/src/model/core/discharge/investment_discharge.jl b/src/model/core/discharge/investment_discharge.jl
index 1bd1a5a07e..2db459fcb8 100755
--- a/src/model/core/discharge/investment_discharge.jl
+++ b/src/model/core/discharge/investment_discharge.jl
@@ -33,136 +33,150 @@ In addition, this function adds investment and fixed O\&M related costs related
```
"""
function investment_discharge!(EP::Model, inputs::Dict, setup::Dict)
+ println("Investment Discharge Module")
+ MultiStage = setup["MultiStage"]
- println("Investment Discharge Module")
- MultiStage = setup["MultiStage"]
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
-
- NEW_CAP = inputs["NEW_CAP"] # Set of all resources eligible for new capacity
- RET_CAP = inputs["RET_CAP"] # Set of all resources eligible for capacity retirements
- COMMIT = inputs["COMMIT"] # Set of all resources eligible for unit commitment
- RETROFIT_CAP = inputs["RETROFIT_CAP"] # Set of all resources being retrofitted
+ NEW_CAP = inputs["NEW_CAP"] # Set of all resources eligible for new capacity
+ RET_CAP = inputs["RET_CAP"] # Set of all resources eligible for capacity retirements
+ COMMIT = inputs["COMMIT"] # Set of all resources eligible for unit commitment
+ RETROFIT_CAP = inputs["RETROFIT_CAP"] # Set of all resources being retrofitted
- ### Variables ###
+ ### Variables ###
- # Retired capacity of resource "y" from existing capacity
- @variable(EP, vRETCAP[y in RET_CAP] >= 0);
+ # Retired capacity of resource "y" from existing capacity
+ @variable(EP, vRETCAP[y in RET_CAP]>=0)
# New installed capacity of resource "y"
- @variable(EP, vCAP[y in NEW_CAP] >= 0);
-
- if MultiStage == 1
- @variable(EP, vEXISTINGCAP[y=1:G] >= 0);
- end
-
- # Being retrofitted capacity of resource y
- @variable(EP, vRETROFITCAP[y in RETROFIT_CAP] >= 0);
-
-
- ### Expressions ###
-
- if MultiStage == 1
- @expression(EP, eExistingCap[y in 1:G], vEXISTINGCAP[y])
- else
- @expression(EP, eExistingCap[y in 1:G], existing_cap_mw(gen[y]))
- end
-
- @expression(EP, eTotalCap[y in 1:G],
- if y in intersect(NEW_CAP, RET_CAP, RETROFIT_CAP) # Resources eligible for new capacity, retirements and being retrofitted
- if y in COMMIT
- eExistingCap[y] + cap_size(gen[y])*(EP[:vCAP][y] - EP[:vRETCAP][y] - EP[:vRETROFITCAP][y])
- else
- eExistingCap[y] + EP[:vCAP][y] - EP[:vRETCAP][y] - EP[:vRETROFITCAP][y]
- end
- elseif y in intersect(setdiff(RET_CAP, NEW_CAP), setdiff(RET_CAP, RETROFIT_CAP)) # Resources eligible for only capacity retirements
- if y in COMMIT
- eExistingCap[y] - cap_size(gen[y])*EP[:vRETCAP][y]
- else
- eExistingCap[y] - EP[:vRETCAP][y]
- end
- elseif y in setdiff(intersect(RET_CAP, NEW_CAP), RETROFIT_CAP) # Resources eligible for retirement and new capacity
- if y in COMMIT
- eExistingCap[y] + cap_size(gen[y])* (EP[:vCAP][y] - EP[:vRETCAP][y])
- else
- eExistingCap[y] + EP[:vCAP][y] - EP[:vRETCAP][y]
- end
- elseif y in setdiff(intersect(RET_CAP, RETROFIT_CAP), NEW_CAP) # Resources eligible for retirement and retrofitting
- if y in COMMIT
- eExistingCap[y] - cap_size(gen[y]) * (EP[:vRETROFITCAP][y] + EP[:vRETCAP][y])
- else
- eExistingCap[y] - (EP[:vRETROFITCAP][y] + EP[:vRETCAP][y])
- end
- elseif y in intersect(setdiff(NEW_CAP, RET_CAP),setdiff(NEW_CAP, RETROFIT_CAP)) # Resources eligible for only new capacity
- if y in COMMIT
- eExistingCap[y] + cap_size(gen[y])*EP[:vCAP][y]
- else
- eExistingCap[y] + EP[:vCAP][y]
- end
- else # Resources not eligible for new capacity or retirement
- eExistingCap[y] + EP[:vZERO]
- end
-)
-
- ### Need editting ##
- @expression(EP, eCFix[y in 1:G],
- if y in NEW_CAP # Resources eligible for new capacity (Non-Retrofit)
- if y in COMMIT
- inv_cost_per_mwyr(gen[y])*cap_size(gen[y])*vCAP[y] + fixed_om_cost_per_mwyr(gen[y])*eTotalCap[y]
- else
- inv_cost_per_mwyr(gen[y])*vCAP[y] + fixed_om_cost_per_mwyr(gen[y])*eTotalCap[y]
- end
- else
- fixed_om_cost_per_mwyr(gen[y])*eTotalCap[y]
- end
-)
- # Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFix, sum(EP[:eCFix][y] for y in 1:G))
-
- # Add term to objective function expression
- if MultiStage == 1
- # OPEX multiplier scales fixed costs to account for multiple years between two model stages
- # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
- # and we have already accounted for multiple years between stages for fixed costs.
- add_to_expression!(EP[:eObj], 1/inputs["OPEXMULT"], eTotalCFix)
- else
- add_to_expression!(EP[:eObj], eTotalCFix)
- end
-
- ### Constratints ###
-
- if MultiStage == 1
- # Existing capacity variable is equal to existing capacity specified in the input file
- @constraint(EP, cExistingCap[y in 1:G], EP[:vEXISTINGCAP][y] == existing_cap_mw(gen[y]))
- end
-
- ## Constraints on retirements and capacity additions
- # Cannot retire more capacity than existing capacity
- @constraint(EP, cMaxRetNoCommit[y in setdiff(RET_CAP,COMMIT)], vRETCAP[y] <= eExistingCap[y])
- @constraint(EP, cMaxRetCommit[y in intersect(RET_CAP,COMMIT)], cap_size(gen[y])*vRETCAP[y] <= eExistingCap[y])
- @constraint(EP, cMaxRetroNoCommit[y in setdiff(RETROFIT_CAP,COMMIT)], vRETROFITCAP[y] + vRETCAP[y] <= eExistingCap[y])
- @constraint(EP, cMaxRetroCommit[y in intersect(RETROFIT_CAP,COMMIT)], cap_size(gen[y]) * (vRETROFITCAP[y] + vRETCAP[y]) <= eExistingCap[y])
-
- ## Constraints on new built capacity
- # Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- MAX_CAP = ids_with_positive(gen, max_cap_mw)
- @constraint(EP, cMaxCap[y in MAX_CAP], eTotalCap[y] <= max_cap_mw(gen[y]))
-
- # Constraint on minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- MIN_CAP = ids_with_positive(gen, min_cap_mw)
- @constraint(EP, cMinCap[y in MIN_CAP], eTotalCap[y] >= min_cap_mw(gen[y]))
-
- if setup["MinCapReq"] == 1
- @expression(EP, eMinCapResInvest[mincap = 1:inputs["NumberOfMinCapReqs"]], sum(EP[:eTotalCap][y] for y in ids_with_policy(gen, min_cap, tag=mincap)))
- add_similar_to_expression!(EP[:eMinCapRes], eMinCapResInvest)
- end
-
- if setup["MaxCapReq"] == 1
- @expression(EP, eMaxCapResInvest[maxcap = 1:inputs["NumberOfMaxCapReqs"]], sum(EP[:eTotalCap][y] for y in ids_with_policy(gen, max_cap, tag=maxcap)))
- add_similar_to_expression!(EP[:eMaxCapRes], eMaxCapResInvest)
- end
+ @variable(EP, vCAP[y in NEW_CAP]>=0)
+
+ if MultiStage == 1
+ @variable(EP, vEXISTINGCAP[y = 1:G]>=0)
+ end
+
+ # Being retrofitted capacity of resource y
+ @variable(EP, vRETROFITCAP[y in RETROFIT_CAP]>=0)
+
+ ### Expressions ###
+
+ if MultiStage == 1
+ @expression(EP, eExistingCap[y in 1:G], vEXISTINGCAP[y])
+ else
+ @expression(EP, eExistingCap[y in 1:G], existing_cap_mw(gen[y]))
+ end
+
+ @expression(EP, eTotalCap[y in 1:G],
+ if y in intersect(NEW_CAP, RET_CAP, RETROFIT_CAP) # Resources eligible for new capacity, retirements and being retrofitted
+ if y in COMMIT
+ eExistingCap[y] +
+ cap_size(gen[y]) * (EP[:vCAP][y] - EP[:vRETCAP][y] - EP[:vRETROFITCAP][y])
+ else
+ eExistingCap[y] + EP[:vCAP][y] - EP[:vRETCAP][y] - EP[:vRETROFITCAP][y]
+ end
+ elseif y in intersect(setdiff(RET_CAP, NEW_CAP), setdiff(RET_CAP, RETROFIT_CAP)) # Resources eligible for only capacity retirements
+ if y in COMMIT
+ eExistingCap[y] - cap_size(gen[y]) * EP[:vRETCAP][y]
+ else
+ eExistingCap[y] - EP[:vRETCAP][y]
+ end
+ elseif y in setdiff(intersect(RET_CAP, NEW_CAP), RETROFIT_CAP) # Resources eligible for retirement and new capacity
+ if y in COMMIT
+ eExistingCap[y] + cap_size(gen[y]) * (EP[:vCAP][y] - EP[:vRETCAP][y])
+ else
+ eExistingCap[y] + EP[:vCAP][y] - EP[:vRETCAP][y]
+ end
+ elseif y in setdiff(intersect(RET_CAP, RETROFIT_CAP), NEW_CAP) # Resources eligible for retirement and retrofitting
+ if y in COMMIT
+ eExistingCap[y] -
+ cap_size(gen[y]) * (EP[:vRETROFITCAP][y] + EP[:vRETCAP][y])
+ else
+ eExistingCap[y] - (EP[:vRETROFITCAP][y] + EP[:vRETCAP][y])
+ end
+ elseif y in intersect(setdiff(NEW_CAP, RET_CAP), setdiff(NEW_CAP, RETROFIT_CAP)) # Resources eligible for only new capacity
+ if y in COMMIT
+ eExistingCap[y] + cap_size(gen[y]) * EP[:vCAP][y]
+ else
+ eExistingCap[y] + EP[:vCAP][y]
+ end
+ else # Resources not eligible for new capacity or retirement
+ eExistingCap[y] + EP[:vZERO]
+ end)
+
+ ### Need editing ###
+ @expression(EP, eCFix[y in 1:G],
+ if y in NEW_CAP # Resources eligible for new capacity (Non-Retrofit)
+ if y in COMMIT
+ inv_cost_per_mwyr(gen[y]) * cap_size(gen[y]) * vCAP[y] +
+ fixed_om_cost_per_mwyr(gen[y]) * eTotalCap[y]
+ else
+ inv_cost_per_mwyr(gen[y]) * vCAP[y] +
+ fixed_om_cost_per_mwyr(gen[y]) * eTotalCap[y]
+ end
+ else
+ fixed_om_cost_per_mwyr(gen[y]) * eTotalCap[y]
+ end)
+ # Sum individual resource contributions to fixed costs to get total fixed costs
+ @expression(EP, eTotalCFix, sum(EP[:eCFix][y] for y in 1:G))
+
+ # Add term to objective function expression
+ if MultiStage == 1
+ # OPEX multiplier scales fixed costs to account for multiple years between two model stages
+ # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
+ # and we have already accounted for multiple years between stages for fixed costs.
+ add_to_expression!(EP[:eObj], 1 / inputs["OPEXMULT"], eTotalCFix)
+ else
+ add_to_expression!(EP[:eObj], eTotalCFix)
+ end
+
+ ### Constraints ###
+
+ if MultiStage == 1
+ # Existing capacity variable is equal to existing capacity specified in the input file
+ @constraint(EP,
+ cExistingCap[y in 1:G],
+ EP[:vEXISTINGCAP][y]==existing_cap_mw(gen[y]))
+ end
+
+ ## Constraints on retirements and capacity additions
+ # Cannot retire more capacity than existing capacity
+ @constraint(EP,
+ cMaxRetNoCommit[y in setdiff(RET_CAP, COMMIT)],
+ vRETCAP[y]<=eExistingCap[y])
+ @constraint(EP,
+ cMaxRetCommit[y in intersect(RET_CAP, COMMIT)],
+ cap_size(gen[y]) * vRETCAP[y]<=eExistingCap[y])
+ @constraint(EP,
+ cMaxRetroNoCommit[y in setdiff(RETROFIT_CAP, COMMIT)],
+ vRETROFITCAP[y] + vRETCAP[y]<=eExistingCap[y])
+ @constraint(EP,
+ cMaxRetroCommit[y in intersect(RETROFIT_CAP, COMMIT)],
+ cap_size(gen[y]) * (vRETROFITCAP[y] + vRETCAP[y])<=eExistingCap[y])
+
+ ## Constraints on new built capacity
+ # Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
+ # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasibility
+ MAX_CAP = ids_with_positive(gen, max_cap_mw)
+ @constraint(EP, cMaxCap[y in MAX_CAP], eTotalCap[y]<=max_cap_mw(gen[y]))
+
+ # Constraint on minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
+ # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasibility
+ MIN_CAP = ids_with_positive(gen, min_cap_mw)
+ @constraint(EP, cMinCap[y in MIN_CAP], eTotalCap[y]>=min_cap_mw(gen[y]))
+
+ if setup["MinCapReq"] == 1
+ @expression(EP,
+ eMinCapResInvest[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(EP[:eTotalCap][y] for y in ids_with_policy(gen, min_cap, tag = mincap)))
+ add_similar_to_expression!(EP[:eMinCapRes], eMinCapResInvest)
+ end
+
+ if setup["MaxCapReq"] == 1
+ @expression(EP,
+ eMaxCapResInvest[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(EP[:eTotalCap][y] for y in ids_with_policy(gen, max_cap, tag = maxcap)))
+ add_similar_to_expression!(EP[:eMaxCapRes], eMaxCapResInvest)
+ end
end
diff --git a/src/model/core/fuel.jl b/src/model/core/fuel.jl
index b8a8395ff2..e7665c65a7 100644
--- a/src/model/core/fuel.jl
+++ b/src/model/core/fuel.jl
@@ -56,7 +56,7 @@ fuel $i$ consumption by plant $y$ at time $t$ ($vMulFuel_{y,i,t}$); startup fuel
For plants using multiple fuels:
-During startup, heat input from multiple startup fuels are equal to startup fuel requirements in plant $y$ at time $t$: $StartFuelMMBTUperMW$ times $Capsize$.
+During startup, heat input from multiple startup fuels is equal to startup fuel requirements in plant $y$ at time $t$: $StartFuelMMBTUperMW$ $\times$ $CapSize$.
```math
\begin{aligned}
\sum_{i \in \mathcal{I} } vMulStartFuels_{y, i, t}= CapSize_{y} \times StartFuelMMBTUperMW_{y} \times vSTART_{y,t}
@@ -76,11 +76,11 @@ vMulFuels_{y, i, t} >= vPower_{y,t} \times MinCofire_{i}
\begin{aligned}
vMulFuels_{y, i, t} <= vPower_{y,t} \times MaxCofire_{i}
\end{aligned}
-
+```
"""
function fuel!(EP::Model, inputs::Dict, setup::Dict)
println("Fuel Module")
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
T = inputs["T"] # Number of time steps (hours)
Z = inputs["Z"] # Number of zones
@@ -89,17 +89,17 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
HAS_FUEL = inputs["HAS_FUEL"]
MULTI_FUELS = inputs["MULTI_FUELS"]
SINGLE_FUEL = inputs["SINGLE_FUEL"]
-
+
fuels = inputs["fuels"]
fuel_costs = inputs["fuel_costs"]
omega = inputs["omega"]
NUM_FUEL = length(fuels)
-
+
# create variable for fuel consumption for output
# for resources that only use a single fuel
- @variable(EP, vFuel[y in SINGLE_FUEL, t = 1:T] >= 0)
- @variable(EP, vStartFuel[y in SINGLE_FUEL, t = 1:T] >= 0)
+ @variable(EP, vFuel[y in SINGLE_FUEL, t = 1:T]>=0)
+ @variable(EP, vStartFuel[y in SINGLE_FUEL, t = 1:T]>=0)
# for resources that use multi fuels
# vMulFuels[y, f, t]: y - resource ID; f - fuel ID; t: time
@@ -108,71 +108,76 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
heat_rates = inputs["HEAT_RATES"]
min_cofire = inputs["MIN_COFIRE"]
max_cofire = inputs["MAX_COFIRE"]
- min_cofire_start =inputs["MIN_COFIRE_START"]
- max_cofire_start =inputs["MAX_COFIRE_START"]
-
- COFIRE_MAX = [findall(g -> max_cofire_cols(g, tag=i) < 1, gen[MULTI_FUELS]) for i in 1:max_fuels]
- COFIRE_MAX_START = [findall(g -> max_cofire_start_cols(g, tag=i) < 1, gen[MULTI_FUELS]) for i in 1:max_fuels]
- COFIRE_MIN = [findall(g -> min_cofire_cols(g, tag=i) > 0, gen[MULTI_FUELS]) for i in 1:max_fuels]
- COFIRE_MIN_START = [findall(g -> min_cofire_start_cols(g, tag=i) > 0, gen[MULTI_FUELS]) for i in 1:max_fuels]
-
- @variable(EP, vMulFuels[y in MULTI_FUELS, i = 1:max_fuels, t = 1:T] >= 0)
- @variable(EP, vMulStartFuels[y in MULTI_FUELS, i = 1:max_fuels, t = 1:T] >= 0)
- end
+ min_cofire_start = inputs["MIN_COFIRE_START"]
+ max_cofire_start = inputs["MAX_COFIRE_START"]
+
+ COFIRE_MAX = [findall(g -> max_cofire_cols(g, tag = i) < 1, gen[MULTI_FUELS])
+ for i in 1:max_fuels]
+ COFIRE_MAX_START = [findall(g -> max_cofire_start_cols(g, tag = i) < 1,
+ gen[MULTI_FUELS]) for i in 1:max_fuels]
+ COFIRE_MIN = [findall(g -> min_cofire_cols(g, tag = i) > 0, gen[MULTI_FUELS])
+ for i in 1:max_fuels]
+ COFIRE_MIN_START = [findall(g -> min_cofire_start_cols(g, tag = i) > 0,
+ gen[MULTI_FUELS]) for i in 1:max_fuels]
+
+ @variable(EP, vMulFuels[y in MULTI_FUELS, i = 1:max_fuels, t = 1:T]>=0)
+ @variable(EP, vMulStartFuels[y in MULTI_FUELS, i = 1:max_fuels, t = 1:T]>=0)
+ end
### Expressions ####
# Fuel consumed on start-up (MMBTU or kMMBTU (scaled))
# if unit commitment is modelled
@expression(EP, eStartFuel[y in 1:G, t = 1:T],
if y in THERM_COMMIT
- (cap_size(gen[y]) * EP[:vSTART][y, t] *
- start_fuel_mmbtu_per_mw(gen[y]))
+ (cap_size(gen[y]) * EP[:vSTART][y, t] *
+ start_fuel_mmbtu_per_mw(gen[y]))
else
0
end)
-
+
# time-series fuel consumption by plant
@expression(EP, ePlantFuel_generation[y in 1:G, t = 1:T],
if y in SINGLE_FUEL # for single fuel plants
EP[:vFuel][y, t]
else # for multi fuel plants
- sum(EP[:vMulFuels][y, i, t] for i in 1:max_fuels)
+ sum(EP[:vMulFuels][y, i, t] for i in 1:max_fuels)
end)
@expression(EP, ePlantFuel_start[y in 1:G, t = 1:T],
if y in SINGLE_FUEL # for single fuel plants
EP[:vStartFuel][y, t]
else # for multi fuel plants
- sum(EP[:vMulStartFuels][y, i, t] for i in 1:max_fuels)
+ sum(EP[:vMulStartFuels][y, i, t] for i in 1:max_fuels)
end)
# for multi-fuel resources
# annual fuel consumption by plant and fuel type
if !isempty(MULTI_FUELS)
- @expression(EP, ePlantFuelConsumptionYear_multi_generation[y in MULTI_FUELS, i in 1:max_fuels],
+ @expression(EP,
+ ePlantFuelConsumptionYear_multi_generation[y in MULTI_FUELS, i in 1:max_fuels],
sum(omega[t] * EP[:vMulFuels][y, i, t] for t in 1:T))
- @expression(EP, ePlantFuelConsumptionYear_multi_start[y in MULTI_FUELS, i in 1:max_fuels],
+ @expression(EP,
+ ePlantFuelConsumptionYear_multi_start[y in MULTI_FUELS, i in 1:max_fuels],
sum(omega[t] * EP[:vMulStartFuels][y, i, t] for t in 1:T))
- @expression(EP, ePlantFuelConsumptionYear_multi[y in MULTI_FUELS, i in 1:max_fuels],
- EP[:ePlantFuelConsumptionYear_multi_generation][y, i] + EP[:ePlantFuelConsumptionYear_multi_start][y, i])
+ @expression(EP, ePlantFuelConsumptionYear_multi[y in MULTI_FUELS, i in 1:max_fuels],
+ EP[:ePlantFuelConsumptionYear_multi_generation][y,
+ i]+EP[:ePlantFuelConsumptionYear_multi_start][y, i])
end
# fuel_cost is in $/MMBTU (M$/billion BTU if scaled)
# vFuel and eStartFuel is MMBTU (or billion BTU if scaled)
# eCFuel_start or eCFuel_out is $ or Million$
-
+
# Start up fuel cost
# for multi-fuel resources
if !isempty(MULTI_FUELS)
# time-series fuel consumption costs by plant and fuel type during startup
- @expression(EP, eCFuelOut_multi_start[y in MULTI_FUELS , i in 1:max_fuels, t = 1:T],
- fuel_costs[fuel_cols(gen[y], tag=i)][t] * EP[:vMulStartFuels][y, i, t]
- )
+ @expression(EP, eCFuelOut_multi_start[y in MULTI_FUELS, i in 1:max_fuels, t = 1:T],
+ fuel_costs[fuel_cols(gen[y], tag = i)][t]*EP[:vMulStartFuels][y, i, t])
# annual plant level fuel cost by fuel type during generation
- @expression(EP, ePlantCFuelOut_multi_start[y in MULTI_FUELS, i in 1:max_fuels],
+ @expression(EP, ePlantCFuelOut_multi_start[y in MULTI_FUELS, i in 1:max_fuels],
sum(omega[t] * EP[:eCFuelOut_multi_start][y, i, t] for t in 1:T))
-
end
- @expression(EP, eCFuelStart[y = 1:G, t = 1:T],
+ @expression(EP, eCFuelStart[y = 1:G, t = 1:T],
if y in SINGLE_FUEL
(fuel_costs[fuel(gen[y])][t] * EP[:vStartFuel][y, t])
else
@@ -180,44 +185,40 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
end)
# plant level start-up fuel cost for output
- @expression(EP, ePlantCFuelStart[y = 1:G],
+ @expression(EP, ePlantCFuelStart[y = 1:G],
sum(omega[t] * EP[:eCFuelStart][y, t] for t in 1:T))
# zonal level total fuel cost for output
- @expression(EP, eZonalCFuelStart[z = 1:Z],
- sum(EP[:ePlantCFuelStart][y] for y in resources_in_zone_by_rid(gen,z)))
+ @expression(EP, eZonalCFuelStart[z = 1:Z],
+ sum(EP[:ePlantCFuelStart][y] for y in resources_in_zone_by_rid(gen, z)))
# Fuel cost for power generation
# for multi-fuel resources
if !isempty(MULTI_FUELS)
# time-series fuel consumption costs by plant and fuel type during generation
- @expression(EP, eCFuelOut_multi[y in MULTI_FUELS , i in 1:max_fuels, t = 1:T],
- fuel_costs[fuel_cols(gen[y], tag=i)][t] * EP[:vMulFuels][y,i,t]
- )
+ @expression(EP, eCFuelOut_multi[y in MULTI_FUELS, i in 1:max_fuels, t = 1:T],
+ fuel_costs[fuel_cols(gen[y], tag = i)][t]*EP[:vMulFuels][y, i, t])
# annual plant level fuel cost by fuel type during generation
- @expression(EP, ePlantCFuelOut_multi[y in MULTI_FUELS, i in 1:max_fuels],
+ @expression(EP, ePlantCFuelOut_multi[y in MULTI_FUELS, i in 1:max_fuels],
sum(omega[t] * EP[:eCFuelOut_multi][y, i, t] for t in 1:T))
-
end
- @expression(EP, eCFuelOut[y = 1:G, t = 1:T],
+ @expression(EP, eCFuelOut[y = 1:G, t = 1:T],
if y in SINGLE_FUEL
(fuel_costs[fuel(gen[y])][t] * EP[:vFuel][y, t])
else
sum(EP[:eCFuelOut_multi][y, i, t] for i in 1:max_fuels)
end)
# plant level start-up fuel cost for output
- @expression(EP, ePlantCFuelOut[y = 1:G],
+ @expression(EP, ePlantCFuelOut[y = 1:G],
sum(omega[t] * EP[:eCFuelOut][y, t] for t in 1:T))
# zonal level total fuel cost for output
- @expression(EP, eZonalCFuelOut[z = 1:Z],
- sum(EP[:ePlantCFuelOut][y] for y in resources_in_zone_by_rid(gen,z)))
-
+ @expression(EP, eZonalCFuelOut[z = 1:Z],
+ sum(EP[:ePlantCFuelOut][y] for y in resources_in_zone_by_rid(gen, z)))
# system level total fuel cost for output
@expression(EP, eTotalCFuelOut, sum(eZonalCFuelOut[z] for z in 1:Z))
@expression(EP, eTotalCFuelStart, sum(eZonalCFuelStart[z] for z in 1:Z))
-
add_to_expression!(EP[:eObj], EP[:eTotalCFuelOut] + EP[:eTotalCFuelStart])
#fuel consumption (MMBTU or Billion BTU)
@@ -225,40 +226,46 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
if !isempty(MULTI_FUELS)
@expression(EP, eFuelConsumption_multi[f in 1:NUM_FUEL, t in 1:T],
sum((EP[:vMulFuels][y, i, t] + EP[:vMulStartFuels][y, i, t]) #i: fuel id
- for i in 1:max_fuels,
- y in intersect(resource_id.(gen[fuel_cols.(gen, tag=i) .== string(fuels[f])]), MULTI_FUELS))
- )
+ for i in 1:max_fuels,
+ y in intersect(
+ resource_id.(gen[fuel_cols.(gen, tag = i) .== string(fuels[f])]),
+ MULTI_FUELS)))
end
@expression(EP, eFuelConsumption_single[f in 1:NUM_FUEL, t in 1:T],
- sum(EP[:vFuel][y, t] + EP[:eStartFuel][y,t]
- for y in intersect(resources_with_fuel(gen, fuels[f]), SINGLE_FUEL)))
-
+ sum(EP[:vFuel][y, t] + EP[:eStartFuel][y, t]
+ for y in intersect(resources_with_fuel(gen, fuels[f]), SINGLE_FUEL)))
+
@expression(EP, eFuelConsumption[f in 1:NUM_FUEL, t in 1:T],
if !isempty(MULTI_FUELS)
- eFuelConsumption_multi[f, t] + eFuelConsumption_single[f,t]
+ eFuelConsumption_multi[f, t] + eFuelConsumption_single[f, t]
else
- eFuelConsumption_single[f,t]
+ eFuelConsumption_single[f, t]
end)
@expression(EP, eFuelConsumptionYear[f in 1:NUM_FUEL],
sum(omega[t] * EP[:eFuelConsumption][f, t] for t in 1:T))
-
### Constraint ###
### only apply constraint to generators with fuel type other than None
- @constraint(EP, cFuelCalculation_single[y in intersect(SINGLE_FUEL, setdiff(HAS_FUEL, THERM_COMMIT)), t = 1:T],
- EP[:vFuel][y, t] - EP[:vP][y, t] * heat_rate_mmbtu_per_mwh(gen[y]) == 0)
+ @constraint(EP,
+ cFuelCalculation_single[
+ y in intersect(SINGLE_FUEL, setdiff(HAS_FUEL, THERM_COMMIT)),
+ t = 1:T],
+ EP[:vFuel][y, t] - EP[:vP][y, t] * heat_rate_mmbtu_per_mwh(gen[y])==0)
if !isempty(MULTI_FUELS)
- @constraint(EP, cFuelCalculation_multi[y in intersect(MULTI_FUELS, setdiff(HAS_FUEL, THERM_COMMIT)), t = 1:T],
- sum(EP[:vMulFuels][y, i, t]/heat_rates[i][y] for i in 1:max_fuels) - EP[:vP][y, t] == 0
- )
+ @constraint(EP,
+ cFuelCalculation_multi[
+ y in intersect(MULTI_FUELS,
+ setdiff(HAS_FUEL, THERM_COMMIT)),
+ t = 1:T],
+ sum(EP[:vMulFuels][y, i, t] / heat_rates[i][y] for i in 1:max_fuels) -
+ EP[:vP][y, t]==0)
end
-
- if !isempty(THERM_COMMIT)
+ if !isempty(THERM_COMMIT)
# Only apply piecewise fuel consumption to thermal generators in THERM_COMMIT_PWFU set
THERM_COMMIT_PWFU = inputs["THERM_COMMIT_PWFU"]
# segemnt for piecewise fuel usage
@@ -270,61 +277,76 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
segment_intercept(y, seg) = PWFU_data[y, intercept_cols[seg]]
segment_slope(y, seg) = PWFU_data[y, slope_cols[seg]]
# constraint for piecewise fuel consumption
- @constraint(EP, PiecewiseFuelUsage[y in THERM_COMMIT_PWFU, t = 1:T, seg in segs],
- EP[:vFuel][y, t] >= (EP[:vP][y, t] * segment_slope(y, seg) +
- EP[:vCOMMIT][y, t] * segment_intercept(y, seg)))
+ @constraint(EP,
+ PiecewiseFuelUsage[y in THERM_COMMIT_PWFU, t = 1:T, seg in segs],
+ EP[:vFuel][y,
+ t]>=(EP[:vP][y, t] * segment_slope(y, seg) +
+ EP[:vCOMMIT][y, t] * segment_intercept(y, seg)))
end
-
+
# constraint for fuel consumption at a constant heat rate
- @constraint(EP, FuelCalculationCommit_single[y in intersect(setdiff(THERM_COMMIT,THERM_COMMIT_PWFU), SINGLE_FUEL), t = 1:T],
- EP[:vFuel][y, t] - EP[:vP][y, t] * heat_rate_mmbtu_per_mwh(gen[y]) == 0)
+ @constraint(EP,
+ FuelCalculationCommit_single[
+ y in intersect(setdiff(THERM_COMMIT,
+ THERM_COMMIT_PWFU),
+ SINGLE_FUEL),
+ t = 1:T],
+ EP[:vFuel][y, t] - EP[:vP][y, t] * heat_rate_mmbtu_per_mwh(gen[y])==0)
if !isempty(MULTI_FUELS)
- @constraint(EP, FuelCalculationCommit_multi[y in intersect(setdiff(THERM_COMMIT,THERM_COMMIT_PWFU), MULTI_FUELS), t = 1:T],
- sum(EP[:vMulFuels][y, i, t]/heat_rates[i][y] for i in 1:max_fuels) - EP[:vP][y, t] .== 0
- )
+ @constraint(EP,
+ FuelCalculationCommit_multi[
+ y in intersect(setdiff(THERM_COMMIT,
+ THERM_COMMIT_PWFU),
+ MULTI_FUELS),
+ t = 1:T],
+ sum(EP[:vMulFuels][y, i, t] / heat_rates[i][y] for i in 1:max_fuels) -
+ EP[:vP][y, t].==0)
end
end
# constraints on start up fuel use
@constraint(EP, cStartFuel_single[y in intersect(THERM_COMMIT, SINGLE_FUEL), t = 1:T],
- EP[:vStartFuel][y, t] - (cap_size(gen[y]) * EP[:vSTART][y, t] * start_fuel_mmbtu_per_mw(gen[y])) .== 0
- )
+ EP[:vStartFuel][y, t] -
+ (cap_size(gen[y]) * EP[:vSTART][y, t] * start_fuel_mmbtu_per_mw(gen[y])).==0)
if !isempty(MULTI_FUELS)
- @constraint(EP, cStartFuel_multi[y in intersect(THERM_COMMIT, MULTI_FUELS), t = 1:T],
- sum(EP[:vMulStartFuels][y, i, t] for i in 1:max_fuels) - (cap_size(gen[y]) * EP[:vSTART][y, t] * start_fuel_mmbtu_per_mw(gen[y])) .== 0
- )
+ @constraint(EP,
+ cStartFuel_multi[y in intersect(THERM_COMMIT, MULTI_FUELS), t = 1:T],
+ sum(EP[:vMulStartFuels][y, i, t] for i in 1:max_fuels) -
+ (cap_size(gen[y]) * EP[:vSTART][y, t] * start_fuel_mmbtu_per_mw(gen[y])).==0)
end
# constraints on co-fire ratio of different fuels used by one generator
# for example,
# fuel2/heat rate >= min_cofire_level * total power
# fuel2/heat rate <= max_cofire_level * total power without retrofit
- if !isempty(MULTI_FUELS)
+ if !isempty(MULTI_FUELS)
for i in 1:max_fuels
# during power generation
# cofire constraints without the name due to the loop
- @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MIN[i]), t = 1:T],
- EP[:vMulFuels][y, i, t] >= min_cofire[i][y] * EP[:ePlantFuel_generation][y,t]
- )
- @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MAX[i]), t = 1:T],
- EP[:vMulFuels][y, i, t] <= max_cofire[i][y] * EP[:ePlantFuel_generation][y,t]
- )
+ @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MIN[i]), t = 1:T],
+ EP[:vMulFuels][y,
+ i,
+ t]>=min_cofire[i][y] * EP[:ePlantFuel_generation][y, t])
+ @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MAX[i]), t = 1:T],
+ EP[:vMulFuels][y,
+ i,
+ t]<=max_cofire[i][y] * EP[:ePlantFuel_generation][y, t])
# startup
- @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MIN_START[i]), t = 1:T],
- EP[:vMulStartFuels][y, i, t] >= min_cofire_start[i][y] * EP[:ePlantFuel_start][y,t]
- )
- @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MAX_START[i]), t = 1:T],
- EP[:vMulStartFuels][y, i, t] <= max_cofire_start[i][y] * EP[:ePlantFuel_start][y,t]
- )
+ @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MIN_START[i]), t = 1:T],
+ EP[:vMulStartFuels][y,
+ i,
+ t]>=min_cofire_start[i][y] * EP[:ePlantFuel_start][y, t])
+ @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MAX_START[i]), t = 1:T],
+ EP[:vMulStartFuels][y,
+ i,
+ t]<=max_cofire_start[i][y] * EP[:ePlantFuel_start][y, t])
end
end
return EP
end
-
function resources_with_fuel(rs::Vector{<:AbstractResource}, fuel_name::AbstractString)
condition::BitVector = fuel.(rs) .== fuel_name
return resource_id.(rs[condition])
end
-
diff --git a/src/model/core/non_served_energy.jl b/src/model/core/non_served_energy.jl
index 4df302172f..458ac6348a 100644
--- a/src/model/core/non_served_energy.jl
+++ b/src/model/core/non_served_energy.jl
@@ -52,54 +52,61 @@ Additionally, total demand curtailed in each time step cannot exceed total deman
```
"""
function non_served_energy!(EP::Model, inputs::Dict, setup::Dict)
+ println("Non-served Energy Module")
- println("Non-served Energy Module")
+ T = inputs["T"] # Number of time steps
+ Z = inputs["Z"] # Number of zones
+ SEG = inputs["SEG"] # Number of demand curtailment segments
- T = inputs["T"] # Number of time steps
- Z = inputs["Z"] # Number of zones
- SEG = inputs["SEG"] # Number of demand curtailment segments
+ ### Variables ###
- ### Variables ###
+ # Non-served energy/curtailed demand in the segment "s" at hour "t" in zone "z"
+ @variable(EP, vNSE[s = 1:SEG, t = 1:T, z = 1:Z]>=0)
- # Non-served energy/curtailed demand in the segment "s" at hour "t" in zone "z"
- @variable(EP, vNSE[s=1:SEG,t=1:T,z=1:Z] >= 0);
+ ### Expressions ###
- ### Expressions ###
+ ## Objective Function Expressions ##
- ## Objective Function Expressions ##
+ # Cost of non-served energy/curtailed demand at hour "t" in zone "z"
+ @expression(EP,
+ eCNSE[s = 1:SEG, t = 1:T, z = 1:Z],
+ (inputs["omega"][t]*inputs["pC_D_Curtail"][s]*vNSE[s, t, z]))
- # Cost of non-served energy/curtailed demand at hour "t" in zone "z"
- @expression(EP, eCNSE[s=1:SEG,t=1:T,z=1:Z], (inputs["omega"][t]*inputs["pC_D_Curtail"][s]*vNSE[s,t,z]))
+ # Sum individual demand segment contributions to non-served energy costs to get total non-served energy costs
+ # Julia is fastest when summing over one row one column at a time
+ @expression(EP, eTotalCNSETS[t = 1:T, z = 1:Z], sum(eCNSE[s, t, z] for s in 1:SEG))
+ @expression(EP, eTotalCNSET[t = 1:T], sum(eTotalCNSETS[t, z] for z in 1:Z))
+ @expression(EP, eTotalCNSE, sum(eTotalCNSET[t] for t in 1:T))
- # Sum individual demand segment contributions to non-served energy costs to get total non-served energy costs
- # Julia is fastest when summing over one row one column at a time
- @expression(EP, eTotalCNSETS[t=1:T,z=1:Z], sum(eCNSE[s,t,z] for s in 1:SEG))
- @expression(EP, eTotalCNSET[t=1:T], sum(eTotalCNSETS[t,z] for z in 1:Z))
- @expression(EP, eTotalCNSE, sum(eTotalCNSET[t] for t in 1:T))
+ # Add total cost contribution of non-served energy/curtailed demand to the objective function
+ add_to_expression!(EP[:eObj], eTotalCNSE)
- # Add total cost contribution of non-served energy/curtailed demand to the objective function
- add_to_expression!(EP[:eObj], eTotalCNSE)
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceNse[t = 1:T, z = 1:Z], sum(vNSE[s, t, z] for s in 1:SEG))
- ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceNse[t=1:T, z=1:Z], sum(vNSE[s,t,z] for s=1:SEG))
+ # Add non-served energy/curtailed demand contribution to power balance expression
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNse)
- # Add non-served energy/curtailed demand contribution to power balance expression
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNse)
+ # Capacity Reserves Margin policy
+ if setup["CapacityReserveMargin"] > 0
+ if SEG >= 2
+ @expression(EP,
+ eCapResMarBalanceNSE[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(EP[:vNSE][s, t, z]
+ for s in 2:SEG, z in findall(x -> x != 0, inputs["dfCapRes"][:, res])))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceNSE)
+ end
+ end
- # Capacity Reserves Margin policy
- if setup["CapacityReserveMargin"] > 0
- if SEG >=2
- @expression(EP, eCapResMarBalanceNSE[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(EP[:vNSE][s,t,z] for s in 2:SEG, z in findall(x->x!=0,inputs["dfCapRes"][:,res])))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceNSE)
- end
- end
+    ### Constraints ###
- ### Constratints ###
-
- # Demand curtailed in each segment of curtailable demands cannot exceed maximum allowable share of demand
- @constraint(EP, cNSEPerSeg[s=1:SEG, t=1:T, z=1:Z], vNSE[s,t,z] <= inputs["pMax_D_Curtail"][s]*inputs["pD"][t,z])
-
- # Total demand curtailed in each time step (hourly) cannot exceed total demand
- @constraint(EP, cMaxNSE[t=1:T, z=1:Z], sum(vNSE[s,t,z] for s=1:SEG) <= inputs["pD"][t,z])
+ # Demand curtailed in each segment of curtailable demands cannot exceed maximum allowable share of demand
+ @constraint(EP,
+ cNSEPerSeg[s = 1:SEG, t = 1:T, z = 1:Z],
+ vNSE[s, t, z]<=inputs["pMax_D_Curtail"][s] * inputs["pD"][t, z])
+ # Total demand curtailed in each time step (hourly) cannot exceed total demand
+ @constraint(EP,
+ cMaxNSE[t = 1:T, z = 1:Z],
+ sum(vNSE[s, t, z] for s in 1:SEG)<=inputs["pD"][t, z])
end
diff --git a/src/model/core/operational_reserves.jl b/src/model/core/operational_reserves.jl
index ef26fbe01e..9616cf96b0 100644
--- a/src/model/core/operational_reserves.jl
+++ b/src/model/core/operational_reserves.jl
@@ -9,17 +9,16 @@
This function sets up reserve decisions and constraints, using the operational_reserves_core()` and operational_reserves_contingency()` functions.
"""
function operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
+ UCommit = setup["UCommit"]
- UCommit = setup["UCommit"]
-
- if inputs["pStatic_Contingency"] > 0 || (UCommit >= 1 && inputs["pDynamic_Contingency"] >= 1)
- operational_reserves_contingency!(EP, inputs, setup)
- end
+ if inputs["pStatic_Contingency"] > 0 ||
+ (UCommit >= 1 && inputs["pDynamic_Contingency"] >= 1)
+ operational_reserves_contingency!(EP, inputs, setup)
+ end
- operational_reserves_core!(EP, inputs, setup)
+ operational_reserves_core!(EP, inputs, setup)
end
-
@doc raw"""
operational_reserves_contingency!(EP::Model, inputs::Dict, setup::Dict)
@@ -47,9 +46,9 @@ where $\epsilon^{contingency}$ is static contingency requirement in MWs.
Option 2 (dynamic capacity-based contingency) is expressed by the following constraints:
```math
\begin{aligned}
- &Contingency \geq \Omega^{size}_{y,z} \times \alpha^{Contingency,Aux}_{y,z} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
- &\alpha^{Contingency,Aux}_{y,z} \leq \Delta^{\text{total}}_{y,z} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
- &\alpha^{Contingency,Aux}_{y,z} \geq M_y \times \Delta^{\text{total}}_{y,z} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
+ & Contingency \geq \Omega^{size}_{y,z} \times \alpha^{Contingency,Aux}_{y,z} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
+ & \alpha^{Contingency,Aux}_{y,z} \leq \Delta^{\text{total}}_{y,z} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
+ & \Delta^{\text{total}}_{y,z} \leq M_y \times \alpha^{Contingency,Aux}_{y,z} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
\end{aligned}
```
@@ -61,78 +60,85 @@ Option 3 (dynamic commitment-based contingency) is expressed by the following se
\begin{aligned}
& Contingency \geq \Omega^{size}_{y,z} \times Contingency\_Aux_{y,z,t} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
& Contingency\_Aux_{y,z,t} \leq \nu_{y,z,t} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
- & Contingency\_Aux_{y,z,t} \geq M_y \times \nu_{y,z,t} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
+ & \nu_{y,z,t} \leq M_y \times Contingency\_Aux_{y,z,t} & \forall y \in \mathcal{UC}, z \in \mathcal{Z}\\
\end{aligned}
```
where $M_y$ is a `big M' constant equal to the largest possible capacity that can be installed for generation cluster $y$, and $Contingency\_Aux_{y,z,t} \in [0,1]$ is a binary auxiliary variable that is forced by the second and third equations above to be 1 if the commitment state for that generation cluster $\nu_{y,z,t} > 0$ for any generator $y \in \mathcal{UC}$ and zone $z$ and time period $t$, and can be 0 otherwise. Note that this dynamic commitment-based contingency can only be specified if discrete unit commitment decisions are used (e.g. it will not work if relaxed unit commitment is used).
"""
function operational_reserves_contingency!(EP::Model, inputs::Dict, setup::Dict)
+ println("Operational Reserves Contingency Module")
- println("Operational Reserves Contingency Module")
-
- gen = inputs["RESOURCES"]
-
- T = inputs["T"] # Number of time steps (hours)
- UCommit = setup["UCommit"]
- COMMIT = inputs["COMMIT"]
-
- if UCommit >= 1
- pDynamic_Contingency = inputs["pDynamic_Contingency"]
- end
-
- ### Variables ###
-
- # NOTE: If Dynamic_Contingency == 0, then contingency is a fixed parameter equal the value specified in Operational_reserves.csv via pStatic_Contingency.
- if UCommit == 1 && pDynamic_Contingency == 1
- # Contingency = largest installed thermal unit
- @variable(EP, vLARGEST_CONTINGENCY >= 0)
- # Auxiliary variable that is 0 if vCAP = 0, 1 otherwise
- @variable(EP, vCONTINGENCY_AUX[y in COMMIT], Bin)
- elseif UCommit == 1 && pDynamic_Contingency == 2
- # Contingency = largest committed thermal unit in each time period
- @variable(EP, vLARGEST_CONTINGENCY[t=1:T] >= 0)
- # Auxiliary variable that is 0 if vCOMMIT = 0, 1 otherwise
- @variable(EP, vCONTINGENCY_AUX[y in COMMIT, t=1:T], Bin)
- end
-
- ### Expressions ###
- if UCommit == 1 && pDynamic_Contingency == 1
- # Largest contingency defined as largest installed generator
- println("Dynamic Contingency Type 1: Modeling the largest contingency as the largest installed generator")
- @expression(EP, eContingencyReq[t=1:T], vLARGEST_CONTINGENCY)
- elseif UCommit == 1 && pDynamic_Contingency == 2
- # Largest contingency defined for each hour as largest committed generator
- println("Dynamic Contingency Type 2: Modeling the largest contingency as the largest largest committed generator")
- @expression(EP, eContingencyReq[t=1:T], vLARGEST_CONTINGENCY[t])
- else
- # Largest contingency defined fixed as user-specifed static contingency in MW
- println("Static Contingency: Modeling the largest contingency as user-specifed static contingency")
- @expression(EP, eContingencyReq[t=1:T], inputs["pStatic_Contingency"])
- end
-
- ### Constraints ###
-
- # Dynamic contingency related constraints
- # option 1: ensures vLARGEST_CONTINGENCY is greater than the capacity of the largest installed generator
- if UCommit == 1 && pDynamic_Contingency == 1
- @constraint(EP, cContingency[y in COMMIT], vLARGEST_CONTINGENCY >= cap_size(gen[y])*vCONTINGENCY_AUX[y] )
- # Ensure vCONTINGENCY_AUX = 0 if total capacity = 0
- @constraint(EP, cContAux1[y in COMMIT], vCONTINGENCY_AUX[y] <= EP[:eTotalCap][y])
- # Ensure vCONTINGENCY_AUX = 1 if total capacity > 0
- @constraint(EP, cContAux2[y in COMMIT], EP[:eTotalCap][y] <= inputs["pContingency_BigM"][y]*vCONTINGENCY_AUX[y])
-
- # option 2: ensures vLARGEST_CONTINGENCY is greater than the capacity of the largest commited generator in each hour
- elseif UCommit == 1 && pDynamic_Contingency == 2
- @constraint(EP, cContingency[y in COMMIT, t=1:T], vLARGEST_CONTINGENCY[t] >= cap_size(gen[y])*vCONTINGENCY_AUX[y,t] )
- # Ensure vCONTINGENCY_AUX = 0 if vCOMMIT = 0
- @constraint(EP, cContAux[y in COMMIT, t=1:T], vCONTINGENCY_AUX[y,t] <= EP[:vCOMMIT][y,t])
- # Ensure vCONTINGENCY_AUX = 1 if vCOMMIT > 0
- @constraint(EP, cContAux2[y in COMMIT, t=1:T], EP[:vCOMMIT][y, t] <= inputs["pContingency_BigM"][y]*vCONTINGENCY_AUX[y,t])
- end
+ gen = inputs["RESOURCES"]
-end
+ T = inputs["T"] # Number of time steps (hours)
+ UCommit = setup["UCommit"]
+ COMMIT = inputs["COMMIT"]
+
+ if UCommit >= 1
+ pDynamic_Contingency = inputs["pDynamic_Contingency"]
+ end
+ ### Variables ###
+
+    # NOTE: If Dynamic_Contingency == 0, then contingency is a fixed parameter equal to the value specified in Operational_reserves.csv via pStatic_Contingency.
+ if UCommit == 1 && pDynamic_Contingency == 1
+ # Contingency = largest installed thermal unit
+ @variable(EP, vLARGEST_CONTINGENCY>=0)
+ # Auxiliary variable that is 0 if vCAP = 0, 1 otherwise
+ @variable(EP, vCONTINGENCY_AUX[y in COMMIT], Bin)
+ elseif UCommit == 1 && pDynamic_Contingency == 2
+ # Contingency = largest committed thermal unit in each time period
+ @variable(EP, vLARGEST_CONTINGENCY[t = 1:T]>=0)
+ # Auxiliary variable that is 0 if vCOMMIT = 0, 1 otherwise
+ @variable(EP, vCONTINGENCY_AUX[y in COMMIT, t = 1:T], Bin)
+ end
+
+ ### Expressions ###
+ if UCommit == 1 && pDynamic_Contingency == 1
+ # Largest contingency defined as largest installed generator
+ println("Dynamic Contingency Type 1: Modeling the largest contingency as the largest installed generator")
+ @expression(EP, eContingencyReq[t = 1:T], vLARGEST_CONTINGENCY)
+ elseif UCommit == 1 && pDynamic_Contingency == 2
+ # Largest contingency defined for each hour as largest committed generator
+ println("Dynamic Contingency Type 2: Modeling the largest contingency as the largest largest committed generator")
+ @expression(EP, eContingencyReq[t = 1:T], vLARGEST_CONTINGENCY[t])
+ else
+        # Largest contingency defined fixed as user-specified static contingency in MW
+ println("Static Contingency: Modeling the largest contingency as user-specifed static contingency")
+ @expression(EP, eContingencyReq[t = 1:T], inputs["pStatic_Contingency"])
+ end
+
+ ### Constraints ###
+
+ # Dynamic contingency related constraints
+ # option 1: ensures vLARGEST_CONTINGENCY is greater than the capacity of the largest installed generator
+ if UCommit == 1 && pDynamic_Contingency == 1
+ @constraint(EP,
+ cContingency[y in COMMIT],
+ vLARGEST_CONTINGENCY>=cap_size(gen[y]) * vCONTINGENCY_AUX[y])
+ # Ensure vCONTINGENCY_AUX = 0 if total capacity = 0
+ @constraint(EP, cContAux1[y in COMMIT], vCONTINGENCY_AUX[y]<=EP[:eTotalCap][y])
+ # Ensure vCONTINGENCY_AUX = 1 if total capacity > 0
+ @constraint(EP,
+ cContAux2[y in COMMIT],
+ EP[:eTotalCap][y]<=inputs["pContingency_BigM"][y] * vCONTINGENCY_AUX[y])
+
+    # option 2: ensures vLARGEST_CONTINGENCY is greater than the capacity of the largest committed generator in each hour
+ elseif UCommit == 1 && pDynamic_Contingency == 2
+ @constraint(EP,
+ cContingency[y in COMMIT, t = 1:T],
+ vLARGEST_CONTINGENCY[t]>=cap_size(gen[y]) * vCONTINGENCY_AUX[y, t])
+ # Ensure vCONTINGENCY_AUX = 0 if vCOMMIT = 0
+ @constraint(EP,
+ cContAux[y in COMMIT, t = 1:T],
+ vCONTINGENCY_AUX[y, t]<=EP[:vCOMMIT][y, t])
+ # Ensure vCONTINGENCY_AUX = 1 if vCOMMIT > 0
+ @constraint(EP,
+ cContAux2[y in COMMIT, t = 1:T],
+ EP[:vCOMMIT][y, t]<=inputs["pContingency_BigM"][y] * vCONTINGENCY_AUX[y, t])
+ end
+end
@doc raw"""
operational_reserves_core!(EP::Model, inputs::Dict, setup::Dict)
@@ -202,67 +208,83 @@ and $\epsilon^{demand}_{rsv}$ and $\epsilon^{vre}_{rsv}$ are parameters specifyi
"""
function operational_reserves_core!(EP::Model, inputs::Dict, setup::Dict)
- # DEV NOTE: After simplifying reserve changes are integrated/confirmed, should we revise such that reserves can be modeled without UC constraints on?
- # Is there a use case for economic dispatch constraints with reserves?
+ # DEV NOTE: After simplifying reserve changes are integrated/confirmed, should we revise such that reserves can be modeled without UC constraints on?
+ # Is there a use case for economic dispatch constraints with reserves?
- println("Operational Reserves Core Module")
+ println("Operational Reserves Core Module")
- gen = inputs["RESOURCES"]
- UCommit = setup["UCommit"]
+ gen = inputs["RESOURCES"]
+ UCommit = setup["UCommit"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- REG = inputs["REG"]
- RSV = inputs["RSV"]
+ REG = inputs["REG"]
+ RSV = inputs["RSV"]
STOR_ALL = inputs["STOR_ALL"]
pDemand = inputs["pD"]
pP_Max(y, t) = inputs["pP_Max"][y, t]
- systemwide_hourly_demand = sum(pDemand, dims=2)
- must_run_vre_generation(t) = sum(pP_Max(y, t) * EP[:eTotalCap][y] for y in intersect(inputs["VRE"], inputs["MUST_RUN"]); init=0)
-
- ### Variables ###
-
- ## Integer Unit Commitment configuration for variables
-
- ## Decision variables for operational reserves
- @variable(EP, vREG[y in REG, t=1:T] >= 0) # Contribution to regulation (primary reserves), assumed to be symmetric (up & down directions equal)
- @variable(EP, vRSV[y in RSV, t=1:T] >= 0) # Contribution to operating reserves (secondary reserves or contingency reserves); only model upward reserve requirements
-
- # Storage techs have two pairs of auxilary variables to reflect contributions to regulation and reserves
- # when charging and discharging (primary variable becomes equal to sum of these auxilary variables)
- @variable(EP, vREG_discharge[y in intersect(STOR_ALL, REG), t=1:T] >= 0) # Contribution to regulation (primary reserves) (mirrored variable used for storage devices)
- @variable(EP, vRSV_discharge[y in intersect(STOR_ALL, RSV), t=1:T] >= 0) # Contribution to operating reserves (secondary reserves) (mirrored variable used for storage devices)
- @variable(EP, vREG_charge[y in intersect(STOR_ALL, REG), t=1:T] >= 0) # Contribution to regulation (primary reserves) (mirrored variable used for storage devices)
- @variable(EP, vRSV_charge[y in intersect(STOR_ALL, RSV), t=1:T] >= 0) # Contribution to operating reserves (secondary reserves) (mirrored variable used for storage devices)
-
- @variable(EP, vUNMET_RSV[t=1:T] >= 0) # Unmet operating reserves penalty/cost
-
- ### Expressions ###
- ## Total system reserve expressions
- # Regulation requirements as a percentage of demand and scheduled variable renewable energy production in each hour
- # Reg up and down requirements are symmetric
- @expression(EP, eRegReq[t=1:T], inputs["pReg_Req_Demand"] * systemwide_hourly_demand[t] +
- inputs["pReg_Req_VRE"] * must_run_vre_generation(t))
- # Operating reserve up / contingency reserve requirements as ˚a percentage of demand and scheduled variable renewable energy production in each hour
- # and the largest single contingency (generator or transmission line outage)
- @expression(EP, eRsvReq[t=1:T], inputs["pRsv_Req_Demand"] * systemwide_hourly_demand[t] +
- inputs["pRsv_Req_VRE"] * must_run_vre_generation(t))
+ systemwide_hourly_demand = sum(pDemand, dims = 2)
+ function must_run_vre_generation(t)
+ sum(
+ pP_Max(y, t) * EP[:eTotalCap][y]
+ for y in intersect(inputs["VRE"], inputs["MUST_RUN"]);
+ init = 0)
+ end
- # N-1 contingency requirement is considered only if Unit Commitment is being modeled
- if UCommit >= 1 && (inputs["pDynamic_Contingency"] >= 1 || inputs["pStatic_Contingency"] > 0)
+ ### Variables ###
+
+ ## Integer Unit Commitment configuration for variables
+
+ ## Decision variables for operational reserves
+ @variable(EP, vREG[y in REG, t = 1:T]>=0) # Contribution to regulation (primary reserves), assumed to be symmetric (up & down directions equal)
+ @variable(EP, vRSV[y in RSV, t = 1:T]>=0) # Contribution to operating reserves (secondary reserves or contingency reserves); only model upward reserve requirements
+
+    # Storage techs have two pairs of auxiliary variables to reflect contributions to regulation and reserves
+    # when charging and discharging (primary variable becomes equal to sum of these auxiliary variables)
+ @variable(EP, vREG_discharge[y in intersect(STOR_ALL, REG), t = 1:T]>=0) # Contribution to regulation (primary reserves) (mirrored variable used for storage devices)
+ @variable(EP, vRSV_discharge[y in intersect(STOR_ALL, RSV), t = 1:T]>=0) # Contribution to operating reserves (secondary reserves) (mirrored variable used for storage devices)
+ @variable(EP, vREG_charge[y in intersect(STOR_ALL, REG), t = 1:T]>=0) # Contribution to regulation (primary reserves) (mirrored variable used for storage devices)
+ @variable(EP, vRSV_charge[y in intersect(STOR_ALL, RSV), t = 1:T]>=0) # Contribution to operating reserves (secondary reserves) (mirrored variable used for storage devices)
+
+ @variable(EP, vUNMET_RSV[t = 1:T]>=0) # Unmet operating reserves penalty/cost
+
+ ### Expressions ###
+ ## Total system reserve expressions
+ # Regulation requirements as a percentage of demand and scheduled variable renewable energy production in each hour
+ # Reg up and down requirements are symmetric
+ @expression(EP,
+ eRegReq[t = 1:T],
+ inputs["pReg_Req_Demand"] *
+ systemwide_hourly_demand[t]+
+ inputs["pReg_Req_VRE"] * must_run_vre_generation(t))
+    # Operating reserve up / contingency reserve requirements as a percentage of demand and scheduled variable renewable energy production in each hour
+ # and the largest single contingency (generator or transmission line outage)
+ @expression(EP,
+ eRsvReq[t = 1:T],
+ inputs["pRsv_Req_Demand"] *
+ systemwide_hourly_demand[t]+
+ inputs["pRsv_Req_VRE"] * must_run_vre_generation(t))
+
+ # N-1 contingency requirement is considered only if Unit Commitment is being modeled
+ if UCommit >= 1 &&
+ (inputs["pDynamic_Contingency"] >= 1 || inputs["pStatic_Contingency"] > 0)
add_to_expression!(EP[:eRsvReq], EP[:eContingencyReq])
- end
-
- ## Objective Function Expressions ##
+ end
- # Penalty for unmet operating reserves
- @expression(EP, eCRsvPen[t=1:T], inputs["omega"][t]*inputs["pC_Rsv_Penalty"]*vUNMET_RSV[t])
- @expression(EP, eTotalCRsvPen, sum(eCRsvPen[t] for t=1:T) +
- sum(reg_cost(gen[y])*vRSV[y,t] for y in RSV, t=1:T) +
- sum(rsv_cost(gen[y])*vREG[y,t] for y in REG, t=1:T) )
- add_to_expression!(EP[:eObj], eTotalCRsvPen)
+ ## Objective Function Expressions ##
+
+ # Penalty for unmet operating reserves
+ @expression(EP,
+ eCRsvPen[t = 1:T],
+ inputs["omega"][t]*inputs["pC_Rsv_Penalty"]*vUNMET_RSV[t])
+ @expression(EP,
+ eTotalCRsvPen,
+ sum(eCRsvPen[t] for t in 1:T)+
+ sum(reg_cost(gen[y]) * vRSV[y, t] for y in RSV, t in 1:T)+
+ sum(rsv_cost(gen[y]) * vREG[y, t] for y in REG, t in 1:T))
+ add_to_expression!(EP[:eObj], eTotalCRsvPen)
end
function operational_reserves_constraints!(EP, inputs)
@@ -283,9 +305,13 @@ function operational_reserves_constraints!(EP, inputs)
# contributing to regulation are assumed to contribute equal capacity to both up
# and down directions
if !isempty(REG)
- @constraint(EP, cReg[t=1:T], sum(vREG[y,t] for y in REG) >= eRegulationRequirement[t])
+ @constraint(EP,
+ cReg[t = 1:T],
+ sum(vREG[y, t] for y in REG)>=eRegulationRequirement[t])
end
if !isempty(RSV)
- @constraint(EP, cRsvReq[t=1:T], sum(vRSV[y,t] for y in RSV) + vUNMET_RSV[t] >= eReserveRequirement[t])
+ @constraint(EP,
+ cRsvReq[t = 1:T],
+ sum(vRSV[y, t] for y in RSV) + vUNMET_RSV[t]>=eReserveRequirement[t])
end
end
diff --git a/src/model/core/transmission/dcopf_transmission.jl b/src/model/core/transmission/dcopf_transmission.jl
index 1b2b853ddd..de2dcfd5bf 100644
--- a/src/model/core/transmission/dcopf_transmission.jl
+++ b/src/model/core/transmission/dcopf_transmission.jl
@@ -23,32 +23,37 @@ Finally, we enforce the reference voltage phase angle constraint:
"""
function dcopf_transmission!(EP::Model, inputs::Dict, setup::Dict)
-
- println("DC-OPF Module")
-
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- L = inputs["L"] # Number of transmission lines
-
- ### DC-OPF variables ###
-
- # Voltage angle variables of each zone "z" at hour "t"
- @variable(EP, vANGLE[z=1:Z,t=1:T])
-
- ### DC-OPF constraints ###
-
- # Power flow constraint:: vFLOW = DC_OPF_coeff * (vANGLE[START_ZONE] - vANGLE[END_ZONE])
- @constraint(EP, cPOWER_FLOW_OPF[l=1:L, t=1:T], EP[:vFLOW][l,t] == inputs["pDC_OPF_coeff"][l] * sum(inputs["pNet_Map"][l,z] * vANGLE[z,t] for z=1:Z))
-
- # Bus angle limits (except slack bus)
- @constraints(EP, begin
- cANGLE_ub[l=1:L, t=1:T], sum(inputs["pNet_Map"][l,z] * vANGLE[z,t] for z=1:Z) <= inputs["Line_Angle_Limit"][l]
- cANGLE_lb[l=1:L, t=1:T], sum(inputs["pNet_Map"][l,z] * vANGLE[z,t] for z=1:Z) >= -inputs["Line_Angle_Limit"][l]
- end)
-
- # Slack Bus angle limit
- @constraint(EP, cANGLE_SLACK[t=1:T], vANGLE[1,t]== 0)
-
-
-
+ println("DC-OPF Module")
+
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ L = inputs["L"] # Number of transmission lines
+
+ ### DC-OPF variables ###
+
+ # Voltage angle variables of each zone "z" at hour "t"
+ @variable(EP, vANGLE[z = 1:Z, t = 1:T])
+
+ ### DC-OPF constraints ###
+
+ # Power flow constraint:: vFLOW = DC_OPF_coeff * (vANGLE[START_ZONE] - vANGLE[END_ZONE])
+ @constraint(EP,
+ cPOWER_FLOW_OPF[l = 1:L, t = 1:T],
+ EP[:vFLOW][l,
+ t]==inputs["pDC_OPF_coeff"][l] *
+ sum(inputs["pNet_Map"][l, z] * vANGLE[z, t] for z in 1:Z))
+
+ # Bus angle limits (except slack bus)
+ @constraints(EP,
+ begin
+ cANGLE_ub[l = 1:L, t = 1:T],
+ sum(inputs["pNet_Map"][l, z] * vANGLE[z, t] for z in 1:Z) <=
+ inputs["Line_Angle_Limit"][l]
+ cANGLE_lb[l = 1:L, t = 1:T],
+ sum(inputs["pNet_Map"][l, z] * vANGLE[z, t] for z in 1:Z) >=
+ -inputs["Line_Angle_Limit"][l]
+ end)
+
+ # Slack Bus angle limit
+ @constraint(EP, cANGLE_SLACK[t = 1:T], vANGLE[1, t]==0)
end
diff --git a/src/model/core/transmission/investment_transmission.jl b/src/model/core/transmission/investment_transmission.jl
index 671d7e47cd..a61b628f91 100644
--- a/src/model/core/transmission/investment_transmission.jl
+++ b/src/model/core/transmission/investment_transmission.jl
@@ -1,106 +1,113 @@
@doc raw"""
function investment_transmission!(EP::Model, inputs::Dict, setup::Dict)
- The function model transmission expansion and adds transmission reinforcement or construction costs to the objective function. Transmission reinforcement costs are equal to the sum across all lines of the product between the transmission reinforcement/construction cost, $pi^{TCAP}_{l}$, times the additional transmission capacity variable, $\bigtriangleup\varphi^{cap}_{l}$.
- ```math
- \begin{aligned}
- & \sum_{l \in \mathcal{L}}\left(\pi^{TCAP}_{l} \times \bigtriangleup\varphi^{cap}_{l}\right)
- \end{aligned}
- ```
- Note that fixed O\&M and replacement capital costs (depreciation) for existing transmission capacity is treated as a sunk cost and not included explicitly in the GenX objective function.
- **Accounting for Transmission Between Zones**
- Available transmission capacity between zones is set equal to the existing line's maximum power transfer capacity, $\overline{\varphi^{cap}_{l}}$, plus any transmission capacity added on that line (for lines eligible for expansion in the set $\mathcal{E}$).
- \begin{aligned}
- &\varphi^{cap}_{l} = \overline{\varphi^{cap}_{l}} , &\quad \forall l \in (\mathcal{L} \setminus \mathcal{E} ),\forall t \in \mathcal{T}\\
- % trasmission expansion
- &\varphi^{cap}_{l} = \overline{\varphi^{cap}_{l}} + \bigtriangleup\varphi^{cap}_{l} , &\quad \forall l \in \mathcal{E},\forall t \in \mathcal{T}
- \end{aligned}
- The additional transmission capacity, $\bigtriangleup\varphi^{cap}_{l} $, is constrained by a maximum allowed reinforcement, $\overline{\bigtriangleup\varphi^{cap}_{l}}$, for each line $l \in \mathcal{E}$.
- \begin{aligned}
- & \bigtriangleup\varphi^{cap}_{l} \leq \overline{\bigtriangleup\varphi^{cap}_{l}}, &\quad \forall l \in \mathcal{E}
- \end{aligned}
+This function models transmission expansion and adds transmission reinforcement or construction costs to the objective function. Transmission reinforcement costs are equal to the sum across all lines of the product between the transmission reinforcement/construction cost, $\pi^{TCAP}_{l}$, times the additional transmission capacity variable, $\bigtriangleup\varphi^{cap}_{l}$.
+```math
+\begin{aligned}
+ & \sum_{l \in \mathcal{L}}\left(\pi^{TCAP}_{l} \times \bigtriangleup\varphi^{cap}_{l}\right)
+\end{aligned}
+```
+Note that fixed O\&M and replacement capital costs (depreciation) for existing transmission capacity is treated as a sunk cost and not included explicitly in the GenX objective function.
+**Accounting for Transmission Between Zones**
+Available transmission capacity between zones is set equal to the existing line's maximum power transfer capacity, $\overline{\varphi^{cap}_{l}}$, plus any transmission capacity added on that line (for lines eligible for expansion in the set $\mathcal{E}$).
+```math
+\begin{aligned}
+ &\varphi^{cap}_{l} = \overline{\varphi^{cap}_{l}} , &\quad \forall l \in (\mathcal{L} \setminus \mathcal{E} ),\forall t \in \mathcal{T}\\
+    % transmission expansion
+ &\varphi^{cap}_{l} = \overline{\varphi^{cap}_{l}} + \bigtriangleup\varphi^{cap}_{l} , &\quad \forall l \in \mathcal{E},\forall t \in \mathcal{T}
+\end{aligned}
+```
+The additional transmission capacity, $\bigtriangleup\varphi^{cap}_{l} $, is constrained by a maximum allowed reinforcement, $\overline{\bigtriangleup\varphi^{cap}_{l}}$, for each line $l \in \mathcal{E}$.
+```math
+\begin{aligned}
+ & \bigtriangleup\varphi^{cap}_{l} \leq \overline{\bigtriangleup\varphi^{cap}_{l}}, &\quad \forall l \in \mathcal{E}
+\end{aligned}
+```
"""
function investment_transmission!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Investment Transmission Module")
-
- L = inputs["L"] # Number of transmission lines
- NetworkExpansion = setup["NetworkExpansion"]
- MultiStage = setup["MultiStage"]
-
- if NetworkExpansion == 1
- # Network lines and zones that are expandable have non-negative maximum reinforcement inputs
- EXPANSION_LINES = inputs["EXPANSION_LINES"]
- end
-
- ### Variables ###
-
- if MultiStage == 1
- @variable(EP, vTRANSMAX[l=1:L] >= 0)
- end
-
- if NetworkExpansion == 1
- # Transmission network capacity reinforcements per line
- @variable(EP, vNEW_TRANS_CAP[l in EXPANSION_LINES] >= 0)
- end
-
-
- ### Expressions ###
-
- if MultiStage == 1
- @expression(EP, eTransMax[l=1:L], vTRANSMAX[l])
- else
- @expression(EP, eTransMax[l=1:L], inputs["pTrans_Max"][l])
- end
-
- ## Transmission power flow and loss related expressions:
- # Total availabile maximum transmission capacity is the sum of existing maximum transmission capacity plus new transmission capacity
- if NetworkExpansion == 1
- @expression(EP, eAvail_Trans_Cap[l=1:L],
- if l in EXPANSION_LINES
- eTransMax[l] + vNEW_TRANS_CAP[l]
- else
- eTransMax[l] + EP[:vZERO]
- end
- )
- else
- @expression(EP, eAvail_Trans_Cap[l=1:L], eTransMax[l] + EP[:vZERO])
- end
-
- ## Objective Function Expressions ##
-
- if NetworkExpansion == 1
- @expression(EP, eTotalCNetworkExp, sum(vNEW_TRANS_CAP[l]*inputs["pC_Line_Reinforcement"][l] for l in EXPANSION_LINES))
-
- if MultiStage == 1
- # OPEX multiplier to count multiple years between two model stages
- # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
- # and we have already accounted for multiple years between stages for fixed costs.
- add_to_expression!(EP[:eObj], (1/inputs["OPEXMULT"]), eTotalCNetworkExp)
- else
- add_to_expression!(EP[:eObj], eTotalCNetworkExp)
- end
- end
-
- ## End Objective Function Expressions ##
-
- ### Constraints ###
-
- if MultiStage == 1
- # Linking constraint for existing transmission capacity
- @constraint(EP, cExistingTransCap[l=1:L], vTRANSMAX[l] == inputs["pTrans_Max"][l])
- end
-
-
- # If network expansion is used:
- if NetworkExpansion == 1
- # Transmission network related power flow and capacity constraints
- if MultiStage == 1
- # Constrain maximum possible flow for lines eligible for expansion regardless of previous expansions
- @constraint(EP, cMaxFlowPossible[l in EXPANSION_LINES], eAvail_Trans_Cap[l] <= inputs["pTrans_Max_Possible"][l])
- end
- # Constrain maximum single-stage line capacity reinforcement for lines eligible for expansion
- @constraint(EP, cMaxLineReinforcement[l in EXPANSION_LINES], vNEW_TRANS_CAP[l] <= inputs["pMax_Line_Reinforcement"][l])
- end
- #END network expansion contraints
+ println("Investment Transmission Module")
+
+ L = inputs["L"] # Number of transmission lines
+ NetworkExpansion = setup["NetworkExpansion"]
+ MultiStage = setup["MultiStage"]
+
+ if NetworkExpansion == 1
+ # Network lines and zones that are expandable have non-negative maximum reinforcement inputs
+ EXPANSION_LINES = inputs["EXPANSION_LINES"]
+ end
+
+ ### Variables ###
+
+ if MultiStage == 1
+ @variable(EP, vTRANSMAX[l = 1:L]>=0)
+ end
+
+ if NetworkExpansion == 1
+ # Transmission network capacity reinforcements per line
+ @variable(EP, vNEW_TRANS_CAP[l in EXPANSION_LINES]>=0)
+ end
+
+ ### Expressions ###
+
+ if MultiStage == 1
+ @expression(EP, eTransMax[l = 1:L], vTRANSMAX[l])
+ else
+ @expression(EP, eTransMax[l = 1:L], inputs["pTrans_Max"][l])
+ end
+
+ ## Transmission power flow and loss related expressions:
+    # Total available maximum transmission capacity is the sum of existing maximum transmission capacity plus new transmission capacity
+ if NetworkExpansion == 1
+ @expression(EP, eAvail_Trans_Cap[l = 1:L],
+ if l in EXPANSION_LINES
+ eTransMax[l] + vNEW_TRANS_CAP[l]
+ else
+ eTransMax[l] + EP[:vZERO]
+ end)
+ else
+ @expression(EP, eAvail_Trans_Cap[l = 1:L], eTransMax[l]+EP[:vZERO])
+ end
+
+ ## Objective Function Expressions ##
+
+ if NetworkExpansion == 1
+ @expression(EP,
+ eTotalCNetworkExp,
+ sum(vNEW_TRANS_CAP[l] * inputs["pC_Line_Reinforcement"][l]
+ for l in EXPANSION_LINES))
+
+ if MultiStage == 1
+ # OPEX multiplier to count multiple years between two model stages
+ # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
+ # and we have already accounted for multiple years between stages for fixed costs.
+ add_to_expression!(EP[:eObj], (1 / inputs["OPEXMULT"]), eTotalCNetworkExp)
+ else
+ add_to_expression!(EP[:eObj], eTotalCNetworkExp)
+ end
+ end
+
+ ## End Objective Function Expressions ##
+
+ ### Constraints ###
+
+ if MultiStage == 1
+ # Linking constraint for existing transmission capacity
+ @constraint(EP, cExistingTransCap[l = 1:L], vTRANSMAX[l]==inputs["pTrans_Max"][l])
+ end
+
+ # If network expansion is used:
+ if NetworkExpansion == 1
+ # Transmission network related power flow and capacity constraints
+ if MultiStage == 1
+ # Constrain maximum possible flow for lines eligible for expansion regardless of previous expansions
+ @constraint(EP,
+ cMaxFlowPossible[l in EXPANSION_LINES],
+ eAvail_Trans_Cap[l]<=inputs["pTrans_Max_Possible"][l])
+ end
+ # Constrain maximum single-stage line capacity reinforcement for lines eligible for expansion
+ @constraint(EP,
+ cMaxLineReinforcement[l in EXPANSION_LINES],
+ vNEW_TRANS_CAP[l]<=inputs["pMax_Line_Reinforcement"][l])
+ end
+    # END network expansion constraints
end
diff --git a/src/model/core/transmission/transmission.jl b/src/model/core/transmission/transmission.jl
index 12aa50cd85..ca5c637159 100644
--- a/src/model/core/transmission/transmission.jl
+++ b/src/model/core/transmission/transmission.jl
@@ -84,177 +84,235 @@ As with losses option 2, this segment-wise approximation of a quadratic loss fun
```
"""
function transmission!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Transmission Module")
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- L = inputs["L"] # Number of transmission lines
-
- UCommit = setup["UCommit"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
- EnergyShareRequirement = setup["EnergyShareRequirement"]
- IncludeLossesInESR = setup["IncludeLossesInESR"]
-
- ## sets and indices for transmission losses
- TRANS_LOSS_SEGS = inputs["TRANS_LOSS_SEGS"] # Number of segments used in piecewise linear approximations quadratic loss functions - can only take values of TRANS_LOSS_SEGS =1, 2
- LOSS_LINES = inputs["LOSS_LINES"] # Lines for which loss coefficients apply (are non-zero);
-
-
- ### Variables ###
-
- # Power flow on each transmission line "l" at hour "t"
- @variable(EP, vFLOW[l=1:L,t=1:T]);
-
- if (TRANS_LOSS_SEGS==1) #loss is a constant times absolute value of power flow
- # Positive and negative flow variables
- @variable(EP, vTAUX_NEG[l in LOSS_LINES,t=1:T] >= 0)
- @variable(EP, vTAUX_POS[l in LOSS_LINES,t=1:T] >= 0)
-
- if UCommit == 1
- # Single binary variable to ensure positive or negative flows only
- @variable(EP, vTAUX_POS_ON[l in LOSS_LINES,t=1:T],Bin)
- # Continuous variable representing product of binary variable (vTAUX_POS_ON) and avail transmission capacity
- @variable(EP, vPROD_TRANSCAP_ON[l in LOSS_LINES,t=1:T]>=0)
- end
- else # TRANS_LOSS_SEGS>1
- # Auxiliary variables for linear piecewise interpolation of quadratic losses
- @variable(EP, vTAUX_NEG[l in LOSS_LINES, s=0:TRANS_LOSS_SEGS, t=1:T] >= 0)
- @variable(EP, vTAUX_POS[l in LOSS_LINES, s=0:TRANS_LOSS_SEGS, t=1:T] >= 0)
- if UCommit == 1
- # Binary auxilary variables for each segment >1 to ensure segments fill in order
- @variable(EP, vTAUX_POS_ON[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], Bin)
- @variable(EP, vTAUX_NEG_ON[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], Bin)
- end
+ println("Transmission Module")
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ L = inputs["L"] # Number of transmission lines
+
+ UCommit = setup["UCommit"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
+ EnergyShareRequirement = setup["EnergyShareRequirement"]
+ IncludeLossesInESR = setup["IncludeLossesInESR"]
+
+ ## sets and indices for transmission losses
+ TRANS_LOSS_SEGS = inputs["TRANS_LOSS_SEGS"] # Number of segments used in piecewise linear approximations quadratic loss functions - can only take values of TRANS_LOSS_SEGS =1, 2
+ LOSS_LINES = inputs["LOSS_LINES"] # Lines for which loss coefficients apply (are non-zero);
+
+ ### Variables ###
+
+ # Power flow on each transmission line "l" at hour "t"
+ @variable(EP, vFLOW[l = 1:L, t = 1:T])
+
+ if (TRANS_LOSS_SEGS == 1) #loss is a constant times absolute value of power flow
+ # Positive and negative flow variables
+ @variable(EP, vTAUX_NEG[l in LOSS_LINES, t = 1:T]>=0)
+ @variable(EP, vTAUX_POS[l in LOSS_LINES, t = 1:T]>=0)
+
+ if UCommit == 1
+ # Single binary variable to ensure positive or negative flows only
+ @variable(EP, vTAUX_POS_ON[l in LOSS_LINES, t = 1:T], Bin)
+ # Continuous variable representing product of binary variable (vTAUX_POS_ON) and avail transmission capacity
+ @variable(EP, vPROD_TRANSCAP_ON[l in LOSS_LINES, t = 1:T]>=0)
+ end
+ else # TRANS_LOSS_SEGS>1
+ # Auxiliary variables for linear piecewise interpolation of quadratic losses
+ @variable(EP, vTAUX_NEG[l in LOSS_LINES, s = 0:TRANS_LOSS_SEGS, t = 1:T]>=0)
+ @variable(EP, vTAUX_POS[l in LOSS_LINES, s = 0:TRANS_LOSS_SEGS, t = 1:T]>=0)
+ if UCommit == 1
+            # Binary auxiliary variables for each segment >1 to ensure segments fill in order
+ @variable(EP,
+ vTAUX_POS_ON[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ Bin)
+ @variable(EP,
+ vTAUX_NEG_ON[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ Bin)
+ end
end
- # Transmission losses on each transmission line "l" at hour "t"
- @variable(EP, vTLOSS[l in LOSS_LINES,t=1:T] >= 0)
-
- ### Expressions ###
-
- ## Transmission power flow and loss related expressions:
-
- # Net power flow outgoing from zone "z" at hour "t" in MW
- @expression(EP, eNet_Export_Flows[z=1:Z,t=1:T], sum(inputs["pNet_Map"][l,z] * vFLOW[l,t] for l=1:L))
-
- # Losses from power flows into or out of zone "z" in MW
- @expression(EP, eLosses_By_Zone[z=1:Z,t=1:T], sum(abs(inputs["pNet_Map"][l,z]) * (1/2) *vTLOSS[l,t] for l in LOSS_LINES))
-
- ## Power Balance Expressions ##
-
- @expression(EP, ePowerBalanceNetExportFlows[t=1:T, z=1:Z],
- -eNet_Export_Flows[z,t])
- @expression(EP, ePowerBalanceLossesByZone[t=1:T, z=1:Z],
- -eLosses_By_Zone[z,t])
+ # Transmission losses on each transmission line "l" at hour "t"
+ @variable(EP, vTLOSS[l in LOSS_LINES, t = 1:T]>=0)
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceLossesByZone)
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNetExportFlows)
+ ### Expressions ###
- # Capacity Reserves Margin policy
- if CapacityReserveMargin > 0
- if Z > 1
- @expression(EP, eCapResMarBalanceTrans[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(inputs["dfTransCapRes_excl"][l,res] * inputs["dfDerateTransCapRes"][l,res]* EP[:vFLOW][l,t] for l in 1:L))
- add_similar_to_expression!(EP[:eCapResMarBalance], -eCapResMarBalanceTrans)
- end
- end
+ ## Transmission power flow and loss related expressions:
- ### Constraints ###
+ # Net power flow outgoing from zone "z" at hour "t" in MW
+ @expression(EP,
+ eNet_Export_Flows[z = 1:Z, t = 1:T],
+ sum(inputs["pNet_Map"][l, z] * vFLOW[l, t] for l in 1:L))
- ## Power flow and transmission (between zone) loss related constraints
+ # Losses from power flows into or out of zone "z" in MW
+ @expression(EP,
+ eLosses_By_Zone[z = 1:Z, t = 1:T],
+ sum(abs(inputs["pNet_Map"][l, z]) * (1 / 2) * vTLOSS[l, t] for l in LOSS_LINES))
- # Maximum power flows, power flow on each transmission line cannot exceed maximum capacity of the line at any hour "t"
- @constraints(EP, begin
- cMaxFlow_out[l=1:L, t=1:T], vFLOW[l,t] <= EP[:eAvail_Trans_Cap][l]
- cMaxFlow_in[l=1:L, t=1:T], vFLOW[l,t] >= -EP[:eAvail_Trans_Cap][l]
- end)
+ ## Power Balance Expressions ##
- # Transmission loss related constraints - linear losses as a function of absolute value
- if TRANS_LOSS_SEGS == 1
+ @expression(EP, ePowerBalanceNetExportFlows[t = 1:T, z = 1:Z],
+ -eNet_Export_Flows[z, t])
+ @expression(EP, ePowerBalanceLossesByZone[t = 1:T, z = 1:Z],
+ -eLosses_By_Zone[z, t])
- @constraints(EP, begin
- # Losses are alpha times absolute values
- cTLoss[l in LOSS_LINES, t=1:T], vTLOSS[l,t] == inputs["pPercent_Loss"][l]*(vTAUX_POS[l,t]+vTAUX_NEG[l,t])
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceLossesByZone)
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNetExportFlows)
- # Power flow is sum of positive and negative components
- cTAuxSum[l in LOSS_LINES, t=1:T], vTAUX_POS[l,t]-vTAUX_NEG[l,t] == vFLOW[l,t]
-
- # Sum of auxiliary flow variables in either direction cannot exceed maximum line flow capacity
- cTAuxLimit[l in LOSS_LINES, t=1:T], vTAUX_POS[l,t]+vTAUX_NEG[l,t] <= EP[:eAvail_Trans_Cap][l]
- end)
-
- if UCommit == 1
- # Constraints to limit phantom losses that can occur to avoid discrete cycling costs/opportunity costs due to min down
- @constraints(EP, begin
- cTAuxPosUB[l in LOSS_LINES, t=1:T], vTAUX_POS[l,t] <= vPROD_TRANSCAP_ON[l,t]
-
- # Either negative or positive flows are activated, not both
- cTAuxNegUB[l in LOSS_LINES, t=1:T], vTAUX_NEG[l,t] <= EP[:eAvail_Trans_Cap][l]-vPROD_TRANSCAP_ON[l,t]
-
- # McCormick representation of product of continuous and binary variable
- # (in this case, of: vPROD_TRANSCAP_ON[l,t] = EP[:eAvail_Trans_Cap][l] * vTAUX_POS_ON[l,t])
- # McCormick constraint 1
- [l in LOSS_LINES,t=1:T], vPROD_TRANSCAP_ON[l,t] <= inputs["pTrans_Max_Possible"][l]*vTAUX_POS_ON[l,t]
-
- # McCormick constraint 2
- [l in LOSS_LINES,t=1:T], vPROD_TRANSCAP_ON[l,t] <= EP[:eAvail_Trans_Cap][l]
-
- # McCormick constraint 3
- [l in LOSS_LINES,t=1:T], vPROD_TRANSCAP_ON[l,t] >= EP[:eAvail_Trans_Cap][l]-(1-vTAUX_POS_ON[l,t])*inputs["pTrans_Max_Possible"][l]
- end)
- end
-
- end # End if(TRANS_LOSS_SEGS == 1) block
-
- # When number of segments is greater than 1
- if (TRANS_LOSS_SEGS > 1)
- ## between zone transmission loss constraints
- # Losses are expressed as a piecewise approximation of a quadratic function of power flows across each line
- # Eq 1: Total losses are function of loss coefficient times the sum of auxilary segment variables across all segments of piecewise approximation
- # (Includes both positive domain and negative domain segments)
- @constraint(EP, cTLoss[l in LOSS_LINES, t=1:T], vTLOSS[l,t] ==
- (inputs["pTrans_Loss_Coef"][l]*sum((2*s-1)*(inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_POS[l,s,t] for s=1:TRANS_LOSS_SEGS)) +
- (inputs["pTrans_Loss_Coef"][l]*sum((2*s-1)*(inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_NEG[l,s,t] for s=1:TRANS_LOSS_SEGS)) )
- # Eq 2: Sum of auxilary segment variables (s >= 1) minus the "zero" segment (which allows values to go negative)
- # from both positive and negative domains must total the actual power flow across the line
- @constraints(EP, begin
- cTAuxSumPos[l in LOSS_LINES, t=1:T], sum(vTAUX_POS[l,s,t] for s=1:TRANS_LOSS_SEGS)-vTAUX_POS[l,0,t] == vFLOW[l,t]
- cTAuxSumNeg[l in LOSS_LINES, t=1:T], sum(vTAUX_NEG[l,s,t] for s=1:TRANS_LOSS_SEGS) - vTAUX_NEG[l,0,t] == -vFLOW[l,t]
- end)
- if UCommit == 0 || UCommit == 2
- # Eq 3: Each auxilary segment variables (s >= 1) must be less than the maximum power flow in the zone / number of segments
- @constraints(EP, begin
- cTAuxMaxPos[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], vTAUX_POS[l,s,t] <= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)
- cTAuxMaxNeg[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], vTAUX_NEG[l,s,t] <= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)
- end)
- else # Constraints that can be ommitted if problem is convex (i.e. if not using MILP unit commitment constraints)
- # Eqs 3-4: Ensure that auxilary segment variables do not exceed maximum value per segment and that they
- # "fill" in order: i.e. one segment cannot be non-zero unless prior segment is at it's maximum value
- # (These constraints are necessary to prevents phantom losses in MILP problems)
- @constraints(EP, begin
- cTAuxOrderPos1[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], vTAUX_POS[l,s,t] <= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_POS_ON[l,s,t]
- cTAuxOrderNeg1[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], vTAUX_NEG[l,s,t] <= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_NEG_ON[l,s,t]
- cTAuxOrderPos2[l in LOSS_LINES, s=1:(TRANS_LOSS_SEGS-1), t=1:T], vTAUX_POS[l,s,t] >= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_POS_ON[l,s+1,t]
- cTAuxOrderNeg2[l in LOSS_LINES, s=1:(TRANS_LOSS_SEGS-1), t=1:T], vTAUX_NEG[l,s,t] >= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_NEG_ON[l,s+1,t]
- end)
-
- # Eq 5: Binary constraints to deal with absolute value of vFLOW.
- @constraints(EP, begin
- # If flow is positive, vTAUX_POS segment 0 must be zero; If flow is negative, vTAUX_POS segment 0 must be positive
- # (and takes on value of the full negative flow), forcing all vTAUX_POS other segments (s>=1) to be zero
- cTAuxSegmentZeroPos[l in LOSS_LINES, t=1:T], vTAUX_POS[l,0,t] <= inputs["pTrans_Max_Possible"][l]*(1-vTAUX_POS_ON[l,1,t])
-
- # If flow is negative, vTAUX_NEG segment 0 must be zero; If flow is positive, vTAUX_NEG segment 0 must be positive
- # (and takes on value of the full positive flow), forcing all other vTAUX_NEG segments (s>=1) to be zero
- cTAuxSegmentZeroNeg[l in LOSS_LINES, t=1:T], vTAUX_NEG[l,0,t] <= inputs["pTrans_Max_Possible"][l]*(1-vTAUX_NEG_ON[l,1,t])
- end)
- end
- end # End if(TRANS_LOSS_SEGS > 0) block
-
- # ESR Lossses
- if EnergyShareRequirement >= 1 && IncludeLossesInESR ==1
- @expression(EP, eESRTran[ESR=1:inputs["nESR"]],
- sum(inputs["dfESR"][z,ESR]*sum(inputs["omega"][t]*EP[:eLosses_By_Zone][z,t] for t in 1:T) for z=findall(x->x>0,inputs["dfESR"][:,ESR])))
- add_similar_to_expression!(EP[:eESR], -eESRTran)
- end
+ # Capacity Reserves Margin policy
+ if CapacityReserveMargin > 0
+ if Z > 1
+ @expression(EP,
+ eCapResMarBalanceTrans[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(inputs["dfTransCapRes_excl"][l, res] *
+ inputs["dfDerateTransCapRes"][l, res] * EP[:vFLOW][l, t] for l in 1:L))
+ add_similar_to_expression!(EP[:eCapResMarBalance], -eCapResMarBalanceTrans)
+ end
+ end
-end
\ No newline at end of file
+ ### Constraints ###
+
+ ## Power flow and transmission (between zone) loss related constraints
+
+ # Maximum power flows, power flow on each transmission line cannot exceed maximum capacity of the line at any hour "t"
+ @constraints(EP,
+ begin
+ cMaxFlow_out[l = 1:L, t = 1:T], vFLOW[l, t] <= EP[:eAvail_Trans_Cap][l]
+ cMaxFlow_in[l = 1:L, t = 1:T], vFLOW[l, t] >= -EP[:eAvail_Trans_Cap][l]
+ end)
+
+ # Transmission loss related constraints - linear losses as a function of absolute value
+ if TRANS_LOSS_SEGS == 1
+ @constraints(EP,
+ begin
+ # Losses are alpha times absolute values
+ cTLoss[l in LOSS_LINES, t = 1:T],
+ vTLOSS[l, t] ==
+ inputs["pPercent_Loss"][l] * (vTAUX_POS[l, t] + vTAUX_NEG[l, t])
+
+ # Power flow is sum of positive and negative components
+ cTAuxSum[l in LOSS_LINES, t = 1:T],
+ vTAUX_POS[l, t] - vTAUX_NEG[l, t] == vFLOW[l, t]
+
+ # Sum of auxiliary flow variables in either direction cannot exceed maximum line flow capacity
+ cTAuxLimit[l in LOSS_LINES, t = 1:T],
+ vTAUX_POS[l, t] + vTAUX_NEG[l, t] <= EP[:eAvail_Trans_Cap][l]
+ end)
+
+ if UCommit == 1
+ # Constraints to limit phantom losses that can occur to avoid discrete cycling costs/opportunity costs due to min down
+ @constraints(EP,
+ begin
+ cTAuxPosUB[l in LOSS_LINES, t = 1:T],
+ vTAUX_POS[l, t] <= vPROD_TRANSCAP_ON[l, t]
+
+ # Either negative or positive flows are activated, not both
+ cTAuxNegUB[l in LOSS_LINES, t = 1:T],
+ vTAUX_NEG[l, t] <= EP[:eAvail_Trans_Cap][l] - vPROD_TRANSCAP_ON[l, t]
+
+ # McCormick representation of product of continuous and binary variable
+ # (in this case, of: vPROD_TRANSCAP_ON[l,t] = EP[:eAvail_Trans_Cap][l] * vTAUX_POS_ON[l,t])
+ # McCormick constraint 1
+ [l in LOSS_LINES, t = 1:T],
+ vPROD_TRANSCAP_ON[l, t] <=
+ inputs["pTrans_Max_Possible"][l] * vTAUX_POS_ON[l, t]
+
+ # McCormick constraint 2
+ [l in LOSS_LINES, t = 1:T],
+ vPROD_TRANSCAP_ON[l, t] <= EP[:eAvail_Trans_Cap][l]
+
+ # McCormick constraint 3
+ [l in LOSS_LINES, t = 1:T],
+ vPROD_TRANSCAP_ON[l, t] >=
+ EP[:eAvail_Trans_Cap][l] -
+ (1 - vTAUX_POS_ON[l, t]) * inputs["pTrans_Max_Possible"][l]
+ end)
+ end
+ end # End if(TRANS_LOSS_SEGS == 1) block
+
+ # When number of segments is greater than 1
+ if (TRANS_LOSS_SEGS > 1)
+ ## between zone transmission loss constraints
+ # Losses are expressed as a piecewise approximation of a quadratic function of power flows across each line
+            # Eq 1: Total losses are a function of the loss coefficient times the sum of auxiliary segment variables across all segments of the piecewise approximation
+ # (Includes both positive domain and negative domain segments)
+ @constraint(EP,
+ cTLoss[l in LOSS_LINES, t = 1:T],
+ vTLOSS[l,
+ t]==
+ (inputs["pTrans_Loss_Coef"][l] *
+ sum((2 * s - 1) * (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_POS[l, s, t] for s in 1:TRANS_LOSS_SEGS)) +
+ (inputs["pTrans_Loss_Coef"][l] *
+ sum((2 * s - 1) * (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_NEG[l, s, t] for s in 1:TRANS_LOSS_SEGS)))
+        # Eq 2: Sum of auxiliary segment variables (s >= 1) minus the "zero" segment (which allows values to go negative)
+ # from both positive and negative domains must total the actual power flow across the line
+ @constraints(EP,
+ begin
+ cTAuxSumPos[l in LOSS_LINES, t = 1:T],
+ sum(vTAUX_POS[l, s, t] for s in 1:TRANS_LOSS_SEGS) - vTAUX_POS[l, 0, t] ==
+ vFLOW[l, t]
+ cTAuxSumNeg[l in LOSS_LINES, t = 1:T],
+ sum(vTAUX_NEG[l, s, t] for s in 1:TRANS_LOSS_SEGS) - vTAUX_NEG[l, 0, t] ==
+ -vFLOW[l, t]
+ end)
+ if UCommit == 0 || UCommit == 2
+            # Eq 3: Each auxiliary segment variable (s >= 1) must be less than the maximum power flow in the zone / number of segments
+ @constraints(EP,
+ begin
+ cTAuxMaxPos[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ vTAUX_POS[l, s, t] <=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS)
+ cTAuxMaxNeg[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ vTAUX_NEG[l, s, t] <=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS)
+ end)
+        else # Constraints that can be omitted if problem is convex (i.e. if not using MILP unit commitment constraints)
+            # Eqs 3-4: Ensure that auxiliary segment variables do not exceed maximum value per segment and that they
+            # "fill" in order: i.e. one segment cannot be non-zero unless prior segment is at its maximum value
+            # (These constraints are necessary to prevent phantom losses in MILP problems)
+ @constraints(EP,
+ begin
+ cTAuxOrderPos1[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ vTAUX_POS[l, s, t] <=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_POS_ON[l, s, t]
+ cTAuxOrderNeg1[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ vTAUX_NEG[l, s, t] <=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_NEG_ON[l, s, t]
+ cTAuxOrderPos2[l in LOSS_LINES, s = 1:(TRANS_LOSS_SEGS - 1), t = 1:T],
+ vTAUX_POS[l, s, t] >=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_POS_ON[l, s + 1, t]
+ cTAuxOrderNeg2[l in LOSS_LINES, s = 1:(TRANS_LOSS_SEGS - 1), t = 1:T],
+ vTAUX_NEG[l, s, t] >=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_NEG_ON[l, s + 1, t]
+ end)
+
+ # Eq 5: Binary constraints to deal with absolute value of vFLOW.
+ @constraints(EP,
+ begin
+ # If flow is positive, vTAUX_POS segment 0 must be zero; If flow is negative, vTAUX_POS segment 0 must be positive
+                    # (and takes on value of the full negative flow), forcing all other vTAUX_POS segments (s>=1) to be zero
+ cTAuxSegmentZeroPos[l in LOSS_LINES, t = 1:T],
+ vTAUX_POS[l, 0, t] <=
+ inputs["pTrans_Max_Possible"][l] * (1 - vTAUX_POS_ON[l, 1, t])
+
+ # If flow is negative, vTAUX_NEG segment 0 must be zero; If flow is positive, vTAUX_NEG segment 0 must be positive
+ # (and takes on value of the full positive flow), forcing all other vTAUX_NEG segments (s>=1) to be zero
+ cTAuxSegmentZeroNeg[l in LOSS_LINES, t = 1:T],
+ vTAUX_NEG[l, 0, t] <=
+ inputs["pTrans_Max_Possible"][l] * (1 - vTAUX_NEG_ON[l, 1, t])
+ end)
+ end
+ end # End if(TRANS_LOSS_SEGS > 0) block
+
+    # ESR Losses
+ if EnergyShareRequirement >= 1 && IncludeLossesInESR == 1
+ @expression(EP, eESRTran[ESR = 1:inputs["nESR"]],
+ sum(inputs["dfESR"][z, ESR] *
+ sum(inputs["omega"][t] * EP[:eLosses_By_Zone][z, t] for t in 1:T)
+ for z in findall(x -> x > 0, inputs["dfESR"][:, ESR])))
+ add_similar_to_expression!(EP[:eESR], -eESRTran)
+ end
+end
diff --git a/src/model/core/ucommit.jl b/src/model/core/ucommit.jl
index c85a03b31a..5db836a24b 100644
--- a/src/model/core/ucommit.jl
+++ b/src/model/core/ucommit.jl
@@ -23,52 +23,53 @@ The total cost of start-ups across all generators subject to unit commitment ($y
The sum of start-up costs is added to the objective function.
"""
function ucommit!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Unit Commitment Module")
-
- T = inputs["T"] # Number of time steps (hours)
- COMMIT = inputs["COMMIT"] # For not, thermal resources are the only ones eligible for Unit Committment
-
- ### Variables ###
-
- ## Decision variables for unit commitment
- # commitment state variable
- @variable(EP, vCOMMIT[y in COMMIT, t=1:T] >= 0)
- # startup event variable
- @variable(EP, vSTART[y in COMMIT, t=1:T] >= 0)
- # shutdown event variable
- @variable(EP, vSHUT[y in COMMIT, t=1:T] >= 0)
-
- ### Expressions ###
-
- ## Objective Function Expressions ##
-
- # Startup costs of "generation" for resource "y" during hour "t"
- @expression(EP, eCStart[y in COMMIT, t=1:T],(inputs["omega"][t]*inputs["C_Start"][y,t]*vSTART[y,t]))
-
- # Julia is fastest when summing over one row one column at a time
- @expression(EP, eTotalCStartT[t=1:T], sum(eCStart[y,t] for y in COMMIT))
- @expression(EP, eTotalCStart, sum(eTotalCStartT[t] for t=1:T))
-
- add_to_expression!(EP[:eObj], eTotalCStart)
-
- ### Constratints ###
- ## Declaration of integer/binary variables
- if setup["UCommit"] == 1 # Integer UC constraints
- for y in COMMIT
- set_integer.(vCOMMIT[y,:])
- set_integer.(vSTART[y,:])
- set_integer.(vSHUT[y,:])
- if y in inputs["RET_CAP"]
- set_integer(EP[:vRETCAP][y])
- end
- if y in inputs["NEW_CAP"]
- set_integer(EP[:vCAP][y])
- end
- if y in inputs["RETROFIT_CAP"]
- set_integer(EP[:vRETROFITCAP][y])
- end
- end
- end #END unit commitment configuration
- return EP
+ println("Unit Commitment Module")
+
+ T = inputs["T"] # Number of time steps (hours)
+    COMMIT = inputs["COMMIT"] # For now, thermal resources are the only ones eligible for Unit Commitment
+
+ ### Variables ###
+
+ ## Decision variables for unit commitment
+ # commitment state variable
+ @variable(EP, vCOMMIT[y in COMMIT, t = 1:T]>=0)
+ # startup event variable
+ @variable(EP, vSTART[y in COMMIT, t = 1:T]>=0)
+ # shutdown event variable
+ @variable(EP, vSHUT[y in COMMIT, t = 1:T]>=0)
+
+ ### Expressions ###
+
+ ## Objective Function Expressions ##
+
+ # Startup costs of "generation" for resource "y" during hour "t"
+ @expression(EP,
+ eCStart[y in COMMIT, t = 1:T],
+ (inputs["omega"][t]*inputs["C_Start"][y, t]*vSTART[y, t]))
+
+ # Julia is fastest when summing over one row one column at a time
+ @expression(EP, eTotalCStartT[t = 1:T], sum(eCStart[y, t] for y in COMMIT))
+ @expression(EP, eTotalCStart, sum(eTotalCStartT[t] for t in 1:T))
+
+ add_to_expression!(EP[:eObj], eTotalCStart)
+
+    ### Constraints ###
+ ## Declaration of integer/binary variables
+ if setup["UCommit"] == 1 # Integer UC constraints
+ for y in COMMIT
+ set_integer.(vCOMMIT[y, :])
+ set_integer.(vSTART[y, :])
+ set_integer.(vSHUT[y, :])
+ if y in inputs["RET_CAP"]
+ set_integer(EP[:vRETCAP][y])
+ end
+ if y in inputs["NEW_CAP"]
+ set_integer(EP[:vCAP][y])
+ end
+ if y in inputs["RETROFIT_CAP"]
+ set_integer(EP[:vRETROFITCAP][y])
+ end
+ end
+ end #END unit commitment configuration
+ return EP
end
diff --git a/src/model/expression_manipulation.jl b/src/model/expression_manipulation.jl
index fe4fe6e7be..33b0b8e8e2 100644
--- a/src/model/expression_manipulation.jl
+++ b/src/model/expression_manipulation.jl
@@ -25,7 +25,9 @@ This can lead to errors later if a method can only operate on expressions.
We don't currently have a method to do this with non-contiguous indexing.
"""
-function create_empty_expression!(EP::Model, exprname::Symbol, dims::NTuple{N, Int64}) where N
+function create_empty_expression!(EP::Model,
+ exprname::Symbol,
+ dims::NTuple{N, Int64}) where {N}
temp = Array{AffExpr}(undef, dims)
fill_with_zeros!(temp)
EP[exprname] = temp
@@ -49,7 +51,7 @@ end
Fill an array of expressions with zeros in-place.
"""
-function fill_with_zeros!(arr::AbstractArray{GenericAffExpr{C,T}, dims}) where {C,T,dims}
+function fill_with_zeros!(arr::AbstractArray{GenericAffExpr{C, T}, dims}) where {C, T, dims}
for i::Int64 in eachindex(IndexLinear(), arr)::Base.OneTo{Int64}
arr[i] = AffExpr(0.0)
end
@@ -64,7 +66,8 @@ Fill an array of expressions with the specified constant, in-place.
In the future we could expand this to non AffExpr, using GenericAffExpr
e.g. if we wanted to use Float32 instead of Float64
"""
-function fill_with_const!(arr::AbstractArray{GenericAffExpr{C,T}, dims}, con::Real) where {C,T,dims}
+function fill_with_const!(arr::AbstractArray{GenericAffExpr{C, T}, dims},
+ con::Real) where {C, T, dims}
for i in eachindex(arr)
arr[i] = AffExpr(con)
end
@@ -77,7 +80,7 @@ end
###### ###### ###### ###### ###### ######
#
function extract_time_series_to_expression(var::Matrix{VariableRef},
- set::AbstractVector{Int})
+ set::AbstractVector{Int})
TIME_DIM = 2
time_range = 1:size(var)[TIME_DIM]
@@ -87,8 +90,14 @@ function extract_time_series_to_expression(var::Matrix{VariableRef},
return expr
end
-function extract_time_series_to_expression(var::JuMP.Containers.DenseAxisArray{VariableRef, 2, Tuple{X, Base.OneTo{Int64}}, Y},
- set::AbstractVector{Int}) where {X, Y}
+function extract_time_series_to_expression(
+ var::JuMP.Containers.DenseAxisArray{
+ VariableRef,
+ 2,
+ Tuple{X, Base.OneTo{Int64}},
+ Y
+ },
+ set::AbstractVector{Int}) where {X, Y}
TIME_DIM = 2
time_range = var.axes[TIME_DIM]
@@ -104,7 +113,7 @@ end
###### ###### ###### ###### ###### ######
# Version for single element
-function add_similar_to_expression!(expr1::GenericAffExpr{C,T}, expr2::V) where {C,T,V}
+function add_similar_to_expression!(expr1::GenericAffExpr{C, T}, expr2::V) where {C, T, V}
add_to_expression!(expr1, expr2)
return nothing
end
@@ -116,7 +125,8 @@ Add an array of some type `V` to an array of expressions, in-place.
This will work on JuMP DenseContainers which do not have linear indexing from 1:length(arr).
However, the accessed parts of both arrays must have the same dimensions.
"""
-function add_similar_to_expression!(expr1::AbstractArray{GenericAffExpr{C,T}, dim1}, expr2::AbstractArray{V, dim2}) where {C,T,V,dim1,dim2}
+function add_similar_to_expression!(expr1::AbstractArray{GenericAffExpr{C, T}, dim1},
+ expr2::AbstractArray{V, dim2}) where {C, T, V, dim1, dim2}
# This is defined for Arrays of different dimensions
# despite the fact it will definitely throw an error
# because the error will tell the user / developer
@@ -134,7 +144,7 @@ end
###### ###### ###### ###### ###### ######
# Version for single element
-function add_term_to_expression!(expr1::GenericAffExpr{C,T}, expr2::V) where {C,T,V}
+function add_term_to_expression!(expr1::GenericAffExpr{C, T}, expr2::V) where {C, T, V}
add_to_expression!(expr1, expr2)
return nothing
end
@@ -145,7 +155,8 @@ end
Add an entry of type `V` to an array of expressions, in-place.
This will work on JuMP DenseContainers which do not have linear indexing from 1:length(arr).
"""
-function add_term_to_expression!(expr1::AbstractArray{GenericAffExpr{C,T}, dims}, expr2::V) where {C,T,V,dims}
+function add_term_to_expression!(expr1::AbstractArray{GenericAffExpr{C, T}, dims},
+ expr2::V) where {C, T, V, dims}
for i in eachindex(expr1)
add_to_expression!(expr1[i], expr2)
end
@@ -162,7 +173,8 @@ end
Check that two arrays have the same dimensions.
If not, return an error message which includes the dimensions of both arrays.
"""
-function check_sizes_match(expr1::AbstractArray{C, dim1}, expr2::AbstractArray{T, dim2}) where {C,T,dim1, dim2}
+function check_sizes_match(expr1::AbstractArray{C, dim1},
+ expr2::AbstractArray{T, dim2}) where {C, T, dim1, dim2}
# After testing, this appears to be just as fast as a method for Array{GenericAffExpr{C,T}, dims} or Array{AffExpr, dims}
if size(expr1) != size(expr2)
error("
@@ -181,7 +193,7 @@ as the method only works on the constituent types making up the GenericAffExpr,
Also, the default MethodError from add_to_expression! is sometime more informative than the error message here.
"""
function check_addable_to_expr(C::DataType, T::DataType)
- if !(hasmethod(add_to_expression!, (C,T)))
+ if !(hasmethod(add_to_expression!, (C, T)))
error("No method found for add_to_expression! with types $(C) and $(T)")
end
end
@@ -196,11 +208,11 @@ end
Sum an array of expressions into a single expression and return the result.
We're using errors from add_to_expression!() to check that the types are compatible.
"""
-function sum_expression(expr::AbstractArray{C, dims}) :: AffExpr where {C,dims}
+function sum_expression(expr::AbstractArray{C, dims})::AffExpr where {C, dims}
# check_addable_to_expr(C,C)
total = AffExpr(0.0)
for i in eachindex(expr)
add_to_expression!(total, expr[i])
end
return total
-end
\ No newline at end of file
+end
diff --git a/src/model/generate_model.jl b/src/model/generate_model.jl
index b834773188..bac3e661a8 100644
--- a/src/model/generate_model.jl
+++ b/src/model/generate_model.jl
@@ -42,7 +42,7 @@ The seventh summation represents the total cost of not meeting hourly operating
The eighth summation corresponds to the startup costs incurred by technologies to which unit commitment decisions apply (e.g. $y \in \mathcal{UC}$), equal to the cost of start-up, $\pi^{START}_{y,z}$, times the number of startup events, $\chi_{y,z,t}$, for the cluster of units in each zone and time step (weighted by $\omega_t$).
-The last term corresponds to the transmission reinforcement or construction costs, for each transmission line in the model. Transmission reinforcement costs are equal to the sum across all lines of the product between the transmission reinforcement/construction cost, $pi^{TCAP}_{l}$, times the additional transmission capacity variable, $\bigtriangleup\varphi^{max}_{l}$. Note that fixed O\&M and replacement capital costs (depreciation) for existing transmission capacity is treated as a sunk cost and not included explicitly in the GenX objective function.
+The last term corresponds to the transmission reinforcement or construction costs, for each transmission line in the model. Transmission reinforcement costs are equal to the sum across all lines of the product between the transmission reinforcement/construction cost, $\pi^{TCAP}_{l}$, times the additional transmission capacity variable, $\bigtriangleup\varphi^{max}_{l}$. Note that fixed O\&M and replacement capital costs (depreciation) for existing transmission capacity is treated as a sunk cost and not included explicitly in the GenX objective function.
In summary, the objective function can be understood as the minimization of costs associated with five sets of different decisions: (1) where and how to invest on capacity, (2) how to dispatch or operate that capacity, (3) which consumer demand segments to serve or curtail, (4) how to cycle and commit thermal units subject to unit commitment decisions, (5) and where and how to invest in additional transmission network capacity to increase power transfer capacity between zones. Note however that each of these components are considered jointly and the optimization is performed over the whole problem at once as a monolithic co-optimization problem.
@@ -67,178 +67,185 @@ The power balance constraint of the model ensures that electricity demand is met
# Returns
- `Model`: The model object containing the entire optimization problem model to be solved by solve_model.jl
"""
-function generate_model(setup::Dict,inputs::Dict,OPTIMIZER::MOI.OptimizerWithAttributes)
+function generate_model(setup::Dict, inputs::Dict, OPTIMIZER::MOI.OptimizerWithAttributes)
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ ## Start pre-solve timer
+ presolver_start_time = time()
- ## Start pre-solve timer
- presolver_start_time = time()
+ # Generate Energy Portfolio (EP) Model
+ EP = Model(OPTIMIZER)
+ set_string_names_on_creation(EP, Bool(setup["EnableJuMPStringNames"]))
+ # Introduce dummy variable fixed to zero to ensure that expressions like eTotalCap,
+ # eTotalCapCharge, eTotalCapEnergy and eAvail_Trans_Cap all have a JuMP variable
+ @variable(EP, vZERO==0)
- # Generate Energy Portfolio (EP) Model
- EP = Model(OPTIMIZER)
- set_string_names_on_creation(EP, Bool(setup["EnableJuMPStringNames"]))
- # Introduce dummy variable fixed to zero to ensure that expressions like eTotalCap,
- # eTotalCapCharge, eTotalCapEnergy and eAvail_Trans_Cap all have a JuMP variable
- @variable(EP, vZERO == 0);
+ # Initialize Power Balance Expression
+ # Expression for "baseline" power balance constraint
+ create_empty_expression!(EP, :ePowerBalance, (T, Z))
+
+ # Initialize Objective Function Expression
+ EP[:eObj] = AffExpr(0.0)
+
+ create_empty_expression!(EP, :eGenerationByZone, (Z, T))
+
+ # Energy losses related to technologies
+ create_empty_expression!(EP, :eELOSSByZone, Z)
- # Initialize Power Balance Expression
- # Expression for "baseline" power balance constraint
- create_empty_expression!(EP, :ePowerBalance, (T, Z))
-
- # Initialize Objective Function Expression
- EP[:eObj] = AffExpr(0.0)
-
- create_empty_expression!(EP, :eGenerationByZone, (Z, T))
-
- # Energy losses related to technologies
- create_empty_expression!(EP, :eELOSSByZone, Z)
-
- # Initialize Capacity Reserve Margin Expression
- if setup["CapacityReserveMargin"] > 0
- create_empty_expression!(EP, :eCapResMarBalance, (inputs["NCapacityReserveMargin"], T))
- end
-
- # Energy Share Requirement
- if setup["EnergyShareRequirement"] >= 1
- create_empty_expression!(EP, :eESR, inputs["nESR"])
- end
-
- if setup["MinCapReq"] == 1
- create_empty_expression!(EP, :eMinCapRes, inputs["NumberOfMinCapReqs"])
- end
-
- if setup["MaxCapReq"] == 1
- create_empty_expression!(EP, :eMaxCapRes, inputs["NumberOfMaxCapReqs"])
- end
-
- # Infrastructure
- discharge!(EP, inputs, setup)
-
- non_served_energy!(EP, inputs, setup)
-
- investment_discharge!(EP, inputs, setup)
-
- if setup["UCommit"] > 0
- ucommit!(EP, inputs, setup)
- end
-
- fuel!(EP, inputs, setup)
-
- co2!(EP, inputs)
-
- if setup["OperationalReserves"] > 0
- operational_reserves!(EP, inputs, setup)
- end
-
- if Z > 1
- investment_transmission!(EP, inputs, setup)
- transmission!(EP, inputs, setup)
- end
-
- if Z > 1 && setup["DC_OPF"] != 0
- dcopf_transmission!(EP, inputs, setup)
- end
-
- # Technologies
- # Model constraints, variables, expression related to dispatchable renewable resources
-
- if !isempty(inputs["VRE"])
- curtailable_variable_renewable!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to non-dispatchable renewable resources
- if !isempty(inputs["MUST_RUN"])
- must_run!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to energy storage modeling
- if !isempty(inputs["STOR_ALL"])
- storage!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to reservoir hydropower resources
- if !isempty(inputs["HYDRO_RES"])
- hydro_res!(EP, inputs, setup)
- end
-
- if !isempty(inputs["ELECTROLYZER"])
- electrolyzer!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to reservoir hydropower resources with long duration storage
- if inputs["REP_PERIOD"] > 1 && !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
- hydro_inter_period_linkage!(EP, inputs)
- end
-
- # Model constraints, variables, expression related to demand flexibility resources
- if !isempty(inputs["FLEX"])
- flexible_demand!(EP, inputs, setup)
- end
- # Model constraints, variables, expression related to thermal resource technologies
- if !isempty(inputs["THERM_ALL"])
- thermal!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to retrofit technologies
- if !isempty(inputs["RETROFIT_OPTIONS"])
- EP = retrofit(EP, inputs)
- end
-
- # Model constraints, variables, expressions related to the co-located VRE-storage resources
- if !isempty(inputs["VRE_STOR"])
- vre_stor!(EP, inputs, setup)
- end
-
- # Policies
-
- if setup["OperationalReserves"] > 0
- operational_reserves_constraints!(EP, inputs)
- end
-
- # CO2 emissions limits
- if setup["CO2Cap"] > 0
- co2_cap!(EP, inputs, setup)
- end
-
- # Endogenous Retirements
- if setup["MultiStage"] > 0
- endogenous_retirement!(EP, inputs, setup)
- end
-
- # Energy Share Requirement
- if setup["EnergyShareRequirement"] >= 1
- energy_share_requirement!(EP, inputs, setup)
- end
-
- #Capacity Reserve Margin
- if setup["CapacityReserveMargin"] > 0
- cap_reserve_margin!(EP, inputs, setup)
- end
-
- if (setup["MinCapReq"] == 1)
- minimum_capacity_requirement!(EP, inputs, setup)
- end
-
- if setup["MaxCapReq"] == 1
- maximum_capacity_requirement!(EP, inputs, setup)
- end
-
- ## Define the objective function
- @objective(EP,Min,EP[:eObj])
-
- ## Power balance constraints
- # demand = generation + storage discharge - storage charge - demand deferral + deferred demand satisfaction - demand curtailment (NSE)
- # + incoming power flows - outgoing power flows - flow losses - charge of heat storage + generation from NACC
- @constraint(EP, cPowerBalance[t=1:T, z=1:Z], EP[:ePowerBalance][t,z] == inputs["pD"][t,z])
-
- ## Record pre-solver time
- presolver_time = time() - presolver_start_time
- if setup["PrintModel"] == 1
- filepath = joinpath(pwd(), "YourModel.lp")
- JuMP.write_to_file(EP, filepath)
- println("Model Printed")
- end
+ # Initialize Capacity Reserve Margin Expression
+ if setup["CapacityReserveMargin"] > 0
+ create_empty_expression!(EP,
+ :eCapResMarBalance,
+ (inputs["NCapacityReserveMargin"], T))
+ end
+
+ # Energy Share Requirement
+ if setup["EnergyShareRequirement"] >= 1
+ create_empty_expression!(EP, :eESR, inputs["nESR"])
+ end
+
+ if setup["MinCapReq"] == 1
+ create_empty_expression!(EP, :eMinCapRes, inputs["NumberOfMinCapReqs"])
+ end
+
+ if setup["MaxCapReq"] == 1
+ create_empty_expression!(EP, :eMaxCapRes, inputs["NumberOfMaxCapReqs"])
+ end
+
+ # Infrastructure
+ discharge!(EP, inputs, setup)
+
+ non_served_energy!(EP, inputs, setup)
+
+ investment_discharge!(EP, inputs, setup)
+
+ if setup["UCommit"] > 0
+ ucommit!(EP, inputs, setup)
+ end
+
+ fuel!(EP, inputs, setup)
+
+ co2!(EP, inputs)
+
+ if setup["OperationalReserves"] > 0
+ operational_reserves!(EP, inputs, setup)
+ end
+
+ if Z > 1
+ investment_transmission!(EP, inputs, setup)
+ transmission!(EP, inputs, setup)
+ end
+
+ if Z > 1 && setup["DC_OPF"] != 0
+ dcopf_transmission!(EP, inputs, setup)
+ end
+
+ # Technologies
+ # Model constraints, variables, expression related to dispatchable renewable resources
+
+ if !isempty(inputs["VRE"])
+ curtailable_variable_renewable!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to non-dispatchable renewable resources
+ if !isempty(inputs["MUST_RUN"])
+ must_run!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to energy storage modeling
+ if !isempty(inputs["STOR_ALL"])
+ storage!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to reservoir hydropower resources
+ if !isempty(inputs["HYDRO_RES"])
+ hydro_res!(EP, inputs, setup)
+ end
+
+ if !isempty(inputs["ELECTROLYZER"])
+ electrolyzer!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to reservoir hydropower resources with long duration storage
+ if inputs["REP_PERIOD"] > 1 && !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
+ hydro_inter_period_linkage!(EP, inputs)
+ end
+
+ # Model constraints, variables, expression related to demand flexibility resources
+ if !isempty(inputs["FLEX"])
+ flexible_demand!(EP, inputs, setup)
+ end
+ # Model constraints, variables, expression related to thermal resource technologies
+ if !isempty(inputs["THERM_ALL"])
+ thermal!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to retrofit technologies
+ if !isempty(inputs["RETROFIT_OPTIONS"])
+ EP = retrofit(EP, inputs)
+ end
+
+ # Model constraints, variables, expressions related to the co-located VRE-storage resources
+ if !isempty(inputs["VRE_STOR"])
+ vre_stor!(EP, inputs, setup)
+ end
+
+ # Policies
+
+ if setup["OperationalReserves"] > 0
+ operational_reserves_constraints!(EP, inputs)
+ end
+
+ # CO2 emissions limits
+ if setup["CO2Cap"] > 0
+ co2_cap!(EP, inputs, setup)
+ end
+
+ # Endogenous Retirements
+ if setup["MultiStage"] > 0
+ endogenous_retirement!(EP, inputs, setup)
+ end
+
+ # Energy Share Requirement
+ if setup["EnergyShareRequirement"] >= 1
+ energy_share_requirement!(EP, inputs, setup)
+ end
+
+ #Capacity Reserve Margin
+ if setup["CapacityReserveMargin"] > 0
+ cap_reserve_margin!(EP, inputs, setup)
+ end
+
+ if (setup["MinCapReq"] == 1)
+ minimum_capacity_requirement!(EP, inputs, setup)
+ end
+
+ if setup["MaxCapReq"] == 1
+ maximum_capacity_requirement!(EP, inputs, setup)
+ end
+
+ if setup["ModelingToGenerateAlternatives"] == 1
+ mga!(EP, inputs, setup)
+ end
+
+ ## Define the objective function
+ @objective(EP, Min, setup["ObjScale"]*EP[:eObj])
+
+ ## Power balance constraints
+ # demand = generation + storage discharge - storage charge - demand deferral + deferred demand satisfaction - demand curtailment (NSE)
+ # + incoming power flows - outgoing power flows - flow losses - charge of heat storage + generation from NACC
+ @constraint(EP,
+ cPowerBalance[t = 1:T, z = 1:Z],
+ EP[:ePowerBalance][t, z]==inputs["pD"][t, z])
+
+ ## Record pre-solver time
+ presolver_time = time() - presolver_start_time
+ if setup["PrintModel"] == 1
+ filepath = joinpath(pwd(), "YourModel.lp")
+ JuMP.write_to_file(EP, filepath)
+ println("Model Printed")
+ end
return EP
end
diff --git a/src/model/policies/cap_reserve_margin.jl b/src/model/policies/cap_reserve_margin.jl
index 5a6aa1ba1d..bdbd077ce9 100755
--- a/src/model/policies/cap_reserve_margin.jl
+++ b/src/model/policies/cap_reserve_margin.jl
@@ -57,23 +57,30 @@ The expressions establishing the capacity reserve margin contributions of each t
class are included in their respective technology modules.
"""
function cap_reserve_margin!(EP::Model, inputs::Dict, setup::Dict)
- # capacity reserve margin constraint
- T = inputs["T"]
- NCRM = inputs["NCapacityReserveMargin"]
- println("Capacity Reserve Margin Policies Module")
+ # capacity reserve margin constraint
+ T = inputs["T"]
+ NCRM = inputs["NCapacityReserveMargin"]
+ println("Capacity Reserve Margin Policies Module")
- # if input files are present, add capacity reserve margin slack variables
- if haskey(inputs, "dfCapRes_slack")
- @variable(EP,vCapResSlack[res=1:NCRM, t=1:T]>=0)
- add_similar_to_expression!(EP[:eCapResMarBalance], vCapResSlack)
+ # if input files are present, add capacity reserve margin slack variables
+ if haskey(inputs, "dfCapRes_slack")
+ @variable(EP, vCapResSlack[res = 1:NCRM, t = 1:T]>=0)
+ add_similar_to_expression!(EP[:eCapResMarBalance], vCapResSlack)
- @expression(EP, eCapResSlack_Year[res=1:NCRM], sum(EP[:vCapResSlack][res,t] * inputs["omega"][t] for t in 1:T))
- @expression(EP, eCCapResSlack[res=1:NCRM], inputs["dfCapRes_slack"][res,:PriceCap] * EP[:eCapResSlack_Year][res])
- @expression(EP, eCTotalCapResSlack, sum(EP[:eCCapResSlack][res] for res = 1:NCRM))
- add_to_expression!(EP[:eObj], eCTotalCapResSlack)
- end
+ @expression(EP,
+ eCapResSlack_Year[res = 1:NCRM],
+ sum(EP[:vCapResSlack][res, t] * inputs["omega"][t] for t in 1:T))
+ @expression(EP,
+ eCCapResSlack[res = 1:NCRM],
+ inputs["dfCapRes_slack"][res, :PriceCap]*EP[:eCapResSlack_Year][res])
+ @expression(EP, eCTotalCapResSlack, sum(EP[:eCCapResSlack][res] for res in 1:NCRM))
+ add_to_expression!(EP[:eObj], eCTotalCapResSlack)
+ end
- @constraint(EP, cCapacityResMargin[res=1:NCRM, t=1:T], EP[:eCapResMarBalance][res, t]
- >= sum(inputs["pD"][t,z] * (1 + inputs["dfCapRes"][z,res])
- for z=findall(x->x!=0,inputs["dfCapRes"][:,res])))
+ @constraint(EP,
+ cCapacityResMargin[res = 1:NCRM, t = 1:T],
+ EP[:eCapResMarBalance][res,
+ t]
+ >=sum(inputs["pD"][t, z] * (1 + inputs["dfCapRes"][z, res])
+ for z in findall(x -> x != 0, inputs["dfCapRes"][:, res])))
end
diff --git a/src/model/policies/co2_cap.jl b/src/model/policies/co2_cap.jl
index 252cb3a7f3..4c8badbfe8 100644
--- a/src/model/policies/co2_cap.jl
+++ b/src/model/policies/co2_cap.jl
@@ -66,54 +66,59 @@ Similarly, a generation based emission constraint is defined by setting the emis
Note that the generator-side rate-based constraint can be used to represent a fee-rebate (``feebate'') system: the dirty generators that emit above the bar ($\epsilon_{z,p,gen}^{maxCO_2}$) have to buy emission allowances from the emission regulator in the region $z$ where they are located; in the same vein, the clean generators get rebates from the emission regulator at an emission allowance price being the dual variable of the emissions rate constraint.
"""
function co2_cap!(EP::Model, inputs::Dict, setup::Dict)
-
- println("CO2 Policies Module")
-
- SEG = inputs["SEG"] # Number of lines
- T = inputs["T"] # Number of time steps (hours)
-
- ### Variable ###
- # if input files are present, add CO2 cap slack variables
- if haskey(inputs, "dfCO2Cap_slack")
- @variable(EP, vCO2Cap_slack[cap = 1:inputs["NCO2Cap"]]>=0)
-
- @expression(EP, eCCO2Cap_slack[cap = 1:inputs["NCO2Cap"]],
- inputs["dfCO2Cap_slack"][cap,:PriceCap] * EP[:vCO2Cap_slack][cap])
- @expression(EP, eCTotalCO2CapSlack,
- sum(EP[:eCCO2Cap_slack][cap] for cap = 1:inputs["NCO2Cap"]))
-
- add_to_expression!(EP[:eObj], eCTotalCO2CapSlack)
- else
- @variable(EP, vCO2Cap_slack[cap = 1:inputs["NCO2Cap"]]==0)
- end
-
- ### Constraints ###
-
- ## Mass-based: Emissions constraint in absolute emissions limit (tons)
- if setup["CO2Cap"] == 1
- @constraint(EP, cCO2Emissions_systemwide[cap=1:inputs["NCO2Cap"]],
- sum(inputs["omega"][t] * EP[:eEmissionsByZone][z,t] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]), t=1:T) -
- vCO2Cap_slack[cap] <=
- sum(inputs["dfMaxCO2"][z,cap] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]))
- )
-
- ## (fulfilled) demand + Rate-based: Emissions constraint in terms of rate (tons/MWh)
- elseif setup["CO2Cap"] == 2 ##This part moved to non_served_energy.jl
-
- @constraint(EP, cCO2Emissions_systemwide[cap=1:inputs["NCO2Cap"]],
- sum(inputs["omega"][t] * EP[:eEmissionsByZone][z,t] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]), t=1:T) -
- vCO2Cap_slack[cap] <=
- sum(inputs["dfMaxCO2Rate"][z,cap] * sum(inputs["omega"][t] * (inputs["pD"][t,z] - sum(EP[:vNSE][s,t,z] for s in 1:SEG)) for t=1:T) for z = findall(x->x==1, inputs["dfCO2CapZones"][:,cap])) +
- sum(inputs["dfMaxCO2Rate"][z,cap] * setup["StorageLosses"] * EP[:eELOSSByZone][z] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]))
- )
-
- ## Generation + Rate-based: Emissions constraint in terms of rate (tons/MWh)
- elseif (setup["CO2Cap"]==3)
- @constraint(EP, cCO2Emissions_systemwide[cap=1:inputs["NCO2Cap"]],
- sum(inputs["omega"][t] * EP[:eEmissionsByZone][z,t] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]), t=1:T) -
- vCO2Cap_slack[cap] <=
- sum(inputs["dfMaxCO2Rate"][z,cap] * inputs["omega"][t] * EP[:eGenerationByZone][z,t] for t=1:T, z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]))
- )
- end
-
+ println("CO2 Policies Module")
+
+ SEG = inputs["SEG"] # Number of lines
+ T = inputs["T"] # Number of time steps (hours)
+
+ ### Variable ###
+ # if input files are present, add CO2 cap slack variables
+ if haskey(inputs, "dfCO2Cap_slack")
+ @variable(EP, vCO2Cap_slack[cap = 1:inputs["NCO2Cap"]]>=0)
+
+ @expression(EP, eCCO2Cap_slack[cap = 1:inputs["NCO2Cap"]],
+ inputs["dfCO2Cap_slack"][cap, :PriceCap]*EP[:vCO2Cap_slack][cap])
+ @expression(EP, eCTotalCO2CapSlack,
+ sum(EP[:eCCO2Cap_slack][cap] for cap in 1:inputs["NCO2Cap"]))
+
+ add_to_expression!(EP[:eObj], eCTotalCO2CapSlack)
+ else
+ @variable(EP, vCO2Cap_slack[cap = 1:inputs["NCO2Cap"]]==0)
+ end
+
+ ### Constraints ###
+
+ ## Mass-based: Emissions constraint in absolute emissions limit (tons)
+ if setup["CO2Cap"] == 1
+ @constraint(EP, cCO2Emissions_systemwide[cap = 1:inputs["NCO2Cap"]],
+ sum(inputs["omega"][t] * EP[:eEmissionsByZone][z, t]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap]), t in 1:T) -
+ vCO2Cap_slack[cap]<=
+ sum(inputs["dfMaxCO2"][z, cap]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])))
+
+ ## (fulfilled) demand + Rate-based: Emissions constraint in terms of rate (tons/MWh)
+ elseif setup["CO2Cap"] == 2 ##This part moved to non_served_energy.jl
+ @constraint(EP, cCO2Emissions_systemwide[cap = 1:inputs["NCO2Cap"]],
+ sum(inputs["omega"][t] * EP[:eEmissionsByZone][z, t]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap]), t in 1:T) -
+ vCO2Cap_slack[cap]<=
+ sum(inputs["dfMaxCO2Rate"][z, cap] * sum(inputs["omega"][t] *
+ (inputs["pD"][t, z] - sum(EP[:vNSE][s, t, z] for s in 1:SEG))
+ for t in 1:T)
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])) +
+ sum(inputs["dfMaxCO2Rate"][z, cap] * setup["StorageLosses"] *
+ EP[:eELOSSByZone][z]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])))
+
+ ## Generation + Rate-based: Emissions constraint in terms of rate (tons/MWh)
+ elseif (setup["CO2Cap"] == 3)
+ @constraint(EP, cCO2Emissions_systemwide[cap = 1:inputs["NCO2Cap"]],
+ sum(inputs["omega"][t] * EP[:eEmissionsByZone][z, t]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap]), t in 1:T) -
+ vCO2Cap_slack[cap]<=
+ sum(inputs["dfMaxCO2Rate"][z, cap] * inputs["omega"][t] *
+ EP[:eGenerationByZone][z, t]
+ for t in 1:T, z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])))
+ end
end
diff --git a/src/model/policies/energy_share_requirement.jl b/src/model/policies/energy_share_requirement.jl
index 2c65aa61ee..4ed3728777 100644
--- a/src/model/policies/energy_share_requirement.jl
+++ b/src/model/policies/energy_share_requirement.jl
@@ -4,7 +4,7 @@ This function establishes constraints that can be flexibily applied to define al
These policies usually require that the annual MWh generation from a subset of qualifying generators has to be higher than a pre-specified percentage of demand from qualifying zones.
The implementation allows for user to define one or multiple RPS/CES style minimum energy share constraints,
where each constraint can cover different combination of model zones to mimic real-world policy implementation (e.g. multiple state policies, multiple RPS tiers or overlapping RPS and CES policies).
-The number of energy share requirement constraints is specified by the user by the value of the GenX settings parameter ```EnergyShareRequirement``` (this value should be an integer >=0).
+Including an energy share requirement constraint is specified by the user by the value of the GenX settings parameter ```EnergyShareRequirement``` (this value should be either 0 or 1).
For each constraint $p \in \mathcal{P}^{ESR}$, we define a subset of zones $z \in \mathcal{Z}^{ESR}_{p} \subset \mathcal{Z}$ that are eligible for trading renewable/clean energy credits to meet the corresponding renewable/clean energy requirement.
For each energy share requirement constraint $p \in \mathcal{P}^{ESR}$,
we specify the share of total demand in each eligible model zone,
@@ -24,22 +24,23 @@ In practice, most existing renewable portfolio standard policies do not account
However, with 100% RPS or CES policies enacted in several jurisdictions, policy makers may wish to include storage losses in the minimum energy share, as otherwise there will be a difference between total generation and total demand that will permit continued use of non-qualifying resources (e.g. emitting generators).
"""
function energy_share_requirement!(EP::Model, inputs::Dict, setup::Dict)
+ println("Energy Share Requirement Policies Module")
- println("Energy Share Requirement Policies Module")
-
- # if input files are present, add energy share requirement slack variables
- if haskey(inputs, "dfESR_slack")
- @variable(EP, vESR_slack[ESR=1:inputs["nESR"]]>=0)
- add_similar_to_expression!(EP[:eESR], vESR_slack)
+ # if input files are present, add energy share requirement slack variables
+ if haskey(inputs, "dfESR_slack")
+ @variable(EP, vESR_slack[ESR = 1:inputs["nESR"]]>=0)
+ add_similar_to_expression!(EP[:eESR], vESR_slack)
- @expression(EP, eCESRSlack[ESR=1:inputs["nESR"]], inputs["dfESR_slack"][ESR,:PriceCap] * EP[:vESR_slack][ESR])
- @expression(EP, eCTotalESRSlack, sum(EP[:eCESRSlack][ESR] for ESR = 1:inputs["nESR"]))
-
- add_to_expression!(EP[:eObj], eCTotalESRSlack)
- end
-
- ## Energy Share Requirements (minimum energy share from qualifying renewable resources) constraint
- @constraint(EP, cESRShare[ESR=1:inputs["nESR"]], EP[:eESR][ESR] >= 0)
+ @expression(EP,
+ eCESRSlack[ESR = 1:inputs["nESR"]],
+ inputs["dfESR_slack"][ESR, :PriceCap]*EP[:vESR_slack][ESR])
+ @expression(EP,
+ eCTotalESRSlack,
+ sum(EP[:eCESRSlack][ESR] for ESR in 1:inputs["nESR"]))
+ add_to_expression!(EP[:eObj], eCTotalESRSlack)
+ end
+ ## Energy Share Requirements (minimum energy share from qualifying renewable resources) constraint
+ @constraint(EP, cESRShare[ESR = 1:inputs["nESR"]], EP[:eESR][ESR]>=0)
end
diff --git a/src/model/policies/maximum_capacity_requirement.jl b/src/model/policies/maximum_capacity_requirement.jl
index c36d994b99..4f92aa4017 100644
--- a/src/model/policies/maximum_capacity_requirement.jl
+++ b/src/model/policies/maximum_capacity_requirement.jl
@@ -9,21 +9,25 @@ The maximum capacity requirement constraint allows for modeling maximum deployme
Note that $\epsilon_{y,z,p}^{MaxCapReq}$ is the eligiblity of a generator of technology $y$ in zone $z$ of requirement $p$ and will be equal to $1$ for eligible generators and will be zero for ineligible resources. The dual value of each maximum capacity constraint can be interpreted as the required payment (e.g. subsidy) per MW per year required to ensure adequate revenue for the qualifying resources.
"""
function maximum_capacity_requirement!(EP::Model, inputs::Dict, setup::Dict)
+ println("Maximum Capacity Requirement Module")
+ NumberOfMaxCapReqs = inputs["NumberOfMaxCapReqs"]
- println("Maximum Capacity Requirement Module")
- NumberOfMaxCapReqs = inputs["NumberOfMaxCapReqs"]
+ # if input files are present, add maximum capacity requirement slack variables
+ if haskey(inputs, "MaxCapPriceCap")
+ @variable(EP, vMaxCap_slack[maxcap = 1:NumberOfMaxCapReqs]>=0)
+ add_similar_to_expression!(EP[:eMaxCapRes], -vMaxCap_slack)
- # if input files are present, add maximum capacity requirement slack variables
- if haskey(inputs, "MaxCapPriceCap")
- @variable(EP, vMaxCap_slack[maxcap = 1:NumberOfMaxCapReqs]>=0)
- add_similar_to_expression!(EP[:eMaxCapRes], -vMaxCap_slack)
+ @expression(EP,
+ eCMaxCap_slack[maxcap = 1:NumberOfMaxCapReqs],
+ inputs["MaxCapPriceCap"][maxcap]*EP[:vMaxCap_slack][maxcap])
+ @expression(EP,
+ eTotalCMaxCapSlack,
+ sum(EP[:eCMaxCap_slack][maxcap] for maxcap in 1:NumberOfMaxCapReqs))
- @expression(EP, eCMaxCap_slack[maxcap = 1:NumberOfMaxCapReqs], inputs["MaxCapPriceCap"][maxcap] * EP[:vMaxCap_slack][maxcap])
- @expression(EP, eTotalCMaxCapSlack, sum(EP[:eCMaxCap_slack][maxcap] for maxcap = 1:NumberOfMaxCapReqs))
-
- add_to_expression!(EP[:eObj], eTotalCMaxCapSlack)
- end
-
- @constraint(EP, cZoneMaxCapReq[maxcap = 1:NumberOfMaxCapReqs], EP[:eMaxCapRes][maxcap] <= inputs["MaxCapReq"][maxcap])
+ add_to_expression!(EP[:eObj], eTotalCMaxCapSlack)
+ end
+ @constraint(EP,
+ cZoneMaxCapReq[maxcap = 1:NumberOfMaxCapReqs],
+ EP[:eMaxCapRes][maxcap]<=inputs["MaxCapReq"][maxcap])
end
diff --git a/src/model/policies/minimum_capacity_requirement.jl b/src/model/policies/minimum_capacity_requirement.jl
index c07b10821e..333c6b551d 100644
--- a/src/model/policies/minimum_capacity_requirement.jl
+++ b/src/model/policies/minimum_capacity_requirement.jl
@@ -15,22 +15,25 @@ Also note that co-located VRE and storage resources, there are three different c
requirements.
"""
function minimum_capacity_requirement!(EP::Model, inputs::Dict, setup::Dict)
+ println("Minimum Capacity Requirement Module")
+ NumberOfMinCapReqs = inputs["NumberOfMinCapReqs"]
- println("Minimum Capacity Requirement Module")
- NumberOfMinCapReqs = inputs["NumberOfMinCapReqs"]
+ # if input files are present, add minimum capacity requirement slack variables
+ if haskey(inputs, "MinCapPriceCap")
+ @variable(EP, vMinCap_slack[mincap = 1:NumberOfMinCapReqs]>=0)
+ add_similar_to_expression!(EP[:eMinCapRes], vMinCap_slack)
- # if input files are present, add minimum capacity requirement slack variables
- if haskey(inputs, "MinCapPriceCap")
- @variable(EP, vMinCap_slack[mincap = 1:NumberOfMinCapReqs]>=0)
- add_similar_to_expression!(EP[:eMinCapRes], vMinCap_slack)
-
- @expression(EP, eCMinCap_slack[mincap = 1:NumberOfMinCapReqs], inputs["MinCapPriceCap"][mincap] * EP[:vMinCap_slack][mincap])
- @expression(EP, eTotalCMinCapSlack, sum(EP[:eCMinCap_slack][mincap] for mincap = 1:NumberOfMinCapReqs))
-
- add_to_expression!(EP[:eObj], eTotalCMinCapSlack)
- end
-
- @constraint(EP, cZoneMinCapReq[mincap = 1:NumberOfMinCapReqs], EP[:eMinCapRes][mincap] >= inputs["MinCapReq"][mincap])
+ @expression(EP,
+ eCMinCap_slack[mincap = 1:NumberOfMinCapReqs],
+ inputs["MinCapPriceCap"][mincap]*EP[:vMinCap_slack][mincap])
+ @expression(EP,
+ eTotalCMinCapSlack,
+ sum(EP[:eCMinCap_slack][mincap] for mincap in 1:NumberOfMinCapReqs))
+ add_to_expression!(EP[:eObj], eTotalCMinCapSlack)
+ end
+ @constraint(EP,
+ cZoneMinCapReq[mincap = 1:NumberOfMinCapReqs],
+ EP[:eMinCapRes][mincap]>=inputs["MinCapReq"][mincap])
end
diff --git a/src/model/resources/curtailable_variable_renewable/curtailable_variable_renewable.jl b/src/model/resources/curtailable_variable_renewable/curtailable_variable_renewable.jl
index e86179f132..779cce88e8 100644
--- a/src/model/resources/curtailable_variable_renewable/curtailable_variable_renewable.jl
+++ b/src/model/resources/curtailable_variable_renewable/curtailable_variable_renewable.jl
@@ -14,41 +14,43 @@ The above constraint is defined as an inequality instead of an equality to allow
Note that if ```OperationalReserves=1``` indicating that frequency regulation and operating reserves are modeled, then this function calls ```curtailable_variable_renewable_operational_reserves!()```, which replaces the above constraints with a formulation inclusive of reserve provision.
"""
function curtailable_variable_renewable!(EP::Model, inputs::Dict, setup::Dict)
- ## Controllable variable renewable generators
- ### Option of modeling VRE generators with multiple availability profiles and capacity limits - Num_VRE_Bins in Vre.csv >1
- ## Default value of Num_VRE_Bins ==1
- println("Dispatchable Resources Module")
+ ## Controllable variable renewable generators
+ ### Option of modeling VRE generators with multiple availability profiles and capacity limits - Num_VRE_Bins in Vre.csv >1
+ ## Default value of Num_VRE_Bins ==1
+ println("Dispatchable Resources Module")
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- OperationalReserves = setup["OperationalReserves"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ OperationalReserves = setup["OperationalReserves"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- VRE = inputs["VRE"]
+ VRE = inputs["VRE"]
- VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
- VRE_NO_POWER_OUT = setdiff(VRE, VRE_POWER_OUT)
+ VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
+ VRE_NO_POWER_OUT = setdiff(VRE, VRE_POWER_OUT)
- ### Expressions ###
+ ### Expressions ###
- ## Power Balance Expressions ##
+ ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceDisp[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(VRE, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], EP[:ePowerBalanceDisp])
+ @expression(EP, ePowerBalanceDisp[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] for y in intersect(VRE, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], EP[:ePowerBalanceDisp])
- # Capacity Reserves Margin policy
- if CapacityReserveMargin > 0
- @expression(EP, eCapResMarBalanceVRE[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * EP[:eTotalCap][y] * inputs["pP_Max"][y,t] for y in VRE))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceVRE)
- end
+ # Capacity Reserves Margin policy
+ if CapacityReserveMargin > 0
+ @expression(EP,
+ eCapResMarBalanceVRE[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) * EP[:eTotalCap][y] *
+ inputs["pP_Max"][y, t] for y in VRE))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceVRE)
+ end
- ### Constraints ###
+ ### Constraints ###
if OperationalReserves == 1
# Constraints on power output and contribution to regulation and reserves
curtailable_variable_renewable_operational_reserves!(EP, inputs)
@@ -58,25 +60,28 @@ function curtailable_variable_renewable!(EP::Model, inputs::Dict, setup::Dict)
for y in VRE_POWER_OUT
# Define the set of generator indices corresponding to the different sites (or bins) of a particular VRE technology (E.g. wind or solar) in a particular zone.
# For example the wind resource in a particular region could be include three types of bins corresponding to different sites with unique interconnection, hourly capacity factor and maximim available capacity limits.
- VRE_BINS = intersect(resource_id.(gen[resource_id.(gen) .>= y]), resource_id.(gen[resource_id.(gen) .<= y+num_vre_bins(gen[y])-1]))
+ VRE_BINS = intersect(resource_id.(gen[resource_id.(gen) .>= y]),
+ resource_id.(gen[resource_id.(gen) .<= y + num_vre_bins(gen[y]) - 1]))
# Maximum power generated per hour by renewable generators must be less than
# sum of product of hourly capacity factor for each bin times its the bin installed capacity
# Note: inequality constraint allows curtailment of output below maximum level.
- @constraint(EP, [t=1:T], EP[:vP][y,t] <= sum(inputs["pP_Max"][yy,t]*EP[:eTotalCap][yy] for yy in VRE_BINS))
+ @constraint(EP,
+ [t = 1:T],
+ EP[:vP][y,t]<=sum(inputs["pP_Max"][yy, t] * EP[:eTotalCap][yy]
+ for yy in VRE_BINS))
end
end
- # Set power variables for all bins that are not being modeled for hourly output to be zero
- for y in VRE_NO_POWER_OUT
- fix.(EP[:vP][y,:], 0.0, force=true)
- end
- ##CO2 Polcy Module VRE Generation by zone
- @expression(EP, eGenerationByVRE[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(inputs["VRE"], resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByVRE)
-
+ # Set power variables for all bins that are not being modeled for hourly output to be zero
+ for y in VRE_NO_POWER_OUT
+ fix.(EP[:vP][y, :], 0.0, force = true)
+ end
+    ## CO2 Policy Module VRE Generation by zone
+ @expression(EP, eGenerationByVRE[z = 1:Z, t = 1:T], # the unit is GW
+ sum(EP[:vP][y, t]
+ for y in intersect(inputs["VRE"], resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByVRE)
end
@doc raw"""
@@ -103,11 +108,11 @@ The amount of frequency regulation and operating reserves procured in each time
```
"""
function curtailable_variable_renewable_operational_reserves!(EP::Model, inputs::Dict)
- gen = inputs["RESOURCES"]
- T = inputs["T"]
+ gen = inputs["RESOURCES"]
+ T = inputs["T"]
VRE = inputs["VRE"]
- VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
+ VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
REG = intersect(VRE_POWER_OUT, inputs["REG"])
RSV = intersect(VRE_POWER_OUT, inputs["RSV"])
@@ -121,33 +126,37 @@ function curtailable_variable_renewable_operational_reserves!(EP::Model, inputs:
resources_in_bin(y) = UnitRange(y, y + num_vre_bins(gen[y]) - 1)
hourly_bin_capacity(y, t) = sum(hourly_capacity(yy, t) for yy in resources_in_bin(y))
- @constraint(EP, [y in REG, t in 1:T], vREG[y, t] <= reg_max(gen[y]) * hourly_bin_capacity(y, t))
- @constraint(EP, [y in RSV, t in 1:T], vRSV[y, t] <= rsv_max(gen[y]) * hourly_bin_capacity(y, t))
+ @constraint(EP,
+ [y in REG, t in 1:T],
+ vREG[y, t]<=reg_max(gen[y]) * hourly_bin_capacity(y, t))
+ @constraint(EP,
+ [y in RSV, t in 1:T],
+ vRSV[y, t]<=rsv_max(gen[y]) * hourly_bin_capacity(y, t))
expr = extract_time_series_to_expression(vP, VRE_POWER_OUT)
add_similar_to_expression!(expr[REG, :], -vREG[REG, :])
- @constraint(EP, [y in VRE_POWER_OUT, t in 1:T], expr[y, t] >= 0)
+ @constraint(EP, [y in VRE_POWER_OUT, t in 1:T], expr[y, t]>=0)
expr = extract_time_series_to_expression(vP, VRE_POWER_OUT)
add_similar_to_expression!(expr[REG, :], +vREG[REG, :])
add_similar_to_expression!(expr[RSV, :], +vRSV[RSV, :])
- @constraint(EP, [y in VRE_POWER_OUT, t in 1:T], expr[y, t] <= hourly_bin_capacity(y, t))
+ @constraint(EP, [y in VRE_POWER_OUT, t in 1:T], expr[y, t]<=hourly_bin_capacity(y, t))
end
function remove_operational_reserves_for_binned_vre_resources!(EP::Model, inputs::Dict)
gen = inputs["RESOURCES"]
VRE = inputs["VRE"]
- VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
+ VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
REG = inputs["REG"]
RSV = inputs["RSV"]
VRE_NO_POWER_OUT = setdiff(VRE, VRE_POWER_OUT)
for y in intersect(VRE_NO_POWER_OUT, REG)
- fix.(EP[:vREG][y,:], 0.0, force=true)
- end
+ fix.(EP[:vREG][y, :], 0.0, force = true)
+ end
for y in intersect(VRE_NO_POWER_OUT, RSV)
- fix.(EP[:vRSV][y,:], 0.0, force=true)
- end
+ fix.(EP[:vRSV][y, :], 0.0, force = true)
+ end
end
diff --git a/src/model/resources/flexible_demand/flexible_demand.jl b/src/model/resources/flexible_demand/flexible_demand.jl
index 7562d4ac43..56e1487c7b 100644
--- a/src/model/resources/flexible_demand/flexible_demand.jl
+++ b/src/model/resources/flexible_demand/flexible_demand.jl
@@ -18,6 +18,12 @@ At any given time step, the amount of demand that can be shifted or deferred can
\Pi_{y,t} \leq \rho^{max}_{y,z,t}\Delta_{y,z} \hspace{4 cm} \forall y \in \mathcal{DF}, z \in \mathcal{Z}, t \in \mathcal{T}
\end{aligned}
```
+At any given time step, the amount of demand that can be met cannot exceed the capacity of the FLEX resources.
+```math
+\begin{aligned}
+\eta_{y,z}^{dflex}\Theta_{y,z,t} \leq \Delta_{y,z} \hspace{4 cm} \forall y \in \mathcal{DF}, z \in \mathcal{Z}, t \in \mathcal{T}
+\end{aligned}
+```
**Maximum time delay and advancements**
Delayed demand must then be served within a fixed number of time steps. This is done by enforcing the sum of demand satisfied ($\Theta_{y,z,t}$) in the following $\tau^{delay}_{y,z}$ time steps (e.g., t + 1 to t + $\tau^{delay}_{y,z}$) to be greater than or equal to the level of energy deferred during time step $t$.
```math
@@ -36,89 +42,99 @@ A similar constraints maximum time steps of demand advancement. This is done by
If $t$ is first time step of the year (or the first time step of the representative period), then the above two constraints are implemented to look back over the last n time steps, starting with the last time step of the year (or the last time step of the representative period). This time-wrapping implementation is similar to the time-wrapping implementations used for defining the storage balance constraints for hydropower reservoir resources and energy storage resources.
"""
function flexible_demand!(EP::Model, inputs::Dict, setup::Dict)
-## Flexible demand resources available during all hours and can be either delayed or advanced (virtual storage-shiftable demand) - DR ==1
-
-println("Flexible Demand Resources Module")
-
-T = inputs["T"] # Number of time steps (hours)
-Z = inputs["Z"] # Number of zones
-FLEX = inputs["FLEX"] # Set of flexible demand resources
+ ## Flexible demand resources available during all hours and can be either delayed or advanced (virtual storage-shiftable demand) - DR ==1
-gen = inputs["RESOURCES"]
+ println("Flexible Demand Resources Module")
-hours_per_subperiod = inputs["hours_per_subperiod"] # Total number of hours per subperiod
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ FLEX = inputs["FLEX"] # Set of flexible demand resources
-### Variables ###
+ gen = inputs["RESOURCES"]
-# Variable tracking total advanced (negative) or deferred (positive) demand for demand flex resource y in period t
-@variable(EP, vS_FLEX[y in FLEX, t=1:T]);
-
-# Variable tracking demand deferred by demand flex resource y in period t
-@variable(EP, vCHARGE_FLEX[y in FLEX, t=1:T] >= 0);
-
-### Expressions ###
-
-## Power Balance Expressions ##
-@expression(EP, ePowerBalanceDemandFlex[t=1:T, z=1:Z],
- sum(-EP[:vP][y,t]+EP[:vCHARGE_FLEX][y,t] for y in intersect(FLEX, resources_in_zone_by_rid(gen,z)))
-)
-add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceDemandFlex)
-
-# Capacity Reserves Margin policy
-if setup["CapacityReserveMargin"] > 0
- @expression(EP, eCapResMarBalanceFlex[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * (EP[:vCHARGE_FLEX][y,t] - EP[:vP][y,t]) for y in FLEX))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceFlex)
-end
+ hours_per_subperiod = inputs["hours_per_subperiod"] # Total number of hours per subperiod
-## Objective Function Expressions ##
+ ### Variables ###
-# Variable costs of "charging" for technologies "y" during hour "t" in zone "z"
-@expression(EP, eCVarFlex_in[y in FLEX,t=1:T], inputs["omega"][t]*var_om_cost_per_mwh_in(gen[y])*vCHARGE_FLEX[y,t])
+ # Variable tracking total advanced (negative) or deferred (positive) demand for demand flex resource y in period t
+ @variable(EP, vS_FLEX[y in FLEX, t = 1:T])
-# Sum individual resource contributions to variable charging costs to get total variable charging costs
-@expression(EP, eTotalCVarFlexInT[t=1:T], sum(eCVarFlex_in[y,t] for y in FLEX))
-@expression(EP, eTotalCVarFlexIn, sum(eTotalCVarFlexInT[t] for t in 1:T))
-add_to_expression!(EP[:eObj], eTotalCVarFlexIn)
+ # Variable tracking demand deferred by demand flex resource y in period t
+ @variable(EP, vCHARGE_FLEX[y in FLEX, t = 1:T]>=0)
-### Constraints ###
+ ### Expressions ###
-## Flexible demand is available only during specified hours with time delay or time advance (virtual storage-shiftable demand)
-for z in 1:Z
- # NOTE: Flexible demand operates by zone since capacity is now related to zone demand
- FLEX_Z = intersect(FLEX, resources_in_zone_by_rid(gen,z))
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceDemandFlex[t = 1:T, z = 1:Z],
+ sum(-EP[:vP][y, t] + EP[:vCHARGE_FLEX][y, t]
+ for y in intersect(FLEX, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceDemandFlex)
- @constraints(EP, begin
- # State of "charge" constraint (equals previous state + charge - discharge)
- # NOTE: no maximum energy "stored" or deferred for later hours
- # NOTE: Flexible_Demand_Energy_Eff corresponds to energy loss due to time shifting
- [y in FLEX_Z, t in 1:T], EP[:vS_FLEX][y,t] == EP[:vS_FLEX][y, hoursbefore(hours_per_subperiod, t, 1)] - flexible_demand_energy_eff(gen[y]) * EP[:vP][y,t] + EP[:vCHARGE_FLEX][y,t]
-
- # Maximum charging rate
- # NOTE: the maximum amount that can be shifted is given by hourly availability of the resource times the maximum capacity of the resource
- [y in FLEX_Z, t=1:T], EP[:vCHARGE_FLEX][y,t] <= inputs["pP_Max"][y,t]*EP[:eTotalCap][y]
- # NOTE: no maximum discharge rate unless constrained by other factors like transmission, etc.
- end)
-
-
- for y in FLEX_Z
-
- # Require deferred demands to be satisfied within the specified time delay
- max_flex_demand_delay = Int(floor(max_flexible_demand_delay(gen[y])))
-
- # Require advanced demands to be satisfied within the specified time period
- max_flex_demand_advance = Int(floor(max_flexible_demand_advance(gen[y])))
-
- @constraint(EP, [t in 1:T],
- # cFlexibleDemandDelay: Constraints looks forward over next n hours, where n = max_flexible_demand_delay
- sum(EP[:vP][y,e] for e=hoursafter(hours_per_subperiod, t, 1:max_flex_demand_delay)) >= EP[:vS_FLEX][y,t])
-
- @constraint(EP, [t in 1:T],
- # cFlexibleDemandAdvance: Constraint looks forward over next n hours, where n = max_flexible_demand_advance
- sum(EP[:vCHARGE_FLEX][y,e] for e=hoursafter(hours_per_subperiod, t, 1:max_flex_demand_advance)) >= -EP[:vS_FLEX][y,t])
+ # Capacity Reserves Margin policy
+ if setup["CapacityReserveMargin"] > 0
+ @expression(EP,
+ eCapResMarBalanceFlex[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) *
+ (EP[:vCHARGE_FLEX][y, t] - EP[:vP][y, t]) for y in FLEX))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceFlex)
+ end
+ ## Objective Function Expressions ##
+
+ # Variable costs of "charging" for technologies "y" during hour "t" in zone "z"
+ @expression(EP,
+ eCVarFlex_in[y in FLEX, t = 1:T],
+ inputs["omega"][t]*var_om_cost_per_mwh_in(gen[y])*vCHARGE_FLEX[y, t])
+
+ # Sum individual resource contributions to variable charging costs to get total variable charging costs
+ @expression(EP, eTotalCVarFlexInT[t = 1:T], sum(eCVarFlex_in[y, t] for y in FLEX))
+ @expression(EP, eTotalCVarFlexIn, sum(eTotalCVarFlexInT[t] for t in 1:T))
+ add_to_expression!(EP[:eObj], eTotalCVarFlexIn)
+
+ ### Constraints ###
+
+ ## Flexible demand is available only during specified hours with time delay or time advance (virtual storage-shiftable demand)
+ for z in 1:Z
+ # NOTE: Flexible demand operates by zone since capacity is now related to zone demand
+ FLEX_Z = intersect(FLEX, resources_in_zone_by_rid(gen, z))
+
+ @constraints(EP,
+ begin
+ # State of "charge" constraint (equals previous state + charge - discharge)
+ # NOTE: no maximum energy "stored" or deferred for later hours
+ # NOTE: Flexible_Demand_Energy_Eff corresponds to energy loss due to time shifting
+ [y in FLEX_Z, t in 1:T],
+ EP[:vS_FLEX][y, t] ==
+ EP[:vS_FLEX][y, hoursbefore(hours_per_subperiod, t, 1)] -
+ flexible_demand_energy_eff(gen[y]) * EP[:vP][y, t] +
+ EP[:vCHARGE_FLEX][y, t]
+
+ # Maximum charging rate
+ [y in FLEX_Z, t = 1:T],
+ EP[:vCHARGE_FLEX][y, t] <= inputs["pP_Max"][y, t] * EP[:eTotalCap][y]
+ # Maximum discharging rate
+ [y in FLEX_Z, t = 1:T],
+ flexible_demand_energy_eff(gen[y]) * EP[:vP][y, t] <= EP[:eTotalCap][y]
+ end)
+ for y in FLEX_Z
+
+ # Require deferred demands to be satisfied within the specified time delay
+ max_flex_demand_delay = Int(floor(max_flexible_demand_delay(gen[y])))
+
+ # Require advanced demands to be satisfied within the specified time period
+ max_flex_demand_advance = Int(floor(max_flexible_demand_advance(gen[y])))
+
+ @constraint(EP, [t in 1:T],
+                # cFlexibleDemandDelay: Constraint looks forward over next n hours, where n = max_flexible_demand_delay
+ sum(EP[:vP][y, e]
+ for e in hoursafter(hours_per_subperiod, t, 1:max_flex_demand_delay))>=EP[:vS_FLEX][y,t])
+
+ @constraint(EP, [t in 1:T],
+ # cFlexibleDemandAdvance: Constraint looks forward over next n hours, where n = max_flexible_demand_advance
+ sum(EP[:vCHARGE_FLEX][y, e]
+ for e in hoursafter(hours_per_subperiod, t, 1:max_flex_demand_advance))>=-EP[:vS_FLEX][y,t])
+ end
end
-end
-return EP
+ return EP
end
-
diff --git a/src/model/resources/hydro/hydro_inter_period_linkage.jl b/src/model/resources/hydro/hydro_inter_period_linkage.jl
index 8ea3836047..c1812173b4 100644
--- a/src/model/resources/hydro/hydro_inter_period_linkage.jl
+++ b/src/model/resources/hydro/hydro_inter_period_linkage.jl
@@ -44,55 +44,71 @@ Finally, the next constraint enforces that the initial storage level for each in
```
"""
function hydro_inter_period_linkage!(EP::Model, inputs::Dict)
-
- println("Long Duration Storage Module for Hydro Reservoir")
-
- gen = inputs["RESOURCES"]
-
- REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
-
- STOR_HYDRO_LONG_DURATION = inputs["STOR_HYDRO_LONG_DURATION"]
-
- hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
-
- dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
- NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
-
- MODELED_PERIODS_INDEX = 1:NPeriods
- REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!,:Rep_Period] .== MODELED_PERIODS_INDEX]
-
- ### Variables ###
-
- # Variables to define inter-period energy transferred between modeled periods
-
- # State of charge of storage at beginning of each modeled period n
- @variable(EP, vSOC_HYDROw[y in STOR_HYDRO_LONG_DURATION, n in MODELED_PERIODS_INDEX] >= 0)
-
- # Build up in storage inventory over each representative period w
- # Build up inventory can be positive or negative
- @variable(EP, vdSOC_HYDRO[y in STOR_HYDRO_LONG_DURATION, w=1:REP_PERIOD])
-
- ### Constraints ###
-
- # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
- # Modified initial state of storage for long-duration storage - initialize wth value carried over from last period
- # Alternative to cSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
- # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @constraint(EP, cHydroReservoirLongDurationStorageStart[w=1:REP_PERIOD, y in STOR_HYDRO_LONG_DURATION],
- EP[:vS_HYDRO][y,hours_per_subperiod*(w-1)+1] == (EP[:vS_HYDRO][y,hours_per_subperiod*w]-vdSOC_HYDRO[y,w])-(1/efficiency_down(gen[y])*EP[:vP][y,hours_per_subperiod*(w-1)+1])-EP[:vSPILL][y,hours_per_subperiod*(w-1)+1]+inputs["pP_Max"][y,hours_per_subperiod*(w-1)+1]*EP[:eTotalCap][y])
- # Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
- ## Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cHydroReservoirLongDurationStorage[y in STOR_HYDRO_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vSOC_HYDROw[y, mod1(r+1, NPeriods)] == vSOC_HYDROw[y,r] + vdSOC_HYDRO[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # Storage at beginning of each modeled period cannot exceed installed energy capacity
- @constraint(EP, cHydroReservoirLongDurationStorageUpper[y in STOR_HYDRO_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vSOC_HYDROw[y,r] <= hydro_energy_to_power_ratio(gen[y])*EP[:eTotalCap][y])
-
- # Initial storage level for representative periods must also adhere to sub-period storage inventory balance
- # Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cHydroReservoirLongDurationStorageSub[y in STOR_HYDRO_LONG_DURATION, r in REP_PERIODS_INDEX],
- vSOC_HYDROw[y,r] == EP[:vS_HYDRO][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]] - vdSOC_HYDRO[y,dfPeriodMap[r,:Rep_Period_Index]])
-
-
+ println("Long Duration Storage Module for Hydro Reservoir")
+
+ gen = inputs["RESOURCES"]
+
+ REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
+
+ STOR_HYDRO_LONG_DURATION = inputs["STOR_HYDRO_LONG_DURATION"]
+
+ hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
+
+ dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
+ NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
+
+ MODELED_PERIODS_INDEX = 1:NPeriods
+ REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!, :Rep_Period] .== MODELED_PERIODS_INDEX]
+
+ ### Variables ###
+
+ # Variables to define inter-period energy transferred between modeled periods
+
+ # State of charge of storage at beginning of each modeled period n
+ @variable(EP, vSOC_HYDROw[y in STOR_HYDRO_LONG_DURATION, n in MODELED_PERIODS_INDEX]>=0)
+
+ # Build up in storage inventory over each representative period w
+ # Build up inventory can be positive or negative
+ @variable(EP, vdSOC_HYDRO[y in STOR_HYDRO_LONG_DURATION, w = 1:REP_PERIOD])
+
+ ### Constraints ###
+
+ # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
+    # Modified initial state of storage for long-duration storage - initialize with value carried over from last period
+ # Alternative to cSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
+ # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
+ @constraint(EP,
+ cHydroReservoirLongDurationStorageStart[w = 1:REP_PERIOD,
+ y in STOR_HYDRO_LONG_DURATION],
+ EP[:vS_HYDRO][y,
+ hours_per_subperiod * (w - 1) + 1]==(EP[:vS_HYDRO][y, hours_per_subperiod * w] -
+ vdSOC_HYDRO[y, w]) -
+ (1 / efficiency_down(gen[y]) * EP[:vP][
+ y, hours_per_subperiod * (w - 1) + 1]) -
+ EP[:vSPILL][
+ y, hours_per_subperiod * (w - 1) + 1] +
+ inputs["pP_Max"][
+ y, hours_per_subperiod * (w - 1) + 1] * EP[:eTotalCap][y])
+ # Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
+ ## Multiply storage build up term from prior period with corresponding weight
+ @constraint(EP,
+ cHydroReservoirLongDurationStorage[y in STOR_HYDRO_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vSOC_HYDROw[y,
+ mod1(r + 1, NPeriods)]==vSOC_HYDROw[y, r] +
+ vdSOC_HYDRO[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # Storage at beginning of each modeled period cannot exceed installed energy capacity
+ @constraint(EP,
+ cHydroReservoirLongDurationStorageUpper[y in STOR_HYDRO_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vSOC_HYDROw[y, r]<=hydro_energy_to_power_ratio(gen[y]) * EP[:eTotalCap][y])
+
+ # Initial storage level for representative periods must also adhere to sub-period storage inventory balance
+ # Initial storage = Final storage - change in storage inventory across representative period
+ @constraint(EP,
+ cHydroReservoirLongDurationStorageSub[y in STOR_HYDRO_LONG_DURATION,
+ r in REP_PERIODS_INDEX],
+ vSOC_HYDROw[y,r]==EP[:vS_HYDRO][y, hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]] -
+ vdSOC_HYDRO[y, dfPeriodMap[r, :Rep_Period_Index]])
end
diff --git a/src/model/resources/hydro/hydro_res.jl b/src/model/resources/hydro/hydro_res.jl
index e9734ed975..f4b90df76f 100644
--- a/src/model/resources/hydro/hydro_res.jl
+++ b/src/model/resources/hydro/hydro_res.jl
@@ -61,24 +61,23 @@ In case the reservoir capacity is known ($y \in W^{cap}$), then an additional co
```
"""
function hydro_res!(EP::Model, inputs::Dict, setup::Dict)
+ println("Hydro Reservoir Core Resources Module")
- println("Hydro Reservoir Core Resources Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ p = inputs["hours_per_subperiod"] # total number of hours per subperiod
- p = inputs["hours_per_subperiod"] # total number of hours per subperiod
+ HYDRO_RES = inputs["HYDRO_RES"]# Set of all reservoir hydro resources, used for common constraints
+    HYDRO_RES_KNOWN_CAP = inputs["HYDRO_RES_KNOWN_CAP"] # Reservoir hydro resources modeled with known reservoir energy capacity
- HYDRO_RES = inputs["HYDRO_RES"] # Set of all reservoir hydro resources, used for common constraints
- HYDRO_RES_KNOWN_CAP = inputs["HYDRO_RES_KNOWN_CAP"] # Reservoir hydro resources modeled with unknown reservoir energy capacity
+ STOR_HYDRO_SHORT_DURATION = inputs["STOR_HYDRO_SHORT_DURATION"]
+ representative_periods = inputs["REP_PERIOD"]
- STOR_HYDRO_SHORT_DURATION = inputs["STOR_HYDRO_SHORT_DURATION"]
- representative_periods = inputs["REP_PERIOD"]
-
- START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ START_SUBPERIODS = inputs["START_SUBPERIODS"]
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
# These variables are used in the ramp-up and ramp-down expressions
reserves_term = @expression(EP, [y in HYDRO_RES, t in 1:T], 0)
@@ -88,81 +87,99 @@ function hydro_res!(EP::Model, inputs::Dict, setup::Dict)
HYDRO_RES_REG = intersect(HYDRO_RES, inputs["REG"]) # Set of reservoir hydro resources with regulation reserves
HYDRO_RES_RSV = intersect(HYDRO_RES, inputs["RSV"]) # Set of reservoir hydro resources with spinning reserves
regulation_term = @expression(EP, [y in HYDRO_RES, t in 1:T],
- y ∈ HYDRO_RES_REG ? EP[:vREG][y,t] - EP[:vREG][y, hoursbefore(p, t, 1)] : 0)
+ y ∈ HYDRO_RES_REG ? EP[:vREG][y, t] - EP[:vREG][y, hoursbefore(p, t, 1)] : 0)
reserves_term = @expression(EP, [y in HYDRO_RES, t in 1:T],
- y ∈ HYDRO_RES_RSV ? EP[:vRSV][y,t] : 0)
+ y ∈ HYDRO_RES_RSV ? EP[:vRSV][y, t] : 0)
+ end
+
+ ### Variables ###
+
+ # Reservoir hydro storage level of resource "y" at hour "t" [MWh] on zone "z" - unbounded
+ @variable(EP, vS_HYDRO[y in HYDRO_RES, t = 1:T]>=0)
+
+ # Hydro reservoir overflow (water spill) variable
+ @variable(EP, vSPILL[y in HYDRO_RES, t = 1:T]>=0)
+
+ ### Expressions ###
+
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceHydroRes[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] for y in intersect(HYDRO_RES, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceHydroRes)
+
+ # Capacity Reserves Margin policy
+ if setup["CapacityReserveMargin"] > 0
+ @expression(EP,
+ eCapResMarBalanceHydro[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) * EP[:vP][y, t] for y in HYDRO_RES))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceHydro)
end
- ### Variables ###
-
- # Reservoir hydro storage level of resource "y" at hour "t" [MWh] on zone "z" - unbounded
- @variable(EP, vS_HYDRO[y in HYDRO_RES, t=1:T] >= 0);
-
- # Hydro reservoir overflow (water spill) variable
- @variable(EP, vSPILL[y in HYDRO_RES, t=1:T] >= 0)
-
- ### Expressions ###
-
- ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceHydroRes[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(HYDRO_RES, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceHydroRes)
-
- # Capacity Reserves Margin policy
- if setup["CapacityReserveMargin"] > 0
- @expression(EP, eCapResMarBalanceHydro[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * EP[:vP][y,t] for y in HYDRO_RES))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceHydro)
- end
-
- ### Constratints ###
-
- if representative_periods > 1 && !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
- CONSTRAINTSET = STOR_HYDRO_SHORT_DURATION
- else
- CONSTRAINTSET = HYDRO_RES
- end
-
- @constraint(EP, cHydroReservoirStart[y in CONSTRAINTSET,t in START_SUBPERIODS], EP[:vS_HYDRO][y,t] == EP[:vS_HYDRO][y, hoursbefore(p,t,1)]- (1/efficiency_down(gen[y])*EP[:vP][y,t]) - vSPILL[y,t] + inputs["pP_Max"][y,t]*EP[:eTotalCap][y])
-
- ### Constraints commmon to all reservoir hydro (y in set HYDRO_RES) ###
- @constraints(EP, begin
- ### NOTE: time coupling constraints in this block do not apply to first hour in each sample period;
- # Energy stored in reservoir at end of each other hour is equal to energy at end of prior hour less generation and spill and + inflows in the current hour
- # The ["pP_Max"][y,t] term here refers to inflows as a fraction of peak discharge power capacity.
- # DEV NOTE: Last inputs["pP_Max"][y,t] term above is inflows; currently part of capacity factors inputs in Generators_variability.csv but should be moved to its own Hydro_inflows.csv input in future.
-
- # Constraints for reservoir hydro
- cHydroReservoirInterior[y in HYDRO_RES, t in INTERIOR_SUBPERIODS], EP[:vS_HYDRO][y,t] == (EP[:vS_HYDRO][y, hoursbefore(p,t,1)] - (1/efficiency_down(gen[y])*EP[:vP][y,t]) - vSPILL[y,t] + inputs["pP_Max"][y,t]*EP[:eTotalCap][y])
-
- # Maximum ramp up and down
- cRampUp[y in HYDRO_RES, t in 1:T], EP[:vP][y,t] + regulation_term[y,t] + reserves_term[y,t] - EP[:vP][y, hoursbefore(p,t,1)] <= ramp_up_fraction(gen[y])*EP[:eTotalCap][y]
- cRampDown[y in HYDRO_RES, t in 1:T], EP[:vP][y, hoursbefore(p,t,1)] - EP[:vP][y,t] - regulation_term[y,t] + reserves_term[y, hoursbefore(p,t,1)] <= ramp_down_fraction(gen[y])*EP[:eTotalCap][y]
- # Minimum streamflow running requirements (power generation and spills must be >= min value) in all hours
- cHydroMinFlow[y in HYDRO_RES, t in 1:T], EP[:vP][y,t] + EP[:vSPILL][y,t] >= min_power(gen[y])*EP[:eTotalCap][y]
- # DEV NOTE: When creating new hydro inputs, should rename Min_Power with Min_flow or similar for clarity since this includes spilled water as well
-
- # Maximum discharging rate must be less than power rating OR available stored energy at start of hour, whichever is less
- # DEV NOTE: We do not currently account for hydro power plant outages - leave it for later to figure out if we should.
- # DEV NOTE (CONTD): If we defin pPMax as hourly availability of the plant and define inflows as a separate parameter, then notation will be consistent with its use for other resources
- cHydroMaxPower[y in HYDRO_RES, t in 1:T], EP[:vP][y,t] <= EP[:eTotalCap][y]
- cHydroMaxOutflow[y in HYDRO_RES, t in 1:T], EP[:vP][y,t] <= EP[:vS_HYDRO][y, hoursbefore(p,t,1)]
- end)
-
- ### Constraints to limit maximum energy in storage based on known limits on reservoir energy capacity (only for HYDRO_RES_KNOWN_CAP)
- # Maximum energy stored in reservoir must be less than energy capacity in all hours - only applied to HYDRO_RES_KNOWN_CAP
- @constraint(EP, cHydroMaxEnergy[y in HYDRO_RES_KNOWN_CAP, t in 1:T], EP[:vS_HYDRO][y,t] <= hydro_energy_to_power_ratio(gen[y])*EP[:eTotalCap][y])
-
- if setup["OperationalReserves"] == 1
- ### Reserve related constraints for reservoir hydro resources (y in HYDRO_RES), if used
- hydro_res_operational_reserves!(EP, inputs)
- end
- ##CO2 Polcy Module Hydro Res Generation by zone
- @expression(EP, eGenerationByHydroRes[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(HYDRO_RES, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByHydroRes)
+    ### Constraints ###
+
+ if representative_periods > 1 && !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
+ CONSTRAINTSET = STOR_HYDRO_SHORT_DURATION
+ else
+ CONSTRAINTSET = HYDRO_RES
+ end
+ @constraint(EP,
+ cHydroReservoirStart[y in CONSTRAINTSET, t in START_SUBPERIODS],
+ EP[:vS_HYDRO][y,
+ t]==EP[:vS_HYDRO][y, hoursbefore(p, t, 1)] -
+ (1 / efficiency_down(gen[y]) * EP[:vP][y, t]) - vSPILL[y, t] +
+ inputs["pP_Max"][y, t] * EP[:eTotalCap][y])
+
+    ### Constraints common to all reservoir hydro (y in set HYDRO_RES) ###
+ @constraints(EP,
+ begin
+ ### NOTE: time coupling constraints in this block do not apply to first hour in each sample period;
+ # Energy stored in reservoir at end of each other hour is equal to energy at end of prior hour less generation and spill and + inflows in the current hour
+ # The ["pP_Max"][y,t] term here refers to inflows as a fraction of peak discharge power capacity.
+ # DEV NOTE: Last inputs["pP_Max"][y,t] term above is inflows; currently part of capacity factors inputs in Generators_variability.csv but should be moved to its own Hydro_inflows.csv input in future.
+
+ # Constraints for reservoir hydro
+ cHydroReservoirInterior[y in HYDRO_RES, t in INTERIOR_SUBPERIODS],
+ EP[:vS_HYDRO][y, t] == (EP[:vS_HYDRO][y, hoursbefore(p, t, 1)] -
+ (1 / efficiency_down(gen[y]) * EP[:vP][y, t]) - vSPILL[y, t] +
+ inputs["pP_Max"][y, t] * EP[:eTotalCap][y])
+
+ # Maximum ramp up and down
+ cRampUp[y in HYDRO_RES, t in 1:T],
+ EP[:vP][y, t] + regulation_term[y, t] + reserves_term[y, t] -
+ EP[:vP][y, hoursbefore(p, t, 1)] <=
+ ramp_up_fraction(gen[y]) * EP[:eTotalCap][y]
+ cRampDown[y in HYDRO_RES, t in 1:T],
+ EP[:vP][y, hoursbefore(p, t, 1)] - EP[:vP][y, t] - regulation_term[y, t] +
+ reserves_term[y, hoursbefore(p, t, 1)] <=
+ ramp_down_fraction(gen[y]) * EP[:eTotalCap][y]
+ # Minimum streamflow running requirements (power generation and spills must be >= min value) in all hours
+ cHydroMinFlow[y in HYDRO_RES, t in 1:T],
+ EP[:vP][y, t] + EP[:vSPILL][y, t] >= min_power(gen[y]) * EP[:eTotalCap][y]
+ # DEV NOTE: When creating new hydro inputs, should rename Min_Power with Min_flow or similar for clarity since this includes spilled water as well
+
+ # Maximum discharging rate must be less than power rating OR available stored energy at start of hour, whichever is less
+ # DEV NOTE: We do not currently account for hydro power plant outages - leave it for later to figure out if we should.
+            # DEV NOTE (CONTD): If we define pPMax as hourly availability of the plant and define inflows as a separate parameter, then notation will be consistent with its use for other resources
+ cHydroMaxPower[y in HYDRO_RES, t in 1:T], EP[:vP][y, t] <= EP[:eTotalCap][y]
+ cHydroMaxOutflow[y in HYDRO_RES, t in 1:T],
+ EP[:vP][y, t] <= EP[:vS_HYDRO][y, hoursbefore(p, t, 1)]
+ end)
+
+ ### Constraints to limit maximum energy in storage based on known limits on reservoir energy capacity (only for HYDRO_RES_KNOWN_CAP)
+ # Maximum energy stored in reservoir must be less than energy capacity in all hours - only applied to HYDRO_RES_KNOWN_CAP
+ @constraint(EP,
+ cHydroMaxEnergy[y in HYDRO_RES_KNOWN_CAP, t in 1:T],
+ EP[:vS_HYDRO][y, t]<=hydro_energy_to_power_ratio(gen[y]) * EP[:eTotalCap][y])
+
+ if setup["OperationalReserves"] == 1
+ ### Reserve related constraints for reservoir hydro resources (y in HYDRO_RES), if used
+ hydro_res_operational_reserves!(EP, inputs)
+ end
+    ##CO2 Policy Module Hydro Res Generation by zone
+ @expression(EP, eGenerationByHydroRes[z = 1:Z, t = 1:T], # the unit is GW
+ sum(EP[:vP][y, t] for y in intersect(HYDRO_RES, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByHydroRes)
end
@doc raw"""
@@ -195,19 +212,18 @@ r_{y,z, t} \leq \upsilon^{rsv}_{y,z}\times \Delta^{total}_{y,z}
```
"""
function hydro_res_operational_reserves!(EP::Model, inputs::Dict)
+ println("Hydro Reservoir Operational Reserves Module")
- println("Hydro Reservoir Operational Reserves Module")
-
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- HYDRO_RES = inputs["HYDRO_RES"]
+ HYDRO_RES = inputs["HYDRO_RES"]
REG = inputs["REG"]
RSV = inputs["RSV"]
- HYDRO_RES_REG = intersect(HYDRO_RES, REG) # Set of reservoir hydro resources with regulation reserves
- HYDRO_RES_RSV = intersect(HYDRO_RES, RSV) # Set of reservoir hydro resources with spinning reserves
+ HYDRO_RES_REG = intersect(HYDRO_RES, REG) # Set of reservoir hydro resources with regulation reserves
+ HYDRO_RES_RSV = intersect(HYDRO_RES, RSV) # Set of reservoir hydro resources with spinning reserves
vP = EP[:vP]
vREG = EP[:vREG]
@@ -224,9 +240,13 @@ function hydro_res_operational_reserves!(EP::Model, inputs::Dict)
S = HYDRO_RES_RSV
add_similar_to_expression!(max_up_reserves_lhs[S, :], vRSV[S, :])
- @constraint(EP, [y in HYDRO_RES, t in 1:T], max_up_reserves_lhs[y, t] <= eTotalCap[y])
- @constraint(EP, [y in HYDRO_RES, t in 1:T], max_dn_reserves_lhs[y, t] >= 0)
+ @constraint(EP, [y in HYDRO_RES, t in 1:T], max_up_reserves_lhs[y, t]<=eTotalCap[y])
+ @constraint(EP, [y in HYDRO_RES, t in 1:T], max_dn_reserves_lhs[y, t]>=0)
- @constraint(EP, [y in HYDRO_RES_REG, t in 1:T], vREG[y, t] <= reg_max(gen[y]) * eTotalCap[y])
- @constraint(EP, [y in HYDRO_RES_RSV, t in 1:T], vRSV[y, t] <= rsv_max(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in HYDRO_RES_REG, t in 1:T],
+ vREG[y, t]<=reg_max(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in HYDRO_RES_RSV, t in 1:T],
+ vRSV[y, t]<=rsv_max(gen[y]) * eTotalCap[y])
end
diff --git a/src/model/resources/hydrogen/electrolyzer.jl b/src/model/resources/hydrogen/electrolyzer.jl
index f14b9a8c38..08945a62b7 100644
--- a/src/model/resources/hydrogen/electrolyzer.jl
+++ b/src/model/resources/hydrogen/electrolyzer.jl
@@ -78,99 +78,117 @@ This optional constraint (enabled by setting `HydrogenHourlyMatching==1` in `gen
This constraint permits modeling of the 'three pillars' requirements for clean hydrogen supply of (1) new clean supply (if only new clean resources are designated as eligible), (2) that is deliverable to the electrolyzer (assuming co-location within the same modeled zone = deliverability), and (3) produced within the same hour as the electrolyzer consumes power (otherwise known as 'additionality/new supply', 'deliverability', and 'temporal matching requirements') See Ricks, Xu & Jenkins (2023), ''Minimizing emissions from grid-based hydrogen production in the United States'' *Environ. Res. Lett.* 18 014025 [doi:10.1088/1748-9326/acacb5](https://iopscience.iop.org/article/10.1088/1748-9326/acacb5/meta) for more.
"""
function electrolyzer!(EP::Model, inputs::Dict, setup::Dict)
- println("Electrolyzer Resources Module")
+ println("Electrolyzer Resources Module")
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- ELECTROLYZERS = inputs["ELECTROLYZER"]
- STORAGE = inputs["STOR_ALL"]
+ ELECTROLYZERS = inputs["ELECTROLYZER"]
+ STORAGE = inputs["STOR_ALL"]
- p = inputs["hours_per_subperiod"] #total number of hours per subperiod
+ p = inputs["hours_per_subperiod"] #total number of hours per subperiod
- ### Variables ###
+ ### Variables ###
- # Electrical energy consumed by electrolyzer resource "y" at hour "t"
- @variable(EP, vUSE[y=ELECTROLYZERS, t in 1:T] >=0);
+ # Electrical energy consumed by electrolyzer resource "y" at hour "t"
+ @variable(EP, vUSE[y = ELECTROLYZERS, t in 1:T]>=0)
- ### Expressions ###
+ ### Expressions ###
- ## Power Balance Expressions ##
+ ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceElectrolyzers[t in 1:T, z in 1:Z],
- sum(EP[:vUSE][y,t] for y in intersect(ELECTROLYZERS, resources_in_zone_by_rid(gen,z))))
+ @expression(EP, ePowerBalanceElectrolyzers[t in 1:T, z in 1:Z],
+ sum(EP[:vUSE][y, t]
+ for y in intersect(ELECTROLYZERS, resources_in_zone_by_rid(gen, z))))
- # Electrolyzers consume electricity so their vUSE is subtracted from power balance
- EP[:ePowerBalance] -= ePowerBalanceElectrolyzers
+ # Electrolyzers consume electricity so their vUSE is subtracted from power balance
+ EP[:ePowerBalance] -= ePowerBalanceElectrolyzers
- # Capacity Reserves Margin policy
- ## Electrolyzers currently do not contribute to capacity reserve margin. Could allow them to contribute as a curtailable demand in future.
+ # Capacity Reserves Margin policy
+ ## Electrolyzers currently do not contribute to capacity reserve margin. Could allow them to contribute as a curtailable demand in future.
- ### Constraints ###
+ ### Constraints ###
- ### Maximum ramp up and down between consecutive hours (Constraints #1-2)
- @constraints(EP, begin
- ## Maximum ramp up between consecutive hours
- [y in ELECTROLYZERS, t in 1:T], EP[:vUSE][y,t] - EP[:vUSE][y, hoursbefore(p,t,1)] <= ramp_up_fraction(gen[y])*EP[:eTotalCap][y]
+ ### Maximum ramp up and down between consecutive hours (Constraints #1-2)
+ @constraints(EP,
+ begin
+ ## Maximum ramp up between consecutive hours
+ [y in ELECTROLYZERS, t in 1:T],
+ EP[:vUSE][y, t] - EP[:vUSE][y, hoursbefore(p, t, 1)] <=
+ ramp_up_fraction(gen[y]) * EP[:eTotalCap][y]
- ## Maximum ramp down between consecutive hours
- [y in ELECTROLYZERS, t in 1:T], EP[:vUSE][y, hoursbefore(p,t,1)] - EP[:vUSE][y,t] <= ramp_down_fraction(gen[y])*EP[:eTotalCap][y]
- end)
+ ## Maximum ramp down between consecutive hours
+ [y in ELECTROLYZERS, t in 1:T],
+ EP[:vUSE][y, hoursbefore(p, t, 1)] - EP[:vUSE][y, t] <=
+ ramp_down_fraction(gen[y]) * EP[:eTotalCap][y]
+ end)
- ### Minimum and maximum power output constraints (Constraints #3-4)
+ ### Minimum and maximum power output constraints (Constraints #3-4)
# Electrolyzers currently do not contribute to operating reserves, so there is not
# special case (for OperationalReserves == 1) here.
# Could allow them to contribute as a curtailable demand in future.
+ @constraints(EP,
+ begin
+ # Minimum stable power generated per technology "y" at hour "t" Min_Power
+ [y in ELECTROLYZERS, t in 1:T],
+ EP[:vUSE][y, t] >= min_power(gen[y]) * EP[:eTotalCap][y]
+
+ # Maximum power generated per technology "y" at hour "t"
+ [y in ELECTROLYZERS, t in 1:T],
+ EP[:vUSE][y, t] <= inputs["pP_Max"][y, t] * EP[:eTotalCap][y]
+ end)
+
+ ### Minimum hydrogen production constraint (if any) (Constraint #5)
+ kt_to_t = 10^3
+ @constraint(EP,
+ cHydrogenMin[y in ELECTROLYZERS],
+ sum(inputs["omega"][t] * EP[:vUSE][y, t] / hydrogen_mwh_per_tonne(gen[y])
+ for t in 1:T)>=electrolyzer_min_kt(gen[y]) * kt_to_t)
+
+ ### Remove vP (electrolyzers do not produce power so vP = 0 for all periods)
@constraints(EP, begin
- # Minimum stable power generated per technology "y" at hour "t" Min_Power
- [y in ELECTROLYZERS, t in 1:T], EP[:vUSE][y,t] >= min_power(gen[y])*EP[:eTotalCap][y]
-
- # Maximum power generated per technology "y" at hour "t"
- [y in ELECTROLYZERS, t in 1:T], EP[:vUSE][y,t] <= inputs["pP_Max"][y,t]*EP[:eTotalCap][y]
+ [y in ELECTROLYZERS, t in 1:T], EP[:vP][y, t] == 0
end)
- ### Minimum hydrogen production constraint (if any) (Constraint #5)
- kt_to_t = 10^3
- @constraint(EP,
- cHydrogenMin[y in ELECTROLYZERS],
- sum(inputs["omega"][t] * EP[:vUSE][y,t] / hydrogen_mwh_per_tonne(gen[y]) for t=1:T) >= electrolyzer_min_kt(gen[y]) * kt_to_t
- )
-
- ### Remove vP (electrolyzers do not produce power so vP = 0 for all periods)
- @constraints(EP, begin
- [y in ELECTROLYZERS, t in 1:T], EP[:vP][y,t] == 0
- end)
-
- ### Hydrogen Hourly Supply Matching Constraint (Constraint #6) ###
- # Requires generation from qualified resources (indicated by Qualified_Hydrogen_Supply==1 in the resource .csv files)
- # from within the same zone as the electrolyzers are located to be >= hourly consumption from electrolyzers in the zone
- # (and any charging by qualified storage within the zone used to help increase electrolyzer utilization).
- if setup["HydrogenHourlyMatching"] == 1
- HYDROGEN_ZONES = unique(zone_id.(gen.Electrolyzer))
- QUALIFIED_SUPPLY = ids_with(gen, qualified_hydrogen_supply)
- @constraint(EP, cHourlyMatching[z in HYDROGEN_ZONES, t in 1:T],
- sum(EP[:vP][y,t] for y=intersect(resources_in_zone_by_rid(gen,z), QUALIFIED_SUPPLY)) >= sum(EP[:vUSE][y,t] for y=intersect(resources_in_zone_by_rid(gen,z), ELECTROLYZERS)) + sum(EP[:vCHARGE][y,t] for y=intersect(resources_in_zone_by_rid(gen,z), QUALIFIED_SUPPLY, STORAGE))
- )
- end
-
-
- ### Energy Share Requirement Policy ###
- # Since we're using vUSE to denote electrolyzer consumption, we subtract this from the eESR Energy Share Requirement balance to increase demand for clean resources if desired
- # Electrolyzer demand is only accounted for in an ESR that the electrolyzer resources is tagged in in Generates_data.csv (e.g. ESR_N > 0) and
- # a share of electrolyzer demand equal to df[y,:ESR_N] must be met by resources qualifying for ESR_N for each electrolyzer resource y.
- if setup["EnergyShareRequirement"] >= 1
- @expression(EP, eElectrolyzerESR[ESR in 1:inputs["nESR"]], sum(inputs["omega"][t]*EP[:vUSE][y,t] for y=intersect(ELECTROLYZERS, ids_with_policy(gen,esr,tag=ESR)), t in 1:T))
- EP[:eESR] -= eElectrolyzerESR
- end
-
- ### Objective Function ###
- # Subtract hydrogen revenue from objective function
- scale_factor = setup["ParameterScale"] == 1 ? 10^6 : 1 # If ParameterScale==1, costs are in millions of $
- @expression(EP, eHydrogenValue[y in ELECTROLYZERS, t in 1:T], (inputs["omega"][t] * EP[:vUSE][y,t] / hydrogen_mwh_per_tonne(gen[y]) * hydrogen_price_per_tonne(gen[y]) / scale_factor))
- @expression(EP, eTotalHydrogenValueT[t in 1:T], sum(eHydrogenValue[y,t] for y in ELECTROLYZERS))
- @expression(EP, eTotalHydrogenValue, sum(eTotalHydrogenValueT[t] for t in 1:T))
- EP[:eObj] -= eTotalHydrogenValue
-
+ ### Hydrogen Hourly Supply Matching Constraint (Constraint #6) ###
+ # Requires generation from qualified resources (indicated by Qualified_Hydrogen_Supply==1 in the resource .csv files)
+ # from within the same zone as the electrolyzers are located to be >= hourly consumption from electrolyzers in the zone
+ # (and any charging by qualified storage within the zone used to help increase electrolyzer utilization).
+ if setup["HydrogenHourlyMatching"] == 1
+ HYDROGEN_ZONES = unique(zone_id.(gen.Electrolyzer))
+ QUALIFIED_SUPPLY = ids_with(gen, qualified_hydrogen_supply)
+ @constraint(EP, cHourlyMatching[z in HYDROGEN_ZONES, t in 1:T],
+ sum(EP[:vP][y, t]
+ for y in intersect(resources_in_zone_by_rid(gen, z), QUALIFIED_SUPPLY))>=sum(EP[:vUSE][y,t]
+ for y in intersect(resources_in_zone_by_rid(gen,z), ELECTROLYZERS)) + sum(EP[:vCHARGE][y,t]
+ for y in intersect(resources_in_zone_by_rid(gen,z), QUALIFIED_SUPPLY, STORAGE)))
+ end
+
+ ### Energy Share Requirement Policy ###
+ # Since we're using vUSE to denote electrolyzer consumption, we subtract this from the eESR Energy Share Requirement balance to increase demand for clean resources if desired
+ # Electrolyzer demand is only accounted for in an ESR that the electrolyzer resources is tagged in in Generates_data.csv (e.g. ESR_N > 0) and
+ # a share of electrolyzer demand equal to df[y,:ESR_N] must be met by resources qualifying for ESR_N for each electrolyzer resource y.
+ if setup["EnergyShareRequirement"] >= 1
+ @expression(EP,
+ eElectrolyzerESR[ESR in 1:inputs["nESR"]],
+ sum(inputs["omega"][t] * EP[:vUSE][y, t]
+ for y in intersect(ELECTROLYZERS, ids_with_policy(gen, esr, tag = ESR)),
+ t in 1:T))
+ EP[:eESR] -= eElectrolyzerESR
+ end
+
+ ### Objective Function ###
+ # Subtract hydrogen revenue from objective function
+ scale_factor = setup["ParameterScale"] == 1 ? 10^6 : 1 # If ParameterScale==1, costs are in millions of $
+ @expression(EP,
+ eHydrogenValue[y in ELECTROLYZERS, t in 1:T],
+ (inputs["omega"][t] * EP[:vUSE][y, t] / hydrogen_mwh_per_tonne(gen[y]) *
+ hydrogen_price_per_tonne(gen[y])/scale_factor))
+ @expression(EP,
+ eTotalHydrogenValueT[t in 1:T],
+ sum(eHydrogenValue[y, t] for y in ELECTROLYZERS))
+ @expression(EP, eTotalHydrogenValue, sum(eTotalHydrogenValueT[t] for t in 1:T))
+ EP[:eObj] -= eTotalHydrogenValue
end
diff --git a/src/model/resources/maintenance.jl b/src/model/resources/maintenance.jl
index 1499fa09c8..1a9bda5f04 100644
--- a/src/model/resources/maintenance.jl
+++ b/src/model/resources/maintenance.jl
@@ -12,7 +12,7 @@ const MAINTENANCE_SHUT_VARS = "MaintenanceShutVariables"
"""
function resources_with_maintenance(df::DataFrame)::Vector{Int}
if "MAINT" in names(df)
- df[df.MAINT.>0, :R_ID]
+ df[df.MAINT .> 0, :R_ID]
else
Vector{Int}[]
end
@@ -58,13 +58,11 @@ end
maintenance_duration: length of a maintenance period
maintenance_begin_hours: collection of hours in which maintenance is allowed to start
"""
-function controlling_maintenance_start_hours(
- p::Int,
- t::Int,
- maintenance_duration::Int,
- maintenance_begin_hours,
-)
- controlled_hours = hoursbefore(p, t, 0:(maintenance_duration-1))
+function controlling_maintenance_start_hours(p::Int,
+ t::Int,
+ maintenance_duration::Int,
+ maintenance_begin_hours)
+ controlled_hours = hoursbefore(p, t, 0:(maintenance_duration - 1))
return intersect(controlled_hours, maintenance_begin_hours)
end
@@ -103,20 +101,17 @@ end
Creates maintenance-tracking variables and adds their Symbols to two Sets in `inputs`.
Adds constraints which act on the vCOMMIT-like variable.
"""
-function maintenance_formulation!(
- EP::Model,
- inputs::Dict,
- resource_component::AbstractString,
- r_id::Int,
- maint_begin_cadence::Int,
- maint_dur::Int,
- maint_freq_years::Int,
- cap::Float64,
- vcommit::Symbol,
- ecap::Symbol,
- integer_operational_unit_commitment::Bool,
-)
-
+function maintenance_formulation!(EP::Model,
+ inputs::Dict,
+ resource_component::AbstractString,
+ r_id::Int,
+ maint_begin_cadence::Int,
+ maint_dur::Int,
+ maint_freq_years::Int,
+ cap::Float64,
+ vcommit::Symbol,
+ ecap::Symbol,
+ integer_operational_unit_commitment::Bool)
T = 1:inputs["T"]
hours_per_subperiod = inputs["hours_per_subperiod"]
@@ -132,14 +127,11 @@ function maintenance_formulation!(
maintenance_begin_hours = 1:maint_begin_cadence:T[end]
# create variables
- vMDOWN = EP[down] = @variable(EP, [t in T], base_name = down_name, lower_bound = 0)
- vMSHUT =
- EP[shut] = @variable(
- EP,
- [t in maintenance_begin_hours],
- base_name = shut_name,
- lower_bound = 0
- )
+ vMDOWN = EP[down] = @variable(EP, [t in T], base_name=down_name, lower_bound=0)
+ vMSHUT = EP[shut] = @variable(EP,
+ [t in maintenance_begin_hours],
+ base_name=shut_name,
+ lower_bound=0)
if integer_operational_unit_commitment
set_integer.(vMDOWN)
@@ -155,22 +147,20 @@ function maintenance_formulation!(
end)
# Plant is non-committed during maintenance
- @constraint(EP, [t in T], vMDOWN[t] + vcommit[y, t] <= ecap[y] / cap)
-
- controlling_hours(t) = controlling_maintenance_start_hours(
- hours_per_subperiod,
- t,
- maint_dur,
- maintenance_begin_hours,
- )
+ @constraint(EP, [t in T], vMDOWN[t] + vcommit[y, t]<=ecap[y] / cap)
+
+ function controlling_hours(t)
+ controlling_maintenance_start_hours(hours_per_subperiod,
+ t,
+ maint_dur,
+ maintenance_begin_hours)
+ end
# Plant is down for the required number of hours
- @constraint(EP, [t in T], vMDOWN[t] == sum(vMSHUT[controlling_hours(t)]))
+ @constraint(EP, [t in T], vMDOWN[t]==sum(vMSHUT[controlling_hours(t)]))
# Plant requires maintenance every (certain number of) year(s)
- @constraint(
- EP,
- sum(vMSHUT[t] for t in maintenance_begin_hours) >= ecap[y] / cap / maint_freq_years
- )
+ @constraint(EP,
+ sum(vMSHUT[t] for t in maintenance_begin_hours)>=ecap[y] / cap / maint_freq_years)
return
end
diff --git a/src/model/resources/must_run/must_run.jl b/src/model/resources/must_run/must_run.jl
index a16efb1141..fddcba6258 100644
--- a/src/model/resources/must_run/must_run.jl
+++ b/src/model/resources/must_run/must_run.jl
@@ -13,40 +13,41 @@ For must-run resources ($y\in \mathcal{MR}$) output in each time period $t$ must
```
"""
function must_run!(EP::Model, inputs::Dict, setup::Dict)
+ println("Must-Run Resources Module")
- println("Must-Run Resources Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ G = inputs["G"] # Number of generators
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- G = inputs["G"] # Number of generators
+ MUST_RUN = inputs["MUST_RUN"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
- MUST_RUN = inputs["MUST_RUN"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ ### Expressions ###
- ### Expressions ###
+ ## Power Balance Expressions ##
- ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceNdisp[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] for y in intersect(MUST_RUN, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNdisp)
- @expression(EP, ePowerBalanceNdisp[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(MUST_RUN, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNdisp)
+ # Capacity Reserves Margin policy
+ if CapacityReserveMargin > 0
+ @expression(EP,
+ eCapResMarBalanceMustRun[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) * EP[:eTotalCap][y] *
+ inputs["pP_Max"][y, t] for y in MUST_RUN))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceMustRun)
+ end
- # Capacity Reserves Margin policy
- if CapacityReserveMargin > 0
- @expression(EP, eCapResMarBalanceMustRun[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * EP[:eTotalCap][y] * inputs["pP_Max"][y,t] for y in MUST_RUN))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceMustRun)
- end
-
- ### Constratints ###
-
- @constraint(EP, [y in MUST_RUN, t=1:T], EP[:vP][y,t] == inputs["pP_Max"][y,t]*EP[:eTotalCap][y])
- ##CO2 Polcy Module Must Run Generation by zone
- @expression(EP, eGenerationByMustRun[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(MUST_RUN, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByMustRun)
+ ### Constraints ###
+ @constraint(EP,
+ [y in MUST_RUN, t = 1:T],
+ EP[:vP][y, t]==inputs["pP_Max"][y, t] * EP[:eTotalCap][y])
+ ##CO2 Policy Module Must Run Generation by zone
+ @expression(EP, eGenerationByMustRun[z = 1:Z, t = 1:T], # the unit is GW
+ sum(EP[:vP][y, t] for y in intersect(MUST_RUN, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByMustRun)
end
diff --git a/src/model/resources/resources.jl b/src/model/resources/resources.jl
index 4b86f9ee7d..8eb07c98e8 100644
--- a/src/model/resources/resources.jl
+++ b/src/model/resources/resources.jl
@@ -14,20 +14,20 @@ Possible values:
- :Electrolyzer
"""
const resource_types = (:Thermal,
- :Vre,
- :Hydro,
- :Storage,
- :MustRun,
- :FlexDemand,
- :VreStorage,
- :Electrolyzer)
+ :Vre,
+ :Hydro,
+ :Storage,
+ :MustRun,
+ :FlexDemand,
+ :VreStorage,
+ :Electrolyzer)
# Create composite types (structs) for each resource type in resource_types
for r in resource_types
let dict = :dict, r = r
@eval begin
- struct $r{names<:Symbol, T<:Any} <: AbstractResource
- $dict::Dict{names,T}
+ struct $r{names <: Symbol, T <: Any} <: AbstractResource
+ $dict::Dict{names, T}
end
Base.parent(r::$r) = getfield(r, $(QuoteNode(dict)))
end
@@ -66,7 +66,9 @@ Allows to set the attribute `sym` of an `AbstractResource` object using dot synt
- `value`: The value to set for the attribute.
"""
-Base.setproperty!(r::AbstractResource, sym::Symbol, value) = setindex!(parent(r), value, sym)
+Base.setproperty!(r::AbstractResource, sym::Symbol, value) = setindex!(parent(r),
+ value,
+ sym)
"""
haskey(r::AbstractResource, sym::Symbol)
@@ -97,8 +99,8 @@ Retrieves the value of a specific attribute from an `AbstractResource` object. I
- The value of the attribute if it exists in the parent object, `default` otherwise.
"""
-function Base.get(r::AbstractResource, sym::Symbol, default)
- return haskey(r, sym) ? getproperty(r,sym) : default
+function Base.get(r::AbstractResource, sym::Symbol, default)
+ return haskey(r, sym) ? getproperty(r, sym) : default
end
"""
@@ -124,7 +126,7 @@ julia> vre_gen.zone
"""
function Base.getproperty(rs::Vector{<:AbstractResource}, sym::Symbol)
# if sym is Type then return a vector resources of that type
- if sym ∈ resource_types
+ if sym ∈ resource_types
res_type = eval(sym)
return Vector{res_type}(rs[isa.(rs, res_type)])
end
@@ -149,7 +151,7 @@ Set the attributes specified by `sym` to the corresponding values in `value` for
function Base.setproperty!(rs::Vector{<:AbstractResource}, sym::Symbol, value::Vector)
# if sym is a field of the resource then set that field for all resources
@assert length(rs) == length(value)
- for (r,v) in zip(rs, value)
+ for (r, v) in zip(rs, value)
setproperty!(r, sym, v)
end
return rs
@@ -172,7 +174,7 @@ Define dot syntax for setting the attributes specified by `sym` to the correspon
function Base.setindex!(rs::Vector{<:AbstractResource}, value::Vector, sym::Symbol)
# if sym is a field of the resource then set that field for all resources
@assert length(rs) == length(value)
- for (r,v) in zip(rs, value)
+ for (r, v) in zip(rs, value)
setproperty!(r, sym, v)
end
return rs
@@ -207,8 +209,8 @@ function Base.show(io::IO, r::AbstractResource)
value_length = length(resource_name(r)) + 3
println(io, "\nResource: $(r.resource) (id: $(r.id))")
println(io, repeat("-", key_length + value_length))
- for (k,v) in pairs(r)
- k,v = string(k), string(v)
+ for (k, v) in pairs(r)
+ k, v = string(k), string(v)
k = k * repeat(" ", key_length - length(k))
println(io, "$k | $v")
end
@@ -231,7 +233,6 @@ function attributes(r::AbstractResource)
return tuple(keys(parent(r))...)
end
-
"""
findall(f::Function, rs::Vector{<:AbstractResource})
@@ -254,7 +255,8 @@ julia> findall(r -> max_cap_mwh(r) != 0, gen.Storage)
50
```
"""
-Base.findall(f::Function, rs::Vector{<:AbstractResource}) = resource_id.(filter(r -> f(r), rs))
+Base.findall(f::Function, rs::Vector{<:AbstractResource}) = resource_id.(filter(r -> f(r),
+ rs))
"""
interface(name, default=default_zero, type=AbstractResource)
@@ -283,7 +285,7 @@ julia> max_cap_mw.(gen.Vre) # vectorized
9.848441999999999
```
"""
-macro interface(name, default=default_zero, type=AbstractResource)
+macro interface(name, default = default_zero, type = AbstractResource)
quote
function $(esc(name))(r::$(esc(type)))
return get(r, $(QuoteNode(name)), $(esc(default)))
@@ -314,7 +316,7 @@ julia> max_cap_mw(gen[3])
4.888236
```
"""
-function ids_with_positive(rs::Vector{T}, f::Function) where T <: AbstractResource
+function ids_with_positive(rs::Vector{T}, f::Function) where {T <: AbstractResource}
return findall(r -> f(r) > 0, rs)
end
@@ -341,13 +343,14 @@ julia> max_cap_mw(gen[3])
4.888236
```
"""
-function ids_with_positive(rs::Vector{T}, name::Symbol) where T <: AbstractResource
+function ids_with_positive(rs::Vector{T}, name::Symbol) where {T <: AbstractResource}
# if the getter function exists in GenX then use it, otherwise get the attribute directly
f = isdefined(GenX, name) ? getfield(GenX, name) : r -> getproperty(r, name)
return ids_with_positive(rs, f)
end
-function ids_with_positive(rs::Vector{T}, name::AbstractString) where T <: AbstractResource
+function ids_with_positive(rs::Vector{T},
+ name::AbstractString) where {T <: AbstractResource}
return ids_with_positive(rs, Symbol(lowercase(name)))
end
@@ -368,7 +371,7 @@ Function for finding resources in a vector `rs` where the attribute specified by
julia> ids_with_nonneg(gen, max_cap_mw)
```
"""
-function ids_with_nonneg(rs::Vector{T}, f::Function) where T <: AbstractResource
+function ids_with_nonneg(rs::Vector{T}, f::Function) where {T <: AbstractResource}
return findall(r -> f(r) >= 0, rs)
end
@@ -389,13 +392,13 @@ Function for finding resources in a vector `rs` where the attribute specified by
julia> ids_with_nonneg(gen, max_cap_mw)
```
"""
-function ids_with_nonneg(rs::Vector{T}, name::Symbol) where T <: AbstractResource
+function ids_with_nonneg(rs::Vector{T}, name::Symbol) where {T <: AbstractResource}
# if the getter function exists in GenX then use it, otherwise get the attribute directly
f = isdefined(GenX, name) ? getfield(GenX, name) : r -> getproperty(r, name)
return ids_with_nonneg(rs, f)
end
-function ids_with_nonneg(rs::Vector{T}, name::AbstractString) where T <: AbstractResource
+function ids_with_nonneg(rs::Vector{T}, name::AbstractString) where {T <: AbstractResource}
return ids_with_nonneg(rs, Symbol(lowercase(name)))
end
@@ -425,7 +428,9 @@ julia> existing_cap_mw(gen[21])
7.0773
```
"""
-function ids_with(rs::Vector{T}, f::Function, default=default_zero) where T <: AbstractResource
+function ids_with(rs::Vector{T},
+ f::Function,
+ default = default_zero) where {T <: AbstractResource}
return findall(r -> f(r) != default, rs)
end
@@ -454,13 +459,17 @@ julia> existing_cap_mw(gen[21])
7.0773
```
"""
-function ids_with(rs::Vector{T}, name::Symbol, default=default_zero) where T <: AbstractResource
+function ids_with(rs::Vector{T},
+ name::Symbol,
+ default = default_zero) where {T <: AbstractResource}
# if the getter function exists in GenX then use it, otherwise get the attribute directly
f = isdefined(GenX, name) ? getfield(GenX, name) : r -> getproperty(r, name)
return ids_with(rs, f, default)
end
-function ids_with(rs::Vector{T}, name::AbstractString, default=default_zero) where T <: AbstractResource
+function ids_with(rs::Vector{T},
+ name::AbstractString,
+ default = default_zero) where {T <: AbstractResource}
return ids_with(rs, Symbol(lowercase(name)), default)
end
@@ -477,8 +486,10 @@ Function for finding resources in a vector `rs` where the policy specified by `f
# Returns
- `ids (Vector{Int64})`: The vector of resource ids with a positive value for policy `f` and tag `tag`.
"""
-function ids_with_policy(rs::Vector{T}, f::Function; tag::Int64) where T <: AbstractResource
- return findall(r -> f(r, tag=tag) > 0, rs)
+function ids_with_policy(rs::Vector{T},
+ f::Function;
+ tag::Int64) where {T <: AbstractResource}
+ return findall(r -> f(r, tag = tag) > 0, rs)
end
"""
@@ -494,17 +505,21 @@ Function for finding resources in a vector `rs` where the policy specified by `n
# Returns
- `ids (Vector{Int64})`: The vector of resource ids with a positive value for policy `name` and tag `tag`.
"""
-function ids_with_policy(rs::Vector{T}, name::Symbol; tag::Int64) where T <: AbstractResource
+function ids_with_policy(rs::Vector{T},
+ name::Symbol;
+ tag::Int64) where {T <: AbstractResource}
# if the getter function exists in GenX then use it, otherwise get the attribute directly
if isdefined(GenX, name)
f = getfield(GenX, name)
- return ids_with_policy(rs, f, tag=tag)
+ return ids_with_policy(rs, f, tag = tag)
end
return findall(r -> getproperty(r, Symbol(string(name, "_$tag"))) > 0, rs)
end
-function ids_with_policy(rs::Vector{T}, name::AbstractString; tag::Int64) where T <: AbstractResource
- return ids_with_policy(rs, Symbol(lowercase(name)), tag=tag)
+function ids_with_policy(rs::Vector{T},
+ name::AbstractString;
+ tag::Int64) where {T <: AbstractResource}
+ return ids_with_policy(rs, Symbol(lowercase(name)), tag = tag)
end
"""
@@ -512,18 +527,18 @@ end
Default value for resource attributes.
"""
-const default_zero = 0
+const default_zero = 0
# INTERFACE FOR ALL RESOURCES
resource_name(r::AbstractResource) = r.resource
-resource_name(rs::Vector{T}) where T <: AbstractResource = rs.resource
+resource_name(rs::Vector{T}) where {T <: AbstractResource} = rs.resource
resource_id(r::AbstractResource)::Int64 = r.id
-resource_id(rs::Vector{T}) where T <: AbstractResource = resource_id.(rs)
+resource_id(rs::Vector{T}) where {T <: AbstractResource} = resource_id.(rs)
resource_type_mga(r::AbstractResource) = r.resource_type
zone_id(r::AbstractResource) = r.zone
-zone_id(rs::Vector{T}) where T <: AbstractResource = rs.zone
+zone_id(rs::Vector{T}) where {T <: AbstractResource} = rs.zone
# getter for boolean attributes (true or false) with validation
function new_build(r::AbstractResource)
@@ -551,7 +566,7 @@ function can_contribute_min_retirement(r::AbstractResource)
return Bool(get(r, :contribute_min_retirement, true))
end
-const default_minmax_cap = -1.
+const default_minmax_cap = -1.0
max_cap_mw(r::AbstractResource) = get(r, :max_cap_mw, default_minmax_cap)
min_cap_mw(r::AbstractResource) = get(r, :min_cap_mw, default_minmax_cap)
@@ -569,9 +584,13 @@ cap_size(r::AbstractResource) = get(r, :cap_size, default_zero)
num_vre_bins(r::AbstractResource) = get(r, :num_vre_bins, default_zero)
-hydro_energy_to_power_ratio(r::AbstractResource) = get(r, :hydro_energy_to_power_ratio, default_zero)
+function hydro_energy_to_power_ratio(r::AbstractResource)
+ get(r, :hydro_energy_to_power_ratio, default_zero)
+end
-qualified_hydrogen_supply(r::AbstractResource) = get(r, :qualified_hydrogen_supply, default_zero)
+function qualified_hydrogen_supply(r::AbstractResource)
+ get(r, :qualified_hydrogen_supply, default_zero)
+end
retrofit_id(r::AbstractResource)::String = get(r, :retrofit_id, "None")
function retrofit_efficiency(r::AbstractResource)
@@ -590,32 +609,58 @@ inv_cost_per_mwyr(r::AbstractResource) = get(r, :inv_cost_per_mwyr, default_zero
fixed_om_cost_per_mwyr(r::AbstractResource) = get(r, :fixed_om_cost_per_mwyr, default_zero)
var_om_cost_per_mwh(r::AbstractResource) = get(r, :var_om_cost_per_mwh, default_zero)
inv_cost_per_mwhyr(r::AbstractResource) = get(r, :inv_cost_per_mwhyr, default_zero)
-fixed_om_cost_per_mwhyr(r::AbstractResource) = get(r, :fixed_om_cost_per_mwhyr, default_zero)
-inv_cost_charge_per_mwyr(r::AbstractResource) = get(r, :inv_cost_charge_per_mwyr, default_zero)
-fixed_om_cost_charge_per_mwyr(r::AbstractResource) = get(r, :fixed_om_cost_charge_per_mwyr, default_zero)
+function fixed_om_cost_per_mwhyr(r::AbstractResource)
+ get(r, :fixed_om_cost_per_mwhyr, default_zero)
+end
+function inv_cost_charge_per_mwyr(r::AbstractResource)
+ get(r, :inv_cost_charge_per_mwyr, default_zero)
+end
+function fixed_om_cost_charge_per_mwyr(r::AbstractResource)
+ get(r, :fixed_om_cost_charge_per_mwyr, default_zero)
+end
start_cost_per_mw(r::AbstractResource) = get(r, :start_cost_per_mw, default_zero)
# fuel
fuel(r::AbstractResource) = get(r, :fuel, "None")
-start_fuel_mmbtu_per_mw(r::AbstractResource) = get(r, :start_fuel_mmbtu_per_mw, default_zero)
-heat_rate_mmbtu_per_mwh(r::AbstractResource) = get(r, :heat_rate_mmbtu_per_mwh, default_zero)
+function start_fuel_mmbtu_per_mw(r::AbstractResource)
+ get(r, :start_fuel_mmbtu_per_mw, default_zero)
+end
+function heat_rate_mmbtu_per_mwh(r::AbstractResource)
+ get(r, :heat_rate_mmbtu_per_mwh, default_zero)
+end
co2_capture_fraction(r::AbstractResource) = get(r, :co2_capture_fraction, default_zero)
-co2_capture_fraction_startup(r::AbstractResource) = get(r, :co2_capture_fraction_startup, default_zero)
-ccs_disposal_cost_per_metric_ton(r::AbstractResource) = get(r, :ccs_disposal_cost_per_metric_ton, default_zero)
+function co2_capture_fraction_startup(r::AbstractResource)
+ get(r, :co2_capture_fraction_startup, default_zero)
+end
+function ccs_disposal_cost_per_metric_ton(r::AbstractResource)
+ get(r, :ccs_disposal_cost_per_metric_ton, default_zero)
+end
biomass(r::AbstractResource) = get(r, :biomass, default_zero)
multi_fuels(r::AbstractResource) = get(r, :multi_fuels, default_zero)
-fuel_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag)), "None")
+fuel_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel", tag)), "None")
num_fuels(r::AbstractResource) = get(r, :num_fuels, default_zero)
-heat_rate_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("heat_rate",tag, "_mmbtu_per_mwh")), default_zero)
-max_cofire_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag, "_max_cofire_level")), 1)
-min_cofire_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag, "_min_cofire_level")), default_zero)
-max_cofire_start_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag, "_max_cofire_level_start")), 1)
-min_cofire_start_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag, "_min_cofire_level_start")), default_zero)
+function heat_rate_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("heat_rate", tag, "_mmbtu_per_mwh")), default_zero)
+end
+function max_cofire_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("fuel", tag, "_max_cofire_level")), 1)
+end
+function min_cofire_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("fuel", tag, "_min_cofire_level")), default_zero)
+end
+function max_cofire_start_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("fuel", tag, "_max_cofire_level_start")), 1)
+end
+function min_cofire_start_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("fuel", tag, "_min_cofire_level_start")), default_zero)
+end
# Reservoir hydro and storage
const default_percent = 1.0
-efficiency_up(r::T) where T <: Union{Hydro,Storage} = get(r, :eff_up, default_percent)
-efficiency_down(r::T) where T <: Union{Hydro,Storage} = get(r, :eff_down, default_percent)
+efficiency_up(r::T) where {T <: Union{Hydro, Storage}} = get(r, :eff_up, default_percent)
+function efficiency_down(r::T) where {T <: Union{Hydro, Storage}}
+ get(r, :eff_down, default_percent)
+end
# Ramp up and down
const VarPower = Union{Electrolyzer, Hydro, Thermal}
@@ -630,8 +675,12 @@ capital_recovery_period(r::Storage) = get(r, :capital_recovery_period, 15)
capital_recovery_period(r::AbstractResource) = get(r, :capital_recovery_period, 30)
tech_wacc(r::AbstractResource) = get(r, :wacc, default_zero)
min_retired_cap_mw(r::AbstractResource) = get(r, :min_retired_cap_mw, default_zero)
-min_retired_energy_cap_mw(r::AbstractResource) = get(r, :min_retired_energy_cap_mw, default_zero)
-min_retired_charge_cap_mw(r::AbstractResource) = get(r, :min_retired_charge_cap_mw, default_zero)
+function min_retired_energy_cap_mw(r::AbstractResource)
+ get(r, :min_retired_energy_cap_mw, default_zero)
+end
+function min_retired_charge_cap_mw(r::AbstractResource)
+ get(r, :min_retired_charge_cap_mw, default_zero)
+end
cum_min_retired_cap_mw(r::AbstractResource) = r.cum_min_retired_cap_mw
cum_min_retired_energy_cap_mw(r::AbstractResource) = r.cum_min_retired_energy_cap_mw
cum_min_retired_charge_cap_mw(r::AbstractResource) = r.cum_min_retired_charge_cap_mw
@@ -643,45 +692,83 @@ mga(r::AbstractResource) = get(r, :mga, default_zero)
esr(r::AbstractResource; tag::Int64) = get(r, Symbol("esr_$tag"), default_zero)
min_cap(r::AbstractResource; tag::Int64) = get(r, Symbol("min_cap_$tag"), default_zero)
max_cap(r::AbstractResource; tag::Int64) = get(r, Symbol("max_cap_$tag"), default_zero)
-derating_factor(r::AbstractResource; tag::Int64) = get(r, Symbol("derating_factor_$tag"), default_zero)
+function derating_factor(r::AbstractResource; tag::Int64)
+ get(r, Symbol("derating_factor_$tag"), default_zero)
+end
# write_outputs
region(r::AbstractResource) = r.region
cluster(r::AbstractResource) = r.cluster
# UTILITY FUNCTIONS for working with resources
-is_LDS(rs::Vector{T}) where T <: AbstractResource = findall(r -> get(r, :lds, default_zero) == 1, rs)
-is_SDS(rs::Vector{T}) where T <: AbstractResource = findall(r -> get(r, :lds, default_zero) == 0, rs)
+function is_LDS(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> get(r, :lds, default_zero) == 1, rs)
+end
+function is_SDS(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> get(r, :lds, default_zero) == 0, rs)
+end
-ids_with_mga(rs::Vector{T}) where T <: AbstractResource = findall(r -> mga(r) == 1, rs)
+ids_with_mga(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> mga(r) == 1, rs)
-ids_with_fuel(rs::Vector{T}) where T <: AbstractResource = findall(r -> fuel(r) != "None", rs)
+function ids_with_fuel(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> fuel(r) != "None", rs)
+end
-ids_with_singlefuel(rs::Vector{T}) where T <: AbstractResource = findall(r -> multi_fuels(r) == 0, rs)
-ids_with_multifuels(rs::Vector{T}) where T <: AbstractResource = findall(r -> multi_fuels(r) == 1, rs)
+function ids_with_singlefuel(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> multi_fuels(r) == 0, rs)
+end
+function ids_with_multifuels(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> multi_fuels(r) == 1, rs)
+end
-is_buildable(rs::Vector{T}) where T <: AbstractResource = findall(r -> new_build(r) == true, rs)
-is_retirable(rs::Vector{T}) where T <: AbstractResource = findall(r -> can_retire(r) == true, rs)
-ids_can_retrofit(rs::Vector{T}) where T <: AbstractResource = findall(r -> can_retrofit(r) == true, rs)
-ids_retrofit_options(rs::Vector{T}) where T <: AbstractResource = findall(r -> is_retrofit_option(r) == true, rs)
+function is_buildable(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> new_build(r) == true, rs)
+end
+function is_retirable(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> can_retire(r) == true, rs)
+end
+function ids_can_retrofit(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> can_retrofit(r) == true, rs)
+end
+function ids_retrofit_options(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> is_retrofit_option(r) == true, rs)
+end
# Unit commitment
-ids_with_unit_commitment(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Thermal) && r.model == 1, rs)
+function ids_with_unit_commitment(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, Thermal) && r.model == 1, rs)
+end
# Without unit commitment
-no_unit_commitment(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Thermal) && r.model == 2, rs)
+function no_unit_commitment(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, Thermal) && r.model == 2, rs)
+end
# Operational Reserves
-ids_with_regulation_reserve_requirements(rs::Vector{T}) where T <: AbstractResource = findall(r -> reg_max(r) > 0, rs)
-ids_with_spinning_reserve_requirements(rs::Vector{T}) where T <: AbstractResource = findall(r -> rsv_max(r) > 0, rs)
+function ids_with_regulation_reserve_requirements(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> reg_max(r) > 0, rs)
+end
+function ids_with_spinning_reserve_requirements(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> rsv_max(r) > 0, rs)
+end
# Maintenance
-ids_with_maintenance(rs::Vector{T}) where T <: AbstractResource = findall(r -> get(r, :maint, default_zero) == 1, rs)
+function ids_with_maintenance(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> get(r, :maint, default_zero) == 1, rs)
+end
maintenance_duration(r::AbstractResource) = get(r, :maintenance_duration, default_zero)
-maintenance_cycle_length_years(r::AbstractResource) = get(r, :maintenance_cycle_length_years, default_zero)
-maintenance_begin_cadence(r::AbstractResource) = get(r, :maintenance_begin_cadence, default_zero)
+function maintenance_cycle_length_years(r::AbstractResource)
+ get(r, :maintenance_cycle_length_years, default_zero)
+end
+function maintenance_begin_cadence(r::AbstractResource)
+ get(r, :maintenance_begin_cadence, default_zero)
+end
-ids_contribute_min_retirement(rs::Vector{T}) where T <: AbstractResource = findall(r -> can_contribute_min_retirement(r) == true, rs)
-ids_not_contribute_min_retirement(rs::Vector{T}) where T <: AbstractResource = findall(r -> can_contribute_min_retirement(r) == false, rs)
+function ids_contribute_min_retirement(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> can_contribute_min_retirement(r) == true, rs)
+end
+function ids_not_contribute_min_retirement(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> can_contribute_min_retirement(r) == false, rs)
+end
# STORAGE interface
"""
@@ -689,14 +776,18 @@ ids_not_contribute_min_retirement(rs::Vector{T}) where T <: AbstractResource = f
Returns the indices of all storage resources in the vector `rs`.
"""
-storage(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Storage), rs)
+storage(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, Storage), rs)
self_discharge(r::Storage) = r.self_disch
min_duration(r::Storage) = r.min_duration
max_duration(r::Storage) = r.max_duration
var_om_cost_per_mwh_in(r::Storage) = get(r, :var_om_cost_per_mwh_in, default_zero)
-symmetric_storage(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Storage) && r.model == 1, rs)
-asymmetric_storage(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Storage) && r.model == 2, rs)
+function symmetric_storage(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, Storage) && r.model == 1, rs)
+end
+function asymmetric_storage(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, Storage) && r.model == 2, rs)
+end
# HYDRO interface
"""
@@ -704,7 +795,7 @@ asymmetric_storage(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa
Returns the indices of all hydro resources in the vector `rs`.
"""
-hydro(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Hydro), rs)
+hydro(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, Hydro), rs)
# THERMAL interface
"""
@@ -712,10 +803,12 @@ hydro(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Hydro), rs
Returns the indices of all thermal resources in the vector `rs`.
"""
-thermal(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Thermal), rs)
+thermal(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, Thermal), rs)
up_time(r::Thermal) = get(r, :up_time, default_zero)
down_time(r::Thermal) = get(r, :down_time, default_zero)
-pwfu_fuel_usage_zero_load_mmbtu_per_h(r::Thermal) = get(r, :pwfu_fuel_usage_zero_load_mmbtu_per_h, default_zero)
+function pwfu_fuel_usage_zero_load_mmbtu_per_h(r::Thermal)
+ get(r, :pwfu_fuel_usage_zero_load_mmbtu_per_h, default_zero)
+end
# VRE interface
"""
@@ -723,7 +816,7 @@ pwfu_fuel_usage_zero_load_mmbtu_per_h(r::Thermal) = get(r, :pwfu_fuel_usage_zero
Returns the indices of all Vre resources in the vector `rs`.
"""
-vre(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Vre), rs)
+vre(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, Vre), rs)
# ELECTROLYZER interface
"""
@@ -731,7 +824,10 @@ vre(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Vre), rs)
Returns the indices of all electrolyzer resources in the vector `rs`.
"""
-electrolyzer(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Electrolyzer), rs)
+electrolyzer(rs::Vector{T}) where {T <: AbstractResource} = findall(
+ r -> isa(r,
+ Electrolyzer),
+ rs)
electrolyzer_min_kt(r::Electrolyzer) = r.electrolyzer_min_kt
hydrogen_mwh_per_tonne(r::Electrolyzer) = r.hydrogen_mwh_per_tonne
hydrogen_price_per_tonne(r::Electrolyzer) = r.hydrogen_price_per_tonne
@@ -742,7 +838,8 @@ hydrogen_price_per_tonne(r::Electrolyzer) = r.hydrogen_price_per_tonne
Returns the indices of all flexible demand resources in the vector `rs`.
"""
-flex_demand(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,FlexDemand), rs)
+flex_demand(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, FlexDemand),
+ rs)
flexible_demand_energy_eff(r::FlexDemand) = r.flexible_demand_energy_eff
max_flexible_demand_delay(r::FlexDemand) = r.max_flexible_demand_delay
max_flexible_demand_advance(r::FlexDemand) = r.max_flexible_demand_advance
@@ -754,7 +851,7 @@ var_om_cost_per_mwh_in(r::FlexDemand) = get(r, :var_om_cost_per_mwh_in, default_
Returns the indices of all must-run resources in the vector `rs`.
"""
-must_run(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,MustRun), rs)
+must_run(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, MustRun), rs)
# VRE_STOR interface
"""
@@ -762,7 +859,7 @@ must_run(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,MustRun
Returns the indices of all VRE_STOR resources in the vector `rs`.
"""
-vre_stor(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage), rs)
+vre_stor(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, VreStorage), rs)
technology(r::VreStorage) = r.technology
self_discharge(r::VreStorage) = r.self_disch
@@ -771,154 +868,206 @@ self_discharge(r::VreStorage) = r.self_disch
Returns the indices of all co-located solar resources in the vector `rs`.
"""
-solar(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.solar != 0, rs)
+solar(rs::Vector{T}) where {T <: AbstractResource} = findall(
+ r -> isa(r, VreStorage) &&
+ r.solar != 0,
+ rs)
"""
wind(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located wind resources in the vector `rs`.
"""
-wind(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.wind != 0, rs)
+wind(rs::Vector{T}) where {T <: AbstractResource} = findall(
+ r -> isa(r, VreStorage) &&
+ r.wind != 0,
+ rs)
"""
storage_dc_discharge(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located storage resources in the vector `rs` that discharge DC.
"""
-storage_dc_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_discharge >= 1, rs)
-storage_sym_dc_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_discharge == 1, rs)
-storage_asym_dc_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_discharge == 2, rs)
+storage_dc_discharge(rs::Vector{T}) where {T <: AbstractResource} = findall(
+ r -> isa(r,
+ VreStorage) && r.stor_dc_discharge >= 1,
+ rs)
+function storage_sym_dc_discharge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_dc_discharge == 1, rs)
+end
+function storage_asym_dc_discharge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_dc_discharge == 2, rs)
+end
"""
storage_dc_charge(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located storage resources in the vector `rs` that charge DC.
"""
-storage_dc_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_charge >= 1, rs)
-storage_sym_dc_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_charge == 1, rs)
-storage_asym_dc_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_charge == 2, rs)
+storage_dc_charge(rs::Vector{T}) where {T <: AbstractResource} = findall(
+ r -> isa(r,
+ VreStorage) && r.stor_dc_charge >= 1,
+ rs)
+function storage_sym_dc_charge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_dc_charge == 1, rs)
+end
+function storage_asym_dc_charge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_dc_charge == 2, rs)
+end
"""
storage_ac_discharge(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located storage resources in the vector `rs` that discharge AC.
"""
-storage_ac_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_discharge >= 1, rs)
-storage_sym_ac_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_discharge == 1, rs)
-storage_asym_ac_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_discharge == 2, rs)
+storage_ac_discharge(rs::Vector{T}) where {T <: AbstractResource} = findall(
+ r -> isa(r,
+ VreStorage) && r.stor_ac_discharge >= 1,
+ rs)
+function storage_sym_ac_discharge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_ac_discharge == 1, rs)
+end
+function storage_asym_ac_discharge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_ac_discharge == 2, rs)
+end
"""
storage_ac_charge(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located storage resources in the vector `rs` that charge AC.
"""
-storage_ac_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_charge >= 1, rs)
-storage_sym_ac_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_charge == 1, rs)
-storage_asym_ac_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_charge == 2, rs)
+storage_ac_charge(rs::Vector{T}) where {T <: AbstractResource} = findall(
+ r -> isa(r,
+ VreStorage) && r.stor_ac_charge >= 1,
+ rs)
+function storage_sym_ac_charge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_ac_charge == 1, rs)
+end
+function storage_asym_ac_charge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_ac_charge == 2, rs)
+end
-is_LDS_VRE_STOR(rs::Vector{T}) where T <: AbstractResource = findall(r -> get(r, :lds_vre_stor, default_zero) != 0, rs)
+function is_LDS_VRE_STOR(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> get(r, :lds_vre_stor, default_zero) != 0, rs)
+end
# loop over the above attributes and define function interfaces for each one
-for attr in (:existing_cap_solar_mw,
- :existing_cap_wind_mw,
- :existing_cap_inverter_mw,
- :existing_cap_charge_dc_mw,
- :existing_cap_charge_ac_mw,
- :existing_cap_discharge_dc_mw,
- :existing_cap_discharge_ac_mw)
+for attr in (:existing_cap_solar_mw,
+ :existing_cap_wind_mw,
+ :existing_cap_inverter_mw,
+ :existing_cap_charge_dc_mw,
+ :existing_cap_charge_ac_mw,
+ :existing_cap_discharge_dc_mw,
+ :existing_cap_discharge_ac_mw)
@eval @interface $attr
end
-for attr in (:max_cap_solar_mw,
- :max_cap_wind_mw,
- :max_cap_inverter_mw,
- :max_cap_charge_dc_mw,
- :max_cap_charge_ac_mw,
- :max_cap_discharge_dc_mw,
- :max_cap_discharge_ac_mw,
- :min_cap_solar_mw,
- :min_cap_wind_mw,
- :min_cap_inverter_mw,
- :min_cap_charge_dc_mw,
- :min_cap_charge_ac_mw,
- :min_cap_discharge_dc_mw,
- :min_cap_discharge_ac_mw,
- :inverter_ratio_solar,
- :inverter_ratio_wind,)
+for attr in (:max_cap_solar_mw,
+ :max_cap_wind_mw,
+ :max_cap_inverter_mw,
+ :max_cap_charge_dc_mw,
+ :max_cap_charge_ac_mw,
+ :max_cap_discharge_dc_mw,
+ :max_cap_discharge_ac_mw,
+ :min_cap_solar_mw,
+ :min_cap_wind_mw,
+ :min_cap_inverter_mw,
+ :min_cap_charge_dc_mw,
+ :min_cap_charge_ac_mw,
+ :min_cap_discharge_dc_mw,
+ :min_cap_discharge_ac_mw,
+ :inverter_ratio_solar,
+ :inverter_ratio_wind)
@eval @interface $attr default_minmax_cap
end
for attr in (:etainverter,
- :inv_cost_inverter_per_mwyr,
- :inv_cost_solar_per_mwyr,
- :inv_cost_wind_per_mwyr,
- :inv_cost_discharge_dc_per_mwyr,
- :inv_cost_charge_dc_per_mwyr,
- :inv_cost_discharge_ac_per_mwyr,
- :inv_cost_charge_ac_per_mwyr,
- :fixed_om_inverter_cost_per_mwyr,
- :fixed_om_solar_cost_per_mwyr,
- :fixed_om_wind_cost_per_mwyr,
- :fixed_om_cost_discharge_dc_per_mwyr,
- :fixed_om_cost_charge_dc_per_mwyr,
- :fixed_om_cost_discharge_ac_per_mwyr,
- :fixed_om_cost_charge_ac_per_mwyr,
- :var_om_cost_per_mwh_solar,
- :var_om_cost_per_mwh_wind,
- :var_om_cost_per_mwh_charge_dc,
- :var_om_cost_per_mwh_discharge_dc,
- :var_om_cost_per_mwh_charge_ac,
- :var_om_cost_per_mwh_discharge_ac,
- :eff_up_ac,
- :eff_down_ac,
- :eff_up_dc,
- :eff_down_dc,
- :power_to_energy_ac,
- :power_to_energy_dc)
+ :inv_cost_inverter_per_mwyr,
+ :inv_cost_solar_per_mwyr,
+ :inv_cost_wind_per_mwyr,
+ :inv_cost_discharge_dc_per_mwyr,
+ :inv_cost_charge_dc_per_mwyr,
+ :inv_cost_discharge_ac_per_mwyr,
+ :inv_cost_charge_ac_per_mwyr,
+ :fixed_om_inverter_cost_per_mwyr,
+ :fixed_om_solar_cost_per_mwyr,
+ :fixed_om_wind_cost_per_mwyr,
+ :fixed_om_cost_discharge_dc_per_mwyr,
+ :fixed_om_cost_charge_dc_per_mwyr,
+ :fixed_om_cost_discharge_ac_per_mwyr,
+ :fixed_om_cost_charge_ac_per_mwyr,
+ :var_om_cost_per_mwh_solar,
+ :var_om_cost_per_mwh_wind,
+ :var_om_cost_per_mwh_charge_dc,
+ :var_om_cost_per_mwh_discharge_dc,
+ :var_om_cost_per_mwh_charge_ac,
+ :var_om_cost_per_mwh_discharge_ac,
+ :eff_up_ac,
+ :eff_down_ac,
+ :eff_up_dc,
+ :eff_down_dc,
+ :power_to_energy_ac,
+ :power_to_energy_dc)
@eval @interface $attr default_zero VreStorage
end
# Multistage
for attr in (:capital_recovery_period_dc,
- :capital_recovery_period_solar,
- :capital_recovery_period_wind,
- :capital_recovery_period_charge_dc,
- :capital_recovery_period_discharge_dc,
- :capital_recovery_period_charge_ac,
- :capital_recovery_period_discharge_ac,
- :tech_wacc_dc,
- :tech_wacc_solar,
- :tech_wacc_wind,
- :tech_wacc_charge_dc,
- :tech_wacc_discharge_dc,
- :tech_wacc_charge_ac,
- :tech_wacc_discharge_ac)
+ :capital_recovery_period_solar,
+ :capital_recovery_period_wind,
+ :capital_recovery_period_charge_dc,
+ :capital_recovery_period_discharge_dc,
+ :capital_recovery_period_charge_ac,
+ :capital_recovery_period_discharge_ac,
+ :tech_wacc_dc,
+ :tech_wacc_solar,
+ :tech_wacc_wind,
+ :tech_wacc_charge_dc,
+ :tech_wacc_discharge_dc,
+ :tech_wacc_charge_ac,
+ :tech_wacc_discharge_ac)
@eval @interface $attr default_zero VreStorage
end
# Endogenous retirement
-for attr in (:min_retired_cap_inverter_mw,
- :min_retired_cap_solar_mw,
- :min_retired_cap_wind_mw,
- :min_retired_cap_discharge_dc_mw,
- :min_retired_cap_charge_dc_mw,
- :min_retired_cap_discharge_ac_mw,
- :min_retired_cap_charge_ac_mw,)
- @eval @interface $attr default_zero
- cum_attr = Symbol("cum_"*String(attr))
- @eval @interface $cum_attr default_zero
+for attr in (:min_retired_cap_inverter_mw,
+ :min_retired_cap_solar_mw,
+ :min_retired_cap_wind_mw,
+ :min_retired_cap_discharge_dc_mw,
+ :min_retired_cap_charge_dc_mw,
+ :min_retired_cap_discharge_ac_mw,
+ :min_retired_cap_charge_ac_mw)
+ @eval @interface $attr default_zero
+ cum_attr = Symbol("cum_" * String(attr))
+ @eval @interface $cum_attr default_zero
end
## policies
# co-located storage
-esr_vrestor(r::AbstractResource; tag::Int64) = get(r, Symbol("esr_vrestor_$tag"), default_zero)
-min_cap_stor(r::AbstractResource; tag::Int64) = get(r, Symbol("min_cap_stor_$tag"), default_zero)
-max_cap_stor(r::AbstractResource; tag::Int64) = get(r, Symbol("max_cap_stor_$tag"), default_zero)
+function esr_vrestor(r::AbstractResource; tag::Int64)
+ get(r, Symbol("esr_vrestor_$tag"), default_zero)
+end
+function min_cap_stor(r::AbstractResource; tag::Int64)
+ get(r, Symbol("min_cap_stor_$tag"), default_zero)
+end
+function max_cap_stor(r::AbstractResource; tag::Int64)
+ get(r, Symbol("max_cap_stor_$tag"), default_zero)
+end
# vre part
-min_cap_solar(r::AbstractResource; tag::Int64) = get(r, Symbol("min_cap_solar_$tag"), default_zero)
-max_cap_solar(r::AbstractResource; tag::Int64) = get(r, Symbol("max_cap_solar_$tag"), default_zero)
-min_cap_wind(r::AbstractResource; tag::Int64) = get(r, Symbol("min_cap_wind_$tag"), default_zero)
-max_cap_wind(r::AbstractResource; tag::Int64) = get(r, Symbol("max_cap_wind_$tag"), default_zero)
+function min_cap_solar(r::AbstractResource; tag::Int64)
+ get(r, Symbol("min_cap_solar_$tag"), default_zero)
+end
+function max_cap_solar(r::AbstractResource; tag::Int64)
+ get(r, Symbol("max_cap_solar_$tag"), default_zero)
+end
+function min_cap_wind(r::AbstractResource; tag::Int64)
+ get(r, Symbol("min_cap_wind_$tag"), default_zero)
+end
+function max_cap_wind(r::AbstractResource; tag::Int64)
+ get(r, Symbol("max_cap_wind_$tag"), default_zero)
+end
## Utility functions for working with resources
in_zone(r::AbstractResource, zone::Int) = zone_id(r) == zone
-resources_in_zone(rs::Vector{<:AbstractResource}, zone::Int) = filter(r -> in_zone(r, zone), rs)
+function resources_in_zone(rs::Vector{<:AbstractResource}, zone::Int)
+ filter(r -> in_zone(r, zone), rs)
+end
@doc raw"""
resources_in_zone_by_rid(rs::Vector{<:AbstractResource}, zone::Int)
@@ -940,7 +1089,8 @@ Find R_ID's of resources with retrofit cluster id `cluster_id`.
# Returns
- `Vector{Int64}`: The vector of resource ids in the retrofit cluster.
"""
-function resources_in_retrofit_cluster_by_rid(rs::Vector{<:AbstractResource}, cluster_id::String)
+function resources_in_retrofit_cluster_by_rid(rs::Vector{<:AbstractResource},
+ cluster_id::String)
return resource_id.(rs[retrofit_id.(rs) .== cluster_id])
end
@@ -959,7 +1109,8 @@ Find the resource with `name` in the vector `rs`.
function resource_by_name(rs::Vector{<:AbstractResource}, name::AbstractString)
r_id = findfirst(r -> resource_name(r) == name, rs)
# check that the resource exists
- isnothing(r_id) && error("Resource $name not found in resource data. \nHint: Make sure that the resource names in input files match the ones in the \"resource\" folder.\n")
+ isnothing(r_id) &&
+ error("Resource $name not found in resource data. \nHint: Make sure that the resource names in input files match the ones in the \"resource\" folder.\n")
return rs[r_id]
end
@@ -976,7 +1127,7 @@ function validate_boolean_attribute(r::AbstractResource, attr::Symbol)
attr_value = get(r, attr, 0)
if attr_value != 0 && attr_value != 1
error("Attribute $attr in resource $(resource_name(r)) must be boolean." *
- "The only valid values are {0,1}, not $attr_value.")
+ "The only valid values are {0,1}, not $attr_value.")
end
end
@@ -991,7 +1142,7 @@ Find the resource ids of the retrofit units in the vector `rs` where all retrofi
# Returns
- `Vector{Int64}`: The vector of resource ids.
"""
-function ids_with_all_options_contributing(rs::Vector{T}) where T <: AbstractResource
+function ids_with_all_options_contributing(rs::Vector{T}) where {T <: AbstractResource}
# select resources that can retrofit
units_can_retrofit = ids_can_retrofit(rs)
# check if all retrofit options in the retrofit cluster of each retrofit resource contribute to min retirement
@@ -1011,10 +1162,13 @@ Check if all retrofit options in the retrofit cluster of the retrofit resource `
# Returns
- `Bool`: True if all retrofit options contribute to min retirement, otherwise false.
"""
-function has_all_options_contributing(retrofit_res::AbstractResource, rs::Vector{T}) where T <: AbstractResource
+function has_all_options_contributing(retrofit_res::AbstractResource,
+ rs::Vector{T}) where {T <: AbstractResource}
retro_id = retrofit_id(retrofit_res)
- return isempty(intersect(resources_in_retrofit_cluster_by_rid(rs, retro_id), ids_retrofit_options(rs), ids_not_contribute_min_retirement(rs)))
-end
+ return isempty(intersect(resources_in_retrofit_cluster_by_rid(rs, retro_id),
+ ids_retrofit_options(rs),
+ ids_not_contribute_min_retirement(rs)))
+end
"""
ids_with_all_options_not_contributing(rs::Vector{T}) where T <: AbstractResource
@@ -1027,11 +1181,12 @@ Find the resource ids of the retrofit units in the vector `rs` where all retrofi
# Returns
- `Vector{Int64}`: The vector of resource ids.
"""
-function ids_with_all_options_not_contributing(rs::Vector{T}) where T <: AbstractResource
+function ids_with_all_options_not_contributing(rs::Vector{T}) where {T <: AbstractResource}
# select resources that can retrofit
units_can_retrofit = ids_can_retrofit(rs)
# check if all retrofit options in the retrofit cluster of each retrofit resource contribute to min retirement
- condition::Vector{Bool} = has_all_options_not_contributing.(rs[units_can_retrofit], Ref(rs))
+ condition::Vector{Bool} = has_all_options_not_contributing.(rs[units_can_retrofit],
+ Ref(rs))
return units_can_retrofit[condition]
end
@@ -1047,7 +1202,10 @@ Check if all retrofit options in the retrofit cluster of the retrofit resource `
# Returns
- `Bool`: True if all retrofit options do not contribute to min retirement, otherwise false.
"""
-function has_all_options_not_contributing(retrofit_res::AbstractResource, rs::Vector{T}) where T <: AbstractResource
+function has_all_options_not_contributing(retrofit_res::AbstractResource,
+ rs::Vector{T}) where {T <: AbstractResource}
retro_id = retrofit_id(retrofit_res)
- return isempty(intersect(resources_in_retrofit_cluster_by_rid(rs, retro_id), ids_retrofit_options(rs), ids_contribute_min_retirement(rs)))
-end
\ No newline at end of file
+ return isempty(intersect(resources_in_retrofit_cluster_by_rid(rs, retro_id),
+ ids_retrofit_options(rs),
+ ids_contribute_min_retirement(rs)))
+end
diff --git a/src/model/resources/retrofits/retrofits.jl b/src/model/resources/retrofits/retrofits.jl
index d29d13fe33..164cf59025 100644
--- a/src/model/resources/retrofits/retrofits.jl
+++ b/src/model/resources/retrofits/retrofits.jl
@@ -17,25 +17,48 @@ where ${RETROFIT}$ represents the set of all retrofit IDs (clusters) in the mode
"""
function retrofit(EP::Model, inputs::Dict)
+ println("Retrofit Resources Module")
- println("Retrofit Resources Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ COMMIT = inputs["COMMIT"] # Set of all resources subject to unit commitment
+ RETROFIT_CAP = inputs["RETROFIT_CAP"] # Set of all resources being retrofitted
+ RETROFIT_OPTIONS = inputs["RETROFIT_OPTIONS"] # Set of all resources being created
+ RETROFIT_IDS = inputs["RETROFIT_IDS"] # Set of unique IDs for retrofit resources
- COMMIT = inputs["COMMIT"] # Set of all resources subject to unit commitment
- RETROFIT_CAP = inputs["RETROFIT_CAP"] # Set of all resources being retrofitted
- RETROFIT_OPTIONS = inputs["RETROFIT_OPTIONS"] # Set of all resources being created
- RETROFIT_IDS = inputs["RETROFIT_IDS"] # Set of unique IDs for retrofit resources
+ @expression(EP, eRetrofittedCapByRetroId[id in RETROFIT_IDS],
+ sum(
+ cap_size(gen[y]) * EP[:vRETROFITCAP][y]
+ for y in intersect(RETROFIT_CAP,
+ COMMIT,
+ resources_in_retrofit_cluster_by_rid(gen, id));
+ init = 0)
+ +sum(
+ EP[:vRETROFITCAP][y]
+ for y in setdiff(
+ intersect(RETROFIT_CAP,
+ resources_in_retrofit_cluster_by_rid(gen, id)),
+ COMMIT);
+ init = 0))
- @expression(EP,eRetrofittedCapByRetroId[id in RETROFIT_IDS],
- sum(cap_size(gen[y]) * EP[:vRETROFITCAP][y] for y in intersect(RETROFIT_CAP, COMMIT, resources_in_retrofit_cluster_by_rid(gen,id)); init=0)
- + sum(EP[:vRETROFITCAP][y] for y in setdiff(intersect(RETROFIT_CAP, resources_in_retrofit_cluster_by_rid(gen,id)), COMMIT); init=0))
+ @expression(EP, eRetrofitCapByRetroId[id in RETROFIT_IDS],
+ sum(
+ cap_size(gen[y]) * EP[:vCAP][y] * (1 / retrofit_efficiency(gen[y]))
+ for y in intersect(RETROFIT_OPTIONS,
+ COMMIT,
+ resources_in_retrofit_cluster_by_rid(gen, id));
+ init = 0)
+ +sum(
+ EP[:vCAP][y] * (1 / retrofit_efficiency(gen[y]))
+ for y in setdiff(
+ intersect(RETROFIT_OPTIONS,
+ resources_in_retrofit_cluster_by_rid(gen, id)),
+ COMMIT);
+ init = 0))
- @expression(EP,eRetrofitCapByRetroId[id in RETROFIT_IDS],
- sum(cap_size(gen[y]) * EP[:vCAP][y] * (1/retrofit_efficiency(gen[y])) for y in intersect(RETROFIT_OPTIONS, COMMIT, resources_in_retrofit_cluster_by_rid(gen,id)); init=0)
- + sum(EP[:vCAP][y] * (1/retrofit_efficiency(gen[y])) for y in setdiff(intersect(RETROFIT_OPTIONS, resources_in_retrofit_cluster_by_rid(gen,id)), COMMIT); init=0))
+ @constraint(EP,
+ cRetrofitCapacity[id in RETROFIT_IDS],
+ eRetrofittedCapByRetroId[id]==eRetrofitCapByRetroId[id])
- @constraint(EP, cRetrofitCapacity[id in RETROFIT_IDS], eRetrofittedCapByRetroId[id] == eRetrofitCapByRetroId[id])
-
- return EP
+ return EP
end
diff --git a/src/model/resources/storage/investment_charge.jl b/src/model/resources/storage/investment_charge.jl
index 77f67f76bc..5f92ec684c 100644
--- a/src/model/resources/storage/investment_charge.jl
+++ b/src/model/resources/storage/investment_charge.jl
@@ -39,97 +39,105 @@ In addition, this function adds investment and fixed O&M related costs related t
```
"""
function investment_charge!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Charge Investment Module")
-
- gen = inputs["RESOURCES"]
-
- MultiStage = setup["MultiStage"]
-
- STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"] # Set of storage resources with asymmetric (separte) charge/discharge capacity components
-
- NEW_CAP_CHARGE = inputs["NEW_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
- RET_CAP_CHARGE = inputs["RET_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
-
- ### Variables ###
-
- ## Storage capacity built and retired for storage resources with independent charge and discharge power capacities (STOR=2)
-
- # New installed charge capacity of resource "y"
- @variable(EP, vCAPCHARGE[y in NEW_CAP_CHARGE] >= 0)
-
- # Retired charge capacity of resource "y" from existing capacity
- @variable(EP, vRETCAPCHARGE[y in RET_CAP_CHARGE] >= 0)
-
- if MultiStage == 1
- @variable(EP, vEXISTINGCAPCHARGE[y in STOR_ASYMMETRIC] >= 0);
- end
-
- ### Expressions ###
-
- if MultiStage == 1
- @expression(EP, eExistingCapCharge[y in STOR_ASYMMETRIC], vEXISTINGCAPCHARGE[y])
- else
- @expression(EP, eExistingCapCharge[y in STOR_ASYMMETRIC], existing_charge_cap_mw(gen[y]))
- end
-
- @expression(EP, eTotalCapCharge[y in STOR_ASYMMETRIC],
- if (y in intersect(NEW_CAP_CHARGE, RET_CAP_CHARGE))
- eExistingCapCharge[y] + EP[:vCAPCHARGE][y] - EP[:vRETCAPCHARGE][y]
- elseif (y in setdiff(NEW_CAP_CHARGE, RET_CAP_CHARGE))
- eExistingCapCharge[y] + EP[:vCAPCHARGE][y]
- elseif (y in setdiff(RET_CAP_CHARGE, NEW_CAP_CHARGE))
- eExistingCapCharge[y] - EP[:vRETCAPCHARGE][y]
- else
- eExistingCapCharge[y] + EP[:vZERO]
- end
- )
-
- ## Objective Function Expressions ##
-
- # Fixed costs for resource "y" = annuitized investment cost plus fixed O&M costs
- # If resource is not eligible for new charge capacity, fixed costs are only O&M costs
- @expression(EP, eCFixCharge[y in STOR_ASYMMETRIC],
- if y in NEW_CAP_CHARGE # Resources eligible for new charge capacity
- inv_cost_charge_per_mwyr(gen[y])*vCAPCHARGE[y] + fixed_om_cost_charge_per_mwyr(gen[y])*eTotalCapCharge[y]
- else
- fixed_om_cost_charge_per_mwyr(gen[y])*eTotalCapCharge[y]
- end
- )
-
- # Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixCharge, sum(EP[:eCFixCharge][y] for y in STOR_ASYMMETRIC))
-
- # Add term to objective function expression
- if MultiStage == 1
- # OPEX multiplier scales fixed costs to account for multiple years between two model stages
- # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
- # and we have already accounted for multiple years between stages for fixed costs.
- add_to_expression!(EP[:eObj], (1/inputs["OPEXMULT"]), eTotalCFixCharge)
- else
- add_to_expression!(EP[:eObj], eTotalCFixCharge)
- end
-
- ### Constratints ###
-
- if MultiStage == 1
- # Existing capacity variable is equal to existing capacity specified in the input file
- @constraint(EP, cExistingCapCharge[y in STOR_ASYMMETRIC], EP[:vEXISTINGCAPCHARGE][y] == existing_charge_cap_mw(gen[y]))
- end
-
- ## Constraints on retirements and capacity additions
- #Cannot retire more charge capacity than existing charge capacity
- @constraint(EP, cMaxRetCharge[y in RET_CAP_CHARGE], vRETCAPCHARGE[y] <= eExistingCapCharge[y])
-
- #Constraints on new built capacity
-
- # Constraint on maximum charge capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cMaxCapCharge[y in intersect(ids_with_positive(gen, max_charge_cap_mw), STOR_ASYMMETRIC)], eTotalCapCharge[y] <= max_charge_cap_mw(gen[y]))
-
- # Constraint on minimum charge capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cMinCapCharge[y in intersect(ids_with_positive(gen, min_charge_cap_mw), STOR_ASYMMETRIC)], eTotalCapCharge[y] >= min_charge_cap_mw(gen[y]))
-
-
+ println("Charge Investment Module")
+
+ gen = inputs["RESOURCES"]
+
+ MultiStage = setup["MultiStage"]
+
+ STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"] # Set of storage resources with asymmetric (separate) charge/discharge capacity components
+
+ NEW_CAP_CHARGE = inputs["NEW_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
+ RET_CAP_CHARGE = inputs["RET_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
+
+ ### Variables ###
+
+ ## Storage capacity built and retired for storage resources with independent charge and discharge power capacities (STOR=2)
+
+ # New installed charge capacity of resource "y"
+ @variable(EP, vCAPCHARGE[y in NEW_CAP_CHARGE]>=0)
+
+ # Retired charge capacity of resource "y" from existing capacity
+ @variable(EP, vRETCAPCHARGE[y in RET_CAP_CHARGE]>=0)
+
+ if MultiStage == 1
+ @variable(EP, vEXISTINGCAPCHARGE[y in STOR_ASYMMETRIC]>=0)
+ end
+
+ ### Expressions ###
+
+ if MultiStage == 1
+ @expression(EP, eExistingCapCharge[y in STOR_ASYMMETRIC], vEXISTINGCAPCHARGE[y])
+ else
+ @expression(EP,
+ eExistingCapCharge[y in STOR_ASYMMETRIC],
+ existing_charge_cap_mw(gen[y]))
+ end
+
+ @expression(EP, eTotalCapCharge[y in STOR_ASYMMETRIC],
+ if (y in intersect(NEW_CAP_CHARGE, RET_CAP_CHARGE))
+ eExistingCapCharge[y] + EP[:vCAPCHARGE][y] - EP[:vRETCAPCHARGE][y]
+ elseif (y in setdiff(NEW_CAP_CHARGE, RET_CAP_CHARGE))
+ eExistingCapCharge[y] + EP[:vCAPCHARGE][y]
+ elseif (y in setdiff(RET_CAP_CHARGE, NEW_CAP_CHARGE))
+ eExistingCapCharge[y] - EP[:vRETCAPCHARGE][y]
+ else
+ eExistingCapCharge[y] + EP[:vZERO]
+ end)
+
+ ## Objective Function Expressions ##
+
+ # Fixed costs for resource "y" = annuitized investment cost plus fixed O&M costs
+ # If resource is not eligible for new charge capacity, fixed costs are only O&M costs
+ @expression(EP, eCFixCharge[y in STOR_ASYMMETRIC],
+ if y in NEW_CAP_CHARGE # Resources eligible for new charge capacity
+ inv_cost_charge_per_mwyr(gen[y]) * vCAPCHARGE[y] +
+ fixed_om_cost_charge_per_mwyr(gen[y]) * eTotalCapCharge[y]
+ else
+ fixed_om_cost_charge_per_mwyr(gen[y]) * eTotalCapCharge[y]
+ end)
+
+ # Sum individual resource contributions to fixed costs to get total fixed costs
+ @expression(EP, eTotalCFixCharge, sum(EP[:eCFixCharge][y] for y in STOR_ASYMMETRIC))
+
+ # Add term to objective function expression
+ if MultiStage == 1
+ # OPEX multiplier scales fixed costs to account for multiple years between two model stages
+ # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
+ # and we have already accounted for multiple years between stages for fixed costs.
+ add_to_expression!(EP[:eObj], (1 / inputs["OPEXMULT"]), eTotalCFixCharge)
+ else
+ add_to_expression!(EP[:eObj], eTotalCFixCharge)
+ end
+
+ ### Constraints ###
+
+ if MultiStage == 1
+ # Existing capacity variable is equal to existing capacity specified in the input file
+ @constraint(EP,
+ cExistingCapCharge[y in STOR_ASYMMETRIC],
+ EP[:vEXISTINGCAPCHARGE][y]==existing_charge_cap_mw(gen[y]))
+ end
+
+ ## Constraints on retirements and capacity additions
+ #Cannot retire more charge capacity than existing charge capacity
+ @constraint(EP,
+ cMaxRetCharge[y in RET_CAP_CHARGE],
+ vRETCAPCHARGE[y]<=eExistingCapCharge[y])
+
+ #Constraints on new built capacity
+
+ # Constraint on maximum charge capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
+ # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasibility
+ @constraint(EP,
+ cMaxCapCharge[y in intersect(ids_with_positive(gen, max_charge_cap_mw),
+ STOR_ASYMMETRIC)],
+ eTotalCapCharge[y]<=max_charge_cap_mw(gen[y]))
+
+ # Constraint on minimum charge capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
+ # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasibility
+ @constraint(EP,
+ cMinCapCharge[y in intersect(ids_with_positive(gen, min_charge_cap_mw),
+ STOR_ASYMMETRIC)],
+ eTotalCapCharge[y]>=min_charge_cap_mw(gen[y]))
end
diff --git a/src/model/resources/storage/investment_energy.jl b/src/model/resources/storage/investment_energy.jl
index 35757fca6b..af28ba15c2 100644
--- a/src/model/resources/storage/investment_energy.jl
+++ b/src/model/resources/storage/investment_energy.jl
@@ -42,97 +42,106 @@ In addition, this function adds investment and fixed O\&M related costs related
```
"""
function investment_energy!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Storage Investment Module")
-
- gen = inputs["RESOURCES"]
-
- MultiStage = setup["MultiStage"]
-
- STOR_ALL = inputs["STOR_ALL"] # Set of all storage resources
- NEW_CAP_ENERGY = inputs["NEW_CAP_ENERGY"] # Set of all storage resources eligible for new energy capacity
- RET_CAP_ENERGY = inputs["RET_CAP_ENERGY"] # Set of all storage resources eligible for energy capacity retirements
-
- ### Variables ###
-
- ## Energy storage reservoir capacity (MWh capacity) built/retired for storage with variable power to energy ratio (STOR=1 or STOR=2)
-
- # New installed energy capacity of resource "y"
- @variable(EP, vCAPENERGY[y in NEW_CAP_ENERGY] >= 0)
-
- # Retired energy capacity of resource "y" from existing capacity
- @variable(EP, vRETCAPENERGY[y in RET_CAP_ENERGY] >= 0)
-
- if MultiStage == 1
- @variable(EP, vEXISTINGCAPENERGY[y in STOR_ALL] >= 0);
- end
-
- ### Expressions ###
-
- if MultiStage == 1
- @expression(EP, eExistingCapEnergy[y in STOR_ALL], vEXISTINGCAPENERGY[y])
- else
- @expression(EP, eExistingCapEnergy[y in STOR_ALL], existing_cap_mwh(gen[y]))
- end
-
- @expression(EP, eTotalCapEnergy[y in STOR_ALL],
- if (y in intersect(NEW_CAP_ENERGY, RET_CAP_ENERGY))
- eExistingCapEnergy[y] + EP[:vCAPENERGY][y] - EP[:vRETCAPENERGY][y]
- elseif (y in setdiff(NEW_CAP_ENERGY, RET_CAP_ENERGY))
- eExistingCapEnergy[y] + EP[:vCAPENERGY][y]
- elseif (y in setdiff(RET_CAP_ENERGY, NEW_CAP_ENERGY))
- eExistingCapEnergy[y] - EP[:vRETCAPENERGY][y]
- else
- eExistingCapEnergy[y] + EP[:vZERO]
- end
- )
-
- ## Objective Function Expressions ##
-
- # Fixed costs for resource "y" = annuitized investment cost plus fixed O&M costs
- # If resource is not eligible for new energy capacity, fixed costs are only O&M costs
- @expression(EP, eCFixEnergy[y in STOR_ALL],
- if y in NEW_CAP_ENERGY # Resources eligible for new capacity
- inv_cost_per_mwhyr(gen[y])*vCAPENERGY[y] + fixed_om_cost_per_mwhyr(gen[y])*eTotalCapEnergy[y]
- else
- fixed_om_cost_per_mwhyr(gen[y])*eTotalCapEnergy[y]
- end
- )
-
- # Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixEnergy, sum(EP[:eCFixEnergy][y] for y in STOR_ALL))
-
- # Add term to objective function expression
- if MultiStage == 1
- # OPEX multiplier scales fixed costs to account for multiple years between two model stages
- # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
- # and we have already accounted for multiple years between stages for fixed costs.
- add_to_expression!(EP[:eObj], (1/inputs["OPEXMULT"]), eTotalCFixEnergy)
- else
- add_to_expression!(EP[:eObj], eTotalCFixEnergy)
- end
-
- ### Constraints ###
-
- if MultiStage == 1
- @constraint(EP, cExistingCapEnergy[y in STOR_ALL], EP[:vEXISTINGCAPENERGY][y] == existing_cap_mwh(gen[y]))
- end
-
- ## Constraints on retirements and capacity additions
- # Cannot retire more energy capacity than existing energy capacity
- @constraint(EP, cMaxRetEnergy[y in RET_CAP_ENERGY], vRETCAPENERGY[y] <= eExistingCapEnergy[y])
-
- ## Constraints on new built energy capacity
- # Constraint on maximum energy capacity (if applicable) [set input to -1 if no constraint on maximum energy capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MWh is >= Max_Cap_MWh and lead to infeasabilty
- @constraint(EP, cMaxCapEnergy[y in intersect(ids_with_positive(gen, max_cap_mwh), STOR_ALL)], eTotalCapEnergy[y] <= max_cap_mwh(gen[y]))
-
- # Constraint on minimum energy capacity (if applicable) [set input to -1 if no constraint on minimum energy apacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MWh is <= Min_Cap_MWh and lead to infeasabilty
- @constraint(EP, cMinCapEnergy[y in intersect(ids_with_positive(gen, min_cap_mwh), STOR_ALL)], eTotalCapEnergy[y] >= min_cap_mwh(gen[y]))
-
- # Max and min constraints on energy storage capacity built (as proportion to discharge power capacity)
- @constraint(EP, cMinCapEnergyDuration[y in STOR_ALL], EP[:eTotalCapEnergy][y] >= min_duration(gen[y]) * EP[:eTotalCap][y])
- @constraint(EP, cMaxCapEnergyDuration[y in STOR_ALL], EP[:eTotalCapEnergy][y] <= max_duration(gen[y]) * EP[:eTotalCap][y])
-
+ println("Storage Investment Module")
+
+ gen = inputs["RESOURCES"]
+
+ MultiStage = setup["MultiStage"]
+
+ STOR_ALL = inputs["STOR_ALL"] # Set of all storage resources
+ NEW_CAP_ENERGY = inputs["NEW_CAP_ENERGY"] # Set of all storage resources eligible for new energy capacity
+ RET_CAP_ENERGY = inputs["RET_CAP_ENERGY"] # Set of all storage resources eligible for energy capacity retirements
+
+ ### Variables ###
+
+ ## Energy storage reservoir capacity (MWh capacity) built/retired for storage with variable power to energy ratio (STOR=1 or STOR=2)
+
+ # New installed energy capacity of resource "y"
+ @variable(EP, vCAPENERGY[y in NEW_CAP_ENERGY]>=0)
+
+ # Retired energy capacity of resource "y" from existing capacity
+ @variable(EP, vRETCAPENERGY[y in RET_CAP_ENERGY]>=0)
+
+ if MultiStage == 1
+ @variable(EP, vEXISTINGCAPENERGY[y in STOR_ALL]>=0)
+ end
+
+ ### Expressions ###
+
+ if MultiStage == 1
+ @expression(EP, eExistingCapEnergy[y in STOR_ALL], vEXISTINGCAPENERGY[y])
+ else
+ @expression(EP, eExistingCapEnergy[y in STOR_ALL], existing_cap_mwh(gen[y]))
+ end
+
+ @expression(EP, eTotalCapEnergy[y in STOR_ALL],
+ if (y in intersect(NEW_CAP_ENERGY, RET_CAP_ENERGY))
+ eExistingCapEnergy[y] + EP[:vCAPENERGY][y] - EP[:vRETCAPENERGY][y]
+ elseif (y in setdiff(NEW_CAP_ENERGY, RET_CAP_ENERGY))
+ eExistingCapEnergy[y] + EP[:vCAPENERGY][y]
+ elseif (y in setdiff(RET_CAP_ENERGY, NEW_CAP_ENERGY))
+ eExistingCapEnergy[y] - EP[:vRETCAPENERGY][y]
+ else
+ eExistingCapEnergy[y] + EP[:vZERO]
+ end)
+
+ ## Objective Function Expressions ##
+
+ # Fixed costs for resource "y" = annuitized investment cost plus fixed O&M costs
+ # If resource is not eligible for new energy capacity, fixed costs are only O&M costs
+ @expression(EP, eCFixEnergy[y in STOR_ALL],
+ if y in NEW_CAP_ENERGY # Resources eligible for new capacity
+ inv_cost_per_mwhyr(gen[y]) * vCAPENERGY[y] +
+ fixed_om_cost_per_mwhyr(gen[y]) * eTotalCapEnergy[y]
+ else
+ fixed_om_cost_per_mwhyr(gen[y]) * eTotalCapEnergy[y]
+ end)
+
+ # Sum individual resource contributions to fixed costs to get total fixed costs
+ @expression(EP, eTotalCFixEnergy, sum(EP[:eCFixEnergy][y] for y in STOR_ALL))
+
+ # Add term to objective function expression
+ if MultiStage == 1
+ # OPEX multiplier scales fixed costs to account for multiple years between two model stages
+ # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
+ # and we have already accounted for multiple years between stages for fixed costs.
+ add_to_expression!(EP[:eObj], (1 / inputs["OPEXMULT"]), eTotalCFixEnergy)
+ else
+ add_to_expression!(EP[:eObj], eTotalCFixEnergy)
+ end
+
+ ### Constraints ###
+
+ if MultiStage == 1
+ @constraint(EP,
+ cExistingCapEnergy[y in STOR_ALL],
+ EP[:vEXISTINGCAPENERGY][y]==existing_cap_mwh(gen[y]))
+ end
+
+ ## Constraints on retirements and capacity additions
+ # Cannot retire more energy capacity than existing energy capacity
+ @constraint(EP,
+ cMaxRetEnergy[y in RET_CAP_ENERGY],
+ vRETCAPENERGY[y]<=eExistingCapEnergy[y])
+
+ ## Constraints on new built energy capacity
+ # Constraint on maximum energy capacity (if applicable) [set input to -1 if no constraint on maximum energy capacity]
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MWh is >= Max_Cap_MWh and lead to infeasibility
+ @constraint(EP,
+ cMaxCapEnergy[y in intersect(ids_with_positive(gen, max_cap_mwh), STOR_ALL)],
+ eTotalCapEnergy[y]<=max_cap_mwh(gen[y]))
+
+    # Constraint on minimum energy capacity (if applicable) [set input to -1 if no constraint on minimum energy capacity]
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MWh is <= Min_Cap_MWh and lead to infeasibility
+ @constraint(EP,
+ cMinCapEnergy[y in intersect(ids_with_positive(gen, min_cap_mwh), STOR_ALL)],
+ eTotalCapEnergy[y]>=min_cap_mwh(gen[y]))
+
+ # Max and min constraints on energy storage capacity built (as proportion to discharge power capacity)
+ @constraint(EP,
+ cMinCapEnergyDuration[y in STOR_ALL],
+ EP[:eTotalCapEnergy][y]>=min_duration(gen[y]) * EP[:eTotalCap][y])
+ @constraint(EP,
+ cMaxCapEnergyDuration[y in STOR_ALL],
+ EP[:eTotalCapEnergy][y]<=max_duration(gen[y]) * EP[:eTotalCap][y])
end
diff --git a/src/model/resources/storage/long_duration_storage.jl b/src/model/resources/storage/long_duration_storage.jl
index f88b22b0f6..3730d3dff1 100644
--- a/src/model/resources/storage/long_duration_storage.jl
+++ b/src/model/resources/storage/long_duration_storage.jl
@@ -58,92 +58,127 @@ If the capacity reserve margin constraint is enabled, a similar set of constrain
All other constraints are identical to those used to track the actual state of charge, except with the new variables $Q^{CRM}_{o,z,n}$ and $\Delta Q^{CRM}_{o,z,n}$ used in place of $Q_{o,z,n}$ and $\Delta Q_{o,z,n}$, respectively.
"""
function long_duration_storage!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Long Duration Storage Module")
-
- gen = inputs["RESOURCES"]
-
- CapacityReserveMargin = setup["CapacityReserveMargin"]
-
- REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
-
- STOR_LONG_DURATION = inputs["STOR_LONG_DURATION"]
-
- hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
-
- dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
- NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
-
- MODELED_PERIODS_INDEX = 1:NPeriods
- REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!,:Rep_Period] .== MODELED_PERIODS_INDEX]
-
- ### Variables ###
-
- # Variables to define inter-period energy transferred between modeled periods
-
- # State of charge of storage at beginning of each modeled period n
- @variable(EP, vSOCw[y in STOR_LONG_DURATION, n in MODELED_PERIODS_INDEX] >= 0)
-
- # Build up in storage inventory over each representative period w
- # Build up inventory can be positive or negative
- @variable(EP, vdSOC[y in STOR_LONG_DURATION, w=1:REP_PERIOD])
-
- if CapacityReserveMargin > 0
- # State of charge held in reserve for storage at beginning of each modeled period n
- @variable(EP, vCAPRES_socw[y in STOR_LONG_DURATION, n in MODELED_PERIODS_INDEX] >= 0)
-
- # Build up in storage inventory held in reserve over each representative period w
- # Build up inventory can be positive or negative
- @variable(EP, vCAPRES_dsoc[y in STOR_LONG_DURATION, w=1:REP_PERIOD])
- end
-
- ### Constraints ###
-
- # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
- # Modified initial state of storage for long-duration storage - initialize wth value carried over from last period
- # Alternative to cSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
- # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @constraint(EP, cSoCBalLongDurationStorageStart[w=1:REP_PERIOD, y in STOR_LONG_DURATION],
- EP[:vS][y,hours_per_subperiod*(w-1)+1] == (1-self_discharge(gen[y]))*(EP[:vS][y,hours_per_subperiod*w]-vdSOC[y,w])
- -(1/efficiency_down(gen[y])*EP[:vP][y,hours_per_subperiod*(w-1)+1])+(efficiency_up(gen[y])*EP[:vCHARGE][y,hours_per_subperiod*(w-1)+1]))
-
- # Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
- ## Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cSoCBalLongDurationStorage[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vSOCw[y, mod1(r+1, NPeriods)] == vSOCw[y,r] + vdSOC[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # Storage at beginning of each modeled period cannot exceed installed energy capacity
- @constraint(EP, cSoCBalLongDurationStorageUpper[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vSOCw[y,r] <= EP[:eTotalCapEnergy][y])
-
- # Initial storage level for representative periods must also adhere to sub-period storage inventory balance
- # Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cSoCBalLongDurationStorageSub[y in STOR_LONG_DURATION, r in REP_PERIODS_INDEX],
- vSOCw[y,r] == EP[:vS][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]] - vdSOC[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # Capacity Reserve Margin policy
- if CapacityReserveMargin > 0
- # LDES Constraints for storage held in reserve
-
- # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
- # Modified initial virtual state of storage for long-duration storage - initialize wth value carried over from last period
- # Alternative to cVSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
- # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @constraint(EP, cVSoCBalLongDurationStorageStart[w=1:REP_PERIOD, y in STOR_LONG_DURATION],
- EP[:vCAPRES_socinreserve][y,hours_per_subperiod*(w-1)+1] == (1-self_discharge(gen[y]))*(EP[:vCAPRES_socinreserve][y,hours_per_subperiod*w]-vCAPRES_dsoc[y,w])
- +(1/efficiency_down(gen[y])*EP[:vCAPRES_discharge][y,hours_per_subperiod*(w-1)+1])-(efficiency_up(gen[y])*EP[:vCAPRES_charge][y,hours_per_subperiod*(w-1)+1]))
-
- # Storage held in reserve at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
- ## Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cVSoCBalLongDurationStorage[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vCAPRES_socw[y,mod1(r+1, NPeriods)] == vCAPRES_socw[y,r] + vCAPRES_dsoc[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # Initial reserve storage level for representative periods must also adhere to sub-period storage inventory balance
- # Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cVSoCBalLongDurationStorageSub[y in STOR_LONG_DURATION, r in REP_PERIODS_INDEX],
- vCAPRES_socw[y,r] == EP[:vCAPRES_socinreserve][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]] - vCAPRES_dsoc[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # energy held in reserve at the beginning of each modeled period acts as a lower bound on the total energy held in storage
- @constraint(EP, cSOCMinCapResLongDurationStorage[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX], vSOCw[y,r] >= vCAPRES_socw[y,r])
- end
+ println("Long Duration Storage Module")
+
+ gen = inputs["RESOURCES"]
+
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
+
+ REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
+
+ STOR_LONG_DURATION = inputs["STOR_LONG_DURATION"]
+
+ hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
+
+ dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
+ NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
+
+ MODELED_PERIODS_INDEX = 1:NPeriods
+ REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!, :Rep_Period] .== MODELED_PERIODS_INDEX]
+
+ ### Variables ###
+
+ # Variables to define inter-period energy transferred between modeled periods
+
+ # State of charge of storage at beginning of each modeled period n
+ @variable(EP, vSOCw[y in STOR_LONG_DURATION, n in MODELED_PERIODS_INDEX]>=0)
+
+ # Build up in storage inventory over each representative period w
+ # Build up inventory can be positive or negative
+ @variable(EP, vdSOC[y in STOR_LONG_DURATION, w = 1:REP_PERIOD])
+
+ if CapacityReserveMargin > 0
+ # State of charge held in reserve for storage at beginning of each modeled period n
+ @variable(EP, vCAPRES_socw[y in STOR_LONG_DURATION, n in MODELED_PERIODS_INDEX]>=0)
+
+ # Build up in storage inventory held in reserve over each representative period w
+ # Build up inventory can be positive or negative
+ @variable(EP, vCAPRES_dsoc[y in STOR_LONG_DURATION, w = 1:REP_PERIOD])
+ end
+
+ ### Constraints ###
+
+ # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
+    # Modified initial state of storage for long-duration storage - initialize with value carried over from last period
+ # Alternative to cSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
+ # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
+ @constraint(EP,
+ cSoCBalLongDurationStorageStart[w = 1:REP_PERIOD, y in STOR_LONG_DURATION],
+ EP[:vS][y,
+ hours_per_subperiod * (w - 1) + 1]==(1 - self_discharge(gen[y])) *
+ (EP[:vS][y, hours_per_subperiod * w] -
+ vdSOC[y, w])
+ -
+ (1 / efficiency_down(gen[y]) * EP[:vP][
+ y, hours_per_subperiod * (w - 1) + 1]) +
+ (efficiency_up(gen[y]) * EP[:vCHARGE][
+ y, hours_per_subperiod * (w - 1) + 1]))
+
+ # Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
+ ## Multiply storage build up term from prior period with corresponding weight
+ @constraint(EP,
+ cSoCBalLongDurationStorage[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX],
+ vSOCw[y,
+ mod1(r + 1, NPeriods)]==vSOCw[y, r] +
+ vdSOC[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # Storage at beginning of each modeled period cannot exceed installed energy capacity
+ @constraint(EP,
+ cSoCBalLongDurationStorageUpper[y in STOR_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vSOCw[y, r]<=EP[:eTotalCapEnergy][y])
+
+ # Initial storage level for representative periods must also adhere to sub-period storage inventory balance
+ # Initial storage = Final storage - change in storage inventory across representative period
+ @constraint(EP,
+ cSoCBalLongDurationStorageSub[y in STOR_LONG_DURATION, r in REP_PERIODS_INDEX],
+ vSOCw[y,
+ r]==EP[:vS][y, hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]] -
+ vdSOC[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # Capacity Reserve Margin policy
+ if CapacityReserveMargin > 0
+ # LDES Constraints for storage held in reserve
+
+ # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
+        # Modified initial virtual state of storage for long-duration storage - initialize with value carried over from last period
+ # Alternative to cVSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
+ # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
+ @constraint(EP,
+ cVSoCBalLongDurationStorageStart[w = 1:REP_PERIOD, y in STOR_LONG_DURATION],
+ EP[:vCAPRES_socinreserve][y,
+ hours_per_subperiod * (w - 1) + 1]==(1 - self_discharge(gen[y])) *
+ (EP[:vCAPRES_socinreserve][
+ y, hours_per_subperiod * w] - vCAPRES_dsoc[y, w])
+ +
+ (1 / efficiency_down(gen[y]) *
+ EP[:vCAPRES_discharge][
+ y, hours_per_subperiod * (w - 1) + 1]) -
+ (efficiency_up(gen[y]) *
+ EP[:vCAPRES_charge][
+ y, hours_per_subperiod * (w - 1) + 1]))
+
+ # Storage held in reserve at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
+ ## Multiply storage build up term from prior period with corresponding weight
+ @constraint(EP,
+ cVSoCBalLongDurationStorage[y in STOR_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vCAPRES_socw[y,
+ mod1(r + 1, NPeriods)]==vCAPRES_socw[y, r] +
+ vCAPRES_dsoc[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # Initial reserve storage level for representative periods must also adhere to sub-period storage inventory balance
+ # Initial storage = Final storage - change in storage inventory across representative period
+ @constraint(EP,
+ cVSoCBalLongDurationStorageSub[y in STOR_LONG_DURATION, r in REP_PERIODS_INDEX],
+ vCAPRES_socw[y,r]==EP[:vCAPRES_socinreserve][y,
+ hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]] -
+ vCAPRES_dsoc[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # energy held in reserve at the beginning of each modeled period acts as a lower bound on the total energy held in storage
+ @constraint(EP,
+ cSOCMinCapResLongDurationStorage[y in STOR_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vSOCw[y, r]>=vCAPRES_socw[y, r])
+ end
end
diff --git a/src/model/resources/storage/storage.jl b/src/model/resources/storage/storage.jl
index baa03217aa..c4f9e6d90d 100644
--- a/src/model/resources/storage/storage.jl
+++ b/src/model/resources/storage/storage.jl
@@ -129,55 +129,65 @@ Finally, the constraints on maximum discharge rate are replaced by the following
The above reserve related constraints are established by ```storage_all_operational_reserves!()``` in ```storage_all.jl```
"""
function storage!(EP::Model, inputs::Dict, setup::Dict)
+ println("Storage Resources Module")
+ gen = inputs["RESOURCES"]
+ T = inputs["T"]
+ STOR_ALL = inputs["STOR_ALL"]
- println("Storage Resources Module")
- gen = inputs["RESOURCES"]
- T = inputs["T"]
- STOR_ALL = inputs["STOR_ALL"]
-
- p = inputs["hours_per_subperiod"]
+ p = inputs["hours_per_subperiod"]
rep_periods = inputs["REP_PERIOD"]
- EnergyShareRequirement = setup["EnergyShareRequirement"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
- IncludeLossesInESR = setup["IncludeLossesInESR"]
- StorageVirtualDischarge = setup["StorageVirtualDischarge"]
-
- if !isempty(STOR_ALL)
- investment_energy!(EP, inputs, setup)
- storage_all!(EP, inputs, setup)
+ EnergyShareRequirement = setup["EnergyShareRequirement"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
+ IncludeLossesInESR = setup["IncludeLossesInESR"]
+ StorageVirtualDischarge = setup["StorageVirtualDischarge"]
- # Include Long Duration Storage only when modeling representative periods and long-duration storage
- if rep_periods > 1 && !isempty(inputs["STOR_LONG_DURATION"])
- long_duration_storage!(EP, inputs, setup)
- end
- end
+ if !isempty(STOR_ALL)
+ investment_energy!(EP, inputs, setup)
+ storage_all!(EP, inputs, setup)
- if !isempty(inputs["STOR_ASYMMETRIC"])
- investment_charge!(EP, inputs, setup)
- storage_asymmetric!(EP, inputs, setup)
- end
+ # Include Long Duration Storage only when modeling representative periods and long-duration storage
+ if rep_periods > 1 && !isempty(inputs["STOR_LONG_DURATION"])
+ long_duration_storage!(EP, inputs, setup)
+ end
+ end
- if !isempty(inputs["STOR_SYMMETRIC"])
- storage_symmetric!(EP, inputs, setup)
- end
+ if !isempty(inputs["STOR_ASYMMETRIC"])
+ investment_charge!(EP, inputs, setup)
+ storage_asymmetric!(EP, inputs, setup)
+ end
- # ESR Lossses
- if EnergyShareRequirement >= 1
- if IncludeLossesInESR == 1
- @expression(EP, eESRStor[ESR=1:inputs["nESR"]], sum(inputs["dfESR"][z,ESR]*sum(EP[:eELOSS][y] for y in intersect(resources_in_zone_by_rid(gen,z),STOR_ALL)) for z=findall(x->x>0,inputs["dfESR"][:,ESR])))
- add_similar_to_expression!(EP[:eESR], -eESRStor)
- end
- end
+ if !isempty(inputs["STOR_SYMMETRIC"])
+ storage_symmetric!(EP, inputs, setup)
+ end
- # Capacity Reserves Margin policy
- if CapacityReserveMargin > 0
- @expression(EP, eCapResMarBalanceStor[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * (EP[:vP][y,t] - EP[:vCHARGE][y,t]) for y in STOR_ALL))
- if StorageVirtualDischarge > 0
- @expression(EP, eCapResMarBalanceStorVirtual[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * (EP[:vCAPRES_discharge][y,t] - EP[:vCAPRES_charge][y,t]) for y in STOR_ALL))
- add_similar_to_expression!(eCapResMarBalanceStor,eCapResMarBalanceStorVirtual)
- end
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceStor)
- end
+    # ESR Losses
+ if EnergyShareRequirement >= 1
+ if IncludeLossesInESR == 1
+ @expression(EP,
+ eESRStor[ESR = 1:inputs["nESR"]],
+ sum(inputs["dfESR"][z, ESR] * sum(EP[:eELOSS][y]
+ for y in intersect(resources_in_zone_by_rid(gen, z), STOR_ALL))
+ for z in findall(x -> x > 0, inputs["dfESR"][:, ESR])))
+ add_similar_to_expression!(EP[:eESR], -eESRStor)
+ end
+ end
+ # Capacity Reserves Margin policy
+ if CapacityReserveMargin > 0
+ @expression(EP,
+ eCapResMarBalanceStor[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) * (EP[:vP][y, t] - EP[:vCHARGE][y, t])
+ for y in STOR_ALL))
+ if StorageVirtualDischarge > 0
+ @expression(EP,
+ eCapResMarBalanceStorVirtual[res = 1:inputs["NCapacityReserveMargin"],
+ t = 1:T],
+ sum(derating_factor(gen[y], tag = res) *
+ (EP[:vCAPRES_discharge][y, t] - EP[:vCAPRES_charge][y, t])
+ for y in STOR_ALL))
+ add_similar_to_expression!(eCapResMarBalanceStor, eCapResMarBalanceStorVirtual)
+ end
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceStor)
+ end
end
diff --git a/src/model/resources/storage/storage_all.jl b/src/model/resources/storage/storage_all.jl
index 13c433235c..84000f190c 100644
--- a/src/model/resources/storage/storage_all.jl
+++ b/src/model/resources/storage/storage_all.jl
@@ -4,155 +4,204 @@
Sets up variables and constraints common to all storage resources. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_all!(EP::Model, inputs::Dict, setup::Dict)
- # Setup variables, constraints, and expressions common to all storage resources
- println("Storage Core Resources Module")
+ # Setup variables, constraints, and expressions common to all storage resources
+ println("Storage Core Resources Module")
- gen = inputs["RESOURCES"]
- OperationalReserves = setup["OperationalReserves"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ gen = inputs["RESOURCES"]
+ OperationalReserves = setup["OperationalReserves"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
+
+ virtual_discharge_cost = inputs["VirtualChargeDischargeCost"]
+
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+
+ STOR_ALL = inputs["STOR_ALL"]
+ STOR_SHORT_DURATION = inputs["STOR_SHORT_DURATION"]
+ representative_periods = inputs["REP_PERIOD"]
+
+ START_SUBPERIODS = inputs["START_SUBPERIODS"]
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
- virtual_discharge_cost = inputs["VirtualChargeDischargeCost"]
+ hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ ### Variables ###
- STOR_ALL = inputs["STOR_ALL"]
- STOR_SHORT_DURATION = inputs["STOR_SHORT_DURATION"]
- representative_periods = inputs["REP_PERIOD"]
+ # Storage level of resource "y" at hour "t" [MWh] on zone "z" - unbounded
+ @variable(EP, vS[y in STOR_ALL, t = 1:T]>=0)
- START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ # Energy withdrawn from grid by resource "y" at hour "t" [MWh] on zone "z"
+ @variable(EP, vCHARGE[y in STOR_ALL, t = 1:T]>=0)
- hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
-
- ### Variables ###
-
- # Storage level of resource "y" at hour "t" [MWh] on zone "z" - unbounded
- @variable(EP, vS[y in STOR_ALL, t=1:T] >= 0);
-
- # Energy withdrawn from grid by resource "y" at hour "t" [MWh] on zone "z"
- @variable(EP, vCHARGE[y in STOR_ALL, t=1:T] >= 0);
-
- if CapacityReserveMargin > 0
- # Virtual discharge contributing to capacity reserves at timestep t for storage cluster y
- @variable(EP, vCAPRES_discharge[y in STOR_ALL, t=1:T] >= 0)
-
- # Virtual charge contributing to capacity reserves at timestep t for storage cluster y
- @variable(EP, vCAPRES_charge[y in STOR_ALL, t=1:T] >= 0)
-
- # Total state of charge being held in reserve at timestep t for storage cluster y
- @variable(EP, vCAPRES_socinreserve[y in STOR_ALL, t=1:T] >= 0)
- end
-
- ### Expressions ###
-
- # Energy losses related to technologies (increase in effective demand)
- @expression(EP, eELOSS[y in STOR_ALL], sum(inputs["omega"][t]*EP[:vCHARGE][y,t] for t in 1:T) - sum(inputs["omega"][t]*EP[:vP][y,t] for t in 1:T))
-
- ## Objective Function Expressions ##
-
- #Variable costs of "charging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_in[y in STOR_ALL,t=1:T], inputs["omega"][t]*var_om_cost_per_mwh_in(gen[y])*vCHARGE[y,t])
-
- # Sum individual resource contributions to variable charging costs to get total variable charging costs
- @expression(EP, eTotalCVarInT[t=1:T], sum(eCVar_in[y,t] for y in STOR_ALL))
- @expression(EP, eTotalCVarIn, sum(eTotalCVarInT[t] for t in 1:T))
- add_to_expression!(EP[:eObj], eTotalCVarIn)
-
- if CapacityReserveMargin > 0
- #Variable costs of "virtual charging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_in_virtual[y in STOR_ALL,t=1:T], inputs["omega"][t]*virtual_discharge_cost*vCAPRES_charge[y,t])
- @expression(EP, eTotalCVarInT_virtual[t=1:T], sum(eCVar_in_virtual[y,t] for y in STOR_ALL))
- @expression(EP, eTotalCVarIn_virtual, sum(eTotalCVarInT_virtual[t] for t in 1:T))
- EP[:eObj] += eTotalCVarIn_virtual
-
- #Variable costs of "virtual discharging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_out_virtual[y in STOR_ALL,t=1:T], inputs["omega"][t]*virtual_discharge_cost*vCAPRES_discharge[y,t])
- @expression(EP, eTotalCVarOutT_virtual[t=1:T], sum(eCVar_out_virtual[y,t] for y in STOR_ALL))
- @expression(EP, eTotalCVarOut_virtual, sum(eTotalCVarOutT_virtual[t] for t in 1:T))
- EP[:eObj] += eTotalCVarOut_virtual
- end
-
- ## Power Balance Expressions ##
-
- # Term to represent net dispatch from storage in any period
- @expression(EP, ePowerBalanceStor[t=1:T, z=1:Z],
- sum(EP[:vP][y,t]-EP[:vCHARGE][y,t] for y in intersect(resources_in_zone_by_rid(gen,z),STOR_ALL))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceStor)
-
- ### Constraints ###
-
- ## Storage energy capacity and state of charge related constraints:
-
- # Links state of charge in first time step with decisions in last time step of each subperiod
- # We use a modified formulation of this constraint (cSoCBalLongDurationStorageStart) when operations wrapping and long duration storage are being modeled
- if representative_periods > 1 && !isempty(inputs["STOR_LONG_DURATION"])
- CONSTRAINTSET = STOR_SHORT_DURATION
- else
- CONSTRAINTSET = STOR_ALL
- end
- @constraint(EP, cSoCBalStart[t in START_SUBPERIODS, y in CONSTRAINTSET], EP[:vS][y,t] ==
- EP[:vS][y,t+hours_per_subperiod-1] - (1/efficiency_down(gen[y]) * EP[:vP][y,t])
- + (efficiency_up(gen[y])*EP[:vCHARGE][y,t]) - (self_discharge(gen[y]) * EP[:vS][y,t+hours_per_subperiod-1]))
-
- @constraints(EP, begin
-
- # Maximum energy stored must be less than energy capacity
- [y in STOR_ALL, t in 1:T], EP[:vS][y,t] <= EP[:eTotalCapEnergy][y]
-
- # energy stored for the next hour
- cSoCBalInterior[t in INTERIOR_SUBPERIODS, y in STOR_ALL], EP[:vS][y,t] ==
- EP[:vS][y,t-1]-(1/efficiency_down(gen[y])*EP[:vP][y,t])+(efficiency_up(gen[y])*EP[:vCHARGE][y,t])-(self_discharge(gen[y])*EP[:vS][y,t-1])
- end)
-
- # Storage discharge and charge power (and reserve contribution) related constraints:
- if OperationalReserves == 1
- storage_all_operational_reserves!(EP, inputs, setup)
- else
- if CapacityReserveMargin > 0
- # Note: maximum charge rate is also constrained by maximum charge power capacity, but as this differs by storage type,
- # this constraint is set in functions below for each storage type
-
- # Maximum discharging rate must be less than power rating OR available stored energy in the prior period, whichever is less
- # wrapping from end of sample period to start of sample period for energy capacity constraint
- @constraints(EP, begin
- [y in STOR_ALL, t=1:T], EP[:vP][y,t] + EP[:vCAPRES_discharge][y,t] <= EP[:eTotalCap][y]
- [y in STOR_ALL, t=1:T], EP[:vP][y,t] + EP[:vCAPRES_discharge][y,t] <= EP[:vS][y, hoursbefore(hours_per_subperiod,t,1)]*efficiency_down(gen[y])
- end)
- else
- @constraints(EP, begin
- [y in STOR_ALL, t=1:T], EP[:vP][y,t] <= EP[:eTotalCap][y]
- [y in STOR_ALL, t=1:T], EP[:vP][y,t] <= EP[:vS][y, hoursbefore(hours_per_subperiod,t,1)]*efficiency_down(gen[y])
- end)
- end
- end
-
- # From CO2 Policy module
- expr = @expression(EP, [z=1:Z], sum(EP[:eELOSS][y] for y in intersect(STOR_ALL, resources_in_zone_by_rid(gen,z))))
- add_similar_to_expression!(EP[:eELOSSByZone], expr)
-
- # Capacity Reserve Margin policy
- if CapacityReserveMargin > 0
- # Constraints governing energy held in reserve when storage makes virtual capacity reserve margin contributions:
-
- # Links energy held in reserve in first time step with decisions in last time step of each subperiod
- # We use a modified formulation of this constraint (cVSoCBalLongDurationStorageStart) when operations wrapping and long duration storage are being modeled
- @constraint(EP, cVSoCBalStart[t in START_SUBPERIODS, y in CONSTRAINTSET], EP[:vCAPRES_socinreserve][y,t] ==
- EP[:vCAPRES_socinreserve][y,t+hours_per_subperiod-1] + (1/efficiency_down(gen[y]) * EP[:vCAPRES_discharge][y,t])
- - (efficiency_up(gen[y])*EP[:vCAPRES_charge][y,t]) - (self_discharge(gen[y]) * EP[:vCAPRES_socinreserve][y,t+hours_per_subperiod-1]))
-
- # energy held in reserve for the next hour
- @constraint(EP, cVSoCBalInterior[t in INTERIOR_SUBPERIODS, y in STOR_ALL], EP[:vCAPRES_socinreserve][y,t] ==
- EP[:vCAPRES_socinreserve][y,t-1]+(1/efficiency_down(gen[y])*EP[:vCAPRES_discharge][y,t])-(efficiency_up(gen[y])*EP[:vCAPRES_charge][y,t])-(self_discharge(gen[y])*EP[:vCAPRES_socinreserve][y,t-1]))
-
- # energy held in reserve acts as a lower bound on the total energy held in storage
- @constraint(EP, cSOCMinCapRes[t in 1:T, y in STOR_ALL], EP[:vS][y,t] >= EP[:vCAPRES_socinreserve][y,t])
- end
+ if CapacityReserveMargin > 0
+ # Virtual discharge contributing to capacity reserves at timestep t for storage cluster y
+ @variable(EP, vCAPRES_discharge[y in STOR_ALL, t = 1:T]>=0)
+
+ # Virtual charge contributing to capacity reserves at timestep t for storage cluster y
+ @variable(EP, vCAPRES_charge[y in STOR_ALL, t = 1:T]>=0)
+
+ # Total state of charge being held in reserve at timestep t for storage cluster y
+ @variable(EP, vCAPRES_socinreserve[y in STOR_ALL, t = 1:T]>=0)
+ end
+
+ ### Expressions ###
+
+ # Energy losses related to technologies (increase in effective demand)
+ @expression(EP,
+ eELOSS[y in STOR_ALL],
+ sum(inputs["omega"][t] * EP[:vCHARGE][y, t]
+ for t in 1:T)-sum(inputs["omega"][t] *
+ EP[:vP][y, t]
+ for t in 1:T))
+
+ ## Objective Function Expressions ##
+
+ #Variable costs of "charging" for technologies "y" during hour "t" in zone "z"
+ @expression(EP,
+ eCVar_in[y in STOR_ALL, t = 1:T],
+ inputs["omega"][t]*var_om_cost_per_mwh_in(gen[y])*vCHARGE[y, t])
+
+ # Sum individual resource contributions to variable charging costs to get total variable charging costs
+ @expression(EP, eTotalCVarInT[t = 1:T], sum(eCVar_in[y, t] for y in STOR_ALL))
+ @expression(EP, eTotalCVarIn, sum(eTotalCVarInT[t] for t in 1:T))
+ add_to_expression!(EP[:eObj], eTotalCVarIn)
+
+ if CapacityReserveMargin > 0
+ #Variable costs of "virtual charging" for technologies "y" during hour "t" in zone "z"
+ @expression(EP,
+ eCVar_in_virtual[y in STOR_ALL, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*vCAPRES_charge[y, t])
+ @expression(EP,
+ eTotalCVarInT_virtual[t = 1:T],
+ sum(eCVar_in_virtual[y, t] for y in STOR_ALL))
+ @expression(EP, eTotalCVarIn_virtual, sum(eTotalCVarInT_virtual[t] for t in 1:T))
+ EP[:eObj] += eTotalCVarIn_virtual
+
+ #Variable costs of "virtual discharging" for technologies "y" during hour "t" in zone "z"
+ @expression(EP,
+ eCVar_out_virtual[y in STOR_ALL, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*vCAPRES_discharge[y, t])
+ @expression(EP,
+ eTotalCVarOutT_virtual[t = 1:T],
+ sum(eCVar_out_virtual[y, t] for y in STOR_ALL))
+ @expression(EP, eTotalCVarOut_virtual, sum(eTotalCVarOutT_virtual[t] for t in 1:T))
+ EP[:eObj] += eTotalCVarOut_virtual
+ end
+
+ ## Power Balance Expressions ##
+
+ # Term to represent net dispatch from storage in any period
+ @expression(EP, ePowerBalanceStor[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] - EP[:vCHARGE][y, t]
+ for y in intersect(resources_in_zone_by_rid(gen, z), STOR_ALL)))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceStor)
+
+ ### Constraints ###
+
+ ## Storage energy capacity and state of charge related constraints:
+
+ # Links state of charge in first time step with decisions in last time step of each subperiod
+ # We use a modified formulation of this constraint (cSoCBalLongDurationStorageStart) when operations wrapping and long duration storage are being modeled
+ if representative_periods > 1 && !isempty(inputs["STOR_LONG_DURATION"])
+ CONSTRAINTSET = STOR_SHORT_DURATION
+ else
+ CONSTRAINTSET = STOR_ALL
+ end
+ @constraint(EP,
+ cSoCBalStart[t in START_SUBPERIODS, y in CONSTRAINTSET],
+ EP[:vS][y,
+ t]==
+ EP[:vS][y, t + hours_per_subperiod - 1] -
+ (1 / efficiency_down(gen[y]) * EP[:vP][y, t])
+ +
+ (efficiency_up(gen[y]) * EP[:vCHARGE][y, t]) -
+ (self_discharge(gen[y]) * EP[:vS][y, t + hours_per_subperiod - 1]))
+
+ @constraints(EP,
+ begin
+
+ # Maximum energy stored must be less than energy capacity
+ [y in STOR_ALL, t in 1:T], EP[:vS][y, t] <= EP[:eTotalCapEnergy][y]
+
+ # energy stored for the next hour
+ cSoCBalInterior[t in INTERIOR_SUBPERIODS, y in STOR_ALL],
+ EP[:vS][y, t] ==
+ EP[:vS][y, t - 1] - (1 / efficiency_down(gen[y]) * EP[:vP][y, t]) +
+ (efficiency_up(gen[y]) * EP[:vCHARGE][y, t]) -
+ (self_discharge(gen[y]) * EP[:vS][y, t - 1])
+ end)
+
+ # Storage discharge and charge power (and reserve contribution) related constraints:
+ if OperationalReserves == 1
+ storage_all_operational_reserves!(EP, inputs, setup)
+ else
+ if CapacityReserveMargin > 0
+ # Note: maximum charge rate is also constrained by maximum charge power capacity, but as this differs by storage type,
+ # this constraint is set in functions below for each storage type
+
+ # Maximum discharging rate must be less than power rating OR available stored energy in the prior period, whichever is less
+ # wrapping from end of sample period to start of sample period for energy capacity constraint
+ @constraints(EP,
+ begin
+ [y in STOR_ALL, t = 1:T],
+ EP[:vP][y, t] + EP[:vCAPRES_discharge][y, t] <= EP[:eTotalCap][y]
+ [y in STOR_ALL, t = 1:T],
+ EP[:vP][y, t] + EP[:vCAPRES_discharge][y, t] <=
+ EP[:vS][y, hoursbefore(hours_per_subperiod, t, 1)] *
+ efficiency_down(gen[y])
+ end)
+ else
+ @constraints(EP,
+ begin
+ [y in STOR_ALL, t = 1:T], EP[:vP][y, t] <= EP[:eTotalCap][y]
+ [y in STOR_ALL, t = 1:T],
+ EP[:vP][y, t] <=
+ EP[:vS][y, hoursbefore(hours_per_subperiod, t, 1)] *
+ efficiency_down(gen[y])
+ end)
+ end
+ end
+
+ # From CO2 Policy module
+ expr = @expression(EP,
+ [z = 1:Z],
+ sum(EP[:eELOSS][y] for y in intersect(STOR_ALL, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eELOSSByZone], expr)
+
+ # Capacity Reserve Margin policy
+ if CapacityReserveMargin > 0
+ # Constraints governing energy held in reserve when storage makes virtual capacity reserve margin contributions:
+
+ # Links energy held in reserve in first time step with decisions in last time step of each subperiod
+ # We use a modified formulation of this constraint (cVSoCBalLongDurationStorageStart) when operations wrapping and long duration storage are being modeled
+ @constraint(EP,
+ cVSoCBalStart[t in START_SUBPERIODS, y in CONSTRAINTSET],
+ EP[:vCAPRES_socinreserve][y,
+ t]==
+ EP[:vCAPRES_socinreserve][y, t + hours_per_subperiod - 1] +
+ (1 / efficiency_down(gen[y]) * EP[:vCAPRES_discharge][y, t])
+ -
+ (efficiency_up(gen[y]) * EP[:vCAPRES_charge][y, t]) - (self_discharge(gen[y]) *
+ EP[:vCAPRES_socinreserve][y, t + hours_per_subperiod - 1]))
+
+ # energy held in reserve for the next hour
+ @constraint(EP,
+ cVSoCBalInterior[t in INTERIOR_SUBPERIODS, y in STOR_ALL],
+ EP[:vCAPRES_socinreserve][y,
+ t]==
+ EP[:vCAPRES_socinreserve][y, t - 1] +
+ (1 / efficiency_down(gen[y]) * EP[:vCAPRES_discharge][y, t]) -
+ (efficiency_up(gen[y]) * EP[:vCAPRES_charge][y, t]) -
+ (self_discharge(gen[y]) * EP[:vCAPRES_socinreserve][y, t - 1]))
+
+ # energy held in reserve acts as a lower bound on the total energy held in storage
+ @constraint(EP,
+ cSOCMinCapRes[t in 1:T, y in STOR_ALL],
+ EP[:vS][y, t]>=EP[:vCAPRES_socinreserve][y, t])
+ end
end
function storage_all_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
-
gen = inputs["RESOURCES"]
T = inputs["T"]
p = inputs["hours_per_subperiod"]
@@ -176,27 +225,35 @@ function storage_all_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
eTotalCap = EP[:eTotalCap]
eTotalCapEnergy = EP[:eTotalCapEnergy]
- # Maximum storage contribution to reserves is a specified fraction of installed capacity
- @constraint(EP, [y in STOR_REG, t in 1:T], vREG[y, t] <= reg_max(gen[y]) * eTotalCap[y])
- @constraint(EP, [y in STOR_RSV, t in 1:T], vRSV[y, t] <= rsv_max(gen[y]) * eTotalCap[y])
+ # Maximum storage contribution to reserves is a specified fraction of installed capacity
+ @constraint(EP, [y in STOR_REG, t in 1:T], vREG[y, t]<=reg_max(gen[y]) * eTotalCap[y])
+ @constraint(EP, [y in STOR_RSV, t in 1:T], vRSV[y, t]<=rsv_max(gen[y]) * eTotalCap[y])
- # Actual contribution to regulation and reserves is sum of auxilary variables for portions contributed during charging and discharging
- @constraint(EP, [y in STOR_REG, t in 1:T], vREG[y, t] == vREG_charge[y, t] + vREG_discharge[y, t])
- @constraint(EP, [y in STOR_RSV, t in 1:T], vRSV[y, t] == vRSV_charge[y, t] + vRSV_discharge[y, t])
+    # Actual contribution to regulation and reserves is sum of auxiliary variables for portions contributed during charging and discharging
+ @constraint(EP,
+ [y in STOR_REG, t in 1:T],
+ vREG[y, t]==vREG_charge[y, t] + vREG_discharge[y, t])
+ @constraint(EP,
+ [y in STOR_RSV, t in 1:T],
+ vRSV[y, t]==vRSV_charge[y, t] + vRSV_discharge[y, t])
# Maximum charging rate plus contribution to reserves up must be greater than zero
# Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
expr = extract_time_series_to_expression(vCHARGE, STOR_ALL)
add_similar_to_expression!(expr[STOR_REG, :], -vREG_charge[STOR_REG, :])
add_similar_to_expression!(expr[STOR_RSV, :], -vRSV_charge[STOR_RSV, :])
- @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t] >= 0)
+ @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t]>=0)
# Maximum discharging rate and contribution to reserves down must be greater than zero
# Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
- @constraint(EP, [y in STOR_REG, t in 1:T], vP[y, t] - vREG_discharge[y, t] >= 0)
+ @constraint(EP, [y in STOR_REG, t in 1:T], vP[y, t] - vREG_discharge[y, t]>=0)
# Maximum charging rate plus contribution to regulation down must be less than available storage capacity
- @constraint(EP, [y in STOR_REG, t in 1:T], efficiency_up(gen[y])*(vCHARGE[y, t]+vREG_charge[y, t]) <= eTotalCapEnergy[y]-vS[y, hoursbefore(p,t,1)])
+ @constraint(EP,
+ [y in STOR_REG, t in 1:T],
+ efficiency_up(gen[y]) *
+ (vCHARGE[y, t] +
+ vREG_charge[y, t])<=eTotalCapEnergy[y] - vS[y, hoursbefore(p, t, 1)])
# Note: maximum charge rate is also constrained by maximum charge power capacity, but as this differs by storage type,
# this constraint is set in functions below for each storage type
@@ -208,7 +265,9 @@ function storage_all_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
add_similar_to_expression!(expr[STOR_ALL, :], vCAPRES_discharge[STOR_ALL, :])
end
# Maximum discharging rate and contribution to reserves up must be less than power rating
- @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t] <= eTotalCap[y])
+ @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t]<=eTotalCap[y])
# Maximum discharging rate and contribution to reserves up must be less than available stored energy in prior period
- @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t] <= vS[y, hoursbefore(p,t,1)] * efficiency_down(gen[y]))
+ @constraint(EP,
+ [y in STOR_ALL, t in 1:T],
+ expr[y, t]<=vS[y, hoursbefore(p, t, 1)] * efficiency_down(gen[y]))
end
diff --git a/src/model/resources/storage/storage_asymmetric.jl b/src/model/resources/storage/storage_asymmetric.jl
index f77fe0fa23..8554d129e8 100644
--- a/src/model/resources/storage/storage_asymmetric.jl
+++ b/src/model/resources/storage/storage_asymmetric.jl
@@ -4,34 +4,37 @@
Sets up variables and constraints specific to storage resources with asymmetric charge and discharge capacities. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_asymmetric!(EP::Model, inputs::Dict, setup::Dict)
- # Set up additional variables, constraints, and expressions associated with storage resources with asymmetric charge & discharge capacity
- # (e.g. most chemical, thermal, and mechanical storage options with distinct charge & discharge components/processes)
- # STOR = 2 corresponds to storage with distinct power and energy capacity decisions and distinct charge and discharge power capacity decisions/ratings
+ # Set up additional variables, constraints, and expressions associated with storage resources with asymmetric charge & discharge capacity
+ # (e.g. most chemical, thermal, and mechanical storage options with distinct charge & discharge components/processes)
+ # STOR = 2 corresponds to storage with distinct power and energy capacity decisions and distinct charge and discharge power capacity decisions/ratings
- println("Storage Resources with Asmymetric Charge/Discharge Capacity Module")
+ println("Storage Resources with Asmymetric Charge/Discharge Capacity Module")
- OperationalReserves = setup["OperationalReserves"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ OperationalReserves = setup["OperationalReserves"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"]
+ STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"]
- ### Constraints ###
-
- # Storage discharge and charge power (and reserve contribution) related constraints for symmetric storage resources:
- if OperationalReserves == 1
- storage_asymmetric_operational_reserves!(EP, inputs, setup)
- else
- if CapacityReserveMargin > 0
- # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than charge power rating
- @constraint(EP, [y in STOR_ASYMMETRIC, t in 1:T], EP[:vCHARGE][y,t] + EP[:vCAPRES_charge][y,t] <= EP[:eTotalCapCharge][y])
- else
- # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than charge power rating
- @constraint(EP, [y in STOR_ASYMMETRIC, t in 1:T], EP[:vCHARGE][y,t] <= EP[:eTotalCapCharge][y])
- end
- end
+ ### Constraints ###
+    # Storage discharge and charge power (and reserve contribution) related constraints for asymmetric storage resources:
+ if OperationalReserves == 1
+ storage_asymmetric_operational_reserves!(EP, inputs, setup)
+ else
+ if CapacityReserveMargin > 0
+ # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than charge power rating
+ @constraint(EP,
+ [y in STOR_ASYMMETRIC, t in 1:T],
+ EP[:vCHARGE][y, t] + EP[:vCAPRES_charge][y, t]<=EP[:eTotalCapCharge][y])
+ else
+ # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than charge power rating
+ @constraint(EP,
+ [y in STOR_ASYMMETRIC, t in 1:T],
+ EP[:vCHARGE][y, t]<=EP[:eTotalCapCharge][y])
+ end
+ end
end
@doc raw"""
@@ -40,12 +43,11 @@ end
Sets up variables and constraints specific to storage resources with asymmetric charge and discharge capacities when reserves are modeled. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_asymmetric_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
+ T = inputs["T"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"] > 0
- T = inputs["T"]
- CapacityReserveMargin = setup["CapacityReserveMargin"] > 0
-
- STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"]
- STOR_ASYM_REG = intersect(STOR_ASYMMETRIC, inputs["REG"]) # Set of asymmetric storage resources with REG reserves
+ STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"]
+ STOR_ASYM_REG = intersect(STOR_ASYMMETRIC, inputs["REG"]) # Set of asymmetric storage resources with REG reserves
vCHARGE = EP[:vCHARGE]
vREG_charge = EP[:vREG_charge]
@@ -55,7 +57,8 @@ function storage_asymmetric_operational_reserves!(EP::Model, inputs::Dict, setup
add_similar_to_expression!(expr[STOR_ASYM_REG, :], vREG_charge[STOR_ASYM_REG, :])
if CapacityReserveMargin
vCAPRES_charge = EP[:vCAPRES_charge]
- add_similar_to_expression!(expr[STOR_ASYMMETRIC, :], vCAPRES_charge[STOR_ASYMMETRIC, :])
+ add_similar_to_expression!(expr[STOR_ASYMMETRIC, :],
+ vCAPRES_charge[STOR_ASYMMETRIC, :])
end
- @constraint(EP, [y in STOR_ASYMMETRIC, t in 1:T], expr[y, t] <= eTotalCapCharge[y])
+ @constraint(EP, [y in STOR_ASYMMETRIC, t in 1:T], expr[y, t]<=eTotalCapCharge[y])
end
diff --git a/src/model/resources/storage/storage_symmetric.jl b/src/model/resources/storage/storage_symmetric.jl
index 3ac73f2ed2..3c20d2368b 100644
--- a/src/model/resources/storage/storage_symmetric.jl
+++ b/src/model/resources/storage/storage_symmetric.jl
@@ -4,40 +4,44 @@
Sets up variables and constraints specific to storage resources with symmetric charge and discharge capacities. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_symmetric!(EP::Model, inputs::Dict, setup::Dict)
- # Set up additional variables, constraints, and expressions associated with storage resources with symmetric charge & discharge capacity
- # (e.g. most electrochemical batteries that use same components for charge & discharge)
- # STOR = 1 corresponds to storage with distinct power and energy capacity decisions but symmetric charge/discharge power ratings
+ # Set up additional variables, constraints, and expressions associated with storage resources with symmetric charge & discharge capacity
+ # (e.g. most electrochemical batteries that use same components for charge & discharge)
+ # STOR = 1 corresponds to storage with distinct power and energy capacity decisions but symmetric charge/discharge power ratings
- println("Storage Resources with Symmetric Charge/Discharge Capacity Module")
+ println("Storage Resources with Symmetric Charge/Discharge Capacity Module")
- OperationalReserves = setup["OperationalReserves"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ OperationalReserves = setup["OperationalReserves"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- STOR_SYMMETRIC = inputs["STOR_SYMMETRIC"]
+ STOR_SYMMETRIC = inputs["STOR_SYMMETRIC"]
- ### Constraints ###
-
- # Storage discharge and charge power (and reserve contribution) related constraints for symmetric storage resources:
- if OperationalReserves == 1
- storage_symmetric_operational_reserves!(EP, inputs, setup)
- else
- if CapacityReserveMargin > 0
- @constraints(EP, begin
- # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than symmetric power rating
- # Max simultaneous charge and discharge cannot be greater than capacity
- [y in STOR_SYMMETRIC, t in 1:T], EP[:vP][y,t]+EP[:vCHARGE][y,t]+EP[:vCAPRES_discharge][y,t]+EP[:vCAPRES_charge][y,t] <= EP[:eTotalCap][y]
- end)
- else
- @constraints(EP, begin
- # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than symmetric power rating
- # Max simultaneous charge and discharge cannot be greater than capacity
- [y in STOR_SYMMETRIC, t in 1:T], EP[:vP][y,t]+EP[:vCHARGE][y,t] <= EP[:eTotalCap][y]
- end)
- end
- end
+ ### Constraints ###
+ # Storage discharge and charge power (and reserve contribution) related constraints for symmetric storage resources:
+ if OperationalReserves == 1
+ storage_symmetric_operational_reserves!(EP, inputs, setup)
+ else
+ if CapacityReserveMargin > 0
+ @constraints(EP,
+ begin
+ # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than symmetric power rating
+ # Max simultaneous charge and discharge cannot be greater than capacity
+ [y in STOR_SYMMETRIC, t in 1:T],
+ EP[:vP][y, t] + EP[:vCHARGE][y, t] + EP[:vCAPRES_discharge][y, t] +
+ EP[:vCAPRES_charge][y, t] <= EP[:eTotalCap][y]
+ end)
+ else
+ @constraints(EP,
+ begin
+ # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than symmetric power rating
+ # Max simultaneous charge and discharge cannot be greater than capacity
+ [y in STOR_SYMMETRIC, t in 1:T],
+ EP[:vP][y, t] + EP[:vCHARGE][y, t] <= EP[:eTotalCap][y]
+ end)
+ end
+ end
end
@doc raw"""
@@ -46,14 +50,13 @@ end
Sets up variables and constraints specific to storage resources with symmetric charge and discharge capacities when reserves are modeled. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_symmetric_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
+ T = inputs["T"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"] > 0
- T = inputs["T"]
- CapacityReserveMargin = setup["CapacityReserveMargin"] > 0
-
- SYMMETRIC = inputs["STOR_SYMMETRIC"]
+ SYMMETRIC = inputs["STOR_SYMMETRIC"]
- REG = intersect(SYMMETRIC, inputs["REG"])
- RSV = intersect(SYMMETRIC, inputs["RSV"])
+ REG = intersect(SYMMETRIC, inputs["REG"])
+ RSV = intersect(SYMMETRIC, inputs["RSV"])
vP = EP[:vP]
vCHARGE = EP[:vCHARGE]
@@ -65,7 +68,7 @@ function storage_symmetric_operational_reserves!(EP::Model, inputs::Dict, setup:
# Maximum charging rate plus contribution to regulation down must be less than symmetric power rating
# Max simultaneous charge and discharge rates cannot be greater than symmetric charge/discharge capacity
- expr = @expression(EP, [y in SYMMETRIC, t in 1:T], vP[y, t] + vCHARGE[y, t])
+ expr = @expression(EP, [y in SYMMETRIC, t in 1:T], vP[y, t]+vCHARGE[y, t])
add_similar_to_expression!(expr[REG, :], vREG_charge[REG, :])
add_similar_to_expression!(expr[REG, :], vREG_discharge[REG, :])
add_similar_to_expression!(expr[RSV, :], vRSV_discharge[RSV, :])
@@ -75,5 +78,5 @@ function storage_symmetric_operational_reserves!(EP::Model, inputs::Dict, setup:
add_similar_to_expression!(expr[SYMMETRIC, :], vCAPRES_charge[SYMMETRIC, :])
add_similar_to_expression!(expr[SYMMETRIC, :], vCAPRES_discharge[SYMMETRIC, :])
end
- @constraint(EP, [y in SYMMETRIC, t in 1:T], expr[y, t] <= eTotalCap[y])
+ @constraint(EP, [y in SYMMETRIC, t in 1:T], expr[y, t]<=eTotalCap[y])
end
diff --git a/src/model/resources/thermal/thermal.jl b/src/model/resources/thermal/thermal.jl
index 894c2da2c0..2735a094a7 100644
--- a/src/model/resources/thermal/thermal.jl
+++ b/src/model/resources/thermal/thermal.jl
@@ -4,46 +4,39 @@ The thermal module creates decision variables, expressions, and constraints rela
This module uses the following 'helper' functions in separate files: ```thermal_commit()``` for thermal resources subject to unit commitment decisions and constraints (if any) and ```thermal_no_commit()``` for thermal resources not subject to unit commitment (if any).
"""
function thermal!(EP::Model, inputs::Dict, setup::Dict)
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- THERM_COMMIT = inputs["THERM_COMMIT"]
- THERM_NO_COMMIT = inputs["THERM_NO_COMMIT"]
- THERM_ALL = inputs["THERM_ALL"]
+ THERM_COMMIT = inputs["THERM_COMMIT"]
+ THERM_NO_COMMIT = inputs["THERM_NO_COMMIT"]
+ THERM_ALL = inputs["THERM_ALL"]
- if !isempty(THERM_COMMIT)
- thermal_commit!(EP, inputs, setup)
- end
+ if !isempty(THERM_COMMIT)
+ thermal_commit!(EP, inputs, setup)
+ end
- if !isempty(THERM_NO_COMMIT)
- thermal_no_commit!(EP, inputs, setup)
- end
- ##CO2 Polcy Module Thermal Generation by zone
- @expression(EP, eGenerationByThermAll[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(inputs["THERM_ALL"], resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByThermAll)
+ if !isempty(THERM_NO_COMMIT)
+ thermal_no_commit!(EP, inputs, setup)
+ end
+    ##CO2 Policy Module Thermal Generation by zone
+ @expression(EP, eGenerationByThermAll[z = 1:Z, t = 1:T], # the unit is GW
+ sum(EP[:vP][y, t]
+ for y in intersect(inputs["THERM_ALL"], resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByThermAll)
- # Capacity Reserves Margin policy
- if setup["CapacityReserveMargin"] > 0
+ # Capacity Reserves Margin policy
+ if setup["CapacityReserveMargin"] > 0
ncapres = inputs["NCapacityReserveMargin"]
@expression(EP, eCapResMarBalanceThermal[capres in 1:ncapres, t in 1:T],
- sum(derating_factor(gen[y], tag=capres) * EP[:eTotalCap][y] for y in THERM_ALL))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceThermal)
+ sum(derating_factor(gen[y], tag = capres) * EP[:eTotalCap][y]
+ for y in THERM_ALL))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceThermal)
MAINT = ids_with_maintenance(gen)
if !isempty(intersect(MAINT, THERM_COMMIT))
thermal_maintenance_capacity_reserve_margin_adjustment!(EP, inputs)
end
- end
-#=
- ##CO2 Polcy Module Thermal Generation by zone
- @expression(EP, eGenerationByThermAll[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(inputs["THERM_ALL"], resources_in_zone_by_rid(gen,z)))
- )
- EP[:eGenerationByZone] += eGenerationByThermAll
- =# ##From main
+ end
end
-
diff --git a/src/model/resources/thermal/thermal_commit.jl b/src/model/resources/thermal/thermal_commit.jl
index 84e3e7020d..ca7a74c7ba 100644
--- a/src/model/resources/thermal/thermal_commit.jl
+++ b/src/model/resources/thermal/thermal_commit.jl
@@ -125,20 +125,19 @@ Like with the ramping constraints, the minimum up and down constraint time also
It is recommended that users of GenX must use longer subperiods than the longest min up/down time if modeling UC. Otherwise, the model will report error.
"""
function thermal_commit!(EP::Model, inputs::Dict, setup::Dict)
+ println("Thermal (Unit Commitment) Resources Module")
- println("Thermal (Unit Commitment) Resources Module")
-
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- p = inputs["hours_per_subperiod"] #total number of hours per subperiod
+ p = inputs["hours_per_subperiod"] #total number of hours per subperiod
- THERM_COMMIT = inputs["THERM_COMMIT"]
+ THERM_COMMIT = inputs["THERM_COMMIT"]
- ### Expressions ###
+ ### Expressions ###
# These variables are used in the ramp-up and ramp-down expressions
reserves_term = @expression(EP, [y in THERM_COMMIT, t in 1:T], 0)
@@ -148,76 +147,100 @@ function thermal_commit!(EP::Model, inputs::Dict, setup::Dict)
THERM_COMMIT_REG = intersect(THERM_COMMIT, inputs["REG"]) # Set of thermal resources with regulation reserves
THERM_COMMIT_RSV = intersect(THERM_COMMIT, inputs["RSV"]) # Set of thermal resources with spinning reserves
regulation_term = @expression(EP, [y in THERM_COMMIT, t in 1:T],
- y ∈ THERM_COMMIT_REG ? EP[:vREG][y,t] - EP[:vREG][y, hoursbefore(p, t, 1)] : 0)
+ y ∈ THERM_COMMIT_REG ? EP[:vREG][y, t] - EP[:vREG][y, hoursbefore(p, t, 1)] : 0)
reserves_term = @expression(EP, [y in THERM_COMMIT, t in 1:T],
- y ∈ THERM_COMMIT_RSV ? EP[:vRSV][y,t] : 0)
+ y ∈ THERM_COMMIT_RSV ? EP[:vRSV][y, t] : 0)
+ end
+
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceThermCommit[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t]
+ for y in intersect(THERM_COMMIT, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceThermCommit)
+
+ ### Constraints ###
+
+ ### Capacitated limits on unit commitment decision variables (Constraints #1-3)
+ @constraints(EP,
+ begin
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vCOMMIT][y, t] <= EP[:eTotalCap][y] / cap_size(gen[y])
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vSTART][y, t] <= EP[:eTotalCap][y] / cap_size(gen[y])
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vSHUT][y, t] <= EP[:eTotalCap][y] / cap_size(gen[y])
+ end)
+
+ # Commitment state constraint linking startup and shutdown decisions (Constraint #4)
+ @constraints(EP,
+ begin
+ [y in THERM_COMMIT, t in 1:T],
+ EP[:vCOMMIT][y, t] ==
+ EP[:vCOMMIT][y, hoursbefore(p, t, 1)] + EP[:vSTART][y, t] - EP[:vSHUT][y, t]
+ end)
+
+ ### Maximum ramp up and down between consecutive hours (Constraints #5-6)
+
+ ## For Start Hours
+ # Links last time step with first time step, ensuring position in hour 1 is within eligible ramp of final hour position
+ # rampup constraints
+ @constraint(EP, [y in THERM_COMMIT, t in 1:T],
+ EP[:vP][y, t] - EP[:vP][y, hoursbefore(p, t, 1)] + regulation_term[y, t] +
+ reserves_term[y, t]<=ramp_up_fraction(gen[y]) * cap_size(gen[y]) *
+ (EP[:vCOMMIT][y, t] - EP[:vSTART][y, t])
+ +
+ min(inputs["pP_Max"][y, t],
+ max(min_power(gen[y]), ramp_up_fraction(gen[y]))) *
+ cap_size(gen[y]) * EP[:vSTART][y, t]
+ -
+ min_power(gen[y]) * cap_size(gen[y]) * EP[:vSHUT][y, t])
+
+ # rampdown constraints
+ @constraint(EP, [y in THERM_COMMIT, t in 1:T],
+ EP[:vP][y, hoursbefore(p, t, 1)] - EP[:vP][y, t] - regulation_term[y, t] +
+ reserves_term[y,
+ hoursbefore(p, t, 1)]<=ramp_down_fraction(gen[y]) * cap_size(gen[y]) *
+ (EP[:vCOMMIT][y, t] - EP[:vSTART][y, t])
+ -
+ min_power(gen[y]) * cap_size(gen[y]) * EP[:vSTART][y, t]
+ +
+ min(inputs["pP_Max"][y, t],
+ max(min_power(gen[y]), ramp_down_fraction(gen[y]))) *
+ cap_size(gen[y]) * EP[:vSHUT][y, t])
+
+ ### Minimum and maximum power output constraints (Constraints #7-8)
+ if setup["OperationalReserves"] == 1
+ # If modeling with regulation and reserves, constraints are established by thermal_commit_operational_reserves() function below
+ thermal_commit_operational_reserves!(EP, inputs)
+ else
+ @constraints(EP,
+ begin
+ # Minimum stable power generated per technology "y" at hour "t" > Min power
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vP][y, t] >= min_power(gen[y]) * cap_size(gen[y]) * EP[:vCOMMIT][y, t]
+
+ # Maximum power generated per technology "y" at hour "t" < Max power
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vP][y, t] <=
+ inputs["pP_Max"][y, t] * cap_size(gen[y]) * EP[:vCOMMIT][y, t]
+ end)
end
- ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceThermCommit[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(THERM_COMMIT, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceThermCommit)
-
- ### Constraints ###
-
- ### Capacitated limits on unit commitment decision variables (Constraints #1-3)
- @constraints(EP, begin
- [y in THERM_COMMIT, t=1:T], EP[:vCOMMIT][y,t] <= EP[:eTotalCap][y]/cap_size(gen[y])
- [y in THERM_COMMIT, t=1:T], EP[:vSTART][y,t] <= EP[:eTotalCap][y]/cap_size(gen[y])
- [y in THERM_COMMIT, t=1:T], EP[:vSHUT][y,t] <= EP[:eTotalCap][y]/cap_size(gen[y])
- end)
-
- # Commitment state constraint linking startup and shutdown decisions (Constraint #4)
- @constraints(EP, begin
- [y in THERM_COMMIT, t in 1:T], EP[:vCOMMIT][y,t] == EP[:vCOMMIT][y, hoursbefore(p, t, 1)] + EP[:vSTART][y,t] - EP[:vSHUT][y,t]
- end)
-
- ### Maximum ramp up and down between consecutive hours (Constraints #5-6)
-
- ## For Start Hours
- # Links last time step with first time step, ensuring position in hour 1 is within eligible ramp of final hour position
- # rampup constraints
- @constraint(EP,[y in THERM_COMMIT, t in 1:T],
- EP[:vP][y,t] - EP[:vP][y, hoursbefore(p, t, 1)] + regulation_term[y,t] + reserves_term[y,t] <= ramp_up_fraction(gen[y])*cap_size(gen[y])*(EP[:vCOMMIT][y,t]-EP[:vSTART][y,t])
- + min(inputs["pP_Max"][y,t],max(min_power(gen[y]),ramp_up_fraction(gen[y])))*cap_size(gen[y])*EP[:vSTART][y,t]
- - min_power(gen[y])*cap_size(gen[y])*EP[:vSHUT][y,t])
-
- # rampdown constraints
- @constraint(EP,[y in THERM_COMMIT, t in 1:T],
- EP[:vP][y, hoursbefore(p,t,1)] - EP[:vP][y,t] - regulation_term[y,t] + reserves_term[y, hoursbefore(p,t,1)] <= ramp_down_fraction(gen[y])*cap_size(gen[y])*(EP[:vCOMMIT][y,t]-EP[:vSTART][y,t])
- - min_power(gen[y])*cap_size(gen[y])*EP[:vSTART][y,t]
- + min(inputs["pP_Max"][y,t],max(min_power(gen[y]),ramp_down_fraction(gen[y])))*cap_size(gen[y])*EP[:vSHUT][y,t])
-
-
- ### Minimum and maximum power output constraints (Constraints #7-8)
- if setup["OperationalReserves"] == 1
- # If modeling with regulation and reserves, constraints are established by thermal_commit_operational_reserves() function below
- thermal_commit_operational_reserves!(EP, inputs)
- else
- @constraints(EP, begin
- # Minimum stable power generated per technology "y" at hour "t" > Min power
- [y in THERM_COMMIT, t=1:T], EP[:vP][y,t] >= min_power(gen[y])*cap_size(gen[y])*EP[:vCOMMIT][y,t]
-
- # Maximum power generated per technology "y" at hour "t" < Max power
- [y in THERM_COMMIT, t=1:T], EP[:vP][y,t] <= inputs["pP_Max"][y,t]*cap_size(gen[y])*EP[:vCOMMIT][y,t]
- end)
- end
-
- ### Minimum up and down times (Constraints #9-10)
- Up_Time = zeros(Int, G)
- Up_Time[THERM_COMMIT] .= Int.(floor.(up_time.(gen[THERM_COMMIT])))
- @constraint(EP, [y in THERM_COMMIT, t in 1:T],
- EP[:vCOMMIT][y,t] >= sum(EP[:vSTART][y, u] for u in hoursbefore(p, t, 0:(Up_Time[y] - 1)))
- )
-
- Down_Time = zeros(Int, G)
- Down_Time[THERM_COMMIT] .= Int.(floor.(down_time.(gen[THERM_COMMIT])))
- @constraint(EP, [y in THERM_COMMIT, t in 1:T],
- EP[:eTotalCap][y]/cap_size(gen[y])-EP[:vCOMMIT][y,t] >= sum(EP[:vSHUT][y, u] for u in hoursbefore(p, t, 0:(Down_Time[y] - 1)))
- )
-
- ## END Constraints for thermal units subject to integer (discrete) unit commitment decisions
+ ### Minimum up and down times (Constraints #9-10)
+ Up_Time = zeros(Int, G)
+ Up_Time[THERM_COMMIT] .= Int.(floor.(up_time.(gen[THERM_COMMIT])))
+ @constraint(EP, [y in THERM_COMMIT, t in 1:T],
+ EP[:vCOMMIT][y,
+ t]>=sum(EP[:vSTART][y, u] for u in hoursbefore(p, t, 0:(Up_Time[y] - 1))))
+
+ Down_Time = zeros(Int, G)
+ Down_Time[THERM_COMMIT] .= Int.(floor.(down_time.(gen[THERM_COMMIT])))
+ @constraint(EP, [y in THERM_COMMIT, t in 1:T],
+ EP[:eTotalCap][y] / cap_size(gen[y]) -
+ EP[:vCOMMIT][y,
+ t]>=sum(EP[:vSHUT][y, u] for u in hoursbefore(p, t, 0:(Down_Time[y] - 1))))
+
+ ## END Constraints for thermal units subject to integer (discrete) unit commitment decisions
if !isempty(ids_with_maintenance(gen))
maintenance_formulation_thermal_commit!(EP, inputs, setup)
end
@@ -266,39 +289,46 @@ When modeling frequency regulation and spinning reserves contributions, thermal
"""
function thermal_commit_operational_reserves!(EP::Model, inputs::Dict)
+ println("Thermal Commit Operational Reserves Module")
- println("Thermal Commit Operational Reserves Module")
-
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- THERM_COMMIT = inputs["THERM_COMMIT"]
+ THERM_COMMIT = inputs["THERM_COMMIT"]
- REG = intersect(THERM_COMMIT, inputs["REG"]) # Set of thermal resources with regulation reserves
- RSV = intersect(THERM_COMMIT, inputs["RSV"]) # Set of thermal resources with spinning reserves
+ REG = intersect(THERM_COMMIT, inputs["REG"]) # Set of thermal resources with regulation reserves
+ RSV = intersect(THERM_COMMIT, inputs["RSV"]) # Set of thermal resources with spinning reserves
vP = EP[:vP]
vREG = EP[:vREG]
vRSV = EP[:vRSV]
- commit(y,t) = cap_size(gen[y]) * EP[:vCOMMIT][y,t]
- max_power(y,t) = inputs["pP_Max"][y,t]
+ commit(y, t) = cap_size(gen[y]) * EP[:vCOMMIT][y, t]
+ max_power(y, t) = inputs["pP_Max"][y, t]
# Maximum regulation and reserve contributions
- @constraint(EP, [y in REG, t in 1:T], vREG[y, t] <= max_power(y, t) * reg_max(gen[y]) * commit(y, t))
- @constraint(EP, [y in RSV, t in 1:T], vRSV[y, t] <= max_power(y, t) * rsv_max(gen[y]) * commit(y, t))
+ @constraint(EP,
+ [y in REG, t in 1:T],
+ vREG[y, t]<=max_power(y, t) * reg_max(gen[y]) * commit(y, t))
+ @constraint(EP,
+ [y in RSV, t in 1:T],
+ vRSV[y, t]<=max_power(y, t) * rsv_max(gen[y]) * commit(y, t))
# Minimum stable power generated per technology "y" at hour "t" and contribution to regulation must be > min power
expr = extract_time_series_to_expression(vP, THERM_COMMIT)
add_similar_to_expression!(expr[REG, :], -vREG[REG, :])
- @constraint(EP, [y in THERM_COMMIT, t in 1:T], expr[y, t] >= min_power(gen[y]) * commit(y, t))
+ @constraint(EP,
+ [y in THERM_COMMIT, t in 1:T],
+ expr[y, t]>=min_power(gen[y]) * commit(y, t))
# Maximum power generated per technology "y" at hour "t" and contribution to regulation and reserves up must be < max power
expr = extract_time_series_to_expression(vP, THERM_COMMIT)
add_similar_to_expression!(expr[REG, :], vREG[REG, :])
add_similar_to_expression!(expr[RSV, :], vRSV[RSV, :])
- @constraint(EP, [y in THERM_COMMIT, t in 1:T], expr[y, t] <= max_power(y, t) * commit(y, t))
+ @constraint(EP,
+ [y in THERM_COMMIT, t in 1:T],
+ expr[y, t]<=max_power(y, t) * commit(y, t))
end
@doc raw"""
@@ -307,12 +337,11 @@ end
Creates maintenance variables and constraints for thermal-commit plants.
"""
function maintenance_formulation_thermal_commit!(EP::Model, inputs::Dict, setup::Dict)
-
@info "Maintenance Module for Thermal plants"
ensure_maintenance_variable_records!(inputs)
gen = inputs["RESOURCES"]
-
+
by_rid(rid, sym) = by_rid_res(rid, sym, gen)
MAINT = ids_with_maintenance(gen)
@@ -331,16 +360,16 @@ function maintenance_formulation_thermal_commit!(EP::Model, inputs::Dict, setup:
for y in MAINT
maintenance_formulation!(EP,
- inputs,
- resource_component(y),
- y,
- maint_begin_cadence(y),
- maint_dur(y),
- maint_freq(y),
- cap(y),
- vcommit,
- ecap,
- integer_operational_unit_committment)
+ inputs,
+ resource_component(y),
+ y,
+ maint_begin_cadence(y),
+ maint_dur(y),
+ maint_freq(y),
+ cap(y),
+ vcommit,
+ ecap,
+ integer_operational_unit_committment)
end
end
@@ -350,7 +379,7 @@ end
Eliminates the contribution of a plant to the capacity reserve margin while it is down for maintenance.
"""
function thermal_maintenance_capacity_reserve_margin_adjustment!(EP::Model,
- inputs::Dict)
+ inputs::Dict)
gen = inputs["RESOURCES"]
T = inputs["T"] # Number of time steps (hours)
@@ -360,18 +389,22 @@ function thermal_maintenance_capacity_reserve_margin_adjustment!(EP::Model,
applicable_resources = intersect(MAINT, THERM_COMMIT)
maint_adj = @expression(EP, [capres in 1:ncapres, t in 1:T],
- sum(thermal_maintenance_capacity_reserve_margin_adjustment(EP, inputs, y, capres, t) for y in applicable_resources))
+ sum(thermal_maintenance_capacity_reserve_margin_adjustment(EP,
+ inputs,
+ y,
+ capres,
+ t) for y in applicable_resources))
add_similar_to_expression!(EP[:eCapResMarBalance], maint_adj)
end
function thermal_maintenance_capacity_reserve_margin_adjustment(EP::Model,
- inputs::Dict,
- y::Int,
- capres::Int,
- t)
+ inputs::Dict,
+ y::Int,
+ capres::Int,
+ t)
gen = inputs["RESOURCES"]
resource_component = resource_name(gen[y])
- capresfactor = derating_factor(gen[y], tag=capres)
+ capresfactor = derating_factor(gen[y], tag = capres)
cap = cap_size(gen[y])
down_var = EP[Symbol(maintenance_down_name(resource_component))]
return -capresfactor * down_var[t] * cap
diff --git a/src/model/resources/thermal/thermal_no_commit.jl b/src/model/resources/thermal/thermal_no_commit.jl
index 975a8c67f3..dd1253e0bd 100644
--- a/src/model/resources/thermal/thermal_no_commit.jl
+++ b/src/model/resources/thermal/thermal_no_commit.jl
@@ -42,53 +42,59 @@ When not modeling regulation and reserves, thermal units not subject to unit com
(See Constraints 3-4 in the code)
"""
function thermal_no_commit!(EP::Model, inputs::Dict, setup::Dict)
+ println("Thermal (No Unit Commitment) Resources Module")
- println("Thermal (No Unit Commitment) Resources Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
-
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
-
- p = inputs["hours_per_subperiod"] #total number of hours per subperiod
-
- THERM_NO_COMMIT = inputs["THERM_NO_COMMIT"]
-
- ### Expressions ###
-
- ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceThermNoCommit[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(THERM_NO_COMMIT, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceThermNoCommit)
-
- ### Constraints ###
-
- ### Maximum ramp up and down between consecutive hours (Constraints #1-2)
- @constraints(EP, begin
-
- ## Maximum ramp up between consecutive hours
- [y in THERM_NO_COMMIT, t in 1:T], EP[:vP][y,t] - EP[:vP][y, hoursbefore(p,t,1)] <= ramp_up_fraction(gen[y])*EP[:eTotalCap][y]
-
- ## Maximum ramp down between consecutive hours
- [y in THERM_NO_COMMIT, t in 1:T], EP[:vP][y, hoursbefore(p,t,1)] - EP[:vP][y,t] <= ramp_down_fraction(gen[y])*EP[:eTotalCap][y]
- end)
-
- ### Minimum and maximum power output constraints (Constraints #3-4)
- if setup["OperationalReserves"] == 1
- # If modeling with regulation and reserves, constraints are established by thermal_no_commit_operational_reserves() function below
- thermal_no_commit_operational_reserves!(EP, inputs)
- else
- @constraints(EP, begin
- # Minimum stable power generated per technology "y" at hour "t" Min_Power
- [y in THERM_NO_COMMIT, t=1:T], EP[:vP][y,t] >= min_power(gen[y])*EP[:eTotalCap][y]
-
- # Maximum power generated per technology "y" at hour "t"
- [y in THERM_NO_COMMIT, t=1:T], EP[:vP][y,t] <= inputs["pP_Max"][y,t]*EP[:eTotalCap][y]
- end)
-
- end
- # END Constraints for thermal resources not subject to unit commitment
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+
+ p = inputs["hours_per_subperiod"] #total number of hours per subperiod
+
+ THERM_NO_COMMIT = inputs["THERM_NO_COMMIT"]
+
+ ### Expressions ###
+
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceThermNoCommit[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t]
+ for y in intersect(THERM_NO_COMMIT, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceThermNoCommit)
+
+ ### Constraints ###
+
+ ### Maximum ramp up and down between consecutive hours (Constraints #1-2)
+ @constraints(EP,
+ begin
+
+ ## Maximum ramp up between consecutive hours
+ [y in THERM_NO_COMMIT, t in 1:T],
+ EP[:vP][y, t] - EP[:vP][y, hoursbefore(p, t, 1)] <=
+ ramp_up_fraction(gen[y]) * EP[:eTotalCap][y]
+
+ ## Maximum ramp down between consecutive hours
+ [y in THERM_NO_COMMIT, t in 1:T],
+ EP[:vP][y, hoursbefore(p, t, 1)] - EP[:vP][y, t] <=
+ ramp_down_fraction(gen[y]) * EP[:eTotalCap][y]
+ end)
+
+ ### Minimum and maximum power output constraints (Constraints #3-4)
+ if setup["OperationalReserves"] == 1
+ # If modeling with regulation and reserves, constraints are established by thermal_no_commit_operational_reserves() function below
+ thermal_no_commit_operational_reserves!(EP, inputs)
+ else
+ @constraints(EP,
+ begin
+ # Minimum stable power generated per technology "y" at hour "t" Min_Power
+ [y in THERM_NO_COMMIT, t = 1:T],
+ EP[:vP][y, t] >= min_power(gen[y]) * EP[:eTotalCap][y]
+
+ # Maximum power generated per technology "y" at hour "t"
+ [y in THERM_NO_COMMIT, t = 1:T],
+ EP[:vP][y, t] <= inputs["pP_Max"][y, t] * EP[:eTotalCap][y]
+ end)
+ end
+ # END Constraints for thermal resources not subject to unit commitment
end
@doc raw"""
@@ -135,10 +141,9 @@ When modeling regulation and spinning reserves, thermal units not subject to uni
Note there are multiple versions of these constraints in the code in order to avoid creation of unecessary constraints and decision variables for thermal units unable to provide regulation and/or reserves contributions due to input parameters (e.g. ```Reg_Max=0``` and/or ```RSV_Max=0```).
"""
function thermal_no_commit_operational_reserves!(EP::Model, inputs::Dict)
+ println("Thermal No Commit Reserves Module")
- println("Thermal No Commit Reserves Module")
-
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
T = inputs["T"] # Number of time steps (hours)
@@ -152,20 +157,28 @@ function thermal_no_commit_operational_reserves!(EP::Model, inputs::Dict)
vRSV = EP[:vRSV]
eTotalCap = EP[:eTotalCap]
- max_power(y,t) = inputs["pP_Max"][y,t]
+ max_power(y, t) = inputs["pP_Max"][y, t]
# Maximum regulation and reserve contributions
- @constraint(EP, [y in REG, t in 1:T], vREG[y, t] <= max_power(y, t) * reg_max(gen[y]) * eTotalCap[y])
- @constraint(EP, [y in RSV, t in 1:T], vRSV[y, t] <= max_power(y, t) * rsv_max(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in REG, t in 1:T],
+ vREG[y, t]<=max_power(y, t) * reg_max(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in RSV, t in 1:T],
+ vRSV[y, t]<=max_power(y, t) * rsv_max(gen[y]) * eTotalCap[y])
# Minimum stable power generated per technology "y" at hour "t" and contribution to regulation must be > min power
expr = extract_time_series_to_expression(vP, THERM_NO_COMMIT)
add_similar_to_expression!(expr[REG, :], -vREG[REG, :])
- @constraint(EP, [y in THERM_NO_COMMIT, t in 1:T], expr[y, t] >= min_power(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in THERM_NO_COMMIT, t in 1:T],
+ expr[y, t]>=min_power(gen[y]) * eTotalCap[y])
# Maximum power generated per technology "y" at hour "t" and contribution to regulation and reserves up must be < max power
expr = extract_time_series_to_expression(vP, THERM_NO_COMMIT)
add_similar_to_expression!(expr[REG, :], vREG[REG, :])
add_similar_to_expression!(expr[RSV, :], vRSV[RSV, :])
- @constraint(EP, [y in THERM_NO_COMMIT, t in 1:T], expr[y, t] <= max_power(y, t) * eTotalCap[y])
+ @constraint(EP,
+ [y in THERM_NO_COMMIT, t in 1:T],
+ expr[y, t]<=max_power(y, t) * eTotalCap[y])
end
diff --git a/src/model/resources/vre_stor/vre_stor.jl b/src/model/resources/vre_stor/vre_stor.jl
index 7ad0a07dd1..0e1ca1709e 100644
--- a/src/model/resources/vre_stor/vre_stor.jl
+++ b/src/model/resources/vre_stor/vre_stor.jl
@@ -79,69 +79,70 @@ The second constraint with both capacity reserve margins and operating reserves
The rest of the constraints are dependent upon specific configurable components within the module and are listed below.
"""
function vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
- println("VRE-Storage Module")
+ println("VRE-Storage Module")
### LOAD DATA ###
# Load generators dataframe, sets, and time periods
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
# Load VRE-storage inputs
- VRE_STOR = inputs["VRE_STOR"] # Set of VRE-STOR generators (indices)
+ VRE_STOR = inputs["VRE_STOR"] # Set of VRE-STOR generators (indices)
gen_VRE_STOR = gen.VreStorage # Set of VRE-STOR generators (objects)
SOLAR = inputs["VS_SOLAR"] # Set of VRE-STOR generators with solar-component
DC = inputs["VS_DC"] # Set of VRE-STOR generators with inverter-component
WIND = inputs["VS_WIND"] # Set of VRE-STOR generators with wind-component
STOR = inputs["VS_STOR"] # Set of VRE-STOR generators with storage-component
NEW_CAP = intersect(VRE_STOR, inputs["NEW_CAP"]) # Set of VRE-STOR generators eligible for new buildout
-
+
# Policy flags
EnergyShareRequirement = setup["EnergyShareRequirement"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
MinCapReq = setup["MinCapReq"]
MaxCapReq = setup["MaxCapReq"]
IncludeLossesInESR = setup["IncludeLossesInESR"]
OperationalReserves = setup["OperationalReserves"]
-
+
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
### VARIABLES ARE DEFINED IN RESPECTIVE MODULES ###
-
- ### EXPRESSIONS ###
+
+ ### EXPRESSIONS ###
## 1. Objective Function Expressions ##
# Separate grid costs
@expression(EP, eCGrid[y in VRE_STOR],
if y in NEW_CAP # Resources eligible for new capacity
- inv_cost_per_mwyr(gen[y])*EP[:vCAP][y] + fixed_om_cost_per_mwyr(gen[y])*EP[:eTotalCap][y]
+ inv_cost_per_mwyr(gen[y]) * EP[:vCAP][y] +
+ fixed_om_cost_per_mwyr(gen[y]) * EP[:eTotalCap][y]
else
- fixed_om_cost_per_mwyr(gen[y])*EP[:eTotalCap][y]
- end
- )
+ fixed_om_cost_per_mwyr(gen[y]) * EP[:eTotalCap][y]
+ end)
@expression(EP, eTotalCGrid, sum(eCGrid[y] for y in VRE_STOR))
- ## 2. Power Balance Expressions ##
+ ## 2. Power Balance Expressions ##
# Note: The subtraction of the charging component can be found in STOR function
- @expression(EP, ePowerBalance_VRE_STOR[t=1:T, z=1:Z], JuMP.AffExpr())
- for t=1:T, z=1:Z
+ @expression(EP, ePowerBalance_VRE_STOR[t = 1:T, z = 1:Z], JuMP.AffExpr())
+ for t in 1:T, z in 1:Z
if !isempty(resources_in_zone_by_rid(gen_VRE_STOR, z))
- ePowerBalance_VRE_STOR[t,z] += sum(EP[:vP][y,t] for y=resources_in_zone_by_rid(gen_VRE_STOR, z))
+ ePowerBalance_VRE_STOR[t, z] += sum(EP[:vP][y, t]
+ for y in resources_in_zone_by_rid(gen_VRE_STOR,
+ z))
end
end
## 3. Module Expressions ##
# Inverter AC Balance
- @expression(EP, eInvACBalance[y in VRE_STOR, t=1:T], JuMP.AffExpr())
+ @expression(EP, eInvACBalance[y in VRE_STOR, t = 1:T], JuMP.AffExpr())
# Grid Exports
- @expression(EP, eGridExport[y in VRE_STOR, t=1:T], JuMP.AffExpr())
+ @expression(EP, eGridExport[y in VRE_STOR, t = 1:T], JuMP.AffExpr())
### COMPONENT MODULE CONSTRAINTS ###
@@ -169,87 +170,115 @@ function vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Energy Share Requirement
if EnergyShareRequirement >= 1
- @expression(EP, eESRVREStor[ESR=1:inputs["nESR"]],
- sum(inputs["omega"][t]*esr_vrestor(gen[y],tag=ESR)*EP[:vP_SOLAR][y,t]*by_rid(y,:etainverter)
- for y=intersect(SOLAR, ids_with_policy(gen, esr_vrestor, tag=ESR)), t=1:T)
- + sum(inputs["omega"][t]*esr_vrestor(gen[y],tag=ESR)*EP[:vP_WIND][y,t]
- for y=intersect(WIND, ids_with_policy(gen, esr_vrestor, tag=ESR)), t=1:T))
+ @expression(EP, eESRVREStor[ESR = 1:inputs["nESR"]],
+ sum(inputs["omega"][t] * esr_vrestor(gen[y], tag = ESR) * EP[:vP_SOLAR][y, t] *
+ by_rid(y, :etainverter)
+ for y in intersect(SOLAR, ids_with_policy(gen, esr_vrestor, tag = ESR)),
+ t in 1:T)
+ +sum(inputs["omega"][t] * esr_vrestor(gen[y], tag = ESR) * EP[:vP_WIND][y, t]
+ for y in intersect(WIND, ids_with_policy(gen, esr_vrestor, tag = ESR)),
+ t in 1:T))
EP[:eESR] += eESRVREStor
if IncludeLossesInESR == 1
- @expression(EP, eESRVREStorLosses[ESR=1:inputs["nESR"]],
- sum(inputs["dfESR"][z,ESR]*sum(EP[:eELOSS_VRE_STOR][y]
- for y=intersect(STOR, resources_in_zone_by_rid(gen_VRE_STOR, z))) for z=findall(x->x>0,inputs["dfESR"][:,ESR])))
+ @expression(EP, eESRVREStorLosses[ESR = 1:inputs["nESR"]],
+ sum(inputs["dfESR"][z, ESR] * sum(EP[:eELOSS_VRE_STOR][y]
+ for y in intersect(STOR, resources_in_zone_by_rid(gen_VRE_STOR, z)))
+ for z in findall(x -> x > 0, inputs["dfESR"][:, ESR])))
EP[:eESR] -= eESRVREStorLosses
end
end
# Minimum Capacity Requirement
if MinCapReq == 1
- @expression(EP, eMinCapResSolar[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(by_rid(y,:etainverter)*EP[:eTotalCap_SOLAR][y] for y in intersect(SOLAR, ids_with_policy(gen_VRE_STOR, min_cap_solar, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResSolar
-
- @expression(EP, eMinCapResWind[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(EP[:eTotalCap_WIND][y] for y in intersect(WIND, ids_with_policy(gen_VRE_STOR, min_cap_wind, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResWind
+ @expression(EP, eMinCapResSolar[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(by_rid(y, :etainverter) * EP[:eTotalCap_SOLAR][y]
+ for y in intersect(SOLAR,
+ ids_with_policy(gen_VRE_STOR, min_cap_solar, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResSolar
+
+ @expression(EP, eMinCapResWind[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(EP[:eTotalCap_WIND][y]
+ for y in intersect(WIND,
+ ids_with_policy(gen_VRE_STOR, min_cap_wind, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResWind
if !isempty(inputs["VS_ASYM_AC_DISCHARGE"])
- @expression(EP, eMinCapResACDis[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(EP[:eTotalCapDischarge_AC][y] for y in intersect(inputs["VS_ASYM_AC_DISCHARGE"], ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResACDis
+ @expression(EP, eMinCapResACDis[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(EP[:eTotalCapDischarge_AC][y]
+ for y in intersect(inputs["VS_ASYM_AC_DISCHARGE"],
+ ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResACDis
end
if !isempty(inputs["VS_ASYM_DC_DISCHARGE"])
- @expression(EP, eMinCapResDCDis[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(EP[:eTotalCapDischarge_DC][y] for y in intersect(inputs["VS_ASYM_DC_DISCHARGE"], ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResDCDis
+ @expression(EP, eMinCapResDCDis[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(EP[:eTotalCapDischarge_DC][y]
+ for y in intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResDCDis
end
if !isempty(inputs["VS_SYM_AC"])
- @expression(EP, eMinCapResACStor[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(by_rid(y,:power_to_energy_ac)*EP[:eTotalCap_STOR][y] for y in intersect(inputs["VS_SYM_AC"], ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResACStor
+ @expression(EP, eMinCapResACStor[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(by_rid(y, :power_to_energy_ac) * EP[:eTotalCap_STOR][y]
+ for y in intersect(inputs["VS_SYM_AC"],
+ ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResACStor
end
if !isempty(inputs["VS_SYM_DC"])
- @expression(EP, eMinCapResDCStor[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(by_rid(y,:power_to_energy_dc)*EP[:eTotalCap_STOR][y] for y in intersect(inputs["VS_SYM_DC"], ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResDCStor
+ @expression(EP, eMinCapResDCStor[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(by_rid(y, :power_to_energy_dc) * EP[:eTotalCap_STOR][y]
+ for y in intersect(inputs["VS_SYM_DC"],
+ ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResDCStor
end
end
# Maximum Capacity Requirement
if MaxCapReq == 1
- @expression(EP, eMaxCapResSolar[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(by_rid(y,:etainverter)*EP[:eTotalCap_SOLAR][y] for y in intersect(SOLAR, ids_with_policy(gen_VRE_STOR, max_cap_solar, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResSolar
-
- @expression(EP, eMaxCapResWind[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(EP[:eTotalCap_WIND][y] for y in intersect(WIND, ids_with_policy(gen_VRE_STOR, max_cap_wind, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResWind
+ @expression(EP, eMaxCapResSolar[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(by_rid(y, :etainverter) * EP[:eTotalCap_SOLAR][y]
+ for y in intersect(SOLAR,
+ ids_with_policy(gen_VRE_STOR, max_cap_solar, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResSolar
+
+ @expression(EP, eMaxCapResWind[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(EP[:eTotalCap_WIND][y]
+ for y in intersect(WIND,
+ ids_with_policy(gen_VRE_STOR, max_cap_wind, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResWind
if !isempty(inputs["VS_ASYM_AC_DISCHARGE"])
- @expression(EP, eMaxCapResACDis[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(EP[:eTotalCapDischarge_AC][y] for y in intersect(inputs["VS_ASYM_AC_DISCHARGE"], ids_with_policy(gen_VRE_STOR, max_cap_stor, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResACDis
+ @expression(EP, eMaxCapResACDis[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(EP[:eTotalCapDischarge_AC][y]
+ for y in intersect(inputs["VS_ASYM_AC_DISCHARGE"],
+ ids_with_policy(gen_VRE_STOR, max_cap_stor, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResACDis
end
if !isempty(inputs["VS_ASYM_DC_DISCHARGE"])
- @expression(EP, eMaxCapResDCDis[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(EP[:eTotalCapDischarge_DC][y] for y in intersect(inputs["VS_ASYM_DC_DISCHARGE"], ids_with_policy(gen_VRE_STOR, max_cap_stor, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResDCDis
+ @expression(EP, eMaxCapResDCDis[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(EP[:eTotalCapDischarge_DC][y]
+ for y in intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ ids_with_policy(gen_VRE_STOR, max_cap_stor, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResDCDis
end
if !isempty(inputs["VS_SYM_AC"])
- @expression(EP, eMaxCapResACStor[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(by_rid(y,:power_to_energy_ac)*EP[:eTotalCap_STOR][y] for y in intersect(inputs["VS_SYM_AC"], ids_with_policy(gen_VRE_STOR, max_cap_stor, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResACStor
+ @expression(EP, eMaxCapResACStor[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(by_rid(y, :power_to_energy_ac) * EP[:eTotalCap_STOR][y]
+ for y in intersect(inputs["VS_SYM_AC"],
+ ids_with_policy(gen_VRE_STOR, max_cap_stor, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResACStor
end
if !isempty(inputs["VS_SYM_DC"])
- @expression(EP, eMaxCapResDCStor[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(by_rid(y,:power_to_energy_dc)*EP[:eTotalCap_STOR][y] for y in intersect(inputs["VS_SYM_DC"], ids_with_policy(gen_VRE_STOR, max_cap_stor, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResDCStor
+ @expression(EP, eMaxCapResDCStor[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(by_rid(y, :power_to_energy_dc) * EP[:eTotalCap_STOR][y]
+ for y in intersect(inputs["VS_SYM_DC"],
+ ids_with_policy(gen_VRE_STOR, max_cap_stor, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResDCStor
end
end
@@ -269,33 +298,49 @@ function vre_stor!(EP::Model, inputs::Dict, setup::Dict)
### CONSTRAINTS ###
# Constraint 1: Energy Balance Constraint
- @constraint(EP, cEnergyBalance[y in VRE_STOR, t=1:T],
- EP[:vP][y,t] == eInvACBalance[y,t])
-
+ @constraint(EP, cEnergyBalance[y in VRE_STOR, t = 1:T],
+ EP[:vP][y, t]==eInvACBalance[y, t])
+
# Constraint 2: Grid Export/Import Maximum
- @constraint(EP, cGridExport[y in VRE_STOR, t=1:T],
- EP[:vP][y,t] + eGridExport[y,t] <= EP[:eTotalCap][y])
-
+ @constraint(EP, cGridExport[y in VRE_STOR, t = 1:T],
+ EP[:vP][y, t] + eGridExport[y, t]<=EP[:eTotalCap][y])
+
# Constraint 3: Inverter Export/Import Maximum (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cInverterExport[y in DC, t=1:T], EP[:eInverterExport][y,t] <= EP[:eTotalCap_DC][y])
+ @constraint(EP,
+ cInverterExport[y in DC, t = 1:T],
+ EP[:eInverterExport][y, t]<=EP[:eTotalCap_DC][y])
# Constraint 4: PV Generation (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cSolarGenMaxS[y in SOLAR, t=1:T], EP[:eSolarGenMaxS][y,t] <= inputs["pP_Max_Solar"][y,t]*EP[:eTotalCap_SOLAR][y])
+ @constraint(EP,
+ cSolarGenMaxS[y in SOLAR, t = 1:T],
+ EP[:eSolarGenMaxS][y, t]<=inputs["pP_Max_Solar"][y, t] * EP[:eTotalCap_SOLAR][y])
# Constraint 5: Wind Generation (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cWindGenMaxW[y in WIND, t=1:T], EP[:eWindGenMaxW][y,t] <= inputs["pP_Max_Wind"][y,t]*EP[:eTotalCap_WIND][y])
+ @constraint(EP,
+ cWindGenMaxW[y in WIND, t = 1:T],
+ EP[:eWindGenMaxW][y, t]<=inputs["pP_Max_Wind"][y, t] * EP[:eTotalCap_WIND][y])
# Constraint 6: Symmetric Storage Resources (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cChargeDischargeMaxDC[y in inputs["VS_SYM_DC"], t=1:T],
- EP[:eChargeDischargeMaxDC][y,t] <= by_rid(y,:power_to_energy_dc)*EP[:eTotalCap_STOR][y])
- @constraint(EP, cChargeDischargeMaxAC[y in inputs["VS_SYM_AC"], t=1:T],
- EP[:eChargeDischargeMaxAC][y,t] <= by_rid(y,:power_to_energy_ac)*EP[:eTotalCap_STOR][y])
+ @constraint(EP, cChargeDischargeMaxDC[y in inputs["VS_SYM_DC"], t = 1:T],
+ EP[:eChargeDischargeMaxDC][y,
+ t]<=by_rid(y, :power_to_energy_dc) * EP[:eTotalCap_STOR][y])
+ @constraint(EP, cChargeDischargeMaxAC[y in inputs["VS_SYM_AC"], t = 1:T],
+ EP[:eChargeDischargeMaxAC][y,
+ t]<=by_rid(y, :power_to_energy_ac) * EP[:eTotalCap_STOR][y])
# Constraint 7: Asymmetric Storage Resources (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cVreStorMaxDischargingDC[y in inputs["VS_ASYM_DC_DISCHARGE"], t=1:T], EP[:eVreStorMaxDischargingDC][y,t] <= EP[:eTotalCapDischarge_DC][y])
- @constraint(EP, cVreStorMaxChargingDC[y in inputs["VS_ASYM_DC_CHARGE"], t=1:T], EP[:eVreStorMaxChargingDC][y,t] <= EP[:eTotalCapCharge_DC][y])
- @constraint(EP, cVreStorMaxDischargingAC[y in inputs["VS_ASYM_AC_DISCHARGE"], t=1:T], EP[:eVreStorMaxDischargingAC][y,t] <= EP[:eTotalCapDischarge_AC][y])
- @constraint(EP, cVreStorMaxChargingAC[y in inputs["VS_ASYM_AC_CHARGE"], t=1:T], EP[:eVreStorMaxChargingAC][y,t] <= EP[:eTotalCapCharge_AC][y])
+ @constraint(EP,
+ cVreStorMaxDischargingDC[y in inputs["VS_ASYM_DC_DISCHARGE"], t = 1:T],
+ EP[:eVreStorMaxDischargingDC][y, t]<=EP[:eTotalCapDischarge_DC][y])
+ @constraint(EP,
+ cVreStorMaxChargingDC[y in inputs["VS_ASYM_DC_CHARGE"], t = 1:T],
+ EP[:eVreStorMaxChargingDC][y, t]<=EP[:eTotalCapCharge_DC][y])
+ @constraint(EP,
+ cVreStorMaxDischargingAC[y in inputs["VS_ASYM_AC_DISCHARGE"], t = 1:T],
+ EP[:eVreStorMaxDischargingAC][y, t]<=EP[:eTotalCapDischarge_AC][y])
+ @constraint(EP,
+ cVreStorMaxChargingAC[y in inputs["VS_ASYM_AC_CHARGE"], t = 1:T],
+ EP[:eVreStorMaxChargingAC][y, t]<=EP[:eTotalCapCharge_AC][y])
end
@doc raw"""
@@ -371,7 +416,6 @@ In addition, this function adds investment and fixed O&M related costs related t
```
"""
function inverter_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Inverter Module")
### LOAD DATA ###
@@ -382,7 +426,7 @@ function inverter_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
RET_CAP_DC = inputs["RET_CAP_DC"]
gen = inputs["RESOURCES"]
gen_VRE_STOR = gen.VreStorage
-
+
MultiStage = setup["MultiStage"]
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
@@ -396,72 +440,73 @@ function inverter_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGDCCAP[y in DC] >= 0);
+ @variable(EP, vEXISTINGDCCAP[y in DC]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapDC[y in DC], vEXISTINGDCCAP[y])
- else
- @expression(EP, eExistingCapDC[y in DC], by_rid(y,:existing_cap_inverter_mw))
- end
+ @expression(EP, eExistingCapDC[y in DC], vEXISTINGDCCAP[y])
+ else
+ @expression(EP, eExistingCapDC[y in DC], by_rid(y, :existing_cap_inverter_mw))
+ end
# 1. Total inverter capacity
@expression(EP, eTotalCap_DC[y in DC],
- if (y in intersect(NEW_CAP_DC, RET_CAP_DC)) # Resources eligible for new capacity and retirements
- eExistingCapDC[y] + EP[:vDCCAP][y] - EP[:vRETDCCAP][y]
- elseif (y in setdiff(NEW_CAP_DC, RET_CAP_DC)) # Resources eligible for only new capacity
- eExistingCapDC[y] + EP[:vDCCAP][y]
- elseif (y in setdiff(RET_CAP_DC, NEW_CAP_DC)) # Resources eligible for only capacity retirements
- eExistingCapDC[y] - EP[:vRETDCCAP][y]
- else
- eExistingCapDC[y]
- end
- )
+ if (y in intersect(NEW_CAP_DC, RET_CAP_DC)) # Resources eligible for new capacity and retirements
+ eExistingCapDC[y] + EP[:vDCCAP][y] - EP[:vRETDCCAP][y]
+ elseif (y in setdiff(NEW_CAP_DC, RET_CAP_DC)) # Resources eligible for only new capacity
+ eExistingCapDC[y] + EP[:vDCCAP][y]
+ elseif (y in setdiff(RET_CAP_DC, NEW_CAP_DC)) # Resources eligible for only capacity retirements
+ eExistingCapDC[y] - EP[:vRETDCCAP][y]
+ else
+ eExistingCapDC[y]
+ end)
# 2. Objective function additions
# Fixed costs for inverter component (if resource is not eligible for new inverter capacity, fixed costs are only O&M costs)
@expression(EP, eCFixDC[y in DC],
if y in NEW_CAP_DC # Resources eligible for new capacity
- by_rid(y,:inv_cost_inverter_per_mwyr)*vDCCAP[y] + by_rid(y,:fixed_om_inverter_cost_per_mwyr)*eTotalCap_DC[y]
+ by_rid(y, :inv_cost_inverter_per_mwyr) * vDCCAP[y] +
+ by_rid(y, :fixed_om_inverter_cost_per_mwyr) * eTotalCap_DC[y]
else
- by_rid(y,:fixed_om_inverter_cost_per_mwyr)*eTotalCap_DC[y]
- end
- )
-
+ by_rid(y, :fixed_om_inverter_cost_per_mwyr) * eTotalCap_DC[y]
+ end)
+
# Sum individual resource contributions
@expression(EP, eTotalCFixDC, sum(eCFixDC[y] for y in DC))
if MultiStage == 1
- EP[:eObj] += eTotalCFixDC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixDC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixDC
end
# 3. Inverter exports expression
- @expression(EP, eInverterExport[y in DC, t=1:T], JuMP.AffExpr())
+ @expression(EP, eInverterExport[y in DC, t = 1:T], JuMP.AffExpr())
### CONSTRAINTS ###
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapDC[y in DC], EP[:vEXISTINGDCCAP][y] == by_rid(y,:existing_cap_inverter_mw))
+ @constraint(EP,
+ cExistingCapDC[y in DC],
+ EP[:vEXISTINGDCCAP][y]==by_rid(y, :existing_cap_inverter_mw))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more capacity than existing capacity for VRE-STOR technologies
- @constraint(EP, cMaxRet_DC[y=RET_CAP_DC], vRETDCCAP[y] <= eExistingCapDC[y])
+ @constraint(EP, cMaxRet_DC[y = RET_CAP_DC], vRETDCCAP[y]<=eExistingCapDC[y])
# Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- @constraint(EP, cMaxCap_DC[y in ids_with_nonneg(gen_VRE_STOR, max_cap_inverter_mw)],
- eTotalCap_DC[y] <= by_rid(y,:max_cap_inverter_mw))
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasibility
+ @constraint(EP, cMaxCap_DC[y in ids_with_nonneg(gen_VRE_STOR, max_cap_inverter_mw)],
+ eTotalCap_DC[y]<=by_rid(y, :max_cap_inverter_mw))
# Constraint on Minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- @constraint(EP, cMinCap_DC[y in ids_with_positive(gen_VRE_STOR, min_cap_inverter_mw)],
- eTotalCap_DC[y] >= by_rid(y,:min_cap_inverter_mw))
+ @constraint(EP, cMinCap_DC[y in ids_with_positive(gen_VRE_STOR, min_cap_inverter_mw)],
+ eTotalCap_DC[y]>=by_rid(y, :min_cap_inverter_mw))
# Constraint 2: Inverter Exports Maximum: see main module because capacity reserve margin/operating reserves may alter constraint
end
@@ -530,7 +575,6 @@ In addition, this function adds investment, fixed O&M, and variable O&M costs re
```
"""
function solar_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Solar Module")
### LOAD DATA ###
@@ -554,91 +598,94 @@ function solar_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
vSOLARCAP[y in NEW_CAP_SOLAR] >= 0 # New installed solar capacity [MW DC]
# Solar-component generation [MWh]
- vP_SOLAR[y in SOLAR, t=1:T] >= 0
+ vP_SOLAR[y in SOLAR, t = 1:T] >= 0
end)
if MultiStage == 1
- @variable(EP, vEXISTINGSOLARCAP[y in SOLAR] >= 0);
+ @variable(EP, vEXISTINGSOLARCAP[y in SOLAR]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapSolar[y in SOLAR], vEXISTINGSOLARCAP[y])
- else
- @expression(EP, eExistingCapSolar[y in SOLAR], by_rid(y,:existing_cap_solar_mw))
- end
+ @expression(EP, eExistingCapSolar[y in SOLAR], vEXISTINGSOLARCAP[y])
+ else
+ @expression(EP, eExistingCapSolar[y in SOLAR], by_rid(y, :existing_cap_solar_mw))
+ end
# 1. Total solar capacity
@expression(EP, eTotalCap_SOLAR[y in SOLAR],
- if (y in intersect(NEW_CAP_SOLAR, RET_CAP_SOLAR)) # Resources eligible for new capacity and retirements
- eExistingCapSolar[y] + EP[:vSOLARCAP][y] - EP[:vRETSOLARCAP][y]
- elseif (y in setdiff(NEW_CAP_SOLAR, RET_CAP_SOLAR)) # Resources eligible for only new capacity
- eExistingCapSolar[y] + EP[:vSOLARCAP][y]
- elseif (y in setdiff(RET_CAP_SOLAR, NEW_CAP_SOLAR)) # Resources eligible for only capacity retirements
- eExistingCapSolar[y] - EP[:vRETSOLARCAP][y]
- else
- eExistingCapSolar[y]
- end
- )
+ if (y in intersect(NEW_CAP_SOLAR, RET_CAP_SOLAR)) # Resources eligible for new capacity and retirements
+ eExistingCapSolar[y] + EP[:vSOLARCAP][y] - EP[:vRETSOLARCAP][y]
+ elseif (y in setdiff(NEW_CAP_SOLAR, RET_CAP_SOLAR)) # Resources eligible for only new capacity
+ eExistingCapSolar[y] + EP[:vSOLARCAP][y]
+ elseif (y in setdiff(RET_CAP_SOLAR, NEW_CAP_SOLAR)) # Resources eligible for only capacity retirements
+ eExistingCapSolar[y] - EP[:vRETSOLARCAP][y]
+ else
+ eExistingCapSolar[y]
+ end)
# 2. Objective function additions
# Fixed costs for solar resources (if resource is not eligible for new solar capacity, fixed costs are only O&M costs)
@expression(EP, eCFixSolar[y in SOLAR],
if y in NEW_CAP_SOLAR # Resources eligible for new capacity
- by_rid(y,:inv_cost_solar_per_mwyr)*vSOLARCAP[y] + by_rid(y,:fixed_om_solar_cost_per_mwyr)*eTotalCap_SOLAR[y]
+ by_rid(y, :inv_cost_solar_per_mwyr) * vSOLARCAP[y] +
+ by_rid(y, :fixed_om_solar_cost_per_mwyr) * eTotalCap_SOLAR[y]
else
- by_rid(y,:fixed_om_solar_cost_per_mwyr)*eTotalCap_SOLAR[y]
- end
- )
+ by_rid(y, :fixed_om_solar_cost_per_mwyr) * eTotalCap_SOLAR[y]
+ end)
@expression(EP, eTotalCFixSolar, sum(eCFixSolar[y] for y in SOLAR))
if MultiStage == 1
- EP[:eObj] += eTotalCFixSolar/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixSolar / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixSolar
end
# Variable costs of "generation" for solar resource "y" during hour "t"
- @expression(EP, eCVarOutSolar[y in SOLAR, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_solar)*by_rid(y,:etainverter)*EP[:vP_SOLAR][y,t])
- @expression(EP, eTotalCVarOutSolar, sum(eCVarOutSolar[y,t] for y in SOLAR, t=1:T))
+ @expression(EP, eCVarOutSolar[y in SOLAR, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_solar)*by_rid(y, :etainverter)*
+ EP[:vP_SOLAR][y, t])
+ @expression(EP, eTotalCVarOutSolar, sum(eCVarOutSolar[y, t] for y in SOLAR, t in 1:T))
EP[:eObj] += eTotalCVarOutSolar
# 3. Inverter Balance, PV Generation Maximum
- @expression(EP, eSolarGenMaxS[y in SOLAR, t=1:T], JuMP.AffExpr())
- for y in SOLAR, t=1:T
- EP[:eInvACBalance][y,t] += by_rid(y,:etainverter)*EP[:vP_SOLAR][y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*EP[:vP_SOLAR][y,t]
- eSolarGenMaxS[y,t] += EP[:vP_SOLAR][y,t]
+ @expression(EP, eSolarGenMaxS[y in SOLAR, t = 1:T], JuMP.AffExpr())
+ for y in SOLAR, t in 1:T
+ EP[:eInvACBalance][y, t] += by_rid(y, :etainverter) * EP[:vP_SOLAR][y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * EP[:vP_SOLAR][y, t]
+ eSolarGenMaxS[y, t] += EP[:vP_SOLAR][y, t]
end
### CONSTRAINTS ###
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapSolar[y in SOLAR], EP[:vEXISTINGSOLARCAP][y] == by_rid(y,:existing_cap_solar_mw))
- end
+ @constraint(EP,
+ cExistingCapSolar[y in SOLAR],
+ EP[:vEXISTINGSOLARCAP][y]==by_rid(y, :existing_cap_solar_mw))
+ end
# Constraints 1: Retirements and capacity additions
# Cannot retire more capacity than existing capacity for VRE-STOR technologies
- @constraint(EP, cMaxRet_Solar[y=RET_CAP_SOLAR], vRETSOLARCAP[y] <= eExistingCapSolar[y])
+ @constraint(EP, cMaxRet_Solar[y = RET_CAP_SOLAR], vRETSOLARCAP[y]<=eExistingCapSolar[y])
# Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- @constraint(EP, cMaxCap_Solar[y in ids_with_nonneg(gen_VRE_STOR, max_cap_solar_mw)],
- eTotalCap_SOLAR[y] <= by_rid(y,:max_cap_solar_mw))
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasibility
+ @constraint(EP, cMaxCap_Solar[y in ids_with_nonneg(gen_VRE_STOR, max_cap_solar_mw)],
+ eTotalCap_SOLAR[y]<=by_rid(y, :max_cap_solar_mw))
# Constraint on Minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- @constraint(EP, cMinCap_Solar[y in ids_with_positive(gen_VRE_STOR, min_cap_solar_mw)],
- eTotalCap_SOLAR[y] >= by_rid(y,:min_cap_solar_mw))
+ @constraint(EP, cMinCap_Solar[y in ids_with_positive(gen_VRE_STOR, min_cap_solar_mw)],
+ eTotalCap_SOLAR[y]>=by_rid(y, :min_cap_solar_mw))
# Constraint 2: PV Generation: see main module because operating reserves may alter constraint
# Constraint 3: Inverter Ratio between solar capacity and grid
- @constraint(EP, cInverterRatio_Solar[y in ids_with_positive(gen_VRE_STOR, inverter_ratio_solar)],
- EP[:eTotalCap_SOLAR][y] == by_rid(y,:inverter_ratio_solar)*EP[:eTotalCap_DC][y])
+ @constraint(EP,
+ cInverterRatio_Solar[y in ids_with_positive(gen_VRE_STOR, inverter_ratio_solar)],
+ EP[:eTotalCap_SOLAR][y]==by_rid(y, :inverter_ratio_solar) * EP[:eTotalCap_DC][y])
end
@doc raw"""
@@ -705,7 +752,6 @@ In addition, this function adds investment, fixed O&M, and variable O&M costs re
```
"""
function wind_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Wind Module")
### LOAD DATA ###
@@ -729,89 +775,93 @@ function wind_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
vWINDCAP[y in NEW_CAP_WIND] >= 0 # New installed wind capacity [MW AC]
# Wind-component generation [MWh]
- vP_WIND[y in WIND, t=1:T] >= 0
+ vP_WIND[y in WIND, t = 1:T] >= 0
end)
if MultiStage == 1
- @variable(EP, vEXISTINGWINDCAP[y in WIND] >= 0);
- end
+ @variable(EP, vEXISTINGWINDCAP[y in WIND]>=0)
+ end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapWind[y in WIND], vEXISTINGWINDCAP[y])
- else
- @expression(EP, eExistingCapWind[y in WIND], by_rid(y,:existing_cap_wind_mw))
- end
+ @expression(EP, eExistingCapWind[y in WIND], vEXISTINGWINDCAP[y])
+ else
+ @expression(EP, eExistingCapWind[y in WIND], by_rid(y, :existing_cap_wind_mw))
+ end
# 1. Total wind capacity
@expression(EP, eTotalCap_WIND[y in WIND],
- if (y in intersect(NEW_CAP_WIND, RET_CAP_WIND)) # Resources eligible for new capacity and retirements
- eExistingCapWind[y] + EP[:vWINDCAP][y] - EP[:vRETWINDCAP][y]
- elseif (y in setdiff(NEW_CAP_WIND, RET_CAP_WIND)) # Resources eligible for only new capacity
- eExistingCapWind[y] + EP[:vWINDCAP][y]
- elseif (y in setdiff(RET_CAP_WIND, NEW_CAP_WIND)) # Resources eligible for only capacity retirements
- eExistingCapWind[y] - EP[:vRETWINDCAP][y]
- else
- eExistingCapWind[y]
- end
- )
+ if (y in intersect(NEW_CAP_WIND, RET_CAP_WIND)) # Resources eligible for new capacity and retirements
+ eExistingCapWind[y] + EP[:vWINDCAP][y] - EP[:vRETWINDCAP][y]
+ elseif (y in setdiff(NEW_CAP_WIND, RET_CAP_WIND)) # Resources eligible for only new capacity
+ eExistingCapWind[y] + EP[:vWINDCAP][y]
+ elseif (y in setdiff(RET_CAP_WIND, NEW_CAP_WIND)) # Resources eligible for only capacity retirements
+ eExistingCapWind[y] - EP[:vRETWINDCAP][y]
+ else
+ eExistingCapWind[y]
+ end)
# 2. Objective function additions
# Fixed costs for wind resources (if resource is not eligible for new wind capacity, fixed costs are only O&M costs)
@expression(EP, eCFixWind[y in WIND],
if y in NEW_CAP_WIND # Resources eligible for new capacity
- by_rid(y,:inv_cost_wind_per_mwyr)*vWINDCAP[y] + by_rid(y,:fixed_om_wind_cost_per_mwyr)*eTotalCap_WIND[y]
+ by_rid(y, :inv_cost_wind_per_mwyr) * vWINDCAP[y] +
+ by_rid(y, :fixed_om_wind_cost_per_mwyr) * eTotalCap_WIND[y]
else
- by_rid(y,:fixed_om_wind_cost_per_mwyr)*eTotalCap_WIND[y]
- end
- )
+ by_rid(y, :fixed_om_wind_cost_per_mwyr) * eTotalCap_WIND[y]
+ end)
@expression(EP, eTotalCFixWind, sum(eCFixWind[y] for y in WIND))
if MultiStage == 1
- EP[:eObj] += eTotalCFixWind/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixWind / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixWind
end
# Variable costs of "generation" for wind resource "y" during hour "t"
- @expression(EP, eCVarOutWind[y in WIND, t=1:T], inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_wind)*EP[:vP_WIND][y,t])
- @expression(EP, eTotalCVarOutWind, sum(eCVarOutWind[y,t] for y in WIND, t=1:T))
+ @expression(EP,
+ eCVarOutWind[y in WIND, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_wind)*EP[:vP_WIND][y, t])
+ @expression(EP, eTotalCVarOutWind, sum(eCVarOutWind[y, t] for y in WIND, t in 1:T))
EP[:eObj] += eTotalCVarOutWind
# 3. Inverter Balance, Wind Generation Maximum
- @expression(EP, eWindGenMaxW[y in WIND, t=1:T], JuMP.AffExpr())
- for y in WIND, t=1:T
- EP[:eInvACBalance][y,t] += EP[:vP_WIND][y,t]
- eWindGenMaxW[y,t] += EP[:vP_WIND][y,t]
+ @expression(EP, eWindGenMaxW[y in WIND, t = 1:T], JuMP.AffExpr())
+ for y in WIND, t in 1:T
+ EP[:eInvACBalance][y, t] += EP[:vP_WIND][y, t]
+ eWindGenMaxW[y, t] += EP[:vP_WIND][y, t]
end
### CONSTRAINTS ###
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapWind[y in WIND], EP[:vEXISTINGWINDCAP][y] == by_rid(y,:existing_cap_wind_mw))
- end
+ @constraint(EP,
+ cExistingCapWind[y in WIND],
+ EP[:vEXISTINGWINDCAP][y]==by_rid(y, :existing_cap_wind_mw))
+ end
# Constraints 1: Retirements and capacity additions
# Cannot retire more capacity than existing capacity for VRE-STOR technologies
- @constraint(EP, cMaxRet_Wind[y=RET_CAP_WIND], vRETWINDCAP[y] <= eExistingCapWind[y])
+ @constraint(EP, cMaxRet_Wind[y = RET_CAP_WIND], vRETWINDCAP[y]<=eExistingCapWind[y])
# Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- @constraint(EP, cMaxCap_Wind[y in ids_with_nonneg(gen_VRE_STOR, max_cap_wind_mw)],
- eTotalCap_WIND[y] <= by_rid(y,:max_cap_wind_mw))
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasibility
+ @constraint(EP, cMaxCap_Wind[y in ids_with_nonneg(gen_VRE_STOR, max_cap_wind_mw)],
+ eTotalCap_WIND[y]<=by_rid(y, :max_cap_wind_mw))
# Constraint on Minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- @constraint(EP, cMinCap_Wind[y in ids_with_positive(gen_VRE_STOR, min_cap_wind_mw)],
- eTotalCap_WIND[y] >= by_rid(y,:min_cap_wind_mw))
+ @constraint(EP, cMinCap_Wind[y in ids_with_positive(gen_VRE_STOR, min_cap_wind_mw)],
+ eTotalCap_WIND[y]>=by_rid(y, :min_cap_wind_mw))
# Constraint 2: Wind Generation: see main module because capacity reserve margin/operating reserves may alter constraint
# Constraint 3: Inverter Ratio between wind capacity and grid
- @constraint(EP, cInverterRatio_Wind[y in ids_with_positive(gen_VRE_STOR, inverter_ratio_wind)],
- EP[:eTotalCap_WIND][y] == by_rid(y,:inverter_ratio_wind)*EP[:eTotalCap][y])
+ @constraint(EP,
+ cInverterRatio_Wind[y in ids_with_positive(gen_VRE_STOR, inverter_ratio_wind)],
+ EP[:eTotalCap_WIND][y]==by_rid(y, :inverter_ratio_wind) * EP[:eTotalCap][y])
end
@doc raw"""
@@ -941,12 +991,11 @@ In addition, this function adds investment, fixed O&M, and variable O&M costs re
```
"""
function stor_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Storage Module")
### LOAD DATA ###
- T = inputs["T"]
+ T = inputs["T"]
Z = inputs["Z"]
gen = inputs["RESOURCES"]
@@ -964,8 +1013,8 @@ function stor_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
VS_LDS = inputs["VS_LDS"]
START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
- hours_per_subperiod = inputs["hours_per_subperiod"] # total number of hours per subperiod
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ hours_per_subperiod = inputs["hours_per_subperiod"] # total number of hours per subperiod
rep_periods = inputs["REP_PERIOD"]
MultiStage = setup["MultiStage"]
@@ -978,104 +1027,111 @@ function stor_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Storage energy capacity
vCAPENERGY_VS[y in NEW_CAP_STOR] >= 0 # Energy storage reservoir capacity (MWh capacity) built for VRE storage [MWh]
vRETCAPENERGY_VS[y in RET_CAP_STOR] >= 0 # Energy storage reservoir capacity retired for VRE storage [MWh]
-
+
# State of charge variable
- vS_VRE_STOR[y in STOR, t=1:T] >= 0 # Storage level of resource "y" at hour "t" [MWh] on zone "z"
+ vS_VRE_STOR[y in STOR, t = 1:T] >= 0 # Storage level of resource "y" at hour "t" [MWh] on zone "z"
# DC-battery discharge [MWh]
- vP_DC_DISCHARGE[y in DC_DISCHARGE, t=1:T] >= 0
+ vP_DC_DISCHARGE[y in DC_DISCHARGE, t = 1:T] >= 0
# DC-battery charge [MWh]
- vP_DC_CHARGE[y in DC_CHARGE, t=1:T] >= 0
+ vP_DC_CHARGE[y in DC_CHARGE, t = 1:T] >= 0
# AC-battery discharge [MWh]
- vP_AC_DISCHARGE[y in AC_DISCHARGE, t=1:T] >= 0
+ vP_AC_DISCHARGE[y in AC_DISCHARGE, t = 1:T] >= 0
# AC-battery charge [MWh]
- vP_AC_CHARGE[y in AC_CHARGE, t=1:T] >= 0
+ vP_AC_CHARGE[y in AC_CHARGE, t = 1:T] >= 0
# Grid-interfacing charge (Energy withdrawn from grid by resource VRE_STOR at hour "t") [MWh]
- vCHARGE_VRE_STOR[y in STOR, t=1:T] >= 0
+ vCHARGE_VRE_STOR[y in STOR, t = 1:T] >= 0
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPENERGY_VS[y in STOR] >= 0);
- end
+ @variable(EP, vEXISTINGCAPENERGY_VS[y in STOR]>=0)
+ end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapEnergy_VS[y in STOR], vEXISTINGCAPENERGY_VS[y])
- else
- @expression(EP, eExistingCapEnergy_VS[y in STOR], existing_cap_mwh(gen[y]))
- end
+ @expression(EP, eExistingCapEnergy_VS[y in STOR], vEXISTINGCAPENERGY_VS[y])
+ else
+ @expression(EP, eExistingCapEnergy_VS[y in STOR], existing_cap_mwh(gen[y]))
+ end
# 1. Total storage energy capacity
@expression(EP, eTotalCap_STOR[y in STOR],
- if (y in intersect(NEW_CAP_STOR, RET_CAP_STOR)) # Resources eligible for new capacity and retirements
- eExistingCapEnergy_VS[y] + EP[:vCAPENERGY_VS][y] - EP[:vRETCAPENERGY_VS][y]
- elseif (y in setdiff(NEW_CAP_STOR, RET_CAP_STOR)) # Resources eligible for only new capacity
- eExistingCapEnergy_VS[y] + EP[:vCAPENERGY_VS][y]
- elseif (y in setdiff(RET_CAP_STOR, NEW_CAP_STOR)) # Resources eligible for only capacity retirements
- eExistingCapEnergy_VS[y] - EP[:vRETCAPENERGY_VS][y]
- else
- eExistingCapEnergy_VS[y]
- end
- )
+ if (y in intersect(NEW_CAP_STOR, RET_CAP_STOR)) # Resources eligible for new capacity and retirements
+ eExistingCapEnergy_VS[y] + EP[:vCAPENERGY_VS][y] - EP[:vRETCAPENERGY_VS][y]
+ elseif (y in setdiff(NEW_CAP_STOR, RET_CAP_STOR)) # Resources eligible for only new capacity
+ eExistingCapEnergy_VS[y] + EP[:vCAPENERGY_VS][y]
+ elseif (y in setdiff(RET_CAP_STOR, NEW_CAP_STOR)) # Resources eligible for only capacity retirements
+ eExistingCapEnergy_VS[y] - EP[:vRETCAPENERGY_VS][y]
+ else
+ eExistingCapEnergy_VS[y]
+ end)
# 2. Objective function additions
# Fixed costs for storage resources (if resource is not eligible for new energy capacity, fixed costs are only O&M costs)
- @expression(EP, eCFixEnergy_VS[y in STOR],
+ @expression(EP, eCFixEnergy_VS[y in STOR],
if y in NEW_CAP_STOR # Resources eligible for new capacity
- inv_cost_per_mwhyr(gen[y])*vCAPENERGY_VS[y] + fixed_om_cost_per_mwhyr(gen[y])*eTotalCap_STOR[y]
+ inv_cost_per_mwhyr(gen[y]) * vCAPENERGY_VS[y] +
+ fixed_om_cost_per_mwhyr(gen[y]) * eTotalCap_STOR[y]
else
- fixed_om_cost_per_mwhyr(gen[y])*eTotalCap_STOR[y]
- end
- )
+ fixed_om_cost_per_mwhyr(gen[y]) * eTotalCap_STOR[y]
+ end)
@expression(EP, eTotalCFixStor, sum(eCFixEnergy_VS[y] for y in STOR))
if MultiStage == 1
- EP[:eObj] += eTotalCFixStor/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixStor / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixStor
end
# Variable costs of charging DC for VRE-STOR resources "y" during hour "t"
- @expression(EP, eCVar_Charge_DC[y in DC_CHARGE, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_charge_dc)*EP[:vP_DC_CHARGE][y,t]/by_rid(y,:etainverter))
+ @expression(EP, eCVar_Charge_DC[y in DC_CHARGE, t = 1:T],
+ inputs["omega"][t] * by_rid(y, :var_om_cost_per_mwh_charge_dc) *
+ EP[:vP_DC_CHARGE][y, t]/by_rid(y, :etainverter))
# Variable costs of discharging DC for VRE-STOR resources "y" during hour "t"
- @expression(EP, eCVar_Discharge_DC[y in DC_DISCHARGE, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_discharge_dc)*by_rid(y,:etainverter)*EP[:vP_DC_DISCHARGE][y,t])
+ @expression(EP, eCVar_Discharge_DC[y in DC_DISCHARGE, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_discharge_dc)*
+ by_rid(y, :etainverter)*EP[:vP_DC_DISCHARGE][y, t])
# Variable costs of charging AC for VRE-STOR resources "y" during hour "t"
- @expression(EP, eCVar_Charge_AC[y in AC_CHARGE, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_charge_ac)*EP[:vP_AC_CHARGE][y,t])
+ @expression(EP, eCVar_Charge_AC[y in AC_CHARGE, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_charge_ac)*
+ EP[:vP_AC_CHARGE][y, t])
# Variable costs of discharging AC for VRE-STOR resources "y" during hour "t"
- @expression(EP, eCVar_Discharge_AC[y in AC_DISCHARGE, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_discharge_ac)*EP[:vP_AC_DISCHARGE][y,t])
+ @expression(EP, eCVar_Discharge_AC[y in AC_DISCHARGE, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_discharge_ac)*
+ EP[:vP_AC_DISCHARGE][y, t])
# Sum individual resource contributions
- @expression(EP, eTotalCVarStor, sum(eCVar_Charge_DC[y,t] for y in DC_CHARGE, t=1:T)
- + sum(eCVar_Discharge_DC[y,t] for y in DC_DISCHARGE, t=1:T)
- + sum(eCVar_Charge_AC[y,t] for y in AC_CHARGE, t=1:T)
- + sum(eCVar_Discharge_AC[y,t] for y in AC_CHARGE, t=1:T))
+ @expression(EP,
+ eTotalCVarStor,
+ sum(eCVar_Charge_DC[y, t] for y in DC_CHARGE, t in 1:T)
+ +sum(eCVar_Discharge_DC[y, t] for y in DC_DISCHARGE, t in 1:T)
+ +sum(eCVar_Charge_AC[y, t] for y in AC_CHARGE, t in 1:T)
+ +sum(eCVar_Discharge_AC[y, t] for y in AC_CHARGE, t in 1:T))
EP[:eObj] += eTotalCVarStor
# 3. Inverter & Power Balance, SoC Expressions
# Check for rep_periods > 1 & LDS=1
- if rep_periods > 1 && !isempty(VS_LDS)
- CONSTRAINTSET = inputs["VS_nonLDS"]
- else
- CONSTRAINTSET = STOR
- end
+ if rep_periods > 1 && !isempty(VS_LDS)
+ CONSTRAINTSET = inputs["VS_nonLDS"]
+ else
+ CONSTRAINTSET = STOR
+ end
# SoC expressions
@expression(EP, eSoCBalStart_VRE_STOR[y in CONSTRAINTSET, t in START_SUBPERIODS],
- vS_VRE_STOR[y,t+hours_per_subperiod-1] - self_discharge(gen[y])*vS_VRE_STOR[y,t+hours_per_subperiod-1])
+ vS_VRE_STOR[y,
+ t + hours_per_subperiod - 1]-self_discharge(gen[y]) *
+ vS_VRE_STOR[y, t + hours_per_subperiod - 1])
@expression(EP, eSoCBalInterior_VRE_STOR[y in STOR, t in INTERIOR_SUBPERIODS],
- vS_VRE_STOR[y,t-1] - self_discharge(gen[y])*vS_VRE_STOR[y,t-1])
+ vS_VRE_STOR[y, t - 1]-self_discharge(gen[y]) * vS_VRE_STOR[y, t - 1])
# Expression for energy losses related to technologies (increase in effective demand)
@expression(EP, eELOSS_VRE_STOR[y in STOR], JuMP.AffExpr())
@@ -1085,117 +1141,134 @@ function stor_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
AC_CHARGE_CONSTRAINTSET = intersect(CONSTRAINTSET, AC_CHARGE)
for t in START_SUBPERIODS
for y in DC_DISCHARGE_CONSTRAINTSET
- eSoCBalStart_VRE_STOR[y,t] -= EP[:vP_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eSoCBalStart_VRE_STOR[y, t] -= EP[:vP_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE_CONSTRAINTSET
- eSoCBalStart_VRE_STOR[y,t] += by_rid(y,:eff_up_dc)*EP[:vP_DC_CHARGE][y,t]
+ eSoCBalStart_VRE_STOR[y, t] += by_rid(y, :eff_up_dc) * EP[:vP_DC_CHARGE][y, t]
end
for y in AC_DISCHARGE_CONSTRAINTSET
- eSoCBalStart_VRE_STOR[y,t] -= EP[:vP_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eSoCBalStart_VRE_STOR[y, t] -= EP[:vP_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE_CONSTRAINTSET
- eSoCBalStart_VRE_STOR[y,t] += by_rid(y,:eff_up_ac)*EP[:vP_AC_CHARGE][y,t]
+ eSoCBalStart_VRE_STOR[y, t] += by_rid(y, :eff_up_ac) * EP[:vP_AC_CHARGE][y, t]
end
end
for y in DC_DISCHARGE
- EP[:eELOSS_VRE_STOR][y] -= sum(inputs["omega"][t]*vP_DC_DISCHARGE[y,t]*by_rid(y,:etainverter) for t=1:T)
- for t=1:T
- EP[:eInvACBalance][y,t] += by_rid(y,:etainverter)*vP_DC_DISCHARGE[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vP_DC_DISCHARGE[y,t]
+ EP[:eELOSS_VRE_STOR][y] -= sum(inputs["omega"][t] * vP_DC_DISCHARGE[y, t] *
+ by_rid(y, :etainverter) for t in 1:T)
+ for t in 1:T
+ EP[:eInvACBalance][y, t] += by_rid(y, :etainverter) * vP_DC_DISCHARGE[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vP_DC_DISCHARGE[y, t]
end
for t in INTERIOR_SUBPERIODS
- eSoCBalInterior_VRE_STOR[y,t] -= EP[:vP_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eSoCBalInterior_VRE_STOR[y, t] -= EP[:vP_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
end
for y in DC_CHARGE
- EP[:eELOSS_VRE_STOR][y] += sum(inputs["omega"][t]*vP_DC_CHARGE[y,t]/by_rid(y,:etainverter) for t=1:T)
- for t=1:T
- EP[:eInvACBalance][y,t] -= vP_DC_CHARGE[y,t]/by_rid(y,:etainverter)
- EP[:eInverterExport][y,t] += vP_DC_CHARGE[y,t]/by_rid(y,:etainverter)
+ EP[:eELOSS_VRE_STOR][y] += sum(inputs["omega"][t] * vP_DC_CHARGE[y, t] /
+ by_rid(y, :etainverter) for t in 1:T)
+ for t in 1:T
+ EP[:eInvACBalance][y, t] -= vP_DC_CHARGE[y, t] / by_rid(y, :etainverter)
+ EP[:eInverterExport][y, t] += vP_DC_CHARGE[y, t] / by_rid(y, :etainverter)
end
for t in INTERIOR_SUBPERIODS
- eSoCBalInterior_VRE_STOR[y,t] += by_rid(y,:eff_up_dc)*EP[:vP_DC_CHARGE][y,t]
+ eSoCBalInterior_VRE_STOR[y, t] += by_rid(y, :eff_up_dc) *
+ EP[:vP_DC_CHARGE][y, t]
end
end
for y in AC_DISCHARGE
- EP[:eELOSS_VRE_STOR][y] -= sum(inputs["omega"][t]*vP_AC_DISCHARGE[y,t] for t=1:T)
- for t=1:T
- EP[:eInvACBalance][y,t] += vP_AC_DISCHARGE[y,t]
+ EP[:eELOSS_VRE_STOR][y] -= sum(inputs["omega"][t] * vP_AC_DISCHARGE[y, t]
+ for t in 1:T)
+ for t in 1:T
+ EP[:eInvACBalance][y, t] += vP_AC_DISCHARGE[y, t]
end
for t in INTERIOR_SUBPERIODS
- eSoCBalInterior_VRE_STOR[y,t] -= EP[:vP_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eSoCBalInterior_VRE_STOR[y, t] -= EP[:vP_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
end
for y in AC_CHARGE
- EP[:eELOSS_VRE_STOR][y] += sum(inputs["omega"][t]*vP_AC_CHARGE[y,t] for t=1:T)
- for t=1:T
- EP[:eInvACBalance][y,t] -= vP_AC_CHARGE[y,t]
+ EP[:eELOSS_VRE_STOR][y] += sum(inputs["omega"][t] * vP_AC_CHARGE[y, t] for t in 1:T)
+ for t in 1:T
+ EP[:eInvACBalance][y, t] -= vP_AC_CHARGE[y, t]
end
for t in INTERIOR_SUBPERIODS
- eSoCBalInterior_VRE_STOR[y,t] += by_rid(y,:eff_up_ac)*EP[:vP_AC_CHARGE][y,t]
+ eSoCBalInterior_VRE_STOR[y, t] += by_rid(y, :eff_up_ac) *
+ EP[:vP_AC_CHARGE][y, t]
end
end
- for y in STOR, t=1:T
- EP[:eInvACBalance][y,t] += vCHARGE_VRE_STOR[y,t]
- EP[:eGridExport][y,t] += vCHARGE_VRE_STOR[y,t]
+ for y in STOR, t in 1:T
+ EP[:eInvACBalance][y, t] += vCHARGE_VRE_STOR[y, t]
+ EP[:eGridExport][y, t] += vCHARGE_VRE_STOR[y, t]
end
- for z in 1:Z, t=1:T
- if !isempty(resources_in_zone_by_rid(gen_VRE_STOR,z))
- EP[:ePowerBalance_VRE_STOR][t, z] -= sum(vCHARGE_VRE_STOR[y,t] for y=intersect(resources_in_zone_by_rid(gen_VRE_STOR,z),STOR))
+ for z in 1:Z, t in 1:T
+ if !isempty(resources_in_zone_by_rid(gen_VRE_STOR, z))
+ EP[:ePowerBalance_VRE_STOR][t, z] -= sum(vCHARGE_VRE_STOR[y, t]
+ for y in intersect(resources_in_zone_by_rid(gen_VRE_STOR,
+ z),
+ STOR))
end
end
# 4. Energy Share Requirement & CO2 Policy Module
# From CO2 Policy module
- @expression(EP, eELOSSByZone_VRE_STOR[z=1:Z],
- sum(EP[:eELOSS_VRE_STOR][y] for y in intersect(resources_in_zone_by_rid(gen_VRE_STOR,z),STOR)))
+ @expression(EP, eELOSSByZone_VRE_STOR[z = 1:Z],
+ sum(EP[:eELOSS_VRE_STOR][y]
+ for y in intersect(resources_in_zone_by_rid(gen_VRE_STOR, z), STOR)))
add_similar_to_expression!(EP[:eELOSSByZone], eELOSSByZone_VRE_STOR)
### CONSTRAINTS ###
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapEnergy_VS[y in STOR], EP[:vEXISTINGCAPENERGY_VS][y] == existing_cap_mwh(gen[y]))
- end
+ @constraint(EP,
+ cExistingCapEnergy_VS[y in STOR],
+ EP[:vEXISTINGCAPENERGY_VS][y]==existing_cap_mwh(gen[y]))
+ end
# Constraints 1: Retirements and capacity additions
# Cannot retire more capacity than existing capacity for VRE-STOR technologies
- @constraint(EP, cMaxRet_Stor[y=RET_CAP_STOR], vRETCAPENERGY_VS[y] <= eExistingCapEnergy_VS[y])
+ @constraint(EP,
+ cMaxRet_Stor[y = RET_CAP_STOR],
+ vRETCAPENERGY_VS[y]<=eExistingCapEnergy_VS[y])
# Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- @constraint(EP, cMaxCap_Stor[y in intersect(ids_with_nonneg(gen, max_cap_mwh), STOR)],
- eTotalCap_STOR[y] <= max_cap_mwh(gen[y]))
+ # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
+ @constraint(EP, cMaxCap_Stor[y in intersect(ids_with_nonneg(gen, max_cap_mwh), STOR)],
+ eTotalCap_STOR[y]<=max_cap_mwh(gen[y]))
# Constraint on minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- @constraint(EP, cMinCap_Stor[y in intersect(ids_with_positive(gen, min_cap_mwh), STOR)],
- eTotalCap_STOR[y] >= min_cap_mwh(gen[y]))
+ @constraint(EP, cMinCap_Stor[y in intersect(ids_with_positive(gen, min_cap_mwh), STOR)],
+ eTotalCap_STOR[y]>=min_cap_mwh(gen[y]))
# Constraint 2: SOC Maximum
- @constraint(EP, cSOCMax[y in STOR, t=1:T], vS_VRE_STOR[y,t] <= eTotalCap_STOR[y])
+ @constraint(EP, cSOCMax[y in STOR, t = 1:T], vS_VRE_STOR[y, t]<=eTotalCap_STOR[y])
# Constraint 3: State of Charge (energy stored for the next hour)
@constraint(EP, cSoCBalStart_VRE_STOR[y in CONSTRAINTSET, t in START_SUBPERIODS],
- vS_VRE_STOR[y,t] == eSoCBalStart_VRE_STOR[y,t])
- @constraint(EP, cSoCBalInterior_VRE_STOR[y in STOR, t in INTERIOR_SUBPERIODS],
- vS_VRE_STOR[y,t] == eSoCBalInterior_VRE_STOR[y,t])
+ vS_VRE_STOR[y, t]==eSoCBalStart_VRE_STOR[y, t])
+ @constraint(EP, cSoCBalInterior_VRE_STOR[y in STOR, t in INTERIOR_SUBPERIODS],
+ vS_VRE_STOR[y, t]==eSoCBalInterior_VRE_STOR[y, t])
### SYMMETRIC RESOURCE CONSTRAINTS ###
if !isempty(VS_SYM_DC)
# Constraint 4: Charging + Discharging DC Maximum: see main module because capacity reserve margin/operating reserves may alter constraint
- @expression(EP, eChargeDischargeMaxDC[y in VS_SYM_DC, t=1:T],
- EP[:vP_DC_DISCHARGE][y,t] + EP[:vP_DC_CHARGE][y,t])
+ @expression(EP, eChargeDischargeMaxDC[y in VS_SYM_DC, t = 1:T],
+ EP[:vP_DC_DISCHARGE][y, t]+EP[:vP_DC_CHARGE][y, t])
end
if !isempty(VS_SYM_AC)
# Constraint 4: Charging + Discharging AC Maximum: see main module because capacity reserve margin/operating reserves may alter constraint
- @expression(EP, eChargeDischargeMaxAC[y in VS_SYM_AC, t=1:T],
- EP[:vP_AC_DISCHARGE][y,t] + EP[:vP_AC_CHARGE][y,t])
+ @expression(EP, eChargeDischargeMaxAC[y in VS_SYM_AC, t = 1:T],
+ EP[:vP_AC_DISCHARGE][y, t]+EP[:vP_AC_CHARGE][y, t])
end
### ASYMMETRIC RESOURCE MODULE ###
@@ -1234,7 +1307,6 @@ The rest of the long duration energy storage constraints are copied and applied
long duration energy storage resources are further elaborated upon in ```vre_stor_capres!()```.
"""
function lds_vre_stor!(EP::Model, inputs::Dict)
-
println("VRE-STOR LDS Module")
### LOAD DATA ###
@@ -1244,11 +1316,11 @@ function lds_vre_stor!(EP::Model, inputs::Dict)
gen_VRE_STOR = gen.VreStorage
REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
- dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
- NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
+ dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
+ NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
- MODELED_PERIODS_INDEX = 1:NPeriods
- REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!,:Rep_Period] .== MODELED_PERIODS_INDEX]
+ MODELED_PERIODS_INDEX = 1:NPeriods
+ REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!, :Rep_Period] .== MODELED_PERIODS_INDEX]
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
@@ -1259,57 +1331,74 @@ function lds_vre_stor!(EP::Model, inputs::Dict)
vSOCw_VRE_STOR[y in VS_LDS, n in MODELED_PERIODS_INDEX] >= 0
# Build up in storage inventory over each representative period w (can be pos or neg)
- vdSOC_VRE_STOR[y in VS_LDS, w=1:REP_PERIOD]
+ vdSOC_VRE_STOR[y in VS_LDS, w = 1:REP_PERIOD]
end)
### EXPRESSIONS ###
# Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @expression(EP, eVreStorSoCBalLongDurationStorageStart[y in VS_LDS, w=1:REP_PERIOD],
- (1-self_discharge(gen[y])) * (EP[:vS_VRE_STOR][y,hours_per_subperiod*w]-EP[:vdSOC_VRE_STOR][y,w]))
-
+ @expression(EP, eVreStorSoCBalLongDurationStorageStart[y in VS_LDS, w = 1:REP_PERIOD],
+ (1 -
+ self_discharge(gen[y]))*(EP[:vS_VRE_STOR][y, hours_per_subperiod * w] -
+ EP[:vdSOC_VRE_STOR][y, w]))
+
DC_DISCHARGE_CONSTRAINTSET = intersect(inputs["VS_STOR_DC_DISCHARGE"], VS_LDS)
DC_CHARGE_CONSTRAINTSET = intersect(inputs["VS_STOR_DC_CHARGE"], VS_LDS)
AC_DISCHARGE_CONSTRAINTSET = intersect(inputs["VS_STOR_AC_DISCHARGE"], VS_LDS)
AC_CHARGE_CONSTRAINTSET = intersect(inputs["VS_STOR_AC_CHARGE"], VS_LDS)
- for w=1:REP_PERIOD
+ for w in 1:REP_PERIOD
for y in DC_DISCHARGE_CONSTRAINTSET
- EP[:eVreStorSoCBalLongDurationStorageStart][y,w] -= EP[:vP_DC_DISCHARGE][y,hours_per_subperiod*(w-1)+1]/by_rid(y,:eff_down_dc)
+ EP[:eVreStorSoCBalLongDurationStorageStart][y, w] -= EP[:vP_DC_DISCHARGE][y,
+ hours_per_subperiod * (w - 1) + 1] / by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE_CONSTRAINTSET
- EP[:eVreStorSoCBalLongDurationStorageStart][y,w] += by_rid(y,:eff_up_dc)*EP[:vP_DC_CHARGE][y,hours_per_subperiod*(w-1)+1]
+ EP[:eVreStorSoCBalLongDurationStorageStart][y, w] += by_rid(y, :eff_up_dc) *
+ EP[:vP_DC_CHARGE][y,
+ hours_per_subperiod * (w - 1) + 1]
end
for y in AC_DISCHARGE_CONSTRAINTSET
- EP[:eVreStorSoCBalLongDurationStorageStart][y,w] -= EP[:vP_AC_DISCHARGE][y,hours_per_subperiod*(w-1)+1]/by_rid(y,:eff_down_ac)
+ EP[:eVreStorSoCBalLongDurationStorageStart][y, w] -= EP[:vP_AC_DISCHARGE][y,
+ hours_per_subperiod * (w - 1) + 1] / by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE_CONSTRAINTSET
- EP[:eVreStorSoCBalLongDurationStorageStart][y,w] += by_rid(y,:eff_up_ac)*EP[:vP_AC_CHARGE][y,hours_per_subperiod*(w-1)+1]
+ EP[:eVreStorSoCBalLongDurationStorageStart][y, w] += by_rid(y, :eff_up_ac) *
+ EP[:vP_AC_CHARGE][y,
+ hours_per_subperiod * (w - 1) + 1]
end
end
### CONSTRAINTS ###
# Constraint 1: Link the state of charge between the start of periods for LDS resources
- @constraint(EP, cVreStorSoCBalLongDurationStorageStart[y in VS_LDS, w=1:REP_PERIOD],
- EP[:vS_VRE_STOR][y,hours_per_subperiod*(w-1)+1] == EP[:eVreStorSoCBalLongDurationStorageStart][y,w])
+ @constraint(EP, cVreStorSoCBalLongDurationStorageStart[y in VS_LDS, w = 1:REP_PERIOD],
+ EP[:vS_VRE_STOR][y,
+ hours_per_subperiod * (w - 1) + 1]==EP[:eVreStorSoCBalLongDurationStorageStart][y, w])
# Constraint 2: Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
# Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cVreStorSoCBalLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
- EP[:vSOCw_VRE_STOR][y,mod1(r+1, NPeriods)] == EP[:vSOCw_VRE_STOR][y,r] + EP[:vdSOC_VRE_STOR][y,dfPeriodMap[r,:Rep_Period_Index]])
+ @constraint(EP,
+ cVreStorSoCBalLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
+ EP[:vSOCw_VRE_STOR][y,
+ mod1(r + 1, NPeriods)]==EP[:vSOCw_VRE_STOR][y, r] +
+ EP[:vdSOC_VRE_STOR][
+ y, dfPeriodMap[r, :Rep_Period_Index]])
# Constraint 3: Storage at beginning of each modeled period cannot exceed installed energy capacity
- @constraint(EP, cVreStorSoCBalLongDurationStorageUpper[y in VS_LDS, r in MODELED_PERIODS_INDEX],
- EP[:vSOCw_VRE_STOR][y,r] <= EP[:eTotalCap_STOR][y])
+ @constraint(EP,
+ cVreStorSoCBalLongDurationStorageUpper[y in VS_LDS, r in MODELED_PERIODS_INDEX],
+ EP[:vSOCw_VRE_STOR][y, r]<=EP[:eTotalCap_STOR][y])
# Constraint 4: Initial storage level for representative periods must also adhere to sub-period storage inventory balance
# Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cVreStorSoCBalLongDurationStorageSub[y in VS_LDS, r in REP_PERIODS_INDEX],
- EP[:vSOCw_VRE_STOR][y,r] == EP[:vS_VRE_STOR][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]]
- - EP[:vdSOC_VRE_STOR][y,dfPeriodMap[r,:Rep_Period_Index]])
+ @constraint(EP,
+ cVreStorSoCBalLongDurationStorageSub[y in VS_LDS, r in REP_PERIODS_INDEX],
+ EP[:vSOCw_VRE_STOR][y,r]==EP[:vS_VRE_STOR][
+ y, hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]]
+ -
+ EP[:vdSOC_VRE_STOR][y, dfPeriodMap[r, :Rep_Period_Index]])
end
@doc raw"""
@@ -1437,7 +1526,6 @@ In addition, this function adds investment and fixed O&M costs related to charge
```
"""
function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Charge Investment Module")
### LOAD INPUTS ###
@@ -1464,8 +1552,13 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
if !isempty(VS_ASYM_DC_DISCHARGE)
- MAX_DC_DISCHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_discharge_dc_mw), VS_ASYM_DC_DISCHARGE)
- MIN_DC_DISCHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_discharge_dc_mw), VS_ASYM_DC_DISCHARGE)
+ MAX_DC_DISCHARGE = intersect(
+ ids_with_nonneg(gen_VRE_STOR, max_cap_discharge_dc_mw),
+ VS_ASYM_DC_DISCHARGE)
+ MIN_DC_DISCHARGE = intersect(
+ ids_with_positive(gen_VRE_STOR,
+ min_cap_discharge_dc_mw),
+ VS_ASYM_DC_DISCHARGE)
### VARIABLES ###
@variables(EP, begin
@@ -1474,47 +1567,53 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPDISCHARGEDC[y in VS_ASYM_DC_DISCHARGE] >= 0);
+ @variable(EP, vEXISTINGCAPDISCHARGEDC[y in VS_ASYM_DC_DISCHARGE]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE], vEXISTINGCAPDISCHARGEDC[y])
+ @expression(EP,
+ eExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE],
+ vEXISTINGCAPDISCHARGEDC[y])
else
- @expression(EP, eExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE], by_rid(y,:existing_cap_discharge_dc_mw))
+ @expression(EP,
+ eExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE],
+ by_rid(y, :existing_cap_discharge_dc_mw))
end
# 1. Total storage discharge DC capacity
@expression(EP, eTotalCapDischarge_DC[y in VS_ASYM_DC_DISCHARGE],
if (y in intersect(NEW_CAP_DISCHARGE_DC, RET_CAP_DISCHARGE_DC))
- eExistingCapDischargeDC[y] + EP[:vCAPDISCHARGE_DC][y] - EP[:vRETCAPDISCHARGE_DC][y]
+ eExistingCapDischargeDC[y] + EP[:vCAPDISCHARGE_DC][y] -
+ EP[:vRETCAPDISCHARGE_DC][y]
elseif (y in setdiff(NEW_CAP_DISCHARGE_DC, RET_CAP_DISCHARGE_DC))
eExistingCapDischargeDC[y] + EP[:vCAPDISCHARGE_DC][y]
elseif (y in setdiff(RET_CAP_DISCHARGE_DC, NEW_CAP_DISCHARGE_DC))
eExistingCapDischargeDC[y] - EP[:vRETCAPDISCHARGE_DC][y]
else
eExistingCapDischargeDC[y]
- end
- )
+ end)
# 2. Objective Function Additions
# If resource is not eligible for new discharge DC capacity, fixed costs are only O&M costs
@expression(EP, eCFixDischarge_DC[y in VS_ASYM_DC_DISCHARGE],
if y in NEW_CAP_DISCHARGE_DC # Resources eligible for new discharge DC capacity
- by_rid(y,:inv_cost_discharge_dc_per_mwyr)*vCAPDISCHARGE_DC[y] + by_rid(y,:fixed_om_cost_discharge_dc_per_mwyr)*eTotalCapDischarge_DC[y]
+ by_rid(y, :inv_cost_discharge_dc_per_mwyr) * vCAPDISCHARGE_DC[y] +
+ by_rid(y, :fixed_om_cost_discharge_dc_per_mwyr) * eTotalCapDischarge_DC[y]
else
- by_rid(y,:fixed_om_cost_discharge_dc_per_mwyr)*eTotalCapDischarge_DC[y]
- end
- )
-
+ by_rid(y, :fixed_om_cost_discharge_dc_per_mwyr) * eTotalCapDischarge_DC[y]
+ end)
+
# Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixDischarge_DC, sum(EP[:eCFixDischarge_DC][y] for y in VS_ASYM_DC_DISCHARGE))
+ @expression(EP,
+ eTotalCFixDischarge_DC,
+ sum(EP[:eCFixDischarge_DC][y] for y in VS_ASYM_DC_DISCHARGE))
if MultiStage == 1
- EP[:eObj] += eTotalCFixDischarge_DC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixDischarge_DC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixDischarge_DC
end
@@ -1523,29 +1622,41 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE], EP[:vEXISTINGCAPDISCHARGEDC][y] == by_rid(y,:existing_cap_discharge_dc_mw))
+ @constraint(EP,
+ cExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE],
+ EP[:vEXISTINGCAPDISCHARGEDC][y]==by_rid(y, :existing_cap_discharge_dc_mw))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more discharge DC capacity than existing discharge capacity
- @constraint(EP, cVreStorMaxRetDischargeDC[y in RET_CAP_DISCHARGE_DC], vRETCAPDISCHARGE_DC[y] <= eExistingCapDischargeDC[y])
+ @constraint(EP,
+ cVreStorMaxRetDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ vRETCAPDISCHARGE_DC[y]<=eExistingCapDischargeDC[y])
# Constraint on maximum discharge DC capacity (if applicable) [set input to -1 if no constraint on maximum discharge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMaxCapDischargeDC[y in MAX_DC_DISCHARGE], eTotalCapDischarge_DC[y] <= by_rid(y,:Max_Cap_Discharge_DC_MW))
+ @constraint(EP,
+ cVreStorMaxCapDischargeDC[y in MAX_DC_DISCHARGE],
+ eTotalCapDischarge_DC[y]<=by_rid(y, :Max_Cap_Discharge_DC_MW))
# Constraint on minimum discharge DC capacity (if applicable) [set input to -1 if no constraint on minimum discharge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMinCapDischargeDC[y in MIN_DC_DISCHARGE], eTotalCapDischarge_DC[y] >= by_rid(y,:Min_Cap_Discharge_DC_MW))
+ @constraint(EP,
+ cVreStorMinCapDischargeDC[y in MIN_DC_DISCHARGE],
+ eTotalCapDischarge_DC[y]>=by_rid(y, :Min_Cap_Discharge_DC_MW))
# Constraint 2: Maximum discharging must be less than discharge power rating
- @expression(EP, eVreStorMaxDischargingDC[y in VS_ASYM_DC_DISCHARGE, t=1:T], JuMP.AffExpr())
- for y in VS_ASYM_DC_DISCHARGE, t=1:T
- eVreStorMaxDischargingDC[y,t] += EP[:vP_DC_DISCHARGE][y,t]
+ @expression(EP,
+ eVreStorMaxDischargingDC[y in VS_ASYM_DC_DISCHARGE, t = 1:T],
+ JuMP.AffExpr())
+ for y in VS_ASYM_DC_DISCHARGE, t in 1:T
+ eVreStorMaxDischargingDC[y, t] += EP[:vP_DC_DISCHARGE][y, t]
end
end
-
+
if !isempty(VS_ASYM_DC_CHARGE)
- MAX_DC_CHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_charge_dc_mw), VS_ASYM_DC_CHARGE)
- MIN_DC_CHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_charge_dc_mw), VS_ASYM_DC_CHARGE)
+ MAX_DC_CHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_charge_dc_mw),
+ VS_ASYM_DC_CHARGE)
+ MIN_DC_CHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_charge_dc_mw),
+ VS_ASYM_DC_CHARGE)
### VARIABLES ###
@variables(EP, begin
@@ -1554,16 +1665,20 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPCHARGEDC[y in VS_ASYM_DC_CHARGE] >= 0);
+ @variable(EP, vEXISTINGCAPCHARGEDC[y in VS_ASYM_DC_CHARGE]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapChargeDC[y in VS_ASYM_DC_CHARGE], vEXISTINGCAPCHARGEDC[y])
+ @expression(EP,
+ eExistingCapChargeDC[y in VS_ASYM_DC_CHARGE],
+ vEXISTINGCAPCHARGEDC[y])
else
- @expression(EP, eExistingCapChargeDC[y in VS_ASYM_DC_CHARGE], by_rid(y,:existing_cap_charge_dc_mw))
+ @expression(EP,
+ eExistingCapChargeDC[y in VS_ASYM_DC_CHARGE],
+ by_rid(y, :existing_cap_charge_dc_mw))
end
# 1. Total storage charge DC capacity
@@ -1576,25 +1691,26 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
eExistingCapChargeDC[y] - EP[:vRETCAPCHARGE_DC][y]
else
eExistingCapChargeDC[y]
- end
- )
+ end)
# 2. Objective Function Additions
# If resource is not eligible for new charge DC capacity, fixed costs are only O&M costs
@expression(EP, eCFixCharge_DC[y in VS_ASYM_DC_CHARGE],
if y in NEW_CAP_CHARGE_DC # Resources eligible for new charge DC capacity
- by_rid(y,:inv_cost_charge_dc_per_mwyr)*vCAPCHARGE_DC[y] + by_rid(y,:fixed_om_cost_charge_dc_per_mwyr)*eTotalCapCharge_DC[y]
+ by_rid(y, :inv_cost_charge_dc_per_mwyr) * vCAPCHARGE_DC[y] +
+ by_rid(y, :fixed_om_cost_charge_dc_per_mwyr) * eTotalCapCharge_DC[y]
else
- by_rid(y,:fixed_om_cost_charge_dc_per_mwyr)*eTotalCapCharge_DC[y]
- end
- )
-
+ by_rid(y, :fixed_om_cost_charge_dc_per_mwyr) * eTotalCapCharge_DC[y]
+ end)
+
# Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixCharge_DC, sum(EP[:eCFixCharge_DC][y] for y in VS_ASYM_DC_CHARGE))
+ @expression(EP,
+ eTotalCFixCharge_DC,
+ sum(EP[:eCFixCharge_DC][y] for y in VS_ASYM_DC_CHARGE))
if MultiStage == 1
- EP[:eObj] += eTotalCFixCharge_DC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixCharge_DC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixCharge_DC
end
@@ -1603,29 +1719,44 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapChargeDC[y in VS_ASYM_DC_CHARGE], EP[:vEXISTINGCAPCHARGEDC][y] == by_rid(y,:Existing_Cap_Charge_DC_MW))
+ @constraint(EP,
+ cExistingCapChargeDC[y in VS_ASYM_DC_CHARGE],
+ EP[:vEXISTINGCAPCHARGEDC][y]==by_rid(y, :Existing_Cap_Charge_DC_MW))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more charge DC capacity than existing charge capacity
- @constraint(EP, cVreStorMaxRetChargeDC[y in RET_CAP_CHARGE_DC], vRETCAPCHARGE_DC[y] <= eExistingCapChargeDC[y])
+ @constraint(EP,
+ cVreStorMaxRetChargeDC[y in RET_CAP_CHARGE_DC],
+ vRETCAPCHARGE_DC[y]<=eExistingCapChargeDC[y])
# Constraint on maximum charge DC capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMaxCapChargeDC[y in MAX_DC_CHARGE], eTotalCapCharge_DC[y] <= by_rid(y,:max_cap_charge_dc_mw))
+ @constraint(EP,
+ cVreStorMaxCapChargeDC[y in MAX_DC_CHARGE],
+ eTotalCapCharge_DC[y]<=by_rid(y, :max_cap_charge_dc_mw))
# Constraint on minimum charge DC capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMinCapChargeDC[y in MIN_DC_CHARGE], eTotalCapCharge_DC[y] >= by_rid(y,:min_cap_charge_dc_mw))
+ @constraint(EP,
+ cVreStorMinCapChargeDC[y in MIN_DC_CHARGE],
+ eTotalCapCharge_DC[y]>=by_rid(y, :min_cap_charge_dc_mw))
# Constraint 2: Maximum charging must be less than charge power rating
- @expression(EP, eVreStorMaxChargingDC[y in VS_ASYM_DC_CHARGE, t=1:T], JuMP.AffExpr())
- for y in VS_ASYM_DC_CHARGE, t=1:T
- eVreStorMaxChargingDC[y,t] += EP[:vP_DC_CHARGE][y,t]
+ @expression(EP,
+ eVreStorMaxChargingDC[y in VS_ASYM_DC_CHARGE, t = 1:T],
+ JuMP.AffExpr())
+ for y in VS_ASYM_DC_CHARGE, t in 1:T
+ eVreStorMaxChargingDC[y, t] += EP[:vP_DC_CHARGE][y, t]
end
end
if !isempty(VS_ASYM_AC_DISCHARGE)
- MAX_AC_DISCHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_discharge_ac_mw), VS_ASYM_AC_DISCHARGE)
- MIN_AC_DISCHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_discharge_ac_mw), VS_ASYM_AC_DISCHARGE)
+ MAX_AC_DISCHARGE = intersect(
+ ids_with_nonneg(gen_VRE_STOR, max_cap_discharge_ac_mw),
+ VS_ASYM_AC_DISCHARGE)
+ MIN_AC_DISCHARGE = intersect(
+ ids_with_positive(gen_VRE_STOR,
+ min_cap_discharge_ac_mw),
+ VS_ASYM_AC_DISCHARGE)
### VARIABLES ###
@variables(EP, begin
@@ -1634,47 +1765,53 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPDISCHARGEAC[y in VS_ASYM_AC_DISCHARGE] >= 0);
+ @variable(EP, vEXISTINGCAPDISCHARGEAC[y in VS_ASYM_AC_DISCHARGE]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE], vEXISTINGCAPDISCHARGEAC[y])
+ @expression(EP,
+ eExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE],
+ vEXISTINGCAPDISCHARGEAC[y])
else
- @expression(EP, eExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE], by_rid(y,:existing_cap_discharge_ac_mw))
+ @expression(EP,
+ eExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE],
+ by_rid(y, :existing_cap_discharge_ac_mw))
end
# 1. Total storage discharge AC capacity
@expression(EP, eTotalCapDischarge_AC[y in VS_ASYM_AC_DISCHARGE],
if (y in intersect(NEW_CAP_DISCHARGE_AC, RET_CAP_DISCHARGE_AC))
- eExistingCapDischargeAC[y] + EP[:vCAPDISCHARGE_AC][y] - EP[:vRETCAPDISCHARGE_AC][y]
+ eExistingCapDischargeAC[y] + EP[:vCAPDISCHARGE_AC][y] -
+ EP[:vRETCAPDISCHARGE_AC][y]
elseif (y in setdiff(NEW_CAP_DISCHARGE_AC, RET_CAP_DISCHARGE_AC))
eExistingCapDischargeAC[y] + EP[:vCAPDISCHARGE_AC][y]
elseif (y in setdiff(RET_CAP_DISCHARGE_AC, NEW_CAP_DISCHARGE_AC))
eExistingCapDischargeAC[y] - EP[:vRETCAPDISCHARGE_AC][y]
else
eExistingCapDischargeAC[y]
- end
- )
+ end)
# 2. Objective Function Additions
# If resource is not eligible for new discharge AC capacity, fixed costs are only O&M costs
@expression(EP, eCFixDischarge_AC[y in VS_ASYM_AC_DISCHARGE],
if y in NEW_CAP_DISCHARGE_AC # Resources eligible for new discharge AC capacity
- by_rid(y,:inv_cost_discharge_ac_per_mwyr)*vCAPDISCHARGE_AC[y] + by_rid(y,:fixed_om_cost_discharge_ac_per_mwyr)*eTotalCapDischarge_AC[y]
+ by_rid(y, :inv_cost_discharge_ac_per_mwyr) * vCAPDISCHARGE_AC[y] +
+ by_rid(y, :fixed_om_cost_discharge_ac_per_mwyr) * eTotalCapDischarge_AC[y]
else
- by_rid(y,:fixed_om_cost_discharge_ac_per_mwyr)*eTotalCapDischarge_AC[y]
- end
- )
-
+ by_rid(y, :fixed_om_cost_discharge_ac_per_mwyr) * eTotalCapDischarge_AC[y]
+ end)
+
# Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixDischarge_AC, sum(EP[:eCFixDischarge_AC][y] for y in VS_ASYM_AC_DISCHARGE))
+ @expression(EP,
+ eTotalCFixDischarge_AC,
+ sum(EP[:eCFixDischarge_AC][y] for y in VS_ASYM_AC_DISCHARGE))
if MultiStage == 1
- EP[:eObj] += eTotalCFixDischarge_AC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixDischarge_AC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixDischarge_AC
end
@@ -1683,29 +1820,41 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE], EP[:vEXISTINGCAPDISCHARGEAC][y] == by_rid(y,:existing_cap_discharge_ac_mw))
+ @constraint(EP,
+ cExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE],
+ EP[:vEXISTINGCAPDISCHARGEAC][y]==by_rid(y, :existing_cap_discharge_ac_mw))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more discharge AC capacity than existing charge capacity
- @constraint(EP, cVreStorMaxRetDischargeAC[y in RET_CAP_DISCHARGE_AC], vRETCAPDISCHARGE_AC[y] <= eExistingCapDischargeAC[y])
+ @constraint(EP,
+ cVreStorMaxRetDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ vRETCAPDISCHARGE_AC[y]<=eExistingCapDischargeAC[y])
# Constraint on maximum discharge AC capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMaxCapDischargeAC[y in MAX_AC_DISCHARGE], eTotalCapDischarge_AC[y] <= by_rid(y,:max_cap_discharge_ac_mw))
+ @constraint(EP,
+ cVreStorMaxCapDischargeAC[y in MAX_AC_DISCHARGE],
+ eTotalCapDischarge_AC[y]<=by_rid(y, :max_cap_discharge_ac_mw))
# Constraint on minimum discharge AC capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMinCapDischargeAC[y in MIN_AC_DISCHARGE], eTotalCapDischarge_AC[y] >= by_rid(y,:min_cap_discharge_ac_mw))
+ @constraint(EP,
+ cVreStorMinCapDischargeAC[y in MIN_AC_DISCHARGE],
+ eTotalCapDischarge_AC[y]>=by_rid(y, :min_cap_discharge_ac_mw))
# Constraint 2: Maximum discharging rate must be less than discharge power rating
- @expression(EP, eVreStorMaxDischargingAC[y in VS_ASYM_AC_DISCHARGE, t=1:T], JuMP.AffExpr())
- for y in VS_ASYM_AC_DISCHARGE, t=1:T
- eVreStorMaxDischargingAC[y,t] += EP[:vP_AC_DISCHARGE][y,t]
+ @expression(EP,
+ eVreStorMaxDischargingAC[y in VS_ASYM_AC_DISCHARGE, t = 1:T],
+ JuMP.AffExpr())
+ for y in VS_ASYM_AC_DISCHARGE, t in 1:T
+ eVreStorMaxDischargingAC[y, t] += EP[:vP_AC_DISCHARGE][y, t]
end
end
if !isempty(VS_ASYM_AC_CHARGE)
- MAX_AC_CHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_charge_ac_mw), VS_ASYM_AC_CHARGE)
- MIN_AC_CHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_charge_ac_mw), VS_ASYM_AC_CHARGE)
+ MAX_AC_CHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_charge_ac_mw),
+ VS_ASYM_AC_CHARGE)
+ MIN_AC_CHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_charge_ac_mw),
+ VS_ASYM_AC_CHARGE)
### VARIABLES ###
@variables(EP, begin
@@ -1714,16 +1863,20 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPCHARGEAC[y in VS_ASYM_AC_CHARGE] >= 0);
+ @variable(EP, vEXISTINGCAPCHARGEAC[y in VS_ASYM_AC_CHARGE]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapChargeAC[y in VS_ASYM_AC_CHARGE], vEXISTINGCAPCHARGEAC[y])
+ @expression(EP,
+ eExistingCapChargeAC[y in VS_ASYM_AC_CHARGE],
+ vEXISTINGCAPCHARGEAC[y])
else
- @expression(EP, eExistingCapChargeAC[y in VS_ASYM_AC_CHARGE], by_rid(y,:existing_cap_charge_ac_mw))
+ @expression(EP,
+ eExistingCapChargeAC[y in VS_ASYM_AC_CHARGE],
+ by_rid(y, :existing_cap_charge_ac_mw))
end
# 1. Total storage charge AC capacity
@@ -1736,25 +1889,26 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
eExistingCapChargeAC[y] - EP[:vRETCAPCHARGE_AC][y]
else
eExistingCapChargeAC[y]
- end
- )
+ end)
# 2. Objective Function Additions
# If resource is not eligible for new charge AC capacity, fixed costs are only O&M costs
@expression(EP, eCFixCharge_AC[y in VS_ASYM_AC_CHARGE],
if y in NEW_CAP_CHARGE_AC # Resources eligible for new charge AC capacity
- by_rid(y,:inv_cost_charge_ac_per_mwyr)*vCAPCHARGE_AC[y] + by_rid(y,:fixed_om_cost_charge_ac_per_mwyr)*eTotalCapCharge_AC[y]
+ by_rid(y, :inv_cost_charge_ac_per_mwyr) * vCAPCHARGE_AC[y] +
+ by_rid(y, :fixed_om_cost_charge_ac_per_mwyr) * eTotalCapCharge_AC[y]
else
- by_rid(y,:fixed_om_cost_charge_ac_per_mwyr)*eTotalCapCharge_AC[y]
- end
- )
-
+ by_rid(y, :fixed_om_cost_charge_ac_per_mwyr) * eTotalCapCharge_AC[y]
+ end)
+
# Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixCharge_AC, sum(EP[:eCFixCharge_AC][y] for y in VS_ASYM_AC_CHARGE))
+ @expression(EP,
+ eTotalCFixCharge_AC,
+ sum(EP[:eCFixCharge_AC][y] for y in VS_ASYM_AC_CHARGE))
if MultiStage == 1
- EP[:eObj] += eTotalCFixCharge_AC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixCharge_AC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixCharge_AC
end
@@ -1763,23 +1917,33 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapChargeAC[y in VS_ASYM_AC_CHARGE], EP[:vEXISTINGCAPCHARGEAC][y] == by_rid(y,:existing_cap_charge_ac_mw))
+ @constraint(EP,
+ cExistingCapChargeAC[y in VS_ASYM_AC_CHARGE],
+ EP[:vEXISTINGCAPCHARGEAC][y]==by_rid(y, :existing_cap_charge_ac_mw))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more charge AC capacity than existing charge capacity
- @constraint(EP, cVreStorMaxRetChargeAC[y in RET_CAP_CHARGE_AC], vRETCAPCHARGE_AC[y] <= eExistingCapChargeAC[y])
+ @constraint(EP,
+ cVreStorMaxRetChargeAC[y in RET_CAP_CHARGE_AC],
+ vRETCAPCHARGE_AC[y]<=eExistingCapChargeAC[y])
# Constraint on maximum charge AC capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMaxCapChargeAC[y in MAX_AC_CHARGE], eTotalCapCharge_AC[y] <= by_rid(y,:max_cap_charge_ac_mw))
+ @constraint(EP,
+ cVreStorMaxCapChargeAC[y in MAX_AC_CHARGE],
+ eTotalCapCharge_AC[y]<=by_rid(y, :max_cap_charge_ac_mw))
# Constraint on minimum charge AC capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMinCapChargeAC[y in MIN_AC_CHARGE], eTotalCapCharge_AC[y] >= by_rid(y,:min_cap_charge_ac_mw))
+ @constraint(EP,
+ cVreStorMinCapChargeAC[y in MIN_AC_CHARGE],
+ eTotalCapCharge_AC[y]>=by_rid(y, :min_cap_charge_ac_mw))
# Constraint 2: Maximum charging rate must be less than charge power rating
- @expression(EP, eVreStorMaxChargingAC[y in VS_ASYM_AC_CHARGE, t=1:T], JuMP.AffExpr())
- for y in VS_ASYM_AC_CHARGE, t=1:T
- eVreStorMaxChargingAC[y,t] += EP[:vP_AC_CHARGE][y,t]
+ @expression(EP,
+ eVreStorMaxChargingAC[y in VS_ASYM_AC_CHARGE, t = 1:T],
+ JuMP.AffExpr())
+ for y in VS_ASYM_AC_CHARGE, t in 1:T
+ eVreStorMaxChargingAC[y, t] += EP[:vP_AC_CHARGE][y, t]
end
end
end
@@ -1858,7 +2022,6 @@ All other constraints are identical to those used to track the actual state of c
state of charge, build up storage inventory and state of charge at the beginning of each period.
"""
function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Capacity Reserve Margin Module")
### LOAD DATA ###
@@ -1880,32 +2043,32 @@ function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
VS_LDS = inputs["VS_LDS"]
START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
- hours_per_subperiod = inputs["hours_per_subperiod"] # total number of hours per subperiod
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ hours_per_subperiod = inputs["hours_per_subperiod"] # total number of hours per subperiod
rep_periods = inputs["REP_PERIOD"]
virtual_discharge_cost = inputs["VirtualChargeDischargeCost"]
StorageVirtualDischarge = setup["StorageVirtualDischarge"]
-
+
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
-
+
### VARIABLES ###
@variables(EP, begin
# Virtual DC discharge contributing to capacity reserves at timestep t for VRE-storage cluster y
- vCAPRES_DC_DISCHARGE[y in DC_DISCHARGE, t=1:T] >= 0
+ vCAPRES_DC_DISCHARGE[y in DC_DISCHARGE, t = 1:T] >= 0
# Virtual AC discharge contributing to capacity reserves at timestep t for VRE-storage cluster y
- vCAPRES_AC_DISCHARGE[y in AC_DISCHARGE, t=1:T] >= 0
+ vCAPRES_AC_DISCHARGE[y in AC_DISCHARGE, t = 1:T] >= 0
# Virtual DC charge contributing to capacity reserves at timestep t for VRE-storage cluster y
- vCAPRES_DC_CHARGE[y in DC_CHARGE, t=1:T] >= 0
+ vCAPRES_DC_CHARGE[y in DC_CHARGE, t = 1:T] >= 0
# Virtual AC charge contributing to capacity reserves at timestep t for VRE-storage cluster y
- vCAPRES_AC_CHARGE[y in AC_CHARGE, t=1:T] >= 0
+ vCAPRES_AC_CHARGE[y in AC_CHARGE, t = 1:T] >= 0
# Total state of charge being held in reserve at timestep t for VRE-storage cluster y
- vCAPRES_VS_VRE_STOR[y in STOR, t=1:T] >= 0
+ vCAPRES_VS_VRE_STOR[y in STOR, t = 1:T] >= 0
end)
### EXPRESSIONS ###
@@ -1921,11 +2084,13 @@ function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
# Virtual State of Charge Expressions
@expression(EP, eVreStorVSoCBalStart[y in CONSTRAINTSET, t in START_SUBPERIODS],
- EP[:vCAPRES_VS_VRE_STOR][y,t+hours_per_subperiod-1]
- - self_discharge(gen[y])*EP[:vCAPRES_VS_VRE_STOR][y,t+hours_per_subperiod-1])
+ EP[:vCAPRES_VS_VRE_STOR][y,
+ t + hours_per_subperiod - 1]
+ -self_discharge(gen[y]) * EP[:vCAPRES_VS_VRE_STOR][y, t + hours_per_subperiod - 1])
@expression(EP, eVreStorVSoCBalInterior[y in STOR, t in INTERIOR_SUBPERIODS],
- EP[:vCAPRES_VS_VRE_STOR][y,t-1]
- - self_discharge(gen[y])*EP[:vCAPRES_VS_VRE_STOR][y,t-1])
+ EP[:vCAPRES_VS_VRE_STOR][y,
+ t - 1]
+ -self_discharge(gen[y]) * EP[:vCAPRES_VS_VRE_STOR][y, t - 1])
DC_DISCHARGE_CONSTRAINTSET = intersect(CONSTRAINTSET, DC_DISCHARGE)
DC_CHARGE_CONSTRAINTSET = intersect(CONSTRAINTSET, DC_CHARGE)
@@ -1933,132 +2098,187 @@ function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
AC_CHARGE_CONSTRAINTSET = intersect(CONSTRAINTSET, AC_CHARGE)
for t in START_SUBPERIODS
for y in DC_DISCHARGE_CONSTRAINTSET
- eVreStorVSoCBalStart[y,t] += EP[:vCAPRES_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eVreStorVSoCBalStart[y, t] += EP[:vCAPRES_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE_CONSTRAINTSET
- eVreStorVSoCBalStart[y,t] -= by_rid(y,:eff_up_dc)*EP[:vCAPRES_DC_CHARGE][y,t]
+ eVreStorVSoCBalStart[y, t] -= by_rid(y, :eff_up_dc) *
+ EP[:vCAPRES_DC_CHARGE][y, t]
end
for y in AC_DISCHARGE_CONSTRAINTSET
- eVreStorVSoCBalStart[y,t] += EP[:vCAPRES_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eVreStorVSoCBalStart[y, t] += EP[:vCAPRES_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE_CONSTRAINTSET
- eVreStorVSoCBalStart[y,t] -= by_rid(y,:eff_up_ac)*EP[:vCAPRES_AC_CHARGE][y,t]
+ eVreStorVSoCBalStart[y, t] -= by_rid(y, :eff_up_ac) *
+ EP[:vCAPRES_AC_CHARGE][y, t]
end
end
for t in INTERIOR_SUBPERIODS
for y in DC_DISCHARGE
- eVreStorVSoCBalInterior[y,t] += EP[:vCAPRES_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eVreStorVSoCBalInterior[y, t] += EP[:vCAPRES_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE
- eVreStorVSoCBalInterior[y,t] -= by_rid(y,:eff_up_dc)*EP[:vCAPRES_DC_CHARGE][y,t]
+ eVreStorVSoCBalInterior[y, t] -= by_rid(y, :eff_up_dc) *
+ EP[:vCAPRES_DC_CHARGE][y, t]
end
for y in AC_DISCHARGE
- eVreStorVSoCBalInterior[y,t] += EP[:vCAPRES_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eVreStorVSoCBalInterior[y, t] += EP[:vCAPRES_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE
- eVreStorVSoCBalInterior[y,t] -= by_rid(y,:eff_up_ac)*EP[:vCAPRES_AC_CHARGE][y,t]
+ eVreStorVSoCBalInterior[y, t] -= by_rid(y, :eff_up_ac) *
+ EP[:vCAPRES_AC_CHARGE][y, t]
end
end
# Inverter & grid connection export additions
- for t=1:T
+ for t in 1:T
for y in DC_DISCHARGE
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vCAPRES_DC_DISCHARGE[y,t]
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vCAPRES_DC_DISCHARGE[y,t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) *
+ vCAPRES_DC_DISCHARGE[y, t]
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vCAPRES_DC_DISCHARGE[y, t]
end
for y in DC_CHARGE
- EP[:eInverterExport][y,t] += vCAPRES_DC_CHARGE[y,t]/by_rid(y,:etainverter)
- EP[:eGridExport][y,t] += vCAPRES_DC_CHARGE[y,t]/by_rid(y,:etainverter)
+ EP[:eInverterExport][y, t] += vCAPRES_DC_CHARGE[y, t] / by_rid(y, :etainverter)
+ EP[:eGridExport][y, t] += vCAPRES_DC_CHARGE[y, t] / by_rid(y, :etainverter)
end
for y in AC_DISCHARGE
- EP[:eGridExport][y,t] += vCAPRES_AC_DISCHARGE[y,t]
+ EP[:eGridExport][y, t] += vCAPRES_AC_DISCHARGE[y, t]
end
for y in AC_CHARGE
- EP[:eGridExport][y,t] += vCAPRES_AC_CHARGE[y,t]
+ EP[:eGridExport][y, t] += vCAPRES_AC_CHARGE[y, t]
end
-
- # Asymmetric and symmetric storage contributions
+
+ # Asymmetric and symmetric storage contributions
for y in VS_ASYM_DC_DISCHARGE
- EP[:eVreStorMaxDischargingDC][y,t] += vCAPRES_DC_DISCHARGE[y,t]
+ EP[:eVreStorMaxDischargingDC][y, t] += vCAPRES_DC_DISCHARGE[y, t]
end
for y in VS_ASYM_AC_DISCHARGE
- EP[:eVreStorMaxDischargingAC][y,t] += vCAPRES_AC_DISCHARGE[y,t]
+ EP[:eVreStorMaxDischargingAC][y, t] += vCAPRES_AC_DISCHARGE[y, t]
end
for y in VS_ASYM_DC_CHARGE
- EP[:eVreStorMaxChargingDC][y,t] += vCAPRES_DC_CHARGE[y,t]
+ EP[:eVreStorMaxChargingDC][y, t] += vCAPRES_DC_CHARGE[y, t]
end
for y in VS_ASYM_AC_CHARGE
- EP[:eVreStorMaxChargingAC][y,t] += vCAPRES_AC_CHARGE[y,t]
+ EP[:eVreStorMaxChargingAC][y, t] += vCAPRES_AC_CHARGE[y, t]
end
for y in VS_SYM_DC
- EP[:eChargeDischargeMaxDC][y,t] += (vCAPRES_DC_DISCHARGE[y,t]
- + vCAPRES_DC_CHARGE[y,t])
+ EP[:eChargeDischargeMaxDC][y, t] += (vCAPRES_DC_DISCHARGE[y, t]
+ +
+ vCAPRES_DC_CHARGE[y, t])
end
for y in VS_SYM_AC
- EP[:eChargeDischargeMaxAC][y,t] += (vCAPRES_AC_DISCHARGE[y,t]
- + vCAPRES_AC_CHARGE[y,t])
+ EP[:eChargeDischargeMaxAC][y, t] += (vCAPRES_AC_DISCHARGE[y, t]
+ +
+ vCAPRES_AC_CHARGE[y, t])
end
end
### CONSTRAINTS ###
# Constraint 1: Links energy held in reserve in first time step with decisions in last time step of each subperiod
# We use a modified formulation of this constraint (cVSoCBalLongDurationStorageStart) when modeling multiple representative periods and long duration storage
- @constraint(EP, cVreStorVSoCBalStart[y in CONSTRAINTSET, t in START_SUBPERIODS],
- vCAPRES_VS_VRE_STOR[y,t] == eVreStorVSoCBalStart[y,t])
+ @constraint(EP, cVreStorVSoCBalStart[y in CONSTRAINTSET, t in START_SUBPERIODS],
+ vCAPRES_VS_VRE_STOR[y, t]==eVreStorVSoCBalStart[y, t])
# Energy held in reserve for the next hour
- @constraint(EP, cVreStorVSoCBalInterior[y in STOR, t in INTERIOR_SUBPERIODS],
- vCAPRES_VS_VRE_STOR[y,t] == eVreStorVSoCBalInterior[y,t])
+ @constraint(EP, cVreStorVSoCBalInterior[y in STOR, t in INTERIOR_SUBPERIODS],
+ vCAPRES_VS_VRE_STOR[y, t]==eVreStorVSoCBalInterior[y, t])
# Constraint 2: Energy held in reserve acts as a lower bound on the total energy held in storage
- @constraint(EP, cVreStorSOCMinCapRes[y in STOR, t=1:T], EP[:vS_VRE_STOR][y,t] >= vCAPRES_VS_VRE_STOR[y,t])
+ @constraint(EP,
+ cVreStorSOCMinCapRes[y in STOR, t = 1:T],
+ EP[:vS_VRE_STOR][y, t]>=vCAPRES_VS_VRE_STOR[y, t])
# Constraint 3: Add capacity reserve margin contributions from VRE-STOR resources to capacity reserve margin constraint
- @expression(EP, eCapResMarBalanceStor_VRE_STOR[res=1:inputs["NCapacityReserveMargin"], t=1:T],(
- sum(derating_factor(gen[y],tag=res)*by_rid(y,:etainverter)*inputs["pP_Max_Solar"][y,t]*EP[:eTotalCap_SOLAR][y] for y in inputs["VS_SOLAR"])
- + sum(derating_factor(gen[y],tag=res)*inputs["pP_Max_Wind"][y,t]*EP[:eTotalCap_WIND][y] for y in inputs["VS_WIND"])
- + sum(derating_factor(gen[y],tag=res)*by_rid(y,:etainverter)*(EP[:vP_DC_DISCHARGE][y,t]) for y in DC_DISCHARGE)
- + sum(derating_factor(gen[y],tag=res)*(EP[:vP_AC_DISCHARGE][y,t]) for y in AC_DISCHARGE)
- - sum(derating_factor(gen[y],tag=res)*(EP[:vP_DC_CHARGE][y,t])/by_rid(y,:etainverter) for y in DC_CHARGE)
- - sum(derating_factor(gen[y],tag=res)*(EP[:vP_AC_CHARGE][y,t]) for y in AC_CHARGE)))
+ @expression(EP,
+ eCapResMarBalanceStor_VRE_STOR[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ (sum(derating_factor(gen[y], tag = res) * by_rid(y, :etainverter) *
+ inputs["pP_Max_Solar"][y, t] * EP[:eTotalCap_SOLAR][y]
+ for y in inputs["VS_SOLAR"])
+ +
+ sum(derating_factor(gen[y], tag = res) * inputs["pP_Max_Wind"][y, t] *
+ EP[:eTotalCap_WIND][y] for y in inputs["VS_WIND"])
+ +
+ sum(derating_factor(gen[y], tag = res) * by_rid(y, :etainverter) *
+ (EP[:vP_DC_DISCHARGE][y, t]) for y in DC_DISCHARGE)
+ +
+ sum(derating_factor(gen[y], tag = res) * (EP[:vP_AC_DISCHARGE][y, t])
+ for y in AC_DISCHARGE)
+ -
+ sum(derating_factor(gen[y], tag = res) * (EP[:vP_DC_CHARGE][y, t]) /
+ by_rid(y, :etainverter)
+ for y in DC_CHARGE)
+ -sum(derating_factor(gen[y], tag = res) * (EP[:vP_AC_CHARGE][y, t])
+ for y in AC_CHARGE)))
if StorageVirtualDischarge > 0
- @expression(EP, eCapResMarBalanceStor_VRE_STOR_Virtual[res=1:inputs["NCapacityReserveMargin"], t=1:T],(
- sum(derating_factor(gen[y],tag=res)*by_rid(y,:etainverter)*(vCAPRES_DC_DISCHARGE[y,t]) for y in DC_DISCHARGE)
- + sum(derating_factor(gen[y],tag=res)*(vCAPRES_AC_DISCHARGE[y,t]) for y in AC_DISCHARGE)
- - sum(derating_factor(gen[y],tag=res)*(vCAPRES_DC_CHARGE[y,t])/by_rid(y,:etainverter) for y in DC_CHARGE)
- - sum(derating_factor(gen[y],tag=res)*(vCAPRES_AC_CHARGE[y,t]) for y in AC_CHARGE)))
- add_similar_to_expression!(eCapResMarBalanceStor_VRE_STOR,eCapResMarBalanceStor_VRE_STOR_Virtual)
+ @expression(EP,
+ eCapResMarBalanceStor_VRE_STOR_Virtual[
+ res = 1:inputs["NCapacityReserveMargin"],
+ t = 1:T],
+ (sum(derating_factor(gen[y], tag = res) * by_rid(y, :etainverter) *
+ (vCAPRES_DC_DISCHARGE[y, t]) for y in DC_DISCHARGE)
+ +
+ sum(derating_factor(gen[y], tag = res) * (vCAPRES_AC_DISCHARGE[y, t])
+ for y in AC_DISCHARGE)
+ -
+ sum(derating_factor(gen[y], tag = res) * (vCAPRES_DC_CHARGE[y, t]) /
+ by_rid(y, :etainverter)
+ for y in DC_CHARGE)
+ -sum(derating_factor(gen[y], tag = res) * (vCAPRES_AC_CHARGE[y, t])
+ for y in AC_CHARGE)))
+ add_similar_to_expression!(eCapResMarBalanceStor_VRE_STOR,
+ eCapResMarBalanceStor_VRE_STOR_Virtual)
end
EP[:eCapResMarBalance] += EP[:eCapResMarBalanceStor_VRE_STOR]
### OBJECTIVE FUNCTION ADDITIONS ###
#Variable costs of DC "virtual charging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_Charge_DC_virtual[y in DC_CHARGE,t=1:T],
- inputs["omega"][t]*virtual_discharge_cost*vCAPRES_DC_CHARGE[y,t]/by_rid(y,:etainverter))
- @expression(EP, eTotalCVar_Charge_DC_T_virtual[t=1:T], sum(eCVar_Charge_DC_virtual[y,t] for y in DC_CHARGE))
- @expression(EP, eTotalCVar_Charge_DC_virtual, sum(eTotalCVar_Charge_DC_T_virtual[t] for t in 1:T))
+ @expression(EP, eCVar_Charge_DC_virtual[y in DC_CHARGE, t = 1:T],
+ inputs["omega"][t] * virtual_discharge_cost *
+ vCAPRES_DC_CHARGE[y, t]/by_rid(y, :etainverter))
+ @expression(EP,
+ eTotalCVar_Charge_DC_T_virtual[t = 1:T],
+ sum(eCVar_Charge_DC_virtual[y, t] for y in DC_CHARGE))
+ @expression(EP,
+ eTotalCVar_Charge_DC_virtual,
+ sum(eTotalCVar_Charge_DC_T_virtual[t] for t in 1:T))
EP[:eObj] += eTotalCVar_Charge_DC_virtual
#Variable costs of DC "virtual discharging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_Discharge_DC_virtual[y in DC_DISCHARGE,t=1:T],
- inputs["omega"][t]*virtual_discharge_cost*by_rid(y,:etainverter)*vCAPRES_DC_DISCHARGE[y,t])
- @expression(EP, eTotalCVar_Discharge_DC_T_virtual[t=1:T], sum(eCVar_Discharge_DC_virtual[y,t] for y in DC_DISCHARGE))
- @expression(EP, eTotalCVar_Discharge_DC_virtual, sum(eTotalCVar_Discharge_DC_T_virtual[t] for t in 1:T))
+ @expression(EP, eCVar_Discharge_DC_virtual[y in DC_DISCHARGE, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*by_rid(y, :etainverter)*
+ vCAPRES_DC_DISCHARGE[y, t])
+ @expression(EP,
+ eTotalCVar_Discharge_DC_T_virtual[t = 1:T],
+ sum(eCVar_Discharge_DC_virtual[y, t] for y in DC_DISCHARGE))
+ @expression(EP,
+ eTotalCVar_Discharge_DC_virtual,
+ sum(eTotalCVar_Discharge_DC_T_virtual[t] for t in 1:T))
EP[:eObj] += eTotalCVar_Discharge_DC_virtual
#Variable costs of AC "virtual charging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_Charge_AC_virtual[y in AC_CHARGE,t=1:T],
- inputs["omega"][t]*virtual_discharge_cost*vCAPRES_AC_CHARGE[y,t])
- @expression(EP, eTotalCVar_Charge_AC_T_virtual[t=1:T], sum(eCVar_Charge_AC_virtual[y,t] for y in AC_CHARGE))
- @expression(EP, eTotalCVar_Charge_AC_virtual, sum(eTotalCVar_Charge_AC_T_virtual[t] for t in 1:T))
+ @expression(EP, eCVar_Charge_AC_virtual[y in AC_CHARGE, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*vCAPRES_AC_CHARGE[y, t])
+ @expression(EP,
+ eTotalCVar_Charge_AC_T_virtual[t = 1:T],
+ sum(eCVar_Charge_AC_virtual[y, t] for y in AC_CHARGE))
+ @expression(EP,
+ eTotalCVar_Charge_AC_virtual,
+ sum(eTotalCVar_Charge_AC_T_virtual[t] for t in 1:T))
EP[:eObj] += eTotalCVar_Charge_AC_virtual
#Variable costs of AC "virtual discharging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_Discharge_AC_virtual[y in AC_DISCHARGE,t=1:T],
- inputs["omega"][t]*virtual_discharge_cost*vCAPRES_AC_DISCHARGE[y,t])
- @expression(EP, eTotalCVar_Discharge_AC_T_virtual[t=1:T], sum(eCVar_Discharge_AC_virtual[y,t] for y in AC_DISCHARGE))
- @expression(EP, eTotalCVar_Discharge_AC_virtual, sum(eTotalCVar_Discharge_AC_T_virtual[t] for t in 1:T))
+ @expression(EP, eCVar_Discharge_AC_virtual[y in AC_DISCHARGE, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*vCAPRES_AC_DISCHARGE[y, t])
+ @expression(EP,
+ eTotalCVar_Discharge_AC_T_virtual[t = 1:T],
+ sum(eCVar_Discharge_AC_virtual[y, t] for y in AC_DISCHARGE))
+ @expression(EP,
+ eTotalCVar_Discharge_AC_virtual,
+ sum(eTotalCVar_Discharge_AC_T_virtual[t] for t in 1:T))
EP[:eObj] += eTotalCVar_Discharge_AC_virtual
### LONG DURATION ENERGY STORAGE CAPACITY RESERVE MARGIN MODULE ###
@@ -2070,63 +2290,86 @@ function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
MODELED_PERIODS_INDEX = 1:NPeriods
- REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!,:Rep_Period] .== MODELED_PERIODS_INDEX]
+ REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!, :Rep_Period] .== MODELED_PERIODS_INDEX]
### VARIABLES ###
- @variables(EP, begin
- # State of charge held in reserve for storage at beginning of each modeled period n
- vCAPCONTRSTOR_VSOCw_VRE_STOR[y in VS_LDS, n in MODELED_PERIODS_INDEX] >= 0
+ @variables(EP,
+ begin
+ # State of charge held in reserve for storage at beginning of each modeled period n
+ vCAPCONTRSTOR_VSOCw_VRE_STOR[y in VS_LDS, n in MODELED_PERIODS_INDEX] >= 0
- # Build up in storage inventory held in reserve over each representative period w (can be pos or neg)
- vCAPCONTRSTOR_VdSOC_VRE_STOR[y in VS_LDS, w=1:REP_PERIOD]
- end)
+ # Build up in storage inventory held in reserve over each representative period w (can be pos or neg)
+ vCAPCONTRSTOR_VdSOC_VRE_STOR[y in VS_LDS, w = 1:REP_PERIOD]
+ end)
### EXPRESSIONS ###
- @expression(EP, eVreStorVSoCBalLongDurationStorageStart[y in VS_LDS, w=1:REP_PERIOD],
- (1-self_discharge(gen[y]))*(EP[:vCAPRES_VS_VRE_STOR][y,hours_per_subperiod*w]-vCAPCONTRSTOR_VdSOC_VRE_STOR[y,w]))
-
+ @expression(EP,
+ eVreStorVSoCBalLongDurationStorageStart[y in VS_LDS, w = 1:REP_PERIOD],
+ (1 -
+ self_discharge(gen[y]))*(EP[:vCAPRES_VS_VRE_STOR][y, hours_per_subperiod * w] -
+ vCAPCONTRSTOR_VdSOC_VRE_STOR[y, w]))
+
DC_DISCHARGE_CONSTRAINTSET = intersect(DC_DISCHARGE, VS_LDS)
DC_CHARGE_CONSTRAINTSET = intersect(DC_CHARGE, VS_LDS)
AC_DISCHARGE_CONSTRAINTSET = intersect(AC_DISCHARGE, VS_LDS)
AC_CHARGE_CONSTRAINTSET = intersect(AC_CHARGE, VS_LDS)
- for w=1:REP_PERIOD
+ for w in 1:REP_PERIOD
for y in DC_DISCHARGE_CONSTRAINTSET
- eVreStorVSoCBalLongDurationStorageStart[y,w] += EP[:vCAPRES_DC_DISCHARGE][y,hours_per_subperiod*(w-1)+1]/by_rid(y,:eff_down_dc)
+ eVreStorVSoCBalLongDurationStorageStart[y, w] += EP[:vCAPRES_DC_DISCHARGE][
+ y,
+ hours_per_subperiod * (w - 1) + 1] / by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE_CONSTRAINTSET
- eVreStorVSoCBalLongDurationStorageStart[y,w] -= by_rid(y,:eff_up_dc)*EP[:vCAPRES_DC_CHARGE][y,hours_per_subperiod*(w-1)+1]
+ eVreStorVSoCBalLongDurationStorageStart[y, w] -= by_rid(y, :eff_up_dc) *
+ EP[:vCAPRES_DC_CHARGE][y,
+ hours_per_subperiod * (w - 1) + 1]
end
for y in AC_DISCHARGE_CONSTRAINTSET
- eVreStorVSoCBalLongDurationStorageStart[y,w] += EP[:vCAPRES_AC_DISCHARGE][y,hours_per_subperiod*(w-1)+1]/by_rid(y,:eff_down_ac)
+ eVreStorVSoCBalLongDurationStorageStart[y, w] += EP[:vCAPRES_AC_DISCHARGE][
+ y,
+ hours_per_subperiod * (w - 1) + 1] / by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE_CONSTRAINTSET
- eVreStorVSoCBalLongDurationStorageStart[y,w] -= by_rid(y,:eff_up_ac)*EP[:vCAPRES_AC_CHARGE][y,hours_per_subperiod*(w-1)+1]
+ eVreStorVSoCBalLongDurationStorageStart[y, w] -= by_rid(y, :eff_up_ac) *
+ EP[:vCAPRES_AC_CHARGE][y,
+ hours_per_subperiod * (w - 1) + 1]
end
end
### CONSTRAINTS ###
- # Constraint 1: Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
- # Modified initial virtual state of storage for long duration storage - initialize wth value carried over from last period
- # Alternative to cVSoCBalStart constraint which is included when modeling multiple representative periods and long duration storage
- # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @constraint(EP, cVreStorVSoCBalLongDurationStorageStart[y in VS_LDS, w=1:REP_PERIOD],
- EP[:vCAPRES_VS_VRE_STOR][y,hours_per_subperiod*(w-1)+1] == eVreStorVSoCBalLongDurationStorageStart[y,w])
+ # Constraint 1: Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
+ # Modified initial virtual state of storage for long duration storage - initialize with value carried over from last period
+ # Alternative to cVSoCBalStart constraint which is included when modeling multiple representative periods and long duration storage
+ # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
+ @constraint(EP,
+ cVreStorVSoCBalLongDurationStorageStart[y in VS_LDS, w = 1:REP_PERIOD],
+ EP[:vCAPRES_VS_VRE_STOR][y,
+ hours_per_subperiod * (w - 1) + 1]==eVreStorVSoCBalLongDurationStorageStart[y, w])
# Constraint 2: Storage held in reserve at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
# Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cVreStorVSoCBalLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
- vCAPCONTRSTOR_VSOCw_VRE_STOR[y,mod1(r+1, NPeriods)] == vCAPCONTRSTOR_VSOCw_VRE_STOR[y,r] + vCAPCONTRSTOR_VdSOC_VRE_STOR[y,dfPeriodMap[r,:Rep_Period_Index]])
+ @constraint(EP,
+ cVreStorVSoCBalLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
+ vCAPCONTRSTOR_VSOCw_VRE_STOR[y,
+ mod1(r + 1, NPeriods)]==vCAPCONTRSTOR_VSOCw_VRE_STOR[y, r] +
+ vCAPCONTRSTOR_VdSOC_VRE_STOR[
+ y, dfPeriodMap[r, :Rep_Period_Index]])
# Constraint 3: Initial reserve storage level for representative periods must also adhere to sub-period storage inventory balance
# Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cVreStorVSoCBalLongDurationStorageSub[y in VS_LDS, r in REP_PERIODS_INDEX],
- vCAPCONTRSTOR_VSOCw_VRE_STOR[y,r] == EP[:vCAPRES_VS_VRE_STOR][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]] - vCAPCONTRSTOR_VdSOC_VRE_STOR[y,dfPeriodMap[r,:Rep_Period_Index]])
+ @constraint(EP,
+ cVreStorVSoCBalLongDurationStorageSub[y in VS_LDS, r in REP_PERIODS_INDEX],
+ vCAPCONTRSTOR_VSOCw_VRE_STOR[y,r]==EP[:vCAPRES_VS_VRE_STOR][y,
+ hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]] -
+ vCAPCONTRSTOR_VdSOC_VRE_STOR[y, dfPeriodMap[r, :Rep_Period_Index]])
# Constraint 4: Energy held in reserve at the beginning of each modeled period acts as a lower bound on the total energy held in storage
- @constraint(EP, cSOCMinCapResLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX], EP[:vSOCw_VRE_STOR][y,r] >= vCAPCONTRSTOR_VSOCw_VRE_STOR[y,r])
+ @constraint(EP,
+ cSOCMinCapResLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
+ EP[:vSOCw_VRE_STOR][y, r]>=vCAPCONTRSTOR_VSOCw_VRE_STOR[y, r])
end
end
@@ -2212,15 +2455,14 @@ Lastly, if the co-located resource has a variable renewable energy component, th
```
"""
function vre_stor_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Operational Reserves Module")
### LOAD DATA & CREATE SETS ###
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
gen_VRE_STOR = gen.VreStorage
- T = inputs["T"]
+ T = inputs["T"]
VRE_STOR = inputs["VRE_STOR"]
STOR = inputs["VS_STOR"]
DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
@@ -2250,10 +2492,10 @@ function vre_stor_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
SOLAR_RSV = intersect(SOLAR, inputs["RSV"]) # Set of solar resources with RSV reserves
WIND_REG = intersect(WIND, inputs["REG"]) # Set of wind resources with REG reserves
WIND_RSV = intersect(WIND, inputs["RSV"]) # Set of wind resources with RSV reserves
-
- STOR_REG = intersect(STOR, inputs["REG"]) # Set of storage resources with REG reserves
- STOR_RSV = intersect(STOR, inputs["RSV"]) # Set of storage resources with RSV reserves
- STOR_REG_RSV_UNION = union(STOR_REG, STOR_RSV) # Set of storage resources with either or both REG and RSV reserves
+
+ STOR_REG = intersect(STOR, inputs["REG"]) # Set of storage resources with REG reserves
+ STOR_RSV = intersect(STOR, inputs["RSV"]) # Set of storage resources with RSV reserves
+ STOR_REG_RSV_UNION = union(STOR_REG, STOR_RSV) # Set of storage resources with either or both REG and RSV reserves
DC_DISCHARGE_REG = intersect(DC_DISCHARGE, STOR_REG) # Set of DC discharge resources with REG reserves
DC_DISCHARGE_RSV = intersect(DC_DISCHARGE, STOR_RSV) # Set of DC discharge resources with RSV reserves
AC_DISCHARGE_REG = intersect(AC_DISCHARGE, STOR_REG) # Set of AC discharge resources with REG reserves
@@ -2279,173 +2521,179 @@ function vre_stor_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
@variables(EP, begin
# Contribution to regulation (primary reserves), assumed to be symmetric (up & down directions equal)
- vREG_SOLAR[y in SOLAR_REG, t=1:T] >= 0
- vREG_WIND[y in WIND_REG, t=1:T] >= 0
- vREG_DC_Discharge[y in DC_DISCHARGE_REG, t=1:T] >= 0
- vREG_DC_Charge[y in DC_CHARGE_REG, t=1:T] >= 0
- vREG_AC_Discharge[y in AC_DISCHARGE_REG, t=1:T] >= 0
- vREG_AC_Charge[y in AC_CHARGE_REG, t=1:T] >= 0
+ vREG_SOLAR[y in SOLAR_REG, t = 1:T] >= 0
+ vREG_WIND[y in WIND_REG, t = 1:T] >= 0
+ vREG_DC_Discharge[y in DC_DISCHARGE_REG, t = 1:T] >= 0
+ vREG_DC_Charge[y in DC_CHARGE_REG, t = 1:T] >= 0
+ vREG_AC_Discharge[y in AC_DISCHARGE_REG, t = 1:T] >= 0
+ vREG_AC_Charge[y in AC_CHARGE_REG, t = 1:T] >= 0
# Contribution to operating reserves (secondary reserves or contingency reserves); only model upward reserve requirements
- vRSV_SOLAR[y in SOLAR_RSV, t=1:T] >= 0
- vRSV_WIND[y in WIND_RSV, t=1:T] >= 0
- vRSV_DC_Discharge[y in DC_DISCHARGE_RSV, t=1:T] >= 0
- vRSV_DC_Charge[y in DC_CHARGE_RSV, t=1:T] >= 0
- vRSV_AC_Discharge[y in AC_DISCHARGE_RSV, t=1:T] >= 0
- vRSV_AC_Charge[y in AC_CHARGE_RSV, t=1:T] >= 0
+ vRSV_SOLAR[y in SOLAR_RSV, t = 1:T] >= 0
+ vRSV_WIND[y in WIND_RSV, t = 1:T] >= 0
+ vRSV_DC_Discharge[y in DC_DISCHARGE_RSV, t = 1:T] >= 0
+ vRSV_DC_Charge[y in DC_CHARGE_RSV, t = 1:T] >= 0
+ vRSV_AC_Discharge[y in AC_DISCHARGE_RSV, t = 1:T] >= 0
+ vRSV_AC_Charge[y in AC_CHARGE_RSV, t = 1:T] >= 0
end)
### EXPRESSIONS ###
- @expression(EP, eVreStorRegOnlyBalance[y in VRE_STOR_REG, t=1:T], JuMP.AffExpr())
- @expression(EP, eVreStorRsvOnlyBalance[y in VRE_STOR_RSV, t=1:T], JuMP.AffExpr())
- @expression(EP, eDischargeDCMin[y in DC_DISCHARGE, t=1:T], JuMP.AffExpr())
- @expression(EP, eChargeDCMin[y in DC_CHARGE, t=1:T], JuMP.AffExpr())
- @expression(EP, eDischargeACMin[y in AC_DISCHARGE, t=1:T], JuMP.AffExpr())
- @expression(EP, eChargeACMin[y in AC_CHARGE, t=1:T], JuMP.AffExpr())
- @expression(EP, eChargeMax[y in STOR_REG_RSV_UNION, t=1:T], JuMP.AffExpr())
- @expression(EP, eDischargeMax[y in STOR_REG_RSV_UNION, t=1:T], JuMP.AffExpr())
+ @expression(EP, eVreStorRegOnlyBalance[y in VRE_STOR_REG, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eVreStorRsvOnlyBalance[y in VRE_STOR_RSV, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eDischargeDCMin[y in DC_DISCHARGE, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eChargeDCMin[y in DC_CHARGE, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eDischargeACMin[y in AC_DISCHARGE, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eChargeACMin[y in AC_CHARGE, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eChargeMax[y in STOR_REG_RSV_UNION, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eDischargeMax[y in STOR_REG_RSV_UNION, t = 1:T], JuMP.AffExpr())
- for t=1:T
+ for t in 1:T
for y in DC_DISCHARGE
- eDischargeDCMin[y,t] += EP[:vP_DC_DISCHARGE][y,t]
- eDischargeMax[y,t] += EP[:vP_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eDischargeDCMin[y, t] += EP[:vP_DC_DISCHARGE][y, t]
+ eDischargeMax[y, t] += EP[:vP_DC_DISCHARGE][y, t] / by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE
- eChargeDCMin[y,t] += EP[:vP_DC_CHARGE][y,t]
- eChargeMax[y,t] += by_rid(y,:eff_up_dc)*EP[:vP_DC_CHARGE][y,t]
+ eChargeDCMin[y, t] += EP[:vP_DC_CHARGE][y, t]
+ eChargeMax[y, t] += by_rid(y, :eff_up_dc) * EP[:vP_DC_CHARGE][y, t]
end
for y in AC_DISCHARGE
- eDischargeACMin[y,t] += EP[:vP_AC_DISCHARGE][y,t]
- eDischargeMax[y,t] += EP[:vP_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eDischargeACMin[y, t] += EP[:vP_AC_DISCHARGE][y, t]
+ eDischargeMax[y, t] += EP[:vP_AC_DISCHARGE][y, t] / by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE
- eChargeACMin[y,t] += EP[:vP_AC_CHARGE][y,t]
- eChargeMax[y,t] += by_rid(y,:eff_up_ac)*EP[:vP_AC_CHARGE][y,t]
+ eChargeACMin[y, t] += EP[:vP_AC_CHARGE][y, t]
+ eChargeMax[y, t] += by_rid(y, :eff_up_ac) * EP[:vP_AC_CHARGE][y, t]
end
for y in SOLAR_REG
- eVreStorRegOnlyBalance[y,t] += by_rid(y,:etainverter)*vREG_SOLAR[y,t]
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vREG_SOLAR[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vREG_SOLAR[y,t]
- EP[:eSolarGenMaxS][y,t] += vREG_SOLAR[y,t]
+ eVreStorRegOnlyBalance[y, t] += by_rid(y, :etainverter) * vREG_SOLAR[y, t]
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vREG_SOLAR[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vREG_SOLAR[y, t]
+ EP[:eSolarGenMaxS][y, t] += vREG_SOLAR[y, t]
end
for y in SOLAR_RSV
- eVreStorRsvOnlyBalance[y,t] += by_rid(y,:etainverter)*vRSV_SOLAR[y,t]
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vRSV_SOLAR[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vRSV_SOLAR[y,t]
- EP[:eSolarGenMaxS][y,t] += vRSV_SOLAR[y,t]
+ eVreStorRsvOnlyBalance[y, t] += by_rid(y, :etainverter) * vRSV_SOLAR[y, t]
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vRSV_SOLAR[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vRSV_SOLAR[y, t]
+ EP[:eSolarGenMaxS][y, t] += vRSV_SOLAR[y, t]
end
for y in WIND_REG
- eVreStorRegOnlyBalance[y,t] += vREG_WIND[y,t]
- EP[:eGridExport][y,t] += vREG_WIND[y,t]
- EP[:eWindGenMaxW][y,t] += vREG_WIND[y,t]
+ eVreStorRegOnlyBalance[y, t] += vREG_WIND[y, t]
+ EP[:eGridExport][y, t] += vREG_WIND[y, t]
+ EP[:eWindGenMaxW][y, t] += vREG_WIND[y, t]
end
for y in WIND_RSV
- eVreStorRsvOnlyBalance[y,t] += vRSV_WIND[y,t]
- EP[:eGridExport][y,t] += vRSV_WIND[y,t]
- EP[:eWindGenMaxW][y,t] += vRSV_WIND[y,t]
+ eVreStorRsvOnlyBalance[y, t] += vRSV_WIND[y, t]
+ EP[:eGridExport][y, t] += vRSV_WIND[y, t]
+ EP[:eWindGenMaxW][y, t] += vRSV_WIND[y, t]
end
for y in DC_DISCHARGE_REG
- eVreStorRegOnlyBalance[y,t] += by_rid(y,:etainverter)*vREG_DC_Discharge[y,t]
- eDischargeDCMin[y,t] -= vREG_DC_Discharge[y,t]
- eDischargeMax[y,t] += EP[:vREG_DC_Discharge][y,t]/by_rid(y,:eff_down_dc)
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vREG_DC_Discharge[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vREG_DC_Discharge[y,t]
+ eVreStorRegOnlyBalance[y, t] += by_rid(y, :etainverter) *
+ vREG_DC_Discharge[y, t]
+ eDischargeDCMin[y, t] -= vREG_DC_Discharge[y, t]
+ eDischargeMax[y, t] += EP[:vREG_DC_Discharge][y, t] / by_rid(y, :eff_down_dc)
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vREG_DC_Discharge[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vREG_DC_Discharge[y, t]
end
for y in DC_DISCHARGE_RSV
- eVreStorRsvOnlyBalance[y,t] += by_rid(y,:etainverter)*vRSV_DC_Discharge[y,t]
- eDischargeMax[y,t] += EP[:vRSV_DC_Discharge][y,t]/by_rid(y,:eff_down_dc)
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vRSV_DC_Discharge[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vRSV_DC_Discharge[y,t]
+ eVreStorRsvOnlyBalance[y, t] += by_rid(y, :etainverter) *
+ vRSV_DC_Discharge[y, t]
+ eDischargeMax[y, t] += EP[:vRSV_DC_Discharge][y, t] / by_rid(y, :eff_down_dc)
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vRSV_DC_Discharge[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vRSV_DC_Discharge[y, t]
end
for y in DC_CHARGE_REG
- eVreStorRegOnlyBalance[y,t] += vREG_DC_Charge[y,t]/by_rid(y,:etainverter)
- eChargeDCMin[y,t] -= vREG_DC_Charge[y,t]
- eChargeMax[y,t] += by_rid(y,:eff_up_dc)*EP[:vREG_DC_Charge][y,t]
- EP[:eGridExport][y,t] += vREG_DC_Charge[y,t]/by_rid(y,:etainverter)
- EP[:eInverterExport][y,t] += vREG_DC_Charge[y,t]/by_rid(y,:etainverter)
+ eVreStorRegOnlyBalance[y, t] += vREG_DC_Charge[y, t] / by_rid(y, :etainverter)
+ eChargeDCMin[y, t] -= vREG_DC_Charge[y, t]
+ eChargeMax[y, t] += by_rid(y, :eff_up_dc) * EP[:vREG_DC_Charge][y, t]
+ EP[:eGridExport][y, t] += vREG_DC_Charge[y, t] / by_rid(y, :etainverter)
+ EP[:eInverterExport][y, t] += vREG_DC_Charge[y, t] / by_rid(y, :etainverter)
end
for y in DC_CHARGE_RSV
- eVreStorRsvOnlyBalance[y,t] += vRSV_DC_Charge[y,t]/by_rid(y,:etainverter)
- eChargeDCMin[y,t] -= vRSV_DC_Charge[y,t]
+ eVreStorRsvOnlyBalance[y, t] += vRSV_DC_Charge[y, t] / by_rid(y, :etainverter)
+ eChargeDCMin[y, t] -= vRSV_DC_Charge[y, t]
end
for y in AC_DISCHARGE_REG
- eVreStorRegOnlyBalance[y,t] += vREG_AC_Discharge[y,t]
- eDischargeACMin[y,t] -= vREG_AC_Discharge[y,t]
- eDischargeMax[y,t] += EP[:vREG_AC_Discharge][y,t]/by_rid(y,:eff_down_ac)
- EP[:eGridExport][y,t] += vREG_AC_Discharge[y,t]
+ eVreStorRegOnlyBalance[y, t] += vREG_AC_Discharge[y, t]
+ eDischargeACMin[y, t] -= vREG_AC_Discharge[y, t]
+ eDischargeMax[y, t] += EP[:vREG_AC_Discharge][y, t] / by_rid(y, :eff_down_ac)
+ EP[:eGridExport][y, t] += vREG_AC_Discharge[y, t]
end
for y in AC_DISCHARGE_RSV
- eVreStorRsvOnlyBalance[y,t] += vRSV_AC_Discharge[y,t]
- eDischargeMax[y,t] += EP[:vRSV_AC_Discharge][y,t]/by_rid(y,:eff_down_ac)
- EP[:eGridExport][y,t] += vRSV_AC_Discharge[y,t]
+ eVreStorRsvOnlyBalance[y, t] += vRSV_AC_Discharge[y, t]
+ eDischargeMax[y, t] += EP[:vRSV_AC_Discharge][y, t] / by_rid(y, :eff_down_ac)
+ EP[:eGridExport][y, t] += vRSV_AC_Discharge[y, t]
end
for y in AC_CHARGE_REG
- eVreStorRegOnlyBalance[y,t] += vREG_AC_Charge[y,t]
- eChargeACMin[y,t] -= vREG_AC_Charge[y,t]
- eChargeMax[y,t] += by_rid(y,:eff_down_ac)*EP[:vREG_AC_Charge][y,t]
- EP[:eGridExport][y,t] += vREG_AC_Charge[y,t]
+ eVreStorRegOnlyBalance[y, t] += vREG_AC_Charge[y, t]
+ eChargeACMin[y, t] -= vREG_AC_Charge[y, t]
+ eChargeMax[y, t] += by_rid(y, :eff_down_ac) * EP[:vREG_AC_Charge][y, t]
+ EP[:eGridExport][y, t] += vREG_AC_Charge[y, t]
end
for y in AC_CHARGE_RSV
- eVreStorRsvOnlyBalance[y,t] += vRSV_AC_Charge[y,t]
- eChargeACMin[y,t] -= vRSV_AC_Charge[y,t]
+ eVreStorRsvOnlyBalance[y, t] += vRSV_AC_Charge[y, t]
+ eChargeACMin[y, t] -= vRSV_AC_Charge[y, t]
end
for y in VS_SYM_DC_REG
- EP[:eChargeDischargeMaxDC][y,t] += (vREG_DC_Discharge[y,t]
- + vREG_DC_Charge[y,t])
+ EP[:eChargeDischargeMaxDC][y, t] += (vREG_DC_Discharge[y, t]
+ +
+ vREG_DC_Charge[y, t])
end
for y in VS_SYM_DC_RSV
- EP[:eChargeDischargeMaxDC][y,t] += vRSV_DC_Discharge[y,t]
+ EP[:eChargeDischargeMaxDC][y, t] += vRSV_DC_Discharge[y, t]
end
for y in VS_SYM_AC_REG
- EP[:eChargeDischargeMaxAC][y,t] += (vREG_AC_Discharge[y,t]
- + vREG_AC_Charge[y,t])
+ EP[:eChargeDischargeMaxAC][y, t] += (vREG_AC_Discharge[y, t]
+ +
+ vREG_AC_Charge[y, t])
end
for y in VS_SYM_AC_RSV
- EP[:eChargeDischargeMaxAC][y,t] += vRSV_AC_Discharge[y,t]
+ EP[:eChargeDischargeMaxAC][y, t] += vRSV_AC_Discharge[y, t]
end
for y in VS_ASYM_DC_DISCHARGE_REG
- EP[:eVreStorMaxDischargingDC][y,t] += vREG_DC_Discharge[y,t]
+ EP[:eVreStorMaxDischargingDC][y, t] += vREG_DC_Discharge[y, t]
end
for y in VS_ASYM_DC_DISCHARGE_RSV
- EP[:eVreStorMaxDischargingDC][y,t] += vRSV_DC_Discharge[y,t]
+ EP[:eVreStorMaxDischargingDC][y, t] += vRSV_DC_Discharge[y, t]
end
for y in VS_ASYM_DC_CHARGE_REG
- EP[:eVreStorMaxChargingDC][y,t] += vREG_DC_Charge[y,t]
+ EP[:eVreStorMaxChargingDC][y, t] += vREG_DC_Charge[y, t]
end
for y in VS_ASYM_AC_DISCHARGE_REG
- EP[:eVreStorMaxDischargingAC][y,t] += vREG_AC_Discharge[y,t]
+ EP[:eVreStorMaxDischargingAC][y, t] += vREG_AC_Discharge[y, t]
end
for y in VS_ASYM_AC_DISCHARGE_RSV
- EP[:eVreStorMaxDischargingAC][y,t] += vRSV_AC_Discharge[y,t]
+ EP[:eVreStorMaxDischargingAC][y, t] += vRSV_AC_Discharge[y, t]
end
for y in VS_ASYM_AC_CHARGE_REG
- EP[:eVreStorMaxChargingAC][y,t] += vREG_AC_Charge[y,t]
+ EP[:eVreStorMaxChargingAC][y, t] += vREG_AC_Charge[y, t]
end
end
if CapacityReserveMargin > 0
- for t=1:T
+ for t in 1:T
for y in DC_DISCHARGE
- eDischargeMax[y,t] += EP[:vCAPRES_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eDischargeMax[y, t] += EP[:vCAPRES_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
for y in AC_DISCHARGE
- eDischargeMax[y,t] += EP[:vCAPRES_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eDischargeMax[y, t] += EP[:vCAPRES_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
end
end
@@ -2454,88 +2702,124 @@ function vre_stor_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
# Frequency regulation and operating reserves for all co-located VRE-STOR resources
if !isempty(VRE_STOR_REG_RSV)
- @constraints(EP, begin
- # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
- [y in VRE_STOR_REG_RSV, t=1:T], EP[:vREG][y,t] <= reg_max(gen[y])*EP[:eTotalCap][y]
- [y in VRE_STOR_REG_RSV, t=1:T], EP[:vRSV][y,t] <= rsv_max(gen[y])*EP[:eTotalCap][y]
-
- # Actual contribution to regulation and reserves is sum of auxilary variables
- [y in VRE_STOR_REG_RSV, t=1:T], EP[:vREG][y,t] == eVreStorRegOnlyBalance[y,t]
- [y in VRE_STOR_REG_RSV, t=1:T], EP[:vRSV][y,t] == eVreStorRsvOnlyBalance[y,t]
- end)
+ @constraints(EP,
+ begin
+ # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
+ [y in VRE_STOR_REG_RSV, t = 1:T],
+ EP[:vREG][y, t] <= reg_max(gen[y]) * EP[:eTotalCap][y]
+ [y in VRE_STOR_REG_RSV, t = 1:T],
+ EP[:vRSV][y, t] <= rsv_max(gen[y]) * EP[:eTotalCap][y]
+
+ # Actual contribution to regulation and reserves is sum of auxiliary variables
+ [y in VRE_STOR_REG_RSV, t = 1:T],
+ EP[:vREG][y, t] == eVreStorRegOnlyBalance[y, t]
+ [y in VRE_STOR_REG_RSV, t = 1:T],
+ EP[:vRSV][y, t] == eVreStorRsvOnlyBalance[y, t]
+ end)
end
if !isempty(VRE_STOR_REG_ONLY)
- @constraints(EP, begin
- # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
- [y in VRE_STOR_REG_ONLY, t=1:T], EP[:vREG][y,t] <= reg_max(gen[y])*EP[:eTotalCap][y]
-
- # Actual contribution to regulation is sum of auxilary variables
- [y in VRE_STOR_REG_ONLY, t=1:T], EP[:vREG][y,t] == eVreStorRegOnlyBalance[y,t]
- end)
+ @constraints(EP,
+ begin
+ # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
+ [y in VRE_STOR_REG_ONLY, t = 1:T],
+ EP[:vREG][y, t] <= reg_max(gen[y]) * EP[:eTotalCap][y]
+
+ # Actual contribution to regulation is sum of auxiliary variables
+ [y in VRE_STOR_REG_ONLY, t = 1:T],
+ EP[:vREG][y, t] == eVreStorRegOnlyBalance[y, t]
+ end)
end
if !isempty(VRE_STOR_RSV_ONLY)
- @constraints(EP, begin
- # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
- [y in VRE_STOR_RSV_ONLY, t=1:T], EP[:vRSV][y,t] <= rsv_max(gen[y])*EP[:eTotalCap][y]
-
- # Actual contribution to reserves is sum of auxilary variables
- [y in VRE_STOR_RSV_ONLY, t=1:T], EP[:vRSV][y,t] == eVreStorRsvOnlyBalance[y,t]
- end)
+ @constraints(EP,
+ begin
+ # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
+ [y in VRE_STOR_RSV_ONLY, t = 1:T],
+ EP[:vRSV][y, t] <= rsv_max(gen[y]) * EP[:eTotalCap][y]
+
+ # Actual contribution to reserves is sum of auxiliary variables
+ [y in VRE_STOR_RSV_ONLY, t = 1:T],
+ EP[:vRSV][y, t] == eVreStorRsvOnlyBalance[y, t]
+ end)
end
# Frequency regulation and operating reserves for VRE-STOR resources with a VRE component
if !isempty(SOLAR_REG)
- @constraints(EP, begin
- # Maximum generation and contribution to reserves up must be greater than zero
- [y in SOLAR_REG, t=1:T], EP[:vP_SOLAR][y,t] - EP[:vREG_SOLAR][y,t] >= 0
- end)
+ @constraints(EP,
+ begin
+ # Maximum generation and contribution to reserves up must be greater than zero
+ [y in SOLAR_REG, t = 1:T], EP[:vP_SOLAR][y, t] - EP[:vREG_SOLAR][y, t] >= 0
+ end)
end
if !isempty(WIND_REG)
- @constraints(EP, begin
- # Maximum generation and contribution to reserves up must be greater than zero
- [y in WIND_REG, t=1:T], EP[:vP_WIND][y,t] - EP[:vREG_WIND][y,t] >= 0
- end)
+ @constraints(EP,
+ begin
+ # Maximum generation and contribution to reserves up must be greater than zero
+ [y in WIND_REG, t = 1:T], EP[:vP_WIND][y, t] - EP[:vREG_WIND][y, t] >= 0
+ end)
end
# Frequency regulation and operating reserves for VRE-STOR resources with a storage component
if !isempty(STOR_REG_RSV_UNION)
- @constraints(EP, begin
- # Maximum DC charging rate plus contribution to reserves up must be greater than zero
- # Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
- [y in DC_CHARGE, t=1:T], eChargeDCMin[y,t] >= 0
-
- # Maximum AC charging rate plus contribution to reserves up must be greater than zero
- # Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
- [y in AC_CHARGE, t=1:T], eChargeACMin[y,t] >= 0
-
- # Maximum DC discharging rate and contribution to reserves down must be greater than zero
- # Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
- [y in DC_DISCHARGE, t=1:T], eDischargeDCMin[y,t] >= 0
-
- # Maximum AC discharging rate and contribution to reserves down must be greater than zero
- # Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
- [y in AC_DISCHARGE, t=1:T], eDischargeACMin[y,t] >= 0
-
- # Maximum charging rate plus contributions must be less than available storage capacity
- [y in STOR_REG_RSV_UNION, t=1:T], eChargeMax[y,t] <= EP[:eTotalCap_STOR][y]-EP[:vS_VRE_STOR][y, hoursbefore(p,t,1)]
-
- # Maximum discharging rate and contributions must be less than the available stored energy in prior period
- # wrapping from end of sample period to start of sample period for energy capacity constraint
- [y in STOR_REG_RSV_UNION, t=1:T], eDischargeMax[y,t] <= EP[:vS_VRE_STOR][y, hoursbefore(p,t,1)]
- end)
+ @constraints(EP,
+ begin
+ # Maximum DC charging rate plus contribution to reserves up must be greater than zero
+ # Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
+ [y in DC_CHARGE, t = 1:T], eChargeDCMin[y, t] >= 0
+
+ # Maximum AC charging rate plus contribution to reserves up must be greater than zero
+ # Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
+ [y in AC_CHARGE, t = 1:T], eChargeACMin[y, t] >= 0
+
+ # Maximum DC discharging rate and contribution to reserves down must be greater than zero
+ # Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
+ [y in DC_DISCHARGE, t = 1:T], eDischargeDCMin[y, t] >= 0
+
+ # Maximum AC discharging rate and contribution to reserves down must be greater than zero
+ # Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
+ [y in AC_DISCHARGE, t = 1:T], eDischargeACMin[y, t] >= 0
+
+ # Maximum charging rate plus contributions must be less than available storage capacity
+ [y in STOR_REG_RSV_UNION, t = 1:T],
+ eChargeMax[y, t] <=
+ EP[:eTotalCap_STOR][y] - EP[:vS_VRE_STOR][y, hoursbefore(p, t, 1)]
+
+ # Maximum discharging rate and contributions must be less than the available stored energy in prior period
+ # wrapping from end of sample period to start of sample period for energy capacity constraint
+ [y in STOR_REG_RSV_UNION, t = 1:T],
+ eDischargeMax[y, t] <= EP[:vS_VRE_STOR][y, hoursbefore(p, t, 1)]
+ end)
end
# Total system reserve constraints
- @expression(EP, eRegReqVreStor[t=1:T], inputs["pReg_Req_VRE"]*sum(inputs["pP_Max_Solar"][y,t]*EP[:eTotalCap_SOLAR][y]*by_rid(y, :etainverter) for y in SOLAR_REG)
- + inputs["pReg_Req_VRE"]*sum(inputs["pP_Max_Wind"][y,t]*EP[:eTotalCap_WIND][y] for y in WIND_REG))
- @expression(EP, eRsvReqVreStor[t=1:T], inputs["pRsv_Req_VRE"]*sum(inputs["pP_Max_Solar"][y,t]*EP[:eTotalCap_SOLAR][y]*by_rid(y, :etainverter) for y in SOLAR_RSV)
- + inputs["pRsv_Req_VRE"]*sum(inputs["pP_Max_Wind"][y,t]*EP[:eTotalCap_WIND][y] for y in WIND_RSV))
+ @expression(EP,
+ eRegReqVreStor[t = 1:T],
+ inputs["pReg_Req_VRE"] *
+ sum(inputs["pP_Max_Solar"][y, t] * EP[:eTotalCap_SOLAR][y] *
+ by_rid(y, :etainverter)
+ for y in SOLAR_REG)
+ +inputs["pReg_Req_VRE"] *
+ sum(inputs["pP_Max_Wind"][y, t] * EP[:eTotalCap_WIND][y] for y in WIND_REG))
+ @expression(EP,
+ eRsvReqVreStor[t = 1:T],
+ inputs["pRsv_Req_VRE"] *
+ sum(inputs["pP_Max_Solar"][y, t] * EP[:eTotalCap_SOLAR][y] *
+ by_rid(y, :etainverter)
+ for y in SOLAR_RSV)
+ +inputs["pRsv_Req_VRE"] *
+ sum(inputs["pP_Max_Wind"][y, t] * EP[:eTotalCap_WIND][y] for y in WIND_RSV))
if !isempty(VRE_STOR_REG)
- @constraint(EP, cRegVreStor[t=1:T], sum(EP[:vREG][y,t] for y in inputs["REG"]) >= EP[:eRegReq][t] + eRegReqVreStor[t])
+ @constraint(EP,
+ cRegVreStor[t = 1:T],
+ sum(EP[:vREG][y, t]
+ for y in inputs["REG"])>=EP[:eRegReq][t] +
+ eRegReqVreStor[t])
end
if !isempty(VRE_STOR_RSV)
- @constraint(EP, cRsvReqVreStor[t=1:T], sum(EP[:vRSV][y,t] for y in inputs["RSV"]) + EP[:vUNMET_RSV][t] >= EP[:eRsvReq][t] + eRsvReqVreStor[t])
+ @constraint(EP,
+ cRsvReqVreStor[t = 1:T],
+ sum(EP[:vRSV][y, t] for y in inputs["RSV"]) +
+ EP[:vUNMET_RSV][t]>=EP[:eRsvReq][t] + eRsvReqVreStor[t])
end
end
diff --git a/src/model/solve_model.jl b/src/model/solve_model.jl
index 6519a8c863..e3713bc67b 100644
--- a/src/model/solve_model.jl
+++ b/src/model/solve_model.jl
@@ -11,28 +11,28 @@ nothing (modifies an existing-solved model in the memory). `solve()` must be run
"""
function fix_integers(jump_model::Model)
- ################################################################################
- ## function fix_integers()
- ##
- ## inputs: jump_model - a model object containing that has been previously solved.
- ##
- ## description: fixes the iteger variables ones the model has been solved in order
- ## to calculate approximations of dual variables
- ##
- ## returns: no result since it modifies an existing-solved model in the memory.
- ## solve() must be run again to solve and getdual veriables
- ##
- ################################################################################
- values = Dict(v => value(v) for v in all_variables(jump_model))
- for v in all_variables(jump_model)
- if is_integer(v)
- fix(v,values[v],force=true)
- unset_integer(v)
+ ################################################################################
+ ## function fix_integers()
+ ##
+ ## inputs: jump_model - a model object containing that has been previously solved.
+ ##
+ ## description: fixes the integer variables once the model has been solved in order
+ ## to calculate approximations of dual variables
+ ##
+ ## returns: no result since it modifies an existing-solved model in the memory.
+ ## solve() must be run again to solve and getdual variables
+ ##
+ ################################################################################
+ values = Dict(v => value(v) for v in all_variables(jump_model))
+ for v in all_variables(jump_model)
+ if is_integer(v)
+ fix(v, values[v], force = true)
+ unset_integer(v)
elseif is_binary(v)
- fix(v,values[v],force=true)
- unset_binary(v)
+ fix(v, values[v], force = true)
+ unset_binary(v)
end
- end
+ end
end
@doc raw"""
@@ -48,62 +48,58 @@ Description: Solves and extracts solution variables for later processing
- `solver_time::Float64`: time taken to solve the model
"""
function solve_model(EP::Model, setup::Dict)
- ## Start solve timer
- solver_start_time = time()
- solver_time = time()
-
- ## Solve Model
- optimize!(EP)
-
- if has_values(EP)
-
- if has_duals(EP) # fully linear model
- println("LP solved for primal")
- else
- println("MILP solved for primal")
- end
-
- ## Record solver time
- solver_time = time() - solver_start_time
- elseif setup["ComputeConflicts"]==0
-
- @info "No model solution. You can try to set ComputeConflicts to 1 in the genx_settings.yml file to compute conflicting constraints."
-
- elseif setup["ComputeConflicts"]==1
-
- @info "No model solution. Trying to identify conflicting constriants..."
+ ## Start solve timer
+ solver_start_time = time()
+ solver_time = time()
+
+ ## Solve Model
+ optimize!(EP)
+
+ if has_values(EP)
+ if has_duals(EP) # fully linear model
+ println("LP solved for primal")
+ else
+ println("MILP solved for primal")
+ end
- try
- compute_conflict!(EP)
- catch e
- if isa(e, JuMP.ArgumentError)
- @warn "$(solver_name(EP)) does not support computing conflicting constraints. This is available using either Gurobi or CPLEX."
- solver_time = time() - solver_start_time
- return EP, solver_time
- else
- rethrow(e)
- end
- end
+ ## Record solver time
+ solver_time = time() - solver_start_time
+ elseif setup["ComputeConflicts"] == 0
+ @info "No model solution. You can try to set ComputeConflicts to 1 in the genx_settings.yml file to compute conflicting constraints."
+
+ elseif setup["ComputeConflicts"] == 1
+ @info "No model solution. Trying to identify conflicting constriants..."
+
+ try
+ compute_conflict!(EP)
+ catch e
+ if isa(e, JuMP.ArgumentError)
+ @warn "$(solver_name(EP)) does not support computing conflicting constraints. This is available using either Gurobi or CPLEX."
+ solver_time = time() - solver_start_time
+ return EP, solver_time
+ else
+ rethrow(e)
+ end
+ end
- list_of_conflicting_constraints = ConstraintRef[]
- if get_attribute(EP, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
- for (F, S) in list_of_constraint_types(EP)
- for con in all_constraints(EP, F, S)
- if get_attribute(con, MOI.ConstraintConflictStatus()) == MOI.IN_CONFLICT
- push!(list_of_conflicting_constraints, con)
- end
- end
- end
- display(list_of_conflicting_constraints)
- solver_time = time() - solver_start_time
- return EP, solver_time, list_of_conflicting_constraints
- else
- @info "Conflicts computation failed."
- solver_time = time() - solver_start_time
- return EP, solver_time, list_of_conflicting_constraints
- end
+ list_of_conflicting_constraints = ConstraintRef[]
+ if get_attribute(EP, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
+ for (F, S) in list_of_constraint_types(EP)
+ for con in all_constraints(EP, F, S)
+ if get_attribute(con, MOI.ConstraintConflictStatus()) == MOI.IN_CONFLICT
+ push!(list_of_conflicting_constraints, con)
+ end
+ end
+ end
+ display(list_of_conflicting_constraints)
+ solver_time = time() - solver_start_time
+ return EP, solver_time, list_of_conflicting_constraints
+ else
+ @info "Conflicts computation failed."
+ solver_time = time() - solver_start_time
+ return EP, solver_time, list_of_conflicting_constraints
+ end
+ end
- end
-
- return EP, solver_time
-end # END solve_model()
\ No newline at end of file
+ return EP, solver_time
+end # END solve_model()
diff --git a/src/model/utility.jl b/src/model/utility.jl
index 22e7329e9b..15e6841958 100644
--- a/src/model/utility.jl
+++ b/src/model/utility.jl
@@ -11,8 +11,8 @@ For example, if p = 10,
1 hour before t=11 is t=20
"""
function hoursbefore(p::Int, t::Int, b::Int)::Int
- period = div(t - 1, p)
- return period * p + mod1(t - b, p)
+ period = div(t - 1, p)
+ return period * p + mod1(t - b, p)
end
@doc raw"""
@@ -23,11 +23,10 @@ to allow for example b=1:3 to fetch a Vector{Int} of the three hours before
time index t.
"""
function hoursbefore(p::Int, t::Int, b::UnitRange{Int})::Vector{Int}
- period = div(t - 1, p)
- return period * p .+ mod1.(t .- b, p)
+ period = div(t - 1, p)
+ return period * p .+ mod1.(t .- b, p)
end
-
@doc raw"""
hoursafter(p::Int, t::Int, a::Int)
@@ -55,7 +54,6 @@ time index t.
function hoursafter(p::Int, t::Int, a::UnitRange{Int})::Vector{Int}
period = div(t - 1, p)
return period * p .+ mod1.(t .+ a, p)
-
end
@doc raw"""
@@ -64,7 +62,7 @@ end
This function checks if a column in a dataframe is all zeros.
"""
function is_nonzero(df::DataFrame, col::Symbol)::BitVector
- convert(BitVector, df[!, col] .> 0)::BitVector
+ convert(BitVector, df[!, col] .> 0)::BitVector
end
function is_nonzero(rs::Vector{<:AbstractResource}, col::Symbol)
@@ -82,4 +80,3 @@ function by_rid_res(rid::Integer, sym::Symbol, rs::Vector{<:AbstractResource})
f = isdefined(GenX, sym) ? getfield(GenX, sym) : x -> getproperty(x, sym)
return f(r)
end
-
diff --git a/src/multi_stage/configure_multi_stage_inputs.jl b/src/multi_stage/configure_multi_stage_inputs.jl
index 870d98cdca..f74d379ed3 100644
--- a/src/multi_stage/configure_multi_stage_inputs.jl
+++ b/src/multi_stage/configure_multi_stage_inputs.jl
@@ -21,29 +21,42 @@ NOTE: The inv\_costs\_yr and crp arrays must be the same length; values with the
returns: array object containing overnight capital costs, the discounted sum of annual investment costs incured within the model horizon.
"""
-function compute_overnight_capital_cost(settings_d::Dict,inv_costs_yr::Array,crp::Array, tech_wacc::Array)
-
- cur_stage = settings_d["CurStage"] # Current model
- num_stages = settings_d["NumStages"] # Total number of model stages
- stage_lens = settings_d["StageLengths"]
-
- # 1) For each resource, find the minimum of the capital recovery period and the end of the model horizon
- # Total time between the end of the final model stage and the start of the current stage
- model_yrs_remaining = sum(stage_lens[cur_stage:end])
-
- # We will sum annualized costs through the full capital recovery period or the end of planning horizon, whichever comes first
- payment_yrs_remaining = min.(crp, model_yrs_remaining)
+function compute_overnight_capital_cost(settings_d::Dict,
+ inv_costs_yr::Array,
+ crp::Array,
+ tech_wacc::Array)
+
+ # Check for resources with non-zero investment costs and a Capital_Recovery_Period value of 0 years
+ if any((crp .== 0) .& (inv_costs_yr .> 0))
+ msg = "You have some resources with non-zero investment costs and a Capital_Recovery_Period value of 0 years.\n" *
+ "These resources will have a calculated overnight capital cost of \$0. Correct your inputs if this is a mistake.\n"
+ error(msg)
+ end
- # KEY ASSUMPTION: Investment costs after the planning horizon are fully recoverable, so we don't need to include these costs
- # 2) Compute the present value of investment associated with capital recovery period within the model horizon - discounting to year 1 and not year 0
- # (Factor to adjust discounting to year 0 for capital cost is included in the discounting coefficient applied to all terms in the objective function value.)
- occ = zeros(length(inv_costs_yr))
- for i in 1:length(occ)
- occ[i] = sum(inv_costs_yr[i]/(1+tech_wacc[i]) .^ (p) for p=1:payment_yrs_remaining[i])
- end
+ cur_stage = settings_d["CurStage"] # Current model
+ num_stages = settings_d["NumStages"] # Total number of model stages
+ stage_lens = settings_d["StageLengths"]
+
+ # 1) For each resource, find the minimum of the capital recovery period and the end of the model horizon
+ # Total time between the end of the final model stage and the start of the current stage
+ model_yrs_remaining = sum(stage_lens[cur_stage:end]; init = 0)
+
+ # We will sum annualized costs through the full capital recovery period or the end of planning horizon, whichever comes first
+ payment_yrs_remaining = min.(crp, model_yrs_remaining)
+
+ # KEY ASSUMPTION: Investment costs after the planning horizon are fully recoverable, so we don't need to include these costs
+ # 2) Compute the present value of investment associated with capital recovery period within the model horizon - discounting to year 1 and not year 0
+ # (Factor to adjust discounting to year 0 for capital cost is included in the discounting coefficient applied to all terms in the objective function value.)
+ occ = zeros(length(inv_costs_yr))
+ for i in 1:length(occ)
+ occ[i] = sum(
+ inv_costs_yr[i] / (1 + tech_wacc[i]) .^ (p)
+ for p in 1:payment_yrs_remaining[i];
+ init = 0)
+ end
- # 3) Return the overnight capital cost (discounted sum of annual investment costs incured within the model horizon)
- return occ
+ # 3) Return the overnight capital cost (discounted sum of annual investment costs incurred within the model horizon)
+ return occ
end
@doc raw"""
@@ -67,92 +80,181 @@ inputs:
returns: dictionary containing updated model inputs, to be used in the generate\_model() method.
"""
-function configure_multi_stage_inputs(inputs_d::Dict, settings_d::Dict, NetworkExpansion::Int64)
-
+function configure_multi_stage_inputs(inputs_d::Dict,
+ settings_d::Dict,
+ NetworkExpansion::Int64)
gen = inputs_d["RESOURCES"]
- # Parameter inputs when multi-year discounting is activated
- cur_stage = settings_d["CurStage"]
- stage_len = settings_d["StageLengths"][cur_stage]
- wacc = settings_d["WACC"] # Interest Rate and also the discount rate unless specified other wise
- myopic = settings_d["Myopic"] == 1 # 1 if myopic (only one forward pass), 0 if full DDP
-
- # Define OPEXMULT here, include in inputs_dict[t] for use in dual_dynamic_programming.jl, transmission_multi_stage.jl, and investment_multi_stage.jl
- OPEXMULT = myopic ? 1 : sum([1/(1+wacc)^(i-1) for i in range(1,stop=stage_len)])
- inputs_d["OPEXMULT"] = OPEXMULT
-
- if !myopic ### Leave myopic costs in annualized form and do not scale OPEX costs
- # 1. Convert annualized investment costs incured within the model horizon into overnight capital costs
- # NOTE: Although the "yr" suffix is still in use in these parameter names, they no longer represent annualized costs but rather truncated overnight capital costs
- gen.inv_cost_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_per_mwyr.(gen), capital_recovery_period.(gen), tech_wacc.(gen))
- gen.inv_cost_per_mwhyr = compute_overnight_capital_cost(settings_d, inv_cost_per_mwhyr.(gen), capital_recovery_period.(gen), tech_wacc.(gen))
- gen.inv_cost_charge_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_charge_per_mwyr.(gen), capital_recovery_period.(gen), tech_wacc.(gen))
-
- # 2. Update fixed O&M costs to account for the possibility of more than 1 year between two model stages
- # NOTE: Although the "yr" suffix is still in use in these parameter names, they now represent total costs incured in each stage, which may be multiple years
- gen.fixed_om_cost_per_mwyr = fixed_om_cost_per_mwyr.(gen) .* OPEXMULT
- gen.fixed_om_cost_per_mwhyr = fixed_om_cost_per_mwhyr.(gen) .* OPEXMULT
- gen.fixed_om_cost_charge_per_mwyr = fixed_om_cost_charge_per_mwyr.(gen) .* OPEXMULT
-
- # Conduct 1. and 2. for any co-located VRE-STOR resources
- if !isempty(inputs_d["VRE_STOR"])
- gen_VRE_STOR = gen.VreStorage
- gen_VRE_STOR.inv_cost_inverter_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_inverter_per_mwyr.(gen_VRE_STOR), capital_recovery_period_dc.(gen_VRE_STOR), tech_wacc_dc.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_solar_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_solar_per_mwyr.(gen_VRE_STOR), capital_recovery_period_solar.(gen_VRE_STOR), tech_wacc_solar.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_wind_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_wind_per_mwyr.(gen_VRE_STOR), capital_recovery_period_wind.(gen_VRE_STOR), tech_wacc_wind.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_discharge_dc_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_discharge_dc_per_mwyr.(gen_VRE_STOR), capital_recovery_period_discharge_dc.(gen_VRE_STOR), tech_wacc_discharge_dc.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_charge_dc_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_charge_dc_per_mwyr.(gen_VRE_STOR), capital_recovery_period_charge_dc.(gen_VRE_STOR), tech_wacc_charge_dc.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_discharge_ac_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_discharge_ac_per_mwyr.(gen_VRE_STOR), capital_recovery_period_discharge_ac.(gen_VRE_STOR), tech_wacc_discharge_ac.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_charge_ac_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_charge_ac_per_mwyr.(gen_VRE_STOR), capital_recovery_period_charge_ac.(gen_VRE_STOR), tech_wacc_charge_ac.(gen_VRE_STOR))
-
- gen_VRE_STOR.fixed_om_inverter_cost_per_mwyr = fixed_om_inverter_cost_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_solar_cost_per_mwyr = fixed_om_solar_cost_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_wind_cost_per_mwyr = fixed_om_wind_cost_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_cost_discharge_dc_per_mwyr = fixed_om_cost_discharge_dc_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_cost_charge_dc_per_mwyr = fixed_om_cost_charge_dc_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_cost_discharge_ac_per_mwyr = fixed_om_cost_discharge_ac_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_cost_charge_ac_per_mwyr = fixed_om_cost_charge_ac_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- end
- end
+ # Parameter inputs when multi-year discounting is activated
+ cur_stage = settings_d["CurStage"]
+ stage_len = settings_d["StageLengths"][cur_stage]
+ wacc = settings_d["WACC"] # Interest Rate and also the discount rate unless specified otherwise
+ myopic = settings_d["Myopic"] == 1 # 1 if myopic (only one forward pass), 0 if full DDP
+
+ # Define OPEXMULT here, include in inputs_dict[t] for use in dual_dynamic_programming.jl, transmission_multi_stage.jl, and investment_multi_stage.jl
+ OPEXMULT = myopic ? 1 :
+ sum([1 / (1 + wacc)^(i - 1) for i in range(1, stop = stage_len)])
+ inputs_d["OPEXMULT"] = OPEXMULT
+
+ if !myopic ### Leave myopic costs in annualized form and do not scale OPEX costs
+ # 1. Convert annualized investment costs incurred within the model horizon into overnight capital costs
+ # NOTE: Although the "yr" suffix is still in use in these parameter names, they no longer represent annualized costs but rather truncated overnight capital costs
+ gen.inv_cost_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_per_mwyr.(gen),
+ capital_recovery_period.(gen),
+ tech_wacc.(gen))
+ gen.inv_cost_per_mwhyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_per_mwhyr.(gen),
+ capital_recovery_period.(gen),
+ tech_wacc.(gen))
+ gen.inv_cost_charge_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_charge_per_mwyr.(gen),
+ capital_recovery_period.(gen),
+ tech_wacc.(gen))
+
+ # 2. Update fixed O&M costs to account for the possibility of more than 1 year between two model stages
+ # NOTE: Although the "yr" suffix is still in use in these parameter names, they now represent total costs incurred in each stage, which may be multiple years
+ gen.fixed_om_cost_per_mwyr = fixed_om_cost_per_mwyr.(gen) .* OPEXMULT
+ gen.fixed_om_cost_per_mwhyr = fixed_om_cost_per_mwhyr.(gen) .* OPEXMULT
+ gen.fixed_om_cost_charge_per_mwyr = fixed_om_cost_charge_per_mwyr.(gen) .* OPEXMULT
+
+ # Conduct 1. and 2. for any co-located VRE-STOR resources
+ if !isempty(inputs_d["VRE_STOR"])
+ gen_VRE_STOR = gen.VreStorage
+ gen_VRE_STOR.inv_cost_inverter_per_mwyr = compute_overnight_capital_cost(
+ settings_d,
+ inv_cost_inverter_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_dc.(gen_VRE_STOR),
+ tech_wacc_dc.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_solar_per_mwyr = compute_overnight_capital_cost(
+ settings_d,
+ inv_cost_solar_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_solar.(gen_VRE_STOR),
+ tech_wacc_solar.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_wind_per_mwyr = compute_overnight_capital_cost(
+ settings_d,
+ inv_cost_wind_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_wind.(gen_VRE_STOR),
+ tech_wacc_wind.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_discharge_dc_per_mwyr = compute_overnight_capital_cost(
+ settings_d,
+ inv_cost_discharge_dc_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_discharge_dc.(gen_VRE_STOR),
+ tech_wacc_discharge_dc.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_charge_dc_per_mwyr = compute_overnight_capital_cost(
+ settings_d,
+ inv_cost_charge_dc_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_charge_dc.(gen_VRE_STOR),
+ tech_wacc_charge_dc.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_discharge_ac_per_mwyr = compute_overnight_capital_cost(
+ settings_d,
+ inv_cost_discharge_ac_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_discharge_ac.(gen_VRE_STOR),
+ tech_wacc_discharge_ac.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_charge_ac_per_mwyr = compute_overnight_capital_cost(
+ settings_d,
+ inv_cost_charge_ac_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_charge_ac.(gen_VRE_STOR),
+ tech_wacc_charge_ac.(gen_VRE_STOR))
+
+ gen_VRE_STOR.fixed_om_inverter_cost_per_mwyr = fixed_om_inverter_cost_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_solar_cost_per_mwyr = fixed_om_solar_cost_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_wind_cost_per_mwyr = fixed_om_wind_cost_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_cost_discharge_dc_per_mwyr = fixed_om_cost_discharge_dc_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_cost_charge_dc_per_mwyr = fixed_om_cost_charge_dc_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_cost_discharge_ac_per_mwyr = fixed_om_cost_discharge_ac_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_cost_charge_ac_per_mwyr = fixed_om_cost_charge_ac_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ end
+ end
retirable = is_retirable(gen)
- # TODO: ask Sam about this
+ # TODO: ask Sam about this
# Set of all resources eligible for capacity retirements
- inputs_d["RET_CAP"] = retirable
- # Set of all storage resources eligible for energy capacity retirements
- inputs_d["RET_CAP_ENERGY"] = intersect(retirable, inputs_d["STOR_ALL"])
- # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
- inputs_d["RET_CAP_CHARGE"] = intersect(retirable, inputs_d["STOR_ASYMMETRIC"])
- # Set of all co-located resources' components eligible for capacity retirements
- if !isempty(inputs_d["VRE_STOR"])
- inputs_d["RET_CAP_DC"] = intersect(retirable, inputs_d["VS_DC"])
- inputs_d["RET_CAP_SOLAR"] = intersect(retirable, inputs_d["VS_SOLAR"])
- inputs_d["RET_CAP_WIND"] = intersect(retirable, inputs_d["VS_WIND"])
- inputs_d["RET_CAP_STOR"] = intersect(retirable, inputs_d["VS_STOR"])
- inputs_d["RET_CAP_DISCHARGE_DC"] = intersect(retirable, inputs_d["VS_ASYM_DC_DISCHARGE"])
- inputs_d["RET_CAP_CHARGE_DC"] = intersect(retirable, inputs_d["VS_ASYM_DC_CHARGE"])
- inputs_d["RET_CAP_DISCHARGE_AC"] = intersect(retirable, inputs_d["VS_ASYM_AC_DISCHARGE"])
- inputs_d["RET_CAP_CHARGE_AC"] = intersect(retirable, inputs_d["VS_ASYM_AC_CHARGE"])
- end
-
- # Transmission
- if NetworkExpansion == 1 && inputs_d["Z"] > 1
-
- if !myopic ### Leave myopic costs in annualized form
- # 1. Convert annualized tramsmission investment costs incured within the model horizon into overnight capital costs
- inputs_d["pC_Line_Reinforcement"] = compute_overnight_capital_cost(settings_d,inputs_d["pC_Line_Reinforcement"],inputs_d["Capital_Recovery_Period_Trans"],inputs_d["transmission_WACC"])
- end
-
- # Scale max_allowed_reinforcement to allow for possibility of deploying maximum reinforcement in each investment stage
- inputs_d["pTrans_Max_Possible"] = inputs_d["pLine_Max_Flow_Possible_MW"]
+ inputs_d["RET_CAP"] = retirable
+ # Set of all storage resources eligible for energy capacity retirements
+ inputs_d["RET_CAP_ENERGY"] = intersect(retirable, inputs_d["STOR_ALL"])
+ # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
+ inputs_d["RET_CAP_CHARGE"] = intersect(retirable, inputs_d["STOR_ASYMMETRIC"])
+ # Set of all co-located resources' components eligible for capacity retirements
+ if !isempty(inputs_d["VRE_STOR"])
+ inputs_d["RET_CAP_DC"] = intersect(retirable, inputs_d["VS_DC"])
+ inputs_d["RET_CAP_SOLAR"] = intersect(retirable, inputs_d["VS_SOLAR"])
+ inputs_d["RET_CAP_WIND"] = intersect(retirable, inputs_d["VS_WIND"])
+ inputs_d["RET_CAP_STOR"] = intersect(retirable, inputs_d["VS_STOR"])
+ inputs_d["RET_CAP_DISCHARGE_DC"] = intersect(retirable,
+ inputs_d["VS_ASYM_DC_DISCHARGE"])
+ inputs_d["RET_CAP_CHARGE_DC"] = intersect(retirable, inputs_d["VS_ASYM_DC_CHARGE"])
+ inputs_d["RET_CAP_DISCHARGE_AC"] = intersect(retirable,
+ inputs_d["VS_ASYM_AC_DISCHARGE"])
+ inputs_d["RET_CAP_CHARGE_AC"] = intersect(retirable, inputs_d["VS_ASYM_AC_CHARGE"])
+ end
+
+ # Transmission
+ if NetworkExpansion == 1 && inputs_d["Z"] > 1
+ if !myopic ### Leave myopic costs in annualized form
+ # 1. Convert annualized transmission investment costs incurred within the model horizon into overnight capital costs
+ inputs_d["pC_Line_Reinforcement"] = compute_overnight_capital_cost(settings_d,
+ inputs_d["pC_Line_Reinforcement"],
+ inputs_d["Capital_Recovery_Period_Trans"],
+ inputs_d["transmission_WACC"])
+ end
+
+ # Scale max_allowed_reinforcement to allow for possibility of deploying maximum reinforcement in each investment stage
+ inputs_d["pTrans_Max_Possible"] = inputs_d["pLine_Max_Flow_Possible_MW"]
# Network lines and zones that are expandable have greater maximum possible line flow than the available capacity of the previous stage as well as available line reinforcement
- inputs_d["EXPANSION_LINES"] = findall((inputs_d["pLine_Max_Flow_Possible_MW"] .> inputs_d["pTrans_Max"]) .& (inputs_d["pMax_Line_Reinforcement"] .> 0))
- inputs_d["NO_EXPANSION_LINES"] = findall((inputs_d["pLine_Max_Flow_Possible_MW"] .<= inputs_d["pTrans_Max"]) .| (inputs_d["pMax_Line_Reinforcement"] .<= 0))
- # To-Do: Error Handling
- # 1.) Enforce that pLine_Max_Flow_Possible_MW for the first model stage be equal to (for transmission expansion to be disalowed) or greater (to allow transmission expansion) than pTrans_Max in inputs/inputs_p1
+ inputs_d["EXPANSION_LINES"] = findall((inputs_d["pLine_Max_Flow_Possible_MW"] .>
+ inputs_d["pTrans_Max"]) .&
+ (inputs_d["pMax_Line_Reinforcement"] .> 0))
+ inputs_d["NO_EXPANSION_LINES"] = findall((inputs_d["pLine_Max_Flow_Possible_MW"] .<=
+ inputs_d["pTrans_Max"]) .|
+ (inputs_d["pMax_Line_Reinforcement"] .<=
+ 0))
+ # To-Do: Error Handling
+ # 1.) Enforce that pLine_Max_Flow_Possible_MW for the first model stage be equal to (for transmission expansion to be disallowed) or greater (to allow transmission expansion) than pTrans_Max in inputs/inputs_p1
end
return inputs_d
end
+
+@doc raw"""
+ validate_can_retire_multistage(inputs_dict::Dict, num_stages::Int)
+
+This function validates that all the resources do not switch from having `can_retire = 0` to `can_retire = 1` during the multi-stage optimization.
+
+# Arguments
+- `inputs_dict::Dict`: A dictionary containing the inputs for each stage.
+- `num_stages::Int`: The number of stages in the multi-stage optimization.
+
+# Returns
+- Throws an error if a resource switches from `can_retire = 0` to `can_retire = 1` between stages.
+"""
+function validate_can_retire_multistage(inputs_dict::Dict, num_stages::Int)
+ for stage in 2:num_stages # note: loop starts from 2 because we are comparing stage t with stage t-1
+ can_retire_current = can_retire.(inputs_dict[stage]["RESOURCES"])
+ can_retire_previous = can_retire.(inputs_dict[stage - 1]["RESOURCES"])
+
+ # Check if any resource switched from can_retire = 0 to can_retire = 1 between stage t-1 and t
+ if any(can_retire_current .- can_retire_previous .> 0)
+ # Find the resources that switched from can_retire = 0 to can_retire = 1 and throw an error
+ retire_switch_ids = findall(can_retire_current .- can_retire_previous .> 0)
+ resources_switched = inputs_dict[stage]["RESOURCES"][retire_switch_ids]
+ for resource in resources_switched
+ @warn "Resource `$(resource_name(resource))` with id = $(resource_id(resource)) switched " *
+ "from can_retire = 0 to can_retire = 1 between stages $(stage - 1) and $stage"
+ end
+ msg = "Current implementation of multi-stage optimization does not allow resources " *
+ "to switch from can_retire = 0 to can_retire = 1 between stages."
+ error(msg)
+ end
+ end
+ return nothing
+end
diff --git a/src/multi_stage/dual_dynamic_programming.jl b/src/multi_stage/dual_dynamic_programming.jl
index d2ff098952..05ecd5154e 100644
--- a/src/multi_stage/dual_dynamic_programming.jl
+++ b/src/multi_stage/dual_dynamic_programming.jl
@@ -131,8 +131,7 @@ returns:
* stats\_d – Dictionary which contains the run time, upper bound, and lower bound of each DDP iteration.
* inputs\_d – Dictionary of inputs for each model stage, generated by the load\_inputs() method, modified by this method.
"""
-function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
-
+function run_ddp(outpath::AbstractString, models_d::Dict, setup::Dict, inputs_d::Dict)
settings_d = setup["MultiStageSettingsDict"]
num_stages = settings_d["NumStages"] # Total number of investment planning stages
EPSILON = settings_d["ConvergenceTolerance"] # Tolerance
@@ -143,14 +142,17 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
ic = 0 # Iteration Counter
results_d = Dict() # Dictionary to store the results to return
- stats_d = Dict() # Dictionary to store the statistics (total time, upper bound, and lower bound for each iteration)
times_a = [] # Array to store the total time of each iteration
upper_bounds_a = [] # Array to store the upper bound of each iteration
lower_bounds_a = [] # Array to store the lower bound of each iteration
+ stats_d = Dict() # Dictionary to store the statistics (total time, upper bound, and lower bound for each iteration)
+ stats_d["TIMES"] = times_a
+ stats_d["UPPER_BOUNDS"] = upper_bounds_a
+ stats_d["LOWER_BOUNDS"] = lower_bounds_a
# Step a.i) Initialize cost-to-go function for t = 1:num_stages
for t in 1:num_stages
- settings_d["CurStage"] = t;
+ settings_d["CurStage"] = t
models_d[t] = initialize_cost_to_go(settings_d, models_d[t], inputs_d[t])
end
@@ -162,7 +164,6 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
println("Solving First Stage Problem")
println("***********")
-
t = 1 # Stage = 1
solve_time_d = Dict()
ddp_prev_time = time() # Begin tracking time of each iteration
@@ -174,7 +175,6 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
# Step c.ii) If the relative difference between upper and lower bounds are small, break loop
while ((z_upper - z_lower) / z_lower > EPSILON)
-
ic = ic + 1 # Increase iteration counter by 1
if (ic > 10000)
@@ -184,10 +184,6 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
println(string("Lower Bound = ", z_lower))
println("***********")
- stats_d["TIMES"] = times_a
- stats_d["UPPER_BOUNDS"] = upper_bounds_a
- statd_d["LOWER_BOUNDS"] = lower_bounds_a
-
return models_d, stats_d, inputs_d
end
@@ -207,21 +203,25 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
end
## Forward pass for t=2:num_stages
for t in 2:num_stages
-
println("***********")
println(string("Forward Pass t = ", t))
println("***********")
# Step d.i) Fix initial investments for model at time t given optimal solution for time t-1
- models_d[t] = fix_initial_investments(models_d[t-1], models_d[t], start_cap_d, inputs_d[t])
+ models_d[t] = fix_initial_investments(models_d[t - 1],
+ models_d[t],
+ start_cap_d,
+ inputs_d[t])
# Step d.ii) Fix capacity tracking variables for endogenous retirements
- models_d[t] = fix_capacity_tracking(models_d[t-1], models_d[t], cap_track_d, t)
+ models_d[t] = fix_capacity_tracking(models_d[t - 1],
+ models_d[t],
+ cap_track_d,
+ t)
# Step d.iii) Solve the model at time t
models_d[t], solve_time_d[t] = solve_model(models_d[t], setup)
inputs_d[t]["solve_time"] = solve_time_d[t]
-
end
### For the myopic solution, algorithm should terminate here after the first forward pass calculation and then move to Outputs writing.
@@ -231,10 +231,6 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
println(string("Upper Bound = ", z_upper))
println(string("Lower Bound = ", z_lower))
println("***********")
-
- stats_d["TIMES"] = times_a
- stats_d["UPPER_BOUNDS"] = upper_bounds_a
- stats_d["LOWER_BOUNDS"] = lower_bounds_a
return models_d, stats_d, inputs_d
end
###
@@ -242,7 +238,8 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
# Step e) Calculate the new upper bound
z_upper_temp = 0
for t in 1:num_stages
- z_upper_temp = z_upper_temp + (objective_value(models_d[t]) - value(models_d[t][:vALPHA]))
+ z_upper_temp = z_upper_temp +
+ (objective_value(models_d[t]) - value(models_d[t][:vALPHA]))
end
# If the upper bound decreased, set it as the new upper bound
@@ -251,29 +248,35 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
end
append!(upper_bounds_a, z_upper) # Store current iteration upper bound
+ update_multi_stage_stats_file(outpath, ic, z_upper, z_lower, NaN, new_row = true)
# Step f) Backward pass for t = num_stages:2
for t in num_stages:-1:2
-
println("***********")
println(string("Backward Pass t = ", t))
println("***********")
# Step f.i) Add a cut to the previous time step using information from the current time step
- models_d[t-1] = add_cut(models_d[t-1], models_d[t], start_cap_d, cap_track_d)
+ models_d[t - 1] = add_cut(models_d[t - 1],
+ models_d[t],
+ start_cap_d,
+ cap_track_d)
# Step f.ii) Solve the model with the additional cut at time t-1
- models_d[t-1], solve_time_d[t-1] = solve_model(models_d[t-1], setup)
- inputs_d[t-1]["solve_time"] = solve_time_d[t-1]
+ models_d[t - 1], solve_time_d[t - 1] = solve_model(models_d[t - 1], setup)
+ inputs_d[t - 1]["solve_time"] = solve_time_d[t - 1]
end
# Step g) Recalculate lower bound and go back to c)
z_lower = objective_value(models_d[1])
append!(lower_bounds_a, z_lower) # Store current iteration lower bound
+ update_multi_stage_stats_file(outpath, ic, z_upper, z_lower, NaN)
# Step h) Store the total time of the current iteration (in seconds)
ddp_iteration_time = time() - ddp_prev_time
append!(times_a, ddp_iteration_time)
+ update_multi_stage_stats_file(outpath, ic, z_upper, z_lower, ddp_iteration_time)
+
ddp_prev_time = time()
end
@@ -283,7 +286,6 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
println(string("Lower Bound = ", z_lower))
println("***********")
-
### STEP I) One final forward pass to guarantee convergence
# Forward pass for t = 1:num_stages
t = 1 # update forward pass solution for the first stage
@@ -296,10 +298,13 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
println("***********")
# Step d.i) Fix initial investments for model at time t given optimal solution for time t-1
- models_d[t] = fix_initial_investments(models_d[t-1], models_d[t], start_cap_d, inputs_d[t])
+ models_d[t] = fix_initial_investments(models_d[t - 1],
+ models_d[t],
+ start_cap_d,
+ inputs_d[t])
# Step d.ii) Fix capacity tracking variables for endogenous retirements
- models_d[t] = fix_capacity_tracking(models_d[t-1], models_d[t], cap_track_d, t)
+ models_d[t] = fix_capacity_tracking(models_d[t - 1], models_d[t], cap_track_d, t)
# Step d.iii) Solve the model at time t
models_d[t], solve_time_d[t] = solve_model(models_d[t], setup)
@@ -307,40 +312,9 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
end
##### END of final forward pass
- stats_d["TIMES"] = times_a
- stats_d["UPPER_BOUNDS"] = upper_bounds_a
- stats_d["LOWER_BOUNDS"] = lower_bounds_a
-
return models_d, stats_d, inputs_d
end
-@doc raw"""
- write_multi_stage_outputs(stats_d::Dict, outpath::String, settings_d::Dict)
-
-This function calls various methods which write multi-stage modeling outputs as .csv files.
-
-inputs:
-
- * stats\_d – Dictionary which contains the run time, upper bound, and lower bound of each DDP iteration.
- * outpath – String which represents the path to the Results directory.
- * settings\_d - Dictionary containing settings configured in the GenX settings genx\_settings.yml file as well as the multi-stage settings file multi\_stage\_settings.yml.
-"""
-function write_multi_stage_outputs(stats_d::Dict, outpath::String, settings_d::Dict, inputs_dict::Dict)
-
- multi_stage_settings_d = settings_d["MultiStageSettingsDict"]
-
- write_multi_stage_capacities_discharge(outpath, multi_stage_settings_d)
- write_multi_stage_capacities_charge(outpath, multi_stage_settings_d)
- write_multi_stage_capacities_energy(outpath, multi_stage_settings_d)
- if settings_d["NetworkExpansion"] == 1
- write_multi_stage_network_expansion(outpath, multi_stage_settings_d)
- end
- write_multi_stage_costs(outpath, multi_stage_settings_d, inputs_dict)
- multi_stage_settings_d["Myopic"] == 0 && write_multi_stage_stats(outpath, stats_d)
- write_multi_stage_settings(outpath, settings_d)
-
-end
-
@doc raw"""
fix_initial_investments(EP_prev::Model, EP_cur::Model, start_cap_d::Dict)
@@ -354,22 +328,24 @@ inputs:
returns: JuMP model with updated linking constraints.
"""
-function fix_initial_investments(EP_prev::Model, EP_cur::Model, start_cap_d::Dict, inputs_d::Dict)
-
- ALL_CAP = union(inputs_d["RET_CAP"],inputs_d["NEW_CAP"]) # Set of all resources subject to inter-stage capacity tracking
-
+function fix_initial_investments(EP_prev::Model,
+ EP_cur::Model,
+ start_cap_d::Dict,
+ inputs_d::Dict)
+ ALL_CAP = union(inputs_d["RET_CAP"], inputs_d["NEW_CAP"]) # Set of all resources subject to inter-stage capacity tracking
+
# start_cap_d dictionary contains the starting capacity expression name (e) as a key,
# and the associated linking constraint name (c) as a value
for (e, c) in start_cap_d
for y in keys(EP_cur[c])
- # Set the right hand side value of the linking initial capacity constraint in the current stage to the value of the available capacity variable solved for in the previous stages
- if c == :cExistingTransCap
+ # Set the right hand side value of the linking initial capacity constraint in the current stage to the value of the available capacity variable solved for in the previous stages
+ if c == :cExistingTransCap
+ set_normalized_rhs(EP_cur[c][y], value(EP_prev[e][y]))
+ else
+ if y[1] in ALL_CAP # extract resource integer index value from key
set_normalized_rhs(EP_cur[c][y], value(EP_prev[e][y]))
- else
- if y[1] in ALL_CAP # extract resource integer index value from key
- set_normalized_rhs(EP_cur[c][y], value(EP_prev[e][y]))
- end
end
+ end
end
end
return EP_cur
@@ -391,7 +367,10 @@ inputs:
returns: JuMP model with updated linking constraints.
"""
-function fix_capacity_tracking(EP_prev::Model, EP_cur::Model, cap_track_d::Dict, cur_stage::Int)
+function fix_capacity_tracking(EP_prev::Model,
+ EP_cur::Model,
+ cap_track_d::Dict,
+ cur_stage::Int)
# cap_track_d dictionary contains the endogenous retirement tracking array variable name (v) as a key,
# and the associated linking constraint name (c) as a value
@@ -407,7 +386,7 @@ function fix_capacity_tracking(EP_prev::Model, EP_cur::Model, cap_track_d::Dict,
# For all previous stages, set the right hand side value of the tracking constraint in the current
# stage to the value of the tracking constraint observed in the previous stage
- for p in 1:(cur_stage-1)
+ for p in 1:(cur_stage - 1)
# Tracking newly buily capacity over all previous stages
JuMP.set_normalized_rhs(EP_cur[c][i, p], value(EP_prev[v][i, p]))
# Tracking retired capacity over all previous stages
@@ -432,7 +411,6 @@ inputs:
returns: JuMP expression representing a sum of Benders cuts for linking capacity investment variables to be added to the cost-to-go function.
"""
function add_cut(EP_cur::Model, EP_next::Model, start_cap_d::Dict, cap_track_d::Dict)
-
next_obj_value = objective_value(EP_next) # Get the objective function value for the next investment planning stage
eRHS = @expression(EP_cur, 0) # Initialize RHS of cut to 0
@@ -480,7 +458,7 @@ function add_cut(EP_cur::Model, EP_next::Model, start_cap_d::Dict, cap_track_d::
end
# Add the cut to the model
- @constraint(EP_cur, EP_cur[:vALPHA] >= next_obj_value - eRHS)
+ @constraint(EP_cur, EP_cur[:vALPHA]>=next_obj_value - eRHS)
return EP_cur
end
@@ -505,8 +483,10 @@ inputs:
returns: JuMP expression representing a sum of Benders cuts for linking capacity investment variables to be added to the cost-to-go function.
"""
-function generate_cut_component_track(EP_cur::Model, EP_next::Model, var_name::Symbol, constr_name::Symbol)
-
+function generate_cut_component_track(EP_cur::Model,
+ EP_next::Model,
+ var_name::Symbol,
+ constr_name::Symbol)
next_dual_value = Float64[]
cur_inv_value = Float64[]
cur_inv_var = []
@@ -520,7 +500,8 @@ function generate_cut_component_track(EP_cur::Model, EP_next::Model, var_name::S
push!(cur_inv_var, EP_cur[var_name][y, p])
end
- eCutComponent = @expression(EP_cur, dot(next_dual_value, (cur_inv_value .- cur_inv_var)))
+ eCutComponent = @expression(EP_cur,
+ dot(next_dual_value, (cur_inv_value .- cur_inv_var)))
return eCutComponent
end
@@ -545,20 +526,22 @@ inputs:
returns: JuMP expression representing a sum of Benders cuts for linking capacity investment variables to be added to the cost-to-go function.
"""
-function generate_cut_component_inv(EP_cur::Model, EP_next::Model, expr_name::Symbol, constr_name::Symbol)
-
+function generate_cut_component_inv(EP_cur::Model,
+ EP_next::Model,
+ expr_name::Symbol,
+ constr_name::Symbol)
next_dual_value = Float64[]
cur_inv_value = Float64[]
cur_inv_var = []
for y in keys(EP_next[constr_name])
-
push!(next_dual_value, dual(EP_next[constr_name][y]))
push!(cur_inv_value, value(EP_cur[expr_name][y]))
push!(cur_inv_var, EP_cur[expr_name][y])
end
- eCutComponent = @expression(EP_cur, dot(next_dual_value, (cur_inv_value .- cur_inv_var)))
+ eCutComponent = @expression(EP_cur,
+ dot(next_dual_value, (cur_inv_value .- cur_inv_var)))
return eCutComponent
end
@@ -577,10 +560,10 @@ The updated objective function $OBJ^{*}$ returned by this method takes the form:
where $OBJ$ is the original objective function. $OBJ$ is scaled by two terms. The first is a discount factor (applied only in the non-myopic case), which discounts costs associated with the model stage $p$ to year-0 dollars:
```math
\begin{aligned}
- DF = \frac{1}{(1+WACC)^{L*(p-1)}}
+ DF = \frac{1}{(1+WACC)^{\sum^{(p-1)}_{k=0}L_{k}}}
\end{aligned}
```
-where $WACC$ is the weighted average cost of capital, and $L$ is the length of each stage in years (both set in multi\_stage\_settings.yml)
+where $WACC$ is the weighted average cost of capital, and $L_{p}$ is the length of each stage in years (both set in multi\_stage\_settings.yml)
The second term is a discounted sum of annual operational expenses incurred each year of a multi-year model stage:
```math
@@ -600,8 +583,11 @@ inputs:
returns: JuMP model with updated objective function.
"""
function initialize_cost_to_go(settings_d::Dict, EP::Model, inputs::Dict)
-
cur_stage = settings_d["CurStage"] # Current DDP Investment Planning Stage
+ cum_years = 0
+ for stage_count in 1:(cur_stage - 1)
+ cum_years += settings_d["StageLengths"][stage_count]
+ end
stage_len = settings_d["StageLengths"][cur_stage]
wacc = settings_d["WACC"] # Interest Rate and also the discount rate unless specified other wise
myopic = settings_d["Myopic"] == 1 # 1 if myopic (only one forward pass), 0 if full DDP
@@ -614,12 +600,11 @@ function initialize_cost_to_go(settings_d::Dict, EP::Model, inputs::Dict)
### No discount factor or OPEX multiplier applied in myopic case as costs are left annualized.
@objective(EP, Min, EP[:eObj])
else
- DF = 1 / (1 + wacc)^(stage_len * (cur_stage - 1)) # Discount factor applied all to costs in each stage ###
+ DF = 1 / (1 + wacc)^(cum_years) # Discount factor applied all to costs in each stage ###
# Initialize the cost-to-go variable
- @variable(EP, vALPHA >= 0)
- @objective(EP, Min, DF * OPEXMULT * EP[:eObj] + vALPHA)
+ @variable(EP, vALPHA>=0)
+ @objective(EP, Min, DF * OPEXMULT * EP[:eObj]+vALPHA)
end
return EP
-
end
diff --git a/src/multi_stage/endogenous_retirement.jl b/src/multi_stage/endogenous_retirement.jl
index fca0ebb0bf..29c549d719 100644
--- a/src/multi_stage/endogenous_retirement.jl
+++ b/src/multi_stage/endogenous_retirement.jl
@@ -12,113 +12,143 @@ inputs:
returns: An Int representing the model stage in before which the resource must retire due to endogenous lifetime retirements.
"""
function get_retirement_stage(cur_stage::Int, lifetime::Int, stage_lens::Array{Int, 1})
- years_from_start = sum(stage_lens[1:cur_stage]) # Years from start from the END of the current stage
- ret_years = years_from_start - lifetime # Difference between end of current stage and technology lifetime
- ret_stage = 0 # Compute the stage before which all newly built capacity must be retired by the end of the current stage
- while (ret_years - stage_lens[ret_stage+1] >= 0) & (ret_stage < cur_stage)
- ret_stage += 1
- ret_years -= stage_lens[ret_stage]
- end
+ years_from_start = sum(stage_lens[1:cur_stage]) # Years from start from the END of the current stage
+ ret_years = years_from_start - lifetime # Difference between end of current stage and technology lifetime
+ ret_stage = 0 # Compute the stage before which all newly built capacity must be retired by the end of the current stage
+ while (ret_years - stage_lens[ret_stage + 1] >= 0) & (ret_stage < cur_stage)
+ ret_stage += 1
+ ret_years -= stage_lens[ret_stage]
+ end
return Int(ret_stage)
end
-function update_cumulative_min_ret!(inputs_d::Dict,t::Int,Resource_Set::String,RetCap::Symbol)
-
- gen_name = "RESOURCES"
- CumRetCap = Symbol("cum_"*String(RetCap))
- # if the getter function exists in GenX then use it, otherwise get the attribute directly
- ret_cap_f = isdefined(GenX, RetCap) ? getfield(GenX, RetCap) : r -> getproperty(r, RetCap)
- cum_ret_cap_f = isdefined(GenX, CumRetCap) ? getfield(GenX, CumRetCap) : r -> getproperty(r, CumRetCap)
- if !isempty(inputs_d[1][Resource_Set])
- gen_t = inputs_d[t][gen_name]
- if t==1
- gen_t[CumRetCap] = ret_cap_f.(gen_t)
- else
- gen_t[CumRetCap] = cum_ret_cap_f.(inputs_d[t-1][gen_name]) + ret_cap_f.(gen_t)
- end
- end
+function update_cumulative_min_ret!(inputs_d::Dict,
+ t::Int,
+ Resource_Set::String,
+ RetCap::Symbol)
+ gen_name = "RESOURCES"
+ CumRetCap = Symbol("cum_" * String(RetCap))
+ # if the getter function exists in GenX then use it, otherwise get the attribute directly
+ ret_cap_f = isdefined(GenX, RetCap) ? getfield(GenX, RetCap) :
+ r -> getproperty(r, RetCap)
+ cum_ret_cap_f = isdefined(GenX, CumRetCap) ? getfield(GenX, CumRetCap) :
+ r -> getproperty(r, CumRetCap)
+ if !isempty(inputs_d[1][Resource_Set])
+ gen_t = inputs_d[t][gen_name]
+ if t == 1
+ gen_t[CumRetCap] = ret_cap_f.(gen_t)
+ else
+ gen_t[CumRetCap] = cum_ret_cap_f.(inputs_d[t - 1][gen_name]) + ret_cap_f.(gen_t)
+ end
+ end
end
-
-function compute_cumulative_min_retirements!(inputs_d::Dict,t::Int)
-
- mytab =[("G", :min_retired_cap_mw),
- ("STOR_ALL", :min_retired_energy_cap_mw),
- ("STOR_ASYMMETRIC", :min_retired_charge_cap_mw)];
-
- if !isempty(inputs_d[1]["VRE_STOR"])
- append!(mytab,[("VS_STOR", :min_retired_energy_cap_mw),
- ("VS_DC", :min_retired_cap_inverter_mw),
- ("VS_SOLAR", :min_retired_cap_solar_mw),
- ("VS_WIND", :min_retired_cap_wind_mw),
- ("VS_ASYM_DC_DISCHARGE", :min_retired_cap_discharge_dc_mw),
- ("VS_ASYM_DC_CHARGE", :min_retired_cap_charge_dc_mw),
- ("VS_ASYM_AC_DISCHARGE", :min_retired_cap_discharge_ac_mw),
- ("VS_ASYM_AC_CHARGE", :min_retired_cap_charge_ac_mw)])
-
- end
-
- for (Resource_Set,RetCap) in mytab
- update_cumulative_min_ret!(inputs_d,t,Resource_Set,RetCap)
- end
-
-
+function compute_cumulative_min_retirements!(inputs_d::Dict, t::Int)
+ mytab = [("G", :min_retired_cap_mw),
+ ("STOR_ALL", :min_retired_energy_cap_mw),
+ ("STOR_ASYMMETRIC", :min_retired_charge_cap_mw)]
+
+ if !isempty(inputs_d[1]["VRE_STOR"])
+ append!(mytab,
+ [("VS_STOR", :min_retired_energy_cap_mw),
+ ("VS_DC", :min_retired_cap_inverter_mw),
+ ("VS_SOLAR", :min_retired_cap_solar_mw),
+ ("VS_WIND", :min_retired_cap_wind_mw),
+ ("VS_ASYM_DC_DISCHARGE", :min_retired_cap_discharge_dc_mw),
+ ("VS_ASYM_DC_CHARGE", :min_retired_cap_charge_dc_mw),
+ ("VS_ASYM_AC_DISCHARGE", :min_retired_cap_discharge_ac_mw),
+ ("VS_ASYM_AC_CHARGE", :min_retired_cap_charge_ac_mw)])
+ end
+
+ for (Resource_Set, RetCap) in mytab
+ update_cumulative_min_ret!(inputs_d, t, Resource_Set, RetCap)
+ end
end
-
function endogenous_retirement!(EP::Model, inputs::Dict, setup::Dict)
- multi_stage_settings = setup["MultiStageSettingsDict"]
-
- println("Endogenous Retirement Module")
-
- num_stages = multi_stage_settings["NumStages"]
- cur_stage = multi_stage_settings["CurStage"]
- stage_lens = multi_stage_settings["StageLengths"]
-
- endogenous_retirement_discharge!(EP, inputs, num_stages, cur_stage, stage_lens)
-
- if !isempty(inputs["STOR_ALL"])
- endogenous_retirement_energy!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["STOR_ASYMMETRIC"])
- endogenous_retirement_charge!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VRE_STOR"])
- if !isempty(inputs["VS_DC"])
- endogenous_retirement_vre_stor_dc!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_SOLAR"])
- endogenous_retirement_vre_stor_solar!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_WIND"])
- endogenous_retirement_vre_stor_wind!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_STOR"])
- endogenous_retirement_vre_stor_stor!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_ASYM_DC_DISCHARGE"])
- endogenous_retirement_vre_stor_discharge_dc!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_ASYM_DC_CHARGE"])
- endogenous_retirement_vre_stor_charge_dc!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_ASYM_AC_DISCHARGE"])
- endogenous_retirement_vre_stor_discharge_ac!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_ASYM_AC_CHARGE"])
- endogenous_retirement_vre_stor_charge_ac!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
- end
-
+ multi_stage_settings = setup["MultiStageSettingsDict"]
+
+ println("Endogenous Retirement Module")
+
+ num_stages = multi_stage_settings["NumStages"]
+ cur_stage = multi_stage_settings["CurStage"]
+ stage_lens = multi_stage_settings["StageLengths"]
+
+ endogenous_retirement_discharge!(EP, inputs, num_stages, cur_stage, stage_lens)
+
+ if !isempty(inputs["STOR_ALL"])
+ endogenous_retirement_energy!(EP, inputs, num_stages, cur_stage, stage_lens)
+ end
+
+ if !isempty(inputs["STOR_ASYMMETRIC"])
+ endogenous_retirement_charge!(EP, inputs, num_stages, cur_stage, stage_lens)
+ end
+
+ if !isempty(inputs["VRE_STOR"])
+ if !isempty(inputs["VS_DC"])
+ endogenous_retirement_vre_stor_dc!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_SOLAR"])
+ endogenous_retirement_vre_stor_solar!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_WIND"])
+ endogenous_retirement_vre_stor_wind!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_STOR"])
+ endogenous_retirement_vre_stor_stor!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_ASYM_DC_DISCHARGE"])
+ endogenous_retirement_vre_stor_discharge_dc!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_ASYM_DC_CHARGE"])
+ endogenous_retirement_vre_stor_charge_dc!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_ASYM_AC_DISCHARGE"])
+ endogenous_retirement_vre_stor_discharge_ac!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_ASYM_AC_CHARGE"])
+ endogenous_retirement_vre_stor_charge_ac!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+ end
end
@doc raw"""
@@ -139,547 +169,753 @@ In other words, it is the largest index $r \in \{1, ..., (p-1)\}$ such that:
\end{aligned}
```
"""
-function endogenous_retirement_discharge!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (Discharge) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP = inputs["NEW_CAP"] # Set of all resources eligible for new capacity
- RET_CAP = inputs["RET_CAP"] # Set of all resources eligible for capacity retirements
- COMMIT = inputs["COMMIT"] # Set of all resources eligible for unit commitment
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACK[y in RET_CAP,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACK[y in RET_CAP,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCap[y in RET_CAP],
- if y in NEW_CAP
- EP[:vCAP][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCap[y in RET_CAP],
- if y in ids_with_all_options_contributing(gen)
- EP[:vRETCAP][y] + EP[:vRETROFITCAP][y]
- else
- EP[:vRETCAP][y]
- end
- )
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrack[y in RET_CAP], sum(EP[:vRETCAPTRACK][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrack[y in RET_CAP], sum(EP[:vCAPTRACK][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrack[y in RET_CAP],
- if y in COMMIT
- cum_min_retired_cap_mw(gen[y])/cap_size(gen[y])
- else
- cum_min_retired_cap_mw(gen[y])
- end
- )
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNew[y in RET_CAP], eNewCap[y] == vCAPTRACK[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrack[y in RET_CAP,p=1:(cur_stage-1)], vCAPTRACK[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNew[y in RET_CAP], eRetCap[y] == vRETCAPTRACK[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrack[y in RET_CAP,p=1:(cur_stage-1)], vRETCAPTRACK[y,p] == 0)
-
- # Create a slack variable for each resource that is not contributing to the retired capacity being tracked
- # This ensures that the model is able to satisfy the minimum retirement constraint
- RETROFIT_WITH_SLACK = ids_with_all_options_not_contributing(gen)
- if !isempty(RETROFIT_WITH_SLACK)
- @variable(EP, vslack_lifetime[y in RETROFIT_WITH_SLACK] >=0)
- @expression(EP, vslack_term, 2*maximum(inv_cost_per_mwyr.(gen))*sum(vslack_lifetime[y] for y in RETROFIT_WITH_SLACK; init=0))
- add_to_expression!(EP[:eObj], vslack_term)
- end
-
- @expression(EP,eLifetimeRetRHS[y in RET_CAP],
- if y in RETROFIT_WITH_SLACK
- eRetCapTrack[y] + vslack_lifetime[y]
- else
- eRetCapTrack[y]
- end
- )
-
- @constraint(EP, cLifetimeRet[y in RET_CAP], eNewCapTrack[y] + eMinRetCapTrack[y] <= eLifetimeRetRHS[y])
+function endogenous_retirement_discharge!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (Discharge) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP = inputs["NEW_CAP"] # Set of all resources eligible for new capacity
+ RET_CAP = inputs["RET_CAP"] # Set of all resources eligible for capacity retirements
+ COMMIT = inputs["COMMIT"] # Set of all resources eligible for unit commitment
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACK[y in RET_CAP, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACK[y in RET_CAP, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCap[y in RET_CAP],
+ if y in NEW_CAP
+ EP[:vCAP][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCap[y in RET_CAP],
+ if y in ids_with_all_options_contributing(gen)
+ EP[:vRETCAP][y] + EP[:vRETROFITCAP][y]
+ else
+ EP[:vRETCAP][y]
+ end)
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrack[y in RET_CAP],
+ sum(EP[:vRETCAPTRACK][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrack[y in RET_CAP],
+ sum(EP[:vCAPTRACK][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP, eMinRetCapTrack[y in RET_CAP],
+ if y in COMMIT
+ cum_min_retired_cap_mw(gen[y]) / cap_size(gen[y])
+ else
+ cum_min_retired_cap_mw(gen[y])
+ end)
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP, cCapTrackNew[y in RET_CAP], eNewCap[y]==vCAPTRACK[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP, cCapTrack[y in RET_CAP, p = 1:(cur_stage - 1)], vCAPTRACK[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP, cRetCapTrackNew[y in RET_CAP], eRetCap[y]==vRETCAPTRACK[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrack[y in RET_CAP, p = 1:(cur_stage - 1)],
+ vRETCAPTRACK[y, p]==0)
+
+ # Create a slack variable for each resource that is not contributing to the retired capacity being tracked
+ # This ensures that the model is able to satisfy the minimum retirement constraint
+ RETROFIT_WITH_SLACK = ids_with_all_options_not_contributing(gen)
+ if !isempty(RETROFIT_WITH_SLACK)
+ @variable(EP, vslack_lifetime[y in RETROFIT_WITH_SLACK]>=0)
+ @expression(EP,
+ vslack_term,
+ 2*maximum(inv_cost_per_mwyr.(gen))*
+ sum(vslack_lifetime[y] for y in RETROFIT_WITH_SLACK; init = 0))
+ add_to_expression!(EP[:eObj], vslack_term)
+ end
+
+ @expression(EP, eLifetimeRetRHS[y in RET_CAP],
+ if y in RETROFIT_WITH_SLACK
+ eRetCapTrack[y] + vslack_lifetime[y]
+ else
+ eRetCapTrack[y]
+ end)
+
+ @constraint(EP,
+ cLifetimeRet[y in RET_CAP],
+ eNewCapTrack[y] + eMinRetCapTrack[y]<=eLifetimeRetRHS[y])
end
-function endogenous_retirement_charge!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (Charge) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_CHARGE = inputs["NEW_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
- RET_CAP_CHARGE = inputs["RET_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKCHARGE[y in RET_CAP_CHARGE,p=1:num_stages] >= 0)
- @variable(EP, vRETCAPTRACKCHARGE[y in RET_CAP_CHARGE,p=1:num_stages] >= 0)
-
- ### Expressions ###
-
- @expression(EP, eNewCapCharge[y in RET_CAP_CHARGE],
- if y in NEW_CAP_CHARGE
- EP[:vCAPCHARGE][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapCharge[y in RET_CAP_CHARGE], EP[:vRETCAPCHARGE][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackCharge[y in RET_CAP_CHARGE], sum(EP[:vRETCAPTRACKCHARGE][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackCharge[y in RET_CAP_CHARGE], sum(EP[:vCAPTRACKCHARGE][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackCharge[y in RET_CAP_CHARGE], cum_min_retired_charge_cap_mw(gen[y]))
-
- ### Constratints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackChargeNew[y in RET_CAP_CHARGE], eNewCapCharge[y] == vCAPTRACKCHARGE[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackCharge[y in RET_CAP_CHARGE,p=1:(cur_stage-1)], vCAPTRACKCHARGE[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackChargeNew[y in RET_CAP_CHARGE], eRetCapCharge[y] == vRETCAPTRACKCHARGE[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackCharge[y in RET_CAP_CHARGE,p=1:(cur_stage-1)], vRETCAPTRACKCHARGE[y,p] == 0)
-
- @constraint(EP, cLifetimeRetCharge[y in RET_CAP_CHARGE], eNewCapTrackCharge[y] + eMinRetCapTrackCharge[y] <= eRetCapTrackCharge[y])
-
+function endogenous_retirement_charge!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (Charge) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_CHARGE = inputs["NEW_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
+ RET_CAP_CHARGE = inputs["RET_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKCHARGE[y in RET_CAP_CHARGE, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKCHARGE[y in RET_CAP_CHARGE, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapCharge[y in RET_CAP_CHARGE],
+ if y in NEW_CAP_CHARGE
+ EP[:vCAPCHARGE][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapCharge[y in RET_CAP_CHARGE], EP[:vRETCAPCHARGE][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackCharge[y in RET_CAP_CHARGE],
+ sum(EP[:vRETCAPTRACKCHARGE][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackCharge[y in RET_CAP_CHARGE],
+ sum(EP[:vCAPTRACKCHARGE][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackCharge[y in RET_CAP_CHARGE],
+ cum_min_retired_charge_cap_mw(gen[y]))
+
+    ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackChargeNew[y in RET_CAP_CHARGE],
+ eNewCapCharge[y]==vCAPTRACKCHARGE[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackCharge[y in RET_CAP_CHARGE, p = 1:(cur_stage - 1)],
+ vCAPTRACKCHARGE[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackChargeNew[y in RET_CAP_CHARGE],
+ eRetCapCharge[y]==vRETCAPTRACKCHARGE[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackCharge[y in RET_CAP_CHARGE, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKCHARGE[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetCharge[y in RET_CAP_CHARGE],
+ eNewCapTrackCharge[y] + eMinRetCapTrackCharge[y]<=eRetCapTrackCharge[y])
end
-function endogenous_retirement_energy!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (Energy) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_ENERGY = inputs["NEW_CAP_ENERGY"] # Set of all storage resources eligible for new energy capacity
- RET_CAP_ENERGY = inputs["RET_CAP_ENERGY"] # Set of all storage resources eligible for energy capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKENERGY[y in RET_CAP_ENERGY,p=1:num_stages] >= 0)
- @variable(EP, vRETCAPTRACKENERGY[y in RET_CAP_ENERGY,p=1:num_stages] >= 0)
-
- ### Expressions ###
-
- @expression(EP, eNewCapEnergy[y in RET_CAP_ENERGY],
- if y in NEW_CAP_ENERGY
- EP[:vCAPENERGY][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapEnergy[y in RET_CAP_ENERGY], EP[:vRETCAPENERGY][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackEnergy[y in RET_CAP_ENERGY], sum(EP[:vRETCAPTRACKENERGY][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackEnergy[y in RET_CAP_ENERGY], sum(EP[:vCAPTRACKENERGY][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackEnergy[y in RET_CAP_ENERGY], cum_min_retired_energy_cap_mw(gen[y]))
-
- ### Constratints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackEnergyNew[y in RET_CAP_ENERGY], eNewCapEnergy[y] == vCAPTRACKENERGY[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackEnergy[y in RET_CAP_ENERGY,p=1:(cur_stage-1)], vCAPTRACKENERGY[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackEnergyNew[y in RET_CAP_ENERGY], eRetCapEnergy[y] == vRETCAPTRACKENERGY[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackEnergy[y in RET_CAP_ENERGY,p=1:(cur_stage-1)], vRETCAPTRACKENERGY[y,p] == 0)
-
- @constraint(EP, cLifetimeRetEnergy[y in RET_CAP_ENERGY], eNewCapTrackEnergy[y] + eMinRetCapTrackEnergy[y] <= eRetCapTrackEnergy[y])
+function endogenous_retirement_energy!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (Energy) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_ENERGY = inputs["NEW_CAP_ENERGY"] # Set of all storage resources eligible for new energy capacity
+ RET_CAP_ENERGY = inputs["RET_CAP_ENERGY"] # Set of all storage resources eligible for energy capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKENERGY[y in RET_CAP_ENERGY, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKENERGY[y in RET_CAP_ENERGY, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapEnergy[y in RET_CAP_ENERGY],
+ if y in NEW_CAP_ENERGY
+ EP[:vCAPENERGY][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapEnergy[y in RET_CAP_ENERGY], EP[:vRETCAPENERGY][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackEnergy[y in RET_CAP_ENERGY],
+ sum(EP[:vRETCAPTRACKENERGY][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackEnergy[y in RET_CAP_ENERGY],
+ sum(EP[:vCAPTRACKENERGY][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackEnergy[y in RET_CAP_ENERGY],
+ cum_min_retired_energy_cap_mw(gen[y]))
+
+    ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackEnergyNew[y in RET_CAP_ENERGY],
+ eNewCapEnergy[y]==vCAPTRACKENERGY[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackEnergy[y in RET_CAP_ENERGY, p = 1:(cur_stage - 1)],
+ vCAPTRACKENERGY[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackEnergyNew[y in RET_CAP_ENERGY],
+ eRetCapEnergy[y]==vRETCAPTRACKENERGY[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackEnergy[y in RET_CAP_ENERGY, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKENERGY[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetEnergy[y in RET_CAP_ENERGY],
+ eNewCapTrackEnergy[y] + eMinRetCapTrackEnergy[y]<=eRetCapTrackEnergy[y])
end
-function endogenous_retirement_vre_stor_dc!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage DC) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_DC = inputs["NEW_CAP_DC"] # Set of all resources eligible for new capacity
- RET_CAP_DC = inputs["RET_CAP_DC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKDC[y in RET_CAP_DC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKDC[y in RET_CAP_DC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapDC[y in RET_CAP_DC],
- if y in NEW_CAP_DC
- EP[:vDCCAP][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapDC[y in RET_CAP_DC], EP[:vRETDCCAP][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackDC[y in RET_CAP_DC], sum(EP[:vRETCAPTRACKDC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackDC[y in RET_CAP_DC], sum(EP[:vCAPTRACKDC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackDC[y in RET_CAP_DC], cum_min_retired_cap_inverter_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewDC[y in RET_CAP_DC], eNewCapDC[y] == vCAPTRACKDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackDC[y in RET_CAP_DC,p=1:(cur_stage-1)], vCAPTRACKDC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewDC[y in RET_CAP_DC], eRetCapDC[y] == vRETCAPTRACKDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackDC[y in RET_CAP_DC,p=1:(cur_stage-1)], vRETCAPTRACKDC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetDC[y in RET_CAP_DC], eNewCapTrackDC[y] + eMinRetCapTrackDC[y] <= eRetCapTrackDC[y])
+function endogenous_retirement_vre_stor_dc!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage DC) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_DC = inputs["NEW_CAP_DC"] # Set of all resources eligible for new capacity
+ RET_CAP_DC = inputs["RET_CAP_DC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKDC[y in RET_CAP_DC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKDC[y in RET_CAP_DC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapDC[y in RET_CAP_DC],
+ if y in NEW_CAP_DC
+ EP[:vDCCAP][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapDC[y in RET_CAP_DC], EP[:vRETDCCAP][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackDC[y in RET_CAP_DC],
+ sum(EP[:vRETCAPTRACKDC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackDC[y in RET_CAP_DC],
+ sum(EP[:vCAPTRACKDC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackDC[y in RET_CAP_DC],
+ cum_min_retired_cap_inverter_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewDC[y in RET_CAP_DC],
+ eNewCapDC[y]==vCAPTRACKDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackDC[y in RET_CAP_DC, p = 1:(cur_stage - 1)],
+ vCAPTRACKDC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewDC[y in RET_CAP_DC],
+ eRetCapDC[y]==vRETCAPTRACKDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackDC[y in RET_CAP_DC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKDC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetDC[y in RET_CAP_DC],
+ eNewCapTrackDC[y] + eMinRetCapTrackDC[y]<=eRetCapTrackDC[y])
end
-function endogenous_retirement_vre_stor_solar!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Solar) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_SOLAR = inputs["NEW_CAP_SOLAR"] # Set of all resources eligible for new capacity
- RET_CAP_SOLAR = inputs["RET_CAP_SOLAR"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKSOLAR[y in RET_CAP_SOLAR,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKSOLAR[y in RET_CAP_SOLAR,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapSolar[y in RET_CAP_SOLAR],
- if y in NEW_CAP_SOLAR
- EP[:vSOLARCAP][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapSolar[y in RET_CAP_SOLAR], EP[:vRETSOLARCAP][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackSolar[y in RET_CAP_SOLAR], sum(EP[:vRETCAPTRACKSOLAR][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackSolar[y in RET_CAP_SOLAR], sum(EP[:vCAPTRACKSOLAR][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackSolar[y in RET_CAP_SOLAR], cum_min_retired_cap_solar_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewSolar[y in RET_CAP_SOLAR], eNewCapSolar[y] == vCAPTRACKSOLAR[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackSolar[y in RET_CAP_SOLAR,p=1:(cur_stage-1)], vCAPTRACKSOLAR[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewSolar[y in RET_CAP_SOLAR], eRetCapSolar[y] == vRETCAPTRACKSOLAR[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackSolar[y in RET_CAP_SOLAR,p=1:(cur_stage-1)], vRETCAPTRACKSOLAR[y,p] == 0)
-
- @constraint(EP, cLifetimeRetSolar[y in RET_CAP_SOLAR], eNewCapTrackSolar[y] + eMinRetCapTrackSolar[y] <= eRetCapTrackSolar[y])
+function endogenous_retirement_vre_stor_solar!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Solar) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_SOLAR = inputs["NEW_CAP_SOLAR"] # Set of all resources eligible for new capacity
+ RET_CAP_SOLAR = inputs["RET_CAP_SOLAR"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKSOLAR[y in RET_CAP_SOLAR, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKSOLAR[y in RET_CAP_SOLAR, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapSolar[y in RET_CAP_SOLAR],
+ if y in NEW_CAP_SOLAR
+ EP[:vSOLARCAP][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapSolar[y in RET_CAP_SOLAR], EP[:vRETSOLARCAP][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackSolar[y in RET_CAP_SOLAR],
+ sum(EP[:vRETCAPTRACKSOLAR][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackSolar[y in RET_CAP_SOLAR],
+ sum(EP[:vCAPTRACKSOLAR][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackSolar[y in RET_CAP_SOLAR],
+ cum_min_retired_cap_solar_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewSolar[y in RET_CAP_SOLAR],
+ eNewCapSolar[y]==vCAPTRACKSOLAR[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackSolar[y in RET_CAP_SOLAR, p = 1:(cur_stage - 1)],
+ vCAPTRACKSOLAR[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewSolar[y in RET_CAP_SOLAR],
+ eRetCapSolar[y]==vRETCAPTRACKSOLAR[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackSolar[y in RET_CAP_SOLAR, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKSOLAR[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetSolar[y in RET_CAP_SOLAR],
+ eNewCapTrackSolar[y] + eMinRetCapTrackSolar[y]<=eRetCapTrackSolar[y])
end
-function endogenous_retirement_vre_stor_wind!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Wind) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_WIND = inputs["NEW_CAP_WIND"] # Set of all resources eligible for new capacity
- RET_CAP_WIND = inputs["RET_CAP_WIND"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKWIND[y in RET_CAP_WIND,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKWIND[y in RET_CAP_WIND,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapWind[y in RET_CAP_WIND],
- if y in NEW_CAP_WIND
- EP[:vWINDCAP][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapWind[y in RET_CAP_WIND], EP[:vRETWINDCAP][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackWind[y in RET_CAP_WIND], sum(EP[:vRETCAPTRACKWIND][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackWind[y in RET_CAP_WIND], sum(EP[:vCAPTRACKWIND][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackWind[y in RET_CAP_WIND], cum_min_retired_cap_wind_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewWind[y in RET_CAP_WIND], eNewCapWind[y] == vCAPTRACKWIND[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackWind[y in RET_CAP_WIND,p=1:(cur_stage-1)], vCAPTRACKWIND[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewWind[y in RET_CAP_WIND], eRetCapWind[y] == vRETCAPTRACKWIND[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackWind[y in RET_CAP_WIND,p=1:(cur_stage-1)], vRETCAPTRACKWIND[y,p] == 0)
-
- @constraint(EP, cLifetimeRetWind[y in RET_CAP_WIND], eNewCapTrackWind[y] + eMinRetCapTrackWind[y] <= eRetCapTrackWind[y])
+function endogenous_retirement_vre_stor_wind!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Wind) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_WIND = inputs["NEW_CAP_WIND"] # Set of all resources eligible for new capacity
+ RET_CAP_WIND = inputs["RET_CAP_WIND"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKWIND[y in RET_CAP_WIND, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKWIND[y in RET_CAP_WIND, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapWind[y in RET_CAP_WIND],
+ if y in NEW_CAP_WIND
+ EP[:vWINDCAP][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapWind[y in RET_CAP_WIND], EP[:vRETWINDCAP][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackWind[y in RET_CAP_WIND],
+ sum(EP[:vRETCAPTRACKWIND][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackWind[y in RET_CAP_WIND],
+ sum(EP[:vCAPTRACKWIND][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackWind[y in RET_CAP_WIND],
+ cum_min_retired_cap_wind_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewWind[y in RET_CAP_WIND],
+ eNewCapWind[y]==vCAPTRACKWIND[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackWind[y in RET_CAP_WIND, p = 1:(cur_stage - 1)],
+ vCAPTRACKWIND[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewWind[y in RET_CAP_WIND],
+ eRetCapWind[y]==vRETCAPTRACKWIND[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackWind[y in RET_CAP_WIND, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKWIND[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetWind[y in RET_CAP_WIND],
+ eNewCapTrackWind[y] + eMinRetCapTrackWind[y]<=eRetCapTrackWind[y])
end
-function endogenous_retirement_vre_stor_stor!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Storage) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_STOR = inputs["NEW_CAP_STOR"] # Set of all resources eligible for new capacity
- RET_CAP_STOR = inputs["RET_CAP_STOR"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKENERGY_VS[y in RET_CAP_STOR,p=1:num_stages] >= 0)
- @variable(EP, vRETCAPTRACKENERGY_VS[y in RET_CAP_STOR,p=1:num_stages] >= 0)
-
- ### Expressions ###
-
- @expression(EP, eNewCapEnergy_VS[y in RET_CAP_STOR],
- if y in NEW_CAP_STOR
- EP[:vCAPENERGY_VS][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapEnergy_VS[y in RET_CAP_STOR], EP[:vRETCAPENERGY_VS][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackEnergy_VS[y in RET_CAP_STOR], sum(EP[:vRETCAPTRACKENERGY_VS][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackEnergy_VS[y in RET_CAP_STOR], sum(EP[:vCAPTRACKENERGY_VS][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackEnergy_VS[y in RET_CAP_STOR], cum_min_retired_energy_cap_mw(gen[y]))
-
- ### Constratints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackEnergyNew_VS[y in RET_CAP_STOR], eNewCapEnergy_VS[y] == vCAPTRACKENERGY_VS[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackEnergy_VS[y in RET_CAP_STOR,p=1:(cur_stage-1)], vCAPTRACKENERGY_VS[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackEnergyNew_VS[y in RET_CAP_STOR], eRetCapEnergy_VS[y] == vRETCAPTRACKENERGY_VS[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackEnergy_VS[y in RET_CAP_STOR,p=1:(cur_stage-1)], vRETCAPTRACKENERGY_VS[y,p] == 0)
-
- @constraint(EP, cLifetimeRetEnergy_VS[y in RET_CAP_STOR], eNewCapTrackEnergy_VS[y] + eMinRetCapTrackEnergy_VS[y] <= eRetCapTrackEnergy_VS[y])
+function endogenous_retirement_vre_stor_stor!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Storage) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_STOR = inputs["NEW_CAP_STOR"] # Set of all resources eligible for new capacity
+ RET_CAP_STOR = inputs["RET_CAP_STOR"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKENERGY_VS[y in RET_CAP_STOR, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKENERGY_VS[y in RET_CAP_STOR, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapEnergy_VS[y in RET_CAP_STOR],
+ if y in NEW_CAP_STOR
+ EP[:vCAPENERGY_VS][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapEnergy_VS[y in RET_CAP_STOR], EP[:vRETCAPENERGY_VS][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackEnergy_VS[y in RET_CAP_STOR],
+ sum(EP[:vRETCAPTRACKENERGY_VS][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackEnergy_VS[y in RET_CAP_STOR],
+ sum(EP[:vCAPTRACKENERGY_VS][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackEnergy_VS[y in RET_CAP_STOR],
+ cum_min_retired_energy_cap_mw(gen[y]))
+
+    ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackEnergyNew_VS[y in RET_CAP_STOR],
+ eNewCapEnergy_VS[y]==vCAPTRACKENERGY_VS[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackEnergy_VS[y in RET_CAP_STOR, p = 1:(cur_stage - 1)],
+ vCAPTRACKENERGY_VS[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackEnergyNew_VS[y in RET_CAP_STOR],
+ eRetCapEnergy_VS[y]==vRETCAPTRACKENERGY_VS[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackEnergy_VS[y in RET_CAP_STOR, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKENERGY_VS[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetEnergy_VS[y in RET_CAP_STOR],
+ eNewCapTrackEnergy_VS[y] + eMinRetCapTrackEnergy_VS[y]<=eRetCapTrackEnergy_VS[y])
end
-function endogenous_retirement_vre_stor_discharge_dc!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Discharge DC) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_DISCHARGE_DC = inputs["NEW_CAP_DISCHARGE_DC"] # Set of all resources eligible for new capacity
- RET_CAP_DISCHARGE_DC = inputs["RET_CAP_DISCHARGE_DC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKDISCHARGEDC[y in RET_CAP_DISCHARGE_DC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKDISCHARGEDC[y in RET_CAP_DISCHARGE_DC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapDischargeDC[y in RET_CAP_DISCHARGE_DC],
- if y in NEW_CAP_DISCHARGE_DC
- EP[:vCAPDISCHARGE_DC][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapDischargeDC[y in RET_CAP_DISCHARGE_DC], EP[:vRETCAPDISCHARGE_DC][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC], sum(EP[:vRETCAPTRACKDISCHARGEDC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC], sum(EP[:vCAPTRACKDISCHARGEDC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC], cum_min_retired_cap_discharge_dc_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewDischargeDC[y in RET_CAP_DISCHARGE_DC], eNewCapDischargeDC[y] == vCAPTRACKDISCHARGEDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC,p=1:(cur_stage-1)], vCAPTRACKDISCHARGEDC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewDischargeDC[y in RET_CAP_DISCHARGE_DC], eRetCapTrackDischargeDC[y] == vRETCAPTRACKDISCHARGEDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC,p=1:(cur_stage-1)], vRETCAPTRACKDISCHARGEDC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetDischargeDC[y in RET_CAP_DISCHARGE_DC], eNewCapTrackDischargeDC[y] + eMinRetCapTrackDischargeDC[y] <= eRetCapTrackDischargeDC[y])
+function endogenous_retirement_vre_stor_discharge_dc!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Discharge DC) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_DISCHARGE_DC = inputs["NEW_CAP_DISCHARGE_DC"] # Set of all resources eligible for new capacity
+ RET_CAP_DISCHARGE_DC = inputs["RET_CAP_DISCHARGE_DC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKDISCHARGEDC[y in RET_CAP_DISCHARGE_DC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKDISCHARGEDC[y in RET_CAP_DISCHARGE_DC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ if y in NEW_CAP_DISCHARGE_DC
+ EP[:vCAPDISCHARGE_DC][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP,
+ eRetCapDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ EP[:vRETCAPDISCHARGE_DC][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ sum(EP[:vRETCAPTRACKDISCHARGEDC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ sum(EP[:vCAPTRACKDISCHARGEDC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ cum_min_retired_cap_discharge_dc_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ eNewCapDischargeDC[y]==vCAPTRACKDISCHARGEDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC, p = 1:(cur_stage - 1)],
+ vCAPTRACKDISCHARGEDC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ eRetCapTrackDischargeDC[y]==vRETCAPTRACKDISCHARGEDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKDISCHARGEDC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ eNewCapTrackDischargeDC[y] +
+ eMinRetCapTrackDischargeDC[y]<=eRetCapTrackDischargeDC[y])
end
-function endogenous_retirement_vre_stor_charge_dc!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Charge DC) Module")
-
- gen = inputs["RESOURCES"]
- NEW_CAP_CHARGE_DC = inputs["NEW_CAP_CHARGE_DC"] # Set of all resources eligible for new capacity
- RET_CAP_CHARGE_DC = inputs["RET_CAP_CHARGE_DC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKCHARGEDC[y in RET_CAP_CHARGE_DC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKCHARGEDC[y in RET_CAP_CHARGE_DC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapChargeDC[y in RET_CAP_CHARGE_DC],
- if y in NEW_CAP_CHARGE_DC
- EP[:vCAPCHARGE_DC][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapChargeDC[y in RET_CAP_CHARGE_DC], EP[:vRETCAPCHARGE_DC][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC], sum(EP[:vRETCAPTRACKCHARGEDC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackChargeDC[y in RET_CAP_CHARGE_DC], sum(EP[:vCAPTRACKCHARGEDC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC], cum_min_retired_cap_charge_dc_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewChargeDC[y in RET_CAP_CHARGE_DC], eNewCapChargeDC[y] == vCAPTRACKCHARGEDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackChargeDC[y in RET_CAP_CHARGE_DC,p=1:(cur_stage-1)], vCAPTRACKCHARGEDC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewChargeDC[y in RET_CAP_CHARGE_DC], eRetCapTrackChargeDC[y] == vRETCAPTRACKCHARGEDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC,p=1:(cur_stage-1)], vRETCAPTRACKCHARGEDC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetChargeDC[y in RET_CAP_CHARGE_DC], eNewCapTrackChargeDC[y] + eMinRetCapTrackChargeDC[y] <= eRetCapTrackChargeDC[y])
+function endogenous_retirement_vre_stor_charge_dc!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Charge DC) Module")
+
+ gen = inputs["RESOURCES"]
+ NEW_CAP_CHARGE_DC = inputs["NEW_CAP_CHARGE_DC"] # Set of all resources eligible for new capacity
+ RET_CAP_CHARGE_DC = inputs["RET_CAP_CHARGE_DC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKCHARGEDC[y in RET_CAP_CHARGE_DC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKCHARGEDC[y in RET_CAP_CHARGE_DC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapChargeDC[y in RET_CAP_CHARGE_DC],
+ if y in NEW_CAP_CHARGE_DC
+ EP[:vCAPCHARGE_DC][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapChargeDC[y in RET_CAP_CHARGE_DC], EP[:vRETCAPCHARGE_DC][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC],
+ sum(EP[:vRETCAPTRACKCHARGEDC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackChargeDC[y in RET_CAP_CHARGE_DC],
+ sum(EP[:vCAPTRACKCHARGEDC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC],
+ cum_min_retired_cap_charge_dc_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewChargeDC[y in RET_CAP_CHARGE_DC],
+ eNewCapChargeDC[y]==vCAPTRACKCHARGEDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackChargeDC[y in RET_CAP_CHARGE_DC, p = 1:(cur_stage - 1)],
+ vCAPTRACKCHARGEDC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewChargeDC[y in RET_CAP_CHARGE_DC],
+ eRetCapTrackChargeDC[y]==vRETCAPTRACKCHARGEDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKCHARGEDC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetChargeDC[y in RET_CAP_CHARGE_DC],
+ eNewCapTrackChargeDC[y] + eMinRetCapTrackChargeDC[y]<=eRetCapTrackChargeDC[y])
end
-function endogenous_retirement_vre_stor_discharge_ac!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Discharge AC) Module")
-
- gen = inputs["RESOURCES"]
- NEW_CAP_DISCHARGE_AC = inputs["NEW_CAP_DISCHARGE_AC"] # Set of all resources eligible for new capacity
- RET_CAP_DISCHARGE_AC = inputs["RET_CAP_DISCHARGE_AC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKDISCHARGEAC[y in RET_CAP_DISCHARGE_AC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKDISCHARGEAC[y in RET_CAP_DISCHARGE_AC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapDischargeAC[y in RET_CAP_DISCHARGE_AC],
- if y in NEW_CAP_DISCHARGE_AC
- EP[:vCAPDISCHARGE_AC][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapDischargeAC[y in RET_CAP_DISCHARGE_AC], EP[:vRETCAPDISCHARGE_AC][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC], sum(EP[:vRETCAPTRACKDISCHARGEAC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC], sum(EP[:vCAPTRACKDISCHARGEAC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC], cum_min_retired_cap_discharge_ac_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewDischargeAC[y in RET_CAP_DISCHARGE_AC], eNewCapDischargeAC[y] == vCAPTRACKDISCHARGEAC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC,p=1:(cur_stage-1)], vCAPTRACKDISCHARGEAC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewDischargeAC[y in RET_CAP_DISCHARGE_AC], eRetCapTrackDischargeAC[y] == vRETCAPTRACKDISCHARGEAC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC,p=1:(cur_stage-1)], vRETCAPTRACKDISCHARGEAC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetDischargeAC[y in RET_CAP_DISCHARGE_AC], eNewCapTrackDischargeAC[y] + eMinRetCapTrackDischargeAC[y] <= eRetCapTrackDischargeAC[y])
+function endogenous_retirement_vre_stor_discharge_ac!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Discharge AC) Module")
+
+ gen = inputs["RESOURCES"]
+ NEW_CAP_DISCHARGE_AC = inputs["NEW_CAP_DISCHARGE_AC"] # Set of all resources eligible for new capacity
+ RET_CAP_DISCHARGE_AC = inputs["RET_CAP_DISCHARGE_AC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKDISCHARGEAC[y in RET_CAP_DISCHARGE_AC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKDISCHARGEAC[y in RET_CAP_DISCHARGE_AC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ if y in NEW_CAP_DISCHARGE_AC
+ EP[:vCAPDISCHARGE_AC][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP,
+ eRetCapDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ EP[:vRETCAPDISCHARGE_AC][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ sum(EP[:vRETCAPTRACKDISCHARGEAC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ sum(EP[:vCAPTRACKDISCHARGEAC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ cum_min_retired_cap_discharge_ac_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ eNewCapDischargeAC[y]==vCAPTRACKDISCHARGEAC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC, p = 1:(cur_stage - 1)],
+ vCAPTRACKDISCHARGEAC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ eRetCapTrackDischargeAC[y]==vRETCAPTRACKDISCHARGEAC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKDISCHARGEAC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ eNewCapTrackDischargeAC[y] +
+ eMinRetCapTrackDischargeAC[y]<=eRetCapTrackDischargeAC[y])
end
-function endogenous_retirement_vre_stor_charge_ac!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Charge AC) Module")
-
- gen = inputs["RESOURCES"]
- NEW_CAP_CHARGE_AC = inputs["NEW_CAP_CHARGE_AC"] # Set of all resources eligible for new capacity
- RET_CAP_CHARGE_AC = inputs["RET_CAP_CHARGE_AC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKCHARGEAC[y in RET_CAP_CHARGE_AC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKCHARGEAC[y in RET_CAP_CHARGE_AC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapChargeAC[y in RET_CAP_CHARGE_AC],
- if y in NEW_CAP_CHARGE_AC
- EP[:vCAPCHARGE_AC][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapChargeAC[y in RET_CAP_CHARGE_AC], EP[:vRETCAPCHARGE_AC][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC], sum(EP[:vRETCAPTRACKCHARGEAC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackChargeAC[y in RET_CAP_CHARGE_AC], sum(EP[:vCAPTRACKCHARGEAC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC], cum_min_retired_cap_charge_ac_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewChargeAC[y in RET_CAP_CHARGE_AC], eNewCapChargeAC[y] == vCAPTRACKCHARGEAC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackChargeAC[y in RET_CAP_CHARGE_AC,p=1:(cur_stage-1)], vCAPTRACKCHARGEAC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewChargeAC[y in RET_CAP_CHARGE_AC], eRetCapTrackChargeAC[y] == vRETCAPTRACKCHARGEAC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC,p=1:(cur_stage-1)], vRETCAPTRACKCHARGEAC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetChargeAC[y in RET_CAP_CHARGE_AC], eNewCapTrackChargeAC[y] + eMinRetCapTrackChargeAC[y] <= eRetCapTrackChargeAC[y])
+function endogenous_retirement_vre_stor_charge_ac!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Charge AC) Module")
+
+ gen = inputs["RESOURCES"]
+ NEW_CAP_CHARGE_AC = inputs["NEW_CAP_CHARGE_AC"] # Set of all resources eligible for new capacity
+ RET_CAP_CHARGE_AC = inputs["RET_CAP_CHARGE_AC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKCHARGEAC[y in RET_CAP_CHARGE_AC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKCHARGEAC[y in RET_CAP_CHARGE_AC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapChargeAC[y in RET_CAP_CHARGE_AC],
+ if y in NEW_CAP_CHARGE_AC
+ EP[:vCAPCHARGE_AC][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapChargeAC[y in RET_CAP_CHARGE_AC], EP[:vRETCAPCHARGE_AC][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC],
+ sum(EP[:vRETCAPTRACKCHARGEAC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackChargeAC[y in RET_CAP_CHARGE_AC],
+ sum(EP[:vCAPTRACKCHARGEAC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC],
+ cum_min_retired_cap_charge_ac_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewChargeAC[y in RET_CAP_CHARGE_AC],
+ eNewCapChargeAC[y]==vCAPTRACKCHARGEAC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackChargeAC[y in RET_CAP_CHARGE_AC, p = 1:(cur_stage - 1)],
+ vCAPTRACKCHARGEAC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewChargeAC[y in RET_CAP_CHARGE_AC],
+ eRetCapTrackChargeAC[y]==vRETCAPTRACKCHARGEAC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKCHARGEAC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetChargeAC[y in RET_CAP_CHARGE_AC],
+ eNewCapTrackChargeAC[y] + eMinRetCapTrackChargeAC[y]<=eRetCapTrackChargeAC[y])
end
diff --git a/src/multi_stage/write_multi_stage_capacities_charge.jl b/src/multi_stage/write_multi_stage_capacities_charge.jl
index b098cae598..a9d7f4cf11 100644
--- a/src/multi_stage/write_multi_stage_capacities_charge.jl
+++ b/src/multi_stage/write_multi_stage_capacities_charge.jl
@@ -9,7 +9,6 @@ inputs:
* settings\_d - Dictionary containing settings dictionary configured in the multi-stage settings file multi\_stage\_settings.yml.
"""
function write_multi_stage_capacities_charge(outpath::String, settings_d::Dict)
-
num_stages = settings_d["NumStages"] # Total number of investment planning stages
capacities_d = Dict()
@@ -19,7 +18,8 @@ function write_multi_stage_capacities_charge(outpath::String, settings_d::Dict)
end
# Set first column of DataFrame as resource names from the first stage
- df_cap = DataFrame(Resource=capacities_d[1][!, :Resource], Zone=capacities_d[1][!, :Zone])
+ df_cap = DataFrame(Resource = capacities_d[1][!, :Resource],
+ Zone = capacities_d[1][!, :Zone])
# Store starting capacities from the first stage
df_cap[!, Symbol("StartChargeCap_p1")] = capacities_d[1][!, :StartChargeCap]
@@ -30,5 +30,4 @@ function write_multi_stage_capacities_charge(outpath::String, settings_d::Dict)
end
CSV.write(joinpath(outpath, "capacities_charge_multi_stage.csv"), df_cap)
-
end
diff --git a/src/multi_stage/write_multi_stage_capacities_discharge.jl b/src/multi_stage/write_multi_stage_capacities_discharge.jl
index b4a84f433f..0da02b7002 100644
--- a/src/multi_stage/write_multi_stage_capacities_discharge.jl
+++ b/src/multi_stage/write_multi_stage_capacities_discharge.jl
@@ -9,7 +9,6 @@ inputs:
* settings\_d - Dictionary containing settings dictionary configured in the multi-stage settings file multi\_stage\_settings.yml.
"""
function write_multi_stage_capacities_discharge(outpath::String, settings_d::Dict)
-
num_stages = settings_d["NumStages"] # Total number of investment planning stages
capacities_d = Dict()
@@ -19,7 +18,8 @@ function write_multi_stage_capacities_discharge(outpath::String, settings_d::Dic
end
# Set first column of DataFrame as resource names from the first stage
- df_cap = DataFrame(Resource=capacities_d[1][!, :Resource], Zone=capacities_d[1][!, :Zone])
+ df_cap = DataFrame(Resource = capacities_d[1][!, :Resource],
+ Zone = capacities_d[1][!, :Zone])
# Store starting capacities from the first stage
df_cap[!, Symbol("StartCap_p1")] = capacities_d[1][!, :StartCap]
@@ -30,5 +30,4 @@ function write_multi_stage_capacities_discharge(outpath::String, settings_d::Dic
end
CSV.write(joinpath(outpath, "capacities_multi_stage.csv"), df_cap)
-
end
diff --git a/src/multi_stage/write_multi_stage_capacities_energy.jl b/src/multi_stage/write_multi_stage_capacities_energy.jl
index b9d2d81849..9c7a5c1567 100644
--- a/src/multi_stage/write_multi_stage_capacities_energy.jl
+++ b/src/multi_stage/write_multi_stage_capacities_energy.jl
@@ -9,7 +9,6 @@ inputs:
* settings\_d - Dictionary containing settings dictionary configured in the multi-stage settings file multi\_stage\_settings.yml.
"""
function write_multi_stage_capacities_energy(outpath::String, settings_d::Dict)
-
num_stages = settings_d["NumStages"] # Total number of investment planning stages
capacities_d = Dict()
@@ -19,7 +18,8 @@ function write_multi_stage_capacities_energy(outpath::String, settings_d::Dict)
end
# Set first column of DataFrame as resource names from the first stage
- df_cap = DataFrame(Resource=capacities_d[1][!, :Resource], Zone=capacities_d[1][!, :Zone])
+ df_cap = DataFrame(Resource = capacities_d[1][!, :Resource],
+ Zone = capacities_d[1][!, :Zone])
# Store starting capacities from the first stage
df_cap[!, Symbol("StartEnergyCap_p1")] = capacities_d[1][!, :StartEnergyCap]
@@ -30,5 +30,4 @@ function write_multi_stage_capacities_energy(outpath::String, settings_d::Dict)
end
CSV.write(joinpath(outpath, "capacities_energy_multi_stage.csv"), df_cap)
-
end
diff --git a/src/multi_stage/write_multi_stage_costs.jl b/src/multi_stage/write_multi_stage_costs.jl
index dcc5533f27..92a8c9a71c 100644
--- a/src/multi_stage/write_multi_stage_costs.jl
+++ b/src/multi_stage/write_multi_stage_costs.jl
@@ -9,7 +9,6 @@ inputs:
* settings\_d - Dictionary containing settings dictionary configured in the multi-stage settings file multi\_stage\_settings.yml.
"""
function write_multi_stage_costs(outpath::String, settings_d::Dict, inputs_dict::Dict)
-
num_stages = settings_d["NumStages"] # Total number of DDP stages
wacc = settings_d["WACC"] # Interest Rate and also the discount rate unless specified other wise
stage_lens = settings_d["StageLengths"]
@@ -24,28 +23,35 @@ function write_multi_stage_costs(outpath::String, settings_d::Dict, inputs_dict:
OPEXMULTS = [inputs_dict[j]["OPEXMULT"] for j in 1:num_stages] # Stage-wise OPEX multipliers to count multiple years between two model stages
# Set first column of DataFrame as resource names from the first stage
- df_costs = DataFrame(Costs=costs_d[1][!, :Costs])
+ df_costs = DataFrame(Costs = costs_d[1][!, :Costs])
# Store discounted total costs for each stage in a data frame
for p in 1:num_stages
if myopic
DF = 1 # DF=1 because we do not apply discount factor in myopic case
else
- DF = 1 / (1 + wacc)^(stage_lens[p] * (p - 1)) # Discount factor applied to ALL costs in each stage
+ cum_stage_length = 0
+ if p > 1
+ for stage_counter in 1:(p - 1)
+ cum_stage_length += stage_lens[stage_counter]
+ end
+ end
+ DF = 1 / (1 + wacc)^(cum_stage_length) # Discount factor applied to ALL costs in each stage
end
df_costs[!, Symbol("TotalCosts_p$p")] = DF .* costs_d[p][!, Symbol("Total")]
end
# For OPEX costs, apply additional discounting
- for cost in ["cVar", "cNSE", "cStart", "cUnmetRsv"]
+ for cost in ["cVar", "cNSE", "cStart", "cUnmetRsv", "cUnmetPolicyPenalty"]
if cost in df_costs[!, :Costs]
- df_costs[df_costs[!, :Costs].==cost, 2:end] = transpose(OPEXMULTS) .* df_costs[df_costs[!, :Costs].==cost, 2:end]
+ df_costs[df_costs[!, :Costs] .== cost, 2:end] = transpose(OPEXMULTS) .*
+ df_costs[df_costs[!, :Costs] .== cost, 2:end]
end
end
# Remove "cTotal" from results (as this includes Cost-to-Go)
- df_costs = df_costs[df_costs[!, :Costs].!="cTotal", :]
+ df_costs = df_costs[df_costs[!, :Costs] .!= "cTotal", :]
+ @warn("The cost calculation of the multi-stage GenX is approximate currently, and we will be refining it more in one of the future releases.")
CSV.write(joinpath(outpath, "costs_multi_stage.csv"), df_costs)
-
end
diff --git a/src/multi_stage/write_multi_stage_network_expansion.jl b/src/multi_stage/write_multi_stage_network_expansion.jl
index 3b9808d29f..1a6ddc7015 100644
--- a/src/multi_stage/write_multi_stage_network_expansion.jl
+++ b/src/multi_stage/write_multi_stage_network_expansion.jl
@@ -19,11 +19,12 @@ function write_multi_stage_network_expansion(outpath::String, settings_d::Dict)
end
# Set first column of output DataFrame as line IDs
- df_trans_cap = DataFrame(Line=trans_capacities_d[1][!, :Line])
+ df_trans_cap = DataFrame(Line = trans_capacities_d[1][!, :Line])
# Store new transmission capacities for all stages
for p in 1:num_stages
- df_trans_cap[!, Symbol("New_Trans_Capacity_p$p")] = trans_capacities_d[p][!, :New_Trans_Capacity]
+ df_trans_cap[!, Symbol("New_Trans_Capacity_p$p")] = trans_capacities_d[p][!,
+ :New_Trans_Capacity]
end
CSV.write(joinpath(outpath, "network_expansion_multi_stage.csv"), df_trans_cap)
diff --git a/src/multi_stage/write_multi_stage_outputs.jl b/src/multi_stage/write_multi_stage_outputs.jl
new file mode 100644
index 0000000000..4e6d5612d1
--- /dev/null
+++ b/src/multi_stage/write_multi_stage_outputs.jl
@@ -0,0 +1,30 @@
+@doc raw"""
+ write_multi_stage_outputs(stats_d::Dict,
+ outpath::String,
+ settings_d::Dict,
+ inputs_dict::Dict)
+
+This function calls various methods which write multi-stage modeling outputs as .csv files.
+
+# Arguments:
+ * stats\_d: Dictionary which contains the run time, upper bound, and lower bound of each DDP iteration.
+ * outpath: String which represents the path to the Results directory.
+ * settings\_d: Dictionary containing settings configured in the GenX settings `genx_settings.yml` file as well as the multi-stage settings file `multi_stage_settings.yml`.
+ * inputs\_dict: Dictionary containing the input data for the multi-stage model.
+"""
+function write_multi_stage_outputs(stats_d::Dict,
+ outpath::String,
+ settings_d::Dict,
+ inputs_dict::Dict)
+ multi_stage_settings_d = settings_d["MultiStageSettingsDict"]
+
+ write_multi_stage_capacities_discharge(outpath, multi_stage_settings_d)
+ write_multi_stage_capacities_charge(outpath, multi_stage_settings_d)
+ write_multi_stage_capacities_energy(outpath, multi_stage_settings_d)
+ if settings_d["NetworkExpansion"] == 1
+ write_multi_stage_network_expansion(outpath, multi_stage_settings_d)
+ end
+ write_multi_stage_costs(outpath, multi_stage_settings_d, inputs_dict)
+ multi_stage_settings_d["Myopic"] == 0 && write_multi_stage_stats(outpath, stats_d)
+ write_multi_stage_settings(outpath, settings_d)
+end
diff --git a/src/multi_stage/write_multi_stage_stats.jl b/src/multi_stage/write_multi_stage_stats.jl
index 75919d067f..7f37b04043 100644
--- a/src/multi_stage/write_multi_stage_stats.jl
+++ b/src/multi_stage/write_multi_stage_stats.jl
@@ -1,3 +1,8 @@
+_get_multi_stage_stats_filename() = "stats_multi_stage.csv"
+function _get_multi_stage_stats_header()
+ ["Iteration_Number", "Seconds", "Upper_Bound", "Lower_Bound", "Relative_Gap"]
+end
+
@doc raw"""
write_multi_stage_stats(outpath::String, stats_d::Dict)
@@ -9,6 +14,10 @@ inputs:
* stats\_d – Dictionary which contains the run time, upper bound, and lower bound of each DDP iteration.
"""
function write_multi_stage_stats(outpath::String, stats_d::Dict)
+ filename = _get_multi_stage_stats_filename()
+
+ # don't overwrite existing file
+ isfile(joinpath(outpath, filename)) && return nothing
times_a = stats_d["TIMES"] # Time (seconds) of each iteration
upper_bounds_a = stats_d["UPPER_BOUNDS"] # Upper bound of each iteration
@@ -20,12 +29,76 @@ function write_multi_stage_stats(outpath::String, stats_d::Dict)
realtive_gap_a = (upper_bounds_a .- lower_bounds_a) ./ lower_bounds_a
# Construct dataframe where first column is iteration number, second is iteration time
- df_stats = DataFrame(Iteration_Number=iteration_count_a,
- Seconds=times_a,
- Upper_Bound=upper_bounds_a,
- Lower_Bound=lower_bounds_a,
- Relative_Gap=realtive_gap_a)
+ header = _get_multi_stage_stats_header()
+ df_stats = DataFrame(header .=>
+ [iteration_count_a, times_a, upper_bounds_a, lower_bounds_a, realtive_gap_a])
+
+ CSV.write(joinpath(outpath, filename), df_stats)
+ return nothing
+end
+
+@doc raw"""
+ create_multi_stage_stats_file(outpath::String)
+
+Create an empty CSV file in the specified output directory with the filename `stats_multi_stage.csv`.
+The file contains the columns defined in `_get_multi_stage_stats_header()`.
+The function first generates the filename and header using `_get_multi_stage_stats_filename()` and
+`_get_multi_stage_stats_header()` respectively. It then creates a DataFrame with column names as headers and
+writes it into a CSV file in the specified output directory.
+
+# Arguments
+- `outpath::String`: The output directory where the statistics file will be written.
+
+# Returns
+- Nothing. A CSV file is written to the `outpath`.
+"""
+function create_multi_stage_stats_file(outpath::String)
+ filename = _get_multi_stage_stats_filename()
+ header = _get_multi_stage_stats_header()
+ df_stats = DataFrame([col_name => Float64[] for col_name in header])
+ CSV.write(joinpath(outpath, filename), df_stats)
+end
+
+@doc raw"""
+ update_multi_stage_stats_file(outpath::String, ic::Int64, upper_bound::Float64, lower_bound::Float64, iteration_time::Float64; new_row::Bool=false)
+
+Update a multi-stage statistics file.
+
+# Arguments
+- `outpath::String`: The output directory where the statistics file will be written.
+- `ic::Int64`: The iteration count.
+- `upper_bound::Float64`: The upper bound value.
+- `lower_bound::Float64`: The lower bound value.
+- `iteration_time::Float64`: The iteration time value.
+- `new_row::Bool=false`: Optional argument to determine whether to append a new row (if true) or update the current row (if false).
+
+The function first checks if the file exists. If it does not, it creates a new one.
+Then, it reads the statistics from the existing file into a DataFrame.
+It calculates the relative gap based on the upper and lower bounds, and either appends a new row or updates the current row based on the `new_row` argument.
+Finally, it writes the updated DataFrame back to the file.
+
+# Returns
+- Nothing. A CSV file is updated or created at the `outpath`.
+"""
+function update_multi_stage_stats_file(outpath::String, ic::Int64, upper_bound::Float64,
+ lower_bound::Float64, iteration_time::Float64; new_row::Bool = false)
+ filename = _get_multi_stage_stats_filename()
+
+ # If the file does not exist, create it
+ if !isfile(joinpath(outpath, filename))
+ create_multi_stage_stats_file(outpath)
+ end
+
+ df_stats = CSV.read(joinpath(outpath, filename), DataFrame, types = Float64)
+
+ relative_gap = (upper_bound - lower_bound) / lower_bound
+
+ new_values = [ic, iteration_time, upper_bound, lower_bound, relative_gap]
- CSV.write(joinpath(outpath, "stats_multi_stage.csv"), df_stats)
+ # If new_row is true, append the new values to the end of the dataframe
+ # otherwise, update the row at index ic
+ new_row ? push!(df_stats, new_values) : (df_stats[ic, :] = new_values)
+ CSV.write(joinpath(outpath, filename), df_stats)
+ return nothing
end
diff --git a/src/time_domain_reduction/precluster.jl b/src/time_domain_reduction/precluster.jl
index b4ddb4df76..1d7352b8d4 100644
--- a/src/time_domain_reduction/precluster.jl
+++ b/src/time_domain_reduction/precluster.jl
@@ -45,4 +45,4 @@ function run_timedomainreduction_multistage!(case::AbstractString)
end
return
-end
\ No newline at end of file
+end
diff --git a/src/time_domain_reduction/time_domain_reduction.jl b/src/time_domain_reduction/time_domain_reduction.jl
index ca6b25ac6b..8f7f1e7a61 100644
--- a/src/time_domain_reduction/time_domain_reduction.jl
+++ b/src/time_domain_reduction/time_domain_reduction.jl
@@ -18,7 +18,6 @@ using Distances
using CSV
using GenX
-
const SEED = 1234
@doc raw"""
@@ -51,8 +50,9 @@ function parse_data(myinputs)
ZONES = myinputs["R_ZONES"]
# DEMAND - Demand_data.csv
- demand_profiles = [ myinputs["pD"][:,l] for l in 1:size(myinputs["pD"],2) ]
- demand_col_names = [DEMAND_COLUMN_PREFIX()*string(l) for l in 1:size(demand_profiles)[1]]
+ demand_profiles = [myinputs["pD"][:, l] for l in 1:size(myinputs["pD"], 2)]
+ demand_col_names = [DEMAND_COLUMN_PREFIX() * string(l)
+ for l in 1:size(demand_profiles)[1]]
demand_zones = [l for l in 1:size(demand_profiles)[1]]
col_to_zone_map = Dict(demand_col_names .=> 1:length(demand_col_names))
@@ -64,15 +64,18 @@ function parse_data(myinputs)
wind_col_names = []
var_col_names = []
for r in 1:length(RESOURCE_ZONES)
- if occursin("PV", RESOURCE_ZONES[r]) || occursin("pv", RESOURCE_ZONES[r]) || occursin("Pv", RESOURCE_ZONES[r]) || occursin("Solar", RESOURCE_ZONES[r]) || occursin("SOLAR", RESOURCE_ZONES[r]) || occursin("solar", RESOURCE_ZONES[r])
+ if occursin("PV", RESOURCE_ZONES[r]) || occursin("pv", RESOURCE_ZONES[r]) ||
+ occursin("Pv", RESOURCE_ZONES[r]) || occursin("Solar", RESOURCE_ZONES[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES[r]) || occursin("solar", RESOURCE_ZONES[r])
push!(solar_col_names, RESOURCE_ZONES[r])
- push!(solar_profiles, myinputs["pP_Max"][r,:])
- elseif occursin("Wind", RESOURCE_ZONES[r]) || occursin("WIND", RESOURCE_ZONES[r]) || occursin("wind", RESOURCE_ZONES[r])
+ push!(solar_profiles, myinputs["pP_Max"][r, :])
+ elseif occursin("Wind", RESOURCE_ZONES[r]) || occursin("WIND", RESOURCE_ZONES[r]) ||
+ occursin("wind", RESOURCE_ZONES[r])
push!(wind_col_names, RESOURCE_ZONES[r])
- push!(wind_profiles, myinputs["pP_Max"][r,:])
+ push!(wind_profiles, myinputs["pP_Max"][r, :])
end
push!(var_col_names, RESOURCE_ZONES[r])
- push!(var_profiles, myinputs["pP_Max"][r,:])
+ push!(var_profiles, myinputs["pP_Max"][r, :])
col_to_zone_map[RESOURCE_ZONES[r]] = ZONES[r]
end
@@ -82,15 +85,19 @@ function parse_data(myinputs)
AllFuelsConst = true
for f in 1:length(fuel_col_names)
push!(fuel_profiles, myinputs["fuel_costs"][fuel_col_names[f]])
- if AllFuelsConst && (minimum(myinputs["fuel_costs"][fuel_col_names[f]]) != maximum(myinputs["fuel_costs"][fuel_col_names[f]]))
+ if AllFuelsConst && (minimum(myinputs["fuel_costs"][fuel_col_names[f]]) !=
+ maximum(myinputs["fuel_costs"][fuel_col_names[f]]))
AllFuelsConst = false
end
end
all_col_names = [demand_col_names; var_col_names; fuel_col_names]
all_profiles = [demand_profiles..., var_profiles..., fuel_profiles...]
- return demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst
+ return demand_col_names,
+ var_col_names, solar_col_names, wind_col_names, fuel_col_names,
+ all_col_names,
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles,
+ all_profiles,
+ col_to_zone_map, AllFuelsConst
end
@doc raw"""
@@ -113,39 +120,46 @@ function parse_multi_stage_data(inputs_dict)
# [ REPLACE THIS with multi_stage_settings.yml StageLengths ]
# In case not all stages have the same length, check relative lengths
- stage_lengths = [ size(inputs_dict[t]["pD"][:,1],1) for t in 1:length(keys(inputs_dict)) ]
+ stage_lengths = [size(inputs_dict[t]["pD"][:, 1], 1)
+ for t in 1:length(keys(inputs_dict))]
total_length = sum(stage_lengths)
- relative_lengths = stage_lengths/total_length
+ relative_lengths = stage_lengths / total_length
# DEMAND - Demand_data.csv
- stage_demand_profiles = [ inputs_dict[t]["pD"][:,l] for t in 1:length(keys(inputs_dict)), l in 1:size(inputs_dict[1]["pD"],2) ]
- vector_lps = [stage_demand_profiles[:,l] for l in 1:size(inputs_dict[1]["pD"],2)]
- demand_profiles = [reduce(vcat,vector_lps[l]) for l in 1:size(inputs_dict[1]["pD"],2)]
- demand_col_names = [DEMAND_COLUMN_PREFIX()*string(l) for l in 1:size(demand_profiles)[1]]
+ stage_demand_profiles = [inputs_dict[t]["pD"][:, l]
+ for t in 1:length(keys(inputs_dict)),
+ l in 1:size(inputs_dict[1]["pD"], 2)]
+ vector_lps = [stage_demand_profiles[:, l] for l in 1:size(inputs_dict[1]["pD"], 2)]
+ demand_profiles = [reduce(vcat, vector_lps[l]) for l in 1:size(inputs_dict[1]["pD"], 2)]
+ demand_col_names = [DEMAND_COLUMN_PREFIX() * string(l)
+ for l in 1:size(demand_profiles)[1]]
demand_zones = [l for l in 1:size(demand_profiles)[1]]
col_to_zone_map = Dict(demand_col_names .=> 1:length(demand_col_names))
# CAPACITY FACTORS - Generators_variability.csv
for r in 1:length(RESOURCE_ZONES)
- if occursin("PV", RESOURCE_ZONES[r]) || occursin("pv", RESOURCE_ZONES[r]) || occursin("Pv", RESOURCE_ZONES[r]) || occursin("Solar", RESOURCE_ZONES[r]) || occursin("SOLAR", RESOURCE_ZONES[r]) || occursin("solar", RESOURCE_ZONES[r])
+ if occursin("PV", RESOURCE_ZONES[r]) || occursin("pv", RESOURCE_ZONES[r]) ||
+ occursin("Pv", RESOURCE_ZONES[r]) || occursin("Solar", RESOURCE_ZONES[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES[r]) || occursin("solar", RESOURCE_ZONES[r])
push!(solar_col_names, RESOURCE_ZONES[r])
pv_all_stages = []
for t in 1:length(keys(inputs_dict))
- pv_all_stages = vcat(pv_all_stages, inputs_dict[t]["pP_Max"][r,:])
+ pv_all_stages = vcat(pv_all_stages, inputs_dict[t]["pP_Max"][r, :])
end
push!(solar_profiles, pv_all_stages)
- elseif occursin("Wind", RESOURCE_ZONES[r]) || occursin("WIND", RESOURCE_ZONES[r]) || occursin("wind", RESOURCE_ZONES[r])
+ elseif occursin("Wind", RESOURCE_ZONES[r]) || occursin("WIND", RESOURCE_ZONES[r]) ||
+ occursin("wind", RESOURCE_ZONES[r])
push!(wind_col_names, RESOURCE_ZONES[r])
wind_all_stages = []
for t in 1:length(keys(inputs_dict))
- wind_all_stages = vcat(wind_all_stages, inputs_dict[t]["pP_Max"][r,:])
+ wind_all_stages = vcat(wind_all_stages, inputs_dict[t]["pP_Max"][r, :])
end
push!(wind_profiles, wind_all_stages)
end
push!(var_col_names, RESOURCE_ZONES[r])
var_all_stages = []
for t in 1:length(keys(inputs_dict))
- var_all_stages = vcat(var_all_stages, inputs_dict[t]["pP_Max"][r,:])
+ var_all_stages = vcat(var_all_stages, inputs_dict[t]["pP_Max"][r, :])
end
push!(var_profiles, var_all_stages)
col_to_zone_map[RESOURCE_ZONES[r]] = ZONES[r]
@@ -158,8 +172,10 @@ function parse_multi_stage_data(inputs_dict)
for f in 1:length(fuel_col_names)
fuel_all_stages = []
for t in 1:length(keys(inputs_dict))
- fuel_all_stages = vcat(fuel_all_stages, inputs_dict[t]["fuel_costs"][fuel_col_names[f]])
- if AllFuelsConst && (minimum(inputs_dict[t]["fuel_costs"][fuel_col_names[f]]) != maximum(inputs_dict[t]["fuel_costs"][fuel_col_names[f]]))
+ fuel_all_stages = vcat(fuel_all_stages,
+ inputs_dict[t]["fuel_costs"][fuel_col_names[f]])
+ if AllFuelsConst && (minimum(inputs_dict[t]["fuel_costs"][fuel_col_names[f]]) !=
+ maximum(inputs_dict[t]["fuel_costs"][fuel_col_names[f]]))
AllFuelsConst = false
end
end
@@ -168,9 +184,12 @@ function parse_multi_stage_data(inputs_dict)
all_col_names = [demand_col_names; var_col_names; fuel_col_names]
all_profiles = [demand_profiles..., var_profiles..., fuel_profiles...]
- return demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst, stage_lengths, total_length, relative_lengths
+ return demand_col_names,
+ var_col_names, solar_col_names, wind_col_names, fuel_col_names,
+ all_col_names,
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles,
+ all_profiles,
+ col_to_zone_map, AllFuelsConst, stage_lengths, total_length, relative_lengths
end
@doc raw"""
@@ -184,13 +203,16 @@ representation is within a given proportion of the "maximum" possible deviation.
"""
function check_condition(Threshold, R, OldColNames, ScalingMethod, TimestepsPerRepPeriod)
if ScalingMethod == "N"
- return maximum(R.costs)/(length(OldColNames)*TimestepsPerRepPeriod) < Threshold
+ return maximum(R.costs) / (length(OldColNames) * TimestepsPerRepPeriod) < Threshold
elseif ScalingMethod == "S"
- return maximum(R.costs)/(length(OldColNames)*TimestepsPerRepPeriod*4) < Threshold
+ return maximum(R.costs) / (length(OldColNames) * TimestepsPerRepPeriod * 4) <
+ Threshold
else
- println("INVALID Scaling Method ", ScalingMethod, " / Choose N for Normalization or S for Standardization. Proceeding with N.")
+ println("INVALID Scaling Method ",
+ ScalingMethod,
+ " / Choose N for Normalization or S for Standardization. Proceeding with N.")
end
- return maximum(R.costs)/(length(OldColNames)*TimestepsPerRepPeriod) < Threshold
+ return maximum(R.costs) / (length(OldColNames) * TimestepsPerRepPeriod) < Threshold
end
@doc raw"""
@@ -213,20 +235,28 @@ K-Means: [https://juliastats.org/Clustering.jl/dev/kmeans.html](https://juliasta
K-Medoids: [https://juliastats.org/Clustering.jl/stable/kmedoids.html](https://juliastats.org/Clustering.jl/stable/kmedoids.html)
"""
-function cluster(ClusterMethod, ClusteringInputDF, NClusters, nIters, v=false, random=true)
+function cluster(ClusterMethod,
+ ClusteringInputDF,
+ NClusters,
+ nIters,
+ v = false,
+ random = true)
if ClusterMethod == "kmeans"
- DistMatrix = pairwise(Euclidean(), Matrix(ClusteringInputDF), dims=2)
- R = kmeans(Matrix(ClusteringInputDF), NClusters, init=:kmcen)
+ DistMatrix = pairwise(Euclidean(), Matrix(ClusteringInputDF), dims = 2)
+ R = kmeans(Matrix(ClusteringInputDF), NClusters, init = :kmcen)
for i in 1:nIters
- if !random; Random.seed!(SEED); end
+ if !random
+ Random.seed!(SEED)
+ end
R_i = kmeans(Matrix(ClusteringInputDF), NClusters)
if R_i.totalcost < R.totalcost
R = R_i
end
- if v && (i % (nIters/10) == 0)
- println(string(i) * " : " * string(round(R_i.totalcost, digits=3)) * " " * string(round(R.totalcost, digits=3)) )
+ if v && (i % (nIters / 10) == 0)
+ println(string(i) * " : " * string(round(R_i.totalcost, digits = 3)) * " " *
+ string(round(R.totalcost, digits = 3)))
end
end
@@ -236,22 +266,26 @@ function cluster(ClusterMethod, ClusteringInputDF, NClusters, nIters, v=false, r
M = []
for i in 1:NClusters
- dists = [euclidean(Centers[:,i], ClusteringInputDF[!, j]) for j in 1:size(ClusteringInputDF, 2)]
- push!(M,argmin(dists))
+ dists = [euclidean(Centers[:, i], ClusteringInputDF[!, j])
+ for j in 1:size(ClusteringInputDF, 2)]
+ push!(M, argmin(dists))
end
elseif ClusterMethod == "kmedoids"
- DistMatrix = pairwise(Euclidean(), Matrix(ClusteringInputDF), dims=2)
- R = kmedoids(DistMatrix, NClusters, init=:kmcen)
+ DistMatrix = pairwise(Euclidean(), Matrix(ClusteringInputDF), dims = 2)
+ R = kmedoids(DistMatrix, NClusters, init = :kmcen)
for i in 1:nIters
- if !random; Random.seed!(SEED); end
+ if !random
+ Random.seed!(SEED)
+ end
R_i = kmedoids(DistMatrix, NClusters)
if R_i.totalcost < R.totalcost
R = R_i
end
- if v && (i % (nIters/10) == 0)
- println(string(i) * " : " * string(round(R_i.totalcost, digits=3)) * " " * string(round(R.totalcost, digits=3)) )
+ if v && (i % (nIters / 10) == 0)
+ println(string(i) * " : " * string(round(R_i.totalcost, digits = 3)) * " " *
+ string(round(R.totalcost, digits = 3)))
end
end
@@ -271,14 +305,16 @@ end
Remove and store the columns that do not vary during the period.
"""
-function RemoveConstCols(all_profiles, all_col_names, v=false)
+function RemoveConstCols(all_profiles, all_col_names, v = false)
ConstData = []
ConstIdx = []
ConstCols = []
for c in 1:length(all_col_names)
Const = minimum(all_profiles[c]) == maximum(all_profiles[c])
if Const
- if v println("Removing constant col: ", all_col_names[c]) end
+ if v
+ println("Removing constant col: ", all_col_names[c])
+ end
push!(ConstData, all_profiles[c])
push!(ConstCols, all_col_names[c])
push!(ConstIdx, c)
@@ -304,37 +340,59 @@ system to be included among the extreme periods. They would select
"""
function get_extreme_period(DF, GDF, profKey, typeKey, statKey,
- ConstCols, demand_col_names, solar_col_names, wind_col_names, v=false)
- if v println(profKey," ", typeKey," ", statKey) end
+ ConstCols, demand_col_names, solar_col_names, wind_col_names, v = false)
+ if v
+ println(profKey, " ", typeKey, " ", statKey)
+ end
if typeKey == "Integral"
if profKey == "Demand"
- (stat, group_idx) = get_integral_extreme(GDF, statKey, demand_col_names, ConstCols)
+ (stat, group_idx) = get_integral_extreme(GDF,
+ statKey,
+ demand_col_names,
+ ConstCols)
elseif profKey == "PV"
- (stat, group_idx) = get_integral_extreme(GDF, statKey, solar_col_names, ConstCols)
+ (stat, group_idx) = get_integral_extreme(GDF,
+ statKey,
+ solar_col_names,
+ ConstCols)
elseif profKey == "Wind"
- (stat, group_idx) = get_integral_extreme(GDF, statKey, wind_col_names, ConstCols)
+ (stat, group_idx) = get_integral_extreme(GDF,
+ statKey,
+ wind_col_names,
+ ConstCols)
else
- println("Error: Profile Key ", profKey, " is invalid. Choose `Demand', `PV' or `Wind'.")
+ println("Error: Profile Key ",
+ profKey,
+ " is invalid. Choose `Demand', `PV' or `Wind'.")
end
elseif typeKey == "Absolute"
if profKey == "Demand"
- (stat, group_idx) = get_absolute_extreme(DF, statKey, demand_col_names, ConstCols)
+ (stat, group_idx) = get_absolute_extreme(DF,
+ statKey,
+ demand_col_names,
+ ConstCols)
elseif profKey == "PV"
- (stat, group_idx) = get_absolute_extreme(DF, statKey, solar_col_names, ConstCols)
+ (stat, group_idx) = get_absolute_extreme(DF,
+ statKey,
+ solar_col_names,
+ ConstCols)
elseif profKey == "Wind"
(stat, group_idx) = get_absolute_extreme(DF, statKey, wind_col_names, ConstCols)
else
- println("Error: Profile Key ", profKey, " is invalid. Choose `Demand', `PV' or `Wind'.")
+ println("Error: Profile Key ",
+ profKey,
+ " is invalid. Choose `Demand', `PV' or `Wind'.")
end
- else
- println("Error: Type Key ", typeKey, " is invalid. Choose `Absolute' or `Integral'.")
- stat = 0
- group_idx = 0
- end
+ else
+ println("Error: Type Key ",
+ typeKey,
+ " is invalid. Choose `Absolute' or `Integral'.")
+ stat = 0
+ group_idx = 0
+ end
return (stat, group_idx)
end
-
@doc raw"""
get_integral_extreme(GDF, statKey, col_names, ConstCols)
@@ -345,9 +403,11 @@ summed over the period.
"""
function get_integral_extreme(GDF, statKey, col_names, ConstCols)
if statKey == "Max"
- (stat, stat_idx) = findmax( sum([GDF[!, Symbol(c)] for c in setdiff(col_names, ConstCols) ]) )
+ (stat, stat_idx) = findmax(sum([GDF[!, Symbol(c)]
+ for c in setdiff(col_names, ConstCols)]))
elseif statKey == "Min"
- (stat, stat_idx) = findmin( sum([GDF[!, Symbol(c)] for c in setdiff(col_names, ConstCols) ]) )
+ (stat, stat_idx) = findmin(sum([GDF[!, Symbol(c)]
+ for c in setdiff(col_names, ConstCols)]))
else
println("Error: Statistic Key ", statKey, " is invalid. Choose `Max' or `Min'.")
end
@@ -363,10 +423,12 @@ Get the period index of the single timestep with the minimum or maximum demand o
"""
function get_absolute_extreme(DF, statKey, col_names, ConstCols)
if statKey == "Max"
- (stat, stat_idx) = findmax( sum([DF[!, Symbol(c)] for c in setdiff(col_names, ConstCols) ]) )
+ (stat, stat_idx) = findmax(sum([DF[!, Symbol(c)]
+ for c in setdiff(col_names, ConstCols)]))
group_idx = DF.Group[stat_idx]
elseif statKey == "Min"
- (stat, stat_idx) = findmin( sum([DF[!, Symbol(c)] for c in setdiff(col_names, ConstCols) ]) )
+ (stat, stat_idx) = findmin(sum([DF[!, Symbol(c)]
+ for c in setdiff(col_names, ConstCols)]))
group_idx = DF.Group[stat_idx]
else
println("Error: Statistic Key ", statKey, " is invalid. Choose `Max' or `Min'.")
@@ -374,7 +436,6 @@ function get_absolute_extreme(DF, statKey, col_names, ConstCols)
return (stat, group_idx)
end
-
@doc raw"""
scale_weights(W, H)
@@ -386,9 +447,11 @@ w_j \leftarrow H \cdot \frac{w_j}{\sum_i w_i} \: \: \: \forall w_j \in W
```
"""
-function scale_weights(W, H, v=false)
- if v println("Weights before scaling: ", W) end
- W = [ float(w)/sum(W) * H for w in W] # Scale to number of hours in input data
+function scale_weights(W, H, v = false)
+ if v
+ println("Weights before scaling: ", W)
+ end
+ W = [float(w) / sum(W) * H for w in W] # Scale to number of hours in input data
if v
println("Weights after scaling: ", W)
println("Sum of Updated Cluster Weights: ", sum(W))
@@ -396,7 +459,6 @@ function scale_weights(W, H, v=false)
return W
end
-
@doc raw"""
get_demand_multipliers(ClusterOutputData, ModifiedData, M, W, DemandCols, TimestepsPerRepPeriod, NewColNames, NClusters, Ncols)
@@ -416,7 +478,16 @@ demand in timestep $i$ for representative period $m$ in zone $z$, $w_m$ is the w
hours that one hour in representative period $m$ represents in the original profile, and $k_z$ is the zonal demand multiplier returned by the function.
"""
-function get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols, TimestepsPerRepPeriod, NewColNames, NClusters, Ncols, v=false)
+function get_demand_multipliers(ClusterOutputData,
+ InputData,
+ M,
+ W,
+ DemandCols,
+ TimestepsPerRepPeriod,
+ NewColNames,
+ NClusters,
+ Ncols,
+ v = false)
# Compute original zonal total demands
zone_sums = Dict()
for demandcol in DemandCols
@@ -426,7 +497,9 @@ function get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols,
# Compute zonal demands per representative period
cluster_zone_sums = Dict()
for m in 1:NClusters
- clustered_lp_DF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols if (Symbol(NewColNames[i]) in DemandCols)) )
+ clustered_lp_DF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols
+ if (Symbol(NewColNames[i]) in DemandCols)))
cluster_zone_sums[m] = Dict()
for demandcol in DemandCols
cluster_zone_sums[m][demandcol] = sum(clustered_lp_DF[:, demandcol])
@@ -439,10 +512,20 @@ function get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols,
demand_mults = Dict()
for demandcol in DemandCols
for m in 1:NClusters
- weighted_cluster_zone_sums[demandcol] += (W[m]/(TimestepsPerRepPeriod))*cluster_zone_sums[m][demandcol]
+ weighted_cluster_zone_sums[demandcol] += (W[m] / (TimestepsPerRepPeriod)) *
+ cluster_zone_sums[m][demandcol]
+ end
+ demand_mults[demandcol] = zone_sums[demandcol] /
+ weighted_cluster_zone_sums[demandcol]
+ if v
+ println(demandcol,
+ ": ",
+ weighted_cluster_zone_sums[demandcol],
+ " vs. ",
+ zone_sums[demandcol],
+ " => ",
+ demand_mults[demandcol])
end
- demand_mults[demandcol] = zone_sums[demandcol]/weighted_cluster_zone_sums[demandcol]
- if v println(demandcol, ": ", weighted_cluster_zone_sums[demandcol], " vs. ", zone_sums[demandcol], " => ", demand_mults[demandcol]) end
end
# Zone-wise validation that scaled clustered demand equals original demand (Don't actually scale demand in this function)
@@ -453,20 +536,34 @@ function get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols,
if (NewColNames[i] in DemandCols)
# Uncomment this line if we decide to scale demand here instead of later. (Also remove "demand_mults[NewColNames[i]]*" term from new_zone_sums computation)
#ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] *= demand_mults[NewColNames[i]]
- println(" Scaling ", M[m], " (", NewColNames[i], ") : ", cluster_zone_sums[m][NewColNames[i]], " => ", demand_mults[NewColNames[i]]*sum(ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i]))
- new_zone_sums[NewColNames[i]] += (W[m]/(TimestepsPerRepPeriod))*demand_mults[NewColNames[i]]*sum(ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i])
+ println(" Scaling ",
+ M[m],
+ " (",
+ NewColNames[i],
+ ") : ",
+ cluster_zone_sums[m][NewColNames[i]],
+ " => ",
+ demand_mults[NewColNames[i]] *
+ sum(ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]))
+ new_zone_sums[NewColNames[i]] += (W[m] / (TimestepsPerRepPeriod)) *
+ demand_mults[NewColNames[i]] *
+ sum(ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)])
end
end
end
for demandcol in DemandCols
- println(demandcol, ": ", new_zone_sums[demandcol], " =?= ", zone_sums[demandcol])
+ println(demandcol,
+ ": ",
+ new_zone_sums[demandcol],
+ " =?= ",
+ zone_sums[demandcol])
end
end
return demand_mults
end
-function update_deprecated_tdr_inputs!(setup::Dict{Any,Any})
+function update_deprecated_tdr_inputs!(setup::Dict{Any, Any})
if "LoadWeight" in keys(setup)
setup["DemandWeight"] = setup["LoadWeight"]
delete!(setup, "LoadWeight")
@@ -479,14 +576,13 @@ function update_deprecated_tdr_inputs!(setup::Dict{Any,Any})
extr_dict = setup[extreme_periods]
if "Load" in keys(extr_dict)
- extr_dict["Demand"] = extr_dict["Load"]
+ extr_dict["Demand"] = extr_dict["Load"]
delete!(extr_dict, "Load")
- @info "In time_domain_reduction_settings file the key Load is deprecated. Prefer Demand."
- end
+ @info "In time_domain_reduction_settings file the key Load is deprecated. Prefer Demand."
+ end
end
end
-
@doc raw"""
cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; random=true)
@@ -541,13 +637,21 @@ to separate Vre_and_stor_solar_variability.csv and Vre_and_stor_wind_variability
and wind profiles for co-located resources will be separated into different CSV files to be read by loading the inputs
after the clustering of the inputs has occurred.
"""
-function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; random=true)
- if v println(now()) end
+function cluster_inputs(inpath,
+ settings_path,
+ mysetup,
+ stage_id = -99,
+ v = false;
+ random = true)
+ if v
+ println(now())
+ end
##### Step 0: Load in settings and data
# Read time domain reduction settings file time_domain_reduction_settings.yml
- myTDRsetup = YAML.load(open(joinpath(settings_path,"time_domain_reduction_settings.yml")))
+ myTDRsetup = YAML.load(open(joinpath(settings_path,
+ "time_domain_reduction_settings.yml")))
update_deprecated_tdr_inputs!(myTDRsetup)
# Accept model parameters from the settings file time_domain_reduction_settings.yml
@@ -582,46 +686,55 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# Define a local version of the setup so that you can modify the mysetup["ParameterScale"] value to be zero in case it is 1
mysetup_local = copy(mysetup)
# If ParameterScale =1 then make it zero, since clustered inputs will be scaled prior to generating model
- mysetup_local["ParameterScale"]=0 # Performing cluster and report outputs in user-provided units
+ mysetup_local["ParameterScale"] = 0 # Performing cluster and report outputs in user-provided units
# Define another local version of setup such that Multi-Stage Non-Concatentation TDR can iteratively read in the raw data
mysetup_MS = copy(mysetup)
- mysetup_MS["TimeDomainReduction"]=0
- mysetup_MS["DoNotReadPeriodMap"]=1
- mysetup_MS["ParameterScale"]=0
+ mysetup_MS["TimeDomainReduction"] = 0
+ mysetup_MS["DoNotReadPeriodMap"] = 1
+ mysetup_MS["ParameterScale"] = 0
if MultiStage == 1
- model_dict=Dict()
- inputs_dict=Dict()
+ model_dict = Dict()
+ inputs_dict = Dict()
for t in 1:NumStages
- # Step 0) Set Model Year
- mysetup["MultiStageSettingsDict"]["CurStage"] = t
+ # Step 0) Set Model Year
+ mysetup["MultiStageSettingsDict"]["CurStage"] = t
- # Step 1) Load Inputs
- global inpath_sub = string("$inpath/inputs/inputs_p",t)
+ # Step 1) Load Inputs
+ global inpath_sub = string("$inpath/inputs/inputs_p", t)
# this prevents doubled time domain reduction in stages past
# the first, even if the first stage is okay.
- prevent_doubled_timedomainreduction(joinpath(inpath_sub, mysetup["SystemFolder"]))
+ prevent_doubled_timedomainreduction(joinpath(inpath_sub,
+ mysetup["SystemFolder"]))
- inputs_dict[t] = load_inputs(mysetup_MS, inpath_sub)
+ inputs_dict[t] = load_inputs(mysetup_MS, inpath_sub)
- inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],mysetup["MultiStageSettingsDict"],mysetup["NetworkExpansion"])
+ inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],
+ mysetup["MultiStageSettingsDict"],
+ mysetup["NetworkExpansion"])
end
if MultiStageConcatenate == 1
- if v println("MultiStage with Concatenation") end
+ if v
+ println("MultiStage with Concatenation")
+ end
RESOURCE_ZONES = inputs_dict[1]["RESOURCE_ZONES"]
RESOURCES = inputs_dict[1]["RESOURCE_NAMES"]
ZONES = inputs_dict[1]["R_ZONES"]
# Parse input data into useful structures divided by type (demand, wind, solar, fuel, groupings thereof, etc.)
# TO DO LATER: Replace these with collections of col_names, profiles, zones
demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst, stage_lengths, total_length, relative_lengths = parse_multi_stage_data(inputs_dict)
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
+ col_to_zone_map, AllFuelsConst, stage_lengths, total_length, relative_lengths = parse_multi_stage_data(inputs_dict)
else # TDR each period individually
- if v println("MultiStage without Concatenation") end
- if v println("---> STAGE ", stage_id) end
+ if v
+ println("MultiStage without Concatenation")
+ end
+ if v
+ println("---> STAGE ", stage_id)
+ end
myinputs = inputs_dict[stage_id]
RESOURCE_ZONES = myinputs["RESOURCE_ZONES"]
RESOURCES = myinputs["RESOURCE_NAMES"]
@@ -629,32 +742,42 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# Parse input data into useful structures divided by type (demand, wind, solar, fuel, groupings thereof, etc.)
# TO DO LATER: Replace these with collections of col_names, profiles, zones
demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst = parse_data(myinputs)
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
+ col_to_zone_map, AllFuelsConst = parse_data(myinputs)
end
else
- if v println("Not MultiStage") end
- myinputs = load_inputs(mysetup_local,inpath)
+ if v
+ println("Not MultiStage")
+ end
+ myinputs = load_inputs(mysetup_local, inpath)
RESOURCE_ZONES = myinputs["RESOURCE_ZONES"]
RESOURCES = myinputs["RESOURCE_NAMES"]
ZONES = myinputs["R_ZONES"]
# Parse input data into useful structures divided by type (demand, wind, solar, fuel, groupings thereof, etc.)
# TO DO LATER: Replace these with collections of col_names, profiles, zones
demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst = parse_data(myinputs)
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
+ col_to_zone_map, AllFuelsConst = parse_data(myinputs)
+ end
+ if v
+ println()
end
- if v println() end
# Remove Constant Columns - Add back later in final output
- all_profiles, all_col_names, ConstData, ConstCols, ConstIdx = RemoveConstCols(all_profiles, all_col_names, v)
+ all_profiles, all_col_names, ConstData, ConstCols, ConstIdx = RemoveConstCols(
+ all_profiles,
+ all_col_names,
+ v)
# Determine whether or not to time domain reduce fuel profiles as well based on user choice and file structure (i.e., variable fuels in Fuels_data.csv)
IncludeFuel = true
- if (ClusterFuelPrices != 1) || (AllFuelsConst) IncludeFuel = false end
+ if (ClusterFuelPrices != 1) || (AllFuelsConst)
+ IncludeFuel = false
+ end
# Put it together!
- InputData = DataFrame( Dict( all_col_names[c]=>all_profiles[c] for c in 1:length(all_col_names) ) )
+ InputData = DataFrame(Dict(all_col_names[c] => all_profiles[c]
+ for c in 1:length(all_col_names)))
InputData = convert.(Float64, InputData)
if v
println("Demand (MW) and Capacity Factor Profiles: ")
@@ -666,27 +789,40 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
Nhours = nrow(InputData) # Timesteps
Ncols = length(NewColNames) - 1
-
##### Step 1: Normalize or standardize all demand, renewables, and fuel data / optionally scale with DemandWeight
# Normalize/standardize data based on user-provided method
if ScalingMethod == "N"
- normProfiles = [ StatsBase.transform(fit(UnitRangeTransform, InputData[:,c]; dims=1, unit=true), InputData[:,c]) for c in 1:length(OldColNames) ]
+ normProfiles = [StatsBase.transform(
+ fit(UnitRangeTransform,
+ InputData[:, c];
+ dims = 1,
+ unit = true),
+ InputData[:, c]) for c in 1:length(OldColNames)]
elseif ScalingMethod == "S"
- normProfiles = [ StatsBase.transform(fit(ZScoreTransform, InputData[:,c]; dims=1), InputData[:,c]) for c in 1:length(OldColNames) ]
+ normProfiles = [StatsBase.transform(
+ fit(ZScoreTransform, InputData[:, c]; dims = 1),
+ InputData[:, c]) for c in 1:length(OldColNames)]
else
println("ERROR InvalidScalingMethod: Use N for Normalization or S for Standardization.")
println("CONTINUING using 0->1 normalization...")
- normProfiles = [ StatsBase.transform(fit(UnitRangeTransform, InputData[:,c]; dims=1, unit=true), InputData[:,c]) for c in 1:length(OldColNames) ]
+ normProfiles = [StatsBase.transform(
+ fit(UnitRangeTransform,
+ InputData[:, c];
+ dims = 1,
+ unit = true),
+ InputData[:, c]) for c in 1:length(OldColNames)]
end
# Compile newly normalized/standardized profiles
- AnnualTSeriesNormalized = DataFrame(Dict( OldColNames[c] => normProfiles[c] for c in 1:length(OldColNames) ))
+ AnnualTSeriesNormalized = DataFrame(Dict(OldColNames[c] => normProfiles[c]
+ for c in 1:length(OldColNames)))
# Optional pre-scaling of demand in order to give it more preference in clutering algorithm
if DemandWeight != 1 # If we want to value demand more/less than capacity factors. Assume nonnegative. LW=1 means no scaling.
for c in demand_col_names
- AnnualTSeriesNormalized[!, Symbol(c)] .= AnnualTSeriesNormalized[!, Symbol(c)] .* DemandWeight
+ AnnualTSeriesNormalized[!, Symbol(c)] .= AnnualTSeriesNormalized[!,
+ Symbol(c)] .* DemandWeight
end
end
@@ -696,121 +832,196 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
println()
end
-
##### STEP 2: Identify extreme periods in the model, Reshape data for clustering
# Total number of subperiods available in the dataset, where each subperiod length = TimestepsPerRepPeriod
- NumDataPoints = Nhours÷TimestepsPerRepPeriod # 364 weeks in 7 years
- if v println("Total Subperiods in the data set: ", NumDataPoints) end
- InputData[:, :Group] .= (1:Nhours) .÷ (TimestepsPerRepPeriod+0.0001) .+ 1 # Group col identifies the subperiod ID of each hour (e.g., all hours in week 2 have Group=2 if using TimestepsPerRepPeriod=168)
+ NumDataPoints = Nhours ÷ TimestepsPerRepPeriod # 364 weeks in 7 years
+ if v
+ println("Total Subperiods in the data set: ", NumDataPoints)
+ end
+ InputData[:, :Group] .= (1:Nhours) .÷ (TimestepsPerRepPeriod + 0.0001) .+ 1 # Group col identifies the subperiod ID of each hour (e.g., all hours in week 2 have Group=2 if using TimestepsPerRepPeriod=168)
# Group by period (e.g., week)
cgdf = combine(groupby(InputData, :Group), [c .=> sum for c in OldColNames])
- cgdf = cgdf[setdiff(1:end, NumDataPoints+1), :]
+ cgdf = cgdf[setdiff(1:end, NumDataPoints + 1), :]
rename!(cgdf, [:Group; Symbol.(OldColNames)])
# Extreme period identification based on user selection in time_domain_reduction_settings.yml
DemandExtremePeriod = false # Used when deciding whether or not to scale demand curves to equal original total demand
ExtremeWksList = []
if UseExtremePeriods == 1
- for profKey in keys(ExtPeriodSelections)
- for geoKey in keys(ExtPeriodSelections[profKey])
- for typeKey in keys(ExtPeriodSelections[profKey][geoKey])
- for statKey in keys(ExtPeriodSelections[profKey][geoKey][typeKey])
- if ExtPeriodSelections[profKey][geoKey][typeKey][statKey] == 1
- if profKey == "Demand"
- DemandExtremePeriod = true
- end
- if geoKey == "System"
- if v print(geoKey, " ") end
- (stat, group_idx) = get_extreme_period(InputData, cgdf, profKey, typeKey, statKey, ConstCols, demand_col_names, solar_col_names, wind_col_names, v)
- push!(ExtremeWksList, floor(Int, group_idx))
- if v println(group_idx, " : ", stat) end
- elseif geoKey == "Zone"
- for z in sort(unique(ZONES))
- z_cols = [k for (k,v) in col_to_zone_map if v==z]
- if profKey == "Demand" z_cols_type = intersect(z_cols, demand_col_names)
- elseif profKey == "PV" z_cols_type = intersect(z_cols, solar_col_names)
- elseif profKey == "Wind" z_cols_type = intersect(z_cols, wind_col_names)
- else z_cols_type = []
- end
- z_cols_type = setdiff(z_cols_type, ConstCols)
- if length(z_cols_type) > 0
- if v print(geoKey, " ") end
- (stat, group_idx) = get_extreme_period(select(InputData, [:Group; Symbol.(z_cols_type)]), select(cgdf, [:Group; Symbol.(z_cols_type)]), profKey, typeKey, statKey, ConstCols, z_cols_type, z_cols_type, z_cols_type, v)
- push!(ExtremeWksList, floor(Int, group_idx))
- if v println(group_idx, " : ", stat, "(", z, ")") end
- else
- if v println("Zone ", z, " has no time series profiles of type ", profKey) end
- end
- end
- else
- println("Error: Geography Key ", geoKey, " is invalid. Select `System' or `Zone'.")
- end
- end
- end
- end
- end
- end
- if v println(ExtremeWksList) end
- sort!(unique!(ExtremeWksList))
- if v println("Reduced to ", ExtremeWksList) end
+ for profKey in keys(ExtPeriodSelections)
+ for geoKey in keys(ExtPeriodSelections[profKey])
+ for typeKey in keys(ExtPeriodSelections[profKey][geoKey])
+ for statKey in keys(ExtPeriodSelections[profKey][geoKey][typeKey])
+ if ExtPeriodSelections[profKey][geoKey][typeKey][statKey] == 1
+ if profKey == "Demand"
+ DemandExtremePeriod = true
+ end
+ if geoKey == "System"
+ if v
+ print(geoKey, " ")
+ end
+ (stat, group_idx) = get_extreme_period(InputData,
+ cgdf,
+ profKey,
+ typeKey,
+ statKey,
+ ConstCols,
+ demand_col_names,
+ solar_col_names,
+ wind_col_names,
+ v)
+ push!(ExtremeWksList, floor(Int, group_idx))
+ if v
+ println(group_idx, " : ", stat)
+ end
+ elseif geoKey == "Zone"
+ for z in sort(unique(ZONES))
+ z_cols = [k for (k, v) in col_to_zone_map if v == z]
+ if profKey == "Demand"
+ z_cols_type = intersect(z_cols, demand_col_names)
+ elseif profKey == "PV"
+ z_cols_type = intersect(z_cols, solar_col_names)
+ elseif profKey == "Wind"
+ z_cols_type = intersect(z_cols, wind_col_names)
+ else
+ z_cols_type = []
+ end
+ z_cols_type = setdiff(z_cols_type, ConstCols)
+ if length(z_cols_type) > 0
+ if v
+ print(geoKey, " ")
+ end
+ (stat, group_idx) = get_extreme_period(
+ select(InputData,
+ [:Group; Symbol.(z_cols_type)]),
+ select(cgdf, [:Group; Symbol.(z_cols_type)]),
+ profKey,
+ typeKey,
+ statKey,
+ ConstCols,
+ z_cols_type,
+ z_cols_type,
+ z_cols_type,
+ v)
+ push!(ExtremeWksList, floor(Int, group_idx))
+ if v
+ println(group_idx, " : ", stat, "(", z, ")")
+ end
+ else
+ if v
+ println("Zone ",
+ z,
+ " has no time series profiles of type ",
+ profKey)
+ end
+ end
+ end
+ else
+ println("Error: Geography Key ",
+ geoKey,
+ " is invalid. Select `System' or `Zone'.")
+ end
+ end
+ end
+ end
+ end
+ end
+ if v
+ println(ExtremeWksList)
+ end
+ sort!(unique!(ExtremeWksList))
+ if v
+ println("Reduced to ", ExtremeWksList)
+ end
end
### DATA MODIFICATION - Shifting InputData and Normalized InputData
# from 8760 (# hours) by n (# profiles) DF to
# 168*n (n period-stacked profiles) by 52 (# periods) DF
- DFsToConcat = [stack(InputData[isequal.(InputData.Group,w),:], OldColNames)[!,:value] for w in 1:NumDataPoints if w <= NumDataPoints ]
+ DFsToConcat = [stack(InputData[isequal.(InputData.Group, w), :], OldColNames)[!,
+ :value] for w in 1:NumDataPoints if w <= NumDataPoints]
ModifiedData = DataFrame(Dict(Symbol(i) => DFsToConcat[i] for i in 1:NumDataPoints))
- AnnualTSeriesNormalized[:, :Group] .= (1:Nhours) .÷ (TimestepsPerRepPeriod+0.0001) .+ 1
- DFsToConcatNorm = [stack(AnnualTSeriesNormalized[isequal.(AnnualTSeriesNormalized.Group,w),:], OldColNames)[!,:value] for w in 1:NumDataPoints if w <= NumDataPoints ]
- ModifiedDataNormalized = DataFrame(Dict(Symbol(i) => DFsToConcatNorm[i] for i in 1:NumDataPoints))
+ AnnualTSeriesNormalized[:, :Group] .= (1:Nhours) .÷ (TimestepsPerRepPeriod + 0.0001) .+
+ 1
+ DFsToConcatNorm = [stack(
+ AnnualTSeriesNormalized[
+ isequal.(AnnualTSeriesNormalized.Group,
+ w),
+ :],
+ OldColNames)[!,
+ :value] for w in 1:NumDataPoints if w <= NumDataPoints]
+ ModifiedDataNormalized = DataFrame(Dict(Symbol(i) => DFsToConcatNorm[i]
+ for i in 1:NumDataPoints))
# Remove extreme periods from normalized data before clustering
NClusters = MinPeriods
if UseExtremePeriods == 1
- if v println("Pre-removal: ", names(ModifiedDataNormalized)) end
- if v println("Extreme Periods: ", string.(ExtremeWksList)) end
+ if v
+ println("Pre-removal: ", names(ModifiedDataNormalized))
+ end
+ if v
+ println("Extreme Periods: ", string.(ExtremeWksList))
+ end
ClusteringInputDF = select(ModifiedDataNormalized, Not(string.(ExtremeWksList)))
- if v println("Post-removal: ", names(ClusteringInputDF)) end
+ if v
+ println("Post-removal: ", names(ClusteringInputDF))
+ end
NClusters -= length(ExtremeWksList)
else
ClusteringInputDF = ModifiedDataNormalized
end
-
##### STEP 3: Clustering
cluster_results = []
# Cluster once regardless of iteration decisions
- push!(cluster_results, cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
+ push!(cluster_results,
+ cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
# Iteratively add worst periods as extreme periods OR increment number of clusters k
# until threshold is met or maximum periods are added (If chosen in inputs)
if (Iterate == 1)
- while (!check_condition(Threshold, last(cluster_results)[1], OldColNames, ScalingMethod, TimestepsPerRepPeriod)) & ((length(ExtremeWksList)+NClusters) < MaxPeriods)
+ while (!check_condition(Threshold,
+ last(cluster_results)[1],
+ OldColNames,
+ ScalingMethod,
+ TimestepsPerRepPeriod)) & ((length(ExtremeWksList) + NClusters) < MaxPeriods)
if IterateMethod == "cluster"
- if v println("Adding a new Cluster! ") end
+ if v
+ println("Adding a new Cluster! ")
+ end
NClusters += 1
- push!(cluster_results, cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
+ push!(cluster_results,
+ cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
elseif (IterateMethod == "extreme") & (UseExtremePeriods == 1)
- if v println("Adding a new Extreme Period! ") end
+ if v
+ println("Adding a new Extreme Period! ")
+ end
worst_period_idx = get_worst_period_idx(last(cluster_results)[1])
removed_period = string(names(ClusteringInputDF)[worst_period_idx])
select!(ClusteringInputDF, Not(worst_period_idx))
push!(ExtremeWksList, parse(Int, removed_period))
- if v println(worst_period_idx, " (", removed_period, ") ", ExtremeWksList) end
- push!(cluster_results, cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
+ if v
+ println(worst_period_idx, " (", removed_period, ") ", ExtremeWksList)
+ end
+ push!(cluster_results,
+ cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
elseif IterateMethod == "extreme"
- println("INVALID IterateMethod ", IterateMethod, " because UseExtremePeriods is off. Set to 1 if you wish to add extreme periods.")
+ println("INVALID IterateMethod ",
+ IterateMethod,
+ " because UseExtremePeriods is off. Set to 1 if you wish to add extreme periods.")
break
else
- println("INVALID IterateMethod ", IterateMethod, ". Choose 'cluster' or 'extreme'.")
+ println("INVALID IterateMethod ",
+ IterateMethod,
+ ". Choose 'cluster' or 'extreme'.")
break
end
end
- if v && (length(ExtremeWksList)+NClusters == MaxPeriods)
+ if v && (length(ExtremeWksList) + NClusters == MaxPeriods)
println("Stopped iterating by hitting the maximum number of periods.")
elseif v
println("Stopped by meeting the accuracy threshold.")
@@ -842,7 +1053,9 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# ClusterInputDF Reframing of Centers/Medoids (i.e., alphabetical as opposed to indices, same order)
M = [parse(Int64, string(names(ClusteringInputDF)[i])) for i in M]
- if v println("Fixed M: ", M) end
+ if v
+ println("Fixed M: ", M)
+ end
# ClusterInputDF Ordering of All Periods (i.e., alphabetical as opposed to indices)
A_Dict = Dict() # States index of representative period within M for each period a in A
@@ -855,7 +1068,9 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# Add extreme periods into the clustering result with # of occurences = 1 for each
ExtremeWksList = sort(ExtremeWksList)
if UseExtremePeriods == 1
- if v println("Extreme Periods: ", ExtremeWksList) end
+ if v
+ println("Extreme Periods: ", ExtremeWksList)
+ end
M = [M; ExtremeWksList]
A_idx = NClusters + 1
for w in ExtremeWksList
@@ -868,7 +1083,7 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
end
# Recreate A in numeric order (as opposed to ClusterInputDF order)
- A = [A_Dict[i] for i in 1:(length(A)+length(ExtremeWksList))]
+ A = [A_Dict[i] for i in 1:(length(A) + length(ExtremeWksList))]
N = W # Keep cluster version of weights stored as N, number of periods represented by RP
@@ -879,32 +1094,40 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# SORT A W M in conjunction, chronologically by M, before handling them elsewhere to be consistent
# A points to an index of M. We need it to point to a new index of sorted M. Hence, AssignMap.
old_M = M
- df_sort = DataFrame( Weights = W, NumPeriodsRepresented = N, Rep_Period = M)
+ df_sort = DataFrame(Weights = W, NumPeriodsRepresented = N, Rep_Period = M)
sort!(df_sort, [:Rep_Period])
W = df_sort[!, :Weights]
N = df_sort[!, :NumPeriodsRepresented]
M = df_sort[!, :Rep_Period]
- AssignMap = Dict( i => findall(x->x==old_M[i], M)[1] for i in 1:length(M))
+ AssignMap = Dict(i => findall(x -> x == old_M[i], M)[1] for i in 1:length(M))
A = [AssignMap[a] for a in A]
# Make PeriodMap, maps each period to its representative period
PeriodMap = DataFrame(Period_Index = 1:length(A),
- Rep_Period = [M[a] for a in A],
- Rep_Period_Index = [a for a in A])
+ Rep_Period = [M[a] for a in A],
+ Rep_Period_Index = [a for a in A])
# Get Symbol-version of column names by type for later analysis
DemandCols = Symbol.(demand_col_names)
- VarCols = [Symbol(var_col_names[i]) for i in 1:length(var_col_names) ]
- FuelCols = [Symbol(fuel_col_names[i]) for i in 1:length(fuel_col_names) ]
- ConstCol_Syms = [Symbol(ConstCols[i]) for i in 1:length(ConstCols) ]
+ VarCols = [Symbol(var_col_names[i]) for i in 1:length(var_col_names)]
+ FuelCols = [Symbol(fuel_col_names[i]) for i in 1:length(fuel_col_names)]
+ ConstCol_Syms = [Symbol(ConstCols[i]) for i in 1:length(ConstCols)]
# Cluster Ouput: The original data at the medoids/centers
- ClusterOutputData = ModifiedData[:,Symbol.(M)]
+ ClusterOutputData = ModifiedData[:, Symbol.(M)]
# Get zone-wise demand multipliers for later scaling in order for weighted-representative-total-zonal demand to equal original total-zonal demand
# (Only if we don't have demand-related extreme periods because we don't want to change peak demand periods)
if !DemandExtremePeriod
- demand_mults = get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols, TimestepsPerRepPeriod, NewColNames, NClusters, Ncols)
+ demand_mults = get_demand_multipliers(ClusterOutputData,
+ InputData,
+ M,
+ W,
+ DemandCols,
+ TimestepsPerRepPeriod,
+ NewColNames,
+ NClusters,
+ Ncols)
end
# Reorganize Data by Demand, Solar, Wind, Fuel, and GrpWeight by Hour, Add Constant Data Back In
@@ -914,37 +1137,47 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
fpDFs = [] # Fuel Profile DataFrames - Just Fuel Profiles
for m in 1:NClusters
- rpDF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols) )
- gvDF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols if (Symbol(NewColNames[i]) in VarCols)) )
- dmDF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols if (Symbol(NewColNames[i]) in DemandCols)) )
- if IncludeFuel fpDF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols if (Symbol(NewColNames[i]) in FuelCols)) ) end
- if !IncludeFuel fpDF = DataFrame(Placeholder = 1:TimestepsPerRepPeriod) end
+ rpDF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols))
+ gvDF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols if (Symbol(NewColNames[i]) in VarCols)))
+ dmDF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols if (Symbol(NewColNames[i]) in DemandCols)))
+ if IncludeFuel
+ fpDF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols if (Symbol(NewColNames[i]) in FuelCols)))
+ end
+ if !IncludeFuel
+ fpDF = DataFrame(Placeholder = 1:TimestepsPerRepPeriod)
+ end
# Add Constant Columns back in
for c in 1:length(ConstCols)
- rpDF[!,Symbol(ConstCols[c])] .= ConstData[c][1]
+ rpDF[!, Symbol(ConstCols[c])] .= ConstData[c][1]
if Symbol(ConstCols[c]) in VarCols
- gvDF[!,Symbol(ConstCols[c])] .= ConstData[c][1]
+ gvDF[!, Symbol(ConstCols[c])] .= ConstData[c][1]
elseif Symbol(ConstCols[c]) in FuelCols
- fpDF[!,Symbol(ConstCols[c])] .= ConstData[c][1]
+ fpDF[!, Symbol(ConstCols[c])] .= ConstData[c][1]
elseif Symbol(ConstCols[c]) in DemandCols
- dmDF[!,Symbol(ConstCols[c])] .= ConstData[c][1]
+ dmDF[!, Symbol(ConstCols[c])] .= ConstData[c][1]
end
end
- if !IncludeFuel select!(fpDF, Not(:Placeholder)) end
+ if !IncludeFuel
+ select!(fpDF, Not(:Placeholder))
+ end
# Scale Demand using previously identified multipliers
# Scale dmDF but not rpDF which compares to input data but is not written to file.
for demandcol in DemandCols
if demandcol ∉ ConstCol_Syms
if !DemandExtremePeriod
- dmDF[!,demandcol] .*= demand_mults[demandcol]
+ dmDF[!, demandcol] .*= demand_mults[demandcol]
end
end
end
- rpDF[!,:GrpWeight] .= W[m]
- rpDF[!,:Cluster] .= M[m]
+ rpDF[!, :GrpWeight] .= W[m]
+ rpDF[!, :Cluster] .= M[m]
push!(rpDFs, rpDF)
push!(gvDFs, gvDF)
push!(dmDFs, dmDF)
@@ -955,35 +1188,54 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
DMOutputData = vcat(dmDFs...) # Demand Profiles
FPOutputData = vcat(fpDFs...) # Fuel Profiles
-
##### Step 5: Evaluation
- InputDataTest = InputData[(InputData.Group .<= NumDataPoints*1.0), :]
+ InputDataTest = InputData[(InputData.Group .<= NumDataPoints * 1.0), :]
ClusterDataTest = vcat([rpDFs[a] for a in A]...) # To compare fairly, demand is not scaled here
- RMSE = Dict( c => rmse_score(InputDataTest[:, c], ClusterDataTest[:, c]) for c in OldColNames)
+ RMSE = Dict(c => rmse_score(InputDataTest[:, c], ClusterDataTest[:, c])
+ for c in OldColNames)
##### Step 6: Print to File
if MultiStage == 1
- if v print("Outputs: MultiStage") end
+ if v
+ print("Outputs: MultiStage")
+ end
if MultiStageConcatenate == 1
- if v println(" with Concatenation") end
- groups_per_stage = round.(Int, size(A,1)*relative_lengths)
- group_ranges = [if i == 1 1:groups_per_stage[1] else sum(groups_per_stage[1:i-1])+1:sum(groups_per_stage[1:i]) end for i in 1:size(relative_lengths,1)]
+ if v
+ println(" with Concatenation")
+ end
+ groups_per_stage = round.(Int, size(A, 1) * relative_lengths)
+ group_ranges = [if i == 1
+ 1:groups_per_stage[1]
+ else
+ (sum(groups_per_stage[1:(i - 1)]) + 1):sum(groups_per_stage[1:i])
+ end
+ for i in 1:size(relative_lengths, 1)]
Stage_Weights = Dict()
Stage_PeriodMaps = Dict()
Stage_Outfiles = Dict()
- SolarVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_solar_variability.csv")
- WindVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_wind_variability.csv")
+ SolarVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_solar_variability.csv")
+ WindVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_wind_variability.csv")
for per in 1:NumStages # Iterate over multi-stages
- mkpath(joinpath(inpath,"inputs","inputs_p$per", TimeDomainReductionFolder))
+ mkpath(joinpath(inpath,
+ "inputs",
+ "inputs_p$per",
+ TimeDomainReductionFolder))
# Stage-specific weights and mappings
cmap = countmap(A[group_ranges[per]]) # Count number of each rep. period in the planning stage
- weight_props = [ if i in keys(cmap) cmap[i]/N[i] else 0 end for i in 1:size(M,1) ] # Proportions of each rep. period associated with each planning stage
- Stage_Weights[per] = weight_props.*W # Total hours that each rep. period represents within the planning stage
- Stage_PeriodMaps[per] = PeriodMap[group_ranges[per],:]
- Stage_PeriodMaps[per][!,:Period_Index] = 1:(group_ranges[per][end]-group_ranges[per][1]+1)
+ weight_props = [if i in keys(cmap)
+ cmap[i] / N[i]
+ else
+ 0
+ end
+ for i in 1:size(M, 1)] # Proportions of each rep. period associated with each planning stage
+ Stage_Weights[per] = weight_props .* W # Total hours that each rep. period represents within the planning stage
+ Stage_PeriodMaps[per] = PeriodMap[group_ranges[per], :]
+ Stage_PeriodMaps[per][!, :Period_Index] = 1:(group_ranges[per][end] - group_ranges[per][1] + 1)
# Outfiles
Stage_Outfiles[per] = Dict()
Stage_Outfiles[per]["Demand"] = joinpath("inputs_p$per", Demand_Outfile)
@@ -992,239 +1244,354 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
Stage_Outfiles[per]["PMap"] = joinpath("inputs_p$per", PMap_Outfile)
Stage_Outfiles[per]["YAML"] = joinpath("inputs_p$per", YAML_Outfile)
if !isempty(inputs_dict[per]["VRE_STOR"])
- Stage_Outfiles[per]["GSolar"] = joinpath("inputs_p$per", SolarVar_Outfile)
+ Stage_Outfiles[per]["GSolar"] = joinpath("inputs_p$per",
+ SolarVar_Outfile)
Stage_Outfiles[per]["GWind"] = joinpath("inputs_p$per", WindVar_Outfile)
end
# Save output data to stage-specific locations
### TDR_Results/Demand_data_clustered.csv
- demand_in = get_demand_dataframe(joinpath(inpath, "inputs", "inputs_p$per"), mysetup["SystemFolder"])
- demand_in[!,:Sub_Weights] = demand_in[!,:Sub_Weights] * 1.
- demand_in[1:length(Stage_Weights[per]),:Sub_Weights] .= Stage_Weights[per]
- demand_in[!,:Rep_Periods][1] = length(Stage_Weights[per])
- demand_in[!,:Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
+ demand_in = get_demand_dataframe(
+ joinpath(inpath, "inputs", "inputs_p$per"),
+ mysetup["SystemFolder"])
+ demand_in[!, :Sub_Weights] = demand_in[!, :Sub_Weights] * 1.0
+ demand_in[1:length(Stage_Weights[per]), :Sub_Weights] .= Stage_Weights[per]
+ demand_in[!, :Rep_Periods][1] = length(Stage_Weights[per])
+ demand_in[!, :Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
select!(demand_in, Not(DemandCols))
select!(demand_in, Not(:Time_Index))
- Time_Index_M = Union{Int64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- Time_Index_M[1:size(DMOutputData,1)] = 1:size(DMOutputData,1)
- demand_in[!,:Time_Index] .= Time_Index_M
+ Time_Index_M = Union{Int64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ Time_Index_M[1:size(DMOutputData, 1)] = 1:size(DMOutputData, 1)
+ demand_in[!, :Time_Index] .= Time_Index_M
for c in DemandCols
- new_col = Union{Float64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- new_col[1:size(DMOutputData,1)] = DMOutputData[!,c]
- demand_in[!,c] .= new_col
+ new_col = Union{Float64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ new_col[1:size(DMOutputData, 1)] = DMOutputData[!, c]
+ demand_in[!, c] .= new_col
end
- demand_in = demand_in[1:size(DMOutputData,1),:]
+ demand_in = demand_in[1:size(DMOutputData, 1), :]
- if v println("Writing demand file...") end
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["Demand"]), demand_in)
+ if v
+ println("Writing demand file...")
+ end
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["Demand"]),
+ demand_in)
### TDR_Results/Generators_variability.csv
# Reset column ordering, add time index, and solve duplicate column name trouble with CSV.write's header kwarg
- GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i] for i in 1:length(inputs_dict[1]["RESOURCE_NAMES"]))
+ GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i]
+ for i in 1:length(inputs_dict[1]["RESOURCE_NAMES"]))
GVColMap["Time_Index"] = "Time_Index"
GVOutputData = GVOutputData[!, Symbol.(RESOURCE_ZONES)]
- insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData,1))
+ insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData, 1))
NewGVColNames = [GVColMap[string(c)] for c in names(GVOutputData)]
- if v println("Writing resource file...") end
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GVar"]), GVOutputData, header=NewGVColNames)
+ if v
+ println("Writing resource file...")
+ end
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GVar"]),
+ GVOutputData,
+ header = NewGVColNames)
if !isempty(inputs_dict[per]["VRE_STOR"])
- gen_var = load_dataframe(joinpath(inpath, "inputs", Stage_Outfiles[per]["GVar"]))
-
+ gen_var = load_dataframe(joinpath(inpath,
+ "inputs",
+ Stage_Outfiles[per]["GVar"]))
+
# Find which indexes have solar PV/wind names
RESOURCE_ZONES_VRE_STOR = NewGVColNames
solar_col_names = []
wind_col_names = []
for r in 1:length(RESOURCE_ZONES_VRE_STOR)
- if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) || occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) || occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
- push!(solar_col_names,r)
+ if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ push!(solar_col_names, r)
end
- if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) || occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
push!(wind_col_names, r)
end
end
-
+
# Index into dataframe and output them
solar_var = gen_var[!, solar_col_names]
- solar_var[!, :Time_Index] = 1:size(solar_var,1)
+ solar_var[!, :Time_Index] = 1:size(solar_var, 1)
wind_var = gen_var[!, wind_col_names]
- wind_var[!, :Time_Index] = 1:size(wind_var,1)
-
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GSolar"]), solar_var)
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GWind"]), wind_var)
+ wind_var[!, :Time_Index] = 1:size(wind_var, 1)
+
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GSolar"]),
+ solar_var)
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GWind"]),
+ wind_var)
end
### TDR_Results/Fuels_data.csv
- fuel_in = load_dataframe(joinpath(inpath, "inputs", "inputs_p$per", mysetup["SystemFolder"], "Fuels_data.csv"))
+ fuel_in = load_dataframe(joinpath(inpath,
+ "inputs",
+ "inputs_p$per",
+ mysetup["SystemFolder"],
+ "Fuels_data.csv"))
select!(fuel_in, Not(:Time_Index))
SepFirstRow = DataFrame(fuel_in[1, :])
NewFuelOutput = vcat(SepFirstRow, FPOutputData)
rename!(NewFuelOutput, FuelCols)
- insertcols!(NewFuelOutput, 1, :Time_Index => 0:size(NewFuelOutput,1)-1)
- if v println("Writing fuel profiles...") end
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["Fuel"]), NewFuelOutput)
+ insertcols!(NewFuelOutput, 1, :Time_Index => 0:(size(NewFuelOutput, 1) - 1))
+ if v
+ println("Writing fuel profiles...")
+ end
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["Fuel"]),
+ NewFuelOutput)
### TDR_Results/Period_map.csv
- if v println("Writing period map...") end
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["PMap"]), Stage_PeriodMaps[per])
+ if v
+ println("Writing period map...")
+ end
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["PMap"]),
+ Stage_PeriodMaps[per])
### TDR_Results/time_domain_reduction_settings.yml
- if v println("Writing .yml settings...") end
- YAML.write_file(joinpath(inpath, "inputs", Stage_Outfiles[per]["YAML"]), myTDRsetup)
-
+ if v
+ println("Writing .yml settings...")
+ end
+ YAML.write_file(joinpath(inpath, "inputs", Stage_Outfiles[per]["YAML"]),
+ myTDRsetup)
end
else
- if v print("without Concatenation has not yet been fully implemented. ") end
- if v println("( STAGE ", stage_id, " )") end
- input_stage_directory = "inputs_p"*string(stage_id)
- mkpath(joinpath(inpath,"inputs",input_stage_directory, TimeDomainReductionFolder))
+ if v
+ print("without Concatenation has not yet been fully implemented. ")
+ end
+ if v
+ println("( STAGE ", stage_id, " )")
+ end
+ input_stage_directory = "inputs_p" * string(stage_id)
+ mkpath(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ TimeDomainReductionFolder))
### TDR_Results/Demand_data.csv
- demand_in = get_demand_dataframe(joinpath(inpath, "inputs", input_stage_directory, mysetup["SystemFolder"]))
- demand_in[!,:Sub_Weights] = demand_in[!,:Sub_Weights] * 1.
- demand_in[1:length(W),:Sub_Weights] .= W
- demand_in[!,:Rep_Periods][1] = length(W)
- demand_in[!,:Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
+ demand_in = get_demand_dataframe(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ mysetup["SystemFolder"]))
+ demand_in[!, :Sub_Weights] = demand_in[!, :Sub_Weights] * 1.0
+ demand_in[1:length(W), :Sub_Weights] .= W
+ demand_in[!, :Rep_Periods][1] = length(W)
+ demand_in[!, :Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
select!(demand_in, Not(DemandCols))
select!(demand_in, Not(:Time_Index))
- Time_Index_M = Union{Int64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- Time_Index_M[1:size(DMOutputData,1)] = 1:size(DMOutputData,1)
- demand_in[!,:Time_Index] .= Time_Index_M
+ Time_Index_M = Union{Int64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ Time_Index_M[1:size(DMOutputData, 1)] = 1:size(DMOutputData, 1)
+ demand_in[!, :Time_Index] .= Time_Index_M
for c in DemandCols
- new_col = Union{Float64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- new_col[1:size(DMOutputData,1)] = DMOutputData[!,c]
- demand_in[!,c] .= new_col
+ new_col = Union{Float64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ new_col[1:size(DMOutputData, 1)] = DMOutputData[!, c]
+ demand_in[!, c] .= new_col
end
- demand_in = demand_in[1:size(DMOutputData,1),:]
+ demand_in = demand_in[1:size(DMOutputData, 1), :]
- if v println("Writing demand file...") end
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,Demand_Outfile), demand_in)
+ if v
+ println("Writing demand file...")
+ end
+ CSV.write(joinpath(inpath, "inputs", input_stage_directory, Demand_Outfile),
+ demand_in)
### TDR_Results/Generators_variability.csv
# Reset column ordering, add time index, and solve duplicate column name trouble with CSV.write's header kwarg
- GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i] for i in 1:length(myinputs["RESOURCE_NAMES"]))
+ GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i]
+ for i in 1:length(myinputs["RESOURCE_NAMES"]))
GVColMap["Time_Index"] = "Time_Index"
GVOutputData = GVOutputData[!, Symbol.(RESOURCE_ZONES)]
- insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData,1))
+ insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData, 1))
NewGVColNames = [GVColMap[string(c)] for c in names(GVOutputData)]
- if v println("Writing resource file...") end
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,GVar_Outfile), GVOutputData, header=NewGVColNames)
+ if v
+ println("Writing resource file...")
+ end
+ CSV.write(joinpath(inpath, "inputs", input_stage_directory, GVar_Outfile),
+ GVOutputData,
+ header = NewGVColNames)
# Break up VRE-storage components if needed
if !isempty(myinputs["VRE_STOR"])
- gen_var = load_dataframe(joinpath(inpath,"inputs",input_stage_directory,GVar_Outfile))
+ gen_var = load_dataframe(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ GVar_Outfile))
# Find which indexes have solar PV/wind names
RESOURCE_ZONES_VRE_STOR = NewGVColNames
solar_col_names = []
wind_col_names = []
for r in 1:length(RESOURCE_ZONES_VRE_STOR)
- if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) || occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) || occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
- push!(solar_col_names,r)
+ if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ push!(solar_col_names, r)
end
- if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) || occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
push!(wind_col_names, r)
end
end
# Index into dataframe and output them
solar_var = gen_var[!, solar_col_names]
- solar_var[!, :Time_Index] = 1:size(solar_var,1)
+ solar_var[!, :Time_Index] = 1:size(solar_var, 1)
wind_var = gen_var[!, wind_col_names]
- wind_var[!, :Time_Index] = 1:size(wind_var,1)
-
- SolarVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_solar_variability.csv")
- WindVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_wind_variability.csv")
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,SolarVar_Outfile), solar_var)
- CSV.write(joinpath(inpath,"inputs",input_stage_directory, WindVar_Outfile), wind_var)
+ wind_var[!, :Time_Index] = 1:size(wind_var, 1)
+
+ SolarVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_solar_variability.csv")
+ WindVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_wind_variability.csv")
+ CSV.write(
+ joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ SolarVar_Outfile),
+ solar_var)
+ CSV.write(
+ joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ WindVar_Outfile),
+ wind_var)
end
### TDR_Results/Fuels_data.csv
- fuel_in = load_dataframe(joinpath(inpath, "inputs", input_stage_directory, mysetup["SystemFolder"], "Fuels_data.csv"))
+ fuel_in = load_dataframe(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ mysetup["SystemFolder"],
+ "Fuels_data.csv"))
select!(fuel_in, Not(:Time_Index))
SepFirstRow = DataFrame(fuel_in[1, :])
NewFuelOutput = vcat(SepFirstRow, FPOutputData)
rename!(NewFuelOutput, FuelCols)
- insertcols!(NewFuelOutput, 1, :Time_Index => 0:size(NewFuelOutput,1)-1)
- if v println("Writing fuel profiles...") end
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,Fuel_Outfile), NewFuelOutput)
+ insertcols!(NewFuelOutput, 1, :Time_Index => 0:(size(NewFuelOutput, 1) - 1))
+ if v
+ println("Writing fuel profiles...")
+ end
+ CSV.write(joinpath(inpath, "inputs", input_stage_directory, Fuel_Outfile),
+ NewFuelOutput)
### Period_map.csv
- if v println("Writing period map...") end
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,PMap_Outfile), PeriodMap)
+ if v
+ println("Writing period map...")
+ end
+ CSV.write(joinpath(inpath, "inputs", input_stage_directory, PMap_Outfile),
+ PeriodMap)
### time_domain_reduction_settings.yml
- if v println("Writing .yml settings...") end
- YAML.write_file(joinpath(inpath,"inputs",input_stage_directory,YAML_Outfile), myTDRsetup)
+ if v
+ println("Writing .yml settings...")
+ end
+ YAML.write_file(
+ joinpath(inpath, "inputs", input_stage_directory, YAML_Outfile),
+ myTDRsetup)
end
else
- if v println("Outputs: Single-Stage") end
+ if v
+ println("Outputs: Single-Stage")
+ end
mkpath(joinpath(inpath, TimeDomainReductionFolder))
### TDR_Results/Demand_data.csv
system_path = joinpath(inpath, mysetup["SystemFolder"])
demand_in = get_demand_dataframe(system_path)
- demand_in[!,:Sub_Weights] = demand_in[!,:Sub_Weights] * 1.
- demand_in[1:length(W),:Sub_Weights] .= W
- demand_in[!,:Rep_Periods][1] = length(W)
- demand_in[!,:Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
+ demand_in[!, :Sub_Weights] = demand_in[!, :Sub_Weights] * 1.0
+ demand_in[1:length(W), :Sub_Weights] .= W
+ demand_in[!, :Rep_Periods][1] = length(W)
+ demand_in[!, :Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
select!(demand_in, Not(DemandCols))
select!(demand_in, Not(:Time_Index))
- Time_Index_M = Union{Int64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- Time_Index_M[1:size(DMOutputData,1)] = 1:size(DMOutputData,1)
- demand_in[!,:Time_Index] .= Time_Index_M
+ Time_Index_M = Union{Int64, Missings.Missing}[missing for i in 1:size(demand_in, 1)]
+ Time_Index_M[1:size(DMOutputData, 1)] = 1:size(DMOutputData, 1)
+ demand_in[!, :Time_Index] .= Time_Index_M
for c in DemandCols
- new_col = Union{Float64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- new_col[1:size(DMOutputData,1)] = DMOutputData[!,c]
- demand_in[!,c] .= new_col
+ new_col = Union{Float64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ new_col[1:size(DMOutputData, 1)] = DMOutputData[!, c]
+ demand_in[!, c] .= new_col
end
- demand_in = demand_in[1:size(DMOutputData,1),:]
+ demand_in = demand_in[1:size(DMOutputData, 1), :]
- if v println("Writing demand file...") end
+ if v
+ println("Writing demand file...")
+ end
CSV.write(joinpath(inpath, Demand_Outfile), demand_in)
### TDR_Results/Generators_variability.csv
# Reset column ordering, add time index, and solve duplicate column name trouble with CSV.write's header kwarg
- GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i] for i in 1:length(myinputs["RESOURCE_NAMES"]))
+ GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i]
+ for i in 1:length(myinputs["RESOURCE_NAMES"]))
GVColMap["Time_Index"] = "Time_Index"
GVOutputData = GVOutputData[!, Symbol.(RESOURCE_ZONES)]
- insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData,1))
+ insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData, 1))
NewGVColNames = [GVColMap[string(c)] for c in names(GVOutputData)]
- if v println("Writing resource file...") end
- CSV.write(joinpath(inpath, GVar_Outfile), GVOutputData, header=NewGVColNames)
+ if v
+ println("Writing resource file...")
+ end
+ CSV.write(joinpath(inpath, GVar_Outfile), GVOutputData, header = NewGVColNames)
# Break up VRE-storage components if needed
if !isempty(myinputs["VRE_STOR"])
- gen_var = load_dataframe(joinpath(inpath,GVar_Outfile))
+ gen_var = load_dataframe(joinpath(inpath, GVar_Outfile))
# Find which indexes have solar PV/wind names
RESOURCE_ZONES_VRE_STOR = NewGVColNames
solar_col_names = []
wind_col_names = []
for r in 1:length(RESOURCE_ZONES_VRE_STOR)
- if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) || occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) || occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
- push!(solar_col_names,r)
+ if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ push!(solar_col_names, r)
end
- if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) || occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
push!(wind_col_names, r)
end
end
# Index into dataframe and output them
solar_var = gen_var[!, solar_col_names]
- solar_var[!, :Time_Index] = 1:size(solar_var,1)
+ solar_var[!, :Time_Index] = 1:size(solar_var, 1)
wind_var = gen_var[!, wind_col_names]
- wind_var[!, :Time_Index] = 1:size(wind_var,1)
+ wind_var[!, :Time_Index] = 1:size(wind_var, 1)
- SolarVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_solar_variability.csv")
- WindVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_wind_variability.csv")
+ SolarVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_solar_variability.csv")
+ WindVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_wind_variability.csv")
CSV.write(joinpath(inpath, SolarVar_Outfile), solar_var)
CSV.write(joinpath(inpath, WindVar_Outfile), wind_var)
end
@@ -1236,26 +1603,32 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
SepFirstRow = DataFrame(fuel_in[1, :])
NewFuelOutput = vcat(SepFirstRow, FPOutputData)
rename!(NewFuelOutput, FuelCols)
- insertcols!(NewFuelOutput, 1, :Time_Index => 0:size(NewFuelOutput,1)-1)
- if v println("Writing fuel profiles...") end
+ insertcols!(NewFuelOutput, 1, :Time_Index => 0:(size(NewFuelOutput, 1) - 1))
+ if v
+ println("Writing fuel profiles...")
+ end
CSV.write(joinpath(inpath, Fuel_Outfile), NewFuelOutput)
### TDR_Results/Period_map.csv
- if v println("Writing period map...") end
+ if v
+ println("Writing period map...")
+ end
CSV.write(joinpath(inpath, PMap_Outfile), PeriodMap)
### TDR_Results/time_domain_reduction_settings.yml
- if v println("Writing .yml settings...") end
+ if v
+ println("Writing .yml settings...")
+ end
YAML.write_file(joinpath(inpath, YAML_Outfile), myTDRsetup)
end
return Dict("OutputDF" => FinalOutputData,
- "InputDF" => ClusteringInputDF,
- "ColToZoneMap" => col_to_zone_map,
- "TDRsetup" => myTDRsetup,
- "ClusterObject" => R,
- "Assignments" => A,
- "Weights" => W,
- "Centers" => M,
- "RMSE" => RMSE)
+ "InputDF" => ClusteringInputDF,
+ "ColToZoneMap" => col_to_zone_map,
+ "TDRsetup" => myTDRsetup,
+ "ClusterObject" => R,
+ "Assignments" => A,
+ "Weights" => W,
+ "Centers" => M,
+ "RMSE" => RMSE)
end
diff --git a/src/write_outputs/capacity_reserve_margin/effective_capacity.jl b/src/write_outputs/capacity_reserve_margin/effective_capacity.jl
index 3e15d5ca66..88a5e9f2b0 100644
--- a/src/write_outputs/capacity_reserve_margin/effective_capacity.jl
+++ b/src/write_outputs/capacity_reserve_margin/effective_capacity.jl
@@ -3,25 +3,20 @@
inputs::Dict,
resources::Vector{Int},
capres_zone::Int,
- timesteps::Vector{Int})::Matrix{Float64}
+ timesteps::Vector{Int})::Matrix{Float64})
Effective capacity in a capacity reserve margin zone for certain resources in the given timesteps.
"""
-function thermal_plant_effective_capacity(
- EP,
- inputs,
- resources::Vector{Int},
- capres_zone::Int,
- timesteps::Vector{Int},
-)::Matrix{Float64}
- eff_cap =
- thermal_plant_effective_capacity.(
- Ref(EP),
- Ref(inputs),
- resources,
- Ref(capres_zone),
- Ref(timesteps),
- )
+function thermal_plant_effective_capacity(EP,
+ inputs,
+ resources::Vector{Int},
+ capres_zone::Int,
+ timesteps::Vector{Int})::Matrix{Float64}
+ eff_cap = thermal_plant_effective_capacity.(Ref(EP),
+ Ref(inputs),
+ resources,
+ Ref(capres_zone),
+ Ref(timesteps))
return reduce(hcat, eff_cap)
end
@@ -31,24 +26,26 @@ function thermal_plant_effective_capacity(EP::Model, inputs::Dict, y, capres_zon
return thermal_plant_effective_capacity(EP, inputs, y, capres_zone, timesteps)
end
-function thermal_plant_effective_capacity(
- EP::Model,
- inputs::Dict,
- r_id::Int,
- capres_zone::Int,
- timesteps::Vector{Int},
-)::Vector{Float64}
+function thermal_plant_effective_capacity(EP::Model,
+ inputs::Dict,
+ r_id::Int,
+ capres_zone::Int,
+ timesteps::Vector{Int})::Vector{Float64}
y = r_id
gen = inputs["RESOURCES"]
- capresfactor = derating_factor(gen[y], tag=capres_zone)
+ capresfactor = derating_factor(gen[y], tag = capres_zone)
eTotalCap = value.(EP[:eTotalCap][y])
effective_capacity = fill(capresfactor * eTotalCap, length(timesteps))
if has_maintenance(inputs) && y in ids_with_maintenance(gen)
- adjustment = thermal_maintenance_capacity_reserve_margin_adjustment(EP, inputs, y, capres_zone, timesteps)
- effective_capacity = effective_capacity .+ value.(adjustment)
- end
+ adjustment = thermal_maintenance_capacity_reserve_margin_adjustment(EP,
+ inputs,
+ y,
+ capres_zone,
+ timesteps)
+ effective_capacity = effective_capacity .+ value.(adjustment)
+ end
return effective_capacity
end
diff --git a/src/write_outputs/capacity_reserve_margin/write_capacity_value.jl b/src/write_outputs/capacity_reserve_margin/write_capacity_value.jl
index 747bf5602b..b1aa431bb6 100644
--- a/src/write_outputs/capacity_reserve_margin/write_capacity_value.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_capacity_value.jl
@@ -1,16 +1,33 @@
+@doc raw"""
+ write_capacity_value(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+
+This is the value of the derated capacities of different types of resources multiplied by the power generated by each of them
+
+# Arguments
+- path::AbstractString: Path to the directory where the file will be written.
+- inputs::Dict: Dictionary of input data.
+- setup::Dict: Dictionary of setup data.
+- EP::Model: EnergyModel object.
+
+# Results
+- A CSV file named "CapacityValue.csv" is written to the directory specified by `path`.
+"""
function write_capacity_value(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
-
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- THERM_ALL = inputs["THERM_ALL"]
- VRE = inputs["VRE"]
- HYDRO_RES = inputs["HYDRO_RES"]
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- MUST_RUN = inputs["MUST_RUN"]
- VRE_STOR = inputs["VRE_STOR"]
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
+
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ THERM_ALL = inputs["THERM_ALL"]
+ VRE = inputs["VRE"]
+ HYDRO_RES = inputs["HYDRO_RES"]
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ MUST_RUN = inputs["MUST_RUN"]
+ VRE_STOR = inputs["VRE_STOR"]
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
eTotalCap = value.(EP[:eTotalCap])
@@ -18,88 +35,118 @@ function write_capacity_value(path::AbstractString, inputs::Dict, setup::Dict, E
minimum_plant_size = 1 # MW
large_plants = findall(>=(minimum_plant_size), eTotalCap * scale_factor)
- THERM_ALL_EX = intersect(THERM_ALL, large_plants)
- VRE_EX = intersect(VRE, large_plants)
- HYDRO_RES_EX = intersect(HYDRO_RES, large_plants)
- STOR_ALL_EX = intersect(STOR_ALL, large_plants)
- FLEX_EX = intersect(FLEX, large_plants)
- MUST_RUN_EX = intersect(MUST_RUN, large_plants)
- # Will only be activated if grid connection capacity exists (because may build standalone storage/VRE, which will only be telling by grid connection capacity)
- VRE_STOR_EX = intersect(VRE_STOR, large_plants)
- if !isempty(VRE_STOR_EX)
- DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
- DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
- VRE_STOR_STOR_EX = intersect(inputs["VS_STOR"], VRE_STOR_EX)
- DC_DISCHARGE_EX = intersect(DC_DISCHARGE, VRE_STOR_EX)
- AC_DISCHARGE_EX = intersect(inputs["VS_STOR_AC_DISCHARGE"], VRE_STOR_EX)
- DC_CHARGE_EX = intersect(DC_CHARGE, VRE_STOR_EX)
- AC_CHARGE_EX = intersect(inputs["VS_STOR_AC_CHARGE"], VRE_STOR_EX)
- end
-
- crm_derate(i, y::Vector{Int}) = derating_factor.(gen[y], tag=i)'
+ THERM_ALL_EX = intersect(THERM_ALL, large_plants)
+ VRE_EX = intersect(VRE, large_plants)
+ HYDRO_RES_EX = intersect(HYDRO_RES, large_plants)
+ STOR_ALL_EX = intersect(STOR_ALL, large_plants)
+ FLEX_EX = intersect(FLEX, large_plants)
+ MUST_RUN_EX = intersect(MUST_RUN, large_plants)
+ # Will only be activated if grid connection capacity exists (because may build standalone storage/VRE, which will only be telling by grid connection capacity)
+ VRE_STOR_EX = intersect(VRE_STOR, large_plants)
+ if !isempty(VRE_STOR_EX)
+ DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
+ DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
+ VRE_STOR_STOR_EX = intersect(inputs["VS_STOR"], VRE_STOR_EX)
+ DC_DISCHARGE_EX = intersect(DC_DISCHARGE, VRE_STOR_EX)
+ AC_DISCHARGE_EX = intersect(inputs["VS_STOR_AC_DISCHARGE"], VRE_STOR_EX)
+ DC_CHARGE_EX = intersect(DC_CHARGE, VRE_STOR_EX)
+ AC_CHARGE_EX = intersect(inputs["VS_STOR_AC_CHARGE"], VRE_STOR_EX)
+ end
+
+ crm_derate(i, y::Vector{Int}) = derating_factor.(gen[y], tag = i)'
max_power(t::Vector{Int}, y::Vector{Int}) = inputs["pP_Max"][y, t]'
total_cap(y::Vector{Int}) = eTotalCap[y]'
- dfCapValue = DataFrame()
- for i in 1:inputs["NCapacityReserveMargin"]
+ dfCapValue = DataFrame()
+ for i in 1:inputs["NCapacityReserveMargin"]
capvalue = zeros(T, G)
minimum_crm_price = 1 # $/MW
- riskyhour = findall(>=(minimum_crm_price), capacity_reserve_margin_price(EP, inputs, setup, i))
+ riskyhour = findall(>=(minimum_crm_price),
+ capacity_reserve_margin_price(EP, inputs, setup, i))
power(y::Vector{Int}) = value.(EP[:vP][y, riskyhour])'
- capvalue[riskyhour, THERM_ALL_EX] = thermal_plant_effective_capacity(EP, inputs, THERM_ALL_EX, i, riskyhour) ./ total_cap(THERM_ALL_EX)
+ capvalue[riskyhour, THERM_ALL_EX] = thermal_plant_effective_capacity(EP,
+ inputs,
+ THERM_ALL_EX,
+ i,
+ riskyhour) ./ total_cap(THERM_ALL_EX)
capvalue[riskyhour, VRE_EX] = crm_derate(i, VRE_EX) .* max_power(riskyhour, VRE_EX)
- capvalue[riskyhour, MUST_RUN_EX] = crm_derate(i, MUST_RUN_EX) .* max_power(riskyhour, MUST_RUN_EX)
+ capvalue[riskyhour, MUST_RUN_EX] = crm_derate(i, MUST_RUN_EX) .*
+ max_power(riskyhour, MUST_RUN_EX)
- capvalue[riskyhour, HYDRO_RES_EX] = crm_derate(i, HYDRO_RES_EX) .* power(HYDRO_RES_EX) ./ total_cap(HYDRO_RES_EX)
+ capvalue[riskyhour, HYDRO_RES_EX] = crm_derate(i, HYDRO_RES_EX) .*
+ power(HYDRO_RES_EX) ./ total_cap(HYDRO_RES_EX)
- if !isempty(STOR_ALL_EX)
+ if !isempty(STOR_ALL_EX)
charge = value.(EP[:vCHARGE][STOR_ALL_EX, riskyhour].data)'
capres_discharge = value.(EP[:vCAPRES_discharge][STOR_ALL_EX, riskyhour].data)'
capres_charge = value.(EP[:vCAPRES_charge][STOR_ALL_EX, riskyhour].data)'
- capvalue[riskyhour, STOR_ALL_EX] = crm_derate(i, STOR_ALL_EX) .* (power(STOR_ALL_EX) - charge + capres_discharge - capres_charge) ./ total_cap(STOR_ALL_EX)
- end
+ capvalue[riskyhour, STOR_ALL_EX] = crm_derate(i, STOR_ALL_EX) .*
+ (power(STOR_ALL_EX) - charge +
+ capres_discharge - capres_charge) ./
+ total_cap(STOR_ALL_EX)
+ end
- if !isempty(FLEX_EX)
+ if !isempty(FLEX_EX)
charge = value.(EP[:vCHARGE_FLEX][FLEX_EX, riskyhour].data)'
- capvalue[riskyhour, FLEX_EX] = crm_derate(i, FLEX_EX) .* (charge - power(FLEX_EX)) ./ total_cap(FLEX_EX)
- end
- if !isempty(VRE_STOR_EX)
- capres_dc_discharge = value.(EP[:vCAPRES_DC_DISCHARGE][DC_DISCHARGE, riskyhour].data)'
+ capvalue[riskyhour, FLEX_EX] = crm_derate(i, FLEX_EX) .*
+ (charge - power(FLEX_EX)) ./ total_cap(FLEX_EX)
+ end
+ if !isempty(VRE_STOR_EX)
+ capres_dc_discharge = value.(EP[:vCAPRES_DC_DISCHARGE][DC_DISCHARGE,
+ riskyhour].data)'
discharge_eff = etainverter.(gen[storage_dc_discharge(gen)])'
capvalue_dc_discharge = zeros(T, G)
- capvalue_dc_discharge[riskyhour, DC_DISCHARGE] = capres_dc_discharge .* discharge_eff
+ capvalue_dc_discharge[riskyhour, DC_DISCHARGE] = capres_dc_discharge .*
+ discharge_eff
capres_dc_charge = value.(EP[:vCAPRES_DC_CHARGE][DC_CHARGE, riskyhour].data)'
charge_eff = etainverter.(gen[storage_dc_charge(gen)])'
capvalue_dc_charge = zeros(T, G)
capvalue_dc_charge[riskyhour, DC_CHARGE] = capres_dc_charge ./ charge_eff
- capvalue[riskyhour, VRE_STOR_EX] = crm_derate(i, VRE_STOR_EX) .* power(VRE_STOR_EX) ./ total_cap(VRE_STOR_EX)
-
- charge_vre_stor = value.(EP[:vCHARGE_VRE_STOR][VRE_STOR_STOR_EX, riskyhour].data)'
- capvalue[riskyhour, VRE_STOR_STOR_EX] -= crm_derate(i, VRE_STOR_STOR_EX) .* charge_vre_stor ./ total_cap(VRE_STOR_STOR_EX)
-
- capvalue[riskyhour, DC_DISCHARGE_EX] += crm_derate(i, DC_DISCHARGE_EX) .* capvalue_dc_discharge[riskyhour, DC_DISCHARGE_EX] ./ total_cap(DC_DISCHARGE_EX)
- capres_ac_discharge = value.(EP[:vCAPRES_AC_DISCHARGE][AC_DISCHARGE_EX, riskyhour].data)'
- capvalue[riskyhour, AC_DISCHARGE_EX] += crm_derate(i, AC_DISCHARGE_EX) .* capres_ac_discharge ./ total_cap(AC_DISCHARGE_EX)
-
- capvalue[riskyhour, DC_CHARGE_EX] -= crm_derate(i, DC_CHARGE_EX) .* capvalue_dc_charge[riskyhour, DC_CHARGE_EX] ./ total_cap(DC_CHARGE_EX)
+ capvalue[riskyhour, VRE_STOR_EX] = crm_derate(i, VRE_STOR_EX) .*
+ power(VRE_STOR_EX) ./ total_cap(VRE_STOR_EX)
+
+ charge_vre_stor = value.(EP[:vCHARGE_VRE_STOR][VRE_STOR_STOR_EX,
+ riskyhour].data)'
+ capvalue[riskyhour, VRE_STOR_STOR_EX] -= crm_derate(i, VRE_STOR_STOR_EX) .*
+ charge_vre_stor ./
+ total_cap(VRE_STOR_STOR_EX)
+
+ capvalue[riskyhour, DC_DISCHARGE_EX] += crm_derate(i, DC_DISCHARGE_EX) .*
+ capvalue_dc_discharge[riskyhour,
+ DC_DISCHARGE_EX] ./ total_cap(DC_DISCHARGE_EX)
+ capres_ac_discharge = value.(EP[:vCAPRES_AC_DISCHARGE][AC_DISCHARGE_EX,
+ riskyhour].data)'
+ capvalue[riskyhour, AC_DISCHARGE_EX] += crm_derate(i, AC_DISCHARGE_EX) .*
+ capres_ac_discharge ./
+ total_cap(AC_DISCHARGE_EX)
+
+ capvalue[riskyhour, DC_CHARGE_EX] -= crm_derate(i, DC_CHARGE_EX) .*
+ capvalue_dc_charge[riskyhour,
+ DC_CHARGE_EX] ./ total_cap(DC_CHARGE_EX)
capres_ac_charge = value.(EP[:vCAPRES_AC_CHARGE][AC_CHARGE_EX, riskyhour].data)'
- capvalue[riskyhour, AC_CHARGE_EX] -= crm_derate(i, AC_CHARGE_EX) .* capres_ac_charge ./ total_cap(AC_CHARGE_EX)
- end
+ capvalue[riskyhour, AC_CHARGE_EX] -= crm_derate(i, AC_CHARGE_EX) .*
+ capres_ac_charge ./ total_cap(AC_CHARGE_EX)
+ end
capvalue = collect(transpose(capvalue))
- temp_dfCapValue = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones, Reserve = fill(Symbol("CapRes_$i"), G))
- temp_dfCapValue = hcat(temp_dfCapValue, DataFrame(capvalue, :auto))
- auxNew_Names = [Symbol("Resource"); Symbol("Zone"); Symbol("Reserve"); [Symbol("t$t") for t in 1:T]]
- rename!(temp_dfCapValue, auxNew_Names)
- append!(dfCapValue, temp_dfCapValue)
- end
+ temp_dfCapValue = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Reserve = fill(Symbol("CapRes_$i"), G))
+ temp_dfCapValue = hcat(temp_dfCapValue, DataFrame(capvalue, :auto))
+ auxNew_Names = [Symbol("Resource");
+ Symbol("Zone");
+ Symbol("Reserve");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(temp_dfCapValue, auxNew_Names)
+ append!(dfCapValue, temp_dfCapValue)
+ end
write_simple_csv(joinpath(path, "CapacityValue.csv"), dfCapValue)
end
@@ -109,17 +156,15 @@ end
setup::Dict,
capres_zone::Int)::Vector{Float64}
-Marginal electricity price for each model zone and time step.
-This is equal to the dual variable of the power balance constraint.
-When solving a linear program (i.e. linearized unit commitment or economic dispatch)
-this output is always available; when solving a mixed integer linear program, this can
-be calculated only if `WriteShadowPrices` is activated.
-
- Returns a vector, with units of $/MW
+Marginal price for capacity constraint.
+This is equal to the dual variable of the capacity constraint.
+Returns a vector, with units of $/MW
"""
-function capacity_reserve_margin_price(EP::Model, inputs::Dict, setup::Dict, capres_zone::Int)::Vector{Float64}
+function capacity_reserve_margin_price(EP::Model,
+ inputs::Dict,
+ setup::Dict,
+ capres_zone::Int)::Vector{Float64}
ω = inputs["omega"]
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
return dual.(EP[:cCapacityResMargin][capres_zone, :]) ./ ω * scale_factor
end
-
diff --git a/src/write_outputs/capacity_reserve_margin/write_reserve_margin.jl b/src/write_outputs/capacity_reserve_margin/write_reserve_margin.jl
index 6d2f8c2e80..1eeca0ef0e 100644
--- a/src/write_outputs/capacity_reserve_margin/write_reserve_margin.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_reserve_margin.jl
@@ -1,9 +1,9 @@
function write_reserve_margin(path::AbstractString, setup::Dict, EP::Model)
- temp_ResMar = dual.(EP[:cCapacityResMargin])
- if setup["ParameterScale"] == 1
- temp_ResMar = temp_ResMar * ModelScalingFactor # Convert from MillionUS$/GWh to US$/MWh
- end
- dfResMar = DataFrame(temp_ResMar, :auto)
- CSV.write(joinpath(path, "ReserveMargin.csv"), dfResMar)
- return nothing
+ temp_ResMar = dual.(EP[:cCapacityResMargin])
+ if setup["ParameterScale"] == 1
+ temp_ResMar = temp_ResMar * ModelScalingFactor # Convert from MillionUS$/GWh to US$/MWh
+ end
+ dfResMar = DataFrame(temp_ResMar, :auto)
+ CSV.write(joinpath(path, "ReserveMargin.csv"), dfResMar)
+ return nothing
end
diff --git a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_revenue.jl b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_revenue.jl
index 629cc76756..707147556b 100644
--- a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_revenue.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_revenue.jl
@@ -8,60 +8,100 @@ Function for reporting the capacity revenue earned by each generator listed in t
The last column is the total revenue received from all capacity reserve margin constraints.
As a reminder, GenX models the capacity reserve margin (aka capacity market) at the time-dependent level, and each constraint either stands for an overall market or a locality constraint.
"""
-function write_reserve_margin_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_reserve_margin_revenue(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
-
- gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
+ gen = inputs["RESOURCES"]
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- THERM_ALL = inputs["THERM_ALL"]
- VRE = inputs["VRE"]
- HYDRO_RES = inputs["HYDRO_RES"]
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- MUST_RUN = inputs["MUST_RUN"]
- VRE_STOR = inputs["VRE_STOR"]
- if !isempty(VRE_STOR)
- VRE_STOR_STOR = inputs["VS_STOR"]
- DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
- AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
- DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
- AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
- end
- dfResRevenue = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters)
- annual_sum = zeros(G)
- for i in 1:inputs["NCapacityReserveMargin"]
- weighted_price = capacity_reserve_margin_price(EP, inputs, setup, i) .* inputs["omega"]
- tempresrev = zeros(G)
- tempresrev[THERM_ALL] = thermal_plant_effective_capacity(EP, inputs, THERM_ALL, i)' * weighted_price
- tempresrev[VRE] = derating_factor.(gen.Vre, tag=i) .* (value.(EP[:eTotalCap][VRE])) .* (inputs["pP_Max"][VRE, :] * weighted_price)
- tempresrev[MUST_RUN] = derating_factor.(gen.MustRun, tag=i) .* (value.(EP[:eTotalCap][MUST_RUN])) .* (inputs["pP_Max"][MUST_RUN, :] * weighted_price)
- tempresrev[HYDRO_RES] = derating_factor.(gen.Hydro, tag=i) .* (value.(EP[:vP][HYDRO_RES, :]) * weighted_price)
- if !isempty(STOR_ALL)
- tempresrev[STOR_ALL] = derating_factor.(gen.Storage, tag=i) .* ((value.(EP[:vP][STOR_ALL, :]) - value.(EP[:vCHARGE][STOR_ALL, :]).data + value.(EP[:vCAPRES_discharge][STOR_ALL, :]).data - value.(EP[:vCAPRES_charge][STOR_ALL, :]).data) * weighted_price)
- end
- if !isempty(FLEX)
- tempresrev[FLEX] = derating_factor.(gen.FlexDemand, tag=i) .* ((value.(EP[:vCHARGE_FLEX][FLEX, :]).data - value.(EP[:vP][FLEX, :])) * weighted_price)
- end
- if !isempty(VRE_STOR)
- gen_VRE_STOR = gen.VreStorage
- tempresrev[VRE_STOR] = derating_factor.(gen_VRE_STOR, tag=i) .* ((value.(EP[:vP][VRE_STOR, :])) * weighted_price)
- tempresrev[VRE_STOR_STOR] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0) .| (gen_VRE_STOR.stor_dc_charge.!=0) .| (gen_VRE_STOR.stor_ac_discharge.!=0) .|(gen_VRE_STOR.stor_ac_charge.!=0)], tag=i) .* (value.(EP[:vCHARGE_VRE_STOR][VRE_STOR_STOR, :]).data * weighted_price)
- tempresrev[DC_DISCHARGE] .+= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)], tag=i) .* ((value.(EP[:vCAPRES_DC_DISCHARGE][DC_DISCHARGE, :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)])) * weighted_price)
- tempresrev[AC_DISCHARGE] .+= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_discharge.!=0)], tag=i) .* ((value.(EP[:vCAPRES_AC_DISCHARGE][AC_DISCHARGE, :]).data) * weighted_price)
- tempresrev[DC_CHARGE] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge.!=0)], tag=i) .* ((value.(EP[:vCAPRES_DC_CHARGE][DC_CHARGE, :]).data ./ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge.!=0)])) * weighted_price)
- tempresrev[AC_CHARGE] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_charge.!=0)], tag=i) .* ((value.(EP[:vCAPRES_AC_CHARGE][AC_CHARGE, :]).data) * weighted_price)
- end
- tempresrev *= scale_factor
- annual_sum .+= tempresrev
- dfResRevenue = hcat(dfResRevenue, DataFrame([tempresrev], [Symbol("CapRes_$i")]))
- end
- dfResRevenue.AnnualSum = annual_sum
- CSV.write(joinpath(path, "ReserveMarginRevenue.csv"), dfResRevenue)
- return dfResRevenue
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ THERM_ALL = inputs["THERM_ALL"]
+ VRE = inputs["VRE"]
+ HYDRO_RES = inputs["HYDRO_RES"]
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ MUST_RUN = inputs["MUST_RUN"]
+ VRE_STOR = inputs["VRE_STOR"]
+ if !isempty(VRE_STOR)
+ VRE_STOR_STOR = inputs["VS_STOR"]
+ DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
+ AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
+ DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
+ AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
+ end
+ dfResRevenue = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters)
+ annual_sum = zeros(G)
+ for i in 1:inputs["NCapacityReserveMargin"]
+ weighted_price = capacity_reserve_margin_price(EP, inputs, setup, i) .*
+ inputs["omega"]
+ tempresrev = zeros(G)
+ tempresrev[THERM_ALL] = thermal_plant_effective_capacity(EP,
+ inputs,
+ THERM_ALL,
+ i)' * weighted_price
+ tempresrev[VRE] = derating_factor.(gen.Vre, tag = i) .*
+ (value.(EP[:eTotalCap][VRE])) .*
+ (inputs["pP_Max"][VRE, :] * weighted_price)
+ tempresrev[MUST_RUN] = derating_factor.(gen.MustRun, tag = i) .*
+ (value.(EP[:eTotalCap][MUST_RUN])) .*
+ (inputs["pP_Max"][MUST_RUN, :] * weighted_price)
+ tempresrev[HYDRO_RES] = derating_factor.(gen.Hydro, tag = i) .*
+ (value.(EP[:vP][HYDRO_RES, :]) * weighted_price)
+ if !isempty(STOR_ALL)
+ tempresrev[STOR_ALL] = derating_factor.(gen.Storage, tag = i) .*
+ ((value.(EP[:vP][STOR_ALL, :]) -
+ value.(EP[:vCHARGE][STOR_ALL, :]).data +
+ value.(EP[:vCAPRES_discharge][STOR_ALL, :]).data -
+ value.(EP[:vCAPRES_charge][STOR_ALL, :]).data) *
+ weighted_price)
+ end
+ if !isempty(FLEX)
+ tempresrev[FLEX] = derating_factor.(gen.FlexDemand, tag = i) .*
+ ((value.(EP[:vCHARGE_FLEX][FLEX, :]).data -
+ value.(EP[:vP][FLEX, :])) * weighted_price)
+ end
+ if !isempty(VRE_STOR)
+ gen_VRE_STOR = gen.VreStorage
+ tempresrev[VRE_STOR] = derating_factor.(gen_VRE_STOR, tag = i) .*
+ ((value.(EP[:vP][VRE_STOR, :])) * weighted_price)
+ tempresrev[VRE_STOR_STOR] .-= derating_factor.(
+ gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0) .| (gen_VRE_STOR.stor_dc_charge .!= 0) .| (gen_VRE_STOR.stor_ac_discharge .!= 0) .| (gen_VRE_STOR.stor_ac_charge .!= 0)],
+ tag = i) .* (value.(EP[:vCHARGE_VRE_STOR][VRE_STOR_STOR,
+ :]).data * weighted_price)
+ tempresrev[DC_DISCHARGE] .+= derating_factor.(
+ gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)],
+ tag = i) .* ((value.(EP[:vCAPRES_DC_DISCHARGE][DC_DISCHARGE,
+ :]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)])) *
+ weighted_price)
+ tempresrev[AC_DISCHARGE] .+= derating_factor.(
+ gen_VRE_STOR[(gen_VRE_STOR.stor_ac_discharge .!= 0)],
+ tag = i) .* ((value.(EP[:vCAPRES_AC_DISCHARGE][AC_DISCHARGE,
+ :]).data) * weighted_price)
+ tempresrev[DC_CHARGE] .-= derating_factor.(
+ gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge .!= 0)],
+ tag = i) .* ((value.(EP[:vCAPRES_DC_CHARGE][DC_CHARGE, :]).data ./
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge .!= 0)])) *
+ weighted_price)
+ tempresrev[AC_CHARGE] .-= derating_factor.(
+ gen_VRE_STOR[(gen_VRE_STOR.stor_ac_charge .!= 0)],
+ tag = i) .* ((value.(EP[:vCAPRES_AC_CHARGE][AC_CHARGE, :]).data) *
+ weighted_price)
+ end
+ tempresrev *= scale_factor
+ annual_sum .+= tempresrev
+ dfResRevenue = hcat(dfResRevenue, DataFrame([tempresrev], [Symbol("CapRes_$i")]))
+ end
+ dfResRevenue.AnnualSum = annual_sum
+ CSV.write(joinpath(path, "ReserveMarginRevenue.csv"), dfResRevenue)
+ return dfResRevenue
end
diff --git a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_slack.jl b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_slack.jl
index 99b0e9e0f6..f6d71e4fb4 100644
--- a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_slack.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_slack.jl
@@ -1,10 +1,13 @@
-function write_reserve_margin_slack(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_reserve_margin_slack(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
NCRM = inputs["NCapacityReserveMargin"]
T = inputs["T"] # Number of time steps (hours)
- dfResMar_slack = DataFrame(CRM_Constraint = [Symbol("CapRes_$res") for res = 1:NCRM],
- AnnualSum = value.(EP[:eCapResSlack_Year]),
- Penalty = value.(EP[:eCCapResSlack]))
-
+ dfResMar_slack = DataFrame(CRM_Constraint = [Symbol("CapRes_$res") for res in 1:NCRM],
+ AnnualSum = value.(EP[:eCapResSlack_Year]),
+ Penalty = value.(EP[:eCCapResSlack]))
+
if setup["ParameterScale"] == 1
dfResMar_slack.AnnualSum .*= ModelScalingFactor # Convert GW to MW
dfResMar_slack.Penalty .*= ModelScalingFactor^2 # Convert Million $ to $
@@ -17,9 +20,11 @@ function write_reserve_margin_slack(path::AbstractString, inputs::Dict, setup::D
if setup["ParameterScale"] == 1
temp_ResMar_slack .*= ModelScalingFactor # Convert GW to MW
end
- dfResMar_slack = hcat(dfResMar_slack, DataFrame(temp_ResMar_slack, [Symbol("t$t") for t in 1:T]))
- CSV.write(joinpath(path, "ReserveMargin_prices_and_penalties.csv"), dftranspose(dfResMar_slack, false), writeheader=false)
+ dfResMar_slack = hcat(dfResMar_slack,
+ DataFrame(temp_ResMar_slack, [Symbol("t$t") for t in 1:T]))
+ CSV.write(joinpath(path, "ReserveMargin_prices_and_penalties.csv"),
+ dftranspose(dfResMar_slack, false),
+ writeheader = false)
end
return nothing
end
-
diff --git a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_w.jl b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_w.jl
index 00c273adfc..74b4efa7fe 100644
--- a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_w.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_w.jl
@@ -1,13 +1,14 @@
function write_reserve_margin_w(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- #dfResMar dataframe with weights included for calculations
- dfResMar_w = DataFrame(Constraint = [Symbol("t$t") for t in 1:T])
- temp_ResMar_w = transpose(dual.(EP[:cCapacityResMargin]))./inputs["omega"]
- if setup["ParameterScale"] == 1
- temp_ResMar_w = temp_ResMar_w * ModelScalingFactor # Convert from MillionUS$/GWh to US$/MWh
- end
- dfResMar_w = hcat(dfResMar_w, DataFrame(temp_ResMar_w, :auto))
- auxNew_Names_res=[Symbol("Constraint"); [Symbol("CapRes_$i") for i in 1:inputs["NCapacityReserveMargin"]]]
- rename!(dfResMar_w,auxNew_Names_res)
- CSV.write(joinpath(path, "ReserveMargin_w.csv"), dfResMar_w)
-end
\ No newline at end of file
+ T = inputs["T"] # Number of time steps (hours)
+ #dfResMar dataframe with weights included for calculations
+ dfResMar_w = DataFrame(Constraint = [Symbol("t$t") for t in 1:T])
+ temp_ResMar_w = transpose(dual.(EP[:cCapacityResMargin])) ./ inputs["omega"]
+ if setup["ParameterScale"] == 1
+ temp_ResMar_w = temp_ResMar_w * ModelScalingFactor # Convert from MillionUS$/GWh to US$/MWh
+ end
+ dfResMar_w = hcat(dfResMar_w, DataFrame(temp_ResMar_w, :auto))
+ auxNew_Names_res = [Symbol("Constraint");
+ [Symbol("CapRes_$i") for i in 1:inputs["NCapacityReserveMargin"]]]
+ rename!(dfResMar_w, auxNew_Names_res)
+ CSV.write(joinpath(path, "ReserveMargin_w.csv"), dfResMar_w)
+end
diff --git a/src/write_outputs/capacity_reserve_margin/write_virtual_discharge.jl b/src/write_outputs/capacity_reserve_margin/write_virtual_discharge.jl
index 9a4d8308a6..1aa52623de 100644
--- a/src/write_outputs/capacity_reserve_margin/write_virtual_discharge.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_virtual_discharge.jl
@@ -5,25 +5,25 @@ Function for writing the "virtual" discharge of each storage technology. Virtual
allow storage resources to contribute to the capacity reserve margin without actually discharging.
"""
function write_virtual_discharge(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ STOR_ALL = inputs["STOR_ALL"]
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- STOR_ALL = inputs["STOR_ALL"]
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
-
- resources = inputs["RESOURCE_NAMES"][STOR_ALL]
- zones = inputs["R_ZONES"][STOR_ALL]
- virtual_discharge = (value.(EP[:vCAPRES_discharge][STOR_ALL, :].data) - value.(EP[:vCAPRES_charge][STOR_ALL, :].data)) * scale_factor
+ resources = inputs["RESOURCE_NAMES"][STOR_ALL]
+ zones = inputs["R_ZONES"][STOR_ALL]
+ virtual_discharge = (value.(EP[:vCAPRES_discharge][STOR_ALL, :].data) -
+ value.(EP[:vCAPRES_charge][STOR_ALL, :].data)) * scale_factor
- dfVirtualDischarge = DataFrame(Resource = resources, Zone = zones)
- dfVirtualDischarge.AnnualSum .= virtual_discharge * inputs["omega"]
+ dfVirtualDischarge = DataFrame(Resource = resources, Zone = zones)
+ dfVirtualDischarge.AnnualSum .= virtual_discharge * inputs["omega"]
- filepath = joinpath(path, "virtual_discharge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfVirtualDischarge)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, virtual_discharge, dfVirtualDischarge)
- end
- return nothing
-end
+ filepath = joinpath(path, "virtual_discharge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfVirtualDischarge)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, virtual_discharge, dfVirtualDischarge)
+ end
+ return nothing
+end
diff --git a/src/write_outputs/choose_output_dir.jl b/src/write_outputs/choose_output_dir.jl
index dc051f9881..43a4878e8e 100644
--- a/src/write_outputs/choose_output_dir.jl
+++ b/src/write_outputs/choose_output_dir.jl
@@ -1,15 +1,15 @@
-"""
- path = choose_output_dir(pathinit)
+@doc raw"""
+ choose_output_dir(pathinit)
Avoid overwriting (potentially important) existing results by appending to the directory name\n
Checks if the suggested output directory already exists. While yes, it appends _1, _2, etc till an unused name is found
"""
function choose_output_dir(pathinit::String)
- path = pathinit
- counter = 1
- while isdir(path)
- path = string(pathinit, "_", counter)
- counter += 1
- end
- return path
+ path = pathinit
+ counter = 1
+ while isdir(path)
+ path = string(pathinit, "_", counter)
+ counter += 1
+ end
+ return path
end
diff --git a/src/write_outputs/co2_cap/write_co2_cap.jl b/src/write_outputs/co2_cap/write_co2_cap.jl
index fa8e479ec8..19cba87d71 100644
--- a/src/write_outputs/co2_cap/write_co2_cap.jl
+++ b/src/write_outputs/co2_cap/write_co2_cap.jl
@@ -5,19 +5,20 @@ Function for reporting carbon price associated with carbon cap constraints.
"""
function write_co2_cap(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- dfCO2Price = DataFrame(CO2_Cap = [Symbol("CO2_Cap_$cap") for cap = 1:inputs["NCO2Cap"]],
- CO2_Price = (-1) * (dual.(EP[:cCO2Emissions_systemwide])))
+ dfCO2Price = DataFrame(
+ CO2_Cap = [Symbol("CO2_Cap_$cap") for cap in 1:inputs["NCO2Cap"]],
+ CO2_Price = (-1) * (dual.(EP[:cCO2Emissions_systemwide])))
if setup["ParameterScale"] == 1
dfCO2Price.CO2_Price .*= ModelScalingFactor # Convert Million$/kton to $/ton
end
- if haskey(inputs, "dfCO2Cap_slack")
- dfCO2Price[!,:CO2_Mass_Slack] = convert(Array{Float64}, value.(EP[:vCO2Cap_slack]))
- dfCO2Price[!,:CO2_Penalty] = convert(Array{Float64}, value.(EP[:eCCO2Cap_slack]))
- if setup["ParameterScale"] == 1
+ if haskey(inputs, "dfCO2Cap_slack")
+ dfCO2Price[!, :CO2_Mass_Slack] = convert(Array{Float64}, value.(EP[:vCO2Cap_slack]))
+ dfCO2Price[!, :CO2_Penalty] = convert(Array{Float64}, value.(EP[:eCCO2Cap_slack]))
+ if setup["ParameterScale"] == 1
dfCO2Price.CO2_Mass_Slack .*= ModelScalingFactor # Convert ktons to tons
dfCO2Price.CO2_Penalty .*= ModelScalingFactor^2 # Convert Million$ to $
- end
- end
+ end
+ end
CSV.write(joinpath(path, "CO2_prices_and_penalties.csv"), dfCO2Price)
diff --git a/src/write_outputs/dftranspose.jl b/src/write_outputs/dftranspose.jl
index e482a2a37b..ec9d1a5f39 100644
--- a/src/write_outputs/dftranspose.jl
+++ b/src/write_outputs/dftranspose.jl
@@ -10,18 +10,17 @@
## Note this function is necessary because no stock function to transpose
## DataFrames appears to exist.
################################################################################
-"""
+@doc raw"""
df = dftranspose(df::DataFrame, withhead::Bool)
-Returns a transpose of a Dataframe.\n
-FIXME: This is for DataFrames@0.20.2, as used in GenX.
-Versions 0.21+ could use stack and unstack to make further changes while retaining the order
+Returns a transpose of a Dataframe.
"""
function dftranspose(df::DataFrame, withhead::Bool)
- if withhead
- colnames = cat(:Row, Symbol.(df[!,1]), dims=1)
- return DataFrame([[names(df)]; collect.(eachrow(df))], colnames)
- else
- return DataFrame([[names(df)]; collect.(eachrow(df))], [:Row; Symbol.("x",axes(df, 1))])
- end
+ if withhead
+ colnames = cat(:Row, Symbol.(df[!, 1]), dims = 1)
+ return DataFrame([[names(df)]; collect.(eachrow(df))], colnames)
+ else
+ return DataFrame([[names(df)]; collect.(eachrow(df))],
+ [:Row; Symbol.("x", axes(df, 1))])
+ end
end # End dftranpose()
diff --git a/src/write_outputs/energy_share_requirement/write_esr_prices.jl b/src/write_outputs/energy_share_requirement/write_esr_prices.jl
index fe1127bb42..e9cccc46ae 100644
--- a/src/write_outputs/energy_share_requirement/write_esr_prices.jl
+++ b/src/write_outputs/energy_share_requirement/write_esr_prices.jl
@@ -1,17 +1,17 @@
function write_esr_prices(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- dfESR = DataFrame(ESR_Price = convert(Array{Float64}, dual.(EP[:cESRShare])))
- if setup["ParameterScale"] == 1
- dfESR[!,:ESR_Price] = dfESR[!,:ESR_Price] * ModelScalingFactor # Converting MillionUS$/GWh to US$/MWh
- end
+ dfESR = DataFrame(ESR_Price = convert(Array{Float64}, dual.(EP[:cESRShare])))
+ if setup["ParameterScale"] == 1
+ dfESR[!, :ESR_Price] = dfESR[!, :ESR_Price] * ModelScalingFactor # Converting MillionUS$/GWh to US$/MWh
+ end
- if haskey(inputs, "dfESR_slack")
- dfESR[!,:ESR_AnnualSlack] = convert(Array{Float64}, value.(EP[:vESR_slack]))
- dfESR[!,:ESR_AnnualPenalty] = convert(Array{Float64}, value.(EP[:eCESRSlack]))
- if setup["ParameterScale"] == 1
- dfESR[!,:ESR_AnnualSlack] *= ModelScalingFactor # Converting GWh to MWh
- dfESR[!,:ESR_AnnualPenalty] *= (ModelScalingFactor^2) # Converting MillionUSD to USD
- end
- end
- CSV.write(joinpath(path, "ESR_prices_and_penalties.csv"), dfESR)
- return dfESR
+ if haskey(inputs, "dfESR_slack")
+ dfESR[!, :ESR_AnnualSlack] = convert(Array{Float64}, value.(EP[:vESR_slack]))
+ dfESR[!, :ESR_AnnualPenalty] = convert(Array{Float64}, value.(EP[:eCESRSlack]))
+ if setup["ParameterScale"] == 1
+ dfESR[!, :ESR_AnnualSlack] *= ModelScalingFactor # Converting GWh to MWh
+ dfESR[!, :ESR_AnnualPenalty] *= (ModelScalingFactor^2) # Converting MillionUSD to USD
+ end
+ end
+ CSV.write(joinpath(path, "ESR_prices_and_penalties.csv"), dfESR)
+ return dfESR
end
diff --git a/src/write_outputs/energy_share_requirement/write_esr_revenue.jl b/src/write_outputs/energy_share_requirement/write_esr_revenue.jl
index c212caccf8..3bb98c6e1f 100644
--- a/src/write_outputs/energy_share_requirement/write_esr_revenue.jl
+++ b/src/write_outputs/energy_share_requirement/write_esr_revenue.jl
@@ -3,67 +3,84 @@
Function for reporting the renewable/clean credit revenue earned by each generator listed in the input file. GenX will print this file only when RPS/CES is modeled and the shadow price can be obtained form the solver. Each row corresponds to a generator, and each column starting from the 6th to the second last is the total revenue earned from each RPS constraint. The revenue is calculated as the total annual generation (if elgible for the corresponding constraint) multiplied by the RPS/CES price. The last column is the total revenue received from all constraint. The unit is \$.
"""
-function write_esr_revenue(path::AbstractString, inputs::Dict, setup::Dict, dfPower::DataFrame, dfESR::DataFrame, EP::Model)
- gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
- rid = resource_id.(gen)
+function write_esr_revenue(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ dfPower::DataFrame,
+ dfESR::DataFrame,
+ EP::Model)
+ gen = inputs["RESOURCES"]
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
+ rid = resource_id.(gen)
- dfESRRev = DataFrame(region = regions, Resource = inputs["RESOURCE_NAMES"], zone = zones, Cluster = clusters, R_ID = rid)
- G = inputs["G"]
- nESR = inputs["nESR"]
- weight = inputs["omega"]
+ dfESRRev = DataFrame(region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ zone = zones,
+ Cluster = clusters,
+ R_ID = rid)
+ G = inputs["G"]
+ nESR = inputs["nESR"]
+ weight = inputs["omega"]
# Load VRE-storage inputs
- VRE_STOR = inputs["VRE_STOR"] # Set of VRE-STOR generators (indices)
-
- if !isempty(VRE_STOR)
- gen_VRE_STOR = gen.VreStorage # Set of VRE-STOR generators (objects)
- SOLAR = inputs["VS_SOLAR"]
- WIND = inputs["VS_WIND"]
- SOLAR_ONLY = setdiff(SOLAR, WIND)
- WIND_ONLY = setdiff(WIND, SOLAR)
- SOLAR_WIND = intersect(SOLAR, WIND)
- end
+ VRE_STOR = inputs["VRE_STOR"] # Set of VRE-STOR generators (indices)
- for i in 1:nESR
- esr_col = Symbol("ESR_$i")
- price = dfESR[i, :ESR_Price]
- derated_annual_net_generation = dfPower[1:G,:AnnualSum] .* esr.(gen, tag=i)
- revenue = derated_annual_net_generation * price
- dfESRRev[!, esr_col] = revenue
+ if !isempty(VRE_STOR)
+ gen_VRE_STOR = gen.VreStorage # Set of VRE-STOR generators (objects)
+ SOLAR = inputs["VS_SOLAR"]
+ WIND = inputs["VS_WIND"]
+ SOLAR_ONLY = setdiff(SOLAR, WIND)
+ WIND_ONLY = setdiff(WIND, SOLAR)
+ SOLAR_WIND = intersect(SOLAR, WIND)
+ end
- if !isempty(VRE_STOR)
- if !isempty(SOLAR_ONLY)
- solar_resources = ((gen_VRE_STOR.wind.==0) .& (gen_VRE_STOR.solar.!=0))
- dfESRRev[SOLAR, esr_col] = (
- value.(EP[:vP_SOLAR][SOLAR, :]).data
- .* etainverter.(gen_VRE_STOR[solar_resources]) * weight
- ) .* esr_vrestor.(gen_VRE_STOR[solar_resources], tag=i) * price
- end
- if !isempty(WIND_ONLY)
- wind_resources = ((gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.==0))
- dfESRRev[WIND, esr_col] = (
- value.(EP[:vP_WIND][WIND, :]).data
- * weight
- ) .* esr_vrestor.(gen_VRE_STOR[wind_resources], tag=i) * price
- end
- if !isempty(SOLAR_WIND)
- solar_and_wind_resources = ((gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.!=0))
- dfESRRev[SOLAR_WIND, esr_col] = (
- (
- (value.(EP[:vP_WIND][SOLAR_WIND, :]).data * weight)
- .* esr_vrestor.(gen_VRE_STOR[solar_and_wind_resources], tag=i) * price
- ) + (
- value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data
- .* etainverter.(gen_VRE_STOR[solar_and_wind_resources])
- * weight
- ) .* esr_vrestor.(gen_VRE_STOR[solar_and_wind_resources], tag=i) * price
- )
- end
- end
- end
- dfESRRev.Total = sum(eachcol(dfESRRev[:, 6:nESR + 5]))
- CSV.write(joinpath(path, "ESR_Revenue.csv"), dfESRRev)
- return dfESRRev
-end
\ No newline at end of file
+ for i in 1:nESR
+ esr_col = Symbol("ESR_$i")
+ price = dfESR[i, :ESR_Price]
+ derated_annual_net_generation = dfPower[1:G, :AnnualSum] .* esr.(gen, tag = i)
+ revenue = derated_annual_net_generation * price
+ dfESRRev[!, esr_col] = revenue
+
+ if !isempty(VRE_STOR)
+ if !isempty(SOLAR_ONLY)
+ solar_resources = ((gen_VRE_STOR.wind .== 0) .& (gen_VRE_STOR.solar .!= 0))
+ dfESRRev[SOLAR, esr_col] = (value.(EP[:vP_SOLAR][SOLAR, :]).data
+ .*
+ etainverter.(gen_VRE_STOR[solar_resources]) *
+ weight) .*
+ esr_vrestor.(gen_VRE_STOR[solar_resources],
+ tag = i) * price
+ end
+ if !isempty(WIND_ONLY)
+ wind_resources = ((gen_VRE_STOR.wind .!= 0) .& (gen_VRE_STOR.solar .== 0))
+ dfESRRev[WIND, esr_col] = (value.(EP[:vP_WIND][WIND, :]).data
+ *
+ weight) .*
+ esr_vrestor.(gen_VRE_STOR[wind_resources],
+ tag = i) * price
+ end
+ if !isempty(SOLAR_WIND)
+ solar_and_wind_resources = ((gen_VRE_STOR.wind .!= 0) .&
+ (gen_VRE_STOR.solar .!= 0))
+ dfESRRev[SOLAR_WIND, esr_col] = (((value.(EP[:vP_WIND][SOLAR_WIND,
+ :]).data * weight)
+ .*
+ esr_vrestor.(
+ gen_VRE_STOR[solar_and_wind_resources],
+ tag = i) * price) +
+ (value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data
+ .*
+ etainverter.(gen_VRE_STOR[solar_and_wind_resources])
+ *
+ weight) .*
+ esr_vrestor.(
+ gen_VRE_STOR[solar_and_wind_resources],
+ tag = i) * price)
+ end
+ end
+ end
+ dfESRRev.Total = sum(eachcol(dfESRRev[:, 6:(nESR + 5)]))
+ CSV.write(joinpath(path, "ESR_Revenue.csv"), dfESRRev)
+ return dfESRRev
+end
diff --git a/src/write_outputs/hydrogen/write_hourly_matching_prices.jl b/src/write_outputs/hydrogen/write_hourly_matching_prices.jl
index 393544e4e4..794eb9111b 100644
--- a/src/write_outputs/hydrogen/write_hourly_matching_prices.jl
+++ b/src/write_outputs/hydrogen/write_hourly_matching_prices.jl
@@ -1,17 +1,26 @@
-function write_hourly_matching_prices(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+function write_hourly_matching_prices(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- ## Extract dual variables of constraints
- dfHourlyMatchPrices = DataFrame(Zone = 1:Z) # The unit is $/MWh
- # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of the constraint
- dfHourlyMatchPrices = hcat(dfHourlyMatchPrices, DataFrame(dual.(EP[:cHourlyMatching]).data./transpose(inputs["omega"])*scale_factor, :auto))
+ ## Extract dual variables of constraints
+ dfHourlyMatchPrices = DataFrame(Zone = 1:Z) # The unit is $/MWh
+ # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of the constraint
+ dfHourlyMatchPrices = hcat(dfHourlyMatchPrices,
+ DataFrame(
+ dual.(EP[:cHourlyMatching]).data ./ transpose(inputs["omega"]) *
+ scale_factor,
+ :auto))
- auxNew_Names=[Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfHourlyMatchPrices,auxNew_Names)
+ auxNew_Names = [Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfHourlyMatchPrices, auxNew_Names)
- CSV.write(joinpath(path, "hourly_matching_prices.csv"), dftranspose(dfHourlyMatchPrices, false), header=false)
+ CSV.write(joinpath(path, "hourly_matching_prices.csv"),
+ dftranspose(dfHourlyMatchPrices, false),
+ header = false)
- return nothing
+ return nothing
end
diff --git a/src/write_outputs/hydrogen/write_hydrogen_prices.jl b/src/write_outputs/hydrogen/write_hydrogen_prices.jl
index 1d7d905491..5b3903a5a2 100644
--- a/src/write_outputs/hydrogen/write_hydrogen_prices.jl
+++ b/src/write_outputs/hydrogen/write_hydrogen_prices.jl
@@ -1,7 +1,8 @@
function write_hydrogen_prices(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- scale_factor = setup["ParameterScale"] == 1 ? 10^6 : 1 # If ParameterScale==1, costs are in millions of $
- dfHydrogenPrice = DataFrame(Hydrogen_Price_Per_Tonne = convert(Array{Float64}, dual.(EP[:cHydrogenMin])*scale_factor))
+ scale_factor = setup["ParameterScale"] == 1 ? 10^6 : 1 # If ParameterScale==1, costs are in millions of $
+ dfHydrogenPrice = DataFrame(Hydrogen_Price_Per_Tonne = convert(Array{Float64},
+ dual.(EP[:cHydrogenMin]) * scale_factor))
- CSV.write(joinpath(path, "hydrogen_prices.csv"), dfHydrogenPrice)
- return nothing
+ CSV.write(joinpath(path, "hydrogen_prices.csv"), dfHydrogenPrice)
+ return nothing
end
diff --git a/src/write_outputs/long_duration_storage/write_opwrap_lds_dstor.jl b/src/write_outputs/long_duration_storage/write_opwrap_lds_dstor.jl
index a5ce31ec7b..875d8e6f86 100644
--- a/src/write_outputs/long_duration_storage/write_opwrap_lds_dstor.jl
+++ b/src/write_outputs/long_duration_storage/write_opwrap_lds_dstor.jl
@@ -1,30 +1,32 @@
function write_opwrap_lds_dstor(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- ## Extract data frames from input dictionary
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+ ## Extract data frames from input dictionary
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- W = inputs["REP_PERIOD"] # Number of subperiods
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ W = inputs["REP_PERIOD"] # Number of subperiods
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- #Excess inventory of storage period built up during representative period w
- dfdStorage = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
- dsoc = zeros(G,W)
- for i in 1:G
- if i in inputs["STOR_LONG_DURATION"]
- dsoc[i,:] = value.(EP[:vdSOC])[i,:]
- end
- if !isempty(inputs["VRE_STOR"])
- if i in inputs["VS_LDS"]
- dsoc[i,:] = value.(EP[:vdSOC_VRE_STOR])[i,:]
- end
- end
- end
- if setup["ParameterScale"] == 1
- dsoc *= ModelScalingFactor
- end
-
- dfdStorage = hcat(dfdStorage, DataFrame(dsoc, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");[Symbol("w$t") for t in 1:W]]
- rename!(dfdStorage,auxNew_Names)
- CSV.write(joinpath(path, "dStorage.csv"), dftranspose(dfdStorage, false), header=false)
+ #Excess inventory of storage period built up during representative period w
+ dfdStorage = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
+ dsoc = zeros(G, W)
+ for i in 1:G
+ if i in inputs["STOR_LONG_DURATION"]
+ dsoc[i, :] = value.(EP[:vdSOC])[i, :]
+ end
+ if !isempty(inputs["VRE_STOR"])
+ if i in inputs["VS_LDS"]
+ dsoc[i, :] = value.(EP[:vdSOC_VRE_STOR])[i, :]
+ end
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dsoc *= ModelScalingFactor
+ end
+
+ dfdStorage = hcat(dfdStorage, DataFrame(dsoc, :auto))
+ auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("w$t") for t in 1:W]]
+ rename!(dfdStorage, auxNew_Names)
+ CSV.write(joinpath(path, "dStorage.csv"),
+ dftranspose(dfdStorage, false),
+ header = false)
end
diff --git a/src/write_outputs/long_duration_storage/write_opwrap_lds_stor_init.jl b/src/write_outputs/long_duration_storage/write_opwrap_lds_stor_init.jl
index bf1bda48aa..66c8ed7efb 100644
--- a/src/write_outputs/long_duration_storage/write_opwrap_lds_stor_init.jl
+++ b/src/write_outputs/long_duration_storage/write_opwrap_lds_stor_init.jl
@@ -1,30 +1,35 @@
-function write_opwrap_lds_stor_init(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- ## Extract data frames from input dictionary
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+function write_opwrap_lds_stor_init(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ ## Extract data frames from input dictionary
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- G = inputs["G"]
+ G = inputs["G"]
- # Initial level of storage in each modeled period
- NPeriods = size(inputs["Period_Map"])[1]
- dfStorageInit = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
- socw = zeros(G,NPeriods)
- for i in 1:G
- if i in inputs["STOR_LONG_DURATION"]
- socw[i,:] = value.(EP[:vSOCw])[i,:]
- end
- if !isempty(inputs["VRE_STOR"])
- if i in inputs["VS_LDS"]
- socw[i, :] = value.(EP[:vSOCw_VRE_STOR][i,:])
- end
- end
- end
- if setup["ParameterScale"] == 1
- socw *= ModelScalingFactor
- end
+ # Initial level of storage in each modeled period
+ NPeriods = size(inputs["Period_Map"])[1]
+ dfStorageInit = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
+ socw = zeros(G, NPeriods)
+ for i in 1:G
+ if i in inputs["STOR_LONG_DURATION"]
+ socw[i, :] = value.(EP[:vSOCw])[i, :]
+ end
+ if !isempty(inputs["VRE_STOR"])
+ if i in inputs["VS_LDS"]
+ socw[i, :] = value.(EP[:vSOCw_VRE_STOR][i, :])
+ end
+ end
+ end
+ if setup["ParameterScale"] == 1
+ socw *= ModelScalingFactor
+ end
- dfStorageInit = hcat(dfStorageInit, DataFrame(socw, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");[Symbol("n$t") for t in 1:NPeriods]]
- rename!(dfStorageInit,auxNew_Names)
- CSV.write(joinpath(path, "StorageInit.csv"), dftranspose(dfStorageInit, false), header=false)
+ dfStorageInit = hcat(dfStorageInit, DataFrame(socw, :auto))
+ auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("n$t") for t in 1:NPeriods]]
+ rename!(dfStorageInit, auxNew_Names)
+ CSV.write(joinpath(path, "StorageInit.csv"),
+ dftranspose(dfStorageInit, false),
+ header = false)
end
diff --git a/src/write_outputs/min_max_capacity_requirement/write_maximum_capacity_requirement.jl b/src/write_outputs/min_max_capacity_requirement/write_maximum_capacity_requirement.jl
index 24e3a7f4e6..8d1b3450ee 100644
--- a/src/write_outputs/min_max_capacity_requirement/write_maximum_capacity_requirement.jl
+++ b/src/write_outputs/min_max_capacity_requirement/write_maximum_capacity_requirement.jl
@@ -1,15 +1,20 @@
-function write_maximum_capacity_requirement(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_maximum_capacity_requirement(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
NumberOfMaxCapReqs = inputs["NumberOfMaxCapReqs"]
- dfMaxCapPrice = DataFrame(Constraint = [Symbol("MaxCapReq_$maxcap") for maxcap = 1:NumberOfMaxCapReqs],
- Price=-dual.(EP[:cZoneMaxCapReq]))
+ dfMaxCapPrice = DataFrame(
+ Constraint = [Symbol("MaxCapReq_$maxcap")
+ for maxcap in 1:NumberOfMaxCapReqs],
+ Price = -dual.(EP[:cZoneMaxCapReq]))
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
dfMaxCapPrice.Price *= scale_factor
if haskey(inputs, "MaxCapPriceCap")
- dfMaxCapPrice[!,:Slack] = convert(Array{Float64}, value.(EP[:vMaxCap_slack]))
- dfMaxCapPrice[!,:Penalty] = convert(Array{Float64}, value.(EP[:eCMaxCap_slack]))
+ dfMaxCapPrice[!, :Slack] = convert(Array{Float64}, value.(EP[:vMaxCap_slack]))
+ dfMaxCapPrice[!, :Penalty] = convert(Array{Float64}, value.(EP[:eCMaxCap_slack]))
dfMaxCapPrice.Slack *= scale_factor # Convert GW to MW
dfMaxCapPrice.Penalty *= scale_factor^2 # Convert Million $ to $
end
diff --git a/src/write_outputs/min_max_capacity_requirement/write_minimum_capacity_requirement.jl b/src/write_outputs/min_max_capacity_requirement/write_minimum_capacity_requirement.jl
index a1bfe1d28d..bae7d17ee9 100644
--- a/src/write_outputs/min_max_capacity_requirement/write_minimum_capacity_requirement.jl
+++ b/src/write_outputs/min_max_capacity_requirement/write_minimum_capacity_requirement.jl
@@ -1,15 +1,20 @@
-function write_minimum_capacity_requirement(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_minimum_capacity_requirement(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
NumberOfMinCapReqs = inputs["NumberOfMinCapReqs"]
- dfMinCapPrice = DataFrame(Constraint = [Symbol("MinCapReq_$mincap") for mincap = 1:NumberOfMinCapReqs],
- Price= dual.(EP[:cZoneMinCapReq]))
+ dfMinCapPrice = DataFrame(
+ Constraint = [Symbol("MinCapReq_$mincap")
+ for mincap in 1:NumberOfMinCapReqs],
+ Price = dual.(EP[:cZoneMinCapReq]))
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
dfMinCapPrice.Price *= scale_factor # Convert Million $/GW to $/MW
if haskey(inputs, "MinCapPriceCap")
- dfMinCapPrice[!,:Slack] = convert(Array{Float64}, value.(EP[:vMinCap_slack]))
- dfMinCapPrice[!,:Penalty] = convert(Array{Float64}, value.(EP[:eCMinCap_slack]))
+ dfMinCapPrice[!, :Slack] = convert(Array{Float64}, value.(EP[:vMinCap_slack]))
+ dfMinCapPrice[!, :Penalty] = convert(Array{Float64}, value.(EP[:eCMinCap_slack]))
dfMinCapPrice.Slack *= scale_factor # Convert GW to MW
dfMinCapPrice.Penalty *= scale_factor^2 # Convert Million $ to $
end
diff --git a/src/write_outputs/reserves/write_operating_reserve_price_revenue.jl b/src/write_outputs/reserves/write_operating_reserve_price_revenue.jl
index 25a2b4a760..79ab9b8cbe 100644
--- a/src/write_outputs/reserves/write_operating_reserve_price_revenue.jl
+++ b/src/write_outputs/reserves/write_operating_reserve_price_revenue.jl
@@ -7,36 +7,47 @@ Function for reporting the operating reserve and regulation revenue earned by ge
The last column is the total revenue received from all operating reserve and regulation constraints.
As a reminder, GenX models the operating reserve and regulation at the time-dependent level, and each constraint either stands for an overall market or a locality constraint.
"""
-function write_operating_reserve_regulation_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+function write_operating_reserve_regulation_revenue(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- gen = inputs["RESOURCES"]
- RSV = inputs["RSV"]
- REG = inputs["REG"]
+ gen = inputs["RESOURCES"]
+ RSV = inputs["RSV"]
+ REG = inputs["REG"]
regions = region.(gen)
clusters = cluster.(gen)
zones = zone_id.(gen)
names = inputs["RESOURCE_NAMES"]
- dfOpRsvRevenue = DataFrame(Region = regions[RSV], Resource = names[RSV], Zone = zones[RSV], Cluster = clusters[RSV], AnnualSum = Array{Float64}(undef, length(RSV)),)
- dfOpRegRevenue = DataFrame(Region = regions[REG], Resource = names[REG], Zone = zones[REG], Cluster = clusters[REG], AnnualSum = Array{Float64}(undef, length(REG)),)
-
- weighted_reg_price = operating_regulation_price(EP, inputs, setup)
- weighted_rsv_price = operating_reserve_price(EP, inputs, setup)
+ dfOpRsvRevenue = DataFrame(Region = regions[RSV],
+ Resource = names[RSV],
+ Zone = zones[RSV],
+ Cluster = clusters[RSV],
+ AnnualSum = Array{Float64}(undef, length(RSV)))
+ dfOpRegRevenue = DataFrame(Region = regions[REG],
+ Resource = names[REG],
+ Zone = zones[REG],
+ Cluster = clusters[REG],
+ AnnualSum = Array{Float64}(undef, length(REG)))
+
+ weighted_reg_price = operating_regulation_price(EP, inputs, setup)
+ weighted_rsv_price = operating_reserve_price(EP, inputs, setup)
- rsvrevenue = value.(EP[:vRSV][RSV, :].data) .* transpose(weighted_rsv_price)
- regrevenue = value.(EP[:vREG][REG, :].data) .* transpose(weighted_reg_price)
+ rsvrevenue = value.(EP[:vRSV][RSV, :].data) .* transpose(weighted_rsv_price)
+ regrevenue = value.(EP[:vREG][REG, :].data) .* transpose(weighted_reg_price)
- rsvrevenue *= scale_factor
- regrevenue *= scale_factor
+ rsvrevenue *= scale_factor
+ regrevenue *= scale_factor
- dfOpRsvRevenue.AnnualSum .= rsvrevenue * inputs["omega"]
- dfOpRegRevenue.AnnualSum .= regrevenue * inputs["omega"]
+ dfOpRsvRevenue.AnnualSum .= rsvrevenue * inputs["omega"]
+ dfOpRegRevenue.AnnualSum .= regrevenue * inputs["omega"]
- write_simple_csv(joinpath(path, "OperatingReserveRevenue.csv"), dfOpRsvRevenue)
- write_simple_csv(joinpath(path, "OperatingRegulationRevenue.csv"), dfOpRegRevenue)
- return dfOpRegRevenue, dfOpRsvRevenue
+ write_simple_csv(joinpath(path, "OperatingReserveRevenue.csv"), dfOpRsvRevenue)
+ write_simple_csv(joinpath(path, "OperatingRegulationRevenue.csv"), dfOpRegRevenue)
+ return dfOpRegRevenue, dfOpRsvRevenue
end
@doc raw"""
diff --git a/src/write_outputs/reserves/write_reg.jl b/src/write_outputs/reserves/write_reg.jl
index 4b984fcc14..7d7ca1efd6 100644
--- a/src/write_outputs/reserves/write_reg.jl
+++ b/src/write_outputs/reserves/write_reg.jl
@@ -1,20 +1,20 @@
function write_reg(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- REG = inputs["REG"]
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ REG = inputs["REG"]
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- resources = inputs["RESOURCE_NAMES"][REG]
- zones = inputs["R_ZONES"][REG]
- # Regulation contributions for each resource in each time step
- reg = value.(EP[:vREG][REG, :].data) * scale_factor
+ resources = inputs["RESOURCE_NAMES"][REG]
+ zones = inputs["R_ZONES"][REG]
+ # Regulation contributions for each resource in each time step
+ reg = value.(EP[:vREG][REG, :].data) * scale_factor
- dfReg = DataFrame(Resource = resources, Zone = zones)
- dfReg.AnnualSum = reg * inputs["omega"]
+ dfReg = DataFrame(Resource = resources, Zone = zones)
+ dfReg.AnnualSum = reg * inputs["omega"]
- filepath = joinpath(path, "reg.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfReg)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, reg, dfReg)
- end
- return nothing
+ filepath = joinpath(path, "reg.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfReg)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, reg, dfReg)
+ end
+ return nothing
end
diff --git a/src/write_outputs/reserves/write_rsv.jl b/src/write_outputs/reserves/write_rsv.jl
index ebfbca5725..ba38ccb727 100644
--- a/src/write_outputs/reserves/write_rsv.jl
+++ b/src/write_outputs/reserves/write_rsv.jl
@@ -1,32 +1,37 @@
function write_rsv(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- RSV = inputs["RSV"]
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ T = inputs["T"] # Number of time steps (hours)
+ RSV = inputs["RSV"]
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- resources = inputs["RESOURCE_NAMES"][RSV]
- zones = inputs["R_ZONES"][RSV]
- rsv = value.(EP[:vRSV][RSV, :].data) * scale_factor
+ resources = inputs["RESOURCE_NAMES"][RSV]
+ zones = inputs["R_ZONES"][RSV]
+ rsv = value.(EP[:vRSV][RSV, :].data) * scale_factor
- dfRsv = DataFrame(Resource = resources, Zone = zones)
+ dfRsv = DataFrame(Resource = resources, Zone = zones)
- dfRsv.AnnualSum = rsv * inputs["omega"]
+ dfRsv.AnnualSum = rsv * inputs["omega"]
- if setup["WriteOutputs"] == "annual"
- write_annual(joinpath(path, "reg_dn.csv"), dfRsv)
- else # setup["WriteOutputs"] == "full"
- unmet_vec = value.(EP[:vUNMET_RSV]) * scale_factor
- total_unmet = sum(unmet_vec)
- dfRsv = hcat(dfRsv, DataFrame(rsv, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfRsv,auxNew_Names)
-
- total = DataFrame(["Total" 0 sum(dfRsv.AnnualSum) zeros(1, T)], :auto)
- unmet = DataFrame(["unmet" 0 total_unmet zeros(1, T)], :auto)
- total[!, 4:T+3] .= sum(rsv, dims = 1)
- unmet[!, 4:T+3] .= transpose(unmet_vec)
- rename!(total,auxNew_Names)
- rename!(unmet,auxNew_Names)
- dfRsv = vcat(dfRsv, unmet, total)
- CSV.write(joinpath(path, "reg_dn.csv"), dftranspose(dfRsv, false), writeheader=false)
- end
+ if setup["WriteOutputs"] == "annual"
+ write_annual(joinpath(path, "reg_dn.csv"), dfRsv)
+ else # setup["WriteOutputs"] == "full"
+ unmet_vec = value.(EP[:vUNMET_RSV]) * scale_factor
+ total_unmet = sum(unmet_vec)
+ dfRsv = hcat(dfRsv, DataFrame(rsv, :auto))
+ auxNew_Names = [Symbol("Resource");
+ Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfRsv, auxNew_Names)
+
+ total = DataFrame(["Total" 0 sum(dfRsv.AnnualSum) zeros(1, T)], :auto)
+ unmet = DataFrame(["unmet" 0 total_unmet zeros(1, T)], :auto)
+ total[!, 4:(T + 3)] .= sum(rsv, dims = 1)
+ unmet[!, 4:(T + 3)] .= transpose(unmet_vec)
+ rename!(total, auxNew_Names)
+ rename!(unmet, auxNew_Names)
+ dfRsv = vcat(dfRsv, unmet, total)
+ CSV.write(joinpath(path, "reg_dn.csv"),
+ dftranspose(dfRsv, false),
+ writeheader = false)
+ end
end
diff --git a/src/write_outputs/transmission/write_nw_expansion.jl b/src/write_outputs/transmission/write_nw_expansion.jl
index 973248950c..f89e1bfe1f 100644
--- a/src/write_outputs/transmission/write_nw_expansion.jl
+++ b/src/write_outputs/transmission/write_nw_expansion.jl
@@ -1,23 +1,23 @@
function write_nw_expansion(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- L = inputs["L"] # Number of transmission lines
+ L = inputs["L"] # Number of transmission lines
- # Transmission network reinforcements
- transcap = zeros(L)
- for i in 1:L
- if i in inputs["EXPANSION_LINES"]
- transcap[i] = value.(EP[:vNEW_TRANS_CAP][i])
- end
- end
+ # Transmission network reinforcements
+ transcap = zeros(L)
+ for i in 1:L
+ if i in inputs["EXPANSION_LINES"]
+ transcap[i] = value.(EP[:vNEW_TRANS_CAP][i])
+ end
+ end
- dfTransCap = DataFrame(
- Line = 1:L, New_Trans_Capacity = convert(Array{Float64}, transcap),
- Cost_Trans_Capacity = convert(Array{Float64}, transcap.*inputs["pC_Line_Reinforcement"]),
- )
+ dfTransCap = DataFrame(Line = 1:L,
+ New_Trans_Capacity = convert(Array{Float64}, transcap),
+ Cost_Trans_Capacity = convert(Array{Float64},
+ transcap .* inputs["pC_Line_Reinforcement"]))
- if setup["ParameterScale"] == 1
- dfTransCap.New_Trans_Capacity *= ModelScalingFactor # GW to MW
- dfTransCap.Cost_Trans_Capacity *= ModelScalingFactor^2 # MUSD to USD
- end
+ if setup["ParameterScale"] == 1
+ dfTransCap.New_Trans_Capacity *= ModelScalingFactor # GW to MW
+ dfTransCap.Cost_Trans_Capacity *= ModelScalingFactor^2 # MUSD to USD
+ end
- CSV.write(joinpath(path, "network_expansion.csv"), dfTransCap)
+ CSV.write(joinpath(path, "network_expansion.csv"), dfTransCap)
end
diff --git a/src/write_outputs/transmission/write_transmission_flows.jl b/src/write_outputs/transmission/write_transmission_flows.jl
index 74f6f779dc..dbdaec2e4f 100644
--- a/src/write_outputs/transmission/write_transmission_flows.jl
+++ b/src/write_outputs/transmission/write_transmission_flows.jl
@@ -1,25 +1,28 @@
-function write_transmission_flows(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- # Transmission related values
- T = inputs["T"] # Number of time steps (hours)
- L = inputs["L"] # Number of transmission lines
- # Power flows on transmission lines at each time step
- dfFlow = DataFrame(Line = 1:L)
- flow = value.(EP[:vFLOW])
- if setup["ParameterScale"] == 1
- flow *= ModelScalingFactor
- end
+function write_transmission_flows(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ # Transmission related values
+ T = inputs["T"] # Number of time steps (hours)
+ L = inputs["L"] # Number of transmission lines
+ # Power flows on transmission lines at each time step
+ dfFlow = DataFrame(Line = 1:L)
+ flow = value.(EP[:vFLOW])
+ if setup["ParameterScale"] == 1
+ flow *= ModelScalingFactor
+ end
- filepath = joinpath(path, "flow.csv")
- if setup["WriteOutputs"] == "annual"
- dfFlow.AnnualSum = flow * inputs["omega"]
- total = DataFrame(["Total" sum(dfFlow.AnnualSum)], [:Line, :AnnualSum])
- dfFlow = vcat(dfFlow, total)
- CSV.write(filepath, dfFlow)
- else # setup["WriteOutputs"] == "full"
- dfFlow = hcat(dfFlow, DataFrame(flow, :auto))
- auxNew_Names=[Symbol("Line");[Symbol("t$t") for t in 1:T]]
- rename!(dfFlow,auxNew_Names)
- CSV.write(filepath, dftranspose(dfFlow, false), writeheader=false)
- end
- return nothing
+ filepath = joinpath(path, "flow.csv")
+ if setup["WriteOutputs"] == "annual"
+ dfFlow.AnnualSum = flow * inputs["omega"]
+ total = DataFrame(["Total" sum(dfFlow.AnnualSum)], [:Line, :AnnualSum])
+ dfFlow = vcat(dfFlow, total)
+ CSV.write(filepath, dfFlow)
+ else # setup["WriteOutputs"] == "full"
+ dfFlow = hcat(dfFlow, DataFrame(flow, :auto))
+ auxNew_Names = [Symbol("Line"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfFlow, auxNew_Names)
+ CSV.write(filepath, dftranspose(dfFlow, false), writeheader = false)
+ end
+ return nothing
end
diff --git a/src/write_outputs/transmission/write_transmission_losses.jl b/src/write_outputs/transmission/write_transmission_losses.jl
index 8f5bb51977..1e4bf164bf 100644
--- a/src/write_outputs/transmission/write_transmission_losses.jl
+++ b/src/write_outputs/transmission/write_transmission_losses.jl
@@ -1,29 +1,35 @@
-function write_transmission_losses(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- L = inputs["L"] # Number of transmission lines
- LOSS_LINES = inputs["LOSS_LINES"]
- # Power losses for transmission between zones at each time step
- dfTLosses = DataFrame(Line = 1:L)
- tlosses = zeros(L, T)
- tlosses[LOSS_LINES, :] = value.(EP[:vTLOSS][LOSS_LINES, :])
- if setup["ParameterScale"] == 1
- tlosses[LOSS_LINES, :] *= ModelScalingFactor
- end
+function write_transmission_losses(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ T = inputs["T"] # Number of time steps (hours)
+ L = inputs["L"] # Number of transmission lines
+ LOSS_LINES = inputs["LOSS_LINES"]
+ # Power losses for transmission between zones at each time step
+ dfTLosses = DataFrame(Line = 1:L)
+ tlosses = zeros(L, T)
+ tlosses[LOSS_LINES, :] = value.(EP[:vTLOSS][LOSS_LINES, :])
+ if setup["ParameterScale"] == 1
+ tlosses[LOSS_LINES, :] *= ModelScalingFactor
+ end
- dfTLosses.AnnualSum = tlosses * inputs["omega"]
-
- if setup["WriteOutputs"] == "annual"
- total = DataFrame(["Total" sum(dfTLosses.AnnualSum)], [:Line, :AnnualSum])
- dfTLosses = vcat(dfTLosses, total)
- CSV.write(joinpath(path, "tlosses.csv"), dfTLosses)
- else
- dfTLosses = hcat(dfTLosses, DataFrame(tlosses, :auto))
- auxNew_Names=[Symbol("Line");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfTLosses,auxNew_Names)
- total = DataFrame(["Total" sum(dfTLosses.AnnualSum) fill(0.0, (1,T))], auxNew_Names)
- total[:, 3:T+2] .= sum(tlosses, dims = 1)
- dfTLosses = vcat(dfTLosses, total)
- CSV.write(joinpath(path, "tlosses.csv"), dftranspose(dfTLosses, false), writeheader=false)
- end
- return nothing
+ dfTLosses.AnnualSum = tlosses * inputs["omega"]
+
+ if setup["WriteOutputs"] == "annual"
+ total = DataFrame(["Total" sum(dfTLosses.AnnualSum)], [:Line, :AnnualSum])
+ dfTLosses = vcat(dfTLosses, total)
+ CSV.write(joinpath(path, "tlosses.csv"), dfTLosses)
+ else
+ dfTLosses = hcat(dfTLosses, DataFrame(tlosses, :auto))
+ auxNew_Names = [Symbol("Line"); Symbol("AnnualSum"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfTLosses, auxNew_Names)
+ total = DataFrame(["Total" sum(dfTLosses.AnnualSum) fill(0.0, (1, T))],
+ auxNew_Names)
+ total[:, 3:(T + 2)] .= sum(tlosses, dims = 1)
+ dfTLosses = vcat(dfTLosses, total)
+ CSV.write(joinpath(path, "tlosses.csv"),
+ dftranspose(dfTLosses, false),
+ writeheader = false)
+ end
+ return nothing
end
diff --git a/src/write_outputs/ucommit/write_commit.jl b/src/write_outputs/ucommit/write_commit.jl
index 685ad53e0a..bf8e712640 100644
--- a/src/write_outputs/ucommit/write_commit.jl
+++ b/src/write_outputs/ucommit/write_commit.jl
@@ -1,15 +1,14 @@
function write_commit(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
-
- COMMIT = inputs["COMMIT"]
- T = inputs["T"]
+ COMMIT = inputs["COMMIT"]
+ T = inputs["T"]
- # Commitment state for each resource in each time step
- resources = inputs["RESOURCE_NAMES"][COMMIT]
- zones = inputs["R_ZONES"][COMMIT]
- commit = value.(EP[:vCOMMIT][COMMIT, :].data)
- dfCommit = DataFrame(Resource = resources, Zone = zones)
- dfCommit = hcat(dfCommit, DataFrame(commit, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfCommit,auxNew_Names)
- CSV.write(joinpath(path, "commit.csv"), dftranspose(dfCommit, false), header=false)
+ # Commitment state for each resource in each time step
+ resources = inputs["RESOURCE_NAMES"][COMMIT]
+ zones = inputs["R_ZONES"][COMMIT]
+ commit = value.(EP[:vCOMMIT][COMMIT, :].data)
+ dfCommit = DataFrame(Resource = resources, Zone = zones)
+ dfCommit = hcat(dfCommit, DataFrame(commit, :auto))
+ auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfCommit, auxNew_Names)
+ CSV.write(joinpath(path, "commit.csv"), dftranspose(dfCommit, false), header = false)
end
diff --git a/src/write_outputs/ucommit/write_shutdown.jl b/src/write_outputs/ucommit/write_shutdown.jl
index 56325b25f6..8a726a3367 100644
--- a/src/write_outputs/ucommit/write_shutdown.jl
+++ b/src/write_outputs/ucommit/write_shutdown.jl
@@ -1,19 +1,19 @@
function write_shutdown(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- # Operational decision variable states
- COMMIT = inputs["COMMIT"]
- zones = inputs["R_ZONES"][COMMIT]
- # Shutdown state for each resource in each time step
- shut = value.(EP[:vSHUT][COMMIT, :].data)
- resources = inputs["RESOURCE_NAMES"][COMMIT]
+ # Operational decision variable states
+ COMMIT = inputs["COMMIT"]
+ zones = inputs["R_ZONES"][COMMIT]
+ # Shutdown state for each resource in each time step
+ shut = value.(EP[:vSHUT][COMMIT, :].data)
+ resources = inputs["RESOURCE_NAMES"][COMMIT]
- dfShutdown = DataFrame(Resource = resources, Zone = zones)
- dfShutdown.AnnualSum = shut * inputs["omega"]
+ dfShutdown = DataFrame(Resource = resources, Zone = zones)
+ dfShutdown.AnnualSum = shut * inputs["omega"]
- filepath = joinpath(path, "shutdown.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfShutdown)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, shut, dfShutdown)
- end
- return nothing
+ filepath = joinpath(path, "shutdown.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfShutdown)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, shut, dfShutdown)
+ end
+ return nothing
end
diff --git a/src/write_outputs/ucommit/write_start.jl b/src/write_outputs/ucommit/write_start.jl
index 461d522a17..be23be46bd 100644
--- a/src/write_outputs/ucommit/write_start.jl
+++ b/src/write_outputs/ucommit/write_start.jl
@@ -1,19 +1,18 @@
function write_start(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ COMMIT = inputs["COMMIT"]
+ # Startup state for each resource in each time step
+ resources = inputs["RESOURCE_NAMES"][COMMIT]
+ zones = inputs["R_ZONES"][COMMIT]
- COMMIT = inputs["COMMIT"]
- # Startup state for each resource in each time step
- resources = inputs["RESOURCE_NAMES"][COMMIT]
- zones = inputs["R_ZONES"][COMMIT]
+ dfStart = DataFrame(Resource = resources, Zone = zones)
+ start = value.(EP[:vSTART][COMMIT, :].data)
+ dfStart.AnnualSum = start * inputs["omega"]
- dfStart = DataFrame(Resource = resources, Zone = zones)
- start = value.(EP[:vSTART][COMMIT, :].data)
- dfStart.AnnualSum = start * inputs["omega"]
-
- filepath = joinpath(path, "start.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfStart)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, start, dfStart)
- end
- return nothing
+ filepath = joinpath(path, "start.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfStart)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, start, dfStart)
+ end
+ return nothing
end
diff --git a/src/write_outputs/write_angles.jl b/src/write_outputs/write_angles.jl
index f638b37e52..b93870354f 100644
--- a/src/write_outputs/write_angles.jl
+++ b/src/write_outputs/write_angles.jl
@@ -4,17 +4,19 @@
Function for reporting the bus angles for each model zone and time step if the DC_OPF flag is activated
"""
function write_angles(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- dfAngles = DataFrame(Zone = 1:Z)
- angles = value.(EP[:vANGLE])
- dfAngles = hcat(dfAngles, DataFrame(angles, :auto))
+ dfAngles = DataFrame(Zone = 1:Z)
+ angles = value.(EP[:vANGLE])
+ dfAngles = hcat(dfAngles, DataFrame(angles, :auto))
- auxNew_Names=[Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfAngles,auxNew_Names)
+ auxNew_Names = [Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfAngles, auxNew_Names)
- ## Linear configuration final output
- CSV.write(joinpath(path, "angles.csv"), dftranspose(dfAngles, false), writeheader=false)
- return nothing
+ ## Linear configuration final output
+ CSV.write(joinpath(path, "angles.csv"),
+ dftranspose(dfAngles, false),
+ writeheader = false)
+ return nothing
end
diff --git a/src/write_outputs/write_capacity.jl b/src/write_outputs/write_capacity.jl
index f102ced874..99e4797ecc 100755
--- a/src/write_outputs/write_capacity.jl
+++ b/src/write_outputs/write_capacity.jl
@@ -4,129 +4,129 @@
Function for writing the diferent capacities for the different generation technologies (starting capacities or, existing capacities, retired capacities, and new-built capacities).
"""
function write_capacity(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ MultiStage = setup["MultiStage"]
- MultiStage = setup["MultiStage"]
-
- # Capacity decisions
- capdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["NEW_CAP"]
- if i in inputs["COMMIT"]
- capdischarge[i] = value(EP[:vCAP][i])*cap_size(gen[i])
- else
- capdischarge[i] = value(EP[:vCAP][i])
- end
- end
+ # Capacity decisions
+ capdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["NEW_CAP"]
+ if i in inputs["COMMIT"]
+ capdischarge[i] = value(EP[:vCAP][i]) * cap_size(gen[i])
+ else
+ capdischarge[i] = value(EP[:vCAP][i])
+ end
+ end
- retcapdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["RET_CAP"]
- if i in inputs["COMMIT"]
- retcapdischarge[i] = first(value.(EP[:vRETCAP][i]))*cap_size(gen[i])
- else
- retcapdischarge[i] = first(value.(EP[:vRETCAP][i]))
- end
- end
+ retcapdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["RET_CAP"]
+ if i in inputs["COMMIT"]
+ retcapdischarge[i] = first(value.(EP[:vRETCAP][i])) * cap_size(gen[i])
+ else
+ retcapdischarge[i] = first(value.(EP[:vRETCAP][i]))
+ end
+ end
- retrocapdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["RETROFIT_CAP"]
- if i in inputs["COMMIT"]
- retrocapdischarge[i] = first(value.(EP[:vRETROFITCAP][i])) * cap_size(gen[i])
- else
- retrocapdischarge[i] = first(value.(EP[:vRETROFITCAP][i]))
- end
- end
+ retrocapdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["RETROFIT_CAP"]
+ if i in inputs["COMMIT"]
+ retrocapdischarge[i] = first(value.(EP[:vRETROFITCAP][i])) * cap_size(gen[i])
+ else
+ retrocapdischarge[i] = first(value.(EP[:vRETROFITCAP][i]))
+ end
+ end
+ capacity_constraint_dual = zeros(size(inputs["RESOURCE_NAMES"]))
+ for y in ids_with_positive(gen, max_cap_mw)
+ capacity_constraint_dual[y] = -dual.(EP[:cMaxCap][y])
+ end
- capacity_constraint_dual = zeros(size(inputs["RESOURCE_NAMES"]))
- for y in ids_with_positive(gen, max_cap_mw)
- capacity_constraint_dual[y] = -dual.(EP[:cMaxCap][y])
- end
+ capcharge = zeros(size(inputs["RESOURCE_NAMES"]))
+ retcapcharge = zeros(size(inputs["RESOURCE_NAMES"]))
+ existingcapcharge = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["STOR_ASYMMETRIC"]
+ if i in inputs["NEW_CAP_CHARGE"]
+ capcharge[i] = value(EP[:vCAPCHARGE][i])
+ end
+ if i in inputs["RET_CAP_CHARGE"]
+ retcapcharge[i] = value(EP[:vRETCAPCHARGE][i])
+ end
+ existingcapcharge[i] = MultiStage == 1 ? value(EP[:vEXISTINGCAPCHARGE][i]) :
+ existing_charge_cap_mw(gen[i])
+ end
- capcharge = zeros(size(inputs["RESOURCE_NAMES"]))
- retcapcharge = zeros(size(inputs["RESOURCE_NAMES"]))
- existingcapcharge = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["STOR_ASYMMETRIC"]
- if i in inputs["NEW_CAP_CHARGE"]
- capcharge[i] = value(EP[:vCAPCHARGE][i])
- end
- if i in inputs["RET_CAP_CHARGE"]
- retcapcharge[i] = value(EP[:vRETCAPCHARGE][i])
- end
- existingcapcharge[i] = MultiStage == 1 ? value(EP[:vEXISTINGCAPCHARGE][i]) : existing_charge_cap_mw(gen[i])
- end
+ capenergy = zeros(size(inputs["RESOURCE_NAMES"]))
+ retcapenergy = zeros(size(inputs["RESOURCE_NAMES"]))
+ existingcapenergy = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["STOR_ALL"]
+ if i in inputs["NEW_CAP_ENERGY"]
+ capenergy[i] = value(EP[:vCAPENERGY][i])
+ end
+ if i in inputs["RET_CAP_ENERGY"]
+ retcapenergy[i] = value(EP[:vRETCAPENERGY][i])
+ end
+ existingcapenergy[i] = MultiStage == 1 ? value(EP[:vEXISTINGCAPENERGY][i]) :
+ existing_cap_mwh(gen[i])
+ end
+ if !isempty(inputs["VRE_STOR"])
+ for i in inputs["VS_STOR"]
+ if i in inputs["NEW_CAP_STOR"]
+ capenergy[i] = value(EP[:vCAPENERGY_VS][i])
+ end
+ if i in inputs["RET_CAP_STOR"]
+ retcapenergy[i] = value(EP[:vRETCAPENERGY_VS][i])
+ end
+ existingcapenergy[i] = existing_cap_mwh(gen[i]) # multistage functionality doesn't exist yet for VRE-storage resources
+ end
+ end
+ dfCap = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zone_id.(gen),
+ Retrofit_Id = retrofit_id.(gen),
+ StartCap = MultiStage == 1 ? value.(EP[:vEXISTINGCAP]) : existing_cap_mw.(gen),
+ RetCap = retcapdischarge[:],
+ RetroCap = retrocapdischarge[:], #### Need to change later
+ NewCap = capdischarge[:],
+ EndCap = value.(EP[:eTotalCap]),
+ CapacityConstraintDual = capacity_constraint_dual[:],
+ StartEnergyCap = existingcapenergy[:],
+ RetEnergyCap = retcapenergy[:],
+ NewEnergyCap = capenergy[:],
+ EndEnergyCap = existingcapenergy[:] - retcapenergy[:] + capenergy[:],
+ StartChargeCap = existingcapcharge[:],
+ RetChargeCap = retcapcharge[:],
+ NewChargeCap = capcharge[:],
+ EndChargeCap = existingcapcharge[:] - retcapcharge[:] + capcharge[:])
+ if setup["ParameterScale"] == 1
+ dfCap.StartCap = dfCap.StartCap * ModelScalingFactor
+ dfCap.RetCap = dfCap.RetCap * ModelScalingFactor
+ dfCap.RetroCap = dfCap.RetroCap * ModelScalingFactor
+ dfCap.NewCap = dfCap.NewCap * ModelScalingFactor
+ dfCap.EndCap = dfCap.EndCap * ModelScalingFactor
+ dfCap.CapacityConstraintDual = dfCap.CapacityConstraintDual * ModelScalingFactor
+ dfCap.StartEnergyCap = dfCap.StartEnergyCap * ModelScalingFactor
+ dfCap.RetEnergyCap = dfCap.RetEnergyCap * ModelScalingFactor
+ dfCap.NewEnergyCap = dfCap.NewEnergyCap * ModelScalingFactor
+ dfCap.EndEnergyCap = dfCap.EndEnergyCap * ModelScalingFactor
+ dfCap.StartChargeCap = dfCap.StartChargeCap * ModelScalingFactor
+ dfCap.RetChargeCap = dfCap.RetChargeCap * ModelScalingFactor
+ dfCap.NewChargeCap = dfCap.NewChargeCap * ModelScalingFactor
+ dfCap.EndChargeCap = dfCap.EndChargeCap * ModelScalingFactor
+ end
+ total = DataFrame(Resource = "Total", Zone = "n/a", Retrofit_Id = "n/a",
+ StartCap = sum(dfCap[!, :StartCap]), RetCap = sum(dfCap[!, :RetCap]),
+ NewCap = sum(dfCap[!, :NewCap]), EndCap = sum(dfCap[!, :EndCap]),
+ RetroCap = sum(dfCap[!, :RetroCap]),
+ CapacityConstraintDual = "n/a",
+ StartEnergyCap = sum(dfCap[!, :StartEnergyCap]),
+ RetEnergyCap = sum(dfCap[!, :RetEnergyCap]),
+ NewEnergyCap = sum(dfCap[!, :NewEnergyCap]),
+ EndEnergyCap = sum(dfCap[!, :EndEnergyCap]),
+ StartChargeCap = sum(dfCap[!, :StartChargeCap]),
+ RetChargeCap = sum(dfCap[!, :RetChargeCap]),
+ NewChargeCap = sum(dfCap[!, :NewChargeCap]),
+ EndChargeCap = sum(dfCap[!, :EndChargeCap]))
- capenergy = zeros(size(inputs["RESOURCE_NAMES"]))
- retcapenergy = zeros(size(inputs["RESOURCE_NAMES"]))
- existingcapenergy = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["STOR_ALL"]
- if i in inputs["NEW_CAP_ENERGY"]
- capenergy[i] = value(EP[:vCAPENERGY][i])
- end
- if i in inputs["RET_CAP_ENERGY"]
- retcapenergy[i] = value(EP[:vRETCAPENERGY][i])
- end
- existingcapenergy[i] = MultiStage == 1 ? value(EP[:vEXISTINGCAPENERGY][i]) : existing_cap_mwh(gen[i])
- end
- if !isempty(inputs["VRE_STOR"])
- for i in inputs["VS_STOR"]
- if i in inputs["NEW_CAP_STOR"]
- capenergy[i] = value(EP[:vCAPENERGY_VS][i])
- end
- if i in inputs["RET_CAP_STOR"]
- retcapenergy[i] = value(EP[:vRETCAPENERGY_VS][i])
- end
- existingcapenergy[i] = existing_cap_mwh(gen[i]) # multistage functionality doesn't exist yet for VRE-storage resources
- end
- end
- dfCap = DataFrame(
- Resource = inputs["RESOURCE_NAMES"],
- Zone = zone_id.(gen),
- Retrofit_Id = retrofit_id.(gen),
- StartCap = MultiStage == 1 ? value.(EP[:vEXISTINGCAP]) : existing_cap_mw.(gen),
- RetCap = retcapdischarge[:],
- RetroCap = retrocapdischarge[:], #### Need to change later
- NewCap = capdischarge[:],
- EndCap = value.(EP[:eTotalCap]),
- CapacityConstraintDual = capacity_constraint_dual[:],
- StartEnergyCap = existingcapenergy[:],
- RetEnergyCap = retcapenergy[:],
- NewEnergyCap = capenergy[:],
- EndEnergyCap = existingcapenergy[:] - retcapenergy[:] + capenergy[:],
- StartChargeCap = existingcapcharge[:],
- RetChargeCap = retcapcharge[:],
- NewChargeCap = capcharge[:],
- EndChargeCap = existingcapcharge[:] - retcapcharge[:] + capcharge[:]
- )
- if setup["ParameterScale"] ==1
- dfCap.StartCap = dfCap.StartCap * ModelScalingFactor
- dfCap.RetCap = dfCap.RetCap * ModelScalingFactor
- dfCap.RetroCap = dfCap.RetroCap * ModelScalingFactor
- dfCap.NewCap = dfCap.NewCap * ModelScalingFactor
- dfCap.EndCap = dfCap.EndCap * ModelScalingFactor
- dfCap.CapacityConstraintDual = dfCap.CapacityConstraintDual * ModelScalingFactor
- dfCap.StartEnergyCap = dfCap.StartEnergyCap * ModelScalingFactor
- dfCap.RetEnergyCap = dfCap.RetEnergyCap * ModelScalingFactor
- dfCap.NewEnergyCap = dfCap.NewEnergyCap * ModelScalingFactor
- dfCap.EndEnergyCap = dfCap.EndEnergyCap * ModelScalingFactor
- dfCap.StartChargeCap = dfCap.StartChargeCap * ModelScalingFactor
- dfCap.RetChargeCap = dfCap.RetChargeCap * ModelScalingFactor
- dfCap.NewChargeCap = dfCap.NewChargeCap * ModelScalingFactor
- dfCap.EndChargeCap = dfCap.EndChargeCap * ModelScalingFactor
- end
- total = DataFrame(
- Resource = "Total", Zone = "n/a", Retrofit_Id = "n/a",
- StartCap = sum(dfCap[!,:StartCap]), RetCap = sum(dfCap[!,:RetCap]),
- NewCap = sum(dfCap[!,:NewCap]), EndCap = sum(dfCap[!,:EndCap]),
- RetroCap = sum(dfCap[!,:RetroCap]),
- CapacityConstraintDual = "n/a",
- StartEnergyCap = sum(dfCap[!,:StartEnergyCap]), RetEnergyCap = sum(dfCap[!,:RetEnergyCap]),
- NewEnergyCap = sum(dfCap[!,:NewEnergyCap]), EndEnergyCap = sum(dfCap[!,:EndEnergyCap]),
- StartChargeCap = sum(dfCap[!,:StartChargeCap]), RetChargeCap = sum(dfCap[!,:RetChargeCap]),
- NewChargeCap = sum(dfCap[!,:NewChargeCap]), EndChargeCap = sum(dfCap[!,:EndChargeCap])
- )
-
- dfCap = vcat(dfCap, total)
- CSV.write(joinpath(path, "capacity.csv"), dfCap)
- return dfCap
-end
\ No newline at end of file
+ dfCap = vcat(dfCap, total)
+ CSV.write(joinpath(path, "capacity.csv"), dfCap)
+ return dfCap
+end
diff --git a/src/write_outputs/write_capacityfactor.jl b/src/write_outputs/write_capacityfactor.jl
index 03c2a50e4b..17a019c2ff 100644
--- a/src/write_outputs/write_capacityfactor.jl
+++ b/src/write_outputs/write_capacityfactor.jl
@@ -15,40 +15,64 @@ function write_capacityfactor(path::AbstractString, inputs::Dict, setup::Dict, E
ELECTROLYZER = inputs["ELECTROLYZER"]
VRE_STOR = inputs["VRE_STOR"]
- dfCapacityfactor = DataFrame(Resource=inputs["RESOURCE_NAMES"], Zone=zone_id.(gen), AnnualSum=zeros(G), Capacity=zeros(G), CapacityFactor=zeros(G))
+ dfCapacityfactor = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zone_id.(gen),
+ AnnualSum = zeros(G),
+ Capacity = zeros(G),
+ CapacityFactor = zeros(G))
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
dfCapacityfactor.AnnualSum .= value.(EP[:vP]) * inputs["omega"] * scale_factor
dfCapacityfactor.Capacity .= value.(EP[:eTotalCap]) * scale_factor
if !isempty(VRE_STOR)
- SOLAR = setdiff(inputs["VS_SOLAR"],inputs["VS_WIND"])
- WIND = setdiff(inputs["VS_WIND"],inputs["VS_SOLAR"])
- SOLAR_WIND = intersect(inputs["VS_SOLAR"],inputs["VS_WIND"])
+ SOLAR = setdiff(inputs["VS_SOLAR"], inputs["VS_WIND"])
+ WIND = setdiff(inputs["VS_WIND"], inputs["VS_SOLAR"])
+ SOLAR_WIND = intersect(inputs["VS_SOLAR"], inputs["VS_WIND"])
gen_VRE_STOR = gen.VreStorage
if !isempty(SOLAR)
- dfCapacityfactor.AnnualSum[SOLAR] .= value.(EP[:vP_SOLAR][SOLAR, :]).data * inputs["omega"] * scale_factor
- dfCapacityfactor.Capacity[SOLAR] .= value.(EP[:eTotalCap_SOLAR][SOLAR]).data * scale_factor
+ dfCapacityfactor.AnnualSum[SOLAR] .= value.(EP[:vP_SOLAR][SOLAR, :]).data *
+ inputs["omega"] * scale_factor
+ dfCapacityfactor.Capacity[SOLAR] .= value.(EP[:eTotalCap_SOLAR][SOLAR]).data *
+ scale_factor
end
if !isempty(WIND)
- dfCapacityfactor.AnnualSum[WIND] .= value.(EP[:vP_WIND][WIND, :]).data * inputs["omega"] * scale_factor
- dfCapacityfactor.Capacity[WIND] .= value.(EP[:eTotalCap_WIND][WIND]).data * scale_factor
+ dfCapacityfactor.AnnualSum[WIND] .= value.(EP[:vP_WIND][WIND, :]).data *
+ inputs["omega"] * scale_factor
+ dfCapacityfactor.Capacity[WIND] .= value.(EP[:eTotalCap_WIND][WIND]).data *
+ scale_factor
end
if !isempty(SOLAR_WIND)
- dfCapacityfactor.AnnualSum[SOLAR_WIND] .= (value.(EP[:vP_WIND][SOLAR_WIND, :]).data
- + value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.!=0)])) * inputs["omega"] * scale_factor
- dfCapacityfactor.Capacity[SOLAR_WIND] .= (value.(EP[:eTotalCap_WIND][SOLAR_WIND]).data + value.(EP[:eTotalCap_SOLAR][SOLAR_WIND]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.!=0)])) * scale_factor
+ dfCapacityfactor.AnnualSum[SOLAR_WIND] .= (value.(EP[:vP_WIND][SOLAR_WIND,
+ :]).data
+ +
+ value.(EP[:vP_SOLAR][SOLAR_WIND,
+ :]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.wind .!= 0) .& (gen_VRE_STOR.solar .!= 0)])) *
+ inputs["omega"] * scale_factor
+ dfCapacityfactor.Capacity[SOLAR_WIND] .= (value.(EP[:eTotalCap_WIND][SOLAR_WIND]).data +
+ value.(EP[:eTotalCap_SOLAR][SOLAR_WIND]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.wind .!= 0) .& (gen_VRE_STOR.solar .!= 0)])) *
+ scale_factor
end
end
# We only calcualte the resulted capacity factor with total capacity > 1MW and total generation > 1MWh
- EXISTING = intersect(findall(x -> x >= 1, dfCapacityfactor.AnnualSum), findall(x -> x >= 1, dfCapacityfactor.Capacity))
+ EXISTING = intersect(findall(x -> x >= 1, dfCapacityfactor.AnnualSum),
+ findall(x -> x >= 1, dfCapacityfactor.Capacity))
# We calculate capacity factor for thermal, vre, hydro and must run. Not for storage and flexible demand
CF_GEN = intersect(union(THERM_ALL, VRE, HYDRO_RES, MUST_RUN, VRE_STOR), EXISTING)
- dfCapacityfactor.CapacityFactor[CF_GEN] .= (dfCapacityfactor.AnnualSum[CF_GEN] ./ dfCapacityfactor.Capacity[CF_GEN]) / sum(inputs["omega"][t] for t in 1:T)
+ dfCapacityfactor.CapacityFactor[CF_GEN] .= (dfCapacityfactor.AnnualSum[CF_GEN] ./
+ dfCapacityfactor.Capacity[CF_GEN]) /
+ sum(inputs["omega"][t] for t in 1:T)
# Capacity factor for electrolyzers is based on vUSE variable not vP
if (!isempty(ELECTROLYZER))
- dfCapacityfactor.AnnualSum[ELECTROLYZER] .= value.(EP[:vUSE][ELECTROLYZER, :]).data * inputs["omega"] * scale_factor
- dfCapacityfactor.CapacityFactor[ELECTROLYZER] .= (dfCapacityfactor.AnnualSum[ELECTROLYZER] ./ dfCapacityfactor.Capacity[ELECTROLYZER]) / sum(inputs["omega"][t] for t in 1:T)
+ dfCapacityfactor.AnnualSum[ELECTROLYZER] .= value.(EP[:vUSE][ELECTROLYZER,
+ :]).data * inputs["omega"] *
+ scale_factor
+ dfCapacityfactor.CapacityFactor[ELECTROLYZER] .= (dfCapacityfactor.AnnualSum[ELECTROLYZER] ./
+ dfCapacityfactor.Capacity[ELECTROLYZER]) /
+ sum(inputs["omega"][t]
+ for t in 1:T)
end
CSV.write(joinpath(path, "capacityfactor.csv"), dfCapacityfactor)
diff --git a/src/write_outputs/write_charge.jl b/src/write_outputs/write_charge.jl
index 74d00ad65a..1e0e835633 100644
--- a/src/write_outputs/write_charge.jl
+++ b/src/write_outputs/write_charge.jl
@@ -4,42 +4,44 @@
Function for writing the charging energy values of the different storage technologies.
"""
function write_charge(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- ELECTROLYZER = inputs["ELECTROLYZER"]
- VRE_STOR = inputs["VRE_STOR"]
- VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
-
- # Power withdrawn to charge each resource in each time step
- dfCharge = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones, AnnualSum = Array{Union{Missing,Float64}}(undef, G))
- charge = zeros(G,T)
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ ELECTROLYZER = inputs["ELECTROLYZER"]
+ VRE_STOR = inputs["VRE_STOR"]
+ VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- if !isempty(STOR_ALL)
- charge[STOR_ALL, :] = value.(EP[:vCHARGE][STOR_ALL, :]) * scale_factor
- end
- if !isempty(FLEX)
- charge[FLEX, :] = value.(EP[:vCHARGE_FLEX][FLEX, :]) * scale_factor
- end
- if !isempty(ELECTROLYZER)
- charge[ELECTROLYZER, :] = value.(EP[:vUSE][ELECTROLYZER, :]) * scale_factor
- end
- if !isempty(VS_STOR)
- charge[VS_STOR, :] = value.(EP[:vCHARGE_VRE_STOR][VS_STOR, :]) * scale_factor
- end
+ # Power withdrawn to charge each resource in each time step
+ dfCharge = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ AnnualSum = Array{Union{Missing, Float64}}(undef, G))
+ charge = zeros(G, T)
- dfCharge.AnnualSum .= charge * inputs["omega"]
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ if !isempty(STOR_ALL)
+ charge[STOR_ALL, :] = value.(EP[:vCHARGE][STOR_ALL, :]) * scale_factor
+ end
+ if !isempty(FLEX)
+ charge[FLEX, :] = value.(EP[:vCHARGE_FLEX][FLEX, :]) * scale_factor
+ end
+ if !isempty(ELECTROLYZER)
+ charge[ELECTROLYZER, :] = value.(EP[:vUSE][ELECTROLYZER, :]) * scale_factor
+ end
+ if !isempty(VS_STOR)
+ charge[VS_STOR, :] = value.(EP[:vCHARGE_VRE_STOR][VS_STOR, :]) * scale_factor
+ end
- filepath = joinpath(path, "charge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfCharge)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, charge, dfCharge)
- end
- return nothing
+ dfCharge.AnnualSum .= charge * inputs["omega"]
+
+ filepath = joinpath(path, "charge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfCharge)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, charge, dfCharge)
+ end
+ return nothing
end
diff --git a/src/write_outputs/write_charging_cost.jl b/src/write_outputs/write_charging_cost.jl
index 7c2c84a812..00410b6b59 100644
--- a/src/write_outputs/write_charging_cost.jl
+++ b/src/write_outputs/write_charging_cost.jl
@@ -1,38 +1,46 @@
function write_charging_cost(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- ELECTROLYZER = inputs["ELECTROLYZER"]
- VRE_STOR = inputs["VRE_STOR"]
- VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ ELECTROLYZER = inputs["ELECTROLYZER"]
+ VRE_STOR = inputs["VRE_STOR"]
+ VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
price = locational_marginal_price(EP, inputs, setup)
- dfChargingcost = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters, AnnualSum = Array{Float64}(undef, G),)
- chargecost = zeros(G, T)
- if !isempty(STOR_ALL)
- chargecost[STOR_ALL, :] .= (value.(EP[:vCHARGE][STOR_ALL, :]).data) .* transpose(price)[zone_id.(gen.Storage), :]
- end
- if !isempty(FLEX)
- chargecost[FLEX, :] .= value.(EP[:vP][FLEX, :]) .* transpose(price)[zone_id.(gen.FlexDemand), :]
- end
- if !isempty(ELECTROLYZER)
- chargecost[ELECTROLYZER, :] .= (value.(EP[:vUSE][ELECTROLYZER, :]).data) .* transpose(price)[zone_id.(gen.Electrolyzer), :]
- end
- if !isempty(VS_STOR)
- chargecost[VS_STOR, :] .= value.(EP[:vCHARGE_VRE_STOR][VS_STOR, :].data) .* transpose(price)[zone_id.(gen[VS_STOR]), :]
- end
- if setup["ParameterScale"] == 1
- chargecost *= ModelScalingFactor
- end
- dfChargingcost.AnnualSum .= chargecost * inputs["omega"]
- write_simple_csv(joinpath(path, "ChargingCost.csv"), dfChargingcost)
- return dfChargingcost
+ dfChargingcost = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters,
+ AnnualSum = Array{Float64}(undef, G))
+ chargecost = zeros(G, T)
+ if !isempty(STOR_ALL)
+ chargecost[STOR_ALL, :] .= (value.(EP[:vCHARGE][STOR_ALL, :]).data) .*
+ transpose(price)[zone_id.(gen.Storage), :]
+ end
+ if !isempty(FLEX)
+ chargecost[FLEX, :] .= value.(EP[:vP][FLEX, :]) .*
+ transpose(price)[zone_id.(gen.FlexDemand), :]
+ end
+ if !isempty(ELECTROLYZER)
+ chargecost[ELECTROLYZER, :] .= (value.(EP[:vUSE][ELECTROLYZER, :]).data) .*
+ transpose(price)[zone_id.(gen.Electrolyzer), :]
+ end
+ if !isempty(VS_STOR)
+ chargecost[VS_STOR, :] .= value.(EP[:vCHARGE_VRE_STOR][VS_STOR, :].data) .*
+ transpose(price)[zone_id.(gen[VS_STOR]), :]
+ end
+ if setup["ParameterScale"] == 1
+ chargecost *= ModelScalingFactor
+ end
+ dfChargingcost.AnnualSum .= chargecost * inputs["omega"]
+ write_simple_csv(joinpath(path, "ChargingCost.csv"), dfChargingcost)
+ return dfChargingcost
end
diff --git a/src/write_outputs/write_co2.jl b/src/write_outputs/write_co2.jl
index c737652323..a096dc55ab 100644
--- a/src/write_outputs/write_co2.jl
+++ b/src/write_outputs/write_co2.jl
@@ -9,13 +9,17 @@ function write_co2(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
write_co2_capture_plant(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
end
-
-function write_co2_emissions_plant(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_co2_emissions_plant(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
gen = inputs["RESOURCES"]
G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
# CO2 emissions by plant
- dfEmissions_plant = DataFrame(Resource=inputs["RESOURCE_NAMES"], Zone=zone_id.(gen), AnnualSum=zeros(G))
+ dfEmissions_plant = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zone_id.(gen),
+ AnnualSum = zeros(G))
emissions_plant = value.(EP[:eEmissionsByPlant])
if setup["ParameterScale"] == 1
@@ -26,7 +30,7 @@ function write_co2_emissions_plant(path::AbstractString, inputs::Dict, setup::Di
filepath = joinpath(path, "emissions_plant.csv")
if setup["WriteOutputs"] == "annual"
write_annual(filepath, dfEmissions_plant)
- else # setup["WriteOutputs"] == "full"
+ else # setup["WriteOutputs"] == "full"
write_fulltimeseries(filepath, emissions_plant, dfEmissions_plant)
end
return nothing
@@ -39,7 +43,9 @@ function write_co2_capture_plant(path::AbstractString, inputs::Dict, setup::Dict
T = inputs["T"] # Number of time steps (hours)
Z = inputs["Z"] # Number of zones
- dfCapturedEmissions_plant = DataFrame(Resource=inputs["RESOURCE_NAMES"][CCS], Zone=zone_id.(gen[CCS]), AnnualSum=zeros(length(CCS)))
+ dfCapturedEmissions_plant = DataFrame(Resource = inputs["RESOURCE_NAMES"][CCS],
+ Zone = zone_id.(gen[CCS]),
+ AnnualSum = zeros(length(CCS)))
if !isempty(CCS)
# Captured CO2 emissions by plant
emissions_captured_plant = (value.(EP[:eEmissionsCaptureByPlant]).data)
@@ -53,8 +59,10 @@ function write_co2_capture_plant(path::AbstractString, inputs::Dict, setup::Dict
if setup["WriteOutputs"] == "annual"
write_annual(filepath, dfCapturedEmissions_plant)
else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, emissions_captured_plant, dfCapturedEmissions_plant)
+ write_fulltimeseries(filepath,
+ emissions_captured_plant,
+ dfCapturedEmissions_plant)
end
return nothing
end
-end
\ No newline at end of file
+end
diff --git a/src/write_outputs/write_costs.jl b/src/write_outputs/write_costs.jl
index 8cbe60e5c9..0124c67828 100644
--- a/src/write_outputs/write_costs.jl
+++ b/src/write_outputs/write_costs.jl
@@ -4,246 +4,310 @@
Function for writing the costs pertaining to the objective function (fixed, variable O&M etc.).
"""
function write_costs(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- ## Cost results
- gen = inputs["RESOURCES"]
- SEG = inputs["SEG"] # Number of lines
- Z = inputs["Z"] # Number of zones
- T = inputs["T"] # Number of time steps (hours)
- VRE_STOR = inputs["VRE_STOR"]
- ELECTROLYZER = inputs["ELECTROLYZER"]
-
- cost_list = ["cTotal", "cFix", "cVar", "cFuel" ,"cNSE", "cStart", "cUnmetRsv", "cNetworkExp", "cUnmetPolicyPenalty", "cCO2"]
- if !isempty(VRE_STOR)
- push!(cost_list, "cGridConnection")
- end
- if !isempty(ELECTROLYZER)
- push!(cost_list, "cHydrogenRevenue")
- end
- dfCost = DataFrame(Costs = cost_list)
-
- cVar = value(EP[:eTotalCVarOut])+ (!isempty(inputs["STOR_ALL"]) ? value(EP[:eTotalCVarIn]) : 0.0) + (!isempty(inputs["FLEX"]) ? value(EP[:eTotalCVarFlexIn]) : 0.0)
- cFix = value(EP[:eTotalCFix]) + (!isempty(inputs["STOR_ALL"]) ? value(EP[:eTotalCFixEnergy]) : 0.0) + (!isempty(inputs["STOR_ASYMMETRIC"]) ? value(EP[:eTotalCFixCharge]) : 0.0)
-
- cFuel = value.(EP[:eTotalCFuelOut])
-
- if !isempty(VRE_STOR)
- cFix += ((!isempty(inputs["VS_DC"]) ? value(EP[:eTotalCFixDC]) : 0.0) + (!isempty(inputs["VS_SOLAR"]) ? value(EP[:eTotalCFixSolar]) : 0.0) + (!isempty(inputs["VS_WIND"]) ? value(EP[:eTotalCFixWind]) : 0.0))
- cVar += ((!isempty(inputs["VS_SOLAR"]) ? value(EP[:eTotalCVarOutSolar]) : 0.0) + (!isempty(inputs["VS_WIND"]) ? value(EP[:eTotalCVarOutWind]) : 0.0))
- if !isempty(inputs["VS_STOR"])
- cFix += ((!isempty(inputs["VS_STOR"]) ? value(EP[:eTotalCFixStor]) : 0.0) + (!isempty(inputs["VS_ASYM_DC_CHARGE"]) ? value(EP[:eTotalCFixCharge_DC]) : 0.0) + (!isempty(inputs["VS_ASYM_DC_DISCHARGE"]) ? value(EP[:eTotalCFixDischarge_DC]) : 0.0) + (!isempty(inputs["VS_ASYM_AC_CHARGE"]) ? value(EP[:eTotalCFixCharge_AC]) : 0.0) + (!isempty(inputs["VS_ASYM_AC_DISCHARGE"]) ? value(EP[:eTotalCFixDischarge_AC]) : 0.0))
- cVar += (!isempty(inputs["VS_STOR"]) ? value(EP[:eTotalCVarStor]) : 0.0)
- end
- total_cost =[value(EP[:eObj]), cFix, cVar, cFuel, value(EP[:eTotalCNSE]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
- else
- total_cost = [value(EP[:eObj]), cFix, cVar, cFuel, value(EP[:eTotalCNSE]), 0.0, 0.0, 0.0, 0.0, 0.0]
- end
-
- if !isempty(ELECTROLYZER)
- push!(total_cost,(!isempty(inputs["ELECTROLYZER"]) ? -1*value(EP[:eTotalHydrogenValue]) : 0.0))
- end
-
- dfCost[!,Symbol("Total")] = total_cost
-
- if setup["ParameterScale"] == 1
- dfCost.Total *= ModelScalingFactor^2
- end
-
- if setup["UCommit"]>=1
- dfCost[6,2] = value(EP[:eTotalCStart]) + value(EP[:eTotalCFuelStart])
- end
-
- if setup["OperationalReserves"]==1
- dfCost[7,2] = value(EP[:eTotalCRsvPen])
- end
-
- if setup["NetworkExpansion"] == 1 && Z > 1
- dfCost[8,2] = value(EP[:eTotalCNetworkExp])
- end
-
- if haskey(inputs, "dfCapRes_slack")
- dfCost[9,2] += value(EP[:eCTotalCapResSlack])
- end
-
- if haskey(inputs, "dfESR_slack")
- dfCost[9,2] += value(EP[:eCTotalESRSlack])
- end
-
- if haskey(inputs, "dfCO2Cap_slack")
- dfCost[9,2] += value(EP[:eCTotalCO2CapSlack])
- end
-
- if haskey(inputs, "MinCapPriceCap")
- dfCost[9,2] += value(EP[:eTotalCMinCapSlack])
- end
-
- if !isempty(VRE_STOR)
- dfCost[!,2][11] = value(EP[:eTotalCGrid]) * (setup["ParameterScale"] == 1 ? ModelScalingFactor^2 : 1)
- end
-
- if any(co2_capture_fraction.(gen) .!= 0)
- dfCost[10,2] += value(EP[:eTotaleCCO2Sequestration])
- end
-
- if setup["ParameterScale"] == 1
- dfCost[6,2] *= ModelScalingFactor^2
- dfCost[7,2] *= ModelScalingFactor^2
- dfCost[8,2] *= ModelScalingFactor^2
- dfCost[9,2] *= ModelScalingFactor^2
- dfCost[10,2] *= ModelScalingFactor^2
- end
-
- for z in 1:Z
- tempCTotal = 0.0
- tempCFix = 0.0
- tempCVar = 0.0
- tempCFuel = 0.0
- tempCStart = 0.0
- tempCNSE = 0.0
- tempHydrogenValue = 0.0
- tempCCO2 = 0.0
-
- Y_ZONE = resources_in_zone_by_rid(gen,z)
- STOR_ALL_ZONE = intersect(inputs["STOR_ALL"], Y_ZONE)
- STOR_ASYMMETRIC_ZONE = intersect(inputs["STOR_ASYMMETRIC"], Y_ZONE)
- FLEX_ZONE = intersect(inputs["FLEX"], Y_ZONE)
- COMMIT_ZONE = intersect(inputs["COMMIT"], Y_ZONE)
- ELECTROLYZERS_ZONE = intersect(inputs["ELECTROLYZER"], Y_ZONE)
- CCS_ZONE = intersect(inputs["CCS"], Y_ZONE)
-
- eCFix = sum(value.(EP[:eCFix][Y_ZONE]))
- tempCFix += eCFix
- tempCTotal += eCFix
-
- tempCVar = sum(value.(EP[:eCVar_out][Y_ZONE,:]))
- tempCTotal += tempCVar
-
- tempCFuel = sum(value.(EP[:ePlantCFuelOut][Y_ZONE,:]))
- tempCTotal += tempCFuel
-
- if !isempty(STOR_ALL_ZONE)
- eCVar_in = sum(value.(EP[:eCVar_in][STOR_ALL_ZONE,:]))
- tempCVar += eCVar_in
- eCFixEnergy = sum(value.(EP[:eCFixEnergy][STOR_ALL_ZONE]))
- tempCFix += eCFixEnergy
- tempCTotal += eCVar_in + eCFixEnergy
- end
- if !isempty(STOR_ASYMMETRIC_ZONE)
- eCFixCharge = sum(value.(EP[:eCFixCharge][STOR_ASYMMETRIC_ZONE]))
- tempCFix += eCFixCharge
- tempCTotal += eCFixCharge
- end
- if !isempty(FLEX_ZONE)
- eCVarFlex_in = sum(value.(EP[:eCVarFlex_in][FLEX_ZONE,:]))
- tempCVar += eCVarFlex_in
- tempCTotal += eCVarFlex_in
- end
- if !isempty(VRE_STOR)
- gen_VRE_STOR = gen.VreStorage
- Y_ZONE_VRE_STOR = resources_in_zone_by_rid(gen_VRE_STOR, z)
-
- # Fixed Costs
- eCFix_VRE_STOR = 0.0
- SOLAR_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_SOLAR"])
- if !isempty(SOLAR_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixSolar][SOLAR_ZONE_VRE_STOR]))
- end
- WIND_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_WIND"])
- if !isempty(WIND_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixWind][WIND_ZONE_VRE_STOR]))
- end
- DC_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_DC"])
- if !isempty(DC_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixDC][DC_ZONE_VRE_STOR]))
- end
- STOR_ALL_ZONE_VRE_STOR = intersect(inputs["VS_STOR"], Y_ZONE_VRE_STOR)
- if !isempty(STOR_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixEnergy_VS][STOR_ALL_ZONE_VRE_STOR]))
- DC_CHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_DC_CHARGE"], Y_ZONE_VRE_STOR)
- if !isempty(DC_CHARGE_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixCharge_DC][DC_CHARGE_ALL_ZONE_VRE_STOR]))
- end
- DC_DISCHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_DC_DISCHARGE"], Y_ZONE_VRE_STOR)
- if !isempty(DC_DISCHARGE_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixDischarge_DC][DC_DISCHARGE_ALL_ZONE_VRE_STOR]))
- end
- AC_DISCHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_AC_DISCHARGE"], Y_ZONE_VRE_STOR)
- if !isempty(AC_DISCHARGE_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixDischarge_AC][AC_DISCHARGE_ALL_ZONE_VRE_STOR]))
- end
- AC_CHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_AC_CHARGE"], Y_ZONE_VRE_STOR)
- if !isempty(AC_CHARGE_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixCharge_AC][AC_CHARGE_ALL_ZONE_VRE_STOR]))
- end
- end
- tempCFix += eCFix_VRE_STOR
-
- # Variable Costs
- eCVar_VRE_STOR = 0.0
- if !isempty(SOLAR_ZONE_VRE_STOR)
- eCVar_VRE_STOR += sum(value.(EP[:eCVarOutSolar][SOLAR_ZONE_VRE_STOR,:]))
- end
- if !isempty(WIND_ZONE_VRE_STOR)
- eCVar_VRE_STOR += sum(value.(EP[:eCVarOutWind][WIND_ZONE_VRE_STOR, :]))
- end
- if !isempty(STOR_ALL_ZONE_VRE_STOR)
- vom_map = Dict(
- DC_CHARGE_ALL_ZONE_VRE_STOR => :eCVar_Charge_DC,
- DC_DISCHARGE_ALL_ZONE_VRE_STOR => :eCVar_Discharge_DC,
- AC_DISCHARGE_ALL_ZONE_VRE_STOR => :eCVar_Discharge_AC,
- AC_CHARGE_ALL_ZONE_VRE_STOR => :eCVar_Charge_AC
- )
- for (set, symbol) in vom_map
- if !isempty(set)
- eCVar_VRE_STOR += sum(value.(EP[symbol][set, :]))
- end
- end
- end
- tempCVar += eCVar_VRE_STOR
-
- # Total Added Costs
- tempCTotal += (eCFix_VRE_STOR + eCVar_VRE_STOR)
- end
-
- if setup["UCommit"] >= 1 && !isempty(COMMIT_ZONE)
- eCStart = sum(value.(EP[:eCStart][COMMIT_ZONE,:])) + sum(value.(EP[:ePlantCFuelStart][COMMIT_ZONE,:]))
- tempCStart += eCStart
- tempCTotal += eCStart
- end
-
- if !isempty(ELECTROLYZERS_ZONE)
- tempHydrogenValue = -1*sum(value.(EP[:eHydrogenValue][ELECTROLYZERS_ZONE,:]))
- tempCTotal += tempHydrogenValue
- end
-
-
- tempCNSE = sum(value.(EP[:eCNSE][:,:,z]))
- tempCTotal += tempCNSE
-
- # if any(dfGen.CO2_Capture_Fraction .!=0)
- if !isempty(CCS_ZONE)
- tempCCO2 = sum(value.(EP[:ePlantCCO2Sequestration][CCS_ZONE]))
- tempCTotal += tempCCO2
- end
-
- if setup["ParameterScale"] == 1
- tempCTotal *= ModelScalingFactor^2
- tempCFix *= ModelScalingFactor^2
- tempCVar *= ModelScalingFactor^2
- tempCFuel *= ModelScalingFactor^2
- tempCNSE *= ModelScalingFactor^2
- tempCStart *= ModelScalingFactor^2
- tempHydrogenValue *= ModelScalingFactor^2
- tempCCO2 *= ModelScalingFactor^2
- end
- temp_cost_list = [tempCTotal, tempCFix, tempCVar, tempCFuel,tempCNSE, tempCStart, "-", "-", "-", tempCCO2]
- if !isempty(VRE_STOR)
- push!(temp_cost_list, "-")
- end
- if !isempty(ELECTROLYZERS_ZONE)
- push!(temp_cost_list,tempHydrogenValue)
- end
-
- dfCost[!,Symbol("Zone$z")] = temp_cost_list
- end
- CSV.write(joinpath(path, "costs.csv"), dfCost)
+ ## Cost results
+ gen = inputs["RESOURCES"]
+ SEG = inputs["SEG"] # Number of lines
+ Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ VRE_STOR = inputs["VRE_STOR"]
+ ELECTROLYZER = inputs["ELECTROLYZER"]
+
+ cost_list = [
+ "cTotal",
+ "cFix",
+ "cVar",
+ "cFuel",
+ "cNSE",
+ "cStart",
+ "cUnmetRsv",
+ "cNetworkExp",
+ "cUnmetPolicyPenalty",
+ "cCO2"
+ ]
+ if !isempty(VRE_STOR)
+ push!(cost_list, "cGridConnection")
+ end
+ if !isempty(ELECTROLYZER)
+ push!(cost_list, "cHydrogenRevenue")
+ end
+ dfCost = DataFrame(Costs = cost_list)
+
+ cVar = value(EP[:eTotalCVarOut]) +
+ (!isempty(inputs["STOR_ALL"]) ? value(EP[:eTotalCVarIn]) : 0.0) +
+ (!isempty(inputs["FLEX"]) ? value(EP[:eTotalCVarFlexIn]) : 0.0)
+ cFix = value(EP[:eTotalCFix]) +
+ (!isempty(inputs["STOR_ALL"]) ? value(EP[:eTotalCFixEnergy]) : 0.0) +
+ (!isempty(inputs["STOR_ASYMMETRIC"]) ? value(EP[:eTotalCFixCharge]) : 0.0)
+
+ cFuel = value.(EP[:eTotalCFuelOut])
+
+ if !isempty(VRE_STOR)
+ cFix += ((!isempty(inputs["VS_DC"]) ? value(EP[:eTotalCFixDC]) : 0.0) +
+ (!isempty(inputs["VS_SOLAR"]) ? value(EP[:eTotalCFixSolar]) : 0.0) +
+ (!isempty(inputs["VS_WIND"]) ? value(EP[:eTotalCFixWind]) : 0.0))
+ cVar += ((!isempty(inputs["VS_SOLAR"]) ? value(EP[:eTotalCVarOutSolar]) : 0.0) +
+ (!isempty(inputs["VS_WIND"]) ? value(EP[:eTotalCVarOutWind]) : 0.0))
+ if !isempty(inputs["VS_STOR"])
+ cFix += ((!isempty(inputs["VS_STOR"]) ? value(EP[:eTotalCFixStor]) : 0.0) +
+ (!isempty(inputs["VS_ASYM_DC_CHARGE"]) ?
+ value(EP[:eTotalCFixCharge_DC]) : 0.0) +
+ (!isempty(inputs["VS_ASYM_DC_DISCHARGE"]) ?
+ value(EP[:eTotalCFixDischarge_DC]) : 0.0) +
+ (!isempty(inputs["VS_ASYM_AC_CHARGE"]) ?
+ value(EP[:eTotalCFixCharge_AC]) : 0.0) +
+ (!isempty(inputs["VS_ASYM_AC_DISCHARGE"]) ?
+ value(EP[:eTotalCFixDischarge_AC]) : 0.0))
+ cVar += (!isempty(inputs["VS_STOR"]) ? value(EP[:eTotalCVarStor]) : 0.0)
+ end
+ total_cost = [
+ value(EP[:eObj]),
+ cFix,
+ cVar,
+ cFuel,
+ value(EP[:eTotalCNSE]),
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0
+ ]
+ else
+ total_cost = [
+ value(EP[:eObj]),
+ cFix,
+ cVar,
+ cFuel,
+ value(EP[:eTotalCNSE]),
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0
+ ]
+ end
+
+ if !isempty(ELECTROLYZER)
+ push!(total_cost,
+ (!isempty(inputs["ELECTROLYZER"]) ? -1 * value(EP[:eTotalHydrogenValue]) : 0.0))
+ end
+
+ dfCost[!, Symbol("Total")] = total_cost
+
+ if setup["ParameterScale"] == 1
+ dfCost.Total *= ModelScalingFactor^2
+ end
+
+ if setup["UCommit"] >= 1
+ dfCost[6, 2] = value(EP[:eTotalCStart]) + value(EP[:eTotalCFuelStart])
+ end
+
+ if setup["OperationalReserves"] == 1
+ dfCost[7, 2] = value(EP[:eTotalCRsvPen])
+ end
+
+ if setup["NetworkExpansion"] == 1 && Z > 1
+ dfCost[8, 2] = value(EP[:eTotalCNetworkExp])
+ end
+
+ if haskey(inputs, "dfCapRes_slack")
+ dfCost[9, 2] += value(EP[:eCTotalCapResSlack])
+ end
+
+ if haskey(inputs, "dfESR_slack")
+ dfCost[9, 2] += value(EP[:eCTotalESRSlack])
+ end
+
+ if haskey(inputs, "dfCO2Cap_slack")
+ dfCost[9, 2] += value(EP[:eCTotalCO2CapSlack])
+ end
+
+ if haskey(inputs, "MinCapPriceCap")
+ dfCost[9, 2] += value(EP[:eTotalCMinCapSlack])
+ end
+
+ if !isempty(VRE_STOR)
+ dfCost[!, 2][11] = value(EP[:eTotalCGrid]) *
+ (setup["ParameterScale"] == 1 ? ModelScalingFactor^2 : 1)
+ end
+
+ if any(co2_capture_fraction.(gen) .!= 0)
+ dfCost[10, 2] += value(EP[:eTotaleCCO2Sequestration])
+ end
+
+ if setup["ParameterScale"] == 1
+ dfCost[6, 2] *= ModelScalingFactor^2
+ dfCost[7, 2] *= ModelScalingFactor^2
+ dfCost[8, 2] *= ModelScalingFactor^2
+ dfCost[9, 2] *= ModelScalingFactor^2
+ dfCost[10, 2] *= ModelScalingFactor^2
+ end
+
+ for z in 1:Z
+ tempCTotal = 0.0
+ tempCFix = 0.0
+ tempCVar = 0.0
+ tempCFuel = 0.0
+ tempCStart = 0.0
+ tempCNSE = 0.0
+ tempHydrogenValue = 0.0
+ tempCCO2 = 0.0
+
+ Y_ZONE = resources_in_zone_by_rid(gen, z)
+ STOR_ALL_ZONE = intersect(inputs["STOR_ALL"], Y_ZONE)
+ STOR_ASYMMETRIC_ZONE = intersect(inputs["STOR_ASYMMETRIC"], Y_ZONE)
+ FLEX_ZONE = intersect(inputs["FLEX"], Y_ZONE)
+ COMMIT_ZONE = intersect(inputs["COMMIT"], Y_ZONE)
+ ELECTROLYZERS_ZONE = intersect(inputs["ELECTROLYZER"], Y_ZONE)
+ CCS_ZONE = intersect(inputs["CCS"], Y_ZONE)
+
+ eCFix = sum(value.(EP[:eCFix][Y_ZONE]))
+ tempCFix += eCFix
+ tempCTotal += eCFix
+
+ tempCVar = sum(value.(EP[:eCVar_out][Y_ZONE, :]))
+ tempCTotal += tempCVar
+
+ tempCFuel = sum(value.(EP[:ePlantCFuelOut][Y_ZONE, :]))
+ tempCTotal += tempCFuel
+
+ if !isempty(STOR_ALL_ZONE)
+ eCVar_in = sum(value.(EP[:eCVar_in][STOR_ALL_ZONE, :]))
+ tempCVar += eCVar_in
+ eCFixEnergy = sum(value.(EP[:eCFixEnergy][STOR_ALL_ZONE]))
+ tempCFix += eCFixEnergy
+ tempCTotal += eCVar_in + eCFixEnergy
+ end
+ if !isempty(STOR_ASYMMETRIC_ZONE)
+ eCFixCharge = sum(value.(EP[:eCFixCharge][STOR_ASYMMETRIC_ZONE]))
+ tempCFix += eCFixCharge
+ tempCTotal += eCFixCharge
+ end
+ if !isempty(FLEX_ZONE)
+ eCVarFlex_in = sum(value.(EP[:eCVarFlex_in][FLEX_ZONE, :]))
+ tempCVar += eCVarFlex_in
+ tempCTotal += eCVarFlex_in
+ end
+ if !isempty(VRE_STOR)
+ gen_VRE_STOR = gen.VreStorage
+ Y_ZONE_VRE_STOR = resources_in_zone_by_rid(gen_VRE_STOR, z)
+
+ # Fixed Costs
+ eCFix_VRE_STOR = 0.0
+ SOLAR_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_SOLAR"])
+ if !isempty(SOLAR_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixSolar][SOLAR_ZONE_VRE_STOR]))
+ end
+ WIND_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_WIND"])
+ if !isempty(WIND_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixWind][WIND_ZONE_VRE_STOR]))
+ end
+ DC_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_DC"])
+ if !isempty(DC_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixDC][DC_ZONE_VRE_STOR]))
+ end
+ STOR_ALL_ZONE_VRE_STOR = intersect(inputs["VS_STOR"], Y_ZONE_VRE_STOR)
+ if !isempty(STOR_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixEnergy_VS][STOR_ALL_ZONE_VRE_STOR]))
+ DC_CHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_DC_CHARGE"],
+ Y_ZONE_VRE_STOR)
+ if !isempty(DC_CHARGE_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixCharge_DC][DC_CHARGE_ALL_ZONE_VRE_STOR]))
+ end
+ DC_DISCHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ Y_ZONE_VRE_STOR)
+ if !isempty(DC_DISCHARGE_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixDischarge_DC][DC_DISCHARGE_ALL_ZONE_VRE_STOR]))
+ end
+ AC_DISCHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_AC_DISCHARGE"],
+ Y_ZONE_VRE_STOR)
+ if !isempty(AC_DISCHARGE_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixDischarge_AC][AC_DISCHARGE_ALL_ZONE_VRE_STOR]))
+ end
+ AC_CHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_AC_CHARGE"],
+ Y_ZONE_VRE_STOR)
+ if !isempty(AC_CHARGE_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixCharge_AC][AC_CHARGE_ALL_ZONE_VRE_STOR]))
+ end
+ end
+ tempCFix += eCFix_VRE_STOR
+
+ # Variable Costs
+ eCVar_VRE_STOR = 0.0
+ if !isempty(SOLAR_ZONE_VRE_STOR)
+ eCVar_VRE_STOR += sum(value.(EP[:eCVarOutSolar][SOLAR_ZONE_VRE_STOR, :]))
+ end
+ if !isempty(WIND_ZONE_VRE_STOR)
+ eCVar_VRE_STOR += sum(value.(EP[:eCVarOutWind][WIND_ZONE_VRE_STOR, :]))
+ end
+ if !isempty(STOR_ALL_ZONE_VRE_STOR)
+ vom_map = Dict(DC_CHARGE_ALL_ZONE_VRE_STOR => :eCVar_Charge_DC,
+ DC_DISCHARGE_ALL_ZONE_VRE_STOR => :eCVar_Discharge_DC,
+ AC_DISCHARGE_ALL_ZONE_VRE_STOR => :eCVar_Discharge_AC,
+ AC_CHARGE_ALL_ZONE_VRE_STOR => :eCVar_Charge_AC)
+ for (set, symbol) in vom_map
+ if !isempty(set)
+ eCVar_VRE_STOR += sum(value.(EP[symbol][set, :]))
+ end
+ end
+ end
+ tempCVar += eCVar_VRE_STOR
+
+ # Total Added Costs
+ tempCTotal += (eCFix_VRE_STOR + eCVar_VRE_STOR)
+ end
+
+ if setup["UCommit"] >= 1 && !isempty(COMMIT_ZONE)
+ eCStart = sum(value.(EP[:eCStart][COMMIT_ZONE, :])) +
+ sum(value.(EP[:ePlantCFuelStart][COMMIT_ZONE, :]))
+ tempCStart += eCStart
+ tempCTotal += eCStart
+ end
+
+ if !isempty(ELECTROLYZERS_ZONE)
+ tempHydrogenValue = -1 * sum(value.(EP[:eHydrogenValue][ELECTROLYZERS_ZONE, :]))
+ tempCTotal += tempHydrogenValue
+ end
+
+ tempCNSE = sum(value.(EP[:eCNSE][:, :, z]))
+ tempCTotal += tempCNSE
+
+ # if any(dfGen.CO2_Capture_Fraction .!=0)
+ if !isempty(CCS_ZONE)
+ tempCCO2 = sum(value.(EP[:ePlantCCO2Sequestration][CCS_ZONE]))
+ tempCTotal += tempCCO2
+ end
+
+ if setup["ParameterScale"] == 1
+ tempCTotal *= ModelScalingFactor^2
+ tempCFix *= ModelScalingFactor^2
+ tempCVar *= ModelScalingFactor^2
+ tempCFuel *= ModelScalingFactor^2
+ tempCNSE *= ModelScalingFactor^2
+ tempCStart *= ModelScalingFactor^2
+ tempHydrogenValue *= ModelScalingFactor^2
+ tempCCO2 *= ModelScalingFactor^2
+ end
+ temp_cost_list = [
+ tempCTotal,
+ tempCFix,
+ tempCVar,
+ tempCFuel,
+ tempCNSE,
+ tempCStart,
+ "-",
+ "-",
+ "-",
+ tempCCO2
+ ]
+ if !isempty(VRE_STOR)
+ push!(temp_cost_list, "-")
+ end
+ if !isempty(ELECTROLYZERS_ZONE)
+ push!(temp_cost_list, tempHydrogenValue)
+ end
+
+ dfCost[!, Symbol("Zone$z")] = temp_cost_list
+ end
+ CSV.write(joinpath(path, "costs.csv"), dfCost)
end
diff --git a/src/write_outputs/write_curtailment.jl b/src/write_outputs/write_curtailment.jl
index 6cb151f448..8ee244f105 100644
--- a/src/write_outputs/write_curtailment.jl
+++ b/src/write_outputs/write_curtailment.jl
@@ -5,42 +5,59 @@ Function for writing the curtailment values of the different variable renewable
co-located).
"""
function write_curtailment(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- VRE = inputs["VRE"]
- dfCurtailment = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zone_id.(gen), AnnualSum = zeros(G))
- curtailment = zeros(G, T)
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- curtailment[VRE, :] = scale_factor * (value.(EP[:eTotalCap][VRE]) .* inputs["pP_Max"][VRE, :] .- value.(EP[:vP][VRE, :]))
-
- VRE_STOR = inputs["VRE_STOR"]
- if !isempty(VRE_STOR)
- SOLAR = setdiff(inputs["VS_SOLAR"],inputs["VS_WIND"])
- WIND = setdiff(inputs["VS_WIND"],inputs["VS_SOLAR"])
- SOLAR_WIND = intersect(inputs["VS_SOLAR"],inputs["VS_WIND"])
- gen_VRE_STOR = gen.VreStorage
- if !isempty(SOLAR)
- curtailment[SOLAR, :] = scale_factor * (value.(EP[:eTotalCap_SOLAR][SOLAR]).data .* inputs["pP_Max_Solar"][SOLAR, :] .- value.(EP[:vP_SOLAR][SOLAR, :]).data) .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar.!=0)])
- end
- if !isempty(WIND)
- curtailment[WIND, :] = scale_factor * (value.(EP[:eTotalCap_WIND][WIND]).data .* inputs["pP_Max_Wind"][WIND, :] .- value.(EP[:vP_WIND][WIND, :]).data)
- end
- if !isempty(SOLAR_WIND)
- curtailment[SOLAR_WIND, :] = scale_factor * ((value.(EP[:eTotalCap_SOLAR])[SOLAR_WIND].data
- .* inputs["pP_Max_Solar"][SOLAR_WIND, :] .- value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data)
- .* etainverter.(gen_VRE_STOR[((gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.!=0))])
- + (value.(EP[:eTotalCap_WIND][SOLAR_WIND]).data .* inputs["pP_Max_Wind"][SOLAR_WIND, :] .- value.(EP[:vP_WIND][SOLAR_WIND, :]).data))
- end
- end
+ gen = inputs["RESOURCES"]
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ VRE = inputs["VRE"]
+ dfCurtailment = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zone_id.(gen),
+ AnnualSum = zeros(G))
+ curtailment = zeros(G, T)
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ curtailment[VRE, :] = scale_factor *
+ (value.(EP[:eTotalCap][VRE]) .* inputs["pP_Max"][VRE, :] .-
+ value.(EP[:vP][VRE, :]))
- dfCurtailment.AnnualSum = curtailment * inputs["omega"]
+ VRE_STOR = inputs["VRE_STOR"]
+ if !isempty(VRE_STOR)
+ SOLAR = setdiff(inputs["VS_SOLAR"], inputs["VS_WIND"])
+ WIND = setdiff(inputs["VS_WIND"], inputs["VS_SOLAR"])
+ SOLAR_WIND = intersect(inputs["VS_SOLAR"], inputs["VS_WIND"])
+ gen_VRE_STOR = gen.VreStorage
+ if !isempty(SOLAR)
+ curtailment[SOLAR, :] = scale_factor *
+ (value.(EP[:eTotalCap_SOLAR][SOLAR]).data .*
+ inputs["pP_Max_Solar"][SOLAR, :] .-
+ value.(EP[:vP_SOLAR][SOLAR, :]).data) .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar .!= 0)])
+ end
+ if !isempty(WIND)
+ curtailment[WIND, :] = scale_factor * (value.(EP[:eTotalCap_WIND][WIND]).data .*
+ inputs["pP_Max_Wind"][WIND, :] .-
+ value.(EP[:vP_WIND][WIND, :]).data)
+ end
+ if !isempty(SOLAR_WIND)
+ curtailment[SOLAR_WIND, :] = scale_factor *
+ ((value.(EP[:eTotalCap_SOLAR])[SOLAR_WIND].data
+ .*
+ inputs["pP_Max_Solar"][SOLAR_WIND, :] .-
+ value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data)
+ .*
+ etainverter.(gen_VRE_STOR[((gen_VRE_STOR.wind .!= 0) .& (gen_VRE_STOR.solar .!= 0))])
+ +
+ (value.(EP[:eTotalCap_WIND][SOLAR_WIND]).data .*
+ inputs["pP_Max_Wind"][SOLAR_WIND, :] .-
+ value.(EP[:vP_WIND][SOLAR_WIND, :]).data))
+ end
+ end
- filename = joinpath(path, "curtail.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filename, dfCurtailment)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filename, curtailment, dfCurtailment)
- end
- return nothing
+ dfCurtailment.AnnualSum = curtailment * inputs["omega"]
+
+ filename = joinpath(path, "curtail.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filename, dfCurtailment)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filename, curtailment, dfCurtailment)
+ end
+ return nothing
end
diff --git a/src/write_outputs/write_emissions.jl b/src/write_outputs/write_emissions.jl
index f4aaa00550..43c040a65e 100644
--- a/src/write_outputs/write_emissions.jl
+++ b/src/write_outputs/write_emissions.jl
@@ -5,92 +5,126 @@ Function for reporting time-dependent CO$_2$ emissions by zone.
"""
function write_emissions(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
-
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- if (setup["WriteShadowPrices"]==1 || setup["UCommit"]==0 || (setup["UCommit"]==2 && (setup["OperationalReserves"]==0 || (setup["OperationalReserves"]>0 && inputs["pDynamic_Contingency"]==0)))) # fully linear model
- # CO2 emissions by zone
+ if (setup["WriteShadowPrices"] == 1 || setup["UCommit"] == 0 ||
+ (setup["UCommit"] == 2 && (setup["OperationalReserves"] == 0 ||
+ (setup["OperationalReserves"] > 0 && inputs["pDynamic_Contingency"] == 0)))) # fully linear model
+ # CO2 emissions by zone
- if setup["CO2Cap"]>=1
- # Dual variable of CO2 constraint = shadow price of CO2
- tempCO2Price = zeros(Z,inputs["NCO2Cap"])
- if has_duals(EP) == 1
- for cap in 1:inputs["NCO2Cap"]
- for z in findall(x->x==1, inputs["dfCO2CapZones"][:,cap])
- tempCO2Price[z,cap] = (-1) * dual.(EP[:cCO2Emissions_systemwide])[cap]
- # when scaled, The objective function is in unit of Million US$/kton, thus k$/ton, to get $/ton, multiply 1000
- tempCO2Price[z,cap] *= scale_factor
- end
- end
- end
- dfEmissions = hcat(DataFrame(Zone = 1:Z), DataFrame(tempCO2Price, :auto), DataFrame(AnnualSum = Array{Float64}(undef, Z)))
- auxNew_Names=[Symbol("Zone"); [Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]]; Symbol("AnnualSum")]
- rename!(dfEmissions,auxNew_Names)
- else
- dfEmissions = DataFrame(Zone = 1:Z, AnnualSum = Array{Float64}(undef, Z))
- end
+ if setup["CO2Cap"] >= 1
+ # Dual variable of CO2 constraint = shadow price of CO2
+ tempCO2Price = zeros(Z, inputs["NCO2Cap"])
+ if has_duals(EP) == 1
+ for cap in 1:inputs["NCO2Cap"]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])
+ tempCO2Price[z, cap] = (-1) *
+ dual.(EP[:cCO2Emissions_systemwide])[cap]
+                        # when scaled, the objective function is in units of million US$/kton, i.e. k$/ton; to get $/ton, multiply by 1000
+ tempCO2Price[z, cap] *= scale_factor
+ end
+ end
+ end
+ dfEmissions = hcat(DataFrame(Zone = 1:Z),
+ DataFrame(tempCO2Price, :auto),
+ DataFrame(AnnualSum = Array{Float64}(undef, Z)))
+ auxNew_Names = [Symbol("Zone");
+ [Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];
+ Symbol("AnnualSum")]
+ rename!(dfEmissions, auxNew_Names)
+ else
+ dfEmissions = DataFrame(Zone = 1:Z, AnnualSum = Array{Float64}(undef, Z))
+ end
- emissions_by_zone = value.(EP[:eEmissionsByZone])
- for i in 1:Z
- dfEmissions[i,:AnnualSum] = sum(inputs["omega"] .* emissions_by_zone[i,:]) * scale_factor
- end
+ emissions_by_zone = value.(EP[:eEmissionsByZone])
+ for i in 1:Z
+ dfEmissions[i, :AnnualSum] = sum(inputs["omega"] .* emissions_by_zone[i, :]) *
+ scale_factor
+ end
- if setup["WriteOutputs"] == "annual"
- total = DataFrame(["Total" sum(dfEmissions.AnnualSum)], [:Zone;:AnnualSum])
- if setup["CO2Cap"]>=1
- total = DataFrame(["Total" zeros(1,inputs["NCO2Cap"]) sum(dfEmissions.AnnualSum)], [:Zone;[Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];:AnnualSum])
- end
- dfEmissions = vcat(dfEmissions, total)
- CSV.write(joinpath(path, "emissions.csv"), dfEmissions)
- else # setup["WriteOutputs"] == "full"
- dfEmissions = hcat(dfEmissions, DataFrame(emissions_by_zone * scale_factor, :auto))
- if setup["CO2Cap"]>=1
- auxNew_Names=[Symbol("Zone");[Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfEmissions,auxNew_Names)
- total = DataFrame(["Total" zeros(1,inputs["NCO2Cap"]) sum(dfEmissions[!,:AnnualSum]) fill(0.0, (1,T))], :auto)
- for t in 1:T
- total[:,t+inputs["NCO2Cap"]+2] .= sum(dfEmissions[:,Symbol("t$t")][1:Z])
- end
- else
- auxNew_Names=[Symbol("Zone"); Symbol("AnnualSum"); [Symbol("t$t") for t in 1:T]]
- rename!(dfEmissions,auxNew_Names)
- total = DataFrame(["Total" sum(dfEmissions[!,:AnnualSum]) fill(0.0, (1,T))], :auto)
- for t in 1:T
- total[:,t+2] .= sum(dfEmissions[:,Symbol("t$t")][1:Z])
- end
- end
- rename!(total,auxNew_Names)
- dfEmissions = vcat(dfEmissions, total)
- CSV.write(joinpath(path, "emissions.csv"), dftranspose(dfEmissions, false), writeheader=false)
- end
-## Aaron - Combined elseif setup["Dual_MIP"]==1 block with the first block since they were identical. Why do we have this third case? What is different about it?
- else
- # CO2 emissions by zone
- emissions_by_zone = value.(EP[:eEmissionsByZone])
- dfEmissions = hcat(DataFrame(Zone = 1:Z), DataFrame(AnnualSum = Array{Float64}(undef, Z)))
- for i in 1:Z
- dfEmissions[i,:AnnualSum] = sum(inputs["omega"] .* emissions_by_zone[i,:]) * scale_factor
- end
+ if setup["WriteOutputs"] == "annual"
+ total = DataFrame(["Total" sum(dfEmissions.AnnualSum)], [:Zone; :AnnualSum])
+ if setup["CO2Cap"] >= 1
+ total = DataFrame(
+ ["Total" zeros(1, inputs["NCO2Cap"]) sum(dfEmissions.AnnualSum)],
+ [:Zone;
+ [Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];
+ :AnnualSum])
+ end
+ dfEmissions = vcat(dfEmissions, total)
+ CSV.write(joinpath(path, "emissions.csv"), dfEmissions)
+        else # setup["WriteOutputs"] == "full"
+ dfEmissions = hcat(dfEmissions,
+ DataFrame(emissions_by_zone * scale_factor, :auto))
+ if setup["CO2Cap"] >= 1
+ auxNew_Names = [Symbol("Zone");
+ [Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfEmissions, auxNew_Names)
+ total = DataFrame(
+ ["Total" zeros(1, inputs["NCO2Cap"]) sum(dfEmissions[!,
+ :AnnualSum]) fill(0.0, (1, T))],
+ :auto)
+ for t in 1:T
+ total[:, t + inputs["NCO2Cap"] + 2] .= sum(dfEmissions[:,
+ Symbol("t$t")][1:Z])
+ end
+ else
+ auxNew_Names = [Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfEmissions, auxNew_Names)
+ total = DataFrame(
+ ["Total" sum(dfEmissions[!, :AnnualSum]) fill(0.0,
+ (1, T))],
+ :auto)
+ for t in 1:T
+ total[:, t + 2] .= sum(dfEmissions[:, Symbol("t$t")][1:Z])
+ end
+ end
+ rename!(total, auxNew_Names)
+ dfEmissions = vcat(dfEmissions, total)
+ CSV.write(joinpath(path, "emissions.csv"),
+ dftranspose(dfEmissions, false),
+ writeheader = false)
+ end
+ ## Aaron - Combined elseif setup["Dual_MIP"]==1 block with the first block since they were identical. Why do we have this third case? What is different about it?
+ else
+ # CO2 emissions by zone
+ emissions_by_zone = value.(EP[:eEmissionsByZone])
+ dfEmissions = hcat(DataFrame(Zone = 1:Z),
+ DataFrame(AnnualSum = Array{Float64}(undef, Z)))
+ for i in 1:Z
+ dfEmissions[i, :AnnualSum] = sum(inputs["omega"] .* emissions_by_zone[i, :]) *
+ scale_factor
+ end
- if setup["WriteOutputs"] == "annual"
- total = DataFrame(["Total" sum(dfEmissions.AnnualSum)], [:Zone;:AnnualSum])
- dfEmissions = vcat(dfEmissions, total)
- CSV.write(joinpath(path, "emissions.csv"), dfEmissions)
- else # setup["WriteOutputs"] == "full"
- dfEmissions = hcat(dfEmissions, DataFrame(emissions_by_zone * scale_factor, :auto))
- auxNew_Names=[Symbol("Zone");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfEmissions,auxNew_Names)
- total = DataFrame(["Total" sum(dfEmissions[!,:AnnualSum]) fill(0.0, (1,T))], :auto)
- for t in 1:T
- total[:,t+2] .= sum(dfEmissions[:,Symbol("t$t")][1:Z])
- end
- rename!(total,auxNew_Names)
- dfEmissions = vcat(dfEmissions, total)
- CSV.write(joinpath(path, "emissions.csv"), dftranspose(dfEmissions, false), writeheader=false)
- end
- end
- return nothing
+ if setup["WriteOutputs"] == "annual"
+ total = DataFrame(["Total" sum(dfEmissions.AnnualSum)], [:Zone; :AnnualSum])
+ dfEmissions = vcat(dfEmissions, total)
+ CSV.write(joinpath(path, "emissions.csv"), dfEmissions)
+        else # setup["WriteOutputs"] == "full"
+ dfEmissions = hcat(dfEmissions,
+ DataFrame(emissions_by_zone * scale_factor, :auto))
+ auxNew_Names = [Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfEmissions, auxNew_Names)
+ total = DataFrame(["Total" sum(dfEmissions[!, :AnnualSum]) fill(0.0, (1, T))],
+ :auto)
+ for t in 1:T
+ total[:, t + 2] .= sum(dfEmissions[:, Symbol("t$t")][1:Z])
+ end
+ rename!(total, auxNew_Names)
+ dfEmissions = vcat(dfEmissions, total)
+ CSV.write(joinpath(path, "emissions.csv"),
+ dftranspose(dfEmissions, false),
+ writeheader = false)
+ end
+ end
+ return nothing
end
diff --git a/src/write_outputs/write_energy_revenue.jl b/src/write_outputs/write_energy_revenue.jl
index 92168c52f1..3e0834bd1e 100644
--- a/src/write_outputs/write_energy_revenue.jl
+++ b/src/write_outputs/write_energy_revenue.jl
@@ -4,26 +4,32 @@
Function for writing energy revenue from the different generation technologies.
"""
function write_energy_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
+ gen = inputs["RESOURCES"]
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- FLEX = inputs["FLEX"]
- NONFLEX = setdiff(collect(1:G), FLEX)
- dfEnergyRevenue = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters, AnnualSum = Array{Float64}(undef, G),)
- energyrevenue = zeros(G, T)
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ FLEX = inputs["FLEX"]
+ NONFLEX = setdiff(collect(1:G), FLEX)
+ dfEnergyRevenue = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters,
+ AnnualSum = Array{Float64}(undef, G))
+ energyrevenue = zeros(G, T)
price = locational_marginal_price(EP, inputs, setup)
- energyrevenue[NONFLEX, :] = value.(EP[:vP][NONFLEX, :]) .* transpose(price)[zone_id.(gen[NONFLEX]), :]
- if !isempty(FLEX)
- energyrevenue[FLEX, :] = value.(EP[:vCHARGE_FLEX][FLEX, :]).data .* transpose(price)[zone_id.(gen[FLEX]), :]
- end
- if setup["ParameterScale"] == 1
- energyrevenue *= ModelScalingFactor
- end
- dfEnergyRevenue.AnnualSum .= energyrevenue * inputs["omega"]
- write_simple_csv(joinpath(path, "EnergyRevenue.csv"), dfEnergyRevenue)
- return dfEnergyRevenue
+ energyrevenue[NONFLEX, :] = value.(EP[:vP][NONFLEX, :]) .*
+ transpose(price)[zone_id.(gen[NONFLEX]), :]
+ if !isempty(FLEX)
+ energyrevenue[FLEX, :] = value.(EP[:vCHARGE_FLEX][FLEX, :]).data .*
+ transpose(price)[zone_id.(gen[FLEX]), :]
+ end
+ if setup["ParameterScale"] == 1
+ energyrevenue *= ModelScalingFactor
+ end
+ dfEnergyRevenue.AnnualSum .= energyrevenue * inputs["omega"]
+ write_simple_csv(joinpath(path, "EnergyRevenue.csv"), dfEnergyRevenue)
+ return dfEnergyRevenue
end
diff --git a/src/write_outputs/write_fuel_consumption.jl b/src/write_outputs/write_fuel_consumption.jl
index 7a661b9386..a5f05f839a 100644
--- a/src/write_outputs/write_fuel_consumption.jl
+++ b/src/write_outputs/write_fuel_consumption.jl
@@ -4,57 +4,63 @@
Write fuel consumption of each power plant.
"""
function write_fuel_consumption(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
-
- write_fuel_consumption_plant(path::AbstractString,inputs::Dict, setup::Dict, EP::Model)
- if setup["WriteOutputs"] != "annual"
- write_fuel_consumption_ts(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- end
- write_fuel_consumption_tot(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ write_fuel_consumption_plant(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ if setup["WriteOutputs"] != "annual"
+ write_fuel_consumption_ts(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ end
+ write_fuel_consumption_tot(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
end
-function write_fuel_consumption_plant(path::AbstractString,inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
+function write_fuel_consumption_plant(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ gen = inputs["RESOURCES"]
+
+ HAS_FUEL = inputs["HAS_FUEL"]
+ MULTI_FUELS = inputs["MULTI_FUELS"]
- HAS_FUEL = inputs["HAS_FUEL"]
- MULTI_FUELS = inputs["MULTI_FUELS"]
+ # Fuel consumption cost by each resource, including start up fuel
+ dfPlantFuel = DataFrame(Resource = inputs["RESOURCE_NAMES"][HAS_FUEL],
+ Fuel = fuel.(gen[HAS_FUEL]),
+ Zone = zone_id.(gen[HAS_FUEL]),
+ AnnualSumCosts = zeros(length(HAS_FUEL)))
+ tempannualsum = value.(EP[:ePlantCFuelOut][HAS_FUEL]) +
+ value.(EP[:ePlantCFuelStart][HAS_FUEL])
- # Fuel consumption cost by each resource, including start up fuel
- dfPlantFuel = DataFrame(Resource = inputs["RESOURCE_NAMES"][HAS_FUEL],
- Fuel = fuel.(gen[HAS_FUEL]),
- Zone = zone_id.(gen[HAS_FUEL]),
- AnnualSumCosts = zeros(length(HAS_FUEL)))
- tempannualsum = value.(EP[:ePlantCFuelOut][HAS_FUEL]) + value.(EP[:ePlantCFuelStart][HAS_FUEL])
+ if !isempty(MULTI_FUELS)
+        fuel_cols_num = inputs["FUEL_COLS"] # TODO: rename it
+ max_fuels = inputs["MAX_NUM_FUELS"]
+ dfPlantFuel.Multi_Fuels = multi_fuels.(gen[HAS_FUEL])
+ for i in 1:max_fuels
+ tempannualsum_fuel_heat_multi_generation = zeros(length(HAS_FUEL))
+ tempannualsum_fuel_heat_multi_start = zeros(length(HAS_FUEL))
+ tempannualsum_fuel_heat_multi_total = zeros(length(HAS_FUEL))
+ tempannualsum_fuel_cost_multi = zeros(length(HAS_FUEL))
+ for g in MULTI_FUELS
+ tempannualsum_fuel_heat_multi_generation[findfirst(x -> x == g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi_generation][g,i])
+ tempannualsum_fuel_heat_multi_start[findfirst(x -> x == g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi_start][g,i])
+ tempannualsum_fuel_heat_multi_total[findfirst(x -> x == g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi][g,i])
+ tempannualsum_fuel_cost_multi[findfirst(x -> x == g, HAS_FUEL)] = value.(EP[:ePlantCFuelOut_multi][g,i]) + value.(EP[:ePlantCFuelOut_multi_start][g,i])
+ end
+ if setup["ParameterScale"] == 1
+ tempannualsum_fuel_heat_multi_generation *= ModelScalingFactor
+ tempannualsum_fuel_heat_multi_start *= ModelScalingFactor
+ tempannualsum_fuel_heat_multi_total *= ModelScalingFactor
+ tempannualsum_fuel_cost_multi *= ModelScalingFactor^2
+ end
- if !isempty(MULTI_FUELS)
- fuel_cols_num = inputs["FUEL_COLS"] # TODO: rename it
- max_fuels = inputs["MAX_NUM_FUELS"]
- dfPlantFuel.Multi_Fuels = multi_fuels.(gen[HAS_FUEL])
- for i = 1:max_fuels
- tempannualsum_fuel_heat_multi_generation = zeros(length(HAS_FUEL))
- tempannualsum_fuel_heat_multi_start = zeros(length(HAS_FUEL))
- tempannualsum_fuel_heat_multi_total = zeros(length(HAS_FUEL))
- tempannualsum_fuel_cost_multi = zeros(length(HAS_FUEL))
- for g in MULTI_FUELS
- tempannualsum_fuel_heat_multi_generation[findfirst(x->x==g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi_generation][g,i])
- tempannualsum_fuel_heat_multi_start[findfirst(x->x==g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi_start][g,i])
- tempannualsum_fuel_heat_multi_total[findfirst(x->x==g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi][g,i])
- tempannualsum_fuel_cost_multi[findfirst(x->x==g, HAS_FUEL)] = value.(EP[:ePlantCFuelOut_multi][g,i]) + value.(EP[:ePlantCFuelOut_multi_start][g,i])
- end
- if setup["ParameterScale"] == 1
- tempannualsum_fuel_heat_multi_generation *= ModelScalingFactor
- tempannualsum_fuel_heat_multi_start *= ModelScalingFactor
- tempannualsum_fuel_heat_multi_total *= ModelScalingFactor
- tempannualsum_fuel_cost_multi *= ModelScalingFactor^2
- end
+ dfPlantFuel[!, fuel_cols_num[i]] = fuel_cols.(gen[HAS_FUEL], tag = i)
+ dfPlantFuel[!, Symbol(string(fuel_cols_num[i], "_AnnualSum_Fuel_HeatInput_Generation_MMBtu"))] = tempannualsum_fuel_heat_multi_generation
+ dfPlantFuel[!, Symbol(string(fuel_cols_num[i], "_AnnualSum_Fuel_HeatInput_Start_MMBtu"))] = tempannualsum_fuel_heat_multi_start
+ dfPlantFuel[!, Symbol(string(fuel_cols_num[i], "_AnnualSum_Fuel_HeatInput_Total_MMBtu"))] = tempannualsum_fuel_heat_multi_total
+ dfPlantFuel[!, Symbol(string(fuel_cols_num[i], "_AnnualSum_Fuel_Cost"))] = tempannualsum_fuel_cost_multi
+ end
+ end
- dfPlantFuel[!, fuel_cols_num[i]] = fuel_cols.(gen[HAS_FUEL], tag=i)
- dfPlantFuel[!, Symbol(string(fuel_cols_num[i],"_AnnualSum_Fuel_HeatInput_Generation_MMBtu"))] = tempannualsum_fuel_heat_multi_generation
- dfPlantFuel[!, Symbol(string(fuel_cols_num[i],"_AnnualSum_Fuel_HeatInput_Start_MMBtu"))] = tempannualsum_fuel_heat_multi_start
- dfPlantFuel[!, Symbol(string(fuel_cols_num[i],"_AnnualSum_Fuel_HeatInput_Total_MMBtu"))] = tempannualsum_fuel_heat_multi_total
- dfPlantFuel[!, Symbol(string(fuel_cols_num[i],"_AnnualSum_Fuel_Cost"))] = tempannualsum_fuel_cost_multi
- end
- end
-
if setup["ParameterScale"] == 1
tempannualsum *= ModelScalingFactor^2 #
end
@@ -62,34 +68,38 @@ function write_fuel_consumption_plant(path::AbstractString,inputs::Dict, setup::
CSV.write(joinpath(path, "Fuel_cost_plant.csv"), dfPlantFuel)
end
+function write_fuel_consumption_ts(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ T = inputs["T"] # Number of time steps (hours)
+ HAS_FUEL = inputs["HAS_FUEL"]
-function write_fuel_consumption_ts(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- HAS_FUEL = inputs["HAS_FUEL"]
-
- # Fuel consumption by each resource per time step, unit is MMBTU
- dfPlantFuel_TS = DataFrame(Resource = inputs["RESOURCE_NAMES"][HAS_FUEL])
- tempts = value.(EP[:ePlantFuel_generation] + EP[:ePlantFuel_start])[HAS_FUEL,:]
+ # Fuel consumption by each resource per time step, unit is MMBTU
+ dfPlantFuel_TS = DataFrame(Resource = inputs["RESOURCE_NAMES"][HAS_FUEL])
+ tempts = value.(EP[:ePlantFuel_generation] + EP[:ePlantFuel_start])[HAS_FUEL, :]
if setup["ParameterScale"] == 1
tempts *= ModelScalingFactor # kMMBTU to MMBTU
end
- dfPlantFuel_TS = hcat(dfPlantFuel_TS,
- DataFrame(tempts, [Symbol("t$t") for t in 1:T]))
- CSV.write(joinpath(path, "FuelConsumption_plant_MMBTU.csv"),
- dftranspose(dfPlantFuel_TS, false), header=false)
+ dfPlantFuel_TS = hcat(dfPlantFuel_TS,
+ DataFrame(tempts, [Symbol("t$t") for t in 1:T]))
+ CSV.write(joinpath(path, "FuelConsumption_plant_MMBTU.csv"),
+ dftranspose(dfPlantFuel_TS, false), header = false)
end
-
-function write_fuel_consumption_tot(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- # types of fuel
- fuel_types = inputs["fuels"]
- fuel_number = length(fuel_types)
- dfFuel = DataFrame(Fuel = fuel_types,
- AnnualSum = zeros(fuel_number))
- tempannualsum = value.(EP[:eFuelConsumptionYear])
+function write_fuel_consumption_tot(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ # types of fuel
+ fuel_types = inputs["fuels"]
+ fuel_number = length(fuel_types)
+ dfFuel = DataFrame(Fuel = fuel_types,
+ AnnualSum = zeros(fuel_number))
+ tempannualsum = value.(EP[:eFuelConsumptionYear])
if setup["ParameterScale"] == 1
tempannualsum *= ModelScalingFactor # billion MMBTU to MMBTU
end
- dfFuel.AnnualSum .+= tempannualsum
- CSV.write(joinpath(path,"FuelConsumption_total_MMBTU.csv"), dfFuel)
+ dfFuel.AnnualSum .+= tempannualsum
+ CSV.write(joinpath(path, "FuelConsumption_total_MMBTU.csv"), dfFuel)
end
diff --git a/src/write_outputs/write_maintenance.jl b/src/write_outputs/write_maintenance.jl
index d00af7b696..f7a491828f 100644
--- a/src/write_outputs/write_maintenance.jl
+++ b/src/write_outputs/write_maintenance.jl
@@ -7,7 +7,7 @@ function write_simple_csv(filename::AbstractString, header::Vector, matrix)
write_simple_csv(filename, df)
end
-function prepare_timeseries_variables(EP::Model, set::Set{Symbol}, scale::Float64=1.0)
+function prepare_timeseries_variables(EP::Model, set::Set{Symbol}, scale::Float64 = 1.0)
# function to extract data from DenseAxisArray
data(var) = scale * value.(EP[var]).data
diff --git a/src/write_outputs/write_net_revenue.jl b/src/write_outputs/write_net_revenue.jl
index 3b3beb0772..ef42e9ca87 100644
--- a/src/write_outputs/write_net_revenue.jl
+++ b/src/write_outputs/write_net_revenue.jl
@@ -3,223 +3,287 @@
Function for writing net revenue of different generation technologies.
"""
-function write_net_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model, dfCap::DataFrame, dfESRRev::DataFrame, dfResRevenue::DataFrame, dfChargingcost::DataFrame, dfPower::DataFrame, dfEnergyRevenue::DataFrame, dfSubRevenue::DataFrame, dfRegSubRevenue::DataFrame, dfVreStor::DataFrame, dfOpRegRevenue::DataFrame, dfOpRsvRevenue::DataFrame)
-
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
- regions = region.(gen)
- clusters = cluster.(gen)
- rid = resource_id.(gen)
-
- G = inputs["G"] # Number of generators
- COMMIT = inputs["COMMIT"] # Thermal units for unit commitment
- STOR_ALL = inputs["STOR_ALL"]
-
- if setup["OperationalReserves"] >= 1
- RSV = inputs["RSV"] # Generators contributing to operating reserves
- REG = inputs["REG"] # Generators contributing to regulation
- end
-
- VRE_STOR = inputs["VRE_STOR"]
- CCS = inputs["CCS"]
- if !isempty(VRE_STOR)
- gen_VRE_STOR = gen.VreStorage
- VRE_STOR_LENGTH = size(inputs["VRE_STOR"])[1]
- SOLAR = inputs["VS_SOLAR"]
- WIND = inputs["VS_WIND"]
- DC = inputs["VS_DC"]
- DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
- AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
- DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
- AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
- # Should read in charge asymmetric capacities
- end
-
- # Create a NetRevenue dataframe
- dfNetRevenue = DataFrame(region = regions, Resource = inputs["RESOURCE_NAMES"], zone = zones, Cluster = clusters, R_ID = rid)
-
- # Add investment cost to the dataframe
- dfNetRevenue.Inv_cost_MW = inv_cost_per_mwyr.(gen) .* dfCap[1:G,:NewCap]
- dfNetRevenue.Inv_cost_MWh = inv_cost_per_mwhyr.(gen) .* dfCap[1:G,:NewEnergyCap]
- dfNetRevenue.Inv_cost_charge_MW = inv_cost_charge_per_mwyr.(gen) .* dfCap[1:G,:NewChargeCap]
- if !isempty(VRE_STOR)
- # Doesn't include charge capacities
- if !isempty(SOLAR)
- dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_solar_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH,:NewCapSolar]
- end
- if !isempty(DC)
- dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_inverter_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH,:NewCapDC]
- end
- if !isempty(WIND)
- dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_wind_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH,:NewCapWind]
- end
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.Inv_cost_MWh *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Inv_cost_MW *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Inv_cost_charge_MW *= ModelScalingFactor # converting Million US$ to US$
- end
-
- # Add operations and maintenance cost to the dataframe
- dfNetRevenue.Fixed_OM_cost_MW = fixed_om_cost_per_mwyr.(gen) .* dfCap[1:G,:EndCap]
- dfNetRevenue.Fixed_OM_cost_MWh = fixed_om_cost_per_mwhyr.(gen) .* dfCap[1:G,:EndEnergyCap]
- dfNetRevenue.Fixed_OM_cost_charge_MW = fixed_om_cost_charge_per_mwyr.(gen) .* dfCap[1:G, :EndChargeCap]
-
- dfNetRevenue.Var_OM_cost_out = var_om_cost_per_mwh.(gen) .* dfPower[1:G,:AnnualSum]
- if !isempty(VRE_STOR)
- if !isempty(SOLAR)
- dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_solar_cost_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH, :EndCapSolar]
- dfNetRevenue.Var_OM_cost_out[SOLAR] += var_om_cost_per_mwh_solar.(gen_VRE_STOR[(gen_VRE_STOR.solar.!=0)]) .* (value.(EP[:vP_SOLAR][SOLAR, :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar.!=0)]) * inputs["omega"])
- end
- if !isempty(WIND)
- dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_wind_cost_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH, :EndCapWind]
- dfNetRevenue.Var_OM_cost_out[WIND] += var_om_cost_per_mwh_wind.(gen_VRE_STOR[(gen_VRE_STOR.wind.!=0)]) .* (value.(EP[:vP_WIND][WIND, :]).data * inputs["omega"])
- end
- if !isempty(DC)
- dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_inverter_cost_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH, :EndCapDC]
- end
- if !isempty(DC_DISCHARGE)
- dfNetRevenue.Var_OM_cost_out[DC_DISCHARGE] += var_om_cost_per_mwh_discharge_dc.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)]) .* (value.(EP[:vP_DC_DISCHARGE][DC_DISCHARGE, :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)]) * inputs["omega"])
- end
- if !isempty(AC_DISCHARGE)
- dfNetRevenue.Var_OM_cost_out[AC_DISCHARGE] += var_om_cost_per_mwh_discharge_ac.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_discharge.!=0)]) .* (value.(EP[:vP_AC_DISCHARGE][AC_DISCHARGE, :]).data * inputs["omega"])
- end
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.Fixed_OM_cost_MW *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Fixed_OM_cost_MWh *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Fixed_OM_cost_charge_MW *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Var_OM_cost_out *= ModelScalingFactor # converting Million US$ to US$
- end
-
- # Add fuel cost to the dataframe
- dfNetRevenue.Fuel_cost = sum(value.(EP[:ePlantCFuelOut]), dims = 2)
- if setup["ParameterScale"] == 1
- dfNetRevenue.Fuel_cost *= ModelScalingFactor^2 # converting Million US$ to US$
- end
-
- # Add storage cost to the dataframe
- dfNetRevenue.Var_OM_cost_in = zeros(nrow(dfNetRevenue))
- if !isempty(STOR_ALL)
- dfNetRevenue.Var_OM_cost_in[STOR_ALL] = var_om_cost_per_mwh_in.(gen.Storage) .* ((value.(EP[:vCHARGE][STOR_ALL,:]).data) * inputs["omega"])
- end
- if !isempty(VRE_STOR)
- if !isempty(DC_CHARGE)
- dfNetRevenue.Var_OM_cost_in[DC_CHARGE] += var_om_cost_per_mwh_charge_dc.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge.!=0)]) .* (value.(EP[:vP_DC_CHARGE][DC_CHARGE, :]).data ./ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge.!=0)]) * inputs["omega"])
- end
- if !isempty(AC_CHARGE)
- dfNetRevenue.Var_OM_cost_in[AC_CHARGE] += var_om_cost_per_mwh_charge_ac.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_charge.!=0)]) .* (value.(EP[:vP_AC_CHARGE][AC_CHARGE, :]).data * inputs["omega"])
- end
- end
-
- if setup["ParameterScale"] == 1
- dfNetRevenue.Var_OM_cost_in *= ModelScalingFactor^2 # converting Million US$ to US$
- end
- # Add start-up cost to the dataframe
- dfNetRevenue.StartCost = zeros(nrow(dfNetRevenue))
- if setup["UCommit"]>=1 && !isempty(COMMIT)
- start_costs = vec(sum(value.(EP[:eCStart][COMMIT, :]).data, dims = 2))
- start_fuel_costs = vec(value.(EP[:ePlantCFuelStart][COMMIT]))
- dfNetRevenue.StartCost[COMMIT] .= start_costs + start_fuel_costs
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.StartCost *= ModelScalingFactor^2 # converting Million US$ to US$
- end
- # Add charge cost to the dataframe
- dfNetRevenue.Charge_cost = zeros(nrow(dfNetRevenue))
- if has_duals(EP)
- dfNetRevenue.Charge_cost = dfChargingcost[1:G,:AnnualSum] # Unit is confirmed to be US$
- end
-
- # Add CO2 releated sequestration cost or credit (e.g. 45 Q) to the dataframe
- dfNetRevenue.CO2SequestrationCost = zeros(nrow(dfNetRevenue))
- if any(co2_capture_fraction.(gen) .!= 0)
- dfNetRevenue.CO2SequestrationCost = zeros(G)
- dfNetRevenue[CCS, :CO2SequestrationCost] = value.(EP[:ePlantCCO2Sequestration]).data
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.CO2SequestrationCost *= ModelScalingFactor^2 # converting Million US$ to US$
- end
-
- # Add energy and subsidy revenue to the dataframe
- dfNetRevenue.EnergyRevenue = zeros(nrow(dfNetRevenue))
- dfNetRevenue.SubsidyRevenue = zeros(nrow(dfNetRevenue))
- if has_duals(EP)
- dfNetRevenue.EnergyRevenue = dfEnergyRevenue[1:G,:AnnualSum] # Unit is confirmed to be US$
- dfNetRevenue.SubsidyRevenue = dfSubRevenue[1:G,:SubsidyRevenue] # Unit is confirmed to be US$
- end
-
- # Add energy and subsidy revenue to the dataframe
- dfNetRevenue.OperatingReserveRevenue = zeros(nrow(dfNetRevenue))
- dfNetRevenue.OperatingRegulationRevenue = zeros(nrow(dfNetRevenue))
- if setup["OperationalReserves"] > 0 && has_duals(EP)
- dfNetRevenue.OperatingReserveRevenue[RSV] = dfOpRsvRevenue.AnnualSum # Unit is confirmed to be US$
- dfNetRevenue.OperatingRegulationRevenue[REG] = dfOpRegRevenue.AnnualSum # Unit is confirmed to be US$
- end
-
- # Add capacity revenue to the dataframe
- dfNetRevenue.ReserveMarginRevenue = zeros(nrow(dfNetRevenue))
- if setup["CapacityReserveMargin"] > 0 && has_duals(EP) # The unit is confirmed to be $
- dfNetRevenue.ReserveMarginRevenue = dfResRevenue[1:G,:AnnualSum]
- end
-
- # Add RPS/CES revenue to the dataframe
- dfNetRevenue.ESRRevenue = zeros(nrow(dfNetRevenue))
- if setup["EnergyShareRequirement"] > 0 && has_duals(EP) # The unit is confirmed to be $
- dfNetRevenue.ESRRevenue = dfESRRev[1:G,:Total]
- end
-
- # Calculate emissions cost
- dfNetRevenue.EmissionsCost = zeros(nrow(dfNetRevenue))
- if setup["CO2Cap"] >=1 && has_duals(EP)
- for cap in 1:inputs["NCO2Cap"]
- co2_cap_dual = dual(EP[:cCO2Emissions_systemwide][cap])
- CO2ZONES = findall(x->x==1, inputs["dfCO2CapZones"][:,cap])
- GEN_IN_ZONE = resource_id.(gen[[y in CO2ZONES for y in zone_id.(gen)]])
- if setup["CO2Cap"]==1 || setup["CO2Cap"]==2 # Mass-based or Demand + Rate-based
- # Cost = sum(sum(emissions for zone z * dual(CO2 constraint[cap]) for z in Z) for cap in setup["NCO2"])
- temp_vec = value.(EP[:eEmissionsByPlant][GEN_IN_ZONE, :]) * inputs["omega"]
- dfNetRevenue.EmissionsCost[GEN_IN_ZONE] += - co2_cap_dual * temp_vec
- elseif setup["CO2Cap"]==3 # Generation + Rate-based
- SET_WITH_MAXCO2RATE = union(inputs["THERM_ALL"],inputs["VRE"], inputs["VRE"],inputs["MUST_RUN"],inputs["HYDRO_RES"])
- Y = intersect(GEN_IN_ZONE, SET_WITH_MAXCO2RATE)
- temp_vec = (value.(EP[:eEmissionsByPlant][Y,:]) - (value.(EP[:vP][Y,:]) .* inputs["dfMaxCO2Rate"][zone_id.(gen[Y]), cap])) * inputs["omega"]
- dfNetRevenue.EmissionsCost[Y] += - co2_cap_dual * temp_vec
- end
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.EmissionsCost *= ModelScalingFactor^2 # converting Million US$ to US$
- end
- end
-
- # Add regional technology subsidy revenue to the dataframe
- dfNetRevenue.RegSubsidyRevenue = zeros(nrow(dfNetRevenue))
- if setup["MinCapReq"] >= 1 && has_duals(EP)# The unit is confirmed to be US$
- dfNetRevenue.RegSubsidyRevenue = dfRegSubRevenue[1:G,:SubsidyRevenue]
- end
-
- dfNetRevenue.Revenue = dfNetRevenue.EnergyRevenue
- .+ dfNetRevenue.SubsidyRevenue
- .+ dfNetRevenue.ReserveMarginRevenue
- .+ dfNetRevenue.ESRRevenue
- .+ dfNetRevenue.RegSubsidyRevenue
- .+ dfNetRevenue.OperatingReserveRevenue
- .+ dfNetRevenue.OperatingRegulationRevenue
-
- dfNetRevenue.Cost = (dfNetRevenue.Inv_cost_MW
- .+ dfNetRevenue.Inv_cost_MWh
- .+ dfNetRevenue.Inv_cost_charge_MW
- .+ dfNetRevenue.Fixed_OM_cost_MW
- .+ dfNetRevenue.Fixed_OM_cost_MWh
- .+ dfNetRevenue.Fixed_OM_cost_charge_MW
- .+ dfNetRevenue.Var_OM_cost_out
- .+ dfNetRevenue.Var_OM_cost_in
- .+ dfNetRevenue.Fuel_cost
- .+ dfNetRevenue.Charge_cost
- .+ dfNetRevenue.EmissionsCost
- .+ dfNetRevenue.StartCost
- .+ dfNetRevenue.CO2SequestrationCost)
- dfNetRevenue.Profit = dfNetRevenue.Revenue .- dfNetRevenue.Cost
-
- CSV.write(joinpath(path, "NetRevenue.csv"), dfNetRevenue)
+function write_net_revenue(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model,
+ dfCap::DataFrame,
+ dfESRRev::DataFrame,
+ dfResRevenue::DataFrame,
+ dfChargingcost::DataFrame,
+ dfPower::DataFrame,
+ dfEnergyRevenue::DataFrame,
+ dfSubRevenue::DataFrame,
+ dfRegSubRevenue::DataFrame,
+ dfVreStor::DataFrame,
+ dfOpRegRevenue::DataFrame,
+ dfOpRsvRevenue::DataFrame)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ rid = resource_id.(gen)
+
+ G = inputs["G"] # Number of generators
+ COMMIT = inputs["COMMIT"]# Thermal units for unit commitment
+ STOR_ALL = inputs["STOR_ALL"]
+
+ if setup["OperationalReserves"] >= 1
+ RSV = inputs["RSV"]# Generators contributing to operating reserves
+ REG = inputs["REG"] # Generators contributing to regulation
+ end
+
+ VRE_STOR = inputs["VRE_STOR"]
+ CCS = inputs["CCS"]
+ if !isempty(VRE_STOR)
+ gen_VRE_STOR = gen.VreStorage
+ VRE_STOR_LENGTH = size(inputs["VRE_STOR"])[1]
+ SOLAR = inputs["VS_SOLAR"]
+ WIND = inputs["VS_WIND"]
+ DC = inputs["VS_DC"]
+ DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
+ AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
+ DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
+ AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
+ # Should read in charge asymmetric capacities
+ end
+
+ # Create a NetRevenue dataframe
+ dfNetRevenue = DataFrame(region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ zone = zones,
+ Cluster = clusters,
+ R_ID = rid)
+
+ # Add investment cost to the dataframe
+ dfNetRevenue.Inv_cost_MW = inv_cost_per_mwyr.(gen) .* dfCap[1:G, :NewCap]
+ dfNetRevenue.Inv_cost_MWh = inv_cost_per_mwhyr.(gen) .* dfCap[1:G, :NewEnergyCap]
+ dfNetRevenue.Inv_cost_charge_MW = inv_cost_charge_per_mwyr.(gen) .*
+ dfCap[1:G, :NewChargeCap]
+ if !isempty(VRE_STOR)
+ # Doesn't include charge capacities
+ if !isempty(SOLAR)
+ dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_solar_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH, :NewCapSolar]
+ end
+ if !isempty(DC)
+ dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_inverter_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH, :NewCapDC]
+ end
+ if !isempty(WIND)
+ dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_wind_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH, :NewCapWind]
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.Inv_cost_MWh *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Inv_cost_MW *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Inv_cost_charge_MW *= ModelScalingFactor # converting Million US$ to US$
+ end
+
+ # Add operations and maintenance cost to the dataframe
+ dfNetRevenue.Fixed_OM_cost_MW = fixed_om_cost_per_mwyr.(gen) .* dfCap[1:G, :EndCap]
+ dfNetRevenue.Fixed_OM_cost_MWh = fixed_om_cost_per_mwhyr.(gen) .*
+ dfCap[1:G, :EndEnergyCap]
+ dfNetRevenue.Fixed_OM_cost_charge_MW = fixed_om_cost_charge_per_mwyr.(gen) .*
+ dfCap[1:G, :EndChargeCap]
+
+ dfNetRevenue.Var_OM_cost_out = var_om_cost_per_mwh.(gen) .* dfPower[1:G, :AnnualSum]
+ if !isempty(VRE_STOR)
+ if !isempty(SOLAR)
+ dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_solar_cost_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH,
+ :EndCapSolar]
+ dfNetRevenue.Var_OM_cost_out[SOLAR] += var_om_cost_per_mwh_solar.(gen_VRE_STOR[(gen_VRE_STOR.solar .!= 0)]) .*
+ (value.(EP[:vP_SOLAR][SOLAR, :]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar .!= 0)]) *
+ inputs["omega"])
+ end
+ if !isempty(WIND)
+ dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_wind_cost_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH,
+ :EndCapWind]
+ dfNetRevenue.Var_OM_cost_out[WIND] += var_om_cost_per_mwh_wind.(gen_VRE_STOR[(gen_VRE_STOR.wind .!= 0)]) .*
+ (value.(EP[:vP_WIND][WIND, :]).data *
+ inputs["omega"])
+ end
+ if !isempty(DC)
+ dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_inverter_cost_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH,
+ :EndCapDC]
+ end
+ if !isempty(DC_DISCHARGE)
+ dfNetRevenue.Var_OM_cost_out[DC_DISCHARGE] += var_om_cost_per_mwh_discharge_dc.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)]) .*
+ (value.(EP[:vP_DC_DISCHARGE][DC_DISCHARGE,:]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)]) *
+ inputs["omega"])
+ end
+ if !isempty(AC_DISCHARGE)
+ dfNetRevenue.Var_OM_cost_out[AC_DISCHARGE] += var_om_cost_per_mwh_discharge_ac.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_discharge .!= 0)]) .*
+ (value.(EP[:vP_AC_DISCHARGE][AC_DISCHARGE,:]).data * inputs["omega"])
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.Fixed_OM_cost_MW *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Fixed_OM_cost_MWh *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Fixed_OM_cost_charge_MW *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Var_OM_cost_out *= ModelScalingFactor # converting Million US$ to US$
+ end
+
+ # Add fuel cost to the dataframe
+ dfNetRevenue.Fuel_cost = sum(value.(EP[:ePlantCFuelOut]), dims = 2)
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.Fuel_cost *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+
+ # Add storage cost to the dataframe
+ dfNetRevenue.Var_OM_cost_in = zeros(nrow(dfNetRevenue))
+ if !isempty(STOR_ALL)
+ dfNetRevenue.Var_OM_cost_in[STOR_ALL] = var_om_cost_per_mwh_in.(gen.Storage) .*
+ ((value.(EP[:vCHARGE][STOR_ALL, :]).data) *
+ inputs["omega"])
+ end
+ if !isempty(VRE_STOR)
+ if !isempty(DC_CHARGE)
+ dfNetRevenue.Var_OM_cost_in[DC_CHARGE] += var_om_cost_per_mwh_charge_dc.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge .!= 0)]) .*
+ (value.(EP[:vP_DC_CHARGE][DC_CHARGE,:]).data ./
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge .!= 0)]) *
+ inputs["omega"])
+ end
+ if !isempty(AC_CHARGE)
+ dfNetRevenue.Var_OM_cost_in[AC_CHARGE] += var_om_cost_per_mwh_charge_ac.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_charge .!= 0)]) .*
+ (value.(EP[:vP_AC_CHARGE][AC_CHARGE,
+ :]).data * inputs["omega"])
+ end
+ end
+
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.Var_OM_cost_in *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+ # Add start-up cost to the dataframe
+ dfNetRevenue.StartCost = zeros(nrow(dfNetRevenue))
+ if setup["UCommit"] >= 1 && !isempty(COMMIT)
+ start_costs = vec(sum(value.(EP[:eCStart][COMMIT, :]).data, dims = 2))
+ start_fuel_costs = vec(value.(EP[:ePlantCFuelStart][COMMIT]))
+ dfNetRevenue.StartCost[COMMIT] .= start_costs + start_fuel_costs
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.StartCost *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+ # Add charge cost to the dataframe
+ dfNetRevenue.Charge_cost = zeros(nrow(dfNetRevenue))
+ if has_duals(EP)
+ dfNetRevenue.Charge_cost = dfChargingcost[1:G, :AnnualSum] # Unit is confirmed to be US$
+ end
+
+    # Add CO2 related sequestration cost or credit (e.g. 45 Q) to the dataframe
+ dfNetRevenue.CO2SequestrationCost = zeros(nrow(dfNetRevenue))
+ if any(co2_capture_fraction.(gen) .!= 0)
+ dfNetRevenue.CO2SequestrationCost = zeros(G)
+ dfNetRevenue[CCS, :CO2SequestrationCost] = value.(EP[:ePlantCCO2Sequestration]).data
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.CO2SequestrationCost *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+
+ # Add energy and subsidy revenue to the dataframe
+ dfNetRevenue.EnergyRevenue = zeros(nrow(dfNetRevenue))
+ dfNetRevenue.SubsidyRevenue = zeros(nrow(dfNetRevenue))
+ if has_duals(EP)
+ dfNetRevenue.EnergyRevenue = dfEnergyRevenue[1:G, :AnnualSum] # Unit is confirmed to be US$
+ dfNetRevenue.SubsidyRevenue = dfSubRevenue[1:G, :SubsidyRevenue] # Unit is confirmed to be US$
+ end
+
+ # Add energy and subsidy revenue to the dataframe
+ dfNetRevenue.OperatingReserveRevenue = zeros(nrow(dfNetRevenue))
+ dfNetRevenue.OperatingRegulationRevenue = zeros(nrow(dfNetRevenue))
+ if setup["OperationalReserves"] > 0 && has_duals(EP)
+ dfNetRevenue.OperatingReserveRevenue[RSV] = dfOpRsvRevenue.AnnualSum # Unit is confirmed to be US$
+ dfNetRevenue.OperatingRegulationRevenue[REG] = dfOpRegRevenue.AnnualSum # Unit is confirmed to be US$
+ end
+
+ # Add capacity revenue to the dataframe
+ dfNetRevenue.ReserveMarginRevenue = zeros(nrow(dfNetRevenue))
+ if setup["CapacityReserveMargin"] > 0 && has_duals(EP) # The unit is confirmed to be $
+ dfNetRevenue.ReserveMarginRevenue = dfResRevenue[1:G, :AnnualSum]
+ end
+
+ # Add RPS/CES revenue to the dataframe
+ dfNetRevenue.ESRRevenue = zeros(nrow(dfNetRevenue))
+ if setup["EnergyShareRequirement"] > 0 && has_duals(EP) # The unit is confirmed to be $
+ dfNetRevenue.ESRRevenue = dfESRRev[1:G, :Total]
+ end
+
+ # Calculate emissions cost
+ dfNetRevenue.EmissionsCost = zeros(nrow(dfNetRevenue))
+ if setup["CO2Cap"] >= 1 && has_duals(EP)
+ for cap in 1:inputs["NCO2Cap"]
+ co2_cap_dual = dual(EP[:cCO2Emissions_systemwide][cap])
+ CO2ZONES = findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])
+ GEN_IN_ZONE = resource_id.(gen[[y in CO2ZONES for y in zone_id.(gen)]])
+ if setup["CO2Cap"] == 1 || setup["CO2Cap"] == 2 # Mass-based or Demand + Rate-based
+ # Cost = sum(sum(emissions for zone z * dual(CO2 constraint[cap]) for z in Z) for cap in setup["NCO2"])
+ temp_vec = value.(EP[:eEmissionsByPlant][GEN_IN_ZONE, :]) * inputs["omega"]
+ dfNetRevenue.EmissionsCost[GEN_IN_ZONE] += -co2_cap_dual * temp_vec
+ elseif setup["CO2Cap"] == 3 # Generation + Rate-based
+ SET_WITH_MAXCO2RATE = union(inputs["THERM_ALL"],
+ inputs["VRE"],
+ inputs["VRE"],
+ inputs["MUST_RUN"],
+ inputs["HYDRO_RES"])
+ Y = intersect(GEN_IN_ZONE, SET_WITH_MAXCO2RATE)
+ temp_vec = (value.(EP[:eEmissionsByPlant][Y, :]) -
+ (value.(EP[:vP][Y, :]) .*
+ inputs["dfMaxCO2Rate"][zone_id.(gen[Y]), cap])) *
+ inputs["omega"]
+ dfNetRevenue.EmissionsCost[Y] += -co2_cap_dual * temp_vec
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.EmissionsCost *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+ end
+
+ # Add regional technology subsidy revenue to the dataframe
+ dfNetRevenue.RegSubsidyRevenue = zeros(nrow(dfNetRevenue))
+ if setup["MinCapReq"] >= 1 && has_duals(EP)# The unit is confirmed to be US$
+ dfNetRevenue.RegSubsidyRevenue = dfRegSubRevenue[1:G, :SubsidyRevenue]
+ end
+
+    # Parenthesize the whole sum so the `.+` continuation lines are part of ONE
+    # expression. Without the parentheses, `dfNetRevenue.Revenue = dfNetRevenue.EnergyRevenue`
+    # is already a complete Julia statement and each following `.+x` line parses as a
+    # standalone no-op (broadcast unary plus), so Revenue would silently equal
+    # EnergyRevenue alone. This mirrors the (correct) parenthesized Cost computation below.
+    dfNetRevenue.Revenue = (dfNetRevenue.EnergyRevenue
+                            .+ dfNetRevenue.SubsidyRevenue
+                            .+ dfNetRevenue.ReserveMarginRevenue
+                            .+ dfNetRevenue.ESRRevenue
+                            .+ dfNetRevenue.RegSubsidyRevenue
+                            .+ dfNetRevenue.OperatingReserveRevenue
+                            .+ dfNetRevenue.OperatingRegulationRevenue)
+
+ dfNetRevenue.Cost = (dfNetRevenue.Inv_cost_MW
+ .+
+ dfNetRevenue.Inv_cost_MWh
+ .+
+ dfNetRevenue.Inv_cost_charge_MW
+ .+
+ dfNetRevenue.Fixed_OM_cost_MW
+ .+
+ dfNetRevenue.Fixed_OM_cost_MWh
+ .+
+ dfNetRevenue.Fixed_OM_cost_charge_MW
+ .+
+ dfNetRevenue.Var_OM_cost_out
+ .+
+ dfNetRevenue.Var_OM_cost_in
+ .+
+ dfNetRevenue.Fuel_cost
+ .+
+ dfNetRevenue.Charge_cost
+ .+
+ dfNetRevenue.EmissionsCost
+ .+
+ dfNetRevenue.StartCost
+ .+
+ dfNetRevenue.CO2SequestrationCost)
+ dfNetRevenue.Profit = dfNetRevenue.Revenue .- dfNetRevenue.Cost
+
+ CSV.write(joinpath(path, "NetRevenue.csv"), dfNetRevenue)
end
diff --git a/src/write_outputs/write_nse.jl b/src/write_outputs/write_nse.jl
index 5d30dcc987..c906624e4b 100644
--- a/src/write_outputs/write_nse.jl
+++ b/src/write_outputs/write_nse.jl
@@ -4,33 +4,39 @@
Function for reporting non-served energy for every model zone, time step and cost-segment.
"""
function write_nse(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- SEG = inputs["SEG"] # Number of demand curtailment segments
- # Non-served energy/demand curtailment by segment in each time step
- dfNse = DataFrame(Segment = repeat(1:SEG, outer = Z), Zone = repeat(1:Z, inner = SEG), AnnualSum = zeros(SEG * Z))
- nse = zeros(SEG * Z, T)
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- for z in 1:Z
- nse[((z-1)*SEG+1):z*SEG, :] = value.(EP[:vNSE])[:, :, z] * scale_factor
- end
- dfNse.AnnualSum .= nse * inputs["omega"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ SEG = inputs["SEG"] # Number of demand curtailment segments
+ # Non-served energy/demand curtailment by segment in each time step
+ dfNse = DataFrame(Segment = repeat(1:SEG, outer = Z),
+ Zone = repeat(1:Z, inner = SEG),
+ AnnualSum = zeros(SEG * Z))
+ nse = zeros(SEG * Z, T)
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ for z in 1:Z
+ nse[((z - 1) * SEG + 1):(z * SEG), :] = value.(EP[:vNSE])[:, :, z] * scale_factor
+ end
+ dfNse.AnnualSum .= nse * inputs["omega"]
- if setup["WriteOutputs"] == "annual"
- total = DataFrame(["Total" 0 sum(dfNse[!,:AnnualSum])], [:Segment, :Zone, :AnnualSum])
- dfNse = vcat(dfNse, total)
- CSV.write(joinpath(path, "nse.csv"), dfNse)
- else # setup["WriteOutputs"] == "full"
- dfNse = hcat(dfNse, DataFrame(nse, :auto))
- auxNew_Names=[Symbol("Segment");Symbol("Zone");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfNse,auxNew_Names)
+ if setup["WriteOutputs"] == "annual"
+ total = DataFrame(["Total" 0 sum(dfNse[!, :AnnualSum])],
+ [:Segment, :Zone, :AnnualSum])
+ dfNse = vcat(dfNse, total)
+ CSV.write(joinpath(path, "nse.csv"), dfNse)
+ else # setup["WriteOutputs"] == "full"
+ dfNse = hcat(dfNse, DataFrame(nse, :auto))
+ auxNew_Names = [Symbol("Segment");
+ Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfNse, auxNew_Names)
- total = DataFrame(["Total" 0 sum(dfNse[!,:AnnualSum]) fill(0.0, (1,T))], :auto)
- total[:, 4:T+3] .= sum(nse, dims = 1)
- rename!(total,auxNew_Names)
- dfNse = vcat(dfNse, total)
+ total = DataFrame(["Total" 0 sum(dfNse[!, :AnnualSum]) fill(0.0, (1, T))], :auto)
+ total[:, 4:(T + 3)] .= sum(nse, dims = 1)
+ rename!(total, auxNew_Names)
+ dfNse = vcat(dfNse, total)
- CSV.write(joinpath(path, "nse.csv"), dftranspose(dfNse, false), writeheader=false)
- end
- return nothing
+ CSV.write(joinpath(path, "nse.csv"), dftranspose(dfNse, false), writeheader = false)
+ end
+ return nothing
end
diff --git a/src/write_outputs/write_outputs.jl b/src/write_outputs/write_outputs.jl
index 8e88ec0dc1..b05e8f7579 100644
--- a/src/write_outputs/write_outputs.jl
+++ b/src/write_outputs/write_outputs.jl
@@ -11,389 +11,477 @@
Function for the entry-point for writing the different output files. From here, onward several other functions are called, each for writing specific output files, like costs, capacities, etc.
"""
function write_outputs(EP::Model, path::AbstractString, setup::Dict, inputs::Dict)
-
- if setup["OverwriteResults"] == 1
- # Overwrite existing results if dir exists
- # This is the default behaviour when there is no flag, to avoid breaking existing code
- if !(isdir(path))
- mkpath(path)
- end
- else
- # Find closest unused ouput directory name and create it
- path = choose_output_dir(path)
- mkpath(path)
- end
-
- # https://jump.dev/MathOptInterface.jl/v0.9.10/apireference/#MathOptInterface.TerminationStatusCode
- status = termination_status(EP)
-
- ## Check if solved sucessfully - time out is included
- if status != MOI.OPTIMAL && status != MOI.LOCALLY_SOLVED
- if status != MOI.TIME_LIMIT # Model failed to solve, so record solver status and exit
- write_status(path, inputs, setup, EP)
- return
- # Model reached timelimit but failed to find a feasible solution
- #### Aaron Schwartz - Not sure if the below condition is valid anymore. We should revisit ####
- elseif isnan(objective_value(EP))==true
- # Model failed to solve, so record solver status and exit
- write_status(path, inputs, setup, EP)
- return
- end
- end
-
- # Dict containing the list of outputs to write
- output_settings_d = setup["WriteOutputsSettingsDict"]
- write_settings_file(path, setup)
-
- output_settings_d["WriteStatus"] && write_status(path, inputs, setup, EP)
-
- # linearize and re-solve model if duals are not available but ShadowPrices are requested
- if !has_duals(EP) && setup["WriteShadowPrices"] == 1
- # function to fix integers and linearize problem
- fix_integers(EP)
- # re-solve statement for LP solution
- println("Solving LP solution for duals")
- set_silent(EP)
- optimize!(EP)
- end
-
- if output_settings_d["WriteCosts"]
- elapsed_time_costs = @elapsed write_costs(path, inputs, setup, EP)
- println("Time elapsed for writing costs is")
- println(elapsed_time_costs)
- end
-
- if output_settings_d["WriteCapacity"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_capacity = @elapsed dfCap = write_capacity(path, inputs, setup, EP)
- println("Time elapsed for writing capacity is")
- println(elapsed_time_capacity)
- end
-
- if output_settings_d["WritePower"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_power = @elapsed dfPower = write_power(path, inputs, setup, EP)
- println("Time elapsed for writing power is")
- println(elapsed_time_power)
- end
-
- if output_settings_d["WriteCharge"]
- elapsed_time_charge = @elapsed write_charge(path, inputs, setup, EP)
- println("Time elapsed for writing charge is")
- println(elapsed_time_charge)
- end
-
- if output_settings_d["WriteCapacityFactor"]
- elapsed_time_capacityfactor = @elapsed write_capacityfactor(path, inputs, setup, EP)
- println("Time elapsed for writing capacity factor is")
- println(elapsed_time_capacityfactor)
- end
-
- if output_settings_d["WriteStorage"]
- elapsed_time_storage = @elapsed write_storage(path, inputs, setup, EP)
- println("Time elapsed for writing storage is")
- println(elapsed_time_storage)
- end
-
- if output_settings_d["WriteCurtailment"]
- elapsed_time_curtailment = @elapsed write_curtailment(path, inputs, setup, EP)
- println("Time elapsed for writing curtailment is")
- println(elapsed_time_curtailment)
- end
-
- if output_settings_d["WriteNSE"]
- elapsed_time_nse = @elapsed write_nse(path, inputs, setup, EP)
- println("Time elapsed for writing nse is")
- println(elapsed_time_nse)
- end
-
- if output_settings_d["WritePowerBalance"]
- elapsed_time_power_balance = @elapsed write_power_balance(path, inputs, setup, EP)
- println("Time elapsed for writing power balance is")
- println(elapsed_time_power_balance)
- end
-
- if inputs["Z"] > 1
- if output_settings_d["WriteTransmissionFlows"]
- elapsed_time_flows = @elapsed write_transmission_flows(path, inputs, setup, EP)
- println("Time elapsed for writing transmission flows is")
- println(elapsed_time_flows)
- end
-
- if output_settings_d["WriteTransmissionLosses"]
- elapsed_time_losses = @elapsed write_transmission_losses(path, inputs, setup, EP)
- println("Time elapsed for writing transmission losses is")
- println(elapsed_time_losses)
- end
-
- if setup["NetworkExpansion"] == 1 && output_settings_d["WriteNWExpansion"]
- elapsed_time_expansion = @elapsed write_nw_expansion(path, inputs, setup, EP)
- println("Time elapsed for writing network expansion is")
- println(elapsed_time_expansion)
- end
- end
-
- if output_settings_d["WriteEmissions"]
- elapsed_time_emissions = @elapsed write_emissions(path, inputs, setup, EP)
- println("Time elapsed for writing emissions is")
- println(elapsed_time_emissions)
- end
-
- dfVreStor = DataFrame()
- if !isempty(inputs["VRE_STOR"])
- if output_settings_d["WriteVREStor"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_vrestor = @elapsed dfVreStor = write_vre_stor(path, inputs, setup, EP)
- println("Time elapsed for writing vre stor is")
- println(elapsed_time_vrestor)
- end
- VS_LDS = inputs["VS_LDS"]
- VS_STOR = inputs["VS_STOR"]
- else
- VS_LDS = []
- VS_STOR = []
- end
-
- if has_duals(EP) == 1
- if output_settings_d["WriteReliability"]
- elapsed_time_reliability = @elapsed write_reliability(path, inputs, setup, EP)
- println("Time elapsed for writing reliability is")
- println(elapsed_time_reliability)
- end
- if !isempty(inputs["STOR_ALL"]) || !isempty(VS_STOR)
- if output_settings_d["WriteStorageDual"]
- elapsed_time_stordual = @elapsed write_storagedual(path, inputs, setup, EP)
- println("Time elapsed for writing storage duals is")
- println(elapsed_time_stordual)
- end
- end
- end
-
- if setup["UCommit"] >= 1
- if output_settings_d["WriteCommit"]
- elapsed_time_commit = @elapsed write_commit(path, inputs, setup, EP)
- println("Time elapsed for writing commitment is")
- println(elapsed_time_commit)
- end
-
- if output_settings_d["WriteStart"]
- elapsed_time_start = @elapsed write_start(path, inputs, setup, EP)
- println("Time elapsed for writing startup is")
- println(elapsed_time_start)
- end
-
- if output_settings_d["WriteShutdown"]
- elapsed_time_shutdown = @elapsed write_shutdown(path, inputs, setup, EP)
- println("Time elapsed for writing shutdown is")
- println(elapsed_time_shutdown)
- end
-
- if setup["OperationalReserves"] == 1
- if output_settings_d["WriteReg"]
- elapsed_time_reg = @elapsed write_reg(path, inputs, setup, EP)
- println("Time elapsed for writing regulation is")
- println(elapsed_time_reg)
- end
-
- if output_settings_d["WriteRsv"]
- elapsed_time_rsv = @elapsed write_rsv(path, inputs, setup, EP)
- println("Time elapsed for writing reserves is")
- println(elapsed_time_rsv)
- end
- end
- end
-
- # Output additional variables related inter-period energy transfer via storage
- representative_periods = inputs["REP_PERIOD"]
- if representative_periods > 1 && (!isempty(inputs["STOR_LONG_DURATION"]) || !isempty(VS_LDS))
- if output_settings_d["WriteOpWrapLDSStorInit"]
- elapsed_time_lds_init = @elapsed write_opwrap_lds_stor_init(path, inputs, setup, EP)
- println("Time elapsed for writing lds init is")
- println(elapsed_time_lds_init)
- end
-
- if output_settings_d["WriteOpWrapLDSdStor"]
- elapsed_time_lds_dstor = @elapsed write_opwrap_lds_dstor(path, inputs, setup, EP)
- println("Time elapsed for writing lds dstor is")
- println(elapsed_time_lds_dstor)
- end
- end
-
- if output_settings_d["WriteFuelConsumption"]
- elapsed_time_fuel_consumption = @elapsed write_fuel_consumption(path, inputs, setup, EP)
- println("Time elapsed for writing fuel consumption is")
- println(elapsed_time_fuel_consumption)
- end
-
- if output_settings_d["WriteCO2"]
- elapsed_time_emissions = @elapsed write_co2(path, inputs, setup, EP)
- println("Time elapsed for writing co2 is")
- println(elapsed_time_emissions)
- end
-
- if has_maintenance(inputs) && output_settings_d["WriteMaintenance"]
- write_maintenance(path, inputs, EP)
- end
-
- #Write angles when DC_OPF is activated
- if setup["DC_OPF"] == 1 && output_settings_d["WriteAngles"]
- elapsed_time_angles = @elapsed write_angles(path, inputs, setup, EP)
- println("Time elapsed for writing angles is")
- println(elapsed_time_angles)
- end
-
- # Temporary! Suppress these outputs until we know that they are compatable with multi-stage modeling
- if setup["MultiStage"] == 0
- dfEnergyRevenue = DataFrame()
- dfChargingcost = DataFrame()
- dfSubRevenue = DataFrame()
- dfRegSubRevenue = DataFrame()
- if has_duals(EP) == 1
- if output_settings_d["WritePrice"]
- elapsed_time_price = @elapsed write_price(path, inputs, setup, EP)
- println("Time elapsed for writing price is")
- println(elapsed_time_price)
- end
-
- if output_settings_d["WriteEnergyRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_energy_rev = @elapsed dfEnergyRevenue = write_energy_revenue(path, inputs, setup, EP)
- println("Time elapsed for writing energy revenue is")
- println(elapsed_time_energy_rev)
- end
-
- if output_settings_d["WriteChargingCost"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_charging_cost = @elapsed dfChargingcost = write_charging_cost(path, inputs, setup, EP)
- println("Time elapsed for writing charging cost is")
- println(elapsed_time_charging_cost)
- end
-
- if output_settings_d["WriteSubsidyRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_subsidy = @elapsed dfSubRevenue, dfRegSubRevenue = write_subsidy_revenue(path, inputs, setup, EP)
- println("Time elapsed for writing subsidy is")
- println(elapsed_time_subsidy)
- end
- end
-
- if output_settings_d["WriteTimeWeights"]
- elapsed_time_time_weights = @elapsed write_time_weights(path, inputs)
- println("Time elapsed for writing time weights is")
- println(elapsed_time_time_weights)
- end
-
- dfESRRev = DataFrame()
- if setup["EnergyShareRequirement"] == 1 && has_duals(EP)
- dfESR = DataFrame()
- if output_settings_d["WriteESRPrices"] || output_settings_d["WriteESRRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_esr_prices = @elapsed dfESR = write_esr_prices(path, inputs, setup, EP)
- println("Time elapsed for writing esr prices is")
- println(elapsed_time_esr_prices)
- end
-
- if output_settings_d["WriteESRRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_esr_revenue = @elapsed dfESRRev = write_esr_revenue(path, inputs, setup, dfPower, dfESR, EP)
- println("Time elapsed for writing esr revenue is")
- println(elapsed_time_esr_revenue)
- end
-
- end
-
- dfResRevenue = DataFrame()
- if setup["CapacityReserveMargin"]==1 && has_duals(EP)
- if output_settings_d["WriteReserveMargin"]
- elapsed_time_reserve_margin = @elapsed write_reserve_margin(path, setup, EP)
- println("Time elapsed for writing reserve margin is")
- println(elapsed_time_reserve_margin)
- end
-
- if output_settings_d["WriteReserveMarginWithWeights"]
- elapsed_time_rsv_margin_w = @elapsed write_reserve_margin_w(path, inputs, setup, EP)
- println("Time elapsed for writing reserve margin with weights is")
- println(elapsed_time_rsv_margin_w)
- end
-
- if output_settings_d["WriteVirtualDischarge"]
- elapsed_time_virtual_discharge = @elapsed write_virtual_discharge(path, inputs, setup, EP)
- println("Time elapsed for writing virtual discharge is")
- println(elapsed_time_virtual_discharge)
- end
-
- if output_settings_d["WriteReserveMarginRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_res_rev = @elapsed dfResRevenue = write_reserve_margin_revenue(path, inputs, setup, EP)
- println("Time elapsed for writing reserve revenue is")
- println(elapsed_time_res_rev)
- end
-
- if haskey(inputs, "dfCapRes_slack") && output_settings_d["WriteReserveMarginSlack"]
- elapsed_time_rsv_slack = @elapsed write_reserve_margin_slack(path, inputs, setup, EP)
- println("Time elapsed for writing reserve margin slack is")
- println(elapsed_time_rsv_slack)
- end
-
- if output_settings_d["WriteCapacityValue"]
- elapsed_time_cap_value = @elapsed write_capacity_value(path, inputs, setup, EP)
- println("Time elapsed for writing capacity value is")
- println(elapsed_time_cap_value)
- end
-
- end
-
- dfOpRegRevenue = DataFrame()
- dfOpRsvRevenue = DataFrame()
- if setup["OperationalReserves"]==1 && has_duals(EP)
- elapsed_time_op_res_rev = @elapsed dfOpRegRevenue, dfOpRsvRevenue = write_operating_reserve_regulation_revenue(path, inputs, setup, EP)
- println("Time elapsed for writing oerating reserve and regulation revenue is")
- println(elapsed_time_op_res_rev)
- end
-
- if setup["CO2Cap"]>0 && has_duals(EP) == 1 && output_settings_d["WriteCO2Cap"]
- elapsed_time_co2_cap = @elapsed write_co2_cap(path, inputs, setup, EP)
- println("Time elapsed for writing co2 cap is")
- println(elapsed_time_co2_cap)
- end
- if setup["MinCapReq"] == 1 && has_duals(EP) == 1 && output_settings_d["WriteMinCapReq"]
- elapsed_time_min_cap_req = @elapsed write_minimum_capacity_requirement(path, inputs, setup, EP)
- println("Time elapsed for writing minimum capacity requirement is")
- println(elapsed_time_min_cap_req)
- end
-
- if setup["MaxCapReq"] == 1 && has_duals(EP) == 1 && output_settings_d["WriteMaxCapReq"]
- elapsed_time_max_cap_req = @elapsed write_maximum_capacity_requirement(path, inputs, setup, EP)
- println("Time elapsed for writing maximum capacity requirement is")
- println(elapsed_time_max_cap_req)
- end
-
- if !isempty(inputs["ELECTROLYZER"]) && has_duals(EP)
- if output_settings_d["WriteHydrogenPrices"]
- elapsed_time_hydrogen_prices = @elapsed write_hydrogen_prices(path, inputs, setup, EP)
- println("Time elapsed for writing hydrogen prices is")
- println(elapsed_time_hydrogen_prices)
- end
- if setup["HydrogenHourlyMatching"] == 1 && output_settings_d["WriteHourlyMatchingPrices"]
- elapsed_time_hourly_matching_prices = @elapsed write_hourly_matching_prices(path, inputs, setup, EP)
- println("Time elapsed for writing hourly matching prices is")
- println(elapsed_time_hourly_matching_prices)
- end
- end
-
- if output_settings_d["WriteNetRevenue"]
- elapsed_time_net_rev = @elapsed write_net_revenue(path, inputs, setup, EP, dfCap, dfESRRev, dfResRevenue, dfChargingcost, dfPower, dfEnergyRevenue, dfSubRevenue, dfRegSubRevenue, dfVreStor, dfOpRegRevenue, dfOpRsvRevenue)
- println("Time elapsed for writing net revenue is")
- println(elapsed_time_net_rev)
- end
- end
- ## Print confirmation
- println("Wrote outputs to $path")
-
- return path
+ if setup["OverwriteResults"] == 1
+ # Overwrite existing results if dir exists
+ # This is the default behaviour when there is no flag, to avoid breaking existing code
+ if !(isdir(path))
+ mkpath(path)
+ end
+ else
+ # Find closest unused output directory name and create it
+ path = choose_output_dir(path)
+ mkpath(path)
+ end
+
+ # https://jump.dev/MathOptInterface.jl/v0.9.10/apireference/#MathOptInterface.TerminationStatusCode
+ status = termination_status(EP)
+
+ ## Check if solved successfully - time out is included
+ if status != MOI.OPTIMAL && status != MOI.LOCALLY_SOLVED
+ if status != MOI.TIME_LIMIT # Model failed to solve, so record solver status and exit
+ write_status(path, inputs, setup, EP)
+ return
+ # Model reached timelimit but failed to find a feasible solution
+ #### Aaron Schwartz - Not sure if the below condition is valid anymore. We should revisit ####
+ elseif isnan(objective_value(EP)) == true
+ # Model failed to solve, so record solver status and exit
+ write_status(path, inputs, setup, EP)
+ return
+ end
+ end
+
+ # Dict containing the list of outputs to write
+ output_settings_d = setup["WriteOutputsSettingsDict"]
+ write_settings_file(path, setup)
+
+ output_settings_d["WriteStatus"] && write_status(path, inputs, setup, EP)
+
+ # linearize and re-solve model if duals are not available but ShadowPrices are requested
+ if !has_duals(EP) && setup["WriteShadowPrices"] == 1
+ # function to fix integers and linearize problem
+ fix_integers(EP)
+ # re-solve statement for LP solution
+ println("Solving LP solution for duals")
+ set_silent(EP)
+ optimize!(EP)
+ end
+
+ if output_settings_d["WriteCosts"]
+ elapsed_time_costs = @elapsed write_costs(path, inputs, setup, EP)
+ println("Time elapsed for writing costs is")
+ println(elapsed_time_costs)
+ end
+
+ if output_settings_d["WriteCapacity"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_capacity = @elapsed dfCap = write_capacity(path, inputs, setup, EP)
+ println("Time elapsed for writing capacity is")
+ println(elapsed_time_capacity)
+ end
+
+ if output_settings_d["WritePower"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_power = @elapsed dfPower = write_power(path, inputs, setup, EP)
+ println("Time elapsed for writing power is")
+ println(elapsed_time_power)
+ end
+
+ if output_settings_d["WriteCharge"]
+ elapsed_time_charge = @elapsed write_charge(path, inputs, setup, EP)
+ println("Time elapsed for writing charge is")
+ println(elapsed_time_charge)
+ end
+
+ if output_settings_d["WriteCapacityFactor"]
+ elapsed_time_capacityfactor = @elapsed write_capacityfactor(path, inputs, setup, EP)
+ println("Time elapsed for writing capacity factor is")
+ println(elapsed_time_capacityfactor)
+ end
+
+ if output_settings_d["WriteStorage"]
+ elapsed_time_storage = @elapsed write_storage(path, inputs, setup, EP)
+ println("Time elapsed for writing storage is")
+ println(elapsed_time_storage)
+ end
+
+ if output_settings_d["WriteCurtailment"]
+ elapsed_time_curtailment = @elapsed write_curtailment(path, inputs, setup, EP)
+ println("Time elapsed for writing curtailment is")
+ println(elapsed_time_curtailment)
+ end
+
+ if output_settings_d["WriteNSE"]
+ elapsed_time_nse = @elapsed write_nse(path, inputs, setup, EP)
+ println("Time elapsed for writing nse is")
+ println(elapsed_time_nse)
+ end
+
+ if output_settings_d["WritePowerBalance"]
+ elapsed_time_power_balance = @elapsed write_power_balance(path, inputs, setup, EP)
+ println("Time elapsed for writing power balance is")
+ println(elapsed_time_power_balance)
+ end
+
+ if inputs["Z"] > 1
+ if output_settings_d["WriteTransmissionFlows"]
+ elapsed_time_flows = @elapsed write_transmission_flows(path, inputs, setup, EP)
+ println("Time elapsed for writing transmission flows is")
+ println(elapsed_time_flows)
+ end
+
+ if output_settings_d["WriteTransmissionLosses"]
+ elapsed_time_losses = @elapsed write_transmission_losses(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing transmission losses is")
+ println(elapsed_time_losses)
+ end
+
+ if setup["NetworkExpansion"] == 1 && output_settings_d["WriteNWExpansion"]
+ elapsed_time_expansion = @elapsed write_nw_expansion(path, inputs, setup, EP)
+ println("Time elapsed for writing network expansion is")
+ println(elapsed_time_expansion)
+ end
+ end
+
+ if output_settings_d["WriteEmissions"]
+ elapsed_time_emissions = @elapsed write_emissions(path, inputs, setup, EP)
+ println("Time elapsed for writing emissions is")
+ println(elapsed_time_emissions)
+ end
+
+ dfVreStor = DataFrame()
+ if !isempty(inputs["VRE_STOR"])
+ if output_settings_d["WriteVREStor"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_vrestor = @elapsed dfVreStor = write_vre_stor(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing vre stor is")
+ println(elapsed_time_vrestor)
+ end
+ VS_LDS = inputs["VS_LDS"]
+ VS_STOR = inputs["VS_STOR"]
+ else
+ VS_LDS = []
+ VS_STOR = []
+ end
+
+ if has_duals(EP) == 1
+ if output_settings_d["WriteReliability"]
+ elapsed_time_reliability = @elapsed write_reliability(path, inputs, setup, EP)
+ println("Time elapsed for writing reliability is")
+ println(elapsed_time_reliability)
+ end
+ if !isempty(inputs["STOR_ALL"]) || !isempty(VS_STOR)
+ if output_settings_d["WriteStorageDual"]
+ elapsed_time_stordual = @elapsed write_storagedual(path, inputs, setup, EP)
+ println("Time elapsed for writing storage duals is")
+ println(elapsed_time_stordual)
+ end
+ end
+ end
+
+ if setup["UCommit"] >= 1
+ if output_settings_d["WriteCommit"]
+ elapsed_time_commit = @elapsed write_commit(path, inputs, setup, EP)
+ println("Time elapsed for writing commitment is")
+ println(elapsed_time_commit)
+ end
+
+ if output_settings_d["WriteStart"]
+ elapsed_time_start = @elapsed write_start(path, inputs, setup, EP)
+ println("Time elapsed for writing startup is")
+ println(elapsed_time_start)
+ end
+
+ if output_settings_d["WriteShutdown"]
+ elapsed_time_shutdown = @elapsed write_shutdown(path, inputs, setup, EP)
+ println("Time elapsed for writing shutdown is")
+ println(elapsed_time_shutdown)
+ end
+
+ if setup["OperationalReserves"] == 1
+ if output_settings_d["WriteReg"]
+ elapsed_time_reg = @elapsed write_reg(path, inputs, setup, EP)
+ println("Time elapsed for writing regulation is")
+ println(elapsed_time_reg)
+ end
+
+ if output_settings_d["WriteRsv"]
+ elapsed_time_rsv = @elapsed write_rsv(path, inputs, setup, EP)
+ println("Time elapsed for writing reserves is")
+ println(elapsed_time_rsv)
+ end
+ end
+ end
+
+ # Output additional variables related inter-period energy transfer via storage
+ representative_periods = inputs["REP_PERIOD"]
+ if representative_periods > 1 &&
+ (!isempty(inputs["STOR_LONG_DURATION"]) || !isempty(VS_LDS))
+ if output_settings_d["WriteOpWrapLDSStorInit"]
+ elapsed_time_lds_init = @elapsed write_opwrap_lds_stor_init(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing lds init is")
+ println(elapsed_time_lds_init)
+ end
+
+ if output_settings_d["WriteOpWrapLDSdStor"]
+ elapsed_time_lds_dstor = @elapsed write_opwrap_lds_dstor(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing lds dstor is")
+ println(elapsed_time_lds_dstor)
+ end
+ end
+
+ if output_settings_d["WriteFuelConsumption"]
+ elapsed_time_fuel_consumption = @elapsed write_fuel_consumption(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing fuel consumption is")
+ println(elapsed_time_fuel_consumption)
+ end
+
+ if output_settings_d["WriteCO2"]
+ elapsed_time_emissions = @elapsed write_co2(path, inputs, setup, EP)
+ println("Time elapsed for writing co2 is")
+ println(elapsed_time_emissions)
+ end
+
+ if has_maintenance(inputs) && output_settings_d["WriteMaintenance"]
+ write_maintenance(path, inputs, EP)
+ end
+
+ #Write angles when DC_OPF is activated
+ if setup["DC_OPF"] == 1 && output_settings_d["WriteAngles"]
+ elapsed_time_angles = @elapsed write_angles(path, inputs, setup, EP)
+ println("Time elapsed for writing angles is")
+ println(elapsed_time_angles)
+ end
+
+ # Temporary! Suppress these outputs until we know that they are compatible with multi-stage modeling
+ if setup["MultiStage"] == 0
+ dfEnergyRevenue = DataFrame()
+ dfChargingcost = DataFrame()
+ dfSubRevenue = DataFrame()
+ dfRegSubRevenue = DataFrame()
+ if has_duals(EP) == 1
+ if output_settings_d["WritePrice"]
+ elapsed_time_price = @elapsed write_price(path, inputs, setup, EP)
+ println("Time elapsed for writing price is")
+ println(elapsed_time_price)
+ end
+
+ if output_settings_d["WriteEnergyRevenue"] ||
+ output_settings_d["WriteNetRevenue"]
+ elapsed_time_energy_rev = @elapsed dfEnergyRevenue = write_energy_revenue(
+ path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing energy revenue is")
+ println(elapsed_time_energy_rev)
+ end
+
+ if output_settings_d["WriteChargingCost"] ||
+ output_settings_d["WriteNetRevenue"]
+ elapsed_time_charging_cost = @elapsed dfChargingcost = write_charging_cost(
+ path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing charging cost is")
+ println(elapsed_time_charging_cost)
+ end
+
+ if output_settings_d["WriteSubsidyRevenue"] ||
+ output_settings_d["WriteNetRevenue"]
+ elapsed_time_subsidy = @elapsed dfSubRevenue, dfRegSubRevenue = write_subsidy_revenue(
+ path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing subsidy is")
+ println(elapsed_time_subsidy)
+ end
+ end
+
+ if output_settings_d["WriteTimeWeights"]
+ elapsed_time_time_weights = @elapsed write_time_weights(path, inputs)
+ println("Time elapsed for writing time weights is")
+ println(elapsed_time_time_weights)
+ end
+
+ dfESRRev = DataFrame()
+ if setup["EnergyShareRequirement"] == 1 && has_duals(EP)
+ dfESR = DataFrame()
+ if output_settings_d["WriteESRPrices"] ||
+ output_settings_d["WriteESRRevenue"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_esr_prices = @elapsed dfESR = write_esr_prices(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing esr prices is")
+ println(elapsed_time_esr_prices)
+ end
+
+ if output_settings_d["WriteESRRevenue"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_esr_revenue = @elapsed dfESRRev = write_esr_revenue(path,
+ inputs,
+ setup,
+ dfPower,
+ dfESR,
+ EP)
+ println("Time elapsed for writing esr revenue is")
+ println(elapsed_time_esr_revenue)
+ end
+ end
+
+ dfResRevenue = DataFrame()
+ if setup["CapacityReserveMargin"] == 1 && has_duals(EP)
+ if output_settings_d["WriteReserveMargin"]
+ elapsed_time_reserve_margin = @elapsed write_reserve_margin(path, setup, EP)
+ println("Time elapsed for writing reserve margin is")
+ println(elapsed_time_reserve_margin)
+ end
+
+ if output_settings_d["WriteReserveMarginWithWeights"]
+ elapsed_time_rsv_margin_w = @elapsed write_reserve_margin_w(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing reserve margin with weights is")
+ println(elapsed_time_rsv_margin_w)
+ end
+
+ if output_settings_d["WriteVirtualDischarge"]
+ elapsed_time_virtual_discharge = @elapsed write_virtual_discharge(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing virtual discharge is")
+ println(elapsed_time_virtual_discharge)
+ end
+
+ if output_settings_d["WriteReserveMarginRevenue"] ||
+ output_settings_d["WriteNetRevenue"]
+ elapsed_time_res_rev = @elapsed dfResRevenue = write_reserve_margin_revenue(
+ path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing reserve revenue is")
+ println(elapsed_time_res_rev)
+ end
+
+ if haskey(inputs, "dfCapRes_slack") &&
+ output_settings_d["WriteReserveMarginSlack"]
+ elapsed_time_rsv_slack = @elapsed write_reserve_margin_slack(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing reserve margin slack is")
+ println(elapsed_time_rsv_slack)
+ end
+
+ if output_settings_d["WriteCapacityValue"]
+ elapsed_time_cap_value = @elapsed write_capacity_value(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing capacity value is")
+ println(elapsed_time_cap_value)
+ end
+ end
+
+ dfOpRegRevenue = DataFrame()
+ dfOpRsvRevenue = DataFrame()
+ if setup["OperationalReserves"] == 1 && has_duals(EP)
+ elapsed_time_op_res_rev = @elapsed dfOpRegRevenue, dfOpRsvRevenue = write_operating_reserve_regulation_revenue(
+ path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing oerating reserve and regulation revenue is")
+ println(elapsed_time_op_res_rev)
+ end
+
+ if setup["CO2Cap"] > 0 && has_duals(EP) == 1 && output_settings_d["WriteCO2Cap"]
+ elapsed_time_co2_cap = @elapsed write_co2_cap(path, inputs, setup, EP)
+ println("Time elapsed for writing co2 cap is")
+ println(elapsed_time_co2_cap)
+ end
+ if setup["MinCapReq"] == 1 && has_duals(EP) == 1 &&
+ output_settings_d["WriteMinCapReq"]
+ elapsed_time_min_cap_req = @elapsed write_minimum_capacity_requirement(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing minimum capacity requirement is")
+ println(elapsed_time_min_cap_req)
+ end
+
+ if setup["MaxCapReq"] == 1 && has_duals(EP) == 1 &&
+ output_settings_d["WriteMaxCapReq"]
+ elapsed_time_max_cap_req = @elapsed write_maximum_capacity_requirement(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing maximum capacity requirement is")
+ println(elapsed_time_max_cap_req)
+ end
+
+ if !isempty(inputs["ELECTROLYZER"]) && has_duals(EP)
+ if output_settings_d["WriteHydrogenPrices"]
+ elapsed_time_hydrogen_prices = @elapsed write_hydrogen_prices(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing hydrogen prices is")
+ println(elapsed_time_hydrogen_prices)
+ end
+ if setup["HydrogenHourlyMatching"] == 1 &&
+ output_settings_d["WriteHourlyMatchingPrices"]
+ elapsed_time_hourly_matching_prices = @elapsed write_hourly_matching_prices(
+ path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing hourly matching prices is")
+ println(elapsed_time_hourly_matching_prices)
+ end
+ end
+
+ if output_settings_d["WriteNetRevenue"]
+ elapsed_time_net_rev = @elapsed write_net_revenue(path,
+ inputs,
+ setup,
+ EP,
+ dfCap,
+ dfESRRev,
+ dfResRevenue,
+ dfChargingcost,
+ dfPower,
+ dfEnergyRevenue,
+ dfSubRevenue,
+ dfRegSubRevenue,
+ dfVreStor,
+ dfOpRegRevenue,
+ dfOpRsvRevenue)
+ println("Time elapsed for writing net revenue is")
+ println(elapsed_time_net_rev)
+ end
+ end
+ ## Print confirmation
+ println("Wrote outputs to $path")
+
+ return path
end # END output()
-
"""
write_annual(fullpath::AbstractString, dfOut::DataFrame)
Internal function for writing annual outputs.
"""
function write_annual(fullpath::AbstractString, dfOut::DataFrame)
- push!(dfOut, ["Total" 0 sum(dfOut[!, :AnnualSum])])
- CSV.write(fullpath, dfOut)
- return nothing
+ push!(dfOut, ["Total" 0 sum(dfOut[!, :AnnualSum])])
+ CSV.write(fullpath, dfOut)
+ return nothing
end
"""
@@ -401,16 +489,28 @@ end
Internal function for writing full time series outputs. This function wraps the instructions for creating the full time series output files.
"""
-function write_fulltimeseries(fullpath::AbstractString, dataOut::Matrix{Float64}, dfOut::DataFrame)
- T = size(dataOut, 2)
- dfOut = hcat(dfOut, DataFrame(dataOut, :auto))
- auxNew_Names = [Symbol("Resource");Symbol("Zone");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfOut, auxNew_Names)
- total = DataFrame(["Total" 0 sum(dfOut[!, :AnnualSum]) fill(0.0, (1, T))], auxNew_Names)
- total[!, 4:T+3] .= sum(dataOut, dims=1)
- dfOut = vcat(dfOut, total)
- CSV.write(fullpath, dftranspose(dfOut, false), writeheader=false)
- return nothing
+function write_fulltimeseries(fullpath::AbstractString,
+ dataOut::Matrix{Float64},
+ dfOut::DataFrame)
+ T = size(dataOut, 2)
+ dfOut = hcat(dfOut, DataFrame(dataOut, :auto))
+ auxNew_Names = [Symbol("Resource");
+ Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfOut, auxNew_Names)
+ total = DataFrame(["Total" 0 sum(dfOut[!, :AnnualSum]) fill(0.0, (1, T))], auxNew_Names)
+ total[!, 4:(T + 3)] .= sum(dataOut, dims = 1)
+ dfOut = vcat(dfOut, total)
+ CSV.write(fullpath, dftranspose(dfOut, false), writeheader = false)
+ return nothing
end
-write_settings_file(path, setup) = YAML.write_file(joinpath(path, "run_settings.yml"), setup)
+"""
+ write_settings_file(path, setup)
+
+Internal function for writing settings files
+"""
+function write_settings_file(path, setup)
+ YAML.write_file(joinpath(path, "run_settings.yml"), setup)
+end
diff --git a/src/write_outputs/write_power.jl b/src/write_outputs/write_power.jl
index 3be5e83bf3..30e14048be 100644
--- a/src/write_outputs/write_power.jl
+++ b/src/write_outputs/write_power.jl
@@ -4,26 +4,28 @@
Function for writing the different values of power generated by the different technologies in operation.
"""
function write_power(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
- # Power injected by each resource in each time step
- dfPower = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones, AnnualSum = Array{Union{Missing,Float64}}(undef, G))
- power = value.(EP[:vP])
- if setup["ParameterScale"] == 1
- power *= ModelScalingFactor
- end
- dfPower.AnnualSum .= power * inputs["omega"]
+ # Power injected by each resource in each time step
+ dfPower = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ AnnualSum = Array{Union{Missing, Float64}}(undef, G))
+ power = value.(EP[:vP])
+ if setup["ParameterScale"] == 1
+ power *= ModelScalingFactor
+ end
+ dfPower.AnnualSum .= power * inputs["omega"]
- filepath = joinpath(path, "power.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfPower)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, power, dfPower)
- end
+ filepath = joinpath(path, "power.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfPower)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, power, dfPower)
+ end
- return dfPower #Shouldn't this be return nothing
+ return dfPower #Shouldn't this be return nothing
end
diff --git a/src/write_outputs/write_power_balance.jl b/src/write_outputs/write_power_balance.jl
index 627f3a4821..e96a320ebb 100644
--- a/src/write_outputs/write_power_balance.jl
+++ b/src/write_outputs/write_power_balance.jl
@@ -1,71 +1,101 @@
function write_power_balance(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- SEG = inputs["SEG"] # Number of demand curtailment segments
- THERM_ALL = inputs["THERM_ALL"]
- VRE = inputs["VRE"]
- MUST_RUN = inputs["MUST_RUN"]
- HYDRO_RES = inputs["HYDRO_RES"]
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- ELECTROLYZER = inputs["ELECTROLYZER"]
- VRE_STOR = inputs["VRE_STOR"]
- Com_list = ["Generation", "Storage_Discharge", "Storage_Charge",
- "Flexible_Demand_Defer", "Flexible_Demand_Stasify",
- "Demand_Response", "Nonserved_Energy",
- "Transmission_NetExport", "Transmission_Losses",
- "Demand"]
- if !isempty(ELECTROLYZER)
- push!(Com_list, "Electrolyzer_Consumption")
- end
- L = length(Com_list)
- dfPowerBalance = DataFrame(BalanceComponent = repeat(Com_list, outer = Z), Zone = repeat(1:Z, inner = L), AnnualSum = zeros(L * Z))
- powerbalance = zeros(Z * L, T) # following the same style of power/charge/storage/nse
- for z in 1:Z
- POWER_ZONE = intersect(resources_in_zone_by_rid(gen,z), union(THERM_ALL, VRE, MUST_RUN, HYDRO_RES))
- powerbalance[(z-1)*L+1, :] = sum(value.(EP[:vP][POWER_ZONE, :]), dims = 1)
- if !isempty(intersect(resources_in_zone_by_rid(gen,z), STOR_ALL))
- STOR_ALL_ZONE = intersect(resources_in_zone_by_rid(gen,z), STOR_ALL)
- powerbalance[(z-1)*L+2, :] = sum(value.(EP[:vP][STOR_ALL_ZONE, :]), dims = 1)
- powerbalance[(z-1)*L+3, :] = (-1) * sum((value.(EP[:vCHARGE][STOR_ALL_ZONE, :]).data), dims = 1)
- end
- if !isempty(intersect(resources_in_zone_by_rid(gen,z), VRE_STOR))
- VS_ALL_ZONE = intersect(resources_in_zone_by_rid(gen,z), inputs["VS_STOR"])
- powerbalance[(z-1)*L+2, :] = sum(value.(EP[:vP][VS_ALL_ZONE, :]), dims = 1)
- powerbalance[(z-1)*L+3, :] = (-1) * sum(value.(EP[:vCHARGE_VRE_STOR][VS_ALL_ZONE, :]).data, dims=1)
- end
- if !isempty(intersect(resources_in_zone_by_rid(gen,z), FLEX))
- FLEX_ZONE = intersect(resources_in_zone_by_rid(gen,z), FLEX)
- powerbalance[(z-1)*L+4, :] = sum((value.(EP[:vCHARGE_FLEX][FLEX_ZONE, :]).data), dims = 1)
- powerbalance[(z-1)*L+5, :] = (-1) * sum(value.(EP[:vP][FLEX_ZONE, :]), dims = 1)
- end
- if SEG > 1
- powerbalance[(z-1)*L+6, :] = sum(value.(EP[:vNSE][2:SEG, :, z]), dims = 1)
- end
- powerbalance[(z-1)*L+7, :] = value.(EP[:vNSE][1, :, z])
- if Z >= 2
- powerbalance[(z-1)*L+8, :] = (value.(EP[:ePowerBalanceNetExportFlows][:, z]))' # Transpose
- powerbalance[(z-1)*L+9, :] = -(value.(EP[:eLosses_By_Zone][z, :]))
- end
- powerbalance[(z-1)*L+10, :] = (((-1) * inputs["pD"][:, z]))' # Transpose
- if !isempty(ELECTROLYZER)
- ELECTROLYZER_ZONE = intersect(resources_in_zone_by_rid(gen,z), ELECTROLYZER)
- powerbalance[(z-1)*L+11, :] = (-1) * sum(value.(EP[:vUSE][ELECTROLYZER_ZONE, :].data), dims = 1)
- end
- end
- if setup["ParameterScale"] == 1
- powerbalance *= ModelScalingFactor
- end
- dfPowerBalance.AnnualSum .= powerbalance * inputs["omega"]
+ gen = inputs["RESOURCES"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ SEG = inputs["SEG"] # Number of demand curtailment segments
+ THERM_ALL = inputs["THERM_ALL"]
+ VRE = inputs["VRE"]
+ MUST_RUN = inputs["MUST_RUN"]
+ HYDRO_RES = inputs["HYDRO_RES"]
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ ELECTROLYZER = inputs["ELECTROLYZER"]
+ VRE_STOR = inputs["VRE_STOR"]
+ Com_list = ["Generation", "Storage_Discharge", "Storage_Charge",
+ "Flexible_Demand_Defer", "Flexible_Demand_Stasify",
+ "Demand_Response", "Nonserved_Energy",
+ "Transmission_NetExport", "Transmission_Losses",
+ "Demand"]
+ if !isempty(ELECTROLYZER)
+ push!(Com_list, "Electrolyzer_Consumption")
+ end
+ if !isempty(VRE_STOR)
+ push!(Com_list, "VRE_Storage_Discharge")
+ push!(Com_list, "VRE_Storage_Charge")
+ end
+ L = length(Com_list)
+ dfPowerBalance = DataFrame(BalanceComponent = repeat(Com_list, outer = Z),
+ Zone = repeat(1:Z, inner = L),
+ AnnualSum = zeros(L * Z))
+ powerbalance = zeros(Z * L, T) # following the same style of power/charge/storage/nse
+ for z in 1:Z
+ POWER_ZONE = intersect(resources_in_zone_by_rid(gen, z),
+ union(THERM_ALL, VRE, MUST_RUN, HYDRO_RES))
+ powerbalance[(z - 1) * L + 1, :] = sum(value.(EP[:vP][POWER_ZONE, :]), dims = 1)
+ if !isempty(intersect(resources_in_zone_by_rid(gen, z), STOR_ALL))
+ STOR_ALL_ZONE = intersect(resources_in_zone_by_rid(gen, z), STOR_ALL)
+ powerbalance[(z - 1) * L + 2, :] = sum(value.(EP[:vP][STOR_ALL_ZONE, :]),
+ dims = 1)
+ powerbalance[(z - 1) * L + 3, :] = (-1) * sum(
+ (value.(EP[:vCHARGE][STOR_ALL_ZONE,:]).data),
+ dims = 1)
+ end
+ if !isempty(intersect(resources_in_zone_by_rid(gen, z), FLEX))
+ FLEX_ZONE = intersect(resources_in_zone_by_rid(gen, z), FLEX)
+ powerbalance[(z - 1) * L + 4, :] = sum(
+ (value.(EP[:vCHARGE_FLEX][FLEX_ZONE,:]).data),
+ dims = 1)
+ powerbalance[(z - 1) * L + 5, :] = (-1) *
+ sum(value.(EP[:vP][FLEX_ZONE, :]), dims = 1)
+ end
+ if SEG > 1
+ powerbalance[(z - 1) * L + 6, :] = sum(value.(EP[:vNSE][2:SEG, :, z]), dims = 1)
+ end
+ powerbalance[(z - 1) * L + 7, :] = value.(EP[:vNSE][1, :, z])
+ if Z >= 2
+ powerbalance[(z - 1) * L + 8, :] = (value.(EP[:ePowerBalanceNetExportFlows][:,
+ z]))' # Transpose
+ powerbalance[(z - 1) * L + 9, :] = -(value.(EP[:eLosses_By_Zone][z, :]))
+ end
+ powerbalance[(z - 1) * L + 10, :] = (((-1) * inputs["pD"][:, z]))' # Transpose
+ if !isempty(ELECTROLYZER)
+ ELECTROLYZER_ZONE = intersect(resources_in_zone_by_rid(gen, z), ELECTROLYZER)
+ powerbalance[(z - 1) * L + 11, :] = (-1) * sum(
+ value.(EP[:vUSE][ELECTROLYZER_ZONE,:].data),
+ dims = 1)
+ end
+ # VRE storage discharge and charge
+ if !isempty(intersect(resources_in_zone_by_rid(gen, z), VRE_STOR))
+ VS_ALL_ZONE = intersect(resources_in_zone_by_rid(gen, z), inputs["VS_STOR"])
- if setup["WriteOutputs"] == "annual"
- CSV.write(joinpath(path, "power_balance.csv"), dfPowerBalance)
- else # setup["WriteOutputs"] == "full"
- dfPowerBalance = hcat(dfPowerBalance, DataFrame(powerbalance, :auto))
- auxNew_Names = [Symbol("BalanceComponent"); Symbol("Zone"); Symbol("AnnualSum"); [Symbol("t$t") for t in 1:T]]
- rename!(dfPowerBalance,auxNew_Names)
- CSV.write(joinpath(path, "power_balance.csv"), dftranspose(dfPowerBalance, false), writeheader=false)
- end
- return nothing
+ # if ELECTROLYZER is not empty, its consumption row occupies index 11, so shift these indices up by 1
+ is_electrolyzer_empty = isempty(ELECTROLYZER)
+ discharge_idx = is_electrolyzer_empty ? 11 : 12
+ charge_idx = is_electrolyzer_empty ? 12 : 13
+
+ powerbalance[(z - 1) * L + discharge_idx, :] = sum(
+ value.(EP[:vP][VS_ALL_ZONE, :]), dims = 1)
+ powerbalance[(z - 1) * L + charge_idx, :] = (-1) * sum(
+ value.(EP[:vCHARGE_VRE_STOR][VS_ALL_ZONE, :]).data, dims = 1)
+ end
+ end
+ if setup["ParameterScale"] == 1
+ powerbalance *= ModelScalingFactor
+ end
+ dfPowerBalance.AnnualSum .= powerbalance * inputs["omega"]
+
+ if setup["WriteOutputs"] == "annual"
+ CSV.write(joinpath(path, "power_balance.csv"), dfPowerBalance)
+ else # setup["WriteOutputs"] == "full"
+ dfPowerBalance = hcat(dfPowerBalance, DataFrame(powerbalance, :auto))
+ auxNew_Names = [Symbol("BalanceComponent");
+ Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfPowerBalance, auxNew_Names)
+ CSV.write(joinpath(path, "power_balance.csv"),
+ dftranspose(dfPowerBalance, false),
+ writeheader = false)
+ end
+ return nothing
end
diff --git a/src/write_outputs/write_price.jl b/src/write_outputs/write_price.jl
index 7d240a0f02..05943c8fa8 100644
--- a/src/write_outputs/write_price.jl
+++ b/src/write_outputs/write_price.jl
@@ -4,22 +4,24 @@
Function for reporting marginal electricity price for each model zone and time step. Marginal electricity price is equal to the dual variable of the power balance constraint. If GenX is configured as a mixed integer linear program, then this output is only generated if `WriteShadowPrices` flag is activated. If configured as a linear program (i.e. linearized unit commitment or economic dispatch) then output automatically available.
"""
function write_price(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- ## Extract dual variables of constraints
- # Electricity price: Dual variable of hourly power balance constraint = hourly price
- dfPrice = DataFrame(Zone = 1:Z) # The unit is $/MWh
- # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of generation
+ ## Extract dual variables of constraints
+ # Electricity price: Dual variable of hourly power balance constraint = hourly price
+ dfPrice = DataFrame(Zone = 1:Z) # The unit is $/MWh
+ # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of generation
price = locational_marginal_price(EP, inputs, setup)
- dfPrice = hcat(dfPrice, DataFrame(transpose(price), :auto))
+ dfPrice = hcat(dfPrice, DataFrame(transpose(price), :auto))
- auxNew_Names=[Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfPrice,auxNew_Names)
+ auxNew_Names = [Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfPrice, auxNew_Names)
- ## Linear configuration final output
- CSV.write(joinpath(path, "prices.csv"), dftranspose(dfPrice, false), writeheader=false)
- return nothing
+ ## Linear configuration final output
+ CSV.write(joinpath(path, "prices.csv"),
+ dftranspose(dfPrice, false),
+ writeheader = false)
+ return nothing
end
@doc raw"""
diff --git a/src/write_outputs/write_reliability.jl b/src/write_outputs/write_reliability.jl
index ce5cd34efd..876465f8b7 100644
--- a/src/write_outputs/write_reliability.jl
+++ b/src/write_outputs/write_reliability.jl
@@ -4,18 +4,20 @@
Function for reporting dual variable of maximum non-served energy constraint (shadow price of reliability constraint) for each model zone and time step.
"""
function write_reliability(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- # reliability: Dual variable of maximum NSE constraint = shadow value of reliability constraint
- dfReliability = DataFrame(Zone = 1:Z)
- # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of generation
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- dfReliability = hcat(dfReliability, DataFrame(transpose(dual.(EP[:cMaxNSE])./inputs["omega"]*scale_factor), :auto))
+ # reliability: Dual variable of maximum NSE constraint = shadow value of reliability constraint
+ dfReliability = DataFrame(Zone = 1:Z)
+ # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of generation
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ dfReliability = hcat(dfReliability,
+ DataFrame(transpose(dual.(EP[:cMaxNSE]) ./ inputs["omega"] * scale_factor), :auto))
- auxNew_Names=[Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfReliability,auxNew_Names)
-
- CSV.write(joinpath(path, "reliability.csv"), dftranspose(dfReliability, false), header=false)
+ auxNew_Names = [Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfReliability, auxNew_Names)
+ CSV.write(joinpath(path, "reliability.csv"),
+ dftranspose(dfReliability, false),
+ header = false)
end
diff --git a/src/write_outputs/write_status.jl b/src/write_outputs/write_status.jl
index 32b6eee760..8558a21a50 100644
--- a/src/write_outputs/write_status.jl
+++ b/src/write_outputs/write_status.jl
@@ -5,16 +5,17 @@ Function for writing the final solve status of the optimization problem solved.
"""
function write_status(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- # https://jump.dev/MathOptInterface.jl/v0.9.10/apireference/#MathOptInterface.TerminationStatusCode
- status = termination_status(EP)
+ # https://jump.dev/MathOptInterface.jl/v0.9.10/apireference/#MathOptInterface.TerminationStatusCode
+ status = termination_status(EP)
- # Note: Gurobi excludes constants from solver reported objective function value - MIPGap calculated may be erroneous
- if (setup["UCommit"] == 0 || setup["UCommit"] == 2)
- dfStatus = DataFrame(Status = status, Solve = inputs["solve_time"],
- Objval = objective_value(EP))
- else
- dfStatus = DataFrame(Status = status, Solve = inputs["solve_time"],
- Objval = objective_value(EP), Objbound= objective_bound(EP),FinalMIPGap =(objective_value(EP) -objective_bound(EP))/objective_value(EP) )
- end
- CSV.write(joinpath(path, "status.csv"),dfStatus)
+ # Note: Gurobi excludes constants from solver reported objective function value - MIPGap calculated may be erroneous
+ if (setup["UCommit"] == 0 || setup["UCommit"] == 2)
+ dfStatus = DataFrame(Status = status, Solve = inputs["solve_time"],
+ Objval = objective_value(EP))
+ else
+ dfStatus = DataFrame(Status = status, Solve = inputs["solve_time"],
+ Objval = objective_value(EP), Objbound = objective_bound(EP),
+ FinalMIPGap = (objective_value(EP) - objective_bound(EP)) / objective_value(EP))
+ end
+ CSV.write(joinpath(path, "status.csv"), dfStatus)
end
diff --git a/src/write_outputs/write_storage.jl b/src/write_outputs/write_storage.jl
index b8d2167dba..a34c470108 100644
--- a/src/write_outputs/write_storage.jl
+++ b/src/write_outputs/write_storage.jl
@@ -3,40 +3,40 @@
Function for writing the capacities of different storage technologies, including hydro reservoir, flexible storage tech etc.
"""
-function write_storage(path::AbstractString, inputs::Dict,setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+function write_storage(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- T = inputs["T"] # Number of time steps (hours)
- G = inputs["G"]
- STOR_ALL = inputs["STOR_ALL"]
- HYDRO_RES = inputs["HYDRO_RES"]
- FLEX = inputs["FLEX"]
- VRE_STOR = inputs["VRE_STOR"]
- VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
-
- # Storage level (state of charge) of each resource in each time step
- dfStorage = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
- storagevcapvalue = zeros(G,T)
+ T = inputs["T"] # Number of time steps (hours)
+ G = inputs["G"]
+ STOR_ALL = inputs["STOR_ALL"]
+ HYDRO_RES = inputs["HYDRO_RES"]
+ FLEX = inputs["FLEX"]
+ VRE_STOR = inputs["VRE_STOR"]
+ VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
- if !isempty(inputs["STOR_ALL"])
- storagevcapvalue[STOR_ALL, :] = value.(EP[:vS][STOR_ALL, :])
- end
- if !isempty(inputs["HYDRO_RES"])
- storagevcapvalue[HYDRO_RES, :] = value.(EP[:vS_HYDRO][HYDRO_RES, :])
- end
- if !isempty(inputs["FLEX"])
- storagevcapvalue[FLEX, :] = value.(EP[:vS_FLEX][FLEX, :])
- end
- if !isempty(VS_STOR)
- storagevcapvalue[VS_STOR, :] = value.(EP[:vS_VRE_STOR][VS_STOR, :])
- end
- if setup["ParameterScale"] == 1
- storagevcapvalue *= ModelScalingFactor
- end
+ # Storage level (state of charge) of each resource in each time step
+ dfStorage = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
+ storagevcapvalue = zeros(G, T)
- dfStorage = hcat(dfStorage, DataFrame(storagevcapvalue, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfStorage,auxNew_Names)
- CSV.write(joinpath(path, "storage.csv"), dftranspose(dfStorage, false), header=false)
+ if !isempty(inputs["STOR_ALL"])
+ storagevcapvalue[STOR_ALL, :] = value.(EP[:vS][STOR_ALL, :])
+ end
+ if !isempty(inputs["HYDRO_RES"])
+ storagevcapvalue[HYDRO_RES, :] = value.(EP[:vS_HYDRO][HYDRO_RES, :])
+ end
+ if !isempty(inputs["FLEX"])
+ storagevcapvalue[FLEX, :] = value.(EP[:vS_FLEX][FLEX, :])
+ end
+ if !isempty(VS_STOR)
+ storagevcapvalue[VS_STOR, :] = value.(EP[:vS_VRE_STOR][VS_STOR, :])
+ end
+ if setup["ParameterScale"] == 1
+ storagevcapvalue *= ModelScalingFactor
+ end
+
+ dfStorage = hcat(dfStorage, DataFrame(storagevcapvalue, :auto))
+ auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfStorage, auxNew_Names)
+ CSV.write(joinpath(path, "storage.csv"), dftranspose(dfStorage, false), header = false)
end
diff --git a/src/write_outputs/write_storagedual.jl b/src/write_outputs/write_storagedual.jl
index 53a99f9603..5b1eced203 100644
--- a/src/write_outputs/write_storagedual.jl
+++ b/src/write_outputs/write_storagedual.jl
@@ -4,60 +4,79 @@
Function for reporting dual of storage level (state of charge) balance of each resource in each time step.
"""
function write_storagedual(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
-
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
-
- START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
- REP_PERIOD = inputs["REP_PERIOD"]
- STOR_ALL = inputs["STOR_ALL"]
- VRE_STOR = inputs["VRE_STOR"]
- if !isempty(VRE_STOR)
- VS_STOR = inputs["VS_STOR"]
- VS_LDS = inputs["VS_LDS"]
- VS_NONLDS = setdiff(VS_STOR, VS_LDS)
- end
-
- # # Dual of storage level (state of charge) balance of each resource in each time step
- dfStorageDual = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
- dual_values = zeros(G, T)
-
- # Loop over W separately hours_per_subperiod
- if !isempty(STOR_ALL)
- STOR_ALL_NONLDS = setdiff(STOR_ALL, inputs["STOR_LONG_DURATION"])
- STOR_ALL_LDS = intersect(STOR_ALL, inputs["STOR_LONG_DURATION"])
- dual_values[STOR_ALL, INTERIOR_SUBPERIODS] = (dual.(EP[:cSoCBalInterior][INTERIOR_SUBPERIODS, STOR_ALL]).data ./ inputs["omega"][INTERIOR_SUBPERIODS])'
- dual_values[STOR_ALL_NONLDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalStart][START_SUBPERIODS, STOR_ALL_NONLDS]).data ./ inputs["omega"][START_SUBPERIODS])'
- if !isempty(STOR_ALL_LDS)
- if inputs["REP_PERIOD"] > 1
- dual_values[STOR_ALL_LDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalLongDurationStorageStart][1:REP_PERIOD, STOR_ALL_LDS]).data ./ inputs["omega"][START_SUBPERIODS])'
- else
- dual_values[STOR_ALL_LDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalStart][START_SUBPERIODS, STOR_ALL_LDS]).data ./ inputs["omega"][START_SUBPERIODS])'
- end
- end
- end
-
- if !isempty(VRE_STOR)
- dual_values[VS_STOR, INTERIOR_SUBPERIODS] = ((dual.(EP[:cSoCBalInterior_VRE_STOR][VS_STOR, INTERIOR_SUBPERIODS]).data)' ./ inputs["omega"][INTERIOR_SUBPERIODS])'
- dual_values[VS_NONLDS, START_SUBPERIODS] = ((dual.(EP[:cSoCBalStart_VRE_STOR][VS_NONLDS, START_SUBPERIODS]).data)' ./ inputs["omega"][START_SUBPERIODS])'
- if !isempty(VS_LDS)
- if inputs["REP_PERIOD"] > 1
- dual_values[VS_LDS, START_SUBPERIODS] = ((dual.(EP[:cVreStorSoCBalLongDurationStorageStart][VS_LDS, 1:REP_PERIOD]).data)' ./ inputs["omega"][START_SUBPERIODS])'
- else
- dual_values[VS_LDS, START_SUBPERIODS] = ((dual.(EP[:cSoCBalStart_VRE_STOR][VS_LDS, START_SUBPERIODS]).data)' ./ inputs["omega"][START_SUBPERIODS])'
- end
- end
- end
-
- if setup["ParameterScale"] == 1
- dual_values *= ModelScalingFactor
- end
-
- dfStorageDual=hcat(dfStorageDual, DataFrame(dual_values, :auto))
- rename!(dfStorageDual,[Symbol("Resource");Symbol("Zone");[Symbol("t$t") for t in 1:T]])
-
- CSV.write(joinpath(path, "storagebal_duals.csv"), dftranspose(dfStorageDual, false), header=false)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
+
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+
+ START_SUBPERIODS = inputs["START_SUBPERIODS"]
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ REP_PERIOD = inputs["REP_PERIOD"]
+ STOR_ALL = inputs["STOR_ALL"]
+ VRE_STOR = inputs["VRE_STOR"]
+ if !isempty(VRE_STOR)
+ VS_STOR = inputs["VS_STOR"]
+ VS_LDS = inputs["VS_LDS"]
+ VS_NONLDS = setdiff(VS_STOR, VS_LDS)
+ end
+
+ # # Dual of storage level (state of charge) balance of each resource in each time step
+ dfStorageDual = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
+ dual_values = zeros(G, T)
+
+ # Loop over W separately hours_per_subperiod
+ if !isempty(STOR_ALL)
+ STOR_ALL_NONLDS = setdiff(STOR_ALL, inputs["STOR_LONG_DURATION"])
+ STOR_ALL_LDS = intersect(STOR_ALL, inputs["STOR_LONG_DURATION"])
+ dual_values[STOR_ALL, INTERIOR_SUBPERIODS] = (dual.(EP[:cSoCBalInterior][
+ INTERIOR_SUBPERIODS,
+ STOR_ALL]).data ./ inputs["omega"][INTERIOR_SUBPERIODS])'
+ dual_values[STOR_ALL_NONLDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalStart][
+ START_SUBPERIODS,
+ STOR_ALL_NONLDS]).data ./ inputs["omega"][START_SUBPERIODS])'
+ if !isempty(STOR_ALL_LDS)
+ if inputs["REP_PERIOD"] > 1
+ dual_values[STOR_ALL_LDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalLongDurationStorageStart][
+ 1:REP_PERIOD,
+ STOR_ALL_LDS]).data ./ inputs["omega"][START_SUBPERIODS])'
+ else
+ dual_values[STOR_ALL_LDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalStart][
+ START_SUBPERIODS,
+ STOR_ALL_LDS]).data ./ inputs["omega"][START_SUBPERIODS])'
+ end
+ end
+ end
+
+ if !isempty(VRE_STOR)
+ dual_values[VS_STOR, INTERIOR_SUBPERIODS] = ((dual.(EP[:cSoCBalInterior_VRE_STOR][
+ VS_STOR,
+ INTERIOR_SUBPERIODS]).data)' ./ inputs["omega"][INTERIOR_SUBPERIODS])'
+ dual_values[VS_NONLDS, START_SUBPERIODS] = ((dual.(EP[:cSoCBalStart_VRE_STOR][
+ VS_NONLDS,
+ START_SUBPERIODS]).data)' ./ inputs["omega"][START_SUBPERIODS])'
+ if !isempty(VS_LDS)
+ if inputs["REP_PERIOD"] > 1
+ dual_values[VS_LDS, START_SUBPERIODS] = ((dual.(EP[:cVreStorSoCBalLongDurationStorageStart][
+ VS_LDS,
+ 1:REP_PERIOD]).data)' ./ inputs["omega"][START_SUBPERIODS])'
+ else
+ dual_values[VS_LDS, START_SUBPERIODS] = ((dual.(EP[:cSoCBalStart_VRE_STOR][
+ VS_LDS,
+ START_SUBPERIODS]).data)' ./ inputs["omega"][START_SUBPERIODS])'
+ end
+ end
+ end
+
+ if setup["ParameterScale"] == 1
+ dual_values *= ModelScalingFactor
+ end
+
+ dfStorageDual = hcat(dfStorageDual, DataFrame(dual_values, :auto))
+ rename!(dfStorageDual,
+ [Symbol("Resource"); Symbol("Zone"); [Symbol("t$t") for t in 1:T]])
+
+ CSV.write(joinpath(path, "storagebal_duals.csv"),
+ dftranspose(dfStorageDual, false),
+ header = false)
end
diff --git a/src/write_outputs/write_subsidy_revenue.jl b/src/write_outputs/write_subsidy_revenue.jl
index b7702cd747..f74bede14c 100644
--- a/src/write_outputs/write_subsidy_revenue.jl
+++ b/src/write_outputs/write_subsidy_revenue.jl
@@ -4,98 +4,121 @@
Function for reporting subsidy revenue earned if a generator specified `Min_Cap` is provided in the input file, or if a generator is subject to a Minimum Capacity Requirement constraint. The unit is \$.
"""
function write_subsidy_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
- rid = resource_id.(gen)
-
- G = inputs["G"]
+ gen = inputs["RESOURCES"]
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
+ rid = resource_id.(gen)
- dfSubRevenue = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters, R_ID=rid, SubsidyRevenue = zeros(G))
- MIN_CAP = ids_with_positive(gen, min_cap_mw)
- if !isempty(inputs["VRE_STOR"])
- MIN_CAP_SOLAR = ids_with_positive(gen.VreStorage, min_cap_solar_mw)
- MIN_CAP_WIND = ids_with_positive(gen.VreStorage, min_cap_wind_mw)
- MIN_CAP_STOR = ids_with_positive(gen, min_cap_mwh)
- if !isempty(MIN_CAP_SOLAR)
- dfSubRevenue.SubsidyRevenue[MIN_CAP_SOLAR] .+= (value.(EP[:eTotalCap_SOLAR])[MIN_CAP_SOLAR]) .* (dual.(EP[:cMinCap_Solar][MIN_CAP_SOLAR])).data
- end
- if !isempty(MIN_CAP_WIND)
- dfSubRevenue.SubsidyRevenue[MIN_CAP_WIND] .+= (value.(EP[:eTotalCap_WIND])[MIN_CAP_WIND]) .* (dual.(EP[:cMinCap_Wind][MIN_CAP_WIND])).data
- end
- if !isempty(MIN_CAP_STOR)
- dfSubRevenue.SubsidyRevenue[MIN_CAP_STOR] .+= (value.(EP[:eTotalCap_STOR])[MIN_CAP_STOR]) .* (dual.(EP[:cMinCap_Stor][MIN_CAP_STOR])).data
- end
- end
- dfSubRevenue.SubsidyRevenue[MIN_CAP] .= (value.(EP[:eTotalCap])[MIN_CAP]) .* (dual.(EP[:cMinCap][MIN_CAP])).data
- ### calculating tech specific subsidy revenue
- dfRegSubRevenue = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters, R_ID=rid, SubsidyRevenue = zeros(G))
- if (setup["MinCapReq"] >= 1)
- for mincap in 1:inputs["NumberOfMinCapReqs"] # This key only exists if MinCapReq >= 1, so we can't get it at the top outside of this condition.
- MIN_CAP_GEN = ids_with_policy(gen, min_cap, tag=mincap)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN] .= dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN] + (value.(EP[:eTotalCap][MIN_CAP_GEN])) * (dual.(EP[:cZoneMinCapReq][mincap]))
- if !isempty(inputs["VRE_STOR"])
- gen_VRE_STOR = gen.VreStorage
- HAS_MIN_CAP_STOR = ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap)
- MIN_CAP_GEN_SOLAR = ids_with_policy(gen_VRE_STOR, min_cap_solar, tag=mincap)
- MIN_CAP_GEN_WIND = ids_with_policy(gen_VRE_STOR, min_cap_wind, tag=mincap)
- MIN_CAP_GEN_ASYM_DC_DIS = intersect(inputs["VS_ASYM_DC_DISCHARGE"], HAS_MIN_CAP_STOR)
- MIN_CAP_GEN_ASYM_AC_DIS = intersect(inputs["VS_ASYM_AC_DISCHARGE"], HAS_MIN_CAP_STOR)
- MIN_CAP_GEN_SYM_DC = intersect(inputs["VS_SYM_DC"], HAS_MIN_CAP_STOR)
- MIN_CAP_GEN_SYM_AC = intersect(inputs["VS_SYM_AC"], HAS_MIN_CAP_STOR)
- if !isempty(MIN_CAP_GEN_SOLAR)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SOLAR] .+= (
- (value.(EP[:eTotalCap_SOLAR][MIN_CAP_GEN_SOLAR]).data)
- .* etainverter.(gen[ids_with_policy(gen, min_cap_solar, tag=mincap)])
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_WIND)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_WIND] .+= (
- (value.(EP[:eTotalCap_WIND][MIN_CAP_GEN_WIND]).data)
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_ASYM_DC_DIS)
- MIN_CAP_GEN_ASYM_DC_DIS = intersect(inputs["VS_ASYM_DC_DISCHARGE"], HAS_MIN_CAP_STOR)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_ASYM_DC_DIS] .+= (
- (value.(EP[:eTotalCapDischarge_DC][MIN_CAP_GEN_ASYM_DC_DIS].data)
- .* etainverter.(gen_VRE_STOR[min_cap_stor.(gen_VRE_STOR, tag=mincap).==1 .& (gen_VRE_STOR.stor_dc_discharge.==2)]))
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_ASYM_AC_DIS)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_ASYM_AC_DIS] .+= (
- (value.(EP[:eTotalCapDischarge_AC][MIN_CAP_GEN_ASYM_AC_DIS]).data)
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_SYM_DC)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SYM_DC] .+= (
- (value.(EP[:eTotalCap_STOR][MIN_CAP_GEN_SYM_DC]).data
- .* power_to_energy_dc.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag=mincap).==1 .& (gen_VRE_STOR.stor_dc_discharge.==1))])
- .* etainverter.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag=mincap).==1 .& (gen_VRE_STOR.stor_dc_discharge.==1))]))
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_SYM_AC)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SYM_AC] .+= (
- (value.(EP[:eTotalCap_STOR][MIN_CAP_GEN_SYM_AC]).data
- .* power_to_energy_ac.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag=mincap).==1 .& (gen_VRE_STOR.stor_ac_discharge.==1))]))
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- end
- end
- end
+ G = inputs["G"]
- if setup["ParameterScale"] == 1
- dfSubRevenue.SubsidyRevenue *= ModelScalingFactor^2 #convert from Million US$ to US$
- dfRegSubRevenue.SubsidyRevenue *= ModelScalingFactor^2 #convert from Million US$ to US$
- end
+ dfSubRevenue = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters,
+ R_ID = rid,
+ SubsidyRevenue = zeros(G))
+ MIN_CAP = ids_with_positive(gen, min_cap_mw)
+ if !isempty(inputs["VRE_STOR"])
+ MIN_CAP_SOLAR = ids_with_positive(gen.VreStorage, min_cap_solar_mw)
+ MIN_CAP_WIND = ids_with_positive(gen.VreStorage, min_cap_wind_mw)
+ MIN_CAP_STOR = ids_with_positive(gen, min_cap_mwh)
+ if !isempty(MIN_CAP_SOLAR)
+ dfSubRevenue.SubsidyRevenue[MIN_CAP_SOLAR] .+= (value.(EP[:eTotalCap_SOLAR])[MIN_CAP_SOLAR]) .*
+ (dual.(EP[:cMinCap_Solar][MIN_CAP_SOLAR])).data
+ end
+ if !isempty(MIN_CAP_WIND)
+ dfSubRevenue.SubsidyRevenue[MIN_CAP_WIND] .+= (value.(EP[:eTotalCap_WIND])[MIN_CAP_WIND]) .*
+ (dual.(EP[:cMinCap_Wind][MIN_CAP_WIND])).data
+ end
+ if !isempty(MIN_CAP_STOR)
+ dfSubRevenue.SubsidyRevenue[MIN_CAP_STOR] .+= (value.(EP[:eTotalCap_STOR])[MIN_CAP_STOR]) .*
+ (dual.(EP[:cMinCap_Stor][MIN_CAP_STOR])).data
+ end
+ end
+ dfSubRevenue.SubsidyRevenue[MIN_CAP] .= (value.(EP[:eTotalCap])[MIN_CAP]) .*
+ (dual.(EP[:cMinCap][MIN_CAP])).data
+ ### calculating tech specific subsidy revenue
+ dfRegSubRevenue = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters,
+ R_ID = rid,
+ SubsidyRevenue = zeros(G))
+ if (setup["MinCapReq"] >= 1)
+ for mincap in 1:inputs["NumberOfMinCapReqs"] # This key only exists if MinCapReq >= 1, so we can't get it at the top outside of this condition.
+ MIN_CAP_GEN = ids_with_policy(gen, min_cap, tag = mincap)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN] .= dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN] +
+ (value.(EP[:eTotalCap][MIN_CAP_GEN])) *
+ (dual.(EP[:cZoneMinCapReq][mincap]))
+ if !isempty(inputs["VRE_STOR"])
+ gen_VRE_STOR = gen.VreStorage
+ HAS_MIN_CAP_STOR = ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap)
+ MIN_CAP_GEN_SOLAR = ids_with_policy(gen_VRE_STOR,
+ min_cap_solar,
+ tag = mincap)
+ MIN_CAP_GEN_WIND = ids_with_policy(gen_VRE_STOR, min_cap_wind, tag = mincap)
+ MIN_CAP_GEN_ASYM_DC_DIS = intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ HAS_MIN_CAP_STOR)
+ MIN_CAP_GEN_ASYM_AC_DIS = intersect(inputs["VS_ASYM_AC_DISCHARGE"],
+ HAS_MIN_CAP_STOR)
+ MIN_CAP_GEN_SYM_DC = intersect(inputs["VS_SYM_DC"], HAS_MIN_CAP_STOR)
+ MIN_CAP_GEN_SYM_AC = intersect(inputs["VS_SYM_AC"], HAS_MIN_CAP_STOR)
+ if !isempty(MIN_CAP_GEN_SOLAR)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SOLAR] .+= ((value.(EP[:eTotalCap_SOLAR][MIN_CAP_GEN_SOLAR]).data)
+ .*
+ etainverter.(gen[ids_with_policy(
+ gen,
+ min_cap_solar,
+ tag = mincap)])
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_WIND)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_WIND] .+= ((value.(EP[:eTotalCap_WIND][MIN_CAP_GEN_WIND]).data)
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_ASYM_DC_DIS)
+ MIN_CAP_GEN_ASYM_DC_DIS = intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ HAS_MIN_CAP_STOR)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_ASYM_DC_DIS] .+= ((value.(EP[:eTotalCapDischarge_DC][MIN_CAP_GEN_ASYM_DC_DIS].data)
+ .*
+ etainverter.(gen_VRE_STOR[min_cap_stor.(gen_VRE_STOR, tag = mincap) .== 1 .& (gen_VRE_STOR.stor_dc_discharge .== 2)]))
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_ASYM_AC_DIS)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_ASYM_AC_DIS] .+= ((value.(EP[:eTotalCapDischarge_AC][MIN_CAP_GEN_ASYM_AC_DIS]).data)
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_SYM_DC)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SYM_DC] .+= ((value.(EP[:eTotalCap_STOR][MIN_CAP_GEN_SYM_DC]).data
+ .*
+ power_to_energy_dc.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag = mincap) .== 1 .& (gen_VRE_STOR.stor_dc_discharge .== 1))])
+ .*
+ etainverter.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag = mincap) .== 1 .& (gen_VRE_STOR.stor_dc_discharge .== 1))]))
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_SYM_AC)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SYM_AC] .+= ((value.(EP[:eTotalCap_STOR][MIN_CAP_GEN_SYM_AC]).data
+ .*
+ power_to_energy_ac.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag = mincap) .== 1 .& (gen_VRE_STOR.stor_ac_discharge .== 1))]))
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ end
+ end
+ end
- CSV.write(joinpath(path, "SubsidyRevenue.csv"), dfSubRevenue)
- CSV.write(joinpath(path, "RegSubsidyRevenue.csv"), dfRegSubRevenue)
- return dfSubRevenue, dfRegSubRevenue
+ if setup["ParameterScale"] == 1
+ dfSubRevenue.SubsidyRevenue *= ModelScalingFactor^2 #convert from Million US$ to US$
+ dfRegSubRevenue.SubsidyRevenue *= ModelScalingFactor^2 #convert from Million US$ to US$
+ end
+
+ CSV.write(joinpath(path, "SubsidyRevenue.csv"), dfSubRevenue)
+ CSV.write(joinpath(path, "RegSubsidyRevenue.csv"), dfRegSubRevenue)
+ return dfSubRevenue, dfRegSubRevenue
end
diff --git a/src/write_outputs/write_time_weights.jl b/src/write_outputs/write_time_weights.jl
index 8f799478f0..b29bbdcb2f 100644
--- a/src/write_outputs/write_time_weights.jl
+++ b/src/write_outputs/write_time_weights.jl
@@ -1,6 +1,6 @@
function write_time_weights(path::AbstractString, inputs::Dict)
- T = inputs["T"] # Number of time steps (hours)
- # Save array of weights for each time period (when using time sampling)
- dfTimeWeights = DataFrame(Time=1:T, Weight=inputs["omega"])
- CSV.write(joinpath(path, "time_weights.csv"), dfTimeWeights)
+ T = inputs["T"] # Number of time steps (hours)
+ # Save array of weights for each time period (when using time sampling)
+ dfTimeWeights = DataFrame(Time = 1:T, Weight = inputs["omega"])
+ CSV.write(joinpath(path, "time_weights.csv"), dfTimeWeights)
end
diff --git a/src/write_outputs/write_vre_stor.jl b/src/write_outputs/write_vre_stor.jl
index 6f7e617ec1..5e54303ebf 100644
--- a/src/write_outputs/write_vre_stor.jl
+++ b/src/write_outputs/write_vre_stor.jl
@@ -5,16 +5,16 @@ Function for writing the vre-storage specific files.
"""
function write_vre_stor(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- ### CAPACITY DECISIONS ###
- dfVreStor = write_vre_stor_capacity(path, inputs, setup, EP)
+ ### CAPACITY DECISIONS ###
+ dfVreStor = write_vre_stor_capacity(path, inputs, setup, EP)
- ### CHARGING DECISIONS ###
- write_vre_stor_charge(path, inputs, setup, EP)
+ ### CHARGING DECISIONS ###
+ write_vre_stor_charge(path, inputs, setup, EP)
- ### DISCHARGING DECISIONS ###
- write_vre_stor_discharge(path, inputs, setup, EP)
+ ### DISCHARGING DECISIONS ###
+ write_vre_stor_discharge(path, inputs, setup, EP)
- return dfVreStor
+ return dfVreStor
end
@doc raw"""
@@ -23,262 +23,289 @@ end
Function for writing the vre-storage capacities.
"""
function write_vre_stor_capacity(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- gen_VRE_STOR = gen.VreStorage
-
- VRE_STOR = inputs["VRE_STOR"]
- SOLAR = inputs["VS_SOLAR"]
- WIND = inputs["VS_WIND"]
- DC = inputs["VS_DC"]
- STOR = inputs["VS_STOR"]
- MultiStage = setup["MultiStage"]
- size_vrestor_resources = size(inputs["RESOURCE_NAMES_VRE_STOR"])
-
- # Solar capacity
- capsolar = zeros(size_vrestor_resources)
- retcapsolar = zeros(size_vrestor_resources)
- existingcapsolar = zeros(size_vrestor_resources)
-
- # Wind capacity
- capwind = zeros(size_vrestor_resources)
- retcapwind = zeros(size_vrestor_resources)
- existingcapwind = zeros(size_vrestor_resources)
-
- # Inverter capacity
- capdc = zeros(size_vrestor_resources)
- retcapdc = zeros(size_vrestor_resources)
- existingcapdc = zeros(size_vrestor_resources)
-
- # Grid connection capacity
- capgrid = zeros(size_vrestor_resources)
- retcapgrid = zeros(size_vrestor_resources)
- existingcapgrid = zeros(size_vrestor_resources)
-
- # Energy storage capacity
- capenergy = zeros(size_vrestor_resources)
- retcapenergy = zeros(size_vrestor_resources)
- existingcapenergy = zeros(size_vrestor_resources)
-
- # Charge storage capacity DC
- capchargedc = zeros(size_vrestor_resources)
- retcapchargedc = zeros(size_vrestor_resources)
- existingcapchargedc = zeros(size_vrestor_resources)
-
- # Charge storage capacity AC
- capchargeac = zeros(size_vrestor_resources)
- retcapchargeac = zeros(size_vrestor_resources)
- existingcapchargeac = zeros(size_vrestor_resources)
-
- # Discharge storage capacity DC
- capdischargedc = zeros(size_vrestor_resources)
- retcapdischargedc = zeros(size_vrestor_resources)
- existingcapdischargedc = zeros(size_vrestor_resources)
-
- # Discharge storage capacity AC
- capdischargeac = zeros(size_vrestor_resources)
- retcapdischargeac = zeros(size_vrestor_resources)
- existingcapdischargeac = zeros(size_vrestor_resources)
-
- j = 1
- for i in VRE_STOR
- existingcapgrid[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAP][i]) : existing_cap_mw(gen[i])
- if i in inputs["NEW_CAP"]
- capgrid[j] = value(EP[:vCAP][i])
- end
- if i in inputs["RET_CAP"]
- retcapgrid[j] = value(EP[:vRETCAP][i])
- end
-
- if i in SOLAR
- existingcapsolar[j] = MultiStage == 1 ? value(EP[:vEXISTINGSOLARCAP][i]) : existing_cap_solar_mw(gen_VRE_STOR[j])
- if i in inputs["NEW_CAP_SOLAR"]
- capsolar[j] = value(EP[:vSOLARCAP][i])
- end
- if i in inputs["RET_CAP_SOLAR"]
- retcapsolar[j] = first(value.(EP[:vRETSOLARCAP][i]))
- end
- end
-
- if i in WIND
- existingcapwind[j] = MultiStage == 1 ? value(EP[:vEXISTINGWINDCAP][i]) : existing_cap_wind_mw(gen_VRE_STOR[j])
- if i in inputs["NEW_CAP_WIND"]
- capwind[j] = value(EP[:vWINDCAP][i])
- end
- if i in inputs["RET_CAP_WIND"]
- retcapwind[j] = first(value.(EP[:vRETWINDCAP][i]))
- end
- end
-
- if i in DC
- existingcapdc[j] = MultiStage == 1 ? value(EP[:vEXISTINGDCCAP][i]) : existing_cap_inverter_mw(gen_VRE_STOR[j])
- if i in inputs["NEW_CAP_DC"]
- capdc[j] = value(EP[:vDCCAP][i])
- end
- if i in inputs["RET_CAP_DC"]
- retcapdc[j] = first(value.(EP[:vRETDCCAP][i]))
- end
- end
-
- if i in STOR
- existingcapenergy[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPENERGY_VS][i]) : existing_cap_mwh(gen[i])
- if i in inputs["NEW_CAP_STOR"]
- capenergy[j] = value(EP[:vCAPENERGY_VS][i])
- end
- if i in inputs["RET_CAP_STOR"]
- retcapenergy[j] = first(value.(EP[:vRETCAPENERGY_VS][i]))
- end
-
- if i in inputs["VS_ASYM_DC_CHARGE"]
- if i in inputs["NEW_CAP_CHARGE_DC"]
- capchargedc[j] = value(EP[:vCAPCHARGE_DC][i])
- end
- if i in inputs["RET_CAP_CHARGE_DC"]
- retcapchargedc[j] = value(EP[:vRETCAPCHARGE_DC][i])
- end
- existingcapchargedc[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPCHARGEDC][i]) : existing_cap_charge_dc_mw(gen_VRE_STOR[j])
- end
- if i in inputs["VS_ASYM_AC_CHARGE"]
- if i in inputs["NEW_CAP_CHARGE_AC"]
- capchargeac[j] = value(EP[:vCAPCHARGE_AC][i])
- end
- if i in inputs["RET_CAP_CHARGE_AC"]
- retcapchargeac[j] = value(EP[:vRETCAPCHARGE_AC][i])
- end
- existingcapchargeac[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPCHARGEAC][i]) : existing_cap_charge_ac_mw(gen_VRE_STOR[j])
- end
- if i in inputs["VS_ASYM_DC_DISCHARGE"]
- if i in inputs["NEW_CAP_DISCHARGE_DC"]
- capdischargedc[j] = value(EP[:vCAPDISCHARGE_DC][i])
- end
- if i in inputs["RET_CAP_DISCHARGE_DC"]
- retcapdischargedc[j] = value(EP[:vRETCAPDISCHARGE_DC][i])
- end
- existingcapdischargedc[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPDISCHARGEDC][i]) : existing_cap_discharge_dc_mw(gen_VRE_STOR[j])
- end
- if i in inputs["VS_ASYM_AC_DISCHARGE"]
- if i in inputs["NEW_CAP_DISCHARGE_AC"]
- capdischargeac[j] = value(EP[:vCAPDISCHARGE_AC][i])
- end
- if i in inputs["RET_CAP_DISCHARGE_AC"]
- retcapdischargeac[j] = value(EP[:vRETCAPDISCHARGE_AC][i])
- end
- existingcapdischargeac[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPDISCHARGEAC][i]) : existing_cap_discharge_ac_mw(gen_VRE_STOR[j])
- end
- end
- j += 1
- end
-
- technologies = resource_type_mga.(gen_VRE_STOR)
- clusters = cluster.(gen_VRE_STOR)
- zones = zone_id.(gen_VRE_STOR)
-
- dfCap = DataFrame(
- Resource = inputs["RESOURCE_NAMES_VRE_STOR"], Zone = zones, Resource_Type = technologies, Cluster=clusters,
- StartCapSolar = existingcapsolar[:],
- RetCapSolar = retcapsolar[:],
- NewCapSolar = capsolar[:],
- EndCapSolar = existingcapsolar[:] - retcapsolar[:] + capsolar[:],
- StartCapWind = existingcapwind[:],
- RetCapWind = retcapwind[:],
- NewCapWind = capwind[:],
- EndCapWind = existingcapwind[:] - retcapwind[:] + capwind[:],
- StartCapDC = existingcapdc[:],
- RetCapDC = retcapdc[:],
- NewCapDC = capdc[:],
- EndCapDC = existingcapdc[:] - retcapdc[:] + capdc[:],
- StartCapGrid = existingcapgrid[:],
- RetCapGrid = retcapgrid[:],
- NewCapGrid = capgrid[:],
- EndCapGrid = existingcapgrid[:] - retcapgrid[:] + capgrid[:],
- StartEnergyCap = existingcapenergy[:],
- RetEnergyCap = retcapenergy[:],
- NewEnergyCap = capenergy[:],
- EndEnergyCap = existingcapenergy[:] - retcapenergy[:] + capenergy[:],
- StartChargeDCCap = existingcapchargedc[:],
- RetChargeDCCap = retcapchargedc[:],
- NewChargeDCCap = capchargedc[:],
- EndChargeDCCap = existingcapchargedc[:] - retcapchargedc[:] + capchargedc[:],
- StartChargeACCap = existingcapchargeac[:],
- RetChargeACCap = retcapchargeac[:],
- NewChargeACCap = capchargeac[:],
- EndChargeACCap = existingcapchargeac[:] - retcapchargeac[:] + capchargeac[:],
- StartDischargeDCCap = existingcapdischargedc[:],
- RetDischargeDCCap = retcapdischargedc[:],
- NewDischargeDCCap = capdischargedc[:],
- EndDischargeDCCap = existingcapdischargedc[:] - retcapdischargedc[:] + capdischargedc[:],
- StartDischargeACCap = existingcapdischargeac[:],
- RetDischargeACCap = retcapdischargeac[:],
- NewDischargeACCap = capdischargeac[:],
- EndDischargeACCap = existingcapdischargeac[:] - retcapdischargeac[:] + capdischargeac[:]
- )
-
- if setup["ParameterScale"] ==1
- columns_to_scale = [
- :StartCapSolar,
- :RetCapSolar,
- :NewCapSolar,
- :EndCapSolar,
- :StartCapWind,
- :RetCapWind,
- :NewCapWind,
- :EndCapWind,
- :StartCapDC,
- :RetCapDC,
- :NewCapDC,
- :EndCapDC,
- :StartCapGrid,
- :RetCapGrid,
- :NewCapGrid,
- :EndCapGrid,
- :StartEnergyCap,
- :RetEnergyCap,
- :NewEnergyCap,
- :EndEnergyCap,
- :StartChargeACCap,
- :RetChargeACCap,
- :NewChargeACCap,
- :EndChargeACCap,
- :StartChargeDCCap,
- :RetChargeDCCap,
- :NewChargeDCCap,
- :EndChargeDCCap,
- :StartDischargeDCCap,
- :RetDischargeDCCap,
- :NewDischargeDCCap,
- :EndDischargeDCCap,
- :StartDischargeACCap,
- :RetDischargeACCap,
- :NewDischargeACCap,
- :EndDischargeACCap,
- ]
- dfCap[!, columns_to_scale] .*= ModelScalingFactor
- end
-
- total = DataFrame(
- Resource = "Total", Zone = "n/a", Resource_Type = "Total", Cluster= "n/a",
- StartCapSolar = sum(dfCap[!,:StartCapSolar]), RetCapSolar = sum(dfCap[!,:RetCapSolar]),
- NewCapSolar = sum(dfCap[!,:NewCapSolar]), EndCapSolar = sum(dfCap[!,:EndCapSolar]),
- StartCapWind = sum(dfCap[!,:StartCapWind]), RetCapWind = sum(dfCap[!,:RetCapWind]),
- NewCapWind = sum(dfCap[!,:NewCapWind]), EndCapWind = sum(dfCap[!,:EndCapWind]),
- StartCapDC = sum(dfCap[!,:StartCapDC]), RetCapDC = sum(dfCap[!,:RetCapDC]),
- NewCapDC = sum(dfCap[!,:NewCapDC]), EndCapDC = sum(dfCap[!,:EndCapDC]),
- StartCapGrid = sum(dfCap[!,:StartCapGrid]), RetCapGrid = sum(dfCap[!,:RetCapGrid]),
- NewCapGrid = sum(dfCap[!,:NewCapGrid]), EndCapGrid = sum(dfCap[!,:EndCapGrid]),
- StartEnergyCap = sum(dfCap[!,:StartEnergyCap]), RetEnergyCap = sum(dfCap[!,:RetEnergyCap]),
- NewEnergyCap = sum(dfCap[!,:NewEnergyCap]), EndEnergyCap = sum(dfCap[!,:EndEnergyCap]),
- StartChargeACCap = sum(dfCap[!,:StartChargeACCap]), RetChargeACCap = sum(dfCap[!,:RetChargeACCap]),
- NewChargeACCap = sum(dfCap[!,:NewChargeACCap]), EndChargeACCap = sum(dfCap[!,:EndChargeACCap]),
- StartChargeDCCap = sum(dfCap[!,:StartChargeDCCap]), RetChargeDCCap = sum(dfCap[!,:RetChargeDCCap]),
- NewChargeDCCap = sum(dfCap[!,:NewChargeDCCap]), EndChargeDCCap = sum(dfCap[!,:EndChargeDCCap]),
- StartDischargeDCCap = sum(dfCap[!,:StartDischargeDCCap]), RetDischargeDCCap = sum(dfCap[!,:RetDischargeDCCap]),
- NewDischargeDCCap = sum(dfCap[!,:NewDischargeDCCap]), EndDischargeDCCap = sum(dfCap[!,:EndDischargeDCCap]),
- StartDischargeACCap = sum(dfCap[!,:StartDischargeACCap]), RetDischargeACCap = sum(dfCap[!,:RetDischargeACCap]),
- NewDischargeACCap = sum(dfCap[!,:NewDischargeACCap]), EndDischargeACCap = sum(dfCap[!,:EndDischargeACCap])
- )
-
- dfCap = vcat(dfCap, total)
- CSV.write(joinpath(path, "vre_stor_capacity.csv"), dfCap)
- return dfCap
+ gen = inputs["RESOURCES"]
+ gen_VRE_STOR = gen.VreStorage
+
+ VRE_STOR = inputs["VRE_STOR"]
+ SOLAR = inputs["VS_SOLAR"]
+ WIND = inputs["VS_WIND"]
+ DC = inputs["VS_DC"]
+ STOR = inputs["VS_STOR"]
+ MultiStage = setup["MultiStage"]
+ size_vrestor_resources = size(inputs["RESOURCE_NAMES_VRE_STOR"])
+
+ # Solar capacity
+ capsolar = zeros(size_vrestor_resources)
+ retcapsolar = zeros(size_vrestor_resources)
+ existingcapsolar = zeros(size_vrestor_resources)
+
+ # Wind capacity
+ capwind = zeros(size_vrestor_resources)
+ retcapwind = zeros(size_vrestor_resources)
+ existingcapwind = zeros(size_vrestor_resources)
+
+ # Inverter capacity
+ capdc = zeros(size_vrestor_resources)
+ retcapdc = zeros(size_vrestor_resources)
+ existingcapdc = zeros(size_vrestor_resources)
+
+ # Grid connection capacity
+ capgrid = zeros(size_vrestor_resources)
+ retcapgrid = zeros(size_vrestor_resources)
+ existingcapgrid = zeros(size_vrestor_resources)
+
+ # Energy storage capacity
+ capenergy = zeros(size_vrestor_resources)
+ retcapenergy = zeros(size_vrestor_resources)
+ existingcapenergy = zeros(size_vrestor_resources)
+
+ # Charge storage capacity DC
+ capchargedc = zeros(size_vrestor_resources)
+ retcapchargedc = zeros(size_vrestor_resources)
+ existingcapchargedc = zeros(size_vrestor_resources)
+
+ # Charge storage capacity AC
+ capchargeac = zeros(size_vrestor_resources)
+ retcapchargeac = zeros(size_vrestor_resources)
+ existingcapchargeac = zeros(size_vrestor_resources)
+
+ # Discharge storage capacity DC
+ capdischargedc = zeros(size_vrestor_resources)
+ retcapdischargedc = zeros(size_vrestor_resources)
+ existingcapdischargedc = zeros(size_vrestor_resources)
+
+ # Discharge storage capacity AC
+ capdischargeac = zeros(size_vrestor_resources)
+ retcapdischargeac = zeros(size_vrestor_resources)
+ existingcapdischargeac = zeros(size_vrestor_resources)
+
+ j = 1
+ for i in VRE_STOR
+ existingcapgrid[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAP][i]) :
+ existing_cap_mw(gen[i])
+ if i in inputs["NEW_CAP"]
+ capgrid[j] = value(EP[:vCAP][i])
+ end
+ if i in inputs["RET_CAP"]
+ retcapgrid[j] = value(EP[:vRETCAP][i])
+ end
+
+ if i in SOLAR
+ existingcapsolar[j] = MultiStage == 1 ? value(EP[:vEXISTINGSOLARCAP][i]) :
+ existing_cap_solar_mw(gen_VRE_STOR[j])
+ if i in inputs["NEW_CAP_SOLAR"]
+ capsolar[j] = value(EP[:vSOLARCAP][i])
+ end
+ if i in inputs["RET_CAP_SOLAR"]
+ retcapsolar[j] = first(value.(EP[:vRETSOLARCAP][i]))
+ end
+ end
+
+ if i in WIND
+ existingcapwind[j] = MultiStage == 1 ? value(EP[:vEXISTINGWINDCAP][i]) :
+ existing_cap_wind_mw(gen_VRE_STOR[j])
+ if i in inputs["NEW_CAP_WIND"]
+ capwind[j] = value(EP[:vWINDCAP][i])
+ end
+ if i in inputs["RET_CAP_WIND"]
+ retcapwind[j] = first(value.(EP[:vRETWINDCAP][i]))
+ end
+ end
+
+ if i in DC
+ existingcapdc[j] = MultiStage == 1 ? value(EP[:vEXISTINGDCCAP][i]) :
+ existing_cap_inverter_mw(gen_VRE_STOR[j])
+ if i in inputs["NEW_CAP_DC"]
+ capdc[j] = value(EP[:vDCCAP][i])
+ end
+ if i in inputs["RET_CAP_DC"]
+ retcapdc[j] = first(value.(EP[:vRETDCCAP][i]))
+ end
+ end
+
+ if i in STOR
+ existingcapenergy[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPENERGY_VS][i]) :
+ existing_cap_mwh(gen[i])
+ if i in inputs["NEW_CAP_STOR"]
+ capenergy[j] = value(EP[:vCAPENERGY_VS][i])
+ end
+ if i in inputs["RET_CAP_STOR"]
+ retcapenergy[j] = first(value.(EP[:vRETCAPENERGY_VS][i]))
+ end
+
+ if i in inputs["VS_ASYM_DC_CHARGE"]
+ if i in inputs["NEW_CAP_CHARGE_DC"]
+ capchargedc[j] = value(EP[:vCAPCHARGE_DC][i])
+ end
+ if i in inputs["RET_CAP_CHARGE_DC"]
+ retcapchargedc[j] = value(EP[:vRETCAPCHARGE_DC][i])
+ end
+ existingcapchargedc[j] = MultiStage == 1 ?
+ value(EP[:vEXISTINGCAPCHARGEDC][i]) :
+ existing_cap_charge_dc_mw(gen_VRE_STOR[j])
+ end
+ if i in inputs["VS_ASYM_AC_CHARGE"]
+ if i in inputs["NEW_CAP_CHARGE_AC"]
+ capchargeac[j] = value(EP[:vCAPCHARGE_AC][i])
+ end
+ if i in inputs["RET_CAP_CHARGE_AC"]
+ retcapchargeac[j] = value(EP[:vRETCAPCHARGE_AC][i])
+ end
+ existingcapchargeac[j] = MultiStage == 1 ?
+ value(EP[:vEXISTINGCAPCHARGEAC][i]) :
+ existing_cap_charge_ac_mw(gen_VRE_STOR[j])
+ end
+ if i in inputs["VS_ASYM_DC_DISCHARGE"]
+ if i in inputs["NEW_CAP_DISCHARGE_DC"]
+ capdischargedc[j] = value(EP[:vCAPDISCHARGE_DC][i])
+ end
+ if i in inputs["RET_CAP_DISCHARGE_DC"]
+ retcapdischargedc[j] = value(EP[:vRETCAPDISCHARGE_DC][i])
+ end
+ existingcapdischargedc[j] = MultiStage == 1 ?
+ value(EP[:vEXISTINGCAPDISCHARGEDC][i]) :
+ existing_cap_discharge_dc_mw(gen_VRE_STOR[j])
+ end
+ if i in inputs["VS_ASYM_AC_DISCHARGE"]
+ if i in inputs["NEW_CAP_DISCHARGE_AC"]
+ capdischargeac[j] = value(EP[:vCAPDISCHARGE_AC][i])
+ end
+ if i in inputs["RET_CAP_DISCHARGE_AC"]
+ retcapdischargeac[j] = value(EP[:vRETCAPDISCHARGE_AC][i])
+ end
+ existingcapdischargeac[j] = MultiStage == 1 ?
+ value(EP[:vEXISTINGCAPDISCHARGEAC][i]) :
+ existing_cap_discharge_ac_mw(gen_VRE_STOR[j])
+ end
+ end
+ j += 1
+ end
+
+ technologies = resource_type_mga.(gen_VRE_STOR)
+ clusters = cluster.(gen_VRE_STOR)
+ zones = zone_id.(gen_VRE_STOR)
+
+ dfCap = DataFrame(Resource = inputs["RESOURCE_NAMES_VRE_STOR"], Zone = zones,
+ Resource_Type = technologies, Cluster = clusters,
+ StartCapSolar = existingcapsolar[:],
+ RetCapSolar = retcapsolar[:],
+ NewCapSolar = capsolar[:],
+ EndCapSolar = existingcapsolar[:] - retcapsolar[:] + capsolar[:],
+ StartCapWind = existingcapwind[:],
+ RetCapWind = retcapwind[:],
+ NewCapWind = capwind[:],
+ EndCapWind = existingcapwind[:] - retcapwind[:] + capwind[:],
+ StartCapDC = existingcapdc[:],
+ RetCapDC = retcapdc[:],
+ NewCapDC = capdc[:],
+ EndCapDC = existingcapdc[:] - retcapdc[:] + capdc[:],
+ StartCapGrid = existingcapgrid[:],
+ RetCapGrid = retcapgrid[:],
+ NewCapGrid = capgrid[:],
+ EndCapGrid = existingcapgrid[:] - retcapgrid[:] + capgrid[:],
+ StartEnergyCap = existingcapenergy[:],
+ RetEnergyCap = retcapenergy[:],
+ NewEnergyCap = capenergy[:],
+ EndEnergyCap = existingcapenergy[:] - retcapenergy[:] + capenergy[:],
+ StartChargeDCCap = existingcapchargedc[:],
+ RetChargeDCCap = retcapchargedc[:],
+ NewChargeDCCap = capchargedc[:],
+ EndChargeDCCap = existingcapchargedc[:] - retcapchargedc[:] + capchargedc[:],
+ StartChargeACCap = existingcapchargeac[:],
+ RetChargeACCap = retcapchargeac[:],
+ NewChargeACCap = capchargeac[:],
+ EndChargeACCap = existingcapchargeac[:] - retcapchargeac[:] + capchargeac[:],
+ StartDischargeDCCap = existingcapdischargedc[:],
+ RetDischargeDCCap = retcapdischargedc[:],
+ NewDischargeDCCap = capdischargedc[:],
+ EndDischargeDCCap = existingcapdischargedc[:] - retcapdischargedc[:] +
+ capdischargedc[:],
+ StartDischargeACCap = existingcapdischargeac[:],
+ RetDischargeACCap = retcapdischargeac[:],
+ NewDischargeACCap = capdischargeac[:],
+ EndDischargeACCap = existingcapdischargeac[:] - retcapdischargeac[:] +
+ capdischargeac[:])
+
+ if setup["ParameterScale"] == 1
+ columns_to_scale = [
+ :StartCapSolar,
+ :RetCapSolar,
+ :NewCapSolar,
+ :EndCapSolar,
+ :StartCapWind,
+ :RetCapWind,
+ :NewCapWind,
+ :EndCapWind,
+ :StartCapDC,
+ :RetCapDC,
+ :NewCapDC,
+ :EndCapDC,
+ :StartCapGrid,
+ :RetCapGrid,
+ :NewCapGrid,
+ :EndCapGrid,
+ :StartEnergyCap,
+ :RetEnergyCap,
+ :NewEnergyCap,
+ :EndEnergyCap,
+ :StartChargeACCap,
+ :RetChargeACCap,
+ :NewChargeACCap,
+ :EndChargeACCap,
+ :StartChargeDCCap,
+ :RetChargeDCCap,
+ :NewChargeDCCap,
+ :EndChargeDCCap,
+ :StartDischargeDCCap,
+ :RetDischargeDCCap,
+ :NewDischargeDCCap,
+ :EndDischargeDCCap,
+ :StartDischargeACCap,
+ :RetDischargeACCap,
+ :NewDischargeACCap,
+ :EndDischargeACCap
+ ]
+ dfCap[!, columns_to_scale] .*= ModelScalingFactor
+ end
+
+ total = DataFrame(Resource = "Total", Zone = "n/a", Resource_Type = "Total",
+ Cluster = "n/a",
+ StartCapSolar = sum(dfCap[!, :StartCapSolar]),
+ RetCapSolar = sum(dfCap[!, :RetCapSolar]),
+ NewCapSolar = sum(dfCap[!, :NewCapSolar]),
+ EndCapSolar = sum(dfCap[!, :EndCapSolar]),
+ StartCapWind = sum(dfCap[!, :StartCapWind]),
+ RetCapWind = sum(dfCap[!, :RetCapWind]),
+ NewCapWind = sum(dfCap[!, :NewCapWind]), EndCapWind = sum(dfCap[!, :EndCapWind]),
+ StartCapDC = sum(dfCap[!, :StartCapDC]), RetCapDC = sum(dfCap[!, :RetCapDC]),
+ NewCapDC = sum(dfCap[!, :NewCapDC]), EndCapDC = sum(dfCap[!, :EndCapDC]),
+ StartCapGrid = sum(dfCap[!, :StartCapGrid]),
+ RetCapGrid = sum(dfCap[!, :RetCapGrid]),
+ NewCapGrid = sum(dfCap[!, :NewCapGrid]), EndCapGrid = sum(dfCap[!, :EndCapGrid]),
+ StartEnergyCap = sum(dfCap[!, :StartEnergyCap]),
+ RetEnergyCap = sum(dfCap[!, :RetEnergyCap]),
+ NewEnergyCap = sum(dfCap[!, :NewEnergyCap]),
+ EndEnergyCap = sum(dfCap[!, :EndEnergyCap]),
+ StartChargeACCap = sum(dfCap[!, :StartChargeACCap]),
+ RetChargeACCap = sum(dfCap[!, :RetChargeACCap]),
+ NewChargeACCap = sum(dfCap[!, :NewChargeACCap]),
+ EndChargeACCap = sum(dfCap[!, :EndChargeACCap]),
+ StartChargeDCCap = sum(dfCap[!, :StartChargeDCCap]),
+ RetChargeDCCap = sum(dfCap[!, :RetChargeDCCap]),
+ NewChargeDCCap = sum(dfCap[!, :NewChargeDCCap]),
+ EndChargeDCCap = sum(dfCap[!, :EndChargeDCCap]),
+ StartDischargeDCCap = sum(dfCap[!, :StartDischargeDCCap]),
+ RetDischargeDCCap = sum(dfCap[!, :RetDischargeDCCap]),
+ NewDischargeDCCap = sum(dfCap[!, :NewDischargeDCCap]),
+ EndDischargeDCCap = sum(dfCap[!, :EndDischargeDCCap]),
+ StartDischargeACCap = sum(dfCap[!, :StartDischargeACCap]),
+ RetDischargeACCap = sum(dfCap[!, :RetDischargeACCap]),
+ NewDischargeACCap = sum(dfCap[!, :NewDischargeACCap]),
+ EndDischargeACCap = sum(dfCap[!, :EndDischargeACCap]))
+
+ dfCap = vcat(dfCap, total)
+ CSV.write(joinpath(path, "vre_stor_capacity.csv"), dfCap)
+ return dfCap
end
@doc raw"""
@@ -287,43 +314,49 @@ end
Function for writing the vre-storage charging decision variables/expressions.
"""
function write_vre_stor_charge(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- gen_VRE_STOR = gen.VreStorage
- T = inputs["T"]
+ gen = inputs["RESOURCES"]
+ gen_VRE_STOR = gen.VreStorage
+ T = inputs["T"]
DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
- # DC charging of battery dataframe
- if !isempty(DC_CHARGE)
- dfCharge_DC = DataFrame(Resource = inputs["RESOURCE_NAMES_DC_CHARGE"], Zone = inputs["ZONES_DC_CHARGE"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(DC_CHARGE)[1]))
- charge_dc = zeros(size(DC_CHARGE)[1], T)
- charge_dc = value.(EP[:vP_DC_CHARGE]).data ./ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)]) * (setup["ParameterScale"]==1 ? ModelScalingFactor : 1)
- dfCharge_DC.AnnualSum .= charge_dc * inputs["omega"]
-
-
- filepath = joinpath(path,"vre_stor_dc_charge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfCharge_DC)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, charge_dc, dfCharge_DC)
- end
- end
-
- # AC charging of battery dataframe
- if !isempty(AC_CHARGE)
- dfCharge_AC = DataFrame(Resource = inputs["RESOURCE_NAMES_AC_CHARGE"], Zone = inputs["ZONES_AC_CHARGE"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(AC_CHARGE)[1]))
- charge_ac = zeros(size(AC_CHARGE)[1], T)
- charge_ac = value.(EP[:vP_AC_CHARGE]).data * (setup["ParameterScale"]==1 ? ModelScalingFactor : 1)
- dfCharge_AC.AnnualSum .= charge_ac * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_ac_charge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfCharge_AC)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, charge_ac, dfCharge_AC)
- end
- end
- return nothing
+ # DC charging of battery dataframe
+ if !isempty(DC_CHARGE)
+ dfCharge_DC = DataFrame(Resource = inputs["RESOURCE_NAMES_DC_CHARGE"],
+ Zone = inputs["ZONES_DC_CHARGE"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(DC_CHARGE)[1]))
+ charge_dc = zeros(size(DC_CHARGE)[1], T)
+ charge_dc = value.(EP[:vP_DC_CHARGE]).data ./
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)]) *
+ (setup["ParameterScale"] == 1 ? ModelScalingFactor : 1)
+ dfCharge_DC.AnnualSum .= charge_dc * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_dc_charge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfCharge_DC)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, charge_dc, dfCharge_DC)
+ end
+ end
+
+ # AC charging of battery dataframe
+ if !isempty(AC_CHARGE)
+ dfCharge_AC = DataFrame(Resource = inputs["RESOURCE_NAMES_AC_CHARGE"],
+ Zone = inputs["ZONES_AC_CHARGE"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(AC_CHARGE)[1]))
+ charge_ac = zeros(size(AC_CHARGE)[1], T)
+ charge_ac = value.(EP[:vP_AC_CHARGE]).data *
+ (setup["ParameterScale"] == 1 ? ModelScalingFactor : 1)
+ dfCharge_AC.AnnualSum .= charge_ac * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_ac_charge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfCharge_AC)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, charge_ac, dfCharge_AC)
+ end
+ end
+ return nothing
end
@doc raw"""
@@ -331,81 +364,94 @@ end
Function for writing the vre-storage discharging decision variables/expressions.
"""
-function write_vre_stor_discharge(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- gen_VRE_STOR = gen.VreStorage
- T = inputs["T"]
- DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
+function write_vre_stor_discharge(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ gen = inputs["RESOURCES"]
+ gen_VRE_STOR = gen.VreStorage
+ T = inputs["T"]
+ DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
- WIND = inputs["VS_WIND"]
- SOLAR = inputs["VS_SOLAR"]
-
- # DC discharging of battery dataframe
- if !isempty(DC_DISCHARGE)
- dfDischarge_DC = DataFrame(Resource = inputs["RESOURCE_NAMES_DC_DISCHARGE"], Zone = inputs["ZONES_DC_DISCHARGE"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(DC_DISCHARGE)[1]))
- power_vre_stor = value.(EP[:vP_DC_DISCHARGE]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)])
- if setup["ParameterScale"] == 1
- power_vre_stor *= ModelScalingFactor
- end
- dfDischarge_DC.AnnualSum .= power_vre_stor * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_dc_discharge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfDischarge_DC)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, power_vre_stor, dfDischarge_DC)
- end
- end
-
- # AC discharging of battery dataframe
- if !isempty(AC_DISCHARGE)
- dfDischarge_AC = DataFrame(Resource = inputs["RESOURCE_NAMES_AC_DISCHARGE"], Zone = inputs["ZONES_AC_DISCHARGE"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(AC_DISCHARGE)[1]))
- power_vre_stor = value.(EP[:vP_AC_DISCHARGE]).data
- if setup["ParameterScale"] == 1
- power_vre_stor *= ModelScalingFactor
- end
- dfDischarge_AC.AnnualSum .= power_vre_stor * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_ac_discharge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfDischarge_AC)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, power_vre_stor, dfDischarge_AC)
- end
- end
-
- # Wind generation of co-located resource dataframe
- if !isempty(WIND)
- dfVP_VRE_STOR = DataFrame(Resource = inputs["RESOURCE_NAMES_WIND"], Zone = inputs["ZONES_WIND"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(WIND)[1]))
- vre_vre_stor = value.(EP[:vP_WIND]).data
- if setup["ParameterScale"] == 1
- vre_vre_stor *= ModelScalingFactor
- end
- dfVP_VRE_STOR.AnnualSum .= vre_vre_stor * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_wind_power.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfVP_VRE_STOR)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, vre_vre_stor, dfVP_VRE_STOR)
- end
- end
-
- # Solar generation of co-located resource dataframe
- if !isempty(SOLAR)
- dfVP_VRE_STOR = DataFrame(Resource = inputs["RESOURCE_NAMES_SOLAR"], Zone = inputs["ZONES_SOLAR"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(SOLAR)[1]))
- vre_vre_stor = value.(EP[:vP_SOLAR]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar.!=0)])
- if setup["ParameterScale"] == 1
- vre_vre_stor *= ModelScalingFactor
- end
- dfVP_VRE_STOR.AnnualSum .= vre_vre_stor * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_solar_power.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfVP_VRE_STOR)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, vre_vre_stor, dfVP_VRE_STOR)
- end
- end
- return nothing
+ WIND = inputs["VS_WIND"]
+ SOLAR = inputs["VS_SOLAR"]
+
+ # DC discharging of battery dataframe
+ if !isempty(DC_DISCHARGE)
+ dfDischarge_DC = DataFrame(Resource = inputs["RESOURCE_NAMES_DC_DISCHARGE"],
+ Zone = inputs["ZONES_DC_DISCHARGE"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(DC_DISCHARGE)[1]))
+ power_vre_stor = value.(EP[:vP_DC_DISCHARGE]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)])
+ if setup["ParameterScale"] == 1
+ power_vre_stor *= ModelScalingFactor
+ end
+ dfDischarge_DC.AnnualSum .= power_vre_stor * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_dc_discharge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfDischarge_DC)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, power_vre_stor, dfDischarge_DC)
+ end
+ end
+
+ # AC discharging of battery dataframe
+ if !isempty(AC_DISCHARGE)
+ dfDischarge_AC = DataFrame(Resource = inputs["RESOURCE_NAMES_AC_DISCHARGE"],
+ Zone = inputs["ZONES_AC_DISCHARGE"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(AC_DISCHARGE)[1]))
+ power_vre_stor = value.(EP[:vP_AC_DISCHARGE]).data
+ if setup["ParameterScale"] == 1
+ power_vre_stor *= ModelScalingFactor
+ end
+ dfDischarge_AC.AnnualSum .= power_vre_stor * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_ac_discharge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfDischarge_AC)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, power_vre_stor, dfDischarge_AC)
+ end
+ end
+
+ # Wind generation of co-located resource dataframe
+ if !isempty(WIND)
+ dfVP_VRE_STOR = DataFrame(Resource = inputs["RESOURCE_NAMES_WIND"],
+ Zone = inputs["ZONES_WIND"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(WIND)[1]))
+ vre_vre_stor = value.(EP[:vP_WIND]).data
+ if setup["ParameterScale"] == 1
+ vre_vre_stor *= ModelScalingFactor
+ end
+ dfVP_VRE_STOR.AnnualSum .= vre_vre_stor * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_wind_power.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfVP_VRE_STOR)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, vre_vre_stor, dfVP_VRE_STOR)
+ end
+ end
+
+ # Solar generation of co-located resource dataframe
+ if !isempty(SOLAR)
+ dfVP_VRE_STOR = DataFrame(Resource = inputs["RESOURCE_NAMES_SOLAR"],
+ Zone = inputs["ZONES_SOLAR"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(SOLAR)[1]))
+ vre_vre_stor = value.(EP[:vP_SOLAR]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar .!= 0)])
+ if setup["ParameterScale"] == 1
+ vre_vre_stor *= ModelScalingFactor
+ end
+ dfVP_VRE_STOR.AnnualSum .= vre_vre_stor * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_solar_power.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfVP_VRE_STOR)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, vre_vre_stor, dfVP_VRE_STOR)
+ end
+ end
+ return nothing
end
diff --git a/test/expression_manipulation_test.jl b/test/expression_manipulation_test.jl
index aae5d442ec..71891d80ac 100644
--- a/test/expression_manipulation_test.jl
+++ b/test/expression_manipulation_test.jl
@@ -3,11 +3,11 @@ using HiGHS
function setup_sum_model()
EP = Model(HiGHS.Optimizer)
- @variable(EP, x[i=1:100,j=1:4:200]>=0)
- @variable(EP, y[i=1:100,j=1:50]>=0)
- @expression(EP, eX[i=1:100,j=1:4:200], 2.0*x[i,j]+i+10.0*j)
- @expression(EP, eY[i=1:100,j=1:50], 3.0*y[i,j]+2*i+j)
- @expression(EP, eZ[i=1:100,j=1:50], 2.0*x[i,(j-1)*4+1] + 4.0*y[i,j])
+ @variable(EP, x[i = 1:100, j = 1:4:200]>=0)
+ @variable(EP, y[i = 1:100, j = 1:50]>=0)
+ @expression(EP, eX[i = 1:100, j = 1:4:200], 2.0*x[i, j]+i+10.0*j)
+ @expression(EP, eY[i = 1:100, j = 1:50], 3.0*y[i, j]+2*i+j)
+ @expression(EP, eZ[i = 1:100, j = 1:50], 2.0 * x[i, (j - 1) * 4 + 1]+4.0 * y[i, j])
return EP
end
@@ -61,21 +61,21 @@ function sum_combo_expr()
return true
end
-let
+let
EP = Model(HiGHS.Optimizer)
# Test fill_with_zeros!
- small_zeros_expr = Array{AffExpr,2}(undef,(2,3))
+ small_zeros_expr = Array{AffExpr, 2}(undef, (2, 3))
GenX.fill_with_zeros!(small_zeros_expr)
@test small_zeros_expr == AffExpr.([0.0 0.0 0.0; 0.0 0.0 0.0])
# Test fill_with_const!
- small_const_expr = Array{AffExpr,2}(undef,(3,2))
+ small_const_expr = Array{AffExpr, 2}(undef, (3, 2))
GenX.fill_with_const!(small_const_expr, 6.0)
@test small_const_expr == AffExpr.([6.0 6.0; 6.0 6.0; 6.0 6.0])
# Test create_empty_expression! with fill_with_const!
- large_dims = (2,10,20)
+ large_dims = (2, 10, 20)
GenX.create_empty_expression!(EP, :large_expr, large_dims)
@test all(EP[:large_expr] .== 0.0)
@@ -93,11 +93,12 @@ let
@test all(EP[:large_expr][:] .== 18.0)
# Test add_similar_to_expression! returns an error if the dimensions don't match
- GenX.create_empty_expression!(EP, :small_expr, (2,3))
- @test_throws ErrorException GenX.add_similar_to_expression!(EP[:large_expr], EP[:small_expr])
+ GenX.create_empty_expression!(EP, :small_expr, (2, 3))
+ @test_throws ErrorException GenX.add_similar_to_expression!(EP[:large_expr],
+ EP[:small_expr])
# Test we can add variables to an expression using add_similar_to_expression!
- @variable(EP, test_var[1:large_dims[1], 1:large_dims[2], 1:large_dims[3]] >= 0)
+ @variable(EP, test_var[1:large_dims[1], 1:large_dims[2], 1:large_dims[3]]>=0)
GenX.add_similar_to_expression!(EP[:large_expr], test_var)
@test EP[:large_expr][100] == test_var[100] + 18.0
@@ -117,7 +118,7 @@ let
@test sum_combo_expr() == true
# Test add_term_to_expression! for variable
- @variable(EP, single_var >= 0)
+ @variable(EP, single_var>=0)
GenX.add_term_to_expression!(EP[:large_expr], single_var)
@test EP[:large_expr][100] == test_var[100] + 22.0 + single_var
@@ -144,12 +145,12 @@ let
unregister(EP, :var_denseaxisarray)
end
- ###### ###### ###### ###### ###### ###### ######
- ###### ###### ###### ###### ###### ###### ######
- # Performance tests we can perhaps add later
- # These require the BenchmarkTests.jl package
- ###### ###### ###### ###### ###### ###### ######
- ###### ###### ###### ###### ###### ###### ######
+###### ###### ###### ###### ###### ###### ######
+###### ###### ###### ###### ###### ###### ######
+# Performance tests we can perhaps add later
+# These require the BenchmarkTests.jl package
+###### ###### ###### ###### ###### ###### ######
+###### ###### ###### ###### ###### ###### ######
# function test_performance(expr_dims)
# EP = Model(HiGHS.Optimizer)
@@ -165,4 +166,3 @@ end
# small_benchmark = test_performance((2,3))
# medium_benchmark = test_performance((2,10,20))
# large_benchmark = test_performance((2,20,40))
-
diff --git a/test/resource_test.jl b/test/resource_test.jl
index 2203af17e8..7824187bfa 100644
--- a/test/resource_test.jl
+++ b/test/resource_test.jl
@@ -3,109 +3,109 @@ let
check_resource = GenX.check_resource
therm = Resource(:Resource => "my_therm",
- :THERM => 1,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 0)
+ :THERM => 1,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 0)
stor_lds = Resource(:Resource => "stor_lds",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 1,
- :LDS => 1)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 1,
+ :LDS => 1)
hydro_lds = Resource(:Resource => "hydro_lds",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 1,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 1)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 1,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 1)
bad_lds = Resource(:Resource => "bad lds combo",
- :THERM => 0,
- :FLEX => 1,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 1)
+ :THERM => 0,
+ :FLEX => 1,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 1)
bad_none = Resource(:Resource => "none",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 0)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 0)
bad_twotypes = Resource(:Resource => "too many",
- :THERM => 1,
- :FLEX => 1,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 0)
+ :THERM => 1,
+ :FLEX => 1,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 0)
bad_multiple = Resource(:Resource => "multiple_bad",
- :THERM => 1,
- :FLEX => 1,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 1)
+ :THERM => 1,
+ :FLEX => 1,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 1)
# MUST_RUN units contribution to reserves
must_run = Resource(:Resource => "must_run",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 1,
- :STOR => 0,
- :LDS => 0,
- :Reg_Max => 0,
- :Rsv_Max => 0)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 1,
+ :STOR => 0,
+ :LDS => 0,
+ :Reg_Max => 0,
+ :Rsv_Max => 0)
bad_must_run = Resource(:Resource => "bad_must_run",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 1,
- :STOR => 0,
- :LDS => 0,
- :Reg_Max => 0.083333333,
- :Rsv_Max => 0.166666667)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 1,
+ :STOR => 0,
+ :LDS => 0,
+ :Reg_Max => 0.083333333,
+ :Rsv_Max => 0.166666667)
bad_mustrun_reg = Resource(:Resource => "bad_mustrun_reg",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 1,
- :STOR => 0,
- :LDS => 0,
- :Reg_Max => 0.083333333,
- :Rsv_Max => 0)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 1,
+ :STOR => 0,
+ :LDS => 0,
+ :Reg_Max => 0.083333333,
+ :Rsv_Max => 0)
bad_mustrun_rsv = Resource(:Resource => "bad_mustrun_rsv",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 1,
- :STOR => 0,
- :LDS => 0,
- :Reg_Max => 0,
- :Rsv_Max => 0.166666667)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 1,
+ :STOR => 0,
+ :LDS => 0,
+ :Reg_Max => 0,
+ :Rsv_Max => 0.166666667)
function check_okay(resource)
e = check_resource(resource)
@@ -143,6 +143,4 @@ let
end
test_validate_bad(multiple_bad_resources)
-
-
end
diff --git a/test/runtests.jl b/test/runtests.jl
index 1722acabce..f2fc3896ac 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -4,7 +4,6 @@ using Logging
include("utilities.jl")
-
@testset "Expr manipulation" begin
include("expression_manipulation_test.jl")
end
@@ -16,7 +15,7 @@ if VERSION ≥ v"1.7"
end
# Test GenX modules
-@testset verbose = true "GenX modules" begin
+@testset verbose=true "GenX modules" begin
@testset "Three zones" begin
include("test_threezones.jl")
end
@@ -57,3 +56,13 @@ end
include("test_retrofit.jl")
end
end
+
+# Test writing outputs
+@testset "Writing outputs " begin
+ for test_file in filter!(x -> endswith(x, ".jl"), readdir("writing_outputs"))
+ include("writing_outputs/$test_file")
+ end
+end
+
+# Remove temporary files
+isdir(results_path) && rm(results_path, force = true, recursive = true)
diff --git a/test/test_DCOPF.jl b/test/test_DCOPF.jl
index bbac42ff62..ca15bef686 100644
--- a/test/test_DCOPF.jl
+++ b/test/test_DCOPF.jl
@@ -8,11 +8,9 @@ obj_true = 395.171391
test_path = "DCOPF"
# Define test inputs
-genx_setup = Dict(
- "Trans_Loss_Segments" => 0,
+genx_setup = Dict("Trans_Loss_Segments" => 0,
"StorageLosses" => 0,
- "DC_OPF" => 1,
-)
+ "DC_OPF" => 1)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
@@ -23,11 +21,11 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
optimal_tol = round_from_tol!(optimal_tol, optimal_tol)
write_testlog(test_path, obj_test, optimal_tol, test_result)
-end # module TestDCOPF
\ No newline at end of file
+end # module TestDCOPF
diff --git a/test/test_VRE_storage.jl b/test/test_VRE_storage.jl
index 10ce1b9d31..2765283f6e 100644
--- a/test/test_VRE_storage.jl
+++ b/test/test_VRE_storage.jl
@@ -7,8 +7,7 @@ obj_true = 92376.060123
test_path = "VRE_storage"
# Define test inputs
-genx_setup = Dict(
- "NetworkExpansion" => 1,
+genx_setup = Dict("NetworkExpansion" => 1,
"UCommit" => 2,
"CapacityReserveMargin" => 1,
"MinCapReq" => 1,
@@ -16,8 +15,7 @@ genx_setup = Dict(
"CO2Cap" => 1,
"StorageLosses" => 1,
"VirtualChargeDischargeCost" => 1,
- "ParameterScale" => 1,
-)
+ "ParameterScale" => 1)
# Run the case and get the objective value and tolerance
EP, inputs, _ = redirect_stdout(devnull) do
@@ -28,7 +26,7 @@ optimal_tol_rel = get_attribute(EP, "dual_feasibility_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_compute_conflicts.jl b/test/test_compute_conflicts.jl
index c8c4c88f72..e02780eec8 100644
--- a/test/test_compute_conflicts.jl
+++ b/test/test_compute_conflicts.jl
@@ -3,22 +3,22 @@ module TestConflicts
using Test
include(joinpath(@__DIR__, "utilities.jl"))
-test_path = joinpath(@__DIR__,"compute_conflicts");
+test_path = joinpath(@__DIR__, "compute_conflicts")
# Define test inputs
-genx_setup = Dict{Any,Any}(
- "Trans_Loss_Segments" => 1,
+genx_setup = Dict{Any, Any}("Trans_Loss_Segments" => 1,
"CO2Cap" => 1,
"StorageLosses" => 1,
"MaxCapReq" => 1,
- "ComputeConflicts" => 1
-)
+ "ComputeConflicts" => 1)
genxoutput = redirect_stdout(devnull) do
run_genx_case_conflict_testing(test_path, genx_setup)
end
-test_result = @test length(genxoutput)==2
-write_testlog(test_path,"Testing that the infeasible model is correctly handled",test_result)
+test_result = @test length(genxoutput) == 2
+write_testlog(test_path,
+ "Testing that the infeasible model is correctly handled",
+ test_result)
-end
\ No newline at end of file
+end
diff --git a/test/test_electrolyzer.jl b/test/test_electrolyzer.jl
index 426540eef1..7789751e86 100644
--- a/test/test_electrolyzer.jl
+++ b/test/test_electrolyzer.jl
@@ -8,13 +8,11 @@ obj_true = 6946.9618
test_path = "electrolyzer"
# Define test inputs
-genx_setup = Dict(
- "Trans_Loss_Segments" => 1,
+genx_setup = Dict("Trans_Loss_Segments" => 1,
"UCommit" => 2,
"StorageLosses" => 1,
"HydrogenHourlyMatching" => 1,
- "ParameterScale" => 1,
-)
+ "ParameterScale" => 1)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
@@ -26,7 +24,7 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_examples.jl b/test/test_examples.jl
index ed060f9ca1..a15b5ef17b 100644
--- a/test/test_examples.jl
+++ b/test/test_examples.jl
@@ -5,13 +5,12 @@ using GenX
include(joinpath(@__DIR__, "utilities.jl"))
-
# Test that the examples in the example_systems directory run without error
function test_examples()
base_path = Base.dirname(Base.dirname(pathof(GenX)))
examples_path = joinpath(base_path, "example_systems")
- examples_dir = readdir(examples_path, join=true)
+ examples_dir = readdir(examples_path, join = true)
for example_dir in examples_dir
if isdir(example_dir) && isfile(joinpath(example_dir, "Run.jl"))
@info "Running example in $example_dir"
@@ -25,4 +24,4 @@ end
test_examples()
end
-end # module
\ No newline at end of file
+end # module
diff --git a/test/test_load_resource_data.jl b/test/test_load_resource_data.jl
index fec45f8c8e..00d979b197 100644
--- a/test/test_load_resource_data.jl
+++ b/test/test_load_resource_data.jl
@@ -11,32 +11,33 @@ struct InputsTrue
inputs_filename::AbstractString
end
-
function test_macro_interface(attr::Symbol, gen, dfGen)
f = getfield(GenX, attr)
@test f.(gen) == dfGen[!, attr]
end
function test_ids_with(attr::Symbol, gen, dfGen)
- @test GenX.ids_with(gen,attr) == dfGen[dfGen[!, attr] .!= 0, :r_id]
+ @test GenX.ids_with(gen, attr) == dfGen[dfGen[!, attr] .!= 0, :r_id]
end
function test_ids_with_nonneg(attr::Symbol, gen, dfGen)
- @test GenX.ids_with_nonneg(gen,attr) == dfGen[dfGen[!, attr] .>= 0, :r_id]
+ @test GenX.ids_with_nonneg(gen, attr) == dfGen[dfGen[!, attr] .>= 0, :r_id]
end
function test_ids_with_positive(attr::Symbol, gen, dfGen)
- @test GenX.ids_with_positive(gen,attr) == dfGen[dfGen[!, attr] .> 0, :r_id]
+ @test GenX.ids_with_positive(gen, attr) == dfGen[dfGen[!, attr] .> 0, :r_id]
end
-function prepare_inputs_true(test_path::AbstractString, in_filenames::InputsTrue, setup::Dict)
+function prepare_inputs_true(test_path::AbstractString,
+ in_filenames::InputsTrue,
+ setup::Dict)
gen_filename = in_filenames.gen_filename
inputs_filename = in_filenames.inputs_filename
dfGen = GenX.load_dataframe(joinpath(test_path, gen_filename))
- scale_factor = setup["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.
+ scale_factor = setup["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.0
GenX.rename!(dfGen, lowercase.(names(dfGen)))
GenX.scale_resources_data!(dfGen, scale_factor)
- dfGen[!,:r_id] = 1:size(dfGen,1)
+ dfGen[!, :r_id] = 1:size(dfGen, 1)
inputs_true = load(joinpath(test_path, inputs_filename))
return dfGen, inputs_true
end
@@ -88,27 +89,48 @@ function test_load_scaled_resources_data(gen, dfGen)
@test GenX.fuel.(gen) == dfGen.fuel
@test GenX.co2_capture_fraction.(gen) == dfGen.co2_capture_fraction
@test GenX.co2_capture_fraction_startup.(gen) == dfGen.co2_capture_fraction_startup
- @test GenX.ccs_disposal_cost_per_metric_ton.(gen) == dfGen.ccs_disposal_cost_per_metric_ton
+ @test GenX.ccs_disposal_cost_per_metric_ton.(gen) ==
+ dfGen.ccs_disposal_cost_per_metric_ton
@test GenX.biomass.(gen) == dfGen.biomass
## multi-fuel flags
- @test GenX.ids_with_fuel(gen) == dfGen[(dfGen[!,:fuel] .!= "None"),:r_id]
- @test GenX.ids_with_positive(gen, GenX.co2_capture_fraction) == dfGen[dfGen.co2_capture_fraction .>0,:r_id]
- @test GenX.ids_with_singlefuel(gen) == dfGen[dfGen.multi_fuels.!=1,:r_id]
- @test GenX.ids_with_multifuels(gen) == dfGen[dfGen.multi_fuels.==1,:r_id]
+ @test GenX.ids_with_fuel(gen) == dfGen[(dfGen[!, :fuel] .!= "None"), :r_id]
+ @test GenX.ids_with_positive(gen, GenX.co2_capture_fraction) ==
+ dfGen[dfGen.co2_capture_fraction .> 0, :r_id]
+ @test GenX.ids_with_singlefuel(gen) == dfGen[dfGen.multi_fuels .!= 1, :r_id]
+ @test GenX.ids_with_multifuels(gen) == dfGen[dfGen.multi_fuels .== 1, :r_id]
if !isempty(GenX.ids_with_multifuels(gen))
MULTI_FUELS = GenX.ids_with_multifuels(gen)
max_fuels = maximum(GenX.num_fuels.(gen))
for i in 1:max_fuels
- @test findall(g -> GenX.max_cofire_cols(g, tag=i) < 1, gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel",i, "_max_cofire_level"))].< 1, :][!, :r_id]
- @test findall(g -> GenX.max_cofire_start_cols(g, tag=i) < 1, gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel",i, "_max_cofire_level_start"))].< 1, :][!, :r_id]
- @test findall(g -> GenX.min_cofire_cols(g, tag=i) > 0, gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel",i, "_min_cofire_level"))].> 0, :][!, :r_id]
- @test findall(g -> GenX.min_cofire_start_cols(g, tag=i) > 0, gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel",i, "_min_cofire_level_start"))].> 0, :][!, :r_id]
- @test GenX.fuel_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i))]
- @test GenX.heat_rate_cols.(gen, tag=i) == dfGen[!,Symbol(string("heat_rate",i, "_mmbtu_per_mwh"))]
- @test GenX.max_cofire_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i, "_max_cofire_level"))]
- @test GenX.min_cofire_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i, "_min_cofire_level"))]
- @test GenX.max_cofire_start_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i, "_max_cofire_level_start"))]
- @test GenX.min_cofire_start_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i, "_min_cofire_level_start"))]
+ @test findall(g -> GenX.max_cofire_cols(g, tag = i) < 1, gen[MULTI_FUELS]) ==
+ dfGen[dfGen[!, Symbol(string("fuel", i, "_max_cofire_level"))] .< 1, :][
+ !,
+ :r_id]
+ @test findall(g -> GenX.max_cofire_start_cols(g, tag = i) < 1,
+ gen[MULTI_FUELS]) == dfGen[
+ dfGen[!, Symbol(string("fuel", i, "_max_cofire_level_start"))] .< 1,
+ :][!,
+ :r_id]
+ @test findall(g -> GenX.min_cofire_cols(g, tag = i) > 0, gen[MULTI_FUELS]) ==
+ dfGen[dfGen[!, Symbol(string("fuel", i, "_min_cofire_level"))] .> 0, :][
+ !,
+ :r_id]
+ @test findall(g -> GenX.min_cofire_start_cols(g, tag = i) > 0,
+ gen[MULTI_FUELS]) == dfGen[
+ dfGen[!, Symbol(string("fuel", i, "_min_cofire_level_start"))] .> 0,
+ :][!,
+ :r_id]
+ @test GenX.fuel_cols.(gen, tag = i) == dfGen[!, Symbol(string("fuel", i))]
+ @test GenX.heat_rate_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("heat_rate", i, "_mmbtu_per_mwh"))]
+ @test GenX.max_cofire_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("fuel", i, "_max_cofire_level"))]
+ @test GenX.min_cofire_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("fuel", i, "_min_cofire_level"))]
+ @test GenX.max_cofire_start_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("fuel", i, "_max_cofire_level_start"))]
+ @test GenX.min_cofire_start_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("fuel", i, "_min_cofire_level_start"))]
end
end
@test GenX.ids_with_mga(gen) == dfGen[dfGen.mga .== 1, :r_id]
@@ -118,12 +140,12 @@ function test_load_scaled_resources_data(gen, dfGen)
end
function test_add_policies_to_resources(gen, dfGen)
- @test GenX.esr.(gen, tag=1) == dfGen.esr_1
- @test GenX.esr.(gen, tag=2) == dfGen.esr_2
- @test GenX.min_cap.(gen, tag=1) == dfGen.mincaptag_1
- @test GenX.min_cap.(gen, tag=2) == dfGen.mincaptag_2
- @test GenX.min_cap.(gen, tag=3) == dfGen.mincaptag_3
- @test GenX.derating_factor.(gen, tag=1) == dfGen.capres_1
+ @test GenX.esr.(gen, tag = 1) == dfGen.esr_1
+ @test GenX.esr.(gen, tag = 2) == dfGen.esr_2
+ @test GenX.min_cap.(gen, tag = 1) == dfGen.mincaptag_1
+ @test GenX.min_cap.(gen, tag = 2) == dfGen.mincaptag_2
+ @test GenX.min_cap.(gen, tag = 3) == dfGen.mincaptag_3
+ @test GenX.derating_factor.(gen, tag = 1) == dfGen.capres_1
end
function test_add_modules_to_resources(gen, dfGen)
@@ -136,7 +158,6 @@ function test_add_modules_to_resources(gen, dfGen)
end
function test_inputs_keys(inputs, inputs_true)
-
@test inputs["G"] == inputs_true["G"]
@test inputs["HYDRO_RES"] == inputs_true["HYDRO_RES"]
@@ -159,7 +180,7 @@ function test_inputs_keys(inputs, inputs_true)
@test inputs["THERM_NO_COMMIT"] == inputs_true["THERM_NO_COMMIT"]
@test inputs["COMMIT"] == inputs_true["COMMIT"]
@test inputs["C_Start"] == inputs_true["C_Start"]
-
+
@test Set(inputs["RET_CAP"]) == inputs_true["RET_CAP"]
@test Set(inputs["RET_CAP_CHARGE"]) == inputs_true["RET_CAP_CHARGE"]
@test Set(inputs["RET_CAP_ENERGY"]) == inputs_true["RET_CAP_ENERGY"]
@@ -167,14 +188,17 @@ function test_inputs_keys(inputs, inputs_true)
@test Set(inputs["NEW_CAP_ENERGY"]) == inputs_true["NEW_CAP_ENERGY"]
@test Set(inputs["NEW_CAP_CHARGE"]) == inputs_true["NEW_CAP_CHARGE"]
- if isempty(inputs["MULTI_FUELS"])
- @test string.(inputs["slope_cols"]) == lowercase.(string.(inputs_true["slope_cols"]))
- @test string.(inputs["intercept_cols"]) == lowercase.(string.(inputs_true["intercept_cols"]))
- @test inputs["PWFU_data"] == rename!(inputs_true["PWFU_data"], lowercase.(names(inputs_true["PWFU_data"])))
+ if isempty(inputs["MULTI_FUELS"])
+ @test string.(inputs["slope_cols"]) ==
+ lowercase.(string.(inputs_true["slope_cols"]))
+ @test string.(inputs["intercept_cols"]) ==
+ lowercase.(string.(inputs_true["intercept_cols"]))
+ @test inputs["PWFU_data"] ==
+ rename!(inputs_true["PWFU_data"], lowercase.(names(inputs_true["PWFU_data"])))
@test inputs["PWFU_Num_Segments"] == inputs_true["PWFU_Num_Segments"]
@test inputs["THERM_COMMIT_PWFU"] == inputs_true["THERM_COMMIT_PWFU"]
end
-
+
@test inputs["R_ZONES"] == inputs_true["R_ZONES"]
@test inputs["RESOURCE_ZONES"] == inputs_true["RESOURCE_ZONES"]
@test inputs["RESOURCE_NAMES"] == inputs_true["RESOURCES"]
@@ -183,7 +207,7 @@ end
function test_resource_specific_attributes(gen, dfGen, inputs)
@test GenX.is_buildable(gen) == dfGen[dfGen.new_build .== 1, :r_id]
@test GenX.is_retirable(gen) == dfGen[dfGen.can_retire .== 1, :r_id]
-
+
rs = GenX.ids_with_positive(gen, GenX.max_cap_mwh)
@test rs == dfGen[dfGen.max_cap_mwh .> 0, :r_id]
@test GenX.max_cap_mwh.(rs) == dfGen[dfGen.max_cap_mwh .> 0, :r_id]
@@ -192,7 +216,7 @@ function test_resource_specific_attributes(gen, dfGen, inputs)
@test GenX.max_charge_cap_mw.(rs) == dfGen[dfGen.max_charge_cap_mw .> 0, :r_id]
rs = GenX.ids_with_unit_commitment(gen)
@test rs == dfGen[dfGen.therm .== 1, :r_id]
- @test GenX.cap_size.(gen[rs]) == dfGen[dfGen.therm.==1,:cap_size]
+ @test GenX.cap_size.(gen[rs]) == dfGen[dfGen.therm .== 1, :cap_size]
rs = setdiff(inputs["HAS_FUEL"], inputs["THERM_COMMIT"])
@test GenX.heat_rate_mmbtu_per_mwh.(gen[rs]) == dfGen[rs, :heat_rate_mmbtu_per_mwh]
rs = setdiff(inputs["THERM_COMMIT"], inputs["THERM_COMMIT_PWFU"])
@@ -211,23 +235,23 @@ function test_resource_specific_attributes(gen, dfGen, inputs)
@test GenX.min_charge_cap_mw.(gen[rs]) == dfGen[rs, :min_charge_cap_mw]
@test GenX.existing_charge_cap_mw.(gen[rs]) == dfGen[rs, :existing_charge_cap_mw]
@test GenX.inv_cost_charge_per_mwyr.(gen[rs]) == dfGen[rs, :inv_cost_charge_per_mwyr]
- @test GenX.fixed_om_cost_charge_per_mwyr.(gen[rs]) == dfGen[rs, :fixed_om_cost_charge_per_mwyr]
+ @test GenX.fixed_om_cost_charge_per_mwyr.(gen[rs]) ==
+ dfGen[rs, :fixed_om_cost_charge_per_mwyr]
rs = union(inputs["HYDRO_RES_KNOWN_CAP"], inputs["STOR_HYDRO_LONG_DURATION"])
- @test GenX.hydro_energy_to_power_ratio.(gen[rs]) == dfGen[rs, :hydro_energy_to_power_ratio]
+ @test GenX.hydro_energy_to_power_ratio.(gen[rs]) ==
+ dfGen[rs, :hydro_energy_to_power_ratio]
end
function test_load_resources_data()
- setup = Dict(
- "ParameterScale" => 0,
+ setup = Dict("ParameterScale" => 0,
"OperationalReserves" => 1,
"UCommit" => 2,
- "MultiStage" => 1,
- )
+ "MultiStage" => 1)
# Merge the setup with the default settings
settings = GenX.default_settings()
merge!(settings, setup)
-
+
test_path = joinpath("load_resources", "test_gen_non_colocated")
# load dfGen and inputs_true to compare against
@@ -269,25 +293,22 @@ function test_load_resources_data()
end
function test_load_VRE_STOR_data()
-
- setup = Dict(
- "ParameterScale" => 0,
+ setup = Dict("ParameterScale" => 0,
"OperationalReserves" => 1,
"UCommit" => 2,
- "MultiStage" => 0,
- )
+ "MultiStage" => 0)
# Merge the setup with the default settings
settings = GenX.default_settings()
merge!(settings, setup)
-
- test_path = joinpath("load_resources","test_gen_vre_stor")
+
+ test_path = joinpath("load_resources", "test_gen_vre_stor")
input_true_filenames = InputsTrue("generators_data.csv", "inputs_after_loadgen.jld2")
dfGen, inputs_true = prepare_inputs_true(test_path, input_true_filenames, settings)
dfVRE_STOR = GenX.load_dataframe(joinpath(test_path, "Vre_and_stor_data.csv"))
dfVRE_STOR = GenX.rename!(dfVRE_STOR, lowercase.(names(dfVRE_STOR)))
- scale_factor = settings["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.
+ scale_factor = settings["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.0
GenX.scale_vre_stor_data!(dfVRE_STOR, scale_factor)
resources_path = joinpath(test_path, settings["ResourcesFolder"])
@@ -304,27 +325,36 @@ function test_load_VRE_STOR_data()
rs = inputs["VRE_STOR"]
@test GenX.solar(gen) == dfVRE_STOR[dfVRE_STOR.solar .== 1, :r_id]
@test GenX.wind(gen) == dfVRE_STOR[dfVRE_STOR.wind .== 1, :r_id]
- @test GenX.storage_dc_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .>= 1, :r_id]
- @test GenX.storage_sym_dc_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 1, :r_id]
- @test GenX.storage_asym_dc_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 2, :r_id]
+ @test GenX.storage_dc_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .>= 1, :r_id]
+ @test GenX.storage_sym_dc_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 1, :r_id]
+ @test GenX.storage_asym_dc_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 2, :r_id]
@test GenX.storage_dc_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .>= 1, :r_id]
- @test GenX.storage_sym_dc_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 1, :r_id]
- @test GenX.storage_asym_dc_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 2, :r_id]
-
- @test GenX.storage_ac_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .>= 1, :r_id]
- @test GenX.storage_sym_ac_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 1, :r_id]
- @test GenX.storage_asym_ac_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 2, :r_id]
+ @test GenX.storage_sym_dc_charge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 1, :r_id]
+ @test GenX.storage_asym_dc_charge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 2, :r_id]
+
+ @test GenX.storage_ac_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .>= 1, :r_id]
+ @test GenX.storage_sym_ac_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 1, :r_id]
+ @test GenX.storage_asym_ac_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 2, :r_id]
@test GenX.storage_ac_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .>= 1, :r_id]
- @test GenX.storage_sym_ac_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 1, :r_id]
- @test GenX.storage_asym_ac_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 2, :r_id]
+ @test GenX.storage_sym_ac_charge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 1, :r_id]
+ @test GenX.storage_asym_ac_charge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 2, :r_id]
@test GenX.technology.(gen[rs]) == dfVRE_STOR.technology
- @test GenX.is_LDS_VRE_STOR(gen) == dfVRE_STOR[dfVRE_STOR.lds_vre_stor .!= 0, :r_id]
-
+ @test GenX.is_LDS_VRE_STOR(gen) == dfVRE_STOR[dfVRE_STOR.lds_vre_stor .!= 0, :r_id]
- for attr in (:existing_cap_solar_mw,
+ for attr in (:existing_cap_solar_mw,
:existing_cap_wind_mw,
:existing_cap_inverter_mw,
:existing_cap_charge_dc_mw,
@@ -335,140 +365,201 @@ function test_load_VRE_STOR_data()
test_ids_with_nonneg(attr, gen[rs], dfVRE_STOR)
end
- for attr in (:max_cap_solar_mw,
- :max_cap_wind_mw,
- :max_cap_inverter_mw,
- :max_cap_charge_dc_mw,
- :max_cap_charge_ac_mw,
- :max_cap_discharge_dc_mw,
- :max_cap_discharge_ac_mw)
+ for attr in (:max_cap_solar_mw,
+ :max_cap_wind_mw,
+ :max_cap_inverter_mw,
+ :max_cap_charge_dc_mw,
+ :max_cap_charge_ac_mw,
+ :max_cap_discharge_dc_mw,
+ :max_cap_discharge_ac_mw)
test_macro_interface(attr, gen[rs], dfVRE_STOR)
test_ids_with_nonneg(attr, gen[rs], dfVRE_STOR)
test_ids_with(attr, gen[rs], dfVRE_STOR)
end
- for attr in (:min_cap_solar_mw,
- :min_cap_wind_mw,
- :min_cap_inverter_mw,
- :min_cap_charge_dc_mw,
- :min_cap_charge_ac_mw,
- :min_cap_discharge_dc_mw,
- :min_cap_discharge_ac_mw,
- :inverter_ratio_solar,
- :inverter_ratio_wind,)
+ for attr in (:min_cap_solar_mw,
+ :min_cap_wind_mw,
+ :min_cap_inverter_mw,
+ :min_cap_charge_dc_mw,
+ :min_cap_charge_ac_mw,
+ :min_cap_discharge_dc_mw,
+ :min_cap_discharge_ac_mw,
+ :inverter_ratio_solar,
+ :inverter_ratio_wind)
test_macro_interface(attr, gen[rs], dfVRE_STOR)
test_ids_with_positive(attr, gen[rs], dfVRE_STOR)
end
for attr in (:etainverter,
- :inv_cost_inverter_per_mwyr,
- :inv_cost_solar_per_mwyr,
- :inv_cost_wind_per_mwyr,
- :inv_cost_discharge_dc_per_mwyr,
- :inv_cost_charge_dc_per_mwyr,
- :inv_cost_discharge_ac_per_mwyr,
- :inv_cost_charge_ac_per_mwyr,
- :fixed_om_inverter_cost_per_mwyr,
- :fixed_om_solar_cost_per_mwyr,
- :fixed_om_wind_cost_per_mwyr,
- :fixed_om_cost_discharge_dc_per_mwyr,
- :fixed_om_cost_charge_dc_per_mwyr,
- :fixed_om_cost_discharge_ac_per_mwyr,
- :fixed_om_cost_charge_ac_per_mwyr,
- :var_om_cost_per_mwh_solar,
- :var_om_cost_per_mwh_wind,
- :var_om_cost_per_mwh_charge_dc,
- :var_om_cost_per_mwh_discharge_dc,
- :var_om_cost_per_mwh_charge_ac,
- :var_om_cost_per_mwh_discharge_ac,
- :eff_up_ac,
- :eff_down_ac,
- :eff_up_dc,
- :eff_down_dc,
- :power_to_energy_ac,
- :power_to_energy_dc)
+ :inv_cost_inverter_per_mwyr,
+ :inv_cost_solar_per_mwyr,
+ :inv_cost_wind_per_mwyr,
+ :inv_cost_discharge_dc_per_mwyr,
+ :inv_cost_charge_dc_per_mwyr,
+ :inv_cost_discharge_ac_per_mwyr,
+ :inv_cost_charge_ac_per_mwyr,
+ :fixed_om_inverter_cost_per_mwyr,
+ :fixed_om_solar_cost_per_mwyr,
+ :fixed_om_wind_cost_per_mwyr,
+ :fixed_om_cost_discharge_dc_per_mwyr,
+ :fixed_om_cost_charge_dc_per_mwyr,
+ :fixed_om_cost_discharge_ac_per_mwyr,
+ :fixed_om_cost_charge_ac_per_mwyr,
+ :var_om_cost_per_mwh_solar,
+ :var_om_cost_per_mwh_wind,
+ :var_om_cost_per_mwh_charge_dc,
+ :var_om_cost_per_mwh_discharge_dc,
+ :var_om_cost_per_mwh_charge_ac,
+ :var_om_cost_per_mwh_discharge_ac,
+ :eff_up_ac,
+ :eff_down_ac,
+ :eff_up_dc,
+ :eff_down_dc,
+ :power_to_energy_ac,
+ :power_to_energy_dc)
test_macro_interface(attr, gen[rs], dfVRE_STOR)
end
# policies
- @test GenX.esr_vrestor.(gen[rs], tag=1) == dfVRE_STOR.esr_vrestor_1
- @test GenX.esr_vrestor.(gen[rs], tag=2) == dfVRE_STOR.esr_vrestor_2
- @test GenX.min_cap_stor.(gen[rs], tag=1) == dfVRE_STOR.mincaptagstor_1
- @test GenX.min_cap_stor.(gen[rs], tag=2) == dfVRE_STOR.mincaptagstor_2
- @test GenX.derating_factor.(gen[rs], tag=1) == dfVRE_STOR.capresvrestor_1
- @test GenX.derating_factor.(gen[rs], tag=2) == dfVRE_STOR.capresvrestor_2
- @test GenX.max_cap_stor.(gen[rs], tag=1) == dfVRE_STOR.maxcaptagstor_1
- @test GenX.max_cap_stor.(gen[rs], tag=2) == dfVRE_STOR.maxcaptagstor_2
- @test GenX.min_cap_solar.(gen[rs], tag=1) == dfVRE_STOR.mincaptagsolar_1
- @test GenX.max_cap_solar.(gen[rs], tag=1) == dfVRE_STOR.maxcaptagsolar_1
- @test GenX.min_cap_wind.(gen[rs], tag=1) == dfVRE_STOR.mincaptagwind_1
- @test GenX.max_cap_wind.(gen[rs], tag=1) == dfVRE_STOR.maxcaptagwind_1
-
- @test GenX.ids_with_policy(gen, GenX.min_cap_solar, tag=1) == dfVRE_STOR[dfVRE_STOR.mincaptagsolar_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.min_cap_wind, tag=1) == dfVRE_STOR[dfVRE_STOR.mincaptagwind_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.min_cap_stor, tag=1) == dfVRE_STOR[dfVRE_STOR.mincaptagstor_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.max_cap_solar, tag=1) == dfVRE_STOR[dfVRE_STOR.maxcaptagsolar_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.max_cap_wind, tag=1) == dfVRE_STOR[dfVRE_STOR.maxcaptagwind_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.max_cap_stor, tag=1) == dfVRE_STOR[dfVRE_STOR.maxcaptagstor_1 .== 1, :r_id]
+ @test GenX.esr_vrestor.(gen[rs], tag = 1) == dfVRE_STOR.esr_vrestor_1
+ @test GenX.esr_vrestor.(gen[rs], tag = 2) == dfVRE_STOR.esr_vrestor_2
+ @test GenX.min_cap_stor.(gen[rs], tag = 1) == dfVRE_STOR.mincaptagstor_1
+ @test GenX.min_cap_stor.(gen[rs], tag = 2) == dfVRE_STOR.mincaptagstor_2
+ @test GenX.derating_factor.(gen[rs], tag = 1) == dfVRE_STOR.capresvrestor_1
+ @test GenX.derating_factor.(gen[rs], tag = 2) == dfVRE_STOR.capresvrestor_2
+ @test GenX.max_cap_stor.(gen[rs], tag = 1) == dfVRE_STOR.maxcaptagstor_1
+ @test GenX.max_cap_stor.(gen[rs], tag = 2) == dfVRE_STOR.maxcaptagstor_2
+ @test GenX.min_cap_solar.(gen[rs], tag = 1) == dfVRE_STOR.mincaptagsolar_1
+ @test GenX.max_cap_solar.(gen[rs], tag = 1) == dfVRE_STOR.maxcaptagsolar_1
+ @test GenX.min_cap_wind.(gen[rs], tag = 1) == dfVRE_STOR.mincaptagwind_1
+ @test GenX.max_cap_wind.(gen[rs], tag = 1) == dfVRE_STOR.maxcaptagwind_1
+
+ @test GenX.ids_with_policy(gen, GenX.min_cap_solar, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.mincaptagsolar_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.min_cap_wind, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.mincaptagwind_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.min_cap_stor, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.mincaptagstor_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.max_cap_solar, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.maxcaptagsolar_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.max_cap_wind, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.maxcaptagwind_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.max_cap_stor, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.maxcaptagstor_1 .== 1, :r_id]
# inputs keys
- @test inputs["VRE_STOR"] == dfGen[dfGen.vre_stor.==1,:r_id]
- @test inputs["VS_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar.!=0),:r_id]
- @test inputs["VS_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind.!=0),:r_id]
- @test inputs["VS_DC"] == union(dfVRE_STOR[dfVRE_STOR.stor_dc_discharge.>=1,:r_id], dfVRE_STOR[dfVRE_STOR.stor_dc_charge.>=1,:r_id], dfVRE_STOR[dfVRE_STOR.solar.!=0,:r_id])
-
- @test inputs["VS_STOR"] == union(dfVRE_STOR[dfVRE_STOR.stor_dc_charge.>=1,:r_id], dfVRE_STOR[dfVRE_STOR.stor_ac_charge.>=1,:r_id],
- dfVRE_STOR[dfVRE_STOR.stor_dc_discharge.>=1,:r_id], dfVRE_STOR[dfVRE_STOR.stor_ac_discharge.>=1,:r_id])
- STOR = inputs["VS_STOR"]
- @test inputs["VS_STOR_DC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge.>=1),:r_id]
- @test inputs["VS_SYM_DC_DISCHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge.==1,:r_id]
- @test inputs["VS_ASYM_DC_DISCHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge.==2,:r_id]
- @test inputs["VS_STOR_DC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_charge.>=1),:r_id]
- @test inputs["VS_SYM_DC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_charge.==1,:r_id]
- @test inputs["VS_ASYM_DC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_charge.==2,:r_id]
- @test inputs["VS_STOR_AC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge.>=1),:r_id]
- @test inputs["VS_SYM_AC_DISCHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge.==1,:r_id]
- @test inputs["VS_ASYM_AC_DISCHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge.==2,:r_id]
- @test inputs["VS_STOR_AC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_charge.>=1),:r_id]
- @test inputs["VS_SYM_AC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_charge.==1,:r_id]
- @test inputs["VS_ASYM_AC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_charge.==2,:r_id]
- @test inputs["VS_LDS"] == dfVRE_STOR[(dfVRE_STOR.lds_vre_stor.!=0),:r_id]
- @test inputs["VS_nonLDS"] == setdiff(STOR, inputs["VS_LDS"])
- @test inputs["VS_ASYM"] == union(inputs["VS_ASYM_DC_CHARGE"], inputs["VS_ASYM_DC_DISCHARGE"], inputs["VS_ASYM_AC_DISCHARGE"], inputs["VS_ASYM_AC_CHARGE"])
- @test inputs["VS_SYM_DC"] == intersect(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"])
- @test inputs["VS_SYM_AC"] == intersect(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])
+ @test inputs["VRE_STOR"] == dfGen[dfGen.vre_stor .== 1, :r_id]
+ @test inputs["VS_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar .!= 0), :r_id]
+ @test inputs["VS_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind .!= 0), :r_id]
+ @test inputs["VS_DC"] == union(dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.stor_dc_charge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.solar .!= 0, :r_id])
+
+ @test inputs["VS_STOR"] == union(dfVRE_STOR[dfVRE_STOR.stor_dc_charge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.stor_ac_charge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .>= 1, :r_id])
+ STOR = inputs["VS_STOR"]
+ @test inputs["VS_STOR_DC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge .>= 1), :r_id]
+ @test inputs["VS_SYM_DC_DISCHARGE"] ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 1, :r_id]
+ @test inputs["VS_ASYM_DC_DISCHARGE"] ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 2, :r_id]
+ @test inputs["VS_STOR_DC_CHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_charge .>= 1), :r_id]
+ @test inputs["VS_SYM_DC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 1, :r_id]
+ @test inputs["VS_ASYM_DC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 2, :r_id]
+ @test inputs["VS_STOR_AC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge .>= 1), :r_id]
+ @test inputs["VS_SYM_AC_DISCHARGE"] ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 1, :r_id]
+ @test inputs["VS_ASYM_AC_DISCHARGE"] ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 2, :r_id]
+ @test inputs["VS_STOR_AC_CHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_charge .>= 1), :r_id]
+ @test inputs["VS_SYM_AC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 1, :r_id]
+ @test inputs["VS_ASYM_AC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 2, :r_id]
+ @test inputs["VS_LDS"] == dfVRE_STOR[(dfVRE_STOR.lds_vre_stor .!= 0), :r_id]
+ @test inputs["VS_nonLDS"] == setdiff(STOR, inputs["VS_LDS"])
+ @test inputs["VS_ASYM"] == union(inputs["VS_ASYM_DC_CHARGE"],
+ inputs["VS_ASYM_DC_DISCHARGE"],
+ inputs["VS_ASYM_AC_DISCHARGE"],
+ inputs["VS_ASYM_AC_CHARGE"])
+ @test inputs["VS_SYM_DC"] ==
+ intersect(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"])
+ @test inputs["VS_SYM_AC"] ==
+ intersect(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])
buildable = dfGen[dfGen.new_build .== 1, :r_id]
retirable = dfGen[dfGen.can_retire .== 1, :r_id]
- @test inputs["NEW_CAP_SOLAR"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.solar.!=0,:r_id], dfVRE_STOR[dfVRE_STOR.max_cap_solar_mw.!=0,:r_id])
- @test inputs["RET_CAP_SOLAR"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.solar.!=0,:r_id], dfVRE_STOR[dfVRE_STOR.existing_cap_solar_mw.>=0,:r_id])
- @test inputs["NEW_CAP_WIND"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.wind.!=0,:r_id], dfVRE_STOR[dfVRE_STOR.max_cap_wind_mw.!=0,:r_id])
- @test inputs["RET_CAP_WIND"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.wind.!=0,:r_id], dfVRE_STOR[dfVRE_STOR.existing_cap_wind_mw.>=0,:r_id])
- @test inputs["NEW_CAP_DC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_inverter_mw.!=0,:r_id], inputs["VS_DC"])
- @test inputs["RET_CAP_DC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_inverter_mw.>=0,:r_id], inputs["VS_DC"])
- @test inputs["NEW_CAP_STOR"] == intersect(buildable, dfGen[dfGen.max_cap_mwh.!=0,:r_id], inputs["VS_STOR"])
- @test inputs["RET_CAP_STOR"] == intersect(retirable, dfGen[dfGen.existing_cap_mwh.>=0,:r_id], inputs["VS_STOR"])
- @test inputs["NEW_CAP_CHARGE_DC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_charge_dc_mw.!=0,:r_id], inputs["VS_ASYM_DC_CHARGE"])
- @test inputs["RET_CAP_CHARGE_DC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_charge_dc_mw.>=0,:r_id], inputs["VS_ASYM_DC_CHARGE"])
- @test inputs["NEW_CAP_DISCHARGE_DC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_discharge_dc_mw.!=0,:r_id], inputs["VS_ASYM_DC_DISCHARGE"])
- @test inputs["RET_CAP_DISCHARGE_DC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_discharge_dc_mw.>=0,:r_id], inputs["VS_ASYM_DC_DISCHARGE"])
- @test inputs["NEW_CAP_CHARGE_AC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_charge_ac_mw.!=0,:r_id], inputs["VS_ASYM_AC_CHARGE"])
- @test inputs["RET_CAP_CHARGE_AC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_charge_ac_mw.>=0,:r_id], inputs["VS_ASYM_AC_CHARGE"])
- @test inputs["NEW_CAP_DISCHARGE_AC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_discharge_ac_mw.!=0,:r_id], inputs["VS_ASYM_AC_DISCHARGE"])
- @test inputs["RET_CAP_DISCHARGE_AC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_discharge_ac_mw.>=0,:r_id], inputs["VS_ASYM_AC_DISCHARGE"])
- @test inputs["RESOURCE_NAMES_VRE_STOR"] == collect(skipmissing(dfVRE_STOR[!,:resource][1:size(inputs["VRE_STOR"])[1]]))
- @test inputs["RESOURCE_NAMES_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar.!=0), :resource]
- @test inputs["RESOURCE_NAMES_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind.!=0), :resource]
- @test inputs["RESOURCE_NAMES_DC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge.!=0), :resource]
- @test inputs["RESOURCE_NAMES_AC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge.!=0), :resource]
- @test inputs["RESOURCE_NAMES_DC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_charge.!=0), :resource]
- @test inputs["RESOURCE_NAMES_AC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_charge.!=0), :resource]
- @test inputs["ZONES_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar.!=0), :zone]
- @test inputs["ZONES_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind.!=0), :zone]
- @test inputs["ZONES_DC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge.!=0), :zone]
- @test inputs["ZONES_AC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge.!=0), :zone]
- @test inputs["ZONES_DC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_charge.!=0), :zone]
- @test inputs["ZONES_AC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_charge.!=0), :zone]
+ @test inputs["NEW_CAP_SOLAR"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.solar .!= 0, :r_id],
+ dfVRE_STOR[dfVRE_STOR.max_cap_solar_mw .!= 0, :r_id])
+ @test inputs["RET_CAP_SOLAR"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.solar .!= 0, :r_id],
+ dfVRE_STOR[dfVRE_STOR.existing_cap_solar_mw .>= 0, :r_id])
+ @test inputs["NEW_CAP_WIND"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.wind .!= 0, :r_id],
+ dfVRE_STOR[dfVRE_STOR.max_cap_wind_mw .!= 0, :r_id])
+ @test inputs["RET_CAP_WIND"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.wind .!= 0, :r_id],
+ dfVRE_STOR[dfVRE_STOR.existing_cap_wind_mw .>= 0, :r_id])
+ @test inputs["NEW_CAP_DC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_inverter_mw .!= 0, :r_id],
+ inputs["VS_DC"])
+ @test inputs["RET_CAP_DC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_inverter_mw .>= 0, :r_id],
+ inputs["VS_DC"])
+ @test inputs["NEW_CAP_STOR"] ==
+ intersect(buildable, dfGen[dfGen.max_cap_mwh .!= 0, :r_id], inputs["VS_STOR"])
+ @test inputs["RET_CAP_STOR"] == intersect(retirable,
+ dfGen[dfGen.existing_cap_mwh .>= 0, :r_id],
+ inputs["VS_STOR"])
+ @test inputs["NEW_CAP_CHARGE_DC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_charge_dc_mw .!= 0, :r_id],
+ inputs["VS_ASYM_DC_CHARGE"])
+ @test inputs["RET_CAP_CHARGE_DC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_charge_dc_mw .>= 0, :r_id],
+ inputs["VS_ASYM_DC_CHARGE"])
+ @test inputs["NEW_CAP_DISCHARGE_DC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_discharge_dc_mw .!= 0, :r_id],
+ inputs["VS_ASYM_DC_DISCHARGE"])
+ @test inputs["RET_CAP_DISCHARGE_DC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_discharge_dc_mw .>= 0, :r_id],
+ inputs["VS_ASYM_DC_DISCHARGE"])
+ @test inputs["NEW_CAP_CHARGE_AC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_charge_ac_mw .!= 0, :r_id],
+ inputs["VS_ASYM_AC_CHARGE"])
+ @test inputs["RET_CAP_CHARGE_AC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_charge_ac_mw .>= 0, :r_id],
+ inputs["VS_ASYM_AC_CHARGE"])
+ @test inputs["NEW_CAP_DISCHARGE_AC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_discharge_ac_mw .!= 0, :r_id],
+ inputs["VS_ASYM_AC_DISCHARGE"])
+ @test inputs["RET_CAP_DISCHARGE_AC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_discharge_ac_mw .>= 0, :r_id],
+ inputs["VS_ASYM_AC_DISCHARGE"])
+ @test inputs["RESOURCE_NAMES_VRE_STOR"] ==
+ collect(skipmissing(dfVRE_STOR[!, :resource][1:size(inputs["VRE_STOR"])[1]]))
+ @test inputs["RESOURCE_NAMES_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_DC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_AC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_DC_CHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_charge .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_AC_CHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_charge .!= 0), :resource]
+ @test inputs["ZONES_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar .!= 0), :zone]
+ @test inputs["ZONES_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind .!= 0), :zone]
+ @test inputs["ZONES_DC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge .!= 0), :zone]
+ @test inputs["ZONES_AC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge .!= 0), :zone]
+ @test inputs["ZONES_DC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_charge .!= 0), :zone]
+ @test inputs["ZONES_AC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_charge .!= 0), :zone]
end
with_logger(ConsoleLogger(stderr, Logging.Warn)) do
@@ -476,4 +567,4 @@ with_logger(ConsoleLogger(stderr, Logging.Warn)) do
test_load_VRE_STOR_data()
end
-end # module TestLoadResourceData
\ No newline at end of file
+end # module TestLoadResourceData
diff --git a/test/test_multifuels.jl b/test/test_multifuels.jl
index 050f908509..ff1a0efdac 100644
--- a/test/test_multifuels.jl
+++ b/test/test_multifuels.jl
@@ -8,8 +8,7 @@ obj_true = 5494.7919354
test_path = "multi_fuels"
# Define test inputs
-genx_setup = Dict(
- "Trans_Loss_Segments" => 1,
+genx_setup = Dict("Trans_Loss_Segments" => 1,
"EnergyShareRequirement" => 1,
"CapacityReserveMargin" => 1,
"StorageLosses" => 1,
@@ -17,8 +16,7 @@ genx_setup = Dict(
"MaxCapReq" => 1,
"ParameterScale" => 1,
"WriteShadowPrices" => 1,
- "UCommit" => 2,
-)
+ "UCommit" => 2)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
@@ -29,11 +27,11 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
optimal_tol = round_from_tol!(optimal_tol, optimal_tol)
write_testlog(test_path, obj_test, optimal_tol, test_result)
-end # module TestMultiFuels
\ No newline at end of file
+end # module TestMultiFuels
diff --git a/test/test_multistage.jl b/test/test_multistage.jl
index c7f8d83631..96b135ad08 100644
--- a/test/test_multistage.jl
+++ b/test/test_multistage.jl
@@ -5,38 +5,31 @@ using Test
include(joinpath(@__DIR__, "utilities.jl"))
obj_true = [79734.80032, 41630.03494, 27855.20631]
-test_path = joinpath(@__DIR__, "multi_stage");
+test_path = joinpath(@__DIR__, "multi_stage")
# Define test inputs
-multistage_setup = Dict(
- "NumStages" => 3,
+multistage_setup = Dict("NumStages" => 3,
"StageLengths" => [10, 10, 10],
"WACC" => 0.045,
"ConvergenceTolerance" => 0.01,
- "Myopic" => 0,
-)
+ "Myopic" => 0)
-genx_setup = Dict(
- "Trans_Loss_Segments" => 1,
+genx_setup = Dict("Trans_Loss_Segments" => 1,
"OperationalReserves" => 1,
"CO2Cap" => 2,
"StorageLosses" => 1,
"ParameterScale" => 1,
"UCommit" => 2,
"MultiStage" => 1,
- "MultiStageSettingsDict" => multistage_setup,
-)
+ "MultiStageSettingsDict" => multistage_setup)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
run_genx_case_testing(test_path, genx_setup)
end
-obj_test = objective_value.(EP[i] for i = 1:multistage_setup["NumStages"])
-optimal_tol_rel =
- get_attribute.(
- (EP[i] for i = 1:multistage_setup["NumStages"]),
- "ipm_optimality_tolerance",
- )
+obj_test = objective_value.(EP[i] for i in 1:multistage_setup["NumStages"])
+optimal_tol_rel = get_attribute.((EP[i] for i in 1:multistage_setup["NumStages"]),
+ "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel .* obj_test # Convert to absolute tolerance
# Test the objective value
@@ -47,17 +40,18 @@ obj_test = round_from_tol!.(obj_test, optimal_tol)
optimal_tol = round_from_tol!.(optimal_tol, optimal_tol)
write_testlog(test_path, obj_test, optimal_tol, test_result)
-function test_new_build(EP::Dict,inputs::Dict)
+function test_new_build(EP::Dict, inputs::Dict)
### Test that the resource with New_Build = 0 did not expand capacity
- a = true;
+ a = true
for t in keys(EP)
- if t==1
- a = value(EP[t][:eTotalCap][1]) <= GenX.existing_cap_mw(inputs[1]["RESOURCES"][1])[1]
+ if t == 1
+ a = value(EP[t][:eTotalCap][1]) <=
+ GenX.existing_cap_mw(inputs[1]["RESOURCES"][1])[1]
else
- a = value(EP[t][:eTotalCap][1]) <= value(EP[t-1][:eTotalCap][1])
+ a = value(EP[t][:eTotalCap][1]) <= value(EP[t - 1][:eTotalCap][1])
end
- if a==false
+ if a == false
break
end
end
@@ -65,17 +59,18 @@ function test_new_build(EP::Dict,inputs::Dict)
return a
end
-function test_can_retire(EP::Dict,inputs::Dict)
+function test_can_retire(EP::Dict, inputs::Dict)
### Test that the resource with Can_Retire = 0 did not retire capacity
- a = true;
-
+ a = true
+
for t in keys(EP)
- if t==1
- a = value(EP[t][:eTotalCap][1]) >= GenX.existing_cap_mw(inputs[1]["RESOURCES"][1])[1]
+ if t == 1
+ a = value(EP[t][:eTotalCap][1]) >=
+ GenX.existing_cap_mw(inputs[1]["RESOURCES"][1])[1]
else
- a = value(EP[t][:eTotalCap][1]) >= value(EP[t-1][:eTotalCap][1])
+ a = value(EP[t][:eTotalCap][1]) >= value(EP[t - 1][:eTotalCap][1])
end
- if a==false
+ if a == false
break
end
end
@@ -83,49 +78,60 @@ function test_can_retire(EP::Dict,inputs::Dict)
return a
end
-test_path_new_build = joinpath(test_path, "new_build");
+test_path_new_build = joinpath(test_path, "new_build")
EP, inputs, _ = redirect_stdout(devnull) do
- run_genx_case_testing(test_path_new_build, genx_setup);
+ run_genx_case_testing(test_path_new_build, genx_setup)
end
-new_build_test_result = @test test_new_build(EP,inputs)
-write_testlog(test_path,"Testing that the resource with New_Build = 0 did not expand capacity",new_build_test_result)
+new_build_test_result = @test test_new_build(EP, inputs)
+write_testlog(test_path,
+ "Testing that the resource with New_Build = 0 did not expand capacity",
+ new_build_test_result)
-test_path_can_retire = joinpath(test_path, "can_retire");
+test_path_can_retire = joinpath(test_path, "can_retire")
EP, inputs, _ = redirect_stdout(devnull) do
- run_genx_case_testing(test_path_can_retire, genx_setup);
+ run_genx_case_testing(test_path_can_retire, genx_setup)
end
-can_retire_test_result = @test test_can_retire(EP,inputs)
-write_testlog(test_path,"Testing that the resource with Can_Retire = 0 did not expand capacity",can_retire_test_result)
-
+can_retire_test_result = @test test_can_retire(EP, inputs)
+write_testlog(test_path,
+ "Testing that the resource with Can_Retire = 0 did not expand capacity",
+ can_retire_test_result)
function test_update_cumulative_min_ret!()
# Merge the genx_setup with the default settings
settings = GenX.default_settings()
- for ParameterScale ∈ [0,1]
+ for ParameterScale in [0, 1]
genx_setup["ParameterScale"] = ParameterScale
merge!(settings, genx_setup)
inputs_dict = Dict()
true_min_retirements = Dict()
-
+
scale_factor = settings["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.0
redirect_stdout(devnull) do
warnerror_logger = ConsoleLogger(stderr, Logging.Warn)
with_logger(warnerror_logger) do
for t in 1:3
- inpath_sub = joinpath(test_path, "cum_min_ret", string("inputs_p",t))
-
- true_min_retirements[t] = CSV.read(joinpath(inpath_sub, "resources", "Resource_multistage_data.csv"), DataFrame)
- rename!(true_min_retirements[t], lowercase.(names(true_min_retirements[t])))
+ inpath_sub = joinpath(test_path, "cum_min_ret", string("inputs_p", t))
+
+ true_min_retirements[t] = CSV.read(
+ joinpath(inpath_sub,
+ "resources",
+ "Resource_multistage_data.csv"),
+ DataFrame)
+ rename!(true_min_retirements[t],
+ lowercase.(names(true_min_retirements[t])))
GenX.scale_multistage_data!(true_min_retirements[t], scale_factor)
inputs_dict[t] = Dict()
inputs_dict[t]["Z"] = 1
GenX.load_demand_data!(settings, inpath_sub, inputs_dict[t])
- GenX.load_resources_data!(inputs_dict[t], settings, inpath_sub, joinpath(inpath_sub, settings["ResourcesFolder"]))
- compute_cumulative_min_retirements!(inputs_dict,t)
+ GenX.load_resources_data!(inputs_dict[t],
+ settings,
+ inpath_sub,
+ joinpath(inpath_sub, settings["ResourcesFolder"]))
+ compute_cumulative_min_retirements!(inputs_dict, t)
end
end
end
@@ -133,31 +139,121 @@ function test_update_cumulative_min_ret!()
for t in 1:3
# Test that the cumulative min retirements are updated correctly
gen = inputs_dict[t]["RESOURCES"]
- @test GenX.min_retired_cap_mw.(gen) == true_min_retirements[t].min_retired_cap_mw
- @test GenX.min_retired_energy_cap_mw.(gen) == true_min_retirements[t].min_retired_energy_cap_mw
- @test GenX.min_retired_charge_cap_mw.(gen) == true_min_retirements[t].min_retired_charge_cap_mw
- @test GenX.min_retired_cap_inverter_mw.(gen) == true_min_retirements[t].min_retired_cap_inverter_mw
- @test GenX.min_retired_cap_solar_mw.(gen) == true_min_retirements[t].min_retired_cap_solar_mw
- @test GenX.min_retired_cap_wind_mw.(gen) == true_min_retirements[t].min_retired_cap_wind_mw
- @test GenX.min_retired_cap_discharge_dc_mw.(gen) == true_min_retirements[t].min_retired_cap_discharge_dc_mw
- @test GenX.min_retired_cap_charge_dc_mw.(gen) == true_min_retirements[t].min_retired_cap_charge_dc_mw
- @test GenX.min_retired_cap_discharge_ac_mw.(gen) == true_min_retirements[t].min_retired_cap_discharge_ac_mw
- @test GenX.min_retired_cap_charge_ac_mw.(gen) == true_min_retirements[t].min_retired_cap_charge_ac_mw
-
- @test GenX.cum_min_retired_cap_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_mw for i in 1:t)
- @test GenX.cum_min_retired_energy_cap_mw.(gen) == sum(true_min_retirements[i].min_retired_energy_cap_mw for i in 1:t)
- @test GenX.cum_min_retired_charge_cap_mw.(gen) == sum(true_min_retirements[i].min_retired_charge_cap_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_inverter_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_inverter_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_solar_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_solar_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_wind_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_wind_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_discharge_dc_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_discharge_dc_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_charge_dc_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_charge_dc_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_discharge_ac_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_discharge_ac_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_charge_ac_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_charge_ac_mw for i in 1:t)
+ @test GenX.min_retired_cap_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_mw
+ @test GenX.min_retired_energy_cap_mw.(gen) ==
+ true_min_retirements[t].min_retired_energy_cap_mw
+ @test GenX.min_retired_charge_cap_mw.(gen) ==
+ true_min_retirements[t].min_retired_charge_cap_mw
+ @test GenX.min_retired_cap_inverter_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_inverter_mw
+ @test GenX.min_retired_cap_solar_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_solar_mw
+ @test GenX.min_retired_cap_wind_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_wind_mw
+ @test GenX.min_retired_cap_discharge_dc_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_discharge_dc_mw
+ @test GenX.min_retired_cap_charge_dc_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_charge_dc_mw
+ @test GenX.min_retired_cap_discharge_ac_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_discharge_ac_mw
+ @test GenX.min_retired_cap_charge_ac_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_charge_ac_mw
+
+ @test GenX.cum_min_retired_cap_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_mw for i in 1:t)
+ @test GenX.cum_min_retired_energy_cap_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_energy_cap_mw for i in 1:t)
+ @test GenX.cum_min_retired_charge_cap_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_charge_cap_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_inverter_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_inverter_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_solar_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_solar_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_wind_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_wind_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_discharge_dc_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_discharge_dc_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_charge_dc_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_charge_dc_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_discharge_ac_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_discharge_ac_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_charge_ac_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_charge_ac_mw for i in 1:t)
end
end
end
test_update_cumulative_min_ret!()
+function test_can_retire_validation()
+ @testset "No resources switch from can_retire = 0 to can_retire = 1" begin
+ inputs = Dict{Int, Dict}()
+ inputs[1] = Dict("RESOURCES" => [
+ GenX.Thermal(Dict(:resource => "thermal", :id => 1,
+ :can_retire => 1)),
+ GenX.Vre(Dict(:resource => "vre", :id => 2,
+ :can_retire => 1)),
+ GenX.Hydro(Dict(:resource => "hydro", :id => 3,
+ :can_retire => 1)),
+ GenX.FlexDemand(Dict(:resource => "flex_demand", :id => 4,
+ :can_retire => 1))])
+ inputs[2] = Dict("RESOURCES" => [
+ GenX.Thermal(Dict(:resource => "thermal", :id => 1,
+ :can_retire => 0)),
+ GenX.Vre(Dict(:resource => "vre", :id => 2,
+ :can_retire => 1)),
+ GenX.Hydro(Dict(:resource => "hydro", :id => 3,
+ :can_retire => 1)),
+ GenX.FlexDemand(Dict(:resource => "flex_demand", :id => 4,
+ :can_retire => 1))])
+ inputs[3] = Dict("RESOURCES" => [
+ GenX.Thermal(Dict(:resource => "thermal", :id => 1,
+ :can_retire => 0)),
+ GenX.Vre(Dict(:resource => "vre", :id => 2,
+ :can_retire => 0)),
+ GenX.Hydro(Dict(:resource => "hydro", :id => 3,
+ :can_retire => 1)),
+ GenX.FlexDemand(Dict(:resource => "flex_demand", :id => 4,
+ :can_retire => 1))])
+ @test isnothing(GenX.validate_can_retire_multistage(inputs, 3))
+ end
+
+ @testset "One resource switches from can_retire = 0 to can_retire = 1" begin
+ inputs = Dict{Int, Dict}()
+ inputs[1] = Dict("RESOURCES" => [
+ GenX.Thermal(Dict(:resource => "thermal", :id => 1,
+ :can_retire => 0)),
+ GenX.Vre(Dict(:resource => "vre", :id => 2,
+ :can_retire => 0)),
+ GenX.Hydro(Dict(:resource => "hydro", :id => 3,
+ :can_retire => 0)),
+ GenX.FlexDemand(Dict(:resource => "flex_demand", :id => 4,
+ :can_retire => 1))])
+ inputs[2] = Dict("RESOURCES" => [
+ GenX.Thermal(Dict(:resource => "thermal", :id => 1,
+ :can_retire => 0)),
+ GenX.Vre(Dict(:resource => "vre", :id => 2,
+ :can_retire => 0)),
+ GenX.Hydro(Dict(:resource => "hydro", :id => 3,
+ :can_retire => 1)),
+ GenX.FlexDemand(Dict(:resource => "flex_demand", :id => 4,
+ :can_retire => 1))])
+ inputs[3] = Dict("RESOURCES" => [
+ GenX.Thermal(Dict(:resource => "thermal", :id => 1,
+ :can_retire => 0)),
+ GenX.Vre(Dict(:resource => "vre", :id => 2,
+ :can_retire => 0)),
+ GenX.Hydro(Dict(:resource => "hydro", :id => 3,
+ :can_retire => 1)),
+ GenX.FlexDemand(Dict(:resource => "flex_demand", :id => 4,
+ :can_retire => 1))])
+ @test_throws ErrorException GenX.validate_can_retire_multistage(inputs, 3)
+ end
+end
+
+with_logger(ConsoleLogger(stderr, Logging.Error)) do
+ test_can_retire_validation()
+end
+
end # module TestMultiStage
diff --git a/test/test_piecewisefuel.jl b/test/test_piecewisefuel.jl
index a9630ce320..db52aaf0da 100644
--- a/test/test_piecewisefuel.jl
+++ b/test/test_piecewisefuel.jl
@@ -7,11 +7,9 @@ obj_true = 2341.82308
test_path = "piecewise_fuel"
# Define test inputs
-genx_setup = Dict(
- "UCommit" => 2,
+genx_setup = Dict("UCommit" => 2,
"CO2Cap" => 1,
- "ParameterScale" => 1,
-)
+ "ParameterScale" => 1)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
@@ -22,7 +20,7 @@ optimal_tol_rel = get_attribute(EP, "dual_feasibility_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_retrofit.jl b/test/test_retrofit.jl
index 20ce1c2ea0..54ae82ad5a 100644
--- a/test/test_retrofit.jl
+++ b/test/test_retrofit.jl
@@ -8,8 +8,7 @@ obj_true = 3179.6244
test_path = "retrofit"
# Define test inputs
-genx_setup = Dict(
- "CO2Cap" => 2,
+genx_setup = Dict("CO2Cap" => 2,
"StorageLosses" => 1,
"MinCapReq" => 1,
"MaxCapReq" => 1,
@@ -17,8 +16,7 @@ genx_setup = Dict(
"UCommit" => 2,
"EnergyShareRequirement" => 1,
"CapacityReserveMargin" => 1,
- "MultiStage" => 0,
-)
+ "MultiStage" => 0)
# Run the case and get the objective value and tolerance
EP, inputs, _ = redirect_stdout(devnull) do
@@ -29,7 +27,7 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_threezones.jl b/test/test_threezones.jl
index c533da8770..5d608e0f96 100644
--- a/test/test_threezones.jl
+++ b/test/test_threezones.jl
@@ -8,15 +8,13 @@ obj_true = 6960.20855
test_path = "three_zones"
# Define test inputs
-genx_setup = Dict(
- "NetworkExpansion" => 1,
+genx_setup = Dict("NetworkExpansion" => 1,
"Trans_Loss_Segments" => 1,
"CO2Cap" => 2,
"StorageLosses" => 1,
"MinCapReq" => 1,
"ParameterScale" => 1,
- "UCommit" => 2,
-)
+ "UCommit" => 2)
# Run the case and get the objective value and tolerance
EP, inputs, _ = redirect_stdout(devnull) do
@@ -27,7 +25,7 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_time_domain_reduction.jl b/test/test_time_domain_reduction.jl
index 90dedfc17f..7a70df7425 100644
--- a/test/test_time_domain_reduction.jl
+++ b/test/test_time_domain_reduction.jl
@@ -1,6 +1,5 @@
module TestTDR
-
import GenX
import Test
import JLD2, Clustering
@@ -17,7 +16,7 @@ TDR_Results_test = joinpath(test_folder, "TDR_results_test")
# Folder with true clustering results for LTS and non-LTS versions
TDR_Results_true = if VERSION == v"1.6.7"
joinpath(test_folder, "TDR_results_true_LTS")
-else
+else
joinpath(test_folder, "TDR_results_true")
end
@@ -27,23 +26,21 @@ if isdir(TDR_Results_test)
end
# Inputs for cluster_inputs function
-genx_setup = Dict(
- "TimeDomainReduction" => 1,
+genx_setup = Dict("TimeDomainReduction" => 1,
"TimeDomainReductionFolder" => "TDR_results_test",
"UCommit" => 2,
"CapacityReserveMargin" => 1,
"MinCapReq" => 1,
"MaxCapReq" => 1,
"EnergyShareRequirement" => 1,
- "CO2Cap" => 2,
-)
+ "CO2Cap" => 2)
settings = GenX.default_settings()
merge!(settings, genx_setup)
clustering_test = with_logger(ConsoleLogger(stderr, Logging.Warn)) do
GenX.cluster_inputs(test_folder, settings_path, settings, random = false)["ClusterObject"]
-end
+end
# Load true clustering
clustering_true = JLD2.load(joinpath(TDR_Results_true, "clusters_true.jld2"))["ClusterObject"]
diff --git a/test/utilities.jl b/test/utilities.jl
index 43417f5462..a98440ee7b 100644
--- a/test/utilities.jl
+++ b/test/utilities.jl
@@ -4,8 +4,7 @@ using Dates
using CSV, DataFrames
using Logging, LoggingExtras
-
-const TestResult = Union{Test.Result,String}
+const TestResult = Union{Test.Result, String}
# Exception to throw if a csv file is not found
struct CSVFileNotFound <: Exception
@@ -13,11 +12,12 @@ struct CSVFileNotFound <: Exception
end
Base.showerror(io::IO, e::CSVFileNotFound) = print(io, e.filefullpath, " not found")
-function run_genx_case_testing(
- test_path::AbstractString,
- test_setup::Dict,
- optimizer::Any = HiGHS.Optimizer,
-)
+const results_path = "results"
+!isdir(results_path) && mkdir(results_path)
+
+function run_genx_case_testing(test_path::AbstractString,
+ test_setup::Dict,
+ optimizer::Any = HiGHS.Optimizer)
# Merge the genx_setup with the default settings
settings = GenX.default_settings()
merge!(settings, test_setup)
@@ -36,11 +36,9 @@ function run_genx_case_testing(
return EP, inputs, OPTIMIZER
end
-function run_genx_case_conflict_testing(
- test_path::AbstractString,
- test_setup::Dict,
- optimizer::Any = HiGHS.Optimizer,
-)
+function run_genx_case_conflict_testing(test_path::AbstractString,
+ test_setup::Dict,
+ optimizer::Any = HiGHS.Optimizer)
# Merge the genx_setup with the default settings
settings = GenX.default_settings()
@@ -59,11 +57,9 @@ function run_genx_case_conflict_testing(
return output
end
-function run_genx_case_simple_testing(
- test_path::AbstractString,
- genx_setup::Dict,
- optimizer::Any,
-)
+function run_genx_case_simple_testing(test_path::AbstractString,
+ genx_setup::Dict,
+ optimizer::Any)
# Run the case
OPTIMIZER = configure_solver(test_path, optimizer)
inputs = load_inputs(genx_setup, test_path)
@@ -72,49 +68,42 @@ function run_genx_case_simple_testing(
return EP, inputs, OPTIMIZER
end
-function run_genx_case_multistage_testing(
- test_path::AbstractString,
- genx_setup::Dict,
- optimizer::Any,
-)
+function run_genx_case_multistage_testing(test_path::AbstractString,
+ genx_setup::Dict,
+ optimizer::Any)
# Run the case
OPTIMIZER = configure_solver(test_path, optimizer)
model_dict = Dict()
inputs_dict = Dict()
- for t = 1:genx_setup["MultiStageSettingsDict"]["NumStages"]
+ for t in 1:genx_setup["MultiStageSettingsDict"]["NumStages"]
# Step 0) Set Model Year
genx_setup["MultiStageSettingsDict"]["CurStage"] = t
# Step 1) Load Inputs
inpath_sub = joinpath(test_path, string("inputs_p", t))
inputs_dict[t] = load_inputs(genx_setup, inpath_sub)
- inputs_dict[t] = configure_multi_stage_inputs(
- inputs_dict[t],
+ inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],
genx_setup["MultiStageSettingsDict"],
- genx_setup["NetworkExpansion"],
- )
+ genx_setup["NetworkExpansion"])
compute_cumulative_min_retirements!(inputs_dict, t)
# Step 2) Generate model
model_dict[t] = generate_model(genx_setup, inputs_dict[t], OPTIMIZER)
end
- model_dict, _, inputs_dict = run_ddp(model_dict, genx_setup, inputs_dict)
+ model_dict, _, inputs_dict = run_ddp(results_path, model_dict, genx_setup, inputs_dict)
return model_dict, inputs_dict, OPTIMIZER
end
-
-function write_testlog(
- test_path::AbstractString,
- message::AbstractString,
- test_result::TestResult,
-)
+function write_testlog(test_path::AbstractString,
+ message::AbstractString,
+ test_result::TestResult)
# Save the results to a log file
# Format: datetime, message, test result
- Log_path = joinpath(@__DIR__,"Logs")
+ Log_path = joinpath(@__DIR__, "Logs")
if !isdir(Log_path)
mkdir(Log_path)
end
@@ -132,24 +121,20 @@ function write_testlog(
end
end
-function write_testlog(
- test_path::AbstractString,
- obj_test::Real,
- optimal_tol::Real,
- test_result::TestResult,
-)
+function write_testlog(test_path::AbstractString,
+ obj_test::Real,
+ optimal_tol::Real,
+ test_result::TestResult)
# Save the results to a log file
# Format: datetime, objective value ± tolerance, test result
message = "$obj_test ± $optimal_tol"
write_testlog(test_path, message, test_result)
end
-function write_testlog(
- test_path::AbstractString,
- obj_test::Vector{<:Real},
- optimal_tol::Vector{<:Real},
- test_result::TestResult,
-)
+function write_testlog(test_path::AbstractString,
+ obj_test::Vector{<:Real},
+ optimal_tol::Vector{<:Real},
+ test_result::TestResult)
# Save the results to a log file
# Format: datetime, [objective value ± tolerance], test result
@assert length(obj_test) == length(optimal_tol)
@@ -227,13 +212,15 @@ Compare two columns of a DataFrame. Return true if they are identical or approxi
function isapprox_col(col1, col2)
if isequal(col1, col2) || (eltype(col1) <: Float64 && isapprox(col1, col2))
return true
- elseif eltype(col1) <: AbstractString
+ elseif eltype(col1) <: AbstractString
isapprox_col = true
for i in eachindex(col1)
- if !isapprox_col
+ if !isapprox_col
break
- elseif !isnothing(tryparse(Float64, col1[i])) && !isnothing(tryparse(Float64, col2[i]))
- isapprox_col = isapprox_col && isapprox(parse(Float64, col1[i]), parse(Float64, col2[i]))
+ elseif !isnothing(tryparse(Float64, col1[i])) &&
+ !isnothing(tryparse(Float64, col2[i]))
+ isapprox_col = isapprox_col &&
+ isapprox(parse(Float64, col1[i]), parse(Float64, col2[i]))
else
isapprox_col = isapprox_col && isequal(col1[i], col2[i])
end
@@ -243,7 +230,6 @@ function isapprox_col(col1, col2)
return false
end
-
macro warn_error_logger(block)
quote
result = nothing
@@ -256,4 +242,4 @@ macro warn_error_logger(block)
end
result
end
-end
\ No newline at end of file
+end
diff --git a/test/writing_outputs/test_writing_stats_ms.jl b/test/writing_outputs/test_writing_stats_ms.jl
new file mode 100644
index 0000000000..305fa04a77
--- /dev/null
+++ b/test/writing_outputs/test_writing_stats_ms.jl
@@ -0,0 +1,107 @@
+module TestWritingStatsMs
+
+using Test
+using CSV, DataFrames
+using GenX
+
+# create temporary directory for testing
+mkpath("writing_outputs/multi_stage_stats_tmp")
+outpath = "writing_outputs/multi_stage_stats_tmp"
+filename = GenX._get_multi_stage_stats_filename()
+
+function test_header()
+ # Note: if this test fails, it means that the header in the function _get_multi_stage_stats_header() has been changed.
+ # Make sure to check that the code is consistent with the new header, and update the test accordingly.
+ header = GenX._get_multi_stage_stats_header()
+ @test header ==
+ ["Iteration_Number", "Seconds", "Upper_Bound", "Lower_Bound", "Relative_Gap"]
+end
+
+function test_skip_existing_file()
+ touch(joinpath(outpath, filename))
+ # If the file already exists, don't overwrite it
+ write_multi_stage_stats = GenX.write_multi_stage_stats(outpath, Dict())
+ @test isnothing(write_multi_stage_stats)
+ rm(joinpath(outpath, filename))
+end
+
+function test_write_multi_stage_stats(iter::Int64 = 10)
+ # test writing stats to file for `iter` number of iterations
+ times_a, upper_bounds_a, lower_bounds_a = rand(iter), rand(iter), rand(iter)
+ stats_d = Dict("TIMES" => times_a, "UPPER_BOUNDS" => upper_bounds_a,
+ "LOWER_BOUNDS" => lower_bounds_a)
+
+ @test isnothing(GenX.write_multi_stage_stats(outpath, stats_d))
+ df_stats = CSV.read(joinpath(outpath, filename), DataFrame)
+ header = GenX._get_multi_stage_stats_header()
+ @test size(df_stats) == (iter, length(header))
+ for i in 1:iter
+ test_stats_d(df_stats, i, times_a[i], upper_bounds_a[i], lower_bounds_a[i],
+ (upper_bounds_a[i] - lower_bounds_a[i]) / lower_bounds_a[i])
+ end
+ rm(joinpath(outpath, filename))
+end
+
+function test_create_multi_stage_stats_file()
+ GenX.create_multi_stage_stats_file(outpath)
+ df_stats = CSV.read(joinpath(outpath, filename), DataFrame)
+ @test size(df_stats, 1) == 0
+ @test size(df_stats, 2) == 5
+ @test names(df_stats) == GenX._get_multi_stage_stats_header()
+ rm(joinpath(outpath, filename))
+end
+
+function test_update_multi_stage_stats_file(iter::Int64 = 10)
+ # test updating the stats file with new values
+ header = GenX._get_multi_stage_stats_header()
+ GenX.create_multi_stage_stats_file(outpath)
+ lower_bound = rand()
+ iteration_time = rand()
+ for i in 1:iter
+ # upper bound is updated
+ upper_bound = rand()
+ GenX.update_multi_stage_stats_file(
+ outpath, i, upper_bound, lower_bound, iteration_time, new_row = true)
+ df_stats = CSV.read(joinpath(outpath, filename), DataFrame)
+ test_stats_d(df_stats, i, iteration_time, upper_bound, lower_bound,
+ (upper_bound - lower_bound) / lower_bound)
+ # lower bound is updated
+ lower_bound = rand()
+ GenX.update_multi_stage_stats_file(
+ outpath, i, upper_bound, lower_bound, iteration_time)
+ df_stats = CSV.read(joinpath(outpath, filename), DataFrame)
+ test_stats_d(df_stats, i, iteration_time, upper_bound, lower_bound,
+ (upper_bound - lower_bound) / lower_bound)
+ # iteration time is updated
+ iteration_time = rand()
+ GenX.update_multi_stage_stats_file(
+ outpath, i, upper_bound, lower_bound, iteration_time)
+ df_stats = CSV.read(joinpath(outpath, filename), DataFrame)
+ test_stats_d(df_stats, i, iteration_time, upper_bound, lower_bound,
+ (upper_bound - lower_bound) / lower_bound)
+ # test size
+ @test size(df_stats) == (i, length(header))
+ end
+ rm(joinpath(outpath, filename))
+end
+
+function test_stats_d(df_stats, i, iteration_time, upper_bound, lower_bound, relative_gap)
+ header = GenX._get_multi_stage_stats_header()
+ @test df_stats[i, header[1]] == i
+ @test df_stats[i, header[2]] == iteration_time
+ @test df_stats[i, header[3]] == upper_bound
+ @test df_stats[i, header[4]] == lower_bound
+ @test df_stats[i, header[5]] == relative_gap
+end
+
+@testset "Test writing multi-stage stats" begin
+ test_header()
+ test_skip_existing_file()
+ test_write_multi_stage_stats()
+ test_create_multi_stage_stats_file()
+ test_update_multi_stage_stats_file()
+end
+
+rm(outpath)
+
+end # module TestWritingStatsMs