diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml
new file mode 100644
index 0000000000..453925c3f9
--- /dev/null
+++ b/.JuliaFormatter.toml
@@ -0,0 +1 @@
+style = "sciml"
\ No newline at end of file
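
A minimal local sketch of how this configuration is typically consumed (assuming JuliaFormatter is installed): `format` looks for a `.JuliaFormatter.toml` in the target's directory and its parents, so the `style = "sciml"` entry above is applied without passing a style argument explicitly.

```julia
using JuliaFormatter

# Format the whole repository in place; the SciML style declared in
# .JuliaFormatter.toml is picked up automatically.
format(".")
```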
diff --git a/.github/workflows/format_suggestions.yml b/.github/workflows/format_suggestions.yml
new file mode 100644
index 0000000000..05e574dd68
--- /dev/null
+++ b/.github/workflows/format_suggestions.yml
@@ -0,0 +1,9 @@
+name: Format suggestions
+on:
+ pull_request:
+
+jobs:
+ code-style:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: julia-actions/julia-format@v2
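
As a rough local equivalent of this CI check (a sketch, not the action's actual implementation), JuliaFormatter can be run in non-overwriting mode; `format` returns `true` when the targets already conform to the configured style.

```julia
using JuliaFormatter

# Check formatting without modifying files; mirrors what the CI job verifies.
is_formatted = format("."; overwrite = false)
is_formatted || @warn "Some files do not follow the configured style; run JuliaFormatter locally."
```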
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3792d38e89..c372739aaa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
introduce this parameter as cost of virtual charging and discharging to avoid unusual results (#608).
- New settings parameter, StorageVirtualDischarge, to turn storage virtual charging and discharging off if desired by the user (#638).
- Add module to retrofit existing resources with new technologies (#600).
+- Formatted the code and added a format check to the CI pipeline (#673).
### Fixed
- Set MUST_RUN=1 for RealSystemExample/small_hydro plants (#517).
diff --git a/README.md b/README.md
index f4eb78ce22..7a4e642e3f 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,10 @@
-[![CI](https://github.com/GenXProject/GenX/actions/workflows/ci.yml/badge.svg)](https://github.com/GenXProject/GenX/actions/workflows/ci.yml)
-[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://genxproject.github.io/GenX.jl/dev)
-[![DOI](https://zenodo.org/badge/368957308.svg)](https://zenodo.org/doi/10.5281/zenodo.10846069)
-[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
+| **Documentation** | **DOI** |
+|:-------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------:|
+[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://genxproject.github.io/GenX.jl/stable/) [![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://genxproject.github.io/GenX.jl/dev) | [![DOI](https://zenodo.org/badge/368957308.svg)](https://zenodo.org/doi/10.5281/zenodo.10846069)
+
+[![CI](https://github.com/GenXProject/GenX/actions/workflows/ci.yml/badge.svg)](https://github.com/GenXProject/GenX/actions/workflows/ci.yml) [![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle) [![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
## Overview
GenX is a highly-configurable, [open source](https://github.com/GenXProject/GenX/blob/main/LICENSE) electricity resource capacity expansion model
diff --git a/docs/make.jl b/docs/make.jl
index eeaa501c5b..f31c297eeb 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -2,14 +2,13 @@ using Documenter
using GenX
import DataStructures: OrderedDict
-DocMeta.setdocmeta!(GenX, :DocTestSetup, :(using GenX); recursive=true)
+DocMeta.setdocmeta!(GenX, :DocTestSetup, :(using GenX); recursive = true)
-pages = OrderedDict(
- "Welcome Page" => [
+pages = OrderedDict("Welcome Page" => [
"GenX: Introduction" => "index.md",
"Installation Guide" => "installation.md",
"Limitation of GenX" => "limitations_genx.md",
- "Third Party Extensions" => "third_party_genx.md"
+ "Third Party Extensions" => "third_party_genx.md",
],
"Getting Started" => [
"Running GenX" => "Getting_Started/examples_casestudies.md",
@@ -51,7 +50,7 @@ pages = OrderedDict(
"Flexible Demand" => "Model_Reference/Resources/flexible_demand.md",
"Hydro" => [
"Hydro Reservoir" => "Model_Reference/Resources/hydro_res.md",
- "Long Duration Hydro" => "Model_Reference/Resources/hydro_inter_period_linkage.md"
+ "Long Duration Hydro" => "Model_Reference/Resources/hydro_inter_period_linkage.md",
],
"Must Run" => "Model_Reference/Resources/must_run.md",
"Retrofit" => "Model_Reference/Resources/retrofit.md",
@@ -62,18 +61,18 @@ pages = OrderedDict(
"Long Duration Storage" => "Model_Reference/Resources/long_duration_storage.md",
"Storage All" => "Model_Reference/Resources/storage_all.md",
"Storage Asymmetric" => "Model_Reference/Resources/storage_asymmetric.md",
- "Storage Symmetric" => "Model_Reference/Resources/storage_symmetric.md"
+ "Storage Symmetric" => "Model_Reference/Resources/storage_symmetric.md",
],
"Co-located VRE and Storage" => "Model_Reference/Resources/vre_stor.md",
"Thermal" => [
"Thermal" => "Model_Reference/Resources/thermal.md",
"Thermal Commit" => "Model_Reference/Resources/thermal_commit.md",
- "Thermal No Commit" => "Model_Reference/Resources/thermal_no_commit.md"
+ "Thermal No Commit" => "Model_Reference/Resources/thermal_no_commit.md",
],
"Hydrogen Electrolyzers" => "Model_Reference/Resources/electrolyzers.md",
"Retrofit" => "Model_Reference/Resources/retrofit.md",
"Scheduled maintenance for various resources" => "Model_Reference/Resources/maintenance.md",
- "Resource types" => "Model_Reference/Resources/resource.md"
+ "Resource types" => "Model_Reference/Resources/resource.md",
],
"Maintenance" => "Model_Reference/maintenance_overview.md",
"Policies" => "Model_Reference/policies.md",
@@ -88,46 +87,40 @@ pages = OrderedDict(
"Multi-Stage Modeling Introduction" => "Model_Reference/Multi_Stage/multi_stage_overview.md",
"Configure multi-stage inputs" => "Model_Reference/Multi_Stage/configure_multi_stage_inputs.md",
"Model multi stage: Dual Dynamic Programming Algorithm" => "Model_Reference/Multi_Stage/dual_dynamic_programming.md",
- "Endogenous Retirement" => "Model_Reference/Multi_Stage/endogenous_retirement.md"
+ "Endogenous Retirement" => "Model_Reference/Multi_Stage/endogenous_retirement.md",
],
"Method of Morris" => "Model_Reference/methodofmorris.md",
"Utility Functions" => "Model_Reference/utility_functions.md",
],
"Public API Reference" => [
- "Public API" => "Public_API/public_api.md",
-
- ],
+ "Public API" => "Public_API/public_api.md"],
"Third Party Extensions" => "additional_third_party_extensions.md",
- "Developer Docs" => "developer_guide.md",
-)
+ "Developer Docs" => "developer_guide.md")
# Build documentation.
# ====================
makedocs(;
- modules=[GenX],
- authors="Jesse Jenkins, Nestor Sepulveda, Dharik Mallapragada, Aaron Schwartz, Neha Patankar, Qingyu Xu, Jack Morris, Sambuddha Chakrabarti",
- sitename="GenX.jl",
- format=Documenter.HTML(;
- prettyurls=get(ENV, "CI", "false") == "true",
- canonical="https://genxproject.github.io/GenX.jl/stable",
+ modules = [GenX],
+ authors = "Jesse Jenkins, Nestor Sepulveda, Dharik Mallapragada, Aaron Schwartz, Neha Patankar, Qingyu Xu, Jack Morris, Sambuddha Chakrabarti",
+ sitename = "GenX.jl",
+ format = Documenter.HTML(;
+ prettyurls = get(ENV, "CI", "false") == "true",
+ canonical = "https://genxproject.github.io/GenX.jl/stable",
assets = ["assets/genx_style.css"],
- sidebar_sitename=false,
- collapselevel=1
- ),
- pages=[p for p in pages]
-)
+ sidebar_sitename = false,
+ collapselevel = 1),
+ pages = [p for p in pages])
# Deploy built documentation.
# ===========================
deploydocs(;
- repo="github.com/GenXProject/GenX.jl.git",
+ repo = "github.com/GenXProject/GenX.jl.git",
target = "build",
branch = "gh-pages",
devbranch = "main",
devurl = "dev",
- push_preview=true,
+ push_preview = true,
versions = ["stable" => "v^", "v#.#.#", "dev" => "dev"],
- forcepush = false,
-)
+ forcepush = false)
diff --git a/docs/src/developer_guide.md b/docs/src/developer_guide.md
index 584a60458b..bca5d37293 100644
--- a/docs/src/developer_guide.md
+++ b/docs/src/developer_guide.md
@@ -12,6 +12,19 @@ GenX is an open-source project, and we welcome contributions from the community.
The following sections describe in more detail how to work with GenX resources and how to add a new resource to GenX.
+## Style guide
+The GenX project follows the [SciML Style Guide](https://github.com/SciML/SciMLStyle). We encourage contributors to follow this style guide when submitting code changes to GenX. Before submitting a new PR, please run the following command to format a directory or a single file:
+```julia
+julia> using JuliaFormatter
+julia> format("path_to_directory", SciMLStyle(), verbose=true)
+```
+or
+```julia
+julia> using JuliaFormatter
+julia> format("path_to_file.jl", SciMLStyle(), verbose=true)
+```
+The GenX GitHub repository is configured to verify the code style of each PR and will automatically post comments to help you format the code according to the style guide.
+
## GenX resources
In GenX, a resource is defined as an instance of a `GenX resource type`, a subtype of an `AbstractResource`. This allows the code to use multiple dispatch and define a common interface (behavior) for all resources in the code.
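
Building on the style-guide instructions added above, a hedged example of formatting only the files touched on a branch before opening a PR (the file list below is illustrative):

```julia
using JuliaFormatter

# Hypothetical list of files changed on the current branch.
changed_files = ["src/configure_settings/configure_settings.jl",
                 "src/load_inputs/load_demand_data.jl"]

# Format each changed file with the SciML style, as recommended in the developer guide.
foreach(f -> format(f, SciMLStyle(); verbose = true), changed_files)
```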
diff --git a/example_systems/1_three_zones/Run.jl b/example_systems/1_three_zones/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/1_three_zones/Run.jl
+++ b/example_systems/1_three_zones/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/2_three_zones_w_electrolyzer/Run.jl b/example_systems/2_three_zones_w_electrolyzer/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/2_three_zones_w_electrolyzer/Run.jl
+++ b/example_systems/2_three_zones_w_electrolyzer/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/3_three_zones_w_co2_capture/Run.jl b/example_systems/3_three_zones_w_co2_capture/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/3_three_zones_w_co2_capture/Run.jl
+++ b/example_systems/3_three_zones_w_co2_capture/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/4_three_zones_w_policies_slack/Run.jl b/example_systems/4_three_zones_w_policies_slack/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/4_three_zones_w_policies_slack/Run.jl
+++ b/example_systems/4_three_zones_w_policies_slack/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/6_three_zones_w_multistage/Run.jl b/example_systems/6_three_zones_w_multistage/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/6_three_zones_w_multistage/Run.jl
+++ b/example_systems/6_three_zones_w_multistage/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/7_three_zones_w_colocated_VRE_storage/Run.jl b/example_systems/7_three_zones_w_colocated_VRE_storage/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/7_three_zones_w_colocated_VRE_storage/Run.jl
+++ b/example_systems/7_three_zones_w_colocated_VRE_storage/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/example_systems/9_IEEE_9_bus_DC_OPF/Run.jl b/example_systems/9_IEEE_9_bus_DC_OPF/Run.jl
index 7ce1891834..b44ca23ec1 100644
--- a/example_systems/9_IEEE_9_bus_DC_OPF/Run.jl
+++ b/example_systems/9_IEEE_9_bus_DC_OPF/Run.jl
@@ -1,3 +1,3 @@
using GenX
-run_genx_case!(dirname(@__FILE__))
\ No newline at end of file
+run_genx_case!(dirname(@__FILE__))
diff --git a/src/additional_tools/method_of_morris.jl b/src/additional_tools/method_of_morris.jl
index 0938c8022e..d4628b55fa 100644
--- a/src/additional_tools/method_of_morris.jl
+++ b/src/additional_tools/method_of_morris.jl
@@ -1,28 +1,28 @@
const SEED = 1234
-
@doc raw"""
morris(EP::Model, path::AbstractString, setup::Dict, inputs::Dict, outpath::AbstractString, OPTIMIZER)
We apply the Method of Morris developed by [Morris, M., 1991](https://www.jstor.org/stable/1269043) in order to identify the input parameters that produce the largest change in total system cost. The Method of Morris falls under the simplest class of one-factor-at-a-time (OAT) screening techniques. It assumes $l$ levels per input factor and generates a set of trajectories through the input space. As such, the Method of Morris generates a grid of uncertain model input parameters, $x_i, i=1, ..., k$, where the range $[x_i^{-}, x_i^{+}]$ of each uncertain input parameter $i$ is split into $l$ intervals of equal length. Each trajectory starts at a different realization of the input parameters chosen at random and is built by successively selecting one of the inputs randomly and moving it to an adjacent level. These trajectories are used to estimate the mean and the standard deviation of the effect of each input parameter on total system cost. A high estimated mean indicates that the input parameter is important; a high estimated standard deviation indicates important interactions between that input parameter and other inputs.
"""
-struct MatSpread{T1,T2}
+struct MatSpread{T1, T2}
mat::T1
spread::T2
end
-struct MorrisResult{T1,T2}
+struct MorrisResult{T1, T2}
means::T1
means_star::T1
variances::T1
elementary_effects::T2
end
-function generate_design_matrix(p_range, p_steps, rng;len_design_mat,groups)
- ps = [range(p_range[i][1], stop=p_range[i][2], length=p_steps[i]) for i in 1:length(p_range)]
+function generate_design_matrix(p_range, p_steps, rng; len_design_mat, groups)
+ ps = [range(p_range[i][1], stop = p_range[i][2], length = p_steps[i])
+ for i in 1:length(p_range)]
indices = [rand(rng, 1:i) for i in p_steps]
- all_idxs_original = Vector{typeof(indices)}(undef,len_design_mat)
-
+ all_idxs_original = Vector{typeof(indices)}(undef, len_design_mat)
+
for i in 1:len_design_mat
j = rand(rng, 1:length(p_range))
indices[j] += (rand(rng) < 0.5 ? -1 : 1)
@@ -34,20 +34,20 @@ function generate_design_matrix(p_range, p_steps, rng;len_design_mat,groups)
all_idxs_original[i] = copy(indices)
end
- df_all_idx_original = DataFrame(all_idxs_original,:auto)
+ df_all_idx_original = DataFrame(all_idxs_original, :auto)
println(df_all_idx_original)
all_idxs = similar(df_all_idx_original)
for g in unique(groups)
- temp = findall(x->x==g, groups)
+ temp = findall(x -> x == g, groups)
for k in temp
- all_idxs[k,:] = df_all_idx_original[temp[1],:]
+ all_idxs[k, :] = df_all_idx_original[temp[1], :]
end
end
println(all_idxs)
- B = Array{Array{Float64}}(undef,len_design_mat)
+ B = Array{Array{Float64}}(undef, len_design_mat)
for j in 1:len_design_mat
- cur_p = [ps[u][(all_idxs[:,j][u])] for u in 1:length(p_range)]
+ cur_p = [ps[u][(all_idxs[:, j][u])] for u in 1:length(p_range)]
B[j] = cur_p
end
reduce(hcat, B)
@@ -55,45 +55,66 @@ end
function calculate_spread(matrix)
spread = 0.0
- for i in 2:size(matrix,2)
- spread += sqrt(sum(abs2.(matrix[:,i] - matrix[:,i-1])))
+ for i in 2:size(matrix, 2)
+ spread += sqrt(sum(abs2.(matrix[:, i] - matrix[:, i - 1])))
end
spread
end
-function sample_matrices(p_range,p_steps, rng;num_trajectory,total_num_trajectory,len_design_mat,groups)
+function sample_matrices(p_range,
+ p_steps,
+ rng;
+ num_trajectory,
+ total_num_trajectory,
+ len_design_mat,
+ groups)
matrix_array = []
println(num_trajectory)
println(total_num_trajectory)
- if total_num_trajectory<num_trajectory
- sort!(matrix_array,by = x->x.spread,rev=true)
+ sort!(matrix_array, by = x -> x.spread, rev = true)
matrices = [i.mat for i in matrix_array[1:num_trajectory]]
- reduce(hcat,matrices)
+ reduce(hcat, matrices)
end
-function my_gsa(f, p_steps, num_trajectory, total_num_trajectory, p_range::AbstractVector, len_design_mat, groups, random)
+function my_gsa(f,
+ p_steps,
+ num_trajectory,
+ total_num_trajectory,
+ p_range::AbstractVector,
+ len_design_mat,
+ groups,
+ random)
rng = Random.default_rng()
- if !random; Random.seed!(SEED); end
- design_matrices_original = sample_matrices(p_range, p_steps, rng;num_trajectory,
- total_num_trajectory,len_design_mat,groups)
+ if !random
+ Random.seed!(SEED)
+ end
+ design_matrices_original = sample_matrices(p_range, p_steps, rng; num_trajectory,
+ total_num_trajectory, len_design_mat, groups)
println(design_matrices_original)
- L = DataFrame(design_matrices_original,:auto)
+ L = DataFrame(design_matrices_original, :auto)
println(L)
- distinct_trajectories = Array{Int64}(undef,num_trajectory)
- design_matrices = Matrix(DataFrame(unique(last, pairs(eachcol(L[!,1:len_design_mat])))))
- distinct_trajectories[1] = length(design_matrices[1,:])
+ distinct_trajectories = Array{Int64}(undef, num_trajectory)
+ design_matrices = Matrix(DataFrame(unique(last,
+ pairs(eachcol(L[!, 1:len_design_mat])))))
+ distinct_trajectories[1] = length(design_matrices[1, :])
if num_trajectory > 1
for i in 2:num_trajectory
- design_matrices = hcat(design_matrices, Matrix(DataFrame(unique(last, pairs(eachcol(L[!,(i-1)*len_design_mat+1:i*len_design_mat]))))))
- distinct_trajectories[i] = length(Matrix(DataFrame(unique(last, pairs(eachcol(L[!,(i-1)*len_design_mat+1:i*len_design_mat])))))[1,:])
+ design_matrices = hcat(design_matrices,
+ Matrix(DataFrame(unique(last,
+ pairs(eachcol(L[!,
+ ((i - 1) * len_design_mat + 1):(i * len_design_mat)]))))))
+ distinct_trajectories[i] = length(Matrix(DataFrame(unique(last,
+ pairs(eachcol(L[!, ((i - 1) * len_design_mat + 1):(i * len_design_mat)])))))[1,
+ :])
end
end
println(distinct_trajectories)
@@ -102,26 +123,27 @@ function my_gsa(f, p_steps, num_trajectory, total_num_trajectory, p_range::Abstr
multioutput = false
desol = false
local y_size
-
- _y = [f(design_matrices[:,i]) for i in 1:size(design_matrices,2)]
+
+ _y = [f(design_matrices[:, i]) for i in 1:size(design_matrices, 2)]
multioutput = !(eltype(_y) <: Number)
if eltype(_y) <: RecursiveArrayTools.AbstractVectorOfArray
y_size = size(_y[1])
_y = vec.(_y)
desol = true
end
- all_y = multioutput ? reduce(hcat,_y) : _y
+ all_y = multioutput ? reduce(hcat, _y) : _y
println(all_y)
effects = []
- while(length(effects) < length(groups))
- push!(effects,Vector{Float64}[])
+ while (length(effects) < length(groups))
+ push!(effects, Vector{Float64}[])
end
for i in 1:num_trajectory
len_design_mat = distinct_trajectories[i]
- y1 = multioutput ? all_y[:,(i-1)*len_design_mat+1] : all_y[(i-1)*len_design_mat+1]
- for j in (i-1)*len_design_mat+1:(i*len_design_mat)-1
+ y1 = multioutput ? all_y[:, (i - 1) * len_design_mat + 1] :
+ all_y[(i - 1) * len_design_mat + 1]
+ for j in ((i - 1) * len_design_mat + 1):((i * len_design_mat) - 1)
y2 = y1
- del = design_matrices[:,j+1] - design_matrices[:,j]
+ del = design_matrices[:, j + 1] - design_matrices[:, j]
change_index = 0
for k in 1:length(del)
if abs(del[k]) > 0
@@ -130,14 +152,14 @@ function my_gsa(f, p_steps, num_trajectory, total_num_trajectory, p_range::Abstr
end
end
del = sum(del)
- y1 = multioutput ? all_y[:,j+1] : all_y[j+1]
- effect = @. (y1-y2)/(del)
+ y1 = multioutput ? all_y[:, j + 1] : all_y[j + 1]
+ effect = @. (y1 - y2) / (del)
elem_effect = typeof(y1) <: Number ? effect : mean(effect, dims = 2)
- temp_g_index = findall(x->x==groups[change_index], groups)
+ temp_g_index = findall(x -> x == groups[change_index], groups)
for g in temp_g_index
println(effects)
println(elem_effect)
- push!(effects[g],elem_effect)
+ push!(effects[g], elem_effect)
end
end
end
@@ -156,23 +178,32 @@ function my_gsa(f, p_steps, num_trajectory, total_num_trajectory, p_range::Abstr
end
end
if desol
- f_shape = x -> [reshape(x[:,i],y_size) for i in 1:size(x,2)]
- means = map(f_shape,means)
- means_star = map(f_shape,means_star)
- variances = map(f_shape,variances)
+ f_shape = x -> [reshape(x[:, i], y_size) for i in 1:size(x, 2)]
+ means = map(f_shape, means)
+ means_star = map(f_shape, means_star)
+ variances = map(f_shape, variances)
end
- MorrisResult(reduce(hcat, means),reduce(hcat, means_star),reduce(hcat, variances),effects)
+ MorrisResult(reduce(hcat, means),
+ reduce(hcat, means_star),
+ reduce(hcat, variances),
+ effects)
end
-function morris(EP::Model, path::AbstractString, setup::Dict, inputs::Dict, outpath::AbstractString, OPTIMIZER; random=true)
+function morris(EP::Model,
+ path::AbstractString,
+ setup::Dict,
+ inputs::Dict,
+ outpath::AbstractString,
+ OPTIMIZER;
+ random = true)
# Reading the input parameters
Morris_range = load_dataframe(joinpath(path, "Method_of_morris_range.csv"))
- groups = Morris_range[!,:Group]
- p_steps = Morris_range[!,:p_steps]
- total_num_trajectory = Morris_range[!,:total_num_trajectory][1]
- num_trajectory = Morris_range[!,:num_trajectory][1]
- len_design_mat = Morris_range[!,:len_design_mat][1]
- uncertain_columns = unique(Morris_range[!,:Parameter])
+ groups = Morris_range[!, :Group]
+ p_steps = Morris_range[!, :p_steps]
+ total_num_trajectory = Morris_range[!, :total_num_trajectory][1]
+ num_trajectory = Morris_range[!, :num_trajectory][1]
+ len_design_mat = Morris_range[!, :len_design_mat][1]
+ uncertain_columns = unique(Morris_range[!, :Parameter])
#save_parameters = zeros(length(Morris_range[!,:Parameter]))
gen = inputs["RESOURCES"]
@@ -181,40 +212,53 @@ function morris(EP::Model, path::AbstractString, setup::Dict, inputs::Dict, outp
for column in uncertain_columns
col_sym = Symbol(lowercase(column))
# column_f is the function to get the value "column" for each generator
- column_f = isdefined(GenX, col_sym) ? getfield(GenX, col_sym) : r -> getproperty(r, col_sym)
- sigma = [sigma; [column_f.(gen) .* (1 .+ Morris_range[Morris_range[!,:Parameter] .== column, :Lower_bound] ./100) column_f.(gen) .* (1 .+ Morris_range[Morris_range[!,:Parameter] .== column, :Upper_bound] ./100)]]
+ column_f = isdefined(GenX, col_sym) ? getfield(GenX, col_sym) :
+ r -> getproperty(r, col_sym)
+ sigma = [sigma;
+ [column_f.(gen) .* (1 .+
+ Morris_range[Morris_range[!, :Parameter] .== column, :Lower_bound] ./ 100) column_f.(gen) .*
+ (1 .+
+ Morris_range[Morris_range[!, :Parameter] .== column,
+ :Upper_bound] ./ 100)]]
end
- sigma = sigma[2:end,:]
+ sigma = sigma[2:end, :]
- p_range = mapslices(x->[x], sigma, dims=2)[:]
+ p_range = mapslices(x -> [x], sigma, dims = 2)[:]
# Creating a function for iteratively solving the model with different sets of input parameters
- f1 = function(sigma)
+ f1 = function (sigma)
#print(sigma)
print("\n")
#save_parameters = hcat(save_parameters, sigma)
for column in uncertain_columns
- index = findall(s -> s == column, Morris_range[!,:Parameter])
+ index = findall(s -> s == column, Morris_range[!, :Parameter])
attr_to_set = Symbol(lowercase(column))
gen[attr_to_set] = sigma[first(index):last(index)]
end
EP = generate_model(setup, inputs, OPTIMIZER)
#EP, solve_time = solve_model(EP, setup)
- redirect_stdout((()->optimize!(EP)),open("/dev/null", "w"))
+ redirect_stdout((() -> optimize!(EP)), open("/dev/null", "w"))
[objective_value(EP)]
end
# Perform the method of morris analysis
- m = my_gsa(f1,p_steps,num_trajectory,total_num_trajectory,p_range,len_design_mat,groups,random)
+ m = my_gsa(f1,
+ p_steps,
+ num_trajectory,
+ total_num_trajectory,
+ p_range,
+ len_design_mat,
+ groups,
+ random)
println(m.means)
println(DataFrame(m.means', :auto))
#save the mean effect of each uncertain variable on the objective function
- Morris_range[!,:mean] = DataFrame(m.means', :auto)[!,:x1]
+ Morris_range[!, :mean] = DataFrame(m.means', :auto)[!, :x1]
println(DataFrame(m.variances', :auto))
#save the variance of effect of each uncertain variable on the objective function
- Morris_range[!,:variance] = DataFrame(m.variances', :auto)[!,:x1]
+ Morris_range[!, :variance] = DataFrame(m.variances', :auto)[!, :x1]
CSV.write(joinpath(outpath, "morris.csv"), Morris_range)
return Morris_range
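
To make the elementary-effect statistics used above more concrete, here is a self-contained toy sketch (not GenX code; the function and step size are invented) of the quantities a `MorrisResult` collects: the signed mean, the mean of absolute effects, and the variance per input.

```julia
using Statistics, Random

# Toy model: the "system cost" responds linearly to x[1] and nonlinearly to x[2].
f(x) = 3 * x[1] + x[2]^2

Random.seed!(1234)
num_traj, delta = 20, 0.1
effects = [Float64[] for _ in 1:2]            # elementary effects, one list per input

for _ in 1:num_traj
    x = rand(2)                               # random starting point of a trajectory
    for k in 1:2
        x_step = copy(x)
        x_step[k] += delta                    # move one factor at a time
        push!(effects[k], (f(x_step) - f(x)) / delta)
    end
end

means      = [mean(e) for e in effects]       # direction of influence
means_star = [mean(abs.(e)) for e in effects] # overall importance
variances  = [var(e) for e in effects]        # interactions / nonlinearity
```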
diff --git a/src/additional_tools/modeling_to_generate_alternatives.jl b/src/additional_tools/modeling_to_generate_alternatives.jl
index 97400b54b1..ba0fec33e3 100644
--- a/src/additional_tools/modeling_to_generate_alternatives.jl
+++ b/src/additional_tools/modeling_to_generate_alternatives.jl
@@ -19,95 +19,101 @@ To create the MGA formulation, we replace the cost-minimizing objective function
where $\beta_{zr}$ is a random objective function coefficient between $[0,100]$ for MGA iteration $k$. $\Theta_{y,t,z,r}$ is the generation of technology $y$ in zone $z$ in time period $t$ that belongs to resource type $r$. We aggregate $\Theta_{y,t,z,r}$ into a new variable $P_{z,r}$ that represents the total generation from technology type $r$ in zone $z$. In the second constraint above, $\delta$ denotes the increase in budget from the least-cost solution and $f$ represents the expression for the total system cost. The constraint $Ax = b$ represents all other constraints in the power system model. We then solve the formulation with both minimization and maximization objectives to explore the near-optimal solution space.
"""
function mga(EP::Model, path::AbstractString, setup::Dict, inputs::Dict)
-
- if setup["ModelingToGenerateAlternatives"]==1
+ if setup["ModelingToGenerateAlternatives"] == 1
# Start MGA Algorithm
- println("MGA Module")
+ println("MGA Module")
- # Objective function value of the least cost problem
- Least_System_Cost = objective_value(EP)
+ # Objective function value of the least cost problem
+ Least_System_Cost = objective_value(EP)
- # Read sets
- gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zonests
- zones = unique(inputs["R_ZONES"])
+ # Read sets
+ gen = inputs["RESOURCES"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ zones = unique(inputs["R_ZONES"])
- # Create a set of unique technology types
- resources_with_mga = gen[ids_with_mga(gen)]
- TechTypes = unique(resource_type_mga.(resources_with_mga))
+ # Create a set of unique technology types
+ resources_with_mga = gen[ids_with_mga(gen)]
+ TechTypes = unique(resource_type_mga.(resources_with_mga))
- # Read slack parameter representing desired increase in budget from the least cost solution
- slack = setup["ModelingtoGenerateAlternativeSlack"]
+ # Read slack parameter representing desired increase in budget from the least cost solution
+ slack = setup["ModelingtoGenerateAlternativeSlack"]
- ### Variables ###
+ ### Variables ###
- @variable(EP, vSumvP[TechTypes = 1:length(TechTypes), z = 1:Z] >= 0) # Variable denoting total generation from eligible technology of a given type
+ @variable(EP, vSumvP[TechTypes = 1:length(TechTypes), z = 1:Z]>=0) # Variable denoting total generation from eligible technology of a given type
- ### End Variables ###
+ ### End Variables ###
- ### Constraints ###
+ ### Constraints ###
- # Constraint to set budget for MGA iterations
- @constraint(EP, budget, EP[:eObj] <= Least_System_Cost * (1 + slack) )
+ # Constraint to set budget for MGA iterations
+ @constraint(EP, budget, EP[:eObj]<=Least_System_Cost * (1 + slack))
# Constraint to compute total generation in each zone from a given Technology Type
- function resource_in_zone_with_TechType(tt::Int64, z::Int64)
- condition::BitVector = (resource_type_mga.(gen) .== TechTypes[tt]) .& (zone_id.(gen) .== z)
- return resource_id.(gen[condition])
- end
- @constraint(EP,cGeneration[tt = 1:length(TechTypes), z = 1:Z], vSumvP[tt,z] == sum(EP[:vP][y,t] * inputs["omega"][t] for y in resource_in_zone_with_TechType(tt,z), t in 1:T))
-
- ### End Constraints ###
-
- ### Create Results Directory for MGA iterations
+ function resource_in_zone_with_TechType(tt::Int64, z::Int64)
+ condition::BitVector = (resource_type_mga.(gen) .== TechTypes[tt]) .&
+ (zone_id.(gen) .== z)
+ return resource_id.(gen[condition])
+ end
+ @constraint(EP,
+ cGeneration[tt = 1:length(TechTypes), z = 1:Z],
+ vSumvP[tt,
+ z]==sum(EP[:vP][y, t] * inputs["omega"][t]
+ for y in resource_in_zone_with_TechType(tt, z), t in 1:T))
+
+ ### End Constraints ###
+
+ ### Create Results Directory for MGA iterations
outpath_max = joinpath(path, "MGAResults_max")
- if !(isdir(outpath_max))
- mkdir(outpath_max)
- end
+ if !(isdir(outpath_max))
+ mkdir(outpath_max)
+ end
outpath_min = joinpath(path, "MGAResults_min")
- if !(isdir(outpath_min))
- mkdir(outpath_min)
- end
+ if !(isdir(outpath_min))
+ mkdir(outpath_min)
+ end
- ### Begin MGA iterations for maximization and minimization objective ###
- mga_start_time = time()
+ ### Begin MGA iterations for maximization and minimization objective ###
+ mga_start_time = time()
- print("Starting the first MGA iteration")
+ print("Starting the first MGA iteration")
- for i in 1:setup["ModelingToGenerateAlternativeIterations"]
+ for i in 1:setup["ModelingToGenerateAlternativeIterations"]
- # Create random coefficients for the generators that we want to include in the MGA run for the given budget
- pRand = rand(length(TechTypes),length(zones))
+ # Create random coefficients for the generators that we want to include in the MGA run for the given budget
+ pRand = rand(length(TechTypes), length(zones))
- ### Maximization objective
- @objective(EP, Max, sum(pRand[tt,z] * vSumvP[tt,z] for tt in 1:length(TechTypes), z in 1:Z ))
+ ### Maximization objective
+ @objective(EP,
+ Max,
+ sum(pRand[tt, z] * vSumvP[tt, z] for tt in 1:length(TechTypes), z in 1:Z))
- # Solve Model Iteration
- status = optimize!(EP)
+ # Solve Model Iteration
+ status = optimize!(EP)
# Create path for saving MGA iterations
- mgaoutpath_max = joinpath(outpath_max, string("MGA", "_", slack,"_", i))
+ mgaoutpath_max = joinpath(outpath_max, string("MGA", "_", slack, "_", i))
- # Write results
- write_outputs(EP, mgaoutpath_max, setup, inputs)
+ # Write results
+ write_outputs(EP, mgaoutpath_max, setup, inputs)
- ### Minimization objective
- @objective(EP, Min, sum(pRand[tt,z] * vSumvP[tt,z] for tt in 1:length(TechTypes), z in 1:Z ))
+ ### Minimization objective
+ @objective(EP,
+ Min,
+ sum(pRand[tt, z] * vSumvP[tt, z] for tt in 1:length(TechTypes), z in 1:Z))
- # Solve Model Iteration
- status = optimize!(EP)
+ # Solve Model Iteration
+ status = optimize!(EP)
# Create path for saving MGA iterations
- mgaoutpath_min = joinpath(outpath_min, string("MGA", "_", slack,"_", i))
-
- # Write results
- write_outputs(EP, mgaoutpath_min, setup, inputs)
-
- end
+ mgaoutpath_min = joinpath(outpath_min, string("MGA", "_", slack, "_", i))
- total_time = time() - mga_start_time
- ### End MGA Iterations ###
- end
+ # Write results
+ write_outputs(EP, mgaoutpath_min, setup, inputs)
+ end
+ total_time = time() - mga_start_time
+ ### End MGA Iterations ###
+ end
end
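
A minimal, hedged JuMP illustration of the MGA budget idea described in the docstring above, using a two-variable toy problem rather than a GenX model (HiGHS and all numbers here are assumptions):

```julia
using JuMP, HiGHS, Random

Random.seed!(1)
model = Model(HiGHS.Optimizer)
set_silent(model)
@variable(model, 0 <= x[1:2] <= 10)
@constraint(model, x[1] + x[2] >= 4)
@expression(model, cost, 3 * x[1] + 2 * x[2])
@objective(model, Min, cost)
optimize!(model)

least_system_cost = objective_value(model)
slack = 0.1   # allowed increase over the least-cost objective

# Budget constraint: any alternative must stay within (1 + slack) of the least cost.
@constraint(model, budget, cost <= least_system_cost * (1 + slack))

# One MGA iteration: maximize a random objective over the decision variables.
beta = rand(2)
@objective(model, Max, sum(beta[i] * x[i] for i in 1:2))
optimize!(model)
```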
diff --git a/src/case_runners/case_runner.jl b/src/case_runners/case_runner.jl
index bbdaae4f53..f5725e7bfc 100644
--- a/src/case_runners/case_runner.jl
+++ b/src/case_runners/case_runner.jl
@@ -28,7 +28,7 @@ run_genx_case!("path/to/case", HiGHS.Optimizer)
run_genx_case!("path/to/case", Gurobi.Optimizer)
```
"""
-function run_genx_case!(case::AbstractString, optimizer::Any=HiGHS.Optimizer)
+function run_genx_case!(case::AbstractString, optimizer::Any = HiGHS.Optimizer)
genx_settings = get_settings_path(case, "genx_settings.yml") # Settings YAML file path
writeoutput_settings = get_settings_path(case, "output_settings.yml") # Write-output settings YAML file path
mysetup = configure_settings(genx_settings, writeoutput_settings) # mysetup dictionary stores settings and GenX-specific parameters
@@ -86,7 +86,10 @@ function run_genx_case_simple!(case::AbstractString, mysetup::Dict, optimizer::A
if has_values(EP)
println("Writing Output")
outputs_path = get_default_output_folder(case)
- elapsed_time = @elapsed outputs_path = write_outputs(EP, outputs_path, mysetup, myinputs)
+ elapsed_time = @elapsed outputs_path = write_outputs(EP,
+ outputs_path,
+ mysetup,
+ myinputs)
println("Time elapsed for writing is")
println(elapsed_time)
if mysetup["ModelingToGenerateAlternatives"] == 1
@@ -101,7 +104,6 @@ function run_genx_case_simple!(case::AbstractString, mysetup::Dict, optimizer::A
end
end
-
function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimizer::Any)
settings_path = get_settings_path(case)
multistage_settings = get_settings_path(case, "multi_stage_settings.yml") # Multi stage settings YAML file path
@@ -111,13 +113,14 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
if mysetup["TimeDomainReduction"] == 1
tdr_settings = get_settings_path(case, "time_domain_reduction_settings.yml") # Multi stage settings YAML file path
TDRSettingsDict = YAML.load(open(tdr_settings))
-
+
first_stage_path = joinpath(case, "inputs", "inputs_p1")
TDRpath = joinpath(first_stage_path, mysetup["TimeDomainReductionFolder"])
system_path = joinpath(first_stage_path, mysetup["SystemFolder"])
prevent_doubled_timedomainreduction(system_path)
if !time_domain_reduced_files_exist(TDRpath)
- if (mysetup["MultiStage"] == 1) && (TDRSettingsDict["MultiStageConcatenate"] == 0)
+ if (mysetup["MultiStage"] == 1) &&
+ (TDRSettingsDict["MultiStageConcatenate"] == 0)
println("Clustering Time Series Data (Individually)...")
for stage_id in 1:mysetup["MultiStageSettingsDict"]["NumStages"]
cluster_inputs(case, settings_path, mysetup, stage_id)
@@ -135,8 +138,8 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
println("Configuring Solver")
OPTIMIZER = configure_solver(settings_path, optimizer)
- model_dict=Dict()
- inputs_dict=Dict()
+ model_dict = Dict()
+ inputs_dict = Dict()
for t in 1:mysetup["MultiStageSettingsDict"]["NumStages"]
@@ -144,17 +147,18 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
mysetup["MultiStageSettingsDict"]["CurStage"] = t
# Step 1) Load Inputs
- inpath_sub = joinpath(case, "inputs", string("inputs_p",t))
+ inpath_sub = joinpath(case, "inputs", string("inputs_p", t))
inputs_dict[t] = load_inputs(mysetup, inpath_sub)
- inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],mysetup["MultiStageSettingsDict"],mysetup["NetworkExpansion"])
+ inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],
+ mysetup["MultiStageSettingsDict"],
+ mysetup["NetworkExpansion"])
- compute_cumulative_min_retirements!(inputs_dict,t)
+ compute_cumulative_min_retirements!(inputs_dict, t)
# Step 2) Generate model
model_dict[t] = generate_model(mysetup, inputs_dict[t], OPTIMIZER)
end
-
### Solve model
println("Solving Model")
@@ -187,4 +191,3 @@ function run_genx_case_multistage!(case::AbstractString, mysetup::Dict, optimize
write_multi_stage_outputs(mystats_d, outpath, mysetup, inputs_dict)
end
-
diff --git a/src/configure_settings/configure_settings.jl b/src/configure_settings/configure_settings.jl
index 8706c20b96..d37107f256 100644
--- a/src/configure_settings/configure_settings.jl
+++ b/src/configure_settings/configure_settings.jl
@@ -1,6 +1,5 @@
function default_settings()
- Dict{Any,Any}(
- "PrintModel" => 0,
+ Dict{Any, Any}("PrintModel" => 0,
"OverwriteResults" => 0,
"NetworkExpansion" => 0,
"Trans_Loss_Segments" => 1,
@@ -32,8 +31,7 @@ function default_settings()
"ResourcePoliciesFolder" => "policy_assignments",
"SystemFolder" => "system",
"PoliciesFolder" => "policies",
- "ObjScale" => 1,
- )
+ "ObjScale" => 1)
end
@doc raw"""
@@ -64,7 +62,7 @@ function configure_settings(settings_path::String, output_settings_path::String)
return settings
end
-function validate_settings!(settings::Dict{Any,Any})
+function validate_settings!(settings::Dict{Any, Any})
# Check for any settings combinations that are not allowed.
# If we find any then make a response and issue a note to the user.
@@ -81,20 +79,18 @@ function validate_settings!(settings::Dict{Any,Any})
if haskey(settings, "Reserves")
Base.depwarn("""The Reserves setting has been deprecated. Please use the
- OperationalReserves setting instead.""", :validate_settings!, force=true)
+ OperationalReserves setting instead.""", :validate_settings!, force = true)
settings["OperationalReserves"] = settings["Reserves"]
delete!(settings, "Reserves")
end
- if settings["EnableJuMPStringNames"]==0 && settings["ComputeConflicts"]==1
- settings["EnableJuMPStringNames"]=1;
+ if settings["EnableJuMPStringNames"] == 0 && settings["ComputeConflicts"] == 1
+ settings["EnableJuMPStringNames"] = 1
end
-
end
function default_writeoutput()
- Dict{String,Bool}(
- "WriteCosts" => true,
+ Dict{String, Bool}("WriteCosts" => true,
"WriteCapacity" => true,
"WriteCapacityValue" => true,
"WriteCapacityFactor" => true,
@@ -140,12 +136,10 @@ function default_writeoutput()
"WriteTransmissionLosses" => true,
"WriteVirtualDischarge" => true,
"WriteVREStor" => true,
- "WriteAngles" => true
- )
+ "WriteAngles" => true)
end
function configure_writeoutput(output_settings_path::String, settings::Dict)
-
writeoutput = default_writeoutput()
# don't write files with hourly data if settings["WriteOutputs"] == "annual"
@@ -169,4 +163,4 @@ function configure_writeoutput(output_settings_path::String, settings::Dict)
merge!(writeoutput, model_writeoutput)
end
return writeoutput
-end
\ No newline at end of file
+end
diff --git a/src/configure_solver/configure_cbc.jl b/src/configure_solver/configure_cbc.jl
index afa3f41727..0379fbd43c 100644
--- a/src/configure_solver/configure_cbc.jl
+++ b/src/configure_solver/configure_cbc.jl
@@ -17,26 +17,23 @@ The Cbc optimizer instance is configured with the following default parameters i
"""
function configure_cbc(solver_settings_path::String, optimizer::Any)
-
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
default_settings = Dict("TimeLimit" => 1e-6,
- "logLevel" => 1e-6,
- "maxSolutions" => -1,
- "maxNodes" => -1,
- "allowableGap" => -1,
- "ratioGap" => Inf,
- "threads" => 1,
- )
+ "logLevel" => 1e-6,
+ "maxSolutions" => -1,
+ "maxNodes" => -1,
+ "allowableGap" => -1,
+ "ratioGap" => Inf,
+ "threads" => 1)
attributes = merge(default_settings, solver_settings)
- key_replacement = Dict("TimeLimit" => "seconds",
- )
+ key_replacement = Dict("TimeLimit" => "seconds")
attributes = rename_keys(attributes, key_replacement)
attributes::Dict{String, Any}
- return optimizer_with_attributes(optimizer, attributes...)
+ return optimizer_with_attributes(optimizer, attributes...)
end
diff --git a/src/configure_solver/configure_clp.jl b/src/configure_solver/configure_clp.jl
index 2da048e989..cd5af6e42d 100644
--- a/src/configure_solver/configure_clp.jl
+++ b/src/configure_solver/configure_clp.jl
@@ -21,12 +21,10 @@ The Clp optimizer instance is configured with the following default parameters i
"""
function configure_clp(solver_settings_path::String, optimizer::Any)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
-
- default_settings = Dict{String,Any}(
- "Feasib_Tol" => 1e-7,
+ default_settings = Dict{String, Any}("Feasib_Tol" => 1e-7,
"DualObjectiveLimit" => 1e308,
"MaximumIterations" => 2147483647,
"TimeLimit" => -1.0,
@@ -35,16 +33,14 @@ function configure_clp(solver_settings_path::String, optimizer::Any)
"Method" => 5,
"InfeasibleReturn" => 0,
"Scaling" => 3,
- "Perturbation" => 100,
- )
+ "Perturbation" => 100)
attributes = merge(default_settings, solver_settings)
key_replacement = Dict("Feasib_Tol" => "PrimalTolerance",
- "TimeLimit" => "MaximumSeconds",
- "Pre_Solve" => "PresolveType",
- "Method" => "SolveType",
- )
+ "TimeLimit" => "MaximumSeconds",
+ "Pre_Solve" => "PresolveType",
+ "Method" => "SolveType")
attributes = rename_keys(attributes, key_replacement)
@@ -53,5 +49,5 @@ function configure_clp(solver_settings_path::String, optimizer::Any)
attributes["DualTolerance"] = attributes["PrimalTolerance"]
attributes::Dict{String, Any}
- return optimizer_with_attributes(optimizer, attributes...)
+ return optimizer_with_attributes(optimizer, attributes...)
end
diff --git a/src/configure_solver/configure_cplex.jl b/src/configure_solver/configure_cplex.jl
index fe860fb67d..b320857217 100644
--- a/src/configure_solver/configure_cplex.jl
+++ b/src/configure_solver/configure_cplex.jl
@@ -78,40 +78,35 @@ The optimizer instance is configured with the following default parameters if a
Any other attributes in the settings file (which typically start with `CPX_PARAM_`) will also be passed to the solver.
"""
function configure_cplex(solver_settings_path::String, optimizer::Any)
-
solver_settings = YAML.load(open(solver_settings_path))
solver_settings = convert(Dict{String, Any}, solver_settings)
default_settings = Dict("Feasib_Tol" => 1e-6,
- "Optimal_Tol" => 1e-4,
- "AggFill" => 10,
- "PreDual" => 0,
- "TimeLimit" => 1e+75,
- "MIPGap" => 1e-3,
- "Method" => 0,
- "BarConvTol" => 1e-8,
- "NumericFocus" => 0,
- "BarObjRng" => 1e+75,
- "SolutionType" => 2,
- )
-
+ "Optimal_Tol" => 1e-4,
+ "AggFill" => 10,
+ "PreDual" => 0,
+ "TimeLimit" => 1e+75,
+ "MIPGap" => 1e-3,
+ "Method" => 0,
+ "BarConvTol" => 1e-8,
+ "NumericFocus" => 0,
+ "BarObjRng" => 1e+75,
+ "SolutionType" => 2)
attributes = merge(default_settings, solver_settings)
- key_replacement = Dict(
- "Feasib_Tol" => "CPX_PARAM_EPRHS",
- "Optimal_Tol" => "CPX_PARAM_EPOPT",
- "AggFill" => "CPX_PARAM_AGGFILL",
- "PreDual" => "CPX_PARAM_PREDUAL",
- "TimeLimit" => "CPX_PARAM_TILIM",
- "MIPGap" => "CPX_PARAM_EPGAP",
- "Method" => "CPX_PARAM_LPMETHOD",
- "Pre_Solve" => "CPX_PARAM_PREIND", # https://www.ibm.com/docs/en/icos/12.8.0.0?topic=parameters-presolve-switch
- "BarConvTol" => "CPX_PARAM_BAREPCOMP",
- "NumericFocus" => "CPX_PARAM_NUMERICALEMPHASIS",
- "BarObjRng" => "CPX_PARAM_BAROBJRNG",
- "SolutionType" => "CPX_PARAM_SOLUTIONTYPE",
- )
+ key_replacement = Dict("Feasib_Tol" => "CPX_PARAM_EPRHS",
+ "Optimal_Tol" => "CPX_PARAM_EPOPT",
+ "AggFill" => "CPX_PARAM_AGGFILL",
+ "PreDual" => "CPX_PARAM_PREDUAL",
+ "TimeLimit" => "CPX_PARAM_TILIM",
+ "MIPGap" => "CPX_PARAM_EPGAP",
+ "Method" => "CPX_PARAM_LPMETHOD",
+ "Pre_Solve" => "CPX_PARAM_PREIND", # https://www.ibm.com/docs/en/icos/12.8.0.0?topic=parameters-presolve-switch
+ "BarConvTol" => "CPX_PARAM_BAREPCOMP",
+ "NumericFocus" => "CPX_PARAM_NUMERICALEMPHASIS",
+ "BarObjRng" => "CPX_PARAM_BAROBJRNG",
+ "SolutionType" => "CPX_PARAM_SOLUTIONTYPE")
attributes = rename_keys(attributes, key_replacement)
attributes::Dict{String, Any}
diff --git a/src/configure_solver/configure_gurobi.jl b/src/configure_solver/configure_gurobi.jl
index 2e5c8b7d39..00f132f34f 100644
--- a/src/configure_solver/configure_gurobi.jl
+++ b/src/configure_solver/configure_gurobi.jl
@@ -21,33 +21,30 @@ The Gurobi optimizer instance is configured with the following default parameter
"""
function configure_gurobi(solver_settings_path::String, optimizer::Any)
-
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
default_settings = Dict("Feasib_Tol" => 1e-6,
- "Optimal_Tol" => 1e-4,
- "Pre_Solve" => -1,
- "AggFill" => -1,
- "PreDual" => -1,
- "TimeLimit" => Inf,
- "MIPGap" => 1e-3,
- "Crossover" => -1,
- "Method" => -1,
- "BarConvTol" => 1e-8,
- "NumericFocus" => 0,
- "OutputFlag" => 1
- )
+ "Optimal_Tol" => 1e-4,
+ "Pre_Solve" => -1,
+ "AggFill" => -1,
+ "PreDual" => -1,
+ "TimeLimit" => Inf,
+ "MIPGap" => 1e-3,
+ "Crossover" => -1,
+ "Method" => -1,
+ "BarConvTol" => 1e-8,
+ "NumericFocus" => 0,
+ "OutputFlag" => 1)
attributes = merge(default_settings, solver_settings)
key_replacement = Dict("Feasib_Tol" => "FeasibilityTol",
- "Optimal_Tol" => "OptimalityTol",
- "Pre_Solve" => "Presolve",
- )
+ "Optimal_Tol" => "OptimalityTol",
+ "Pre_Solve" => "Presolve")
attributes = rename_keys(attributes, key_replacement)
attributes::Dict{String, Any}
- return optimizer_with_attributes(optimizer, attributes...)
+ return optimizer_with_attributes(optimizer, attributes...)
end
diff --git a/src/configure_solver/configure_highs.jl b/src/configure_solver/configure_highs.jl
index d0bab8e835..549395d8dc 100644
--- a/src/configure_solver/configure_highs.jl
+++ b/src/configure_solver/configure_highs.jl
@@ -33,30 +33,26 @@ The HiGHS optimizer instance is configured with the following default parameters
mip_abs_gap: 1e-06
"""
function configure_highs(solver_settings_path::String, optimizer::Any)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
-
- default_settings = Dict{String,Any}(
- "Feasib_Tol" => 1e-6,
+ default_settings = Dict{String, Any}("Feasib_Tol" => 1e-6,
"Optimal_Tol" => 1e-4,
"Pre_Solve" => "choose",
"TimeLimit" => Inf,
"Method" => "ipm",
"ipm_optimality_tolerance" => 1e-08,
- "run_crossover" => "off",
- "mip_rel_gap" => 0.001,
- "mip_abs_gap" => 1e-06,
- )
+ "run_crossover" => "off",
+ "mip_rel_gap" => 0.001,
+ "mip_abs_gap" => 1e-06)
attributes = merge(default_settings, solver_settings)
key_replacement = Dict("Feasib_Tol" => "primal_feasibility_tolerance",
- "Optimal_Tol" => "dual_feasibility_tolerance",
- "TimeLimit" => "time_limit",
- "Pre_Solve" => "presolve",
- "Method" => "solver",
- )
+ "Optimal_Tol" => "dual_feasibility_tolerance",
+ "TimeLimit" => "time_limit",
+ "Pre_Solve" => "presolve",
+ "Method" => "solver")
attributes = rename_keys(attributes, key_replacement)
diff --git a/src/configure_solver/configure_scip.jl b/src/configure_solver/configure_scip.jl
index 3609657d66..591d36eeb7 100644
--- a/src/configure_solver/configure_scip.jl
+++ b/src/configure_solver/configure_scip.jl
@@ -12,21 +12,18 @@ The SCIP optimizer instance is configured with the following default parameters
"""
function configure_scip(solver_settings_path::String, optimizer::Any)
-
- solver_settings = YAML.load(open(solver_settings_path))
- solver_settings = convert(Dict{String, Any}, solver_settings)
+ solver_settings = YAML.load(open(solver_settings_path))
+ solver_settings = convert(Dict{String, Any}, solver_settings)
default_settings = Dict("Dispverblevel" => 0,
- "limitsgap" => 0.05,
- )
+ "limitsgap" => 0.05)
attributes = merge(default_settings, solver_settings)
key_replacement = Dict("Dispverblevel" => "display_verblevel",
- "limitsgap" => "limits_gap",
- )
+ "limitsgap" => "limits_gap")
attributes = rename_keys(attributes, key_replacement)
attributes::Dict{String, Any}
- return optimizer_with_attributes(optimizer, attributes...)
+ return optimizer_with_attributes(optimizer, attributes...)
end
diff --git a/src/configure_solver/configure_solver.jl b/src/configure_solver/configure_solver.jl
index 96a8bb2e02..76cf01cca7 100644
--- a/src/configure_solver/configure_solver.jl
+++ b/src/configure_solver/configure_solver.jl
@@ -6,7 +6,6 @@ function infer_solver(optimizer::Any)
return lowercase(string(parentmodule(optimizer)))
end
-
@doc raw"""
configure_solver(solver_settings_path::String, optimizer::Any)
@@ -24,15 +23,13 @@ function configure_solver(solver_settings_path::String, optimizer::Any)
solver_name = infer_solver(optimizer)
path = joinpath(solver_settings_path, solver_name * "_settings.yml")
- configure_functions = Dict(
- "highs" => configure_highs,
+ configure_functions = Dict("highs" => configure_highs,
"gurobi" => configure_gurobi,
"cplex" => configure_cplex,
"clp" => configure_clp,
"cbc" => configure_cbc,
- "scip" => configure_scip,
- )
-
+ "scip" => configure_scip)
+
return configure_functions[solver_name](path, optimizer)
end
@@ -50,7 +47,8 @@ function rename_keys(attributes::Dict, new_key_names::Dict)
else
new_key = new_key_names[old_key]
if haskey(attributes, new_key)
- @error "Colliding keys: '$old_key' needs to be renamed to '$new_key' but '$new_key' already exists in", attributes
+ @error "Colliding keys: '$old_key' needs to be renamed to '$new_key' but '$new_key' already exists in",
+ attributes
end
end
updated_attributes[new_key] = value
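
For illustration, a standalone sketch (not the GenX implementation) of what the settings merge plus `rename_keys` pattern achieves, using the Gurobi name mapping shown earlier; the user override value is made up.

```julia
# GenX-style defaults, a hypothetical user override, and the Gurobi attribute names.
defaults = Dict("Feasib_Tol" => 1e-6, "Optimal_Tol" => 1e-4, "Pre_Solve" => -1)
user_settings = Dict("Feasib_Tol" => 1e-7)          # e.g. read from gurobi_settings.yml
key_map = Dict("Feasib_Tol" => "FeasibilityTol",
    "Optimal_Tol" => "OptimalityTol",
    "Pre_Solve" => "Presolve")

# Merge user overrides over defaults, then translate keys; unknown keys pass through.
attributes = Dict(get(key_map, k, k) => v for (k, v) in merge(defaults, user_settings))
# attributes now uses Gurobi names: "FeasibilityTol" => 1.0e-7, "OptimalityTol" => 1e-4, "Presolve" => -1
```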
diff --git a/src/load_inputs/load_cap_reserve_margin.jl b/src/load_inputs/load_cap_reserve_margin.jl
index 646385d078..0a652bc78f 100644
--- a/src/load_inputs/load_cap_reserve_margin.jl
+++ b/src/load_inputs/load_cap_reserve_margin.jl
@@ -5,12 +5,12 @@ Read input parameters related to planning reserve margin constraints
"""
function load_cap_reserve_margin!(setup::Dict, path::AbstractString, inputs::Dict)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
-
+
filename = "Capacity_reserve_margin_slack.csv"
if isfile(joinpath(path, filename))
df = load_dataframe(joinpath(path, filename))
inputs["dfCapRes_slack"] = df
- inputs["dfCapRes_slack"][!,:PriceCap] ./= scale_factor # Million $/GW if scaled, $/MW if not scaled
+ inputs["dfCapRes_slack"][!, :PriceCap] ./= scale_factor # Million $/GW if scaled, $/MW if not scaled
end
filename = "Capacity_reserve_margin.csv"
diff --git a/src/load_inputs/load_co2_cap.jl b/src/load_inputs/load_co2_cap.jl
index 0c93f3c199..08e6802a0a 100644
--- a/src/load_inputs/load_co2_cap.jl
+++ b/src/load_inputs/load_co2_cap.jl
@@ -5,14 +5,14 @@ Read input parameters related to CO$_2$ emissions cap constraints
"""
function load_co2_cap!(setup::Dict, path::AbstractString, inputs::Dict)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
-
+
filename = "CO2_cap_slack.csv"
if isfile(joinpath(path, filename))
df = load_dataframe(joinpath(path, filename))
inputs["dfCO2Cap_slack"] = df
- inputs["dfCO2Cap_slack"][!,:PriceCap] ./= scale_factor # Million $/kton if scaled, $/ton if not scaled
- end
-
+ inputs["dfCO2Cap_slack"][!, :PriceCap] ./= scale_factor # Million $/kton if scaled, $/ton if not scaled
+ end
+
filename = "CO2_cap.csv"
df = load_dataframe(joinpath(path, filename))
@@ -21,7 +21,6 @@ function load_co2_cap!(setup::Dict, path::AbstractString, inputs::Dict)
inputs["dfCO2CapZones"] = mat
inputs["NCO2Cap"] = size(mat, 2)
-
# Emission limits
if setup["CO2Cap"] == 1
# CO2 emissions cap in mass
diff --git a/src/load_inputs/load_dataframe.jl b/src/load_inputs/load_dataframe.jl
index b6ca5ef552..64aa112968 100644
--- a/src/load_inputs/load_dataframe.jl
+++ b/src/load_inputs/load_dataframe.jl
@@ -64,7 +64,9 @@ function load_dataframe(dir::AbstractString, basenames::Vector{String})::DataFra
target = look_for_file_with_alternate_case(dir, base)
# admonish
if target != FILENOTFOUND
- Base.depwarn("""The filename '$target' is deprecated. '$best_basename' is preferred.""", :load_dataframe, force=true)
+ Base.depwarn("""The filename '$target' is deprecated. '$best_basename' is preferred.""",
+ :load_dataframe,
+ force = true)
return load_dataframe_from_file(joinpath(dir, target))
end
end
@@ -107,7 +109,7 @@ end
function keep_duplicated_entries!(s, uniques)
for u in uniques
- deleteat!(s, first(findall(x->x==u, s)))
+ deleteat!(s, first(findall(x -> x == u, s)))
end
return s
end
@@ -126,23 +128,23 @@ end
function load_dataframe_from_file(path)::DataFrame
check_for_duplicate_keys(path)
- CSV.read(path, DataFrame, header=1)
+ CSV.read(path, DataFrame, header = 1)
end
function find_matrix_columns_in_dataframe(df::DataFrame,
- columnprefix::AbstractString;
- prefixseparator='_')::Vector{Int}
+ columnprefix::AbstractString;
+ prefixseparator = '_')::Vector{Int}
all_columns = names(df)
# 2 is the length of the '_' connector plus one for indexing
- get_integer_part(c) = tryparse(Int, c[length(columnprefix)+2:end])
+ get_integer_part(c) = tryparse(Int, c[(length(columnprefix) + 2):end])
# if prefix is "ESR", the column name should be like "ESR_1"
function is_of_this_column_type(c)
startswith(c, columnprefix) &&
- length(c) >= length(columnprefix) + 2 &&
- c[length(columnprefix) + 1] == prefixseparator &&
- !isnothing(get_integer_part(c))
+ length(c) >= length(columnprefix) + 2 &&
+ c[length(columnprefix) + 1] == prefixseparator &&
+ !isnothing(get_integer_part(c))
end
columns = filter(is_of_this_column_type, all_columns)
@@ -164,11 +166,13 @@ ESR_1, other_thing, ESR_3, ESR_2,
0.4, 2, 0.6, 0.5,
```
"""
-function extract_matrix_from_dataframe(df::DataFrame, columnprefix::AbstractString; prefixseparator='_')
+function extract_matrix_from_dataframe(df::DataFrame,
+ columnprefix::AbstractString;
+ prefixseparator = '_')
all_columns = names(df)
columnnumbers = find_matrix_columns_in_dataframe(df,
- columnprefix,
- prefixseparator=prefixseparator)
+ columnprefix,
+ prefixseparator = prefixseparator)
if length(columnnumbers) == 0
msg = """an input dataframe with columns $all_columns was searched for
@@ -188,10 +192,13 @@ function extract_matrix_from_dataframe(df::DataFrame, columnprefix::AbstractStri
Matrix(dropmissing(df[:, sorted_columns]))
end
-function extract_matrix_from_resources(rs::Vector{T}, columnprefix::AbstractString, default=0.0) where T<:AbstractResource
+function extract_matrix_from_resources(rs::Vector{T},
+ columnprefix::AbstractString,
+ default = 0.0) where {T <: AbstractResource}
# attributes starting with columnprefix with a numeric suffix
- attributes_n = [attr for attr in string.(attributes(rs[1])) if startswith(attr, columnprefix)]
+ attributes_n = [attr
+ for attr in string.(attributes(rs[1])) if startswith(attr, columnprefix)]
# sort the attributes by the numeric suffix
sort!(attributes_n, by = x -> parse(Int, split(x, "_")[end]))
@@ -216,7 +223,7 @@ Check that the dataframe has all the required columns.
- `df_name::AbstractString`: the name of the dataframe, for error messages
- `required_cols::Vector{AbstractString}`: the names of the required columns
"""
-function validate_df_cols(df::DataFrame, df_name::AbstractString, required_cols)
+function validate_df_cols(df::DataFrame, df_name::AbstractString, required_cols)
for col in required_cols
if col ∉ names(df)
error("$df_name data file is missing column $col")
diff --git a/src/load_inputs/load_demand_data.jl b/src/load_inputs/load_demand_data.jl
index 509d0216bb..4c0e8a0319 100644
--- a/src/load_inputs/load_demand_data.jl
+++ b/src/load_inputs/load_demand_data.jl
@@ -3,14 +3,16 @@ function get_demand_dataframe(path)
deprecated_synonym = "Load_data.csv"
df = load_dataframe(path, [filename, deprecated_synonym])
# update column names
- old_columns = find_matrix_columns_in_dataframe(df, DEMAND_COLUMN_PREFIX_DEPRECATED()[1:end-1],
- prefixseparator='z')
- old_column_symbols = Symbol.(DEMAND_COLUMN_PREFIX_DEPRECATED()*string(i) for i in old_columns)
+ old_columns = find_matrix_columns_in_dataframe(df,
+ DEMAND_COLUMN_PREFIX_DEPRECATED()[1:(end - 1)],
+ prefixseparator = 'z')
+ old_column_symbols = Symbol.(DEMAND_COLUMN_PREFIX_DEPRECATED() * string(i)
+ for i in old_columns)
if length(old_column_symbols) > 0
pref_prefix = DEMAND_COLUMN_PREFIX()
dep_prefix = DEMAND_COLUMN_PREFIX_DEPRECATED()
@info "$dep_prefix is deprecated. Use $pref_prefix."
- new_column_symbols = Symbol.(DEMAND_COLUMN_PREFIX()*string(i) for i in old_columns)
+ new_column_symbols = Symbol.(DEMAND_COLUMN_PREFIX() * string(i) for i in old_columns)
rename!(df, Dict(old_column_symbols .=> new_column_symbols))
end
return df
@@ -26,7 +28,7 @@ Read input parameters related to electricity demand (load)
"""
function load_demand_data!(setup::Dict, path::AbstractString, inputs::Dict)
- # Load related inputs
+ # Load related inputs
TDR_directory = joinpath(path, setup["TimeDomainReductionFolder"])
# if TDR is used, my_dir = TDR_directory, else my_dir = "system"
my_dir = get_systemfiles_path(setup, TDR_directory, path)
@@ -35,17 +37,17 @@ function load_demand_data!(setup::Dict, path::AbstractString, inputs::Dict)
as_vector(col::Symbol) = collect(skipmissing(demand_in[!, col]))
- # Number of time steps (periods)
+ # Number of time steps (periods)
T = length(as_vector(:Time_Index))
- # Number of demand curtailment/lost load segments
+ # Number of demand curtailment/lost load segments
SEG = length(as_vector(:Demand_Segment))
- ## Set indices for internal use
+ ## Set indices for internal use
inputs["T"] = T
inputs["SEG"] = SEG
- Z = inputs["Z"] # Number of zones
+ Z = inputs["Z"] # Number of zones
- inputs["omega"] = zeros(Float64, T) # weights associated with operational sub-period in the model - sum of weight = 8760
+ inputs["omega"] = zeros(Float64, T) # weights associated with operational sub-period in the model - sum of weight = 8760
# Weights for each period - assumed same weights for each sub-period within a period
inputs["Weights"] = as_vector(:Sub_Weights) # Weights each period
@@ -56,30 +58,31 @@ function load_demand_data!(setup::Dict, path::AbstractString, inputs::Dict)
# Creating sub-period weights from weekly weights
for w in 1:inputs["REP_PERIOD"]
for h in 1:inputs["H"]
- t = inputs["H"]*(w-1)+h
- inputs["omega"][t] = inputs["Weights"][w]/inputs["H"]
+ t = inputs["H"] * (w - 1) + h
+ inputs["omega"][t] = inputs["Weights"][w] / inputs["H"]
end
end
- # Create time set steps indicies
- inputs["hours_per_subperiod"] = div.(T,inputs["REP_PERIOD"]) # total number of hours per subperiod
- hours_per_subperiod = inputs["hours_per_subperiod"] # set value for internal use
+    # Create time set steps indices
+ inputs["hours_per_subperiod"] = div.(T, inputs["REP_PERIOD"]) # total number of hours per subperiod
+ hours_per_subperiod = inputs["hours_per_subperiod"] # set value for internal use
- inputs["START_SUBPERIODS"] = 1:hours_per_subperiod:T # set of indexes for all time periods that start a subperiod (e.g. sample day/week)
- inputs["INTERIOR_SUBPERIODS"] = setdiff(1:T, inputs["START_SUBPERIODS"]) # set of indexes for all time periods that do not start a subperiod
+ inputs["START_SUBPERIODS"] = 1:hours_per_subperiod:T # set of indexes for all time periods that start a subperiod (e.g. sample day/week)
+ inputs["INTERIOR_SUBPERIODS"] = setdiff(1:T, inputs["START_SUBPERIODS"]) # set of indexes for all time periods that do not start a subperiod
- # Demand in MW for each zone
+ # Demand in MW for each zone
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
# Max value of non-served energy
inputs["Voll"] = as_vector(:Voll) / scale_factor # convert from $/MWh $ million/GWh (assuming objective is divided by 1000)
# Demand in MW
inputs["pD"] = extract_matrix_from_dataframe(demand_in,
- DEMAND_COLUMN_PREFIX()[1:end-1],
- prefixseparator='z') / scale_factor
+ DEMAND_COLUMN_PREFIX()[1:(end - 1)],
+ prefixseparator = 'z') / scale_factor
- # Cost of non-served energy/demand curtailment
+ # Cost of non-served energy/demand curtailment
# Cost of each segment reported as a fraction of value of non-served energy - scaled implicitly
- inputs["pC_D_Curtail"] = as_vector(:Cost_of_Demand_Curtailment_per_MW) * inputs["Voll"][1]
+ inputs["pC_D_Curtail"] = as_vector(:Cost_of_Demand_Curtailment_per_MW) *
+ inputs["Voll"][1]
# Maximum hourly demand curtailable as % of the max demand (for each segment)
inputs["pMax_D_Curtail"] = as_vector(:Max_Demand_Curtailment)
@@ -106,13 +109,13 @@ function validatetimebasis(inputs::Dict)
expected_length_2 = H * number_of_representative_periods
check_equal = [T,
- demand_length,
- generators_variability_length,
- fuel_costs_length,
- expected_length_1,
- expected_length_2]
+ demand_length,
+ generators_variability_length,
+ fuel_costs_length,
+ expected_length_1,
+ expected_length_2]
- allequal(x) = all(y->y==x[1], x)
+ allequal(x) = all(y -> y == x[1], x)
ok = allequal(check_equal)
if ~ok
@@ -160,7 +163,6 @@ This function prevents TimeDomainReduction from running on a case which
already has more than one Representative Period or has more than one Sub_Weight specified.
"""
function prevent_doubled_timedomainreduction(path::AbstractString)
-
demand_in = get_demand_dataframe(path)
as_vector(col::Symbol) = collect(skipmissing(demand_in[!, col]))
representative_periods = convert(Int16, as_vector(:Rep_Periods)[1])
@@ -174,5 +176,4 @@ function prevent_doubled_timedomainreduction(path::AbstractString)
and the number of subperiod weight entries (:Sub_Weights) is ($num_sub_weights).
Each of these must be 1: only a single period can have TimeDomainReduction applied.""")
end
-
end
diff --git a/src/load_inputs/load_energy_share_requirement.jl b/src/load_inputs/load_energy_share_requirement.jl
index af6ef9b786..02b96fe7e7 100644
--- a/src/load_inputs/load_energy_share_requirement.jl
+++ b/src/load_inputs/load_energy_share_requirement.jl
@@ -11,9 +11,9 @@ function load_energy_share_requirement!(setup::Dict, path::AbstractString, input
if isfile(joinpath(path, filename))
df = load_dataframe(joinpath(path, filename))
inputs["dfESR_slack"] = df
- inputs["dfESR_slack"][!,:PriceCap] ./= scale_factor # million $/GWh if scaled, $/MWh if not scaled
- end
-
+ inputs["dfESR_slack"][!, :PriceCap] ./= scale_factor # million $/GWh if scaled, $/MWh if not scaled
+ end
+
filename = "Energy_share_requirement.csv"
df = load_dataframe(joinpath(path, filename))
mat = extract_matrix_from_dataframe(df, "ESR")
diff --git a/src/load_inputs/load_fuels_data.jl b/src/load_inputs/load_fuels_data.jl
index aa64ff43fa..61b0ff2f0f 100644
--- a/src/load_inputs/load_fuels_data.jl
+++ b/src/load_inputs/load_fuels_data.jl
@@ -9,7 +9,7 @@ function load_fuels_data!(setup::Dict, path::AbstractString, inputs::Dict)
TDR_directory = joinpath(path, setup["TimeDomainReductionFolder"])
# if TDR is used, my_dir = TDR_directory, else my_dir = "system"
my_dir = get_systemfiles_path(setup, TDR_directory, path)
-
+
filename = "Fuels_data.csv"
fuels_in = load_dataframe(joinpath(my_dir, filename))
@@ -26,11 +26,11 @@ function load_fuels_data!(setup::Dict, path::AbstractString, inputs::Dict)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- for i = 1:length(fuels)
- # fuel cost is in $/MMBTU w/o scaling, $/Billon BTU w/ scaling
- fuel_costs[fuels[i]] = costs[:,i] / scale_factor
- # No need to scale fuel_CO2, fuel_CO2 is ton/MMBTU or kton/Billion BTU
- fuel_CO2[fuels[i]] = CO2_content[i]
+ for i in 1:length(fuels)
+        # fuel cost is in $/MMBTU w/o scaling, $/Billion BTU w/ scaling
+ fuel_costs[fuels[i]] = costs[:, i] / scale_factor
+ # No need to scale fuel_CO2, fuel_CO2 is ton/MMBTU or kton/Billion BTU
+ fuel_CO2[fuels[i]] = CO2_content[i]
end
inputs["fuels"] = fuels
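A hedged sketch of the structures built above, keyed by fuel name; the fuel names, prices, and CO2 contents are illustrative only:

    fuels = ["natural_gas", "coal"]
    # hourly fuel prices, one column per fuel ($/MMBTU)
    costs = [3.5 2.0;
             3.6 2.1]
    CO2_content = [0.05306, 0.09552]         # ton CO2 per MMBTU (illustrative)
    scale_factor = 1.0                       # ModelScalingFactor when ParameterScale == 1
    fuel_costs = Dict()
    fuel_CO2 = Dict()
    for i in 1:length(fuels)
        fuel_costs[fuels[i]] = costs[:, i] / scale_factor
        fuel_CO2[fuels[i]] = CO2_content[i]
    end
    fuel_costs["natural_gas"]                # 2-hour price series: [3.5, 3.6]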
diff --git a/src/load_inputs/load_generators_variability.jl b/src/load_inputs/load_generators_variability.jl
index 1ca02162ec..99294bffed 100644
--- a/src/load_inputs/load_generators_variability.jl
+++ b/src/load_inputs/load_generators_variability.jl
@@ -5,11 +5,11 @@ Read input parameters related to hourly maximum capacity factors for generators,
"""
function load_generators_variability!(setup::Dict, path::AbstractString, inputs::Dict)
- # Hourly capacity factors
+ # Hourly capacity factors
TDR_directory = joinpath(path, setup["TimeDomainReductionFolder"])
# if TDR is used, my_dir = TDR_directory, else my_dir = "system"
my_dir = get_systemfiles_path(setup, TDR_directory, path)
-
+
filename = "Generators_variability.csv"
gen_var = load_dataframe(joinpath(my_dir, filename))
@@ -23,11 +23,12 @@ function load_generators_variability!(setup::Dict, path::AbstractString, inputs:
end
end
- # Reorder DataFrame to R_ID order
- select!(gen_var, [:Time_Index; Symbol.(all_resources) ])
+ # Reorder DataFrame to R_ID order
+ select!(gen_var, [:Time_Index; Symbol.(all_resources)])
- # Maximum power output and variability of each energy resource
- inputs["pP_Max"] = transpose(Matrix{Float64}(gen_var[1:inputs["T"],2:(inputs["G"]+1)]))
+ # Maximum power output and variability of each energy resource
+ inputs["pP_Max"] = transpose(Matrix{Float64}(gen_var[1:inputs["T"],
+ 2:(inputs["G"] + 1)]))
- println(filename * " Successfully Read!")
+ println(filename * " Successfully Read!")
end
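A hedged sketch of the pP_Max layout produced above: after the transpose, rows are resources in R_ID order and columns are time steps. Resource names and capacity factors here are hypothetical:

    using DataFrames
    T, G = 4, 2
    gen_var = DataFrame(Time_Index = 1:T,
        solar_pv = [0.0, 0.4, 0.7, 0.2],
        onshore_wind = [0.5, 0.3, 0.6, 0.8])
    pP_Max = transpose(Matrix{Float64}(gen_var[1:T, 2:(G + 1)]))   # G x T = 2 x 4
    pP_Max[1, 3]                                                   # hour-3 capacity factor of resource 1 -> 0.7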
diff --git a/src/load_inputs/load_inputs.jl b/src/load_inputs/load_inputs.jl
index 9ef747a0ed..1b8705ec4e 100644
--- a/src/load_inputs/load_inputs.jl
+++ b/src/load_inputs/load_inputs.jl
@@ -9,94 +9,95 @@ path - string path to working directory
returns: Dict (dictionary) object containing all data inputs
"""
-function load_inputs(setup::Dict,path::AbstractString)
-
- ## Read input files
- println("Reading Input CSV Files")
- ## input paths
- system_path = joinpath(path, setup["SystemFolder"])
- resources_path = joinpath(path, setup["ResourcesFolder"])
- policies_path = joinpath(path, setup["PoliciesFolder"])
- ## Declare Dict (dictionary) object used to store parameters
- inputs = Dict()
- # Read input data about power network topology, operating and expansion attributes
- if isfile(joinpath(system_path,"Network.csv"))
- network_var = load_network_data!(setup, system_path, inputs)
- else
- inputs["Z"] = 1
- inputs["L"] = 0
- end
-
- # Read temporal-resolved load data, and clustering information if relevant
- load_demand_data!(setup, path, inputs)
- # Read fuel cost data, including time-varying fuel costs
- load_fuels_data!(setup, path, inputs)
- # Read in generator/resource related inputs
- load_resources_data!(inputs, setup, path, resources_path)
- # Read in generator/resource availability profiles
- load_generators_variability!(setup, path, inputs)
+function load_inputs(setup::Dict, path::AbstractString)
+
+ ## Read input files
+ println("Reading Input CSV Files")
+ ## input paths
+ system_path = joinpath(path, setup["SystemFolder"])
+ resources_path = joinpath(path, setup["ResourcesFolder"])
+ policies_path = joinpath(path, setup["PoliciesFolder"])
+ ## Declare Dict (dictionary) object used to store parameters
+ inputs = Dict()
+ # Read input data about power network topology, operating and expansion attributes
+ if isfile(joinpath(system_path, "Network.csv"))
+ network_var = load_network_data!(setup, system_path, inputs)
+ else
+ inputs["Z"] = 1
+ inputs["L"] = 0
+ end
+
+ # Read temporal-resolved load data, and clustering information if relevant
+ load_demand_data!(setup, path, inputs)
+ # Read fuel cost data, including time-varying fuel costs
+ load_fuels_data!(setup, path, inputs)
+ # Read in generator/resource related inputs
+ load_resources_data!(inputs, setup, path, resources_path)
+ # Read in generator/resource availability profiles
+ load_generators_variability!(setup, path, inputs)
validatetimebasis(inputs)
- if setup["CapacityReserveMargin"]==1
- load_cap_reserve_margin!(setup, policies_path, inputs)
- if inputs["Z"] >1
- load_cap_reserve_margin_trans!(setup, inputs, network_var)
- end
- end
+ if setup["CapacityReserveMargin"] == 1
+ load_cap_reserve_margin!(setup, policies_path, inputs)
+ if inputs["Z"] > 1
+ load_cap_reserve_margin_trans!(setup, inputs, network_var)
+ end
+ end
- # Read in general configuration parameters for operational reserves (resource-specific reserve parameters are read in load_resources_data)
- if setup["OperationalReserves"]==1
- load_operational_reserves!(setup, system_path, inputs)
- end
+ # Read in general configuration parameters for operational reserves (resource-specific reserve parameters are read in load_resources_data)
+ if setup["OperationalReserves"] == 1
+ load_operational_reserves!(setup, system_path, inputs)
+ end
- if setup["MinCapReq"] == 1
- load_minimum_capacity_requirement!(policies_path, inputs, setup)
- end
+ if setup["MinCapReq"] == 1
+ load_minimum_capacity_requirement!(policies_path, inputs, setup)
+ end
- if setup["MaxCapReq"] == 1
- load_maximum_capacity_requirement!(policies_path, inputs, setup)
- end
+ if setup["MaxCapReq"] == 1
+ load_maximum_capacity_requirement!(policies_path, inputs, setup)
+ end
- if setup["EnergyShareRequirement"]==1
- load_energy_share_requirement!(setup, policies_path, inputs)
- end
+ if setup["EnergyShareRequirement"] == 1
+ load_energy_share_requirement!(setup, policies_path, inputs)
+ end
- if setup["CO2Cap"] >= 1
- load_co2_cap!(setup, policies_path, inputs)
- end
+ if setup["CO2Cap"] >= 1
+ load_co2_cap!(setup, policies_path, inputs)
+ end
- if !isempty(inputs["VRE_STOR"])
- load_vre_stor_variability!(setup, path, inputs)
- end
+ if !isempty(inputs["VRE_STOR"])
+ load_vre_stor_variability!(setup, path, inputs)
+ end
- # Read in mapping of modeled periods to representative periods
- if is_period_map_necessary(inputs) && is_period_map_exist(setup, path)
- load_period_map!(setup, path, inputs)
- end
+ # Read in mapping of modeled periods to representative periods
+ if is_period_map_necessary(inputs) && is_period_map_exist(setup, path)
+ load_period_map!(setup, path, inputs)
+ end
- # Virtual charge discharge cost
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- inputs["VirtualChargeDischargeCost"] = setup["VirtualChargeDischargeCost"] / scale_factor
+ # Virtual charge discharge cost
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ inputs["VirtualChargeDischargeCost"] = setup["VirtualChargeDischargeCost"] /
+ scale_factor
- println("CSV Files Successfully Read In From $path")
+ println("CSV Files Successfully Read In From $path")
- return inputs
+ return inputs
end
function is_period_map_necessary(inputs::Dict)
- multiple_rep_periods = inputs["REP_PERIOD"] > 1
- has_stor_lds = !isempty(inputs["STOR_LONG_DURATION"])
- has_hydro_lds = !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
- has_vre_stor_lds = !isempty(inputs["VRE_STOR"]) && !isempty(inputs["VS_LDS"])
+ multiple_rep_periods = inputs["REP_PERIOD"] > 1
+ has_stor_lds = !isempty(inputs["STOR_LONG_DURATION"])
+ has_hydro_lds = !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
+ has_vre_stor_lds = !isempty(inputs["VRE_STOR"]) && !isempty(inputs["VS_LDS"])
multiple_rep_periods && (has_stor_lds || has_hydro_lds || has_vre_stor_lds)
end
function is_period_map_exist(setup::Dict, path::AbstractString)
- filename = "Period_map.csv"
- is_in_system_dir = isfile(joinpath(path, setup["SystemFolder"], filename))
- is_in_TDR_dir = isfile(joinpath(path, setup["TimeDomainReductionFolder"], filename))
- is_in_system_dir || is_in_TDR_dir
+ filename = "Period_map.csv"
+ is_in_system_dir = isfile(joinpath(path, setup["SystemFolder"], filename))
+ is_in_TDR_dir = isfile(joinpath(path, setup["TimeDomainReductionFolder"], filename))
+ is_in_system_dir || is_in_TDR_dir
end
"""
@@ -115,17 +116,21 @@ Parameters:
Returns:
- String: The directory path based on the setup parameters.
"""
-function get_systemfiles_path(setup::Dict, TDR_directory::AbstractString, path::AbstractString)
+function get_systemfiles_path(setup::Dict,
+ TDR_directory::AbstractString,
+ path::AbstractString)
if setup["TimeDomainReduction"] == 1 && time_domain_reduced_files_exist(TDR_directory)
return TDR_directory
else
- # If TDR is not used, then use the "system" directory specified in the setup
+ # If TDR is not used, then use the "system" directory specified in the setup
return joinpath(path, setup["SystemFolder"])
end
end
abstract type AbstractLogMsg end
-struct ErrorMsg <: AbstractLogMsg msg::String end
-struct WarnMsg <: AbstractLogMsg msg::String end
-
-
+struct ErrorMsg <: AbstractLogMsg
+ msg::String
+end
+struct WarnMsg <: AbstractLogMsg
+ msg::String
+end
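A hedged sketch of the period-map requirement logic above, with hypothetical inputs: the map is needed only when more than one representative period is modeled and at least one long-duration storage set is non-empty:

    inputs = Dict("REP_PERIOD" => 3,
        "STOR_LONG_DURATION" => Int[],
        "STOR_HYDRO_LONG_DURATION" => [12],      # one long-duration hydro resource (hypothetical R_ID)
        "VRE_STOR" => Int[],
        "VS_LDS" => Int[])
    multiple_rep_periods = inputs["REP_PERIOD"] > 1
    has_lds = !isempty(inputs["STOR_LONG_DURATION"]) ||
              !isempty(inputs["STOR_HYDRO_LONG_DURATION"]) ||
              (!isempty(inputs["VRE_STOR"]) && !isempty(inputs["VS_LDS"]))
    multiple_rep_periods && has_lds              # true -> Period_map.csv must be available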
diff --git a/src/load_inputs/load_minimum_capacity_requirement.jl b/src/load_inputs/load_minimum_capacity_requirement.jl
index fad1fbd165..d30f2d6425 100644
--- a/src/load_inputs/load_minimum_capacity_requirement.jl
+++ b/src/load_inputs/load_minimum_capacity_requirement.jl
@@ -6,14 +6,14 @@ Read input parameters related to mimimum capacity requirement constraints (e.g.
function load_minimum_capacity_requirement!(path::AbstractString, inputs::Dict, setup::Dict)
filename = "Minimum_capacity_requirement.csv"
df = load_dataframe(joinpath(path, filename))
- NumberOfMinCapReqs = length(df[!,:MinCapReqConstraint])
+ NumberOfMinCapReqs = length(df[!, :MinCapReqConstraint])
inputs["NumberOfMinCapReqs"] = NumberOfMinCapReqs
- inputs["MinCapReq"] = df[!,:Min_MW]
+ inputs["MinCapReq"] = df[!, :Min_MW]
if setup["ParameterScale"] == 1
inputs["MinCapReq"] /= ModelScalingFactor # Convert to GW
end
if "PriceCap" in names(df)
- inputs["MinCapPriceCap"] = df[!,:PriceCap]
+ inputs["MinCapPriceCap"] = df[!, :PriceCap]
if setup["ParameterScale"] == 1
inputs["MinCapPriceCap"] /= ModelScalingFactor # Convert to million $/GW
end
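A worked example of the ParameterScale convention applied above, using GenX's ModelScalingFactor of 1e3; the capacity and price values are hypothetical:

    ModelScalingFactor = 1000.0
    min_cap_mw = 10_000.0                    # MW
    price_cap = 50_000.0                     # $/MW
    min_cap_mw / ModelScalingFactor          # 10.0 GW
    # $/MW * 1000 = $/GW, and dividing by 1e6 expresses it in millions; net effect is /1000
    price_cap / ModelScalingFactor           # 50.0 million $/GW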
diff --git a/src/load_inputs/load_multistage_data.jl b/src/load_inputs/load_multistage_data.jl
index edd5021839..95395c726f 100644
--- a/src/load_inputs/load_multistage_data.jl
+++ b/src/load_inputs/load_multistage_data.jl
@@ -15,7 +15,7 @@ end
function validate_multistage_data!(multistage_df::DataFrame)
# cols that the user must provide
- required_cols = ("lifetime","capital_recovery_period")
+ required_cols = ("lifetime", "capital_recovery_period")
# check that all required columns are present
for col in required_cols
if col ∉ names(multistage_df)
@@ -26,17 +26,16 @@ end
function scale_multistage_data!(multistage_in::DataFrame, scale_factor::Float64)
columns_to_scale = [:min_retired_cap_mw, # to GW
- :min_retired_charge_cap_mw, # to GW
- :min_retired_energy_cap_mw, # to GW
-
- :min_retired_cap_inverter_mw,
- :min_retired_cap_solar_mw,
- :min_retired_cap_wind_mw,
- :min_retired_cap_charge_dc_mw,
- :min_retired_cap_charge_ac_mw,
- :min_retired_cap_discharge_dc_mw,
- :min_retired_cap_discharge_ac_mw,
- ]
+ :min_retired_charge_cap_mw, # to GW
+ :min_retired_energy_cap_mw, # to GW
+ :min_retired_cap_inverter_mw,
+ :min_retired_cap_solar_mw,
+ :min_retired_cap_wind_mw,
+ :min_retired_cap_charge_dc_mw,
+ :min_retired_cap_charge_ac_mw,
+ :min_retired_cap_discharge_dc_mw,
+ :min_retired_cap_discharge_ac_mw,
+ ]
scale_columns!(multistage_in, columns_to_scale, scale_factor)
return nothing
-end
\ No newline at end of file
+end
diff --git a/src/load_inputs/load_network_data.jl b/src/load_inputs/load_network_data.jl
index 8116eaf02f..ac7f2b1c8c 100644
--- a/src/load_inputs/load_network_data.jl
+++ b/src/load_inputs/load_network_data.jl
@@ -4,7 +4,6 @@
Function for reading input parameters related to the electricity transmission network
"""
function load_network_data!(setup::Dict, path::AbstractString, inputs_nw::Dict)
-
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
filename = "Network.csv"
@@ -40,42 +39,46 @@ function load_network_data!(setup::Dict, path::AbstractString, inputs_nw::Dict)
if setup["DC_OPF"] == 1
if setup["NetworkExpansion"] == 1
@warn("Because the DC_OPF flag is active, GenX will not allow any transmission capacity expansion. Set the DC_OPF flag to 0 if you want to optimize tranmission capacity expansion.")
- setup["NetworkExpansion"] = 0;
+ setup["NetworkExpansion"] = 0
end
println("Reading DC-OPF values...")
# Transmission line voltage (in kV)
line_voltage_kV = to_floats(:Line_Voltage_kV)
# Transmission line reactance (in Ohms)
- line_reactance_Ohms = to_floats(:Line_Reactance_Ohms)
+ line_reactance_Ohms = to_floats(:Line_Reactance_Ohms)
# Line angle limit (in radians)
inputs_nw["Line_Angle_Limit"] = to_floats(:Angle_Limit_Rad)
# DC-OPF coefficient for each line (in MW when not scaled, in GW when scaled)
# MW = (kV)^2/Ohms
- inputs_nw["pDC_OPF_coeff"] = ((line_voltage_kV.^2)./line_reactance_Ohms)/scale_factor
+ inputs_nw["pDC_OPF_coeff"] = ((line_voltage_kV .^ 2) ./ line_reactance_Ohms) /
+ scale_factor
end
# Maximum possible flow after reinforcement for use in linear segments of piecewise approximation
inputs_nw["pTrans_Max_Possible"] = inputs_nw["pTrans_Max"]
- if setup["NetworkExpansion"]==1
+ if setup["NetworkExpansion"] == 1
# Read between zone network reinforcement costs per peak MW of capacity added
- inputs_nw["pC_Line_Reinforcement"] = to_floats(:Line_Reinforcement_Cost_per_MWyr) / scale_factor # convert to million $/GW/yr with objective function in millions
+ inputs_nw["pC_Line_Reinforcement"] = to_floats(:Line_Reinforcement_Cost_per_MWyr) /
+ scale_factor # convert to million $/GW/yr with objective function in millions
# Maximum reinforcement allowed in MW
#NOTE: values <0 indicate no expansion possible
- inputs_nw["pMax_Line_Reinforcement"] = map(x->max(0, x), to_floats(:Line_Max_Reinforcement_MW)) / scale_factor # convert to GW
+ inputs_nw["pMax_Line_Reinforcement"] = map(x -> max(0, x),
+ to_floats(:Line_Max_Reinforcement_MW)) / scale_factor # convert to GW
inputs_nw["pTrans_Max_Possible"] += inputs_nw["pMax_Line_Reinforcement"]
end
# Multi-Stage
if setup["MultiStage"] == 1
# Weighted Average Cost of Capital for Transmission Expansion
- if setup["NetworkExpansion"]>=1
- inputs_nw["transmission_WACC"]= to_floats(:WACC)
- inputs_nw["Capital_Recovery_Period_Trans"]= to_floats(:Capital_Recovery_Period)
+ if setup["NetworkExpansion"] >= 1
+ inputs_nw["transmission_WACC"] = to_floats(:WACC)
+ inputs_nw["Capital_Recovery_Period_Trans"] = to_floats(:Capital_Recovery_Period)
end
# Max Flow Possible on Each Line
- inputs_nw["pLine_Max_Flow_Possible_MW"] = to_floats(:Line_Max_Flow_Possible_MW) / scale_factor # Convert to GW
+ inputs_nw["pLine_Max_Flow_Possible_MW"] = to_floats(:Line_Max_Flow_Possible_MW) /
+ scale_factor # Convert to GW
end
# Transmission line (between zone) loss coefficient (resistance/voltage^2)
@@ -84,17 +87,18 @@ function load_network_data!(setup::Dict, path::AbstractString, inputs_nw::Dict)
inputs_nw["pTrans_Loss_Coef"] = inputs_nw["pPercent_Loss"]
elseif setup["Trans_Loss_Segments"] >= 2
# If zones are connected, loss coefficient is R/V^2 where R is resistance in Ohms and V is voltage in Volts
- inputs_nw["pTrans_Loss_Coef"] = (inputs_nw["Ohms"]/10^6)./(inputs_nw["kV"]/10^3)^2 * scale_factor # 1/GW ***
+ inputs_nw["pTrans_Loss_Coef"] = (inputs_nw["Ohms"] / 10^6) ./
+ (inputs_nw["kV"] / 10^3)^2 * scale_factor # 1/GW ***
end
## Sets and indices for transmission losses and expansion
inputs_nw["TRANS_LOSS_SEGS"] = setup["Trans_Loss_Segments"] # Number of segments used in piecewise linear approximations quadratic loss functions
- inputs_nw["LOSS_LINES"] = findall(inputs_nw["pTrans_Loss_Coef"].!=0) # Lines for which loss coefficients apply (are non-zero);
+ inputs_nw["LOSS_LINES"] = findall(inputs_nw["pTrans_Loss_Coef"] .!= 0) # Lines for which loss coefficients apply (are non-zero);
if setup["NetworkExpansion"] == 1
# Network lines and zones that are expandable have non-negative maximum reinforcement inputs
- inputs_nw["EXPANSION_LINES"] = findall(inputs_nw["pMax_Line_Reinforcement"].>=0)
- inputs_nw["NO_EXPANSION_LINES"] = findall(inputs_nw["pMax_Line_Reinforcement"].<0)
+ inputs_nw["EXPANSION_LINES"] = findall(inputs_nw["pMax_Line_Reinforcement"] .>= 0)
+ inputs_nw["NO_EXPANSION_LINES"] = findall(inputs_nw["pMax_Line_Reinforcement"] .< 0)
end
println(filename * " Successfully Read!")
@@ -138,9 +142,9 @@ starting zone of the line and the zone with entry -1 is the ending zone of the l
"""
function load_network_map_from_matrix(network_var::DataFrame, Z, L)
# Topology of the network source-sink matrix
- network_map_matrix_format_deprecation_warning()
+ network_map_matrix_format_deprecation_warning()
col = findall(s -> s == "z1", names(network_var))[1]
- mat = Matrix{Float64}(network_var[1:L, col:col+Z-1])
+ mat = Matrix{Float64}(network_var[1:L, col:(col + Z - 1)])
end
function load_network_map(network_var::DataFrame, Z, L)
@@ -150,7 +154,7 @@ function load_network_map(network_var::DataFrame, Z, L)
has_network_list = all([c in columns for c in list_columns])
zones_as_strings = ["z" * string(i) for i in 1:Z]
- has_network_matrix = all([c in columns for c in zones_as_strings])
+ has_network_matrix = all([c in columns for c in zones_as_strings])
instructions = """The transmission network should be specified in the form of a matrix
(with columns z1, z2, ... zN) or in the form of lists (with Start_Zone, End_Zone),
@@ -168,12 +172,12 @@ function load_network_map(network_var::DataFrame, Z, L)
end
function network_map_matrix_format_deprecation_warning()
- @warn """Specifying the network map as a matrix is deprecated as of v0.4
-and will be removed in v0.5. Instead, use the more compact list-style format.
-
-..., Network_Lines, Start_Zone, End_Zone, ...
- 1, 1, 2,
- 2, 1, 3,
- 3, 2, 3,
-""" maxlog=1
+ @warn """Specifying the network map as a matrix is deprecated as of v0.4
+ and will be removed in v0.5. Instead, use the more compact list-style format.
+
+ ..., Network_Lines, Start_Zone, End_Zone, ...
+ 1, 1, 2,
+ 2, 1, 3,
+ 3, 2, 3,
+ """ maxlog=1
end
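A worked example of the DC-OPF coefficient computed above, MW = (kV)^2 / Ohms, divided by ModelScalingFactor when ParameterScale is on; the line parameters are hypothetical:

    line_voltage_kV = 230.0
    line_reactance_Ohms = 52.9
    pDC_OPF_coeff = line_voltage_kV^2 / line_reactance_Ohms    # 52900 / 52.9 = 1000.0 MW
    pDC_OPF_coeff / 1000.0                                     # 1.0 GW with ParameterScale = 1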
diff --git a/src/load_inputs/load_operational_reserves.jl b/src/load_inputs/load_operational_reserves.jl
index 35508e9f5f..7aad7a74de 100644
--- a/src/load_inputs/load_operational_reserves.jl
+++ b/src/load_inputs/load_operational_reserves.jl
@@ -5,10 +5,10 @@ Read input parameters related to frequency regulation and operating reserve requ
"""
function load_operational_reserves!(setup::Dict, path::AbstractString, inputs::Dict)
filename = "Operational_reserves.csv"
- deprecated_synonym = "Reserves.csv"
+ deprecated_synonym = "Reserves.csv"
res_in = load_dataframe(path, [filename, deprecated_synonym])
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
function load_field_with_deprecated_symbol(df::DataFrame, columns::Vector{Symbol})
best = popfirst!(columns)
@@ -19,49 +19,52 @@ function load_operational_reserves!(setup::Dict, path::AbstractString, inputs::D
end
for col in columns
if col in all_columns
- Base.depwarn("The column name $col in file $filename is deprecated; prefer $best", :load_operational_reserves, force=true)
+ Base.depwarn("The column name $col in file $filename is deprecated; prefer $best",
+ :load_operational_reserves,
+ force = true)
return float(df[firstrow, col])
end
end
error("None of the columns $columns were found in the file $filename")
end
- # Regulation requirement as a percent of hourly demand; here demand is the total across all model zones
- inputs["pReg_Req_Demand"] = load_field_with_deprecated_symbol(res_in,
- [:Reg_Req_Percent_Demand,
- :Reg_Req_Percent_Load])
+ # Regulation requirement as a percent of hourly demand; here demand is the total across all model zones
+ inputs["pReg_Req_Demand"] = load_field_with_deprecated_symbol(res_in,
+ [:Reg_Req_Percent_Demand,
+ :Reg_Req_Percent_Load])
- # Regulation requirement as a percent of hourly wind and solar generation (summed across all model zones)
- inputs["pReg_Req_VRE"] = float(res_in[1,:Reg_Req_Percent_VRE])
- # Spinning up reserve requirement as a percent of hourly demand (which is summed across all zones)
- inputs["pRsv_Req_Demand"] = load_field_with_deprecated_symbol(res_in,
- [:Rsv_Req_Percent_Demand,
- :Rsv_Req_Percent_Load])
- # Spinning up reserve requirement as a percent of hourly wind and solar generation (which is summed across all zones)
- inputs["pRsv_Req_VRE"] = float(res_in[1,:Rsv_Req_Percent_VRE])
+ # Regulation requirement as a percent of hourly wind and solar generation (summed across all model zones)
+ inputs["pReg_Req_VRE"] = float(res_in[1, :Reg_Req_Percent_VRE])
+ # Spinning up reserve requirement as a percent of hourly demand (which is summed across all zones)
+ inputs["pRsv_Req_Demand"] = load_field_with_deprecated_symbol(res_in,
+ [:Rsv_Req_Percent_Demand,
+ :Rsv_Req_Percent_Load])
+ # Spinning up reserve requirement as a percent of hourly wind and solar generation (which is summed across all zones)
+ inputs["pRsv_Req_VRE"] = float(res_in[1, :Rsv_Req_Percent_VRE])
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
# Penalty for not meeting hourly spinning reserve requirement
- inputs["pC_Rsv_Penalty"] = float(res_in[1,:Unmet_Rsv_Penalty_Dollar_per_MW]) / scale_factor # convert to million $/GW with objective function in millions
- inputs["pStatic_Contingency"] = float(res_in[1,:Static_Contingency_MW]) / scale_factor # convert to GW
+ inputs["pC_Rsv_Penalty"] = float(res_in[1, :Unmet_Rsv_Penalty_Dollar_per_MW]) /
+ scale_factor # convert to million $/GW with objective function in millions
+ inputs["pStatic_Contingency"] = float(res_in[1, :Static_Contingency_MW]) / scale_factor # convert to GW
- if setup["UCommit"] >= 1
- inputs["pDynamic_Contingency"] = convert(Int8, res_in[1,:Dynamic_Contingency] )
- # Set BigM value used for dynamic contingencies cases to be largest possible cluster size
- # Note: this BigM value is only relevant for units in the COMMIT set. See operational_reserves.jl for details on implementation of dynamic contingencies
- if inputs["pDynamic_Contingency"] > 0
- inputs["pContingency_BigM"] = zeros(Float64, inputs["G"])
- for y in inputs["COMMIT"]
- inputs["pContingency_BigM"][y] = max_cap_mw(gen[y])
- # When Max_Cap_MW == -1, there is no limit on capacity size
- if inputs["pContingency_BigM"][y] < 0
- # NOTE: this effectively acts as a maximum cluster size when not otherwise specified, adjust accordingly
- inputs["pContingency_BigM"][y] = 5000 * cap_size(gen[y])
- end
- end
- end
- end
+ if setup["UCommit"] >= 1
+ inputs["pDynamic_Contingency"] = convert(Int8, res_in[1, :Dynamic_Contingency])
+ # Set BigM value used for dynamic contingencies cases to be largest possible cluster size
+ # Note: this BigM value is only relevant for units in the COMMIT set. See operational_reserves.jl for details on implementation of dynamic contingencies
+ if inputs["pDynamic_Contingency"] > 0
+ inputs["pContingency_BigM"] = zeros(Float64, inputs["G"])
+ for y in inputs["COMMIT"]
+ inputs["pContingency_BigM"][y] = max_cap_mw(gen[y])
+ # When Max_Cap_MW == -1, there is no limit on capacity size
+ if inputs["pContingency_BigM"][y] < 0
+ # NOTE: this effectively acts as a maximum cluster size when not otherwise specified, adjust accordingly
+ inputs["pContingency_BigM"][y] = 5000 * cap_size(gen[y])
+ end
+ end
+ end
+ end
- println(filename * " Successfully Read!")
+ println(filename * " Successfully Read!")
end
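A hedged sketch of the dynamic-contingency BigM rule above, with hypothetical resource data: BigM defaults to the unit's Max_Cap_MW, and when that is -1 (no limit) a proxy of 5000 times the commitment cluster size is used instead:

    max_cap_mw_y = -1.0       # -1 signals no capacity limit for this resource
    cap_size_y = 500.0        # MW per unit in the commitment cluster
    bigM = max_cap_mw_y < 0 ? 5000 * cap_size_y : max_cap_mw_y   # 2.5e6 MW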
diff --git a/src/load_inputs/load_period_map.jl b/src/load_inputs/load_period_map.jl
index dee0b1ccd2..3966ea03b3 100644
--- a/src/load_inputs/load_period_map.jl
+++ b/src/load_inputs/load_period_map.jl
@@ -4,16 +4,16 @@
Read input parameters related to mapping of representative time periods to full chronological time series
"""
function load_period_map!(setup::Dict, path::AbstractString, inputs::Dict)
- period_map = "Period_map.csv"
- data_directory = joinpath(path, setup["TimeDomainReductionFolder"])
- if setup["TimeDomainReduction"] == 1 && isfile(joinpath(data_directory, period_map)) # Use Time Domain Reduced data for GenX
- my_dir = data_directory
- else
+ period_map = "Period_map.csv"
+ data_directory = joinpath(path, setup["TimeDomainReductionFolder"])
+ if setup["TimeDomainReduction"] == 1 && isfile(joinpath(data_directory, period_map)) # Use Time Domain Reduced data for GenX
+ my_dir = data_directory
+ else
# If TDR is not used, then use the "system" directory specified in the setup
my_dir = joinpath(path, setup["SystemFolder"])
- end
- file_path = joinpath(my_dir, period_map)
+ end
+ file_path = joinpath(my_dir, period_map)
inputs["Period_Map"] = load_dataframe(file_path)
- println(period_map * " Successfully Read!")
+ println(period_map * " Successfully Read!")
end
diff --git a/src/load_inputs/load_resources_data.jl b/src/load_inputs/load_resources_data.jl
index c5a37486a1..979d9e7c21 100644
--- a/src/load_inputs/load_resources_data.jl
+++ b/src/load_inputs/load_resources_data.jl
@@ -8,16 +8,14 @@ Internal function to get resource information (filename and GenX type) for each
"""
function _get_resource_info()
- resource_info = (
- hydro = (filename="Hydro.csv", type=Hydro),
- thermal = (filename="Thermal.csv", type=Thermal),
- vre = (filename="Vre.csv", type=Vre),
- storage = (filename="Storage.csv", type=Storage),
- flex_demand = (filename="Flex_demand.csv", type=FlexDemand),
- must_run = (filename="Must_run.csv", type=MustRun),
- electrolyzer = (filename="Electrolyzer.csv", type=Electrolyzer),
- vre_stor = (filename="Vre_stor.csv", type=VreStorage)
- )
+ resource_info = (hydro = (filename = "Hydro.csv", type = Hydro),
+ thermal = (filename = "Thermal.csv", type = Thermal),
+ vre = (filename = "Vre.csv", type = Vre),
+ storage = (filename = "Storage.csv", type = Storage),
+ flex_demand = (filename = "Flex_demand.csv", type = FlexDemand),
+ must_run = (filename = "Must_run.csv", type = MustRun),
+ electrolyzer = (filename = "Electrolyzer.csv", type = Electrolyzer),
+ vre_stor = (filename = "Vre_stor.csv", type = VreStorage))
return resource_info
end
@@ -37,12 +35,11 @@ function _get_policyfile_info()
min_cap_filenames = ["Resource_minimum_capacity_requirement.csv"]
max_cap_filenames = ["Resource_maximum_capacity_requirement.csv"]
- policyfile_info = (
- esr = (filenames=esr_filenames, setup_param="EnergyShareRequirement"),
- cap_res = (filenames=cap_res_filenames, setup_param="CapacityReserveMargin"),
- min_cap = (filenames=min_cap_filenames, setup_param="MinCapReq"),
- max_cap = (filenames=max_cap_filenames, setup_param="MaxCapReq"),
- )
+ policyfile_info = (esr = (filenames = esr_filenames,
+ setup_param = "EnergyShareRequirement"),
+ cap_res = (filenames = cap_res_filenames, setup_param = "CapacityReserveMargin"),
+ min_cap = (filenames = min_cap_filenames, setup_param = "MinCapReq"),
+ max_cap = (filenames = max_cap_filenames, setup_param = "MaxCapReq"))
return policyfile_info
end
@@ -52,18 +49,16 @@ end
Internal function to get a map of GenX resource type their corresponding names in the summary table.
"""
function _get_summary_map()
- names_map = Dict{Symbol,String}(
- :Electrolyzer => "Electrolyzer",
+ names_map = Dict{Symbol, String}(:Electrolyzer => "Electrolyzer",
:FlexDemand => "Flexible_demand",
:Hydro => "Hydro",
:Storage => "Storage",
:Thermal => "Thermal",
:Vre => "VRE",
:MustRun => "Must_run",
- :VreStorage => "VRE_and_storage",
- )
+ :VreStorage => "VRE_and_storage")
max_length = maximum(length.(values(names_map)))
- for (k,v) in names_map
+ for (k, v) in names_map
names_map[k] = v * repeat(" ", max_length - length(v))
end
return names_map
@@ -82,43 +77,31 @@ See documentation for descriptions of each column being scaled.
"""
function scale_resources_data!(resource_in::DataFrame, scale_factor::Float64)
columns_to_scale = [:existing_charge_cap_mw, # to GW
- :existing_cap_mwh, # to GWh
- :existing_cap_mw, # to GW
-
- :cap_size, # to GW
-
- :min_cap_mw, # to GW
- :min_cap_mwh, # to GWh
- :min_charge_cap_mw, # to GWh
-
- :max_cap_mw, # to GW
- :max_cap_mwh, # to GWh
- :max_charge_cap_mw, # to GW
-
- :inv_cost_per_mwyr, # to $M/GW/yr
- :inv_cost_per_mwhyr, # to $M/GWh/yr
- :inv_cost_charge_per_mwyr, # to $M/GW/yr
-
- :fixed_om_cost_per_mwyr, # to $M/GW/yr
- :fixed_om_cost_per_mwhyr, # to $M/GWh/yr
- :fixed_om_cost_charge_per_mwyr, # to $M/GW/yr
-
- :var_om_cost_per_mwh, # to $M/GWh
- :var_om_cost_per_mwh_in, # to $M/GWh
-
- :reg_cost, # to $M/GW
- :rsv_cost, # to $M/GW
-
- :min_retired_cap_mw, # to GW
- :min_retired_charge_cap_mw, # to GW
- :min_retired_energy_cap_mw, # to GW
-
- :start_cost_per_mw, # to $M/GW
-
- :ccs_disposal_cost_per_metric_ton,
-
- :hydrogen_mwh_per_tonne # to GWh/t
- ]
+ :existing_cap_mwh, # to GWh
+ :existing_cap_mw, # to GW
+ :cap_size, # to GW
+ :min_cap_mw, # to GW
+ :min_cap_mwh, # to GWh
+ :min_charge_cap_mw, # to GWh
+ :max_cap_mw, # to GW
+ :max_cap_mwh, # to GWh
+ :max_charge_cap_mw, # to GW
+ :inv_cost_per_mwyr, # to $M/GW/yr
+ :inv_cost_per_mwhyr, # to $M/GWh/yr
+ :inv_cost_charge_per_mwyr, # to $M/GW/yr
+ :fixed_om_cost_per_mwyr, # to $M/GW/yr
+ :fixed_om_cost_per_mwhyr, # to $M/GWh/yr
+ :fixed_om_cost_charge_per_mwyr, # to $M/GW/yr
+ :var_om_cost_per_mwh, # to $M/GWh
+ :var_om_cost_per_mwh_in, # to $M/GWh
+ :reg_cost, # to $M/GW
+ :rsv_cost, # to $M/GW
+ :min_retired_cap_mw, # to GW
+ :min_retired_charge_cap_mw, # to GW
+ :min_retired_energy_cap_mw, # to GW
+ :start_cost_per_mw, # to $M/GW
+ :ccs_disposal_cost_per_metric_ton, :hydrogen_mwh_per_tonne, # to GWh/t
+ ]
scale_columns!(resource_in, columns_to_scale, scale_factor)
return nothing
@@ -137,53 +120,53 @@ See documentation for descriptions of each column being scaled.
"""
function scale_vre_stor_data!(vre_stor_in::DataFrame, scale_factor::Float64)
columns_to_scale = [:existing_cap_inverter_mw,
- :existing_cap_solar_mw,
- :existing_cap_wind_mw,
- :existing_cap_charge_dc_mw,
- :existing_cap_charge_ac_mw,
- :existing_cap_discharge_dc_mw,
- :existing_cap_discharge_ac_mw,
- :min_cap_inverter_mw,
- :max_cap_inverter_mw,
- :min_cap_solar_mw,
- :max_cap_solar_mw,
- :min_cap_wind_mw,
- :max_cap_wind_mw,
- :min_cap_charge_ac_mw,
- :max_cap_charge_ac_mw,
- :min_cap_charge_dc_mw,
- :max_cap_charge_dc_mw,
- :min_cap_discharge_ac_mw,
- :max_cap_discharge_ac_mw,
- :min_cap_discharge_dc_mw,
- :max_cap_discharge_dc_mw,
- :inv_cost_inverter_per_mwyr,
- :fixed_om_inverter_cost_per_mwyr,
- :inv_cost_solar_per_mwyr,
- :fixed_om_solar_cost_per_mwyr,
- :inv_cost_wind_per_mwyr,
- :fixed_om_wind_cost_per_mwyr,
- :inv_cost_discharge_dc_per_mwyr,
- :fixed_om_cost_discharge_dc_per_mwyr,
- :inv_cost_charge_dc_per_mwyr,
- :fixed_om_cost_charge_dc_per_mwyr,
- :inv_cost_discharge_ac_per_mwyr,
- :fixed_om_cost_discharge_ac_per_mwyr,
- :inv_cost_charge_ac_per_mwyr,
- :fixed_om_cost_charge_ac_per_mwyr,
- :var_om_cost_per_mwh_solar,
- :var_om_cost_per_mwh_wind,
- :var_om_cost_per_mwh_charge_dc,
- :var_om_cost_per_mwh_discharge_dc,
- :var_om_cost_per_mwh_charge_ac,
- :var_om_cost_per_mwh_discharge_ac,
- :min_retired_cap_inverter_mw,
- :min_retired_cap_solar_mw,
- :min_retired_cap_wind_mw,
- :min_retired_cap_charge_dc_mw,
- :min_retired_cap_charge_ac_mw,
- :min_retired_cap_discharge_dc_mw,
- :min_retired_cap_discharge_ac_mw]
+ :existing_cap_solar_mw,
+ :existing_cap_wind_mw,
+ :existing_cap_charge_dc_mw,
+ :existing_cap_charge_ac_mw,
+ :existing_cap_discharge_dc_mw,
+ :existing_cap_discharge_ac_mw,
+ :min_cap_inverter_mw,
+ :max_cap_inverter_mw,
+ :min_cap_solar_mw,
+ :max_cap_solar_mw,
+ :min_cap_wind_mw,
+ :max_cap_wind_mw,
+ :min_cap_charge_ac_mw,
+ :max_cap_charge_ac_mw,
+ :min_cap_charge_dc_mw,
+ :max_cap_charge_dc_mw,
+ :min_cap_discharge_ac_mw,
+ :max_cap_discharge_ac_mw,
+ :min_cap_discharge_dc_mw,
+ :max_cap_discharge_dc_mw,
+ :inv_cost_inverter_per_mwyr,
+ :fixed_om_inverter_cost_per_mwyr,
+ :inv_cost_solar_per_mwyr,
+ :fixed_om_solar_cost_per_mwyr,
+ :inv_cost_wind_per_mwyr,
+ :fixed_om_wind_cost_per_mwyr,
+ :inv_cost_discharge_dc_per_mwyr,
+ :fixed_om_cost_discharge_dc_per_mwyr,
+ :inv_cost_charge_dc_per_mwyr,
+ :fixed_om_cost_charge_dc_per_mwyr,
+ :inv_cost_discharge_ac_per_mwyr,
+ :fixed_om_cost_discharge_ac_per_mwyr,
+ :inv_cost_charge_ac_per_mwyr,
+ :fixed_om_cost_charge_ac_per_mwyr,
+ :var_om_cost_per_mwh_solar,
+ :var_om_cost_per_mwh_wind,
+ :var_om_cost_per_mwh_charge_dc,
+ :var_om_cost_per_mwh_discharge_dc,
+ :var_om_cost_per_mwh_charge_ac,
+ :var_om_cost_per_mwh_discharge_ac,
+ :min_retired_cap_inverter_mw,
+ :min_retired_cap_solar_mw,
+ :min_retired_cap_wind_mw,
+ :min_retired_cap_charge_dc_mw,
+ :min_retired_cap_charge_ac_mw,
+ :min_retired_cap_discharge_dc_mw,
+ :min_retired_cap_discharge_ac_mw]
scale_columns!(vre_stor_in, columns_to_scale, scale_factor)
return nothing
end
@@ -199,7 +182,9 @@ Scales in-place the columns in `columns_to_scale` of a dataframe `df` by a `scal
- `scale_factor` (Float64): A scaling factor for energy and currency units.
"""
-function scale_columns!(df::DataFrame, columns_to_scale::Vector{Symbol}, scale_factor::Float64)
+function scale_columns!(df::DataFrame,
+ columns_to_scale::Vector{Symbol},
+ scale_factor::Float64)
for column in columns_to_scale
if string(column) in names(df)
df[!, column] /= scale_factor
@@ -246,7 +231,7 @@ Computes the indices for the resources loaded from a single dataframe by shiftin
"""
function compute_resource_indices(resources_in::DataFrame, offset::Int64)
- range = (1,nrow(resources_in)) .+ offset
+ range = (1, nrow(resources_in)) .+ offset
return UnitRange{Int64}(range...)
end
@@ -314,7 +299,9 @@ Construct the array of resources from multiple files of different types located
- `Error`: If no resources data is found. Check the data path or the configuration file "genx_settings.yml" inside Settings.
"""
-function create_resource_array(resource_folder::AbstractString, resources_info::NamedTuple, scale_factor::Float64=1.0)
+function create_resource_array(resource_folder::AbstractString,
+ resources_info::NamedTuple,
+ scale_factor::Float64 = 1.0)
resource_id_offset = 0
resources = []
# loop over available types and load all resources in resource_folder
@@ -333,7 +320,8 @@ function create_resource_array(resource_folder::AbstractString, resources_info::
@info filename * " Successfully Read."
end
end
- isempty(resources) && error("No resources data found. Check data path or configuration file \"genx_settings.yml\" inside Settings.")
+ isempty(resources) &&
+ error("No resources data found. Check data path or configuration file \"genx_settings.yml\" inside Settings.")
return reduce(vcat, resources)
end
@@ -353,15 +341,17 @@ function check_mustrun_reserve_contribution(r::AbstractResource)
reg_max_r = reg_max(r)
if reg_max_r != 0
- e = string("Resource ", resource_name(r), " is of MUST_RUN type but :Reg_Max = ", reg_max_r, ".\n",
- "MUST_RUN units must have Reg_Max = 0 since they cannot contribute to reserves.")
+ e = string("Resource ", resource_name(r), " is of MUST_RUN type but :Reg_Max = ",
+ reg_max_r, ".\n",
+ "MUST_RUN units must have Reg_Max = 0 since they cannot contribute to reserves.")
push!(error_strings, e)
end
-
+
rsv_max_r = rsv_max(r)
if rsv_max_r != 0
- e = string("Resource ", resource_name(r), " is of MUST_RUN type but :Rsv_Max = ", rsv_max_r, ".\n",
- "MUST_RUN units must have Rsv_Max = 0 since they cannot contribute to reserves.")
+ e = string("Resource ", resource_name(r), " is of MUST_RUN type but :Rsv_Max = ",
+ rsv_max_r, ".\n",
+ "MUST_RUN units must have Rsv_Max = 0 since they cannot contribute to reserves.")
push!(error_strings, e)
end
return ErrorMsg.(error_strings)
@@ -377,7 +367,7 @@ function check_LDS_applicability(r::AbstractResource)
# LDS is available only for Hydro and Storage
if !isa(r, applicable_resources) && lds_value > 0
e = string("Resource ", resource_name(r), " has :lds = ", lds_value, ".\n",
- "This setting is valid only for resources where the type is one of $applicable_resources.")
+ "This setting is valid only for resources where the type is one of $applicable_resources.")
push!(error_strings, e)
end
return ErrorMsg.(error_strings)
@@ -388,9 +378,9 @@ function check_maintenance_applicability(r::AbstractResource)
not_set = default_zero
maint_value = get(r, :maint, not_set)
-
+
error_strings = String[]
-
+
if maint_value == not_set
# not MAINT so the rest is not applicable
return error_strings
@@ -399,13 +389,13 @@ function check_maintenance_applicability(r::AbstractResource)
# MAINT is available only for Thermal
if !isa(r, applicable_resources) && maint_value > 0
e = string("Resource ", resource_name(r), " has :maint = ", maint_value, ".\n",
- "This setting is valid only for resources where the type is one of $applicable_resources.")
+ "This setting is valid only for resources where the type is one of $applicable_resources.")
push!(error_strings, e)
end
if get(r, :model, not_set) == 2
e = string("Resource ", resource_name(r), " has :maint = ", maint_value, ".\n",
- "This is valid only for resources with unit commitment (:model = 1);\n",
- "this has :model = 2.")
+ "This is valid only for resources with unit commitment (:model = 1);\n",
+ "this has :model = 2.")
push!(error_strings, e)
end
return ErrorMsg.(error_strings)
@@ -416,27 +406,29 @@ function check_retrofit_resource(r::AbstractResource)
# check that retrofit_id is set only for retrofitting units and not for new builds or units that can retire
if can_retrofit(r) == true && can_retire(r) == false
- e = string("Resource ", resource_name(r), " has :can_retrofit = ", can_retrofit(r), " but :can_retire = ", can_retire(r), ".\n",
- "A unit that can be retrofitted must also be eligible for retirement (:can_retire = 1)")
+ e = string("Resource ", resource_name(r), " has :can_retrofit = ", can_retrofit(r),
+ " but :can_retire = ", can_retire(r), ".\n",
+ "A unit that can be retrofitted must also be eligible for retirement (:can_retire = 1)")
push!(error_strings, e)
elseif is_retrofit_option(r) == true && new_build(r) == false
- e = string("Resource ", resource_name(r), " has :retrofit = ", is_retrofit_option(r), " but :new_build = ", new_build(r), ".\n",
- "This setting is valid only for resources that have :new_build = 1")
+ e = string("Resource ", resource_name(r), " has :retrofit = ",
+ is_retrofit_option(r), " but :new_build = ", new_build(r), ".\n",
+ "This setting is valid only for resources that have :new_build = 1")
push!(error_strings, e)
end
return ErrorMsg.(error_strings)
-end
+end
function check_resource(r::AbstractResource)
e = []
e = [e; check_LDS_applicability(r)]
- e = [e; check_maintenance_applicability(r)]
+ e = [e; check_maintenance_applicability(r)]
e = [e; check_mustrun_reserve_contribution(r)]
e = [e; check_retrofit_resource(r)]
return e
end
-function check_retrofit_id(rs::Vector{T}) where T <: AbstractResource
+function check_retrofit_id(rs::Vector{T}) where {T <: AbstractResource}
warning_strings = String[]
units_can_retrofit = ids_can_retrofit(rs)
@@ -445,7 +437,7 @@ function check_retrofit_id(rs::Vector{T}) where T <: AbstractResource
# check that all retrofit_ids for resources that can retrofit and retrofit options match
if Set(rs[units_can_retrofit].retrofit_id) != Set(rs[retrofit_options].retrofit_id)
msg = string("Retrofit IDs for resources that \"can retrofit\" and \"retrofit options\" do not match.\n" *
- "All retrofitting units must be associated with a retrofit option.")
+ "All retrofitting units must be associated with a retrofit option.")
push!(warning_strings, msg)
end
@@ -458,7 +450,7 @@ end
Validate the consistency of a vector of GenX resources
Reports any errors/warnings as a vector of messages.
"""
-function check_resource(resources::Vector{T}) where T <: AbstractResource
+function check_resource(resources::Vector{T}) where {T <: AbstractResource}
e = []
for r in resources
e = [e; check_resource(r)]
@@ -488,7 +480,7 @@ function announce_errors_and_halt(e::Vector)
return nothing
end
-function validate_resources(resources::Vector{T}) where T <: AbstractResource
+function validate_resources(resources::Vector{T}) where {T <: AbstractResource}
e = check_resource(resources)
if length(e) > 0
announce_errors_and_halt(e)
@@ -510,7 +502,7 @@ Function that loads and scales resources data from folder specified in resources
"""
function create_resource_array(setup::Dict, resources_path::AbstractString)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1.0
-
+
# get filename and GenX type for each type of resources available in GenX
resources_info = _get_resource_info()
@@ -521,7 +513,6 @@ function create_resource_array(setup::Dict, resources_path::AbstractString)
return resources
end
-
"""
validate_policy_files(resource_policies_path::AbstractString, setup::Dict)
@@ -538,8 +529,13 @@ Validate the policy files by checking if they exist in the specified folder and
function validate_policy_files(resource_policies_path::AbstractString, setup::Dict)
policyfile_info = _get_policyfile_info()
for (filenames, setup_param) in values(policyfile_info)
- if setup[setup_param] == 1 && any(!isfile(joinpath(resource_policies_path, filename)) for filename in filenames)
- msg = string(setup_param, " is set to 1 in settings but the required file(s) ", filenames, " was (were) not found in ", resource_policies_path)
+ if setup[setup_param] == 1 &&
+ any(!isfile(joinpath(resource_policies_path, filename)) for filename in filenames)
+ msg = string(setup_param,
+ " is set to 1 in settings but the required file(s) ",
+ filenames,
+ " was (were) not found in ",
+ resource_policies_path)
@warn(msg)
end
end
@@ -564,15 +560,16 @@ function validate_policy_dataframe!(filename::AbstractString, policy_in::DataFra
error(msg)
end
# if the single column attribute does not have a tag number, add a tag number of 1
- if n_cols == 2 && cols[2][end-1:end] != "_1"
+ if n_cols == 2 && cols[2][(end - 1):end] != "_1"
rename!(policy_in, Symbol.(cols[2]) => Symbol.(cols[2], "_1"))
end
# get policy column names
cols = lowercase.(names(policy_in))
- filter!(col -> col ≠ "resource",cols)
-
+ filter!(col -> col ≠ "resource", cols)
+
accepted_cols = ["derating_factor", "esr", "esr_vrestor",
- [string(cap, type) for cap in ["min_cap", "max_cap"] for type in ("", "_stor", "_solar", "_wind")]...]
+ [string(cap, type) for cap in ["min_cap", "max_cap"]
+ for type in ("", "_stor", "_solar", "_wind")]...]
# Check that all policy columns have names in accepted_cols
if !all(x -> replace(x, r"(_*|_*\d*)$" => "") in accepted_cols, cols)
@@ -581,7 +578,8 @@ function validate_policy_dataframe!(filename::AbstractString, policy_in::DataFra
error(msg)
end
# Check that all policy columns have names with format "[policy_name]_[tagnum]"
- if !all(any([occursin(Regex("$(y)")*r"_\d", col) for y in accepted_cols]) for col in cols)
+ if !all(any([occursin(Regex("$(y)") * r"_\d", col) for y in accepted_cols])
+ for col in cols)
msg = "Columns in policy file $filename must have names with format \"[policy_name]_[tagnum]\", case insensitive. (e.g., ESR_1, Min_Cap_1, Max_Cap_2, etc.)."
error(msg)
end
@@ -599,14 +597,16 @@ Adds a set of new attributes (names and corresponding values) to a resource. The
- `new_values::DataFrameRow`: DataFrameRow containing the values of the new attributes.
"""
-function add_attributes_to_resource!(resource::AbstractResource, new_symbols::Vector{Symbol}, new_values::T) where T <: DataFrameRow
+function add_attributes_to_resource!(resource::AbstractResource,
+ new_symbols::Vector{Symbol},
+ new_values::T) where {T <: DataFrameRow}
# loop over new attributes
for (sym, value) in zip(new_symbols, new_values)
# add attribute to resource
setproperty!(resource, sym, value)
end
return nothing
-end
+end
"""
add_df_to_resources!(resources::Vector{<:AbstractResource}, module_in::DataFrame)
@@ -642,7 +642,9 @@ Loads a single policy file and adds the columns as new attributes to resources i
- `path::AbstractString`: The path to the policy file.
- `filename::AbstractString`: The name of the policy file.
"""
-function add_policy_to_resources!(resources::Vector{<:AbstractResource}, path::AbstractString, filename::AbstractString)
+function add_policy_to_resources!(resources::Vector{<:AbstractResource},
+ path::AbstractString,
+ filename::AbstractString)
policy_in = load_dataframe(path)
# check if policy file has any attributes, validate column names
validate_policy_dataframe!(filename, policy_in)
@@ -660,15 +662,16 @@ Reads policy files and adds policies-related attributes to resources in the mode
- `resources::Vector{<:AbstractResource}`: Vector of resources in the model.
- `resources_path::AbstractString`: The path to the resources folder.
"""
-function add_policies_to_resources!(resources::Vector{<:AbstractResource}, resource_policy_path::AbstractString)
+function add_policies_to_resources!(resources::Vector{<:AbstractResource},
+ resource_policy_path::AbstractString)
# get filename for each type of policy available in GenX
policies_info = _get_policyfile_info()
# loop over policy files
- for (filenames,_) in values(policies_info)
+ for (filenames, _) in values(policies_info)
for filename in filenames
path = joinpath(resource_policy_path, filename)
# if file exists, add policy to resources
- if isfile(path)
+ if isfile(path)
add_policy_to_resources!(resources, path, filename)
@info filename * " Successfully Read."
end
@@ -686,7 +689,8 @@ Reads module dataframe and adds columns as new attributes to the resources in th
- `resources::Vector{<:AbstractResource}`: A vector of resources.
- `module_in::DataFrame`: The dataframe with the columns to add to the resources.
"""
-function add_module_to_resources!(resources::Vector{<:AbstractResource}, module_in::DataFrame)
+function add_module_to_resources!(resources::Vector{<:AbstractResource},
+ module_in::DataFrame)
# add module columns to resources as new attributes
add_df_to_resources!(resources, module_in)
return nothing
@@ -702,7 +706,9 @@ Reads module dataframes, loops over files and adds columns as new attributes to
- `setup (Dict)`: A dictionary containing GenX settings.
- `resources_path::AbstractString`: The path to the resources folder.
"""
-function add_modules_to_resources!(resources::Vector{<:AbstractResource}, setup::Dict, resources_path::AbstractString)
+function add_modules_to_resources!(resources::Vector{<:AbstractResource},
+ setup::Dict,
+ resources_path::AbstractString)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1.0
modules = Vector{DataFrame}()
@@ -715,7 +721,7 @@ function add_modules_to_resources!(resources::Vector{<:AbstractResource}, setup:
push!(modules, multistage_in)
@info "Multistage data successfully read."
end
-
+
## Loop over modules and add attributes to resources
add_module_to_resources!.(Ref(resources), modules)
@@ -723,32 +729,32 @@ function add_modules_to_resources!(resources::Vector{<:AbstractResource}, setup:
end
function validate_piecewisefuelusage(heat_rate_mat, load_point_mat)
- # it's possible to construct piecewise fuel consumption with n of heat rate and n-1 of load point.
- # if a user feed n of heat rate and more than n of load point, throw a error message, and then use
- # n of heat rate and n-1 load point to construct the piecewise fuel usage fuction
- if size(heat_rate_mat)[2] < size(load_point_mat)[2]
- @error """ The numbers of heatrate data are less than load points, we found $(size(heat_rate_mat)[2]) of heat rate,
- and $(size(load_point_mat)[2]) of load points. We will just use $(size(heat_rate_mat)[2]) of heat rate, and $(size(heat_rate_mat)[2]-1)
- load point to create piecewise fuel usage
- """
- end
-
- # check if values for piecewise fuel consumption make sense. Negative heat rate or load point are not allowed
- if any(heat_rate_mat .< 0) | any(load_point_mat .< 0)
- @error """ Neither heat rate nor load point can be negative
- """
- error("Invalid inputs detected for piecewise fuel usage")
- end
- # for non-zero values, heat rates and load points should follow an increasing trend
- if any([any(diff(filter(!=(0), row)) .< 0) for row in eachrow(heat_rate_mat)])
- @error """ Heat rates should follow an increasing trend
- """
- error("Invalid inputs detected for piecewise fuel usage")
- elseif any([any(diff(filter(!=(0), row)) .< 0) for row in eachrow(load_point_mat)])
- @error """load points should follow an increasing trend
- """
- error("Invalid inputs detected for piecewise fuel usage")
- end
+    # it's possible to construct piecewise fuel consumption with n heat rates and n-1 load points.
+    # if a user feeds n heat rates and more than n load points, throw an error message, and then use
+    # n heat rates and n-1 load points to construct the piecewise fuel usage function
+ if size(heat_rate_mat)[2] < size(load_point_mat)[2]
+ @error """ The numbers of heatrate data are less than load points, we found $(size(heat_rate_mat)[2]) of heat rate,
+ and $(size(load_point_mat)[2]) of load points. We will just use $(size(heat_rate_mat)[2]) of heat rate, and $(size(heat_rate_mat)[2]-1)
+ load point to create piecewise fuel usage
+ """
+ end
+
+ # check if values for piecewise fuel consumption make sense. Negative heat rate or load point are not allowed
+ if any(heat_rate_mat .< 0) | any(load_point_mat .< 0)
+ @error """ Neither heat rate nor load point can be negative
+ """
+ error("Invalid inputs detected for piecewise fuel usage")
+ end
+ # for non-zero values, heat rates and load points should follow an increasing trend
+ if any([any(diff(filter(!=(0), row)) .< 0) for row in eachrow(heat_rate_mat)])
+ @error """ Heat rates should follow an increasing trend
+ """
+ error("Invalid inputs detected for piecewise fuel usage")
+ elseif any([any(diff(filter(!=(0), row)) .< 0) for row in eachrow(load_point_mat)])
+ @error """load points should follow an increasing trend
+ """
+ error("Invalid inputs detected for piecewise fuel usage")
+ end
end
"""
@@ -762,20 +768,23 @@ Reads piecewise fuel usage data from the vector of generators, create a PWFU_dat
- `gen::Vector{<:AbstractResource}`: The vector of generators in the model
- `inputs::Dict`: The dictionary containing the input data
"""
-function process_piecewisefuelusage!(setup::Dict, gen::Vector{<:AbstractResource}, inputs::Dict)
+function process_piecewisefuelusage!(setup::Dict,
+ gen::Vector{<:AbstractResource},
+ inputs::Dict)
inputs["PWFU_Num_Segments"] = 0
inputs["THERM_COMMIT_PWFU"] = Int64[]
-
- if any(haskey.(gen, :pwfu_fuel_usage_zero_load_mmbtu_per_h))
+ if any(haskey.(gen, :pwfu_fuel_usage_zero_load_mmbtu_per_h))
thermal_gen = gen.Thermal
has_pwfu = haskey.(thermal_gen, :pwfu_fuel_usage_zero_load_mmbtu_per_h)
@assert all(has_pwfu) "Piecewise fuel usage data is not consistent across thermal generators"
- heat_rate_mat_therm = extract_matrix_from_resources(thermal_gen, "pwfu_heat_rate_mmbtu_per_mwh")
- load_point_mat_therm = extract_matrix_from_resources(thermal_gen, "pwfu_load_point_mw")
-
- num_segments = size(heat_rate_mat_therm)[2]
+ heat_rate_mat_therm = extract_matrix_from_resources(thermal_gen,
+ "pwfu_heat_rate_mmbtu_per_mwh")
+ load_point_mat_therm = extract_matrix_from_resources(thermal_gen,
+ "pwfu_load_point_mw")
+
+ num_segments = size(heat_rate_mat_therm)[2]
# create a matrix to store the heat rate and load point for each generator in the model
heat_rate_mat = zeros(length(gen), num_segments)
@@ -784,74 +793,78 @@ function process_piecewisefuelusage!(setup::Dict, gen::Vector{<:AbstractResource
heat_rate_mat[THERM, :] = heat_rate_mat_therm
load_point_mat[THERM, :] = load_point_mat_therm
- # check data input
- validate_piecewisefuelusage(heat_rate_mat, load_point_mat)
+ # check data input
+ validate_piecewisefuelusage(heat_rate_mat, load_point_mat)
# determine if a generator contains piecewise fuel usage segment based on non-zero heatrate
- nonzero_rows = any(heat_rate_mat .!= 0 , dims = 2)[:]
- HAS_PWFU = resource_id.(gen[nonzero_rows])
+ nonzero_rows = any(heat_rate_mat .!= 0, dims = 2)[:]
+ HAS_PWFU = resource_id.(gen[nonzero_rows])
- # translate the inital fuel usage, heat rate, and load points into intercept for each segment
+        # translate the initial fuel usage, heat rates, and load points into an intercept for each segment
fuel_usage_zero_load = zeros(length(gen))
- fuel_usage_zero_load[THERM] = pwfu_fuel_usage_zero_load_mmbtu_per_h.(thermal_gen)
- # construct a matrix for intercept
- intercept_mat = zeros(size(heat_rate_mat))
- # PWFU_Fuel_Usage_MMBTU_per_h is always the intercept of the first segment
- intercept_mat[:,1] = fuel_usage_zero_load
-
- # create a function to compute intercept if we have more than one segment
- function calculate_intercepts(slope, intercept_1, load_point)
- m, n = size(slope)
- # Initialize the intercepts matrix with zeros
- intercepts = zeros(m, n)
- # The first segment's intercepts should be intercept_1 vector
- intercepts[:, 1] = intercept_1
- # Calculate intercepts for the other segments using the load points (i.e., intersection points)
- for j in 1:n-1
- for i in 1:m
- current_slope = slope[i, j+1]
- previous_slope = slope[i, j]
- # If the current slope is 0, then skip the calculation and return 0
- if current_slope == 0
- intercepts[i, j+1] = 0.0
- else
- # y = a*x + b; => b = y - ax
- # Calculate y-coordinate of the intersection
- y = previous_slope * load_point[i, j] + intercepts[i, j]
- # determine the new intercept
- b = y - current_slope * load_point[i, j]
- intercepts[i, j+1] = b
- end
- end
- end
- return intercepts
- end
-
- if num_segments > 1
- # determine the intercept for the rest of segment if num_segments > 1
- intercept_mat = calculate_intercepts(heat_rate_mat, fuel_usage_zero_load, load_point_mat)
- end
-
- # create a PWFU_data that contain processed intercept and slope (i.e., heat rate)
- intercept_cols = [Symbol("pwfu_intercept_", i) for i in 1:num_segments]
- intercept_df = DataFrame(intercept_mat, Symbol.(intercept_cols))
- slope_cols = Symbol.(filter(colname -> startswith(string(colname),"pwfu_heat_rate_mmbtu_per_mwh"), collect(attributes(thermal_gen[1]))))
+ fuel_usage_zero_load[THERM] = pwfu_fuel_usage_zero_load_mmbtu_per_h.(thermal_gen)
+ # construct a matrix for intercept
+ intercept_mat = zeros(size(heat_rate_mat))
+ # PWFU_Fuel_Usage_MMBTU_per_h is always the intercept of the first segment
+ intercept_mat[:, 1] = fuel_usage_zero_load
+
+ # create a function to compute intercept if we have more than one segment
+ function calculate_intercepts(slope, intercept_1, load_point)
+ m, n = size(slope)
+ # Initialize the intercepts matrix with zeros
+ intercepts = zeros(m, n)
+ # The first segment's intercepts should be intercept_1 vector
+ intercepts[:, 1] = intercept_1
+ # Calculate intercepts for the other segments using the load points (i.e., intersection points)
+ for j in 1:(n - 1)
+ for i in 1:m
+ current_slope = slope[i, j + 1]
+ previous_slope = slope[i, j]
+ # If the current slope is 0, then skip the calculation and return 0
+ if current_slope == 0
+ intercepts[i, j + 1] = 0.0
+ else
+ # y = a*x + b; => b = y - ax
+ # Calculate y-coordinate of the intersection
+ y = previous_slope * load_point[i, j] + intercepts[i, j]
+ # determine the new intercept
+ b = y - current_slope * load_point[i, j]
+ intercepts[i, j + 1] = b
+ end
+ end
+ end
+ return intercepts
+ end
+
+ if num_segments > 1
+            # determine the intercepts for the remaining segments if num_segments > 1
+ intercept_mat = calculate_intercepts(heat_rate_mat,
+ fuel_usage_zero_load,
+ load_point_mat)
+ end
+
+        # create a PWFU_data that contains the processed intercepts and slopes (i.e., heat rates)
+ intercept_cols = [Symbol("pwfu_intercept_", i) for i in 1:num_segments]
+ intercept_df = DataFrame(intercept_mat, Symbol.(intercept_cols))
+ slope_cols = Symbol.(filter(colname -> startswith(string(colname),
+ "pwfu_heat_rate_mmbtu_per_mwh"),
+ collect(attributes(thermal_gen[1]))))
sort!(slope_cols, by = x -> parse(Int, split(string(x), "_")[end]))
- slope_df = DataFrame(heat_rate_mat, Symbol.(slope_cols))
- PWFU_data = hcat(slope_df, intercept_df)
- # no need to scale sclope, but intercept should be scaled when parameterscale is on (MMBTU -> billion BTU)
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- PWFU_data[!, intercept_cols] ./= scale_factor
-
- inputs["slope_cols"] = slope_cols
- inputs["intercept_cols"] = intercept_cols
- inputs["PWFU_data"] = PWFU_data
- inputs["PWFU_Num_Segments"] = num_segments
- inputs["THERM_COMMIT_PWFU"] = intersect(ids_with_unit_commitment(gen), HAS_PWFU)
-
- @info "Piecewise fuel usage data successfully read!"
- end
- return nothing
+ slope_df = DataFrame(heat_rate_mat, Symbol.(slope_cols))
+ PWFU_data = hcat(slope_df, intercept_df)
+        # no need to scale the slope, but the intercept should be scaled when ParameterScale is on (MMBTU -> billion BTU)
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ PWFU_data[!, intercept_cols] ./= scale_factor
+
+ inputs["slope_cols"] = slope_cols
+ inputs["intercept_cols"] = intercept_cols
+ inputs["PWFU_data"] = PWFU_data
+ inputs["PWFU_Num_Segments"] = num_segments
+ inputs["THERM_COMMIT_PWFU"] = intersect(ids_with_unit_commitment(gen), HAS_PWFU)
+
+ @info "Piecewise fuel usage data successfully read!"
+ end
+ return nothing
end
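
A minimal sketch of the intercept recursion used by calculate_intercepts above, with assumed toy numbers: consecutive segments are forced to meet at the load point, so the new intercept follows b = y - a*x.

    slope1, slope2 = 9.0, 10.5    # heat rates (MMBTU/MWh) of segments 1 and 2
    intercept1 = 50.0             # zero-load fuel use (MMBTU/h), intercept of segment 1
    load_point1 = 100.0           # MW at which segments 1 and 2 intersect
    y = slope1 * load_point1 + intercept1    # fuel use at the intersection point
    b2 = y - slope2 * load_point1            # intercept of segment 2
    @assert b2 == -100.0
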
@doc raw"""
@@ -863,59 +876,61 @@ For co-located VRE-storage resources, this function returns the storage type
"""
function split_storage_resources!(inputs::Dict, gen::Vector{<:AbstractResource})
- # All Storage Resources
- inputs["VS_STOR"] = union(storage_dc_charge(gen), storage_dc_discharge(gen),
- storage_ac_charge(gen), storage_ac_discharge(gen))
-
- STOR = inputs["VS_STOR"]
+ # All Storage Resources
+ inputs["VS_STOR"] = union(storage_dc_charge(gen), storage_dc_discharge(gen),
+ storage_ac_charge(gen), storage_ac_discharge(gen))
+
+ STOR = inputs["VS_STOR"]
- # Storage DC Discharge Resources
- inputs["VS_STOR_DC_DISCHARGE"] = storage_dc_discharge(gen)
- inputs["VS_SYM_DC_DISCHARGE"] = storage_sym_dc_discharge(gen)
- inputs["VS_ASYM_DC_DISCHARGE"] = storage_asym_dc_discharge(gen)
+ # Storage DC Discharge Resources
+ inputs["VS_STOR_DC_DISCHARGE"] = storage_dc_discharge(gen)
+ inputs["VS_SYM_DC_DISCHARGE"] = storage_sym_dc_discharge(gen)
+ inputs["VS_ASYM_DC_DISCHARGE"] = storage_asym_dc_discharge(gen)
- # Storage DC Charge Resources
- inputs["VS_STOR_DC_CHARGE"] = storage_dc_charge(gen)
- inputs["VS_SYM_DC_CHARGE"] = storage_sym_dc_charge(gen)
+ # Storage DC Charge Resources
+ inputs["VS_STOR_DC_CHARGE"] = storage_dc_charge(gen)
+ inputs["VS_SYM_DC_CHARGE"] = storage_sym_dc_charge(gen)
inputs["VS_ASYM_DC_CHARGE"] = storage_asym_dc_charge(gen)
- # Storage AC Discharge Resources
- inputs["VS_STOR_AC_DISCHARGE"] = storage_ac_discharge(gen)
- inputs["VS_SYM_AC_DISCHARGE"] = storage_sym_ac_discharge(gen)
- inputs["VS_ASYM_AC_DISCHARGE"] = storage_asym_ac_discharge(gen)
+ # Storage AC Discharge Resources
+ inputs["VS_STOR_AC_DISCHARGE"] = storage_ac_discharge(gen)
+ inputs["VS_SYM_AC_DISCHARGE"] = storage_sym_ac_discharge(gen)
+ inputs["VS_ASYM_AC_DISCHARGE"] = storage_asym_ac_discharge(gen)
- # Storage AC Charge Resources
- inputs["VS_STOR_AC_CHARGE"] = storage_ac_charge(gen)
- inputs["VS_SYM_AC_CHARGE"] = storage_sym_ac_charge(gen)
- inputs["VS_ASYM_AC_CHARGE"] = storage_asym_ac_charge(gen)
+ # Storage AC Charge Resources
+ inputs["VS_STOR_AC_CHARGE"] = storage_ac_charge(gen)
+ inputs["VS_SYM_AC_CHARGE"] = storage_sym_ac_charge(gen)
+ inputs["VS_ASYM_AC_CHARGE"] = storage_asym_ac_charge(gen)
- # Storage LDS & Non-LDS Resources
- inputs["VS_LDS"] = is_LDS_VRE_STOR(gen)
- inputs["VS_nonLDS"] = setdiff(STOR, inputs["VS_LDS"])
+ # Storage LDS & Non-LDS Resources
+ inputs["VS_LDS"] = is_LDS_VRE_STOR(gen)
+ inputs["VS_nonLDS"] = setdiff(STOR, inputs["VS_LDS"])
# Symmetric and asymmetric storage resources
- inputs["VS_ASYM"] = union(inputs["VS_ASYM_DC_CHARGE"], inputs["VS_ASYM_DC_DISCHARGE"],
- inputs["VS_ASYM_AC_DISCHARGE"], inputs["VS_ASYM_AC_CHARGE"])
- inputs["VS_SYM_DC"] = intersect(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"])
- inputs["VS_SYM_AC"] = intersect(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])
+ inputs["VS_ASYM"] = union(inputs["VS_ASYM_DC_CHARGE"], inputs["VS_ASYM_DC_DISCHARGE"],
+ inputs["VS_ASYM_AC_DISCHARGE"], inputs["VS_ASYM_AC_CHARGE"])
+ inputs["VS_SYM_DC"] = intersect(inputs["VS_SYM_DC_CHARGE"],
+ inputs["VS_SYM_DC_DISCHARGE"])
+ inputs["VS_SYM_AC"] = intersect(inputs["VS_SYM_AC_CHARGE"],
+ inputs["VS_SYM_AC_DISCHARGE"])
# Send warnings for symmetric/asymmetric resources
- if (!isempty(setdiff(inputs["VS_SYM_DC_DISCHARGE"], inputs["VS_SYM_DC_CHARGE"]))
- || !isempty(setdiff(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"]))
- || !isempty(setdiff(inputs["VS_SYM_AC_DISCHARGE"], inputs["VS_SYM_AC_CHARGE"]))
- || !isempty(setdiff(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])))
+ if (!isempty(setdiff(inputs["VS_SYM_DC_DISCHARGE"], inputs["VS_SYM_DC_CHARGE"]))
+ || !isempty(setdiff(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"]))
+ || !isempty(setdiff(inputs["VS_SYM_AC_DISCHARGE"], inputs["VS_SYM_AC_CHARGE"]))
+ || !isempty(setdiff(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])))
@warn("Symmetric capacities must both be DC or AC.")
end
- # Send warnings for battery resources discharging
- if !isempty(intersect(inputs["VS_STOR_DC_DISCHARGE"], inputs["VS_STOR_AC_DISCHARGE"]))
- @warn("Both AC and DC discharging functionalities are turned on.")
- end
+ # Send warnings for battery resources discharging
+ if !isempty(intersect(inputs["VS_STOR_DC_DISCHARGE"], inputs["VS_STOR_AC_DISCHARGE"]))
+ @warn("Both AC and DC discharging functionalities are turned on.")
+ end
- # Send warnings for battery resources charging
- if !isempty(intersect(inputs["VS_STOR_DC_CHARGE"], inputs["VS_STOR_AC_CHARGE"]))
- @warn("Both AC and DC charging functionalities are turned on.")
- end
+ # Send warnings for battery resources charging
+ if !isempty(intersect(inputs["VS_STOR_DC_CHARGE"], inputs["VS_STOR_AC_CHARGE"]))
+ @warn("Both AC and DC charging functionalities are turned on.")
+ end
end
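
A small sketch of the symmetric-capacity consistency check above, with hypothetical resource IDs:

    sym_dc_discharge = Set([1, 2, 3])
    sym_dc_charge = Set([1, 2])
    # resource 3 discharges symmetrically on the DC side but has no symmetric DC charge,
    # so the setdiff is non-empty and the warning would be emitted
    @assert !isempty(setdiff(sym_dc_discharge, sym_dc_charge))
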
"""
@@ -926,7 +941,7 @@ Updates the retrofit_id of a resource that can be retrofit or is a retrofit opti
# Arguments
- `r::AbstractResource`: The resource to update.
"""
-function update_retrofit_id(r::AbstractResource)
+function update_retrofit_id(r::AbstractResource)
if haskey(r, :retrofit_id) && (can_retrofit(r) == true || is_retrofit_option(r) == true)
r.retrofit_id = string(r.retrofit_id, "_", region(r))
else
@@ -946,21 +961,25 @@ Adds resources to the `inputs` `Dict` with the key "RESOURCES" together with sev
- `gen (Vector{<:AbstractResource})`: Array of GenX resources.
"""
-function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::AbstractString, gen::Vector{<:AbstractResource})
-
+function add_resources_to_input_data!(inputs::Dict,
+ setup::Dict,
+ case_path::AbstractString,
+ gen::Vector{<:AbstractResource})
+
# Number of resources
G = length(gen)
inputs["G"] = G
# Number of time steps (periods)
T = inputs["T"]
-
+
## HYDRO
# Set of all reservoir hydro resources
inputs["HYDRO_RES"] = hydro(gen)
# Set of hydro resources modeled with known reservoir energy capacity
if !isempty(inputs["HYDRO_RES"])
- inputs["HYDRO_RES_KNOWN_CAP"] = intersect(inputs["HYDRO_RES"], ids_with_positive(gen, hydro_energy_to_power_ratio))
+ inputs["HYDRO_RES_KNOWN_CAP"] = intersect(inputs["HYDRO_RES"],
+ ids_with_positive(gen, hydro_energy_to_power_ratio))
end
## STORAGE
@@ -969,12 +988,12 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
     # Set of storage resources with asymmetric (separate) charge/discharge capacity components
inputs["STOR_ASYMMETRIC"] = asymmetric_storage(gen)
# Set of all storage resources
- inputs["STOR_ALL"] = union(inputs["STOR_SYMMETRIC"],inputs["STOR_ASYMMETRIC"])
+ inputs["STOR_ALL"] = union(inputs["STOR_SYMMETRIC"], inputs["STOR_ASYMMETRIC"])
     # Set of storage resources with long duration storage capabilities
inputs["STOR_HYDRO_LONG_DURATION"] = intersect(inputs["HYDRO_RES"], is_LDS(gen))
inputs["STOR_HYDRO_SHORT_DURATION"] = intersect(inputs["HYDRO_RES"], is_SDS(gen))
- inputs["STOR_LONG_DURATION"] = intersect(inputs["STOR_ALL"], is_LDS(gen))
+ inputs["STOR_LONG_DURATION"] = intersect(inputs["STOR_ALL"], is_LDS(gen))
inputs["STOR_SHORT_DURATION"] = intersect(inputs["STOR_ALL"], is_SDS(gen))
## VRE
@@ -1011,10 +1030,10 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
# Set of thermal resources without unit commitment
inputs["THERM_NO_COMMIT"] = no_unit_commitment(gen)
     # Start-up cost is the fixed cost per start times the unit capacity
- inputs["C_Start"] = zeros(Float64, G, T)
+ inputs["C_Start"] = zeros(Float64, G, T)
for g in inputs["THERM_COMMIT"]
start_up_cost = start_cost_per_mw(gen[g]) * cap_size(gen[g])
- inputs["C_Start"][g,:] .= start_up_cost
+ inputs["C_Start"][g, :] .= start_up_cost
end
# Piecewise fuel usage option
process_piecewisefuelusage!(setup, gen, inputs)
@@ -1027,27 +1046,28 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
     # For now, the only resources eligible for UC are thermal resources
inputs["COMMIT"] = inputs["THERM_COMMIT"]
- # Set of CCS resources (optional set):
+ # Set of CCS resources (optional set):
inputs["CCS"] = ids_with_positive(gen, co2_capture_fraction)
# Single-fuel resources
- inputs["SINGLE_FUEL"] = ids_with_singlefuel(gen)
- # Multi-fuel resources
- inputs["MULTI_FUELS"] = ids_with_multifuels(gen)
- if !isempty(inputs["MULTI_FUELS"]) # If there are any resources using multi fuels, read relevant data
- load_multi_fuels_data!(inputs, gen, setup, case_path)
- end
+ inputs["SINGLE_FUEL"] = ids_with_singlefuel(gen)
+ # Multi-fuel resources
+ inputs["MULTI_FUELS"] = ids_with_multifuels(gen)
+    if !isempty(inputs["MULTI_FUELS"]) # If there are any resources using multiple fuels, read relevant data
+ load_multi_fuels_data!(inputs, gen, setup, case_path)
+ end
buildable = is_buildable(gen)
retirable = is_retirable(gen)
units_can_retrofit = ids_can_retrofit(gen)
-
+
# Set of all resources eligible for new capacity
inputs["NEW_CAP"] = intersect(buildable, ids_with(gen, max_cap_mw))
# Set of all resources eligible for capacity retirements
inputs["RET_CAP"] = intersect(retirable, ids_with_nonneg(gen, existing_cap_mw))
     # Set of all resources eligible for capacity retrofitting (by Yifu, same as retirement)
- inputs["RETROFIT_CAP"] = intersect(units_can_retrofit, ids_with_nonneg(gen, existing_cap_mw))
+ inputs["RETROFIT_CAP"] = intersect(units_can_retrofit,
+ ids_with_nonneg(gen, existing_cap_mw))
inputs["RETROFIT_OPTIONS"] = ids_retrofit_options(gen)
# Retrofit
@@ -1060,14 +1080,15 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
# in the same cluster either all have Contribute_Min_Retirement set to 1 or none of them do
if setup["MultiStage"] == 1
for retrofit_res in inputs["RETROFIT_CAP"]
- if !has_all_options_contributing(gen[retrofit_res], gen) && !has_all_options_not_contributing(gen[retrofit_res], gen)
+ if !has_all_options_contributing(gen[retrofit_res], gen) &&
+ !has_all_options_not_contributing(gen[retrofit_res], gen)
msg = "Retrofit options in the same cluster either all have Contribute_Min_Retirement set to 1 or none of them do. \n" *
- "Check column Contribute_Min_Retirement in the \"Resource_multistage_data.csv\" file for resource $(resource_name(gen[retrofit_res]))."
+ "Check column Contribute_Min_Retirement in the \"Resource_multistage_data.csv\" file for resource $(resource_name(gen[retrofit_res]))."
@error msg
error("Invalid input detected for Contribute_Min_Retirement.")
-
end
- if has_all_options_not_contributing(gen[retrofit_res], gen) && setup["MultiStageSettingsDict"]["Myopic"]==1
+ if has_all_options_not_contributing(gen[retrofit_res], gen) &&
+ setup["MultiStageSettingsDict"]["Myopic"] == 1
@error "When performing myopic multistage expansion all retrofit options need to have Contribute_Min_Retirement set to 1 to avoid model infeasibilities."
error("Invalid input detected for Contribute_Min_Retirement.")
end
@@ -1079,34 +1100,44 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
ret_cap_energy = Set{Int64}()
if !isempty(inputs["STOR_ALL"])
# Set of all storage resources eligible for new energy capacity
- new_cap_energy = intersect(buildable, ids_with(gen, max_cap_mwh), inputs["STOR_ALL"])
+ new_cap_energy = intersect(buildable,
+ ids_with(gen, max_cap_mwh),
+ inputs["STOR_ALL"])
# Set of all storage resources eligible for energy capacity retirements
- ret_cap_energy = intersect(retirable, ids_with_nonneg(gen, existing_cap_mwh), inputs["STOR_ALL"])
+ ret_cap_energy = intersect(retirable,
+ ids_with_nonneg(gen, existing_cap_mwh),
+ inputs["STOR_ALL"])
end
inputs["NEW_CAP_ENERGY"] = new_cap_energy
inputs["RET_CAP_ENERGY"] = ret_cap_energy
- new_cap_charge = Set{Int64}()
- ret_cap_charge = Set{Int64}()
- if !isempty(inputs["STOR_ASYMMETRIC"])
- # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
- new_cap_charge = intersect(buildable, ids_with(gen, max_charge_cap_mw), inputs["STOR_ASYMMETRIC"])
- # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
- ret_cap_charge = intersect(retirable, ids_with_nonneg(gen, existing_charge_cap_mw), inputs["STOR_ASYMMETRIC"])
- end
- inputs["NEW_CAP_CHARGE"] = new_cap_charge
- inputs["RET_CAP_CHARGE"] = ret_cap_charge
+ new_cap_charge = Set{Int64}()
+ ret_cap_charge = Set{Int64}()
+ if !isempty(inputs["STOR_ASYMMETRIC"])
+ # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
+ new_cap_charge = intersect(buildable,
+ ids_with(gen, max_charge_cap_mw),
+ inputs["STOR_ASYMMETRIC"])
+ # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
+ ret_cap_charge = intersect(retirable,
+ ids_with_nonneg(gen, existing_charge_cap_mw),
+ inputs["STOR_ASYMMETRIC"])
+ end
+ inputs["NEW_CAP_CHARGE"] = new_cap_charge
+ inputs["RET_CAP_CHARGE"] = ret_cap_charge
## Co-located resources
# VRE and storage
inputs["VRE_STOR"] = vre_stor(gen)
# Check if VRE-STOR resources exist
- if !isempty(inputs["VRE_STOR"])
+ if !isempty(inputs["VRE_STOR"])
# Solar PV Resources
inputs["VS_SOLAR"] = solar(gen)
# DC Resources
- inputs["VS_DC"] = union(storage_dc_discharge(gen), storage_dc_charge(gen), solar(gen))
+ inputs["VS_DC"] = union(storage_dc_discharge(gen),
+ storage_dc_charge(gen),
+ solar(gen))
# Wind Resources
inputs["VS_WIND"] = wind(gen)
@@ -1116,39 +1147,71 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
gen_VRE_STOR = gen.VreStorage
# Set of all VRE-STOR resources eligible for new solar capacity
- inputs["NEW_CAP_SOLAR"] = intersect(buildable, solar(gen), ids_with(gen_VRE_STOR, max_cap_solar_mw))
+ inputs["NEW_CAP_SOLAR"] = intersect(buildable,
+ solar(gen),
+ ids_with(gen_VRE_STOR, max_cap_solar_mw))
# Set of all VRE_STOR resources eligible for solar capacity retirements
- inputs["RET_CAP_SOLAR"] = intersect(retirable, solar(gen), ids_with_nonneg(gen_VRE_STOR, existing_cap_solar_mw))
+ inputs["RET_CAP_SOLAR"] = intersect(retirable,
+ solar(gen),
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_solar_mw))
# Set of all VRE-STOR resources eligible for new wind capacity
- inputs["NEW_CAP_WIND"] = intersect(buildable, wind(gen), ids_with(gen_VRE_STOR, max_cap_wind_mw))
+ inputs["NEW_CAP_WIND"] = intersect(buildable,
+ wind(gen),
+ ids_with(gen_VRE_STOR, max_cap_wind_mw))
# Set of all VRE_STOR resources eligible for wind capacity retirements
- inputs["RET_CAP_WIND"] = intersect(retirable, wind(gen), ids_with_nonneg(gen_VRE_STOR, existing_cap_wind_mw))
+ inputs["RET_CAP_WIND"] = intersect(retirable,
+ wind(gen),
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_wind_mw))
# Set of all VRE-STOR resources eligible for new inverter capacity
- inputs["NEW_CAP_DC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_inverter_mw), inputs["VS_DC"])
+ inputs["NEW_CAP_DC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_inverter_mw),
+ inputs["VS_DC"])
# Set of all VRE_STOR resources eligible for inverter capacity retirements
- inputs["RET_CAP_DC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_inverter_mw), inputs["VS_DC"])
+ inputs["RET_CAP_DC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_inverter_mw),
+ inputs["VS_DC"])
# Set of all storage resources eligible for new energy capacity
- inputs["NEW_CAP_STOR"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_mwh), inputs["VS_STOR"])
+ inputs["NEW_CAP_STOR"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_mwh),
+ inputs["VS_STOR"])
# Set of all storage resources eligible for energy capacity retirements
- inputs["RET_CAP_STOR"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_mwh), inputs["VS_STOR"])
+ inputs["RET_CAP_STOR"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_mwh),
+ inputs["VS_STOR"])
if !isempty(inputs["VS_ASYM"])
# Set of asymmetric charge DC storage resources eligible for new charge capacity
- inputs["NEW_CAP_CHARGE_DC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_charge_dc_mw), inputs["VS_ASYM_DC_CHARGE"])
+ inputs["NEW_CAP_CHARGE_DC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_charge_dc_mw),
+ inputs["VS_ASYM_DC_CHARGE"])
# Set of asymmetric charge DC storage resources eligible for charge capacity retirements
- inputs["RET_CAP_CHARGE_DC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_charge_dc_mw), inputs["VS_ASYM_DC_CHARGE"])
+ inputs["RET_CAP_CHARGE_DC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_charge_dc_mw),
+ inputs["VS_ASYM_DC_CHARGE"])
# Set of asymmetric discharge DC storage resources eligible for new discharge capacity
- inputs["NEW_CAP_DISCHARGE_DC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_discharge_dc_mw), inputs["VS_ASYM_DC_DISCHARGE"])
+ inputs["NEW_CAP_DISCHARGE_DC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_discharge_dc_mw),
+ inputs["VS_ASYM_DC_DISCHARGE"])
# Set of asymmetric discharge DC storage resources eligible for discharge capacity retirements
- inputs["RET_CAP_DISCHARGE_DC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_discharge_dc_mw), inputs["VS_ASYM_DC_DISCHARGE"])
+ inputs["RET_CAP_DISCHARGE_DC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_discharge_dc_mw),
+ inputs["VS_ASYM_DC_DISCHARGE"])
# Set of asymmetric charge AC storage resources eligible for new charge capacity
- inputs["NEW_CAP_CHARGE_AC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_charge_ac_mw), inputs["VS_ASYM_AC_CHARGE"])
+ inputs["NEW_CAP_CHARGE_AC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_charge_ac_mw),
+ inputs["VS_ASYM_AC_CHARGE"])
# Set of asymmetric charge AC storage resources eligible for charge capacity retirements
- inputs["RET_CAP_CHARGE_AC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_charge_ac_mw), inputs["VS_ASYM_AC_CHARGE"])
+ inputs["RET_CAP_CHARGE_AC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_charge_ac_mw),
+ inputs["VS_ASYM_AC_CHARGE"])
# Set of asymmetric discharge AC storage resources eligible for new discharge capacity
- inputs["NEW_CAP_DISCHARGE_AC"] = intersect(buildable, ids_with(gen_VRE_STOR, max_cap_discharge_ac_mw), inputs["VS_ASYM_AC_DISCHARGE"])
+ inputs["NEW_CAP_DISCHARGE_AC"] = intersect(buildable,
+ ids_with(gen_VRE_STOR, max_cap_discharge_ac_mw),
+ inputs["VS_ASYM_AC_DISCHARGE"])
# Set of asymmetric discharge AC storage resources eligible for discharge capacity retirements
- inputs["RET_CAP_DISCHARGE_AC"] = intersect(retirable, ids_with_nonneg(gen_VRE_STOR, existing_cap_discharge_ac_mw), inputs["VS_ASYM_AC_DISCHARGE"])
- end
+ inputs["RET_CAP_DISCHARGE_AC"] = intersect(retirable,
+ ids_with_nonneg(gen_VRE_STOR, existing_cap_discharge_ac_mw),
+ inputs["VS_ASYM_AC_DISCHARGE"])
+ end
# Names for systemwide resources
inputs["RESOURCE_NAMES_VRE_STOR"] = resource_name(gen_VRE_STOR)
@@ -1174,7 +1237,7 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
# Zones resources are located in
zones = zone_id(gen)
-
+
# Resource identifiers by zone (just zones in resource order + resource and zone concatenated)
inputs["R_ZONES"] = zones
inputs["RESOURCE_ZONES"] = inputs["RESOURCE_NAMES"] .* "_z" .* string.(zones)
@@ -1185,7 +1248,7 @@ function add_resources_to_input_data!(inputs::Dict, setup::Dict, case_path::Abst
inputs["HAS_FUEL"] = union(inputs["HAS_FUEL"], inputs["MULTI_FUELS"])
sort!(inputs["HAS_FUEL"])
end
-
+
inputs["RESOURCES"] = gen
return nothing
end
@@ -1205,10 +1268,11 @@ function summary(rs::Vector{<:AbstractResource})
println(repeat("-", line_width))
println("\tResource type \t\tNumber of resources")
println(repeat("=", line_width))
- for r_type ∈ resource_types
+ for r_type in resource_types
num_rs = length(rs[nameof.(typeof.(rs)) .== r_type])
if num_rs > 0
- r_type ∉ keys(rs_summary_names) && error("Resource type $r_type not found in summary map. Please add it to the map.")
+ r_type ∉ keys(rs_summary_names) &&
+ error("Resource type $r_type not found in summary map. Please add it to the map.")
println("\t", rs_summary_names[r_type], "\t\t", num_rs)
end
end
@@ -1232,11 +1296,14 @@ This function loads resources data from the resources_path folder and create the
Raises:
DeprecationWarning: If the `Generators_data.csv` file is found, a deprecation warning is issued, together with an error message.
"""
-function load_resources_data!(inputs::Dict, setup::Dict, case_path::AbstractString, resources_path::AbstractString)
+function load_resources_data!(inputs::Dict,
+ setup::Dict,
+ case_path::AbstractString,
+ resources_path::AbstractString)
if isfile(joinpath(case_path, "Generators_data.csv"))
msg = "The `Generators_data.csv` file was deprecated in release v0.4. " *
- "Please use the new interface for generators creation, and see the documentation for additional details."
- Base.depwarn(msg, :load_resources_data!, force=true)
+ "Please use the new interface for generators creation, and see the documentation for additional details."
+ Base.depwarn(msg, :load_resources_data!, force = true)
error("Exiting GenX...")
end
# create vector of resources from dataframes
@@ -1249,7 +1316,7 @@ function load_resources_data!(inputs::Dict, setup::Dict, case_path::AbstractStri
     # read module files and add module-related attributes to resource dataframe
add_modules_to_resources!(resources, setup, resources_path)
-
+
# add resources information to inputs dict
add_resources_to_input_data!(inputs, setup, case_path, resources)
@@ -1264,36 +1331,38 @@ end
Function for reading input parameters related to multi fuels
"""
-function load_multi_fuels_data!(inputs::Dict, gen::Vector{<:AbstractResource}, setup::Dict, path::AbstractString)
-
- inputs["NUM_FUELS"] = num_fuels.(gen) # Number of fuels that this resource can use
- max_fuels = maximum(inputs["NUM_FUELS"])
- inputs["FUEL_COLS"] = [ Symbol(string("Fuel",f)) for f in 1:max_fuels ]
- fuel_types = [fuel_cols.(gen, tag=f) for f in 1:max_fuels]
- heat_rates = [heat_rate_cols.(gen, tag=f) for f in 1:max_fuels]
- max_cofire = [max_cofire_cols.(gen, tag=f) for f in 1:max_fuels]
- min_cofire = [min_cofire_cols.(gen, tag=f) for f in 1:max_fuels]
- max_cofire_start = [max_cofire_start_cols.(gen, tag=f) for f in 1:max_fuels]
- min_cofire_start = [min_cofire_start_cols.(gen, tag=f) for f in 1:max_fuels]
- inputs["HEAT_RATES"] = heat_rates
- inputs["MAX_COFIRE"] = max_cofire
- inputs["MIN_COFIRE"] = min_cofire
- inputs["MAX_COFIRE_START"] = max_cofire_start
- inputs["MIN_COFIRE_START"] = min_cofire_start
- inputs["FUEL_TYPES"] = fuel_types
- inputs["MAX_NUM_FUELS"] = max_fuels
+function load_multi_fuels_data!(inputs::Dict,
+ gen::Vector{<:AbstractResource},
+ setup::Dict,
+ path::AbstractString)
+ inputs["NUM_FUELS"] = num_fuels.(gen) # Number of fuels that this resource can use
+ max_fuels = maximum(inputs["NUM_FUELS"])
+ inputs["FUEL_COLS"] = [Symbol(string("Fuel", f)) for f in 1:max_fuels]
+ fuel_types = [fuel_cols.(gen, tag = f) for f in 1:max_fuels]
+ heat_rates = [heat_rate_cols.(gen, tag = f) for f in 1:max_fuels]
+ max_cofire = [max_cofire_cols.(gen, tag = f) for f in 1:max_fuels]
+ min_cofire = [min_cofire_cols.(gen, tag = f) for f in 1:max_fuels]
+ max_cofire_start = [max_cofire_start_cols.(gen, tag = f) for f in 1:max_fuels]
+ min_cofire_start = [min_cofire_start_cols.(gen, tag = f) for f in 1:max_fuels]
+ inputs["HEAT_RATES"] = heat_rates
+ inputs["MAX_COFIRE"] = max_cofire
+ inputs["MIN_COFIRE"] = min_cofire
+ inputs["MAX_COFIRE_START"] = max_cofire_start
+ inputs["MIN_COFIRE_START"] = min_cofire_start
+ inputs["FUEL_TYPES"] = fuel_types
+ inputs["MAX_NUM_FUELS"] = max_fuels
inputs["MAX_NUM_FUELS"] = max_fuels
- # check whether non-zero heat rates are used for resources that only use a single fuel
- for f in 1:max_fuels
- for hr in heat_rates[f][inputs["SINGLE_FUEL"]]
- if hr > 0
- error("Heat rates for multi fuels must be zero when only one fuel is used")
- end
- end
- end
- # do not allow the multi-fuel option when piece-wise heat rates are used
+ # check whether non-zero heat rates are used for resources that only use a single fuel
+ for f in 1:max_fuels
+ for hr in heat_rates[f][inputs["SINGLE_FUEL"]]
+ if hr > 0
+ error("Heat rates for multi fuels must be zero when only one fuel is used")
+ end
+ end
+ end
+ # do not allow the multi-fuel option when piece-wise heat rates are used
if haskey(inputs, "THERM_COMMIT_PWFU") && !isempty(inputs["THERM_COMMIT_PWFU"])
- error("Multi-fuel option is not available when piece-wise heat rates are used. Please remove multi fuels to avoid this error.")
- end
+ error("Multi-fuel option is not available when piece-wise heat rates are used. Please remove multi fuels to avoid this error.")
+ end
end
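
A minimal sketch of the per-fuel column construction above, assuming a hypothetical resource that can burn up to three fuels:

    max_fuels = 3
    fuel_cols = [Symbol(string("Fuel", f)) for f in 1:max_fuels]
    @assert fuel_cols == [:Fuel1, :Fuel2, :Fuel3]
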
diff --git a/src/load_inputs/load_vre_stor_variability.jl b/src/load_inputs/load_vre_stor_variability.jl
index 591d2d9876..188780c6ec 100644
--- a/src/load_inputs/load_vre_stor_variability.jl
+++ b/src/load_inputs/load_vre_stor_variability.jl
@@ -7,39 +7,41 @@ Read input parameters related to hourly maximum capacity factors for the solar P
"""
function load_vre_stor_variability!(setup::Dict, path::AbstractString, inputs::Dict)
- # Hourly capacity factors
+ # Hourly capacity factors
TDR_directory = joinpath(path, setup["TimeDomainReductionFolder"])
# if TDR is used, my_dir = TDR_directory, else my_dir = "system"
my_dir = get_systemfiles_path(setup, TDR_directory, path)
-
- filename1 = "Vre_and_stor_solar_variability.csv"
- vre_stor_solar = load_dataframe(joinpath(my_dir, filename1))
- filename2 = "Vre_and_stor_wind_variability.csv"
- vre_stor_wind = load_dataframe(joinpath(my_dir, filename2))
+ filename1 = "Vre_and_stor_solar_variability.csv"
+ vre_stor_solar = load_dataframe(joinpath(my_dir, filename1))
- all_resources = inputs["RESOURCE_NAMES"]
+ filename2 = "Vre_and_stor_wind_variability.csv"
+ vre_stor_wind = load_dataframe(joinpath(my_dir, filename2))
- function ensure_column_zeros!(vre_stor_df, all_resources)
- existing_variability = names(vre_stor_df)
- for r in all_resources
- if r ∉ existing_variability
- ensure_column!(vre_stor_df, r, 0.0)
- end
- end
- end
+ all_resources = inputs["RESOURCE_NAMES"]
- ensure_column_zeros!(vre_stor_solar, all_resources)
- ensure_column_zeros!(vre_stor_wind, all_resources)
+ function ensure_column_zeros!(vre_stor_df, all_resources)
+ existing_variability = names(vre_stor_df)
+ for r in all_resources
+ if r ∉ existing_variability
+ ensure_column!(vre_stor_df, r, 0.0)
+ end
+ end
+ end
- # Reorder DataFrame to R_ID order (order provided in Vre_and_stor_data.csv)
- select!(vre_stor_solar, [:Time_Index; Symbol.(all_resources) ])
- select!(vre_stor_wind, [:Time_Index; Symbol.(all_resources) ])
+ ensure_column_zeros!(vre_stor_solar, all_resources)
+ ensure_column_zeros!(vre_stor_wind, all_resources)
- # Maximum power output and variability of each energy resource
- inputs["pP_Max_Solar"] = transpose(Matrix{Float64}(vre_stor_solar[1:inputs["T"],2:(inputs["G"]+1)]))
- inputs["pP_Max_Wind"] = transpose(Matrix{Float64}(vre_stor_wind[1:inputs["T"],2:(inputs["G"]+1)]))
+ # Reorder DataFrame to R_ID order (order provided in Vre_and_stor_data.csv)
+ select!(vre_stor_solar, [:Time_Index; Symbol.(all_resources)])
+ select!(vre_stor_wind, [:Time_Index; Symbol.(all_resources)])
- println(filename1 * " Successfully Read!")
- println(filename2 * " Successfully Read!")
+ # Maximum power output and variability of each energy resource
+ inputs["pP_Max_Solar"] = transpose(Matrix{Float64}(vre_stor_solar[1:inputs["T"],
+ 2:(inputs["G"] + 1)]))
+ inputs["pP_Max_Wind"] = transpose(Matrix{Float64}(vre_stor_wind[1:inputs["T"],
+ 2:(inputs["G"] + 1)]))
+
+ println(filename1 * " Successfully Read!")
+ println(filename2 * " Successfully Read!")
end
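
A standalone sketch of the slicing and transpose applied to the variability tables above, using plain matrices instead of DataFrames and toy dimensions:

    T, G = 4, 2
    tbl = [collect(1.0:T) rand(T, G)]    # column 1 = Time_Index, columns 2:G+1 = resources
    pP_Max = transpose(Matrix{Float64}(tbl[1:T, 2:(G + 1)]))
    @assert size(pP_Max) == (G, T)       # rows index resources, columns index hours
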
diff --git a/src/model/core/co2.jl b/src/model/core/co2.jl
index 0b4f861bba..95c3dd27de 100644
--- a/src/model/core/co2.jl
+++ b/src/model/core/co2.jl
@@ -51,7 +51,6 @@ eEmissionsCaptureByPlant_{g,t} = CO2\_Capture\_Fraction_y * vFuel_{y,t} * CO2_{
"""
function co2!(EP::Model, inputs::Dict)
-
println("CO2 Module")
gen = inputs["RESOURCES"]
@@ -66,65 +65,76 @@ function co2!(EP::Model, inputs::Dict)
omega = inputs["omega"]
if !isempty(MULTI_FUELS)
max_fuels = inputs["MAX_NUM_FUELS"]
- end
+ end
### Expressions ###
# CO2 emissions from power plants in "Generators_data.csv"
     # If all the CO2 capture fractions from Generators_data are zero, the CO2 emissions from thermal generators are determined by fuel consumption times CO2 content per MMBTU
if isempty(CCS)
- @expression(EP, eEmissionsByPlant[y=1:G, t=1:T],
+ @expression(EP, eEmissionsByPlant[y = 1:G, t = 1:T],
if y in SINGLE_FUEL
- ((1-biomass(gen[y])) *(EP[:vFuel][y, t] + EP[:vStartFuel][y, t]) * fuel_CO2[fuel(gen[y])])
+ ((1 - biomass(gen[y])) * (EP[:vFuel][y, t] + EP[:vStartFuel][y, t]) *
+ fuel_CO2[fuel(gen[y])])
else
- sum(((1-biomass(gen[y])) *(EP[:vMulFuels][y, i, t] + EP[:vMulStartFuels][y, i, t]) * fuel_CO2[fuel_cols(gen[y], tag=i)]) for i = 1:max_fuels)
- end)
- else
+ sum(((1 - biomass(gen[y])) *
+ (EP[:vMulFuels][y, i, t] + EP[:vMulStartFuels][y, i, t]) *
+ fuel_CO2[fuel_cols(gen[y], tag = i)]) for i in 1:max_fuels)
+ end)
+ else
@info "Using the CO2 module to determine the CO2 emissions of CCS-equipped plants"
         # CO2_Capture_Fraction refers to the CO2 capture rate of CCS-equipped power plants at a steady state
         # CO2_Capture_Fraction_Startup refers to the CO2 capture rate of CCS-equipped power plants during startup events
- @expression(EP, eEmissionsByPlant[y=1:G, t=1:T],
+ @expression(EP, eEmissionsByPlant[y = 1:G, t = 1:T],
if y in SINGLE_FUEL
- (1-biomass(gen[y]) - co2_capture_fraction(gen[y])) * EP[:vFuel][y, t] * fuel_CO2[fuel(gen[y])]+
- (1-biomass(gen[y]) - co2_capture_fraction_startup(gen[y])) * EP[:eStartFuel][y, t] * fuel_CO2[fuel(gen[y])]
+ (1 - biomass(gen[y]) - co2_capture_fraction(gen[y])) * EP[:vFuel][y, t] *
+ fuel_CO2[fuel(gen[y])] +
+ (1 - biomass(gen[y]) - co2_capture_fraction_startup(gen[y])) *
+ EP[:eStartFuel][y, t] * fuel_CO2[fuel(gen[y])]
else
- sum((1-biomass(gen[y]) - co2_capture_fraction(gen[y])) * EP[:vMulFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag=i)] for i = 1:max_fuels)+
- sum((1-biomass(gen[y]) - co2_capture_fraction_startup(gen[y])) * EP[:vMulStartFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag=i)] for i = 1:max_fuels)
+ sum((1 - biomass(gen[y]) - co2_capture_fraction(gen[y])) *
+ EP[:vMulFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag = i)]
+ for i in 1:max_fuels) +
+ sum((1 - biomass(gen[y]) - co2_capture_fraction_startup(gen[y])) *
+ EP[:vMulStartFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag = i)]
+ for i in 1:max_fuels)
end)
# CO2 captured from power plants in "Generators_data.csv"
- @expression(EP, eEmissionsCaptureByPlant[y in CCS, t=1:T],
+ @expression(EP, eEmissionsCaptureByPlant[y in CCS, t = 1:T],
if y in SINGLE_FUEL
- co2_capture_fraction(gen[y]) * EP[:vFuel][y, t] * fuel_CO2[fuel(gen[y])]+
- co2_capture_fraction_startup(gen[y]) * EP[:eStartFuel][y, t] * fuel_CO2[fuel(gen[y])]
+ co2_capture_fraction(gen[y]) * EP[:vFuel][y, t] * fuel_CO2[fuel(gen[y])] +
+ co2_capture_fraction_startup(gen[y]) * EP[:eStartFuel][y, t] *
+ fuel_CO2[fuel(gen[y])]
else
- sum(co2_capture_fraction(gen[y]) * EP[:vMulFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag=i)] for i = 1:max_fuels)+
- sum(co2_capture_fraction_startup(gen[y]) * EP[:vMulStartFuels][y, i, t] * fuel_CO2[fuel_cols(gen[y], tag=i)] for i = 1:max_fuels)
+ sum(co2_capture_fraction(gen[y]) * EP[:vMulFuels][y, i, t] *
+ fuel_CO2[fuel_cols(gen[y], tag = i)] for i in 1:max_fuels) +
+ sum(co2_capture_fraction_startup(gen[y]) * EP[:vMulStartFuels][y, i, t] *
+ fuel_CO2[fuel_cols(gen[y], tag = i)] for i in 1:max_fuels)
end)
- @expression(EP, eEmissionsCaptureByPlantYear[y in CCS],
- sum(omega[t] * eEmissionsCaptureByPlant[y, t]
+ @expression(EP, eEmissionsCaptureByPlantYear[y in CCS],
+ sum(omega[t] * eEmissionsCaptureByPlant[y, t]
for t in 1:T))
# add CO2 sequestration cost to objective function
         # when the scale factor is on, tCO2/MWh => kt CO2/GWh
- @expression(EP, ePlantCCO2Sequestration[y in CCS],
- sum(omega[t] * eEmissionsCaptureByPlant[y, t] *
+ @expression(EP, ePlantCCO2Sequestration[y in CCS],
+ sum(omega[t] * eEmissionsCaptureByPlant[y, t] *
ccs_disposal_cost_per_metric_ton(gen[y]) for t in 1:T))
-
- @expression(EP, eZonalCCO2Sequestration[z=1:Z],
- sum(ePlantCCO2Sequestration[y]
- for y in intersect(resources_in_zone_by_rid(gen,z), CCS)))
-
- @expression(EP, eTotaleCCO2Sequestration,
+
+ @expression(EP, eZonalCCO2Sequestration[z = 1:Z],
+ sum(ePlantCCO2Sequestration[y]
+ for y in intersect(resources_in_zone_by_rid(gen, z), CCS)))
+
+ @expression(EP, eTotaleCCO2Sequestration,
sum(eZonalCCO2Sequestration[z] for z in 1:Z))
-
+
add_to_expression!(EP[:eObj], EP[:eTotaleCCO2Sequestration])
end
# emissions by zone
- @expression(EP, eEmissionsByZone[z = 1:Z, t = 1:T],
- sum(eEmissionsByPlant[y, t] for y in resources_in_zone_by_rid(gen,z)))
+ @expression(EP, eEmissionsByZone[z = 1:Z, t = 1:T],
+ sum(eEmissionsByPlant[y, t] for y in resources_in_zone_by_rid(gen, z)))
return EP
-
end
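
A worked sketch of the emissions accounting above for a single-fuel, CCS-equipped plant at steady state, with assumed toy numbers (the CO2 content is only approximate):

    fuel_mmbtu = 1_000.0      # fuel burned in hour t (vFuel)
    co2_per_mmbtu = 0.053     # tCO2/MMBTU, roughly natural gas
    biomass = 0.0
    capture_fraction = 0.9    # CO2_Capture_Fraction
    emitted = (1 - biomass - capture_fraction) * fuel_mmbtu * co2_per_mmbtu
    captured = capture_fraction * fuel_mmbtu * co2_per_mmbtu
    @assert emitted + captured ≈ (1 - biomass) * fuel_mmbtu * co2_per_mmbtu
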
diff --git a/src/model/core/discharge/discharge.jl b/src/model/core/discharge/discharge.jl
index 6955bfffb1..d67881c942 100644
--- a/src/model/core/discharge/discharge.jl
+++ b/src/model/core/discharge/discharge.jl
@@ -11,40 +11,40 @@ This module additionally defines contributions to the objective function from va
```
"""
function discharge!(EP::Model, inputs::Dict, setup::Dict)
+ println("Discharge Module")
- println("Discharge Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps
+ ### Variables ###
- ### Variables ###
+ # Energy injected into the grid by resource "y" at hour "t"
+ @variable(EP, vP[y = 1:G, t = 1:T]>=0)
- # Energy injected into the grid by resource "y" at hour "t"
- @variable(EP, vP[y=1:G,t=1:T] >=0);
+ ### Expressions ###
- ### Expressions ###
+ ## Objective Function Expressions ##
- ## Objective Function Expressions ##
+ # Variable costs of "generation" for resource "y" during hour "t" = variable O&M
+ @expression(EP,
+ eCVar_out[y = 1:G, t = 1:T],
+ (inputs["omega"][t]*(var_om_cost_per_mwh(gen[y]) * vP[y, t])))
+ # Sum individual resource contributions to variable discharging costs to get total variable discharging costs
+ @expression(EP, eTotalCVarOutT[t = 1:T], sum(eCVar_out[y, t] for y in 1:G))
+ @expression(EP, eTotalCVarOut, sum(eTotalCVarOutT[t] for t in 1:T))
- # Variable costs of "generation" for resource "y" during hour "t" = variable O&M
- @expression(EP, eCVar_out[y=1:G,t=1:T], (inputs["omega"][t]*(var_om_cost_per_mwh(gen[y])*vP[y,t])))
- # Sum individual resource contributions to variable discharging costs to get total variable discharging costs
- @expression(EP, eTotalCVarOutT[t=1:T], sum(eCVar_out[y,t] for y in 1:G))
- @expression(EP, eTotalCVarOut, sum(eTotalCVarOutT[t] for t in 1:T))
-
- # Add total variable discharging cost contribution to the objective function
- add_to_expression!(EP[:eObj], eTotalCVarOut)
-
- # ESR Policy
- if setup["EnergyShareRequirement"] >= 1
-
- @expression(EP, eESRDischarge[ESR=1:inputs["nESR"]],
- + sum(inputs["omega"][t] * esr(gen[y],tag=ESR) * EP[:vP][y,t] for y=ids_with_policy(gen, esr, tag=ESR), t=1:T)
- - sum(inputs["dfESR"][z,ESR]*inputs["omega"][t]*inputs["pD"][t,z] for t=1:T, z=findall(x->x>0,inputs["dfESR"][:,ESR]))
- )
- add_similar_to_expression!(EP[:eESR], eESRDischarge)
- end
+ # Add total variable discharging cost contribution to the objective function
+ add_to_expression!(EP[:eObj], eTotalCVarOut)
+ # ESR Policy
+ if setup["EnergyShareRequirement"] >= 1
+ @expression(EP, eESRDischarge[ESR = 1:inputs["nESR"]],
+ +sum(inputs["omega"][t] * esr(gen[y], tag = ESR) * EP[:vP][y, t]
+ for y in ids_with_policy(gen, esr, tag = ESR), t in 1:T)
+ -sum(inputs["dfESR"][z, ESR] * inputs["omega"][t] * inputs["pD"][t, z]
+ for t in 1:T, z in findall(x -> x > 0, inputs["dfESR"][:, ESR])))
+ add_similar_to_expression!(EP[:eESR], eESRDischarge)
+ end
end
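
A minimal sketch of the weighted variable-cost term eCVar_out above, with assumed toy numbers:

    omega_t = 8.0           # weight (hours represented) of time step t
    var_om_per_mwh = 3.5    # variable O&M cost ($/MWh)
    vP_yt = 120.0           # power injected by resource y at time t (MW)
    eCVar_out_yt = omega_t * (var_om_per_mwh * vP_yt)
    @assert eCVar_out_yt == 3360.0
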
diff --git a/src/model/core/discharge/investment_discharge.jl b/src/model/core/discharge/investment_discharge.jl
index 1bd1a5a07e..2db459fcb8 100755
--- a/src/model/core/discharge/investment_discharge.jl
+++ b/src/model/core/discharge/investment_discharge.jl
@@ -33,136 +33,150 @@ In addition, this function adds investment and fixed O\&M related costs related
```
"""
function investment_discharge!(EP::Model, inputs::Dict, setup::Dict)
+ println("Investment Discharge Module")
+ MultiStage = setup["MultiStage"]
- println("Investment Discharge Module")
- MultiStage = setup["MultiStage"]
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
-
- NEW_CAP = inputs["NEW_CAP"] # Set of all resources eligible for new capacity
- RET_CAP = inputs["RET_CAP"] # Set of all resources eligible for capacity retirements
- COMMIT = inputs["COMMIT"] # Set of all resources eligible for unit commitment
- RETROFIT_CAP = inputs["RETROFIT_CAP"] # Set of all resources being retrofitted
+ NEW_CAP = inputs["NEW_CAP"] # Set of all resources eligible for new capacity
+ RET_CAP = inputs["RET_CAP"] # Set of all resources eligible for capacity retirements
+ COMMIT = inputs["COMMIT"] # Set of all resources eligible for unit commitment
+ RETROFIT_CAP = inputs["RETROFIT_CAP"] # Set of all resources being retrofitted
- ### Variables ###
+ ### Variables ###
- # Retired capacity of resource "y" from existing capacity
- @variable(EP, vRETCAP[y in RET_CAP] >= 0);
+ # Retired capacity of resource "y" from existing capacity
+ @variable(EP, vRETCAP[y in RET_CAP]>=0)
# New installed capacity of resource "y"
- @variable(EP, vCAP[y in NEW_CAP] >= 0);
-
- if MultiStage == 1
- @variable(EP, vEXISTINGCAP[y=1:G] >= 0);
- end
-
- # Being retrofitted capacity of resource y
- @variable(EP, vRETROFITCAP[y in RETROFIT_CAP] >= 0);
-
-
- ### Expressions ###
-
- if MultiStage == 1
- @expression(EP, eExistingCap[y in 1:G], vEXISTINGCAP[y])
- else
- @expression(EP, eExistingCap[y in 1:G], existing_cap_mw(gen[y]))
- end
-
- @expression(EP, eTotalCap[y in 1:G],
- if y in intersect(NEW_CAP, RET_CAP, RETROFIT_CAP) # Resources eligible for new capacity, retirements and being retrofitted
- if y in COMMIT
- eExistingCap[y] + cap_size(gen[y])*(EP[:vCAP][y] - EP[:vRETCAP][y] - EP[:vRETROFITCAP][y])
- else
- eExistingCap[y] + EP[:vCAP][y] - EP[:vRETCAP][y] - EP[:vRETROFITCAP][y]
- end
- elseif y in intersect(setdiff(RET_CAP, NEW_CAP), setdiff(RET_CAP, RETROFIT_CAP)) # Resources eligible for only capacity retirements
- if y in COMMIT
- eExistingCap[y] - cap_size(gen[y])*EP[:vRETCAP][y]
- else
- eExistingCap[y] - EP[:vRETCAP][y]
- end
- elseif y in setdiff(intersect(RET_CAP, NEW_CAP), RETROFIT_CAP) # Resources eligible for retirement and new capacity
- if y in COMMIT
- eExistingCap[y] + cap_size(gen[y])* (EP[:vCAP][y] - EP[:vRETCAP][y])
- else
- eExistingCap[y] + EP[:vCAP][y] - EP[:vRETCAP][y]
- end
- elseif y in setdiff(intersect(RET_CAP, RETROFIT_CAP), NEW_CAP) # Resources eligible for retirement and retrofitting
- if y in COMMIT
- eExistingCap[y] - cap_size(gen[y]) * (EP[:vRETROFITCAP][y] + EP[:vRETCAP][y])
- else
- eExistingCap[y] - (EP[:vRETROFITCAP][y] + EP[:vRETCAP][y])
- end
- elseif y in intersect(setdiff(NEW_CAP, RET_CAP),setdiff(NEW_CAP, RETROFIT_CAP)) # Resources eligible for only new capacity
- if y in COMMIT
- eExistingCap[y] + cap_size(gen[y])*EP[:vCAP][y]
- else
- eExistingCap[y] + EP[:vCAP][y]
- end
- else # Resources not eligible for new capacity or retirement
- eExistingCap[y] + EP[:vZERO]
- end
-)
-
- ### Need editting ##
- @expression(EP, eCFix[y in 1:G],
- if y in NEW_CAP # Resources eligible for new capacity (Non-Retrofit)
- if y in COMMIT
- inv_cost_per_mwyr(gen[y])*cap_size(gen[y])*vCAP[y] + fixed_om_cost_per_mwyr(gen[y])*eTotalCap[y]
- else
- inv_cost_per_mwyr(gen[y])*vCAP[y] + fixed_om_cost_per_mwyr(gen[y])*eTotalCap[y]
- end
- else
- fixed_om_cost_per_mwyr(gen[y])*eTotalCap[y]
- end
-)
- # Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFix, sum(EP[:eCFix][y] for y in 1:G))
-
- # Add term to objective function expression
- if MultiStage == 1
- # OPEX multiplier scales fixed costs to account for multiple years between two model stages
- # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
- # and we have already accounted for multiple years between stages for fixed costs.
- add_to_expression!(EP[:eObj], 1/inputs["OPEXMULT"], eTotalCFix)
- else
- add_to_expression!(EP[:eObj], eTotalCFix)
- end
-
- ### Constratints ###
-
- if MultiStage == 1
- # Existing capacity variable is equal to existing capacity specified in the input file
- @constraint(EP, cExistingCap[y in 1:G], EP[:vEXISTINGCAP][y] == existing_cap_mw(gen[y]))
- end
-
- ## Constraints on retirements and capacity additions
- # Cannot retire more capacity than existing capacity
- @constraint(EP, cMaxRetNoCommit[y in setdiff(RET_CAP,COMMIT)], vRETCAP[y] <= eExistingCap[y])
- @constraint(EP, cMaxRetCommit[y in intersect(RET_CAP,COMMIT)], cap_size(gen[y])*vRETCAP[y] <= eExistingCap[y])
- @constraint(EP, cMaxRetroNoCommit[y in setdiff(RETROFIT_CAP,COMMIT)], vRETROFITCAP[y] + vRETCAP[y] <= eExistingCap[y])
- @constraint(EP, cMaxRetroCommit[y in intersect(RETROFIT_CAP,COMMIT)], cap_size(gen[y]) * (vRETROFITCAP[y] + vRETCAP[y]) <= eExistingCap[y])
-
- ## Constraints on new built capacity
- # Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- MAX_CAP = ids_with_positive(gen, max_cap_mw)
- @constraint(EP, cMaxCap[y in MAX_CAP], eTotalCap[y] <= max_cap_mw(gen[y]))
-
- # Constraint on minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- MIN_CAP = ids_with_positive(gen, min_cap_mw)
- @constraint(EP, cMinCap[y in MIN_CAP], eTotalCap[y] >= min_cap_mw(gen[y]))
-
- if setup["MinCapReq"] == 1
- @expression(EP, eMinCapResInvest[mincap = 1:inputs["NumberOfMinCapReqs"]], sum(EP[:eTotalCap][y] for y in ids_with_policy(gen, min_cap, tag=mincap)))
- add_similar_to_expression!(EP[:eMinCapRes], eMinCapResInvest)
- end
-
- if setup["MaxCapReq"] == 1
- @expression(EP, eMaxCapResInvest[maxcap = 1:inputs["NumberOfMaxCapReqs"]], sum(EP[:eTotalCap][y] for y in ids_with_policy(gen, max_cap, tag=maxcap)))
- add_similar_to_expression!(EP[:eMaxCapRes], eMaxCapResInvest)
- end
+ @variable(EP, vCAP[y in NEW_CAP]>=0)
+
+ if MultiStage == 1
+ @variable(EP, vEXISTINGCAP[y = 1:G]>=0)
+ end
+
+    # Capacity of resource y being retrofitted
+ @variable(EP, vRETROFITCAP[y in RETROFIT_CAP]>=0)
+
+ ### Expressions ###
+
+ if MultiStage == 1
+ @expression(EP, eExistingCap[y in 1:G], vEXISTINGCAP[y])
+ else
+ @expression(EP, eExistingCap[y in 1:G], existing_cap_mw(gen[y]))
+ end
+
+ @expression(EP, eTotalCap[y in 1:G],
+ if y in intersect(NEW_CAP, RET_CAP, RETROFIT_CAP) # Resources eligible for new capacity, retirements and being retrofitted
+ if y in COMMIT
+ eExistingCap[y] +
+ cap_size(gen[y]) * (EP[:vCAP][y] - EP[:vRETCAP][y] - EP[:vRETROFITCAP][y])
+ else
+ eExistingCap[y] + EP[:vCAP][y] - EP[:vRETCAP][y] - EP[:vRETROFITCAP][y]
+ end
+ elseif y in intersect(setdiff(RET_CAP, NEW_CAP), setdiff(RET_CAP, RETROFIT_CAP)) # Resources eligible for only capacity retirements
+ if y in COMMIT
+ eExistingCap[y] - cap_size(gen[y]) * EP[:vRETCAP][y]
+ else
+ eExistingCap[y] - EP[:vRETCAP][y]
+ end
+ elseif y in setdiff(intersect(RET_CAP, NEW_CAP), RETROFIT_CAP) # Resources eligible for retirement and new capacity
+ if y in COMMIT
+ eExistingCap[y] + cap_size(gen[y]) * (EP[:vCAP][y] - EP[:vRETCAP][y])
+ else
+ eExistingCap[y] + EP[:vCAP][y] - EP[:vRETCAP][y]
+ end
+ elseif y in setdiff(intersect(RET_CAP, RETROFIT_CAP), NEW_CAP) # Resources eligible for retirement and retrofitting
+ if y in COMMIT
+ eExistingCap[y] -
+ cap_size(gen[y]) * (EP[:vRETROFITCAP][y] + EP[:vRETCAP][y])
+ else
+ eExistingCap[y] - (EP[:vRETROFITCAP][y] + EP[:vRETCAP][y])
+ end
+ elseif y in intersect(setdiff(NEW_CAP, RET_CAP), setdiff(NEW_CAP, RETROFIT_CAP)) # Resources eligible for only new capacity
+ if y in COMMIT
+ eExistingCap[y] + cap_size(gen[y]) * EP[:vCAP][y]
+ else
+ eExistingCap[y] + EP[:vCAP][y]
+ end
+ else # Resources not eligible for new capacity or retirement
+ eExistingCap[y] + EP[:vZERO]
+ end)
+
+    ### Needs editing ###
+ @expression(EP, eCFix[y in 1:G],
+ if y in NEW_CAP # Resources eligible for new capacity (Non-Retrofit)
+ if y in COMMIT
+ inv_cost_per_mwyr(gen[y]) * cap_size(gen[y]) * vCAP[y] +
+ fixed_om_cost_per_mwyr(gen[y]) * eTotalCap[y]
+ else
+ inv_cost_per_mwyr(gen[y]) * vCAP[y] +
+ fixed_om_cost_per_mwyr(gen[y]) * eTotalCap[y]
+ end
+ else
+ fixed_om_cost_per_mwyr(gen[y]) * eTotalCap[y]
+ end)
+ # Sum individual resource contributions to fixed costs to get total fixed costs
+ @expression(EP, eTotalCFix, sum(EP[:eCFix][y] for y in 1:G))
+
+ # Add term to objective function expression
+ if MultiStage == 1
+ # OPEX multiplier scales fixed costs to account for multiple years between two model stages
+ # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
+ # and we have already accounted for multiple years between stages for fixed costs.
+ add_to_expression!(EP[:eObj], 1 / inputs["OPEXMULT"], eTotalCFix)
+ else
+ add_to_expression!(EP[:eObj], eTotalCFix)
+ end
+
+    ### Constraints ###
+
+ if MultiStage == 1
+ # Existing capacity variable is equal to existing capacity specified in the input file
+ @constraint(EP,
+ cExistingCap[y in 1:G],
+ EP[:vEXISTINGCAP][y]==existing_cap_mw(gen[y]))
+ end
+
+ ## Constraints on retirements and capacity additions
+ # Cannot retire more capacity than existing capacity
+ @constraint(EP,
+ cMaxRetNoCommit[y in setdiff(RET_CAP, COMMIT)],
+ vRETCAP[y]<=eExistingCap[y])
+ @constraint(EP,
+ cMaxRetCommit[y in intersect(RET_CAP, COMMIT)],
+ cap_size(gen[y]) * vRETCAP[y]<=eExistingCap[y])
+ @constraint(EP,
+ cMaxRetroNoCommit[y in setdiff(RETROFIT_CAP, COMMIT)],
+ vRETROFITCAP[y] + vRETCAP[y]<=eExistingCap[y])
+ @constraint(EP,
+ cMaxRetroCommit[y in intersect(RETROFIT_CAP, COMMIT)],
+ cap_size(gen[y]) * (vRETROFITCAP[y] + vRETCAP[y])<=eExistingCap[y])
+
+    ## Constraints on newly built capacity
+ # Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW, leading to infeasibility
+ MAX_CAP = ids_with_positive(gen, max_cap_mw)
+ @constraint(EP, cMaxCap[y in MAX_CAP], eTotalCap[y]<=max_cap_mw(gen[y]))
+
+ # Constraint on minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW, leading to infeasibility
+ MIN_CAP = ids_with_positive(gen, min_cap_mw)
+ @constraint(EP, cMinCap[y in MIN_CAP], eTotalCap[y]>=min_cap_mw(gen[y]))
+
+ if setup["MinCapReq"] == 1
+ @expression(EP,
+ eMinCapResInvest[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(EP[:eTotalCap][y] for y in ids_with_policy(gen, min_cap, tag = mincap)))
+ add_similar_to_expression!(EP[:eMinCapRes], eMinCapResInvest)
+ end
+
+ if setup["MaxCapReq"] == 1
+ @expression(EP,
+ eMaxCapResInvest[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(EP[:eTotalCap][y] for y in ids_with_policy(gen, max_cap, tag = maxcap)))
+ add_similar_to_expression!(EP[:eMaxCapRes], eMaxCapResInvest)
+ end
end
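
A small sketch of one branch of the eTotalCap expression above, for a committable resource eligible for both new builds and retirements (assumed toy numbers):

    existing_cap_mw = 500.0
    cap_size_mw = 50.0          # Cap_Size of one unit
    vCAP, vRETCAP = 4.0, 2.0    # new and retired unit counts
    eTotalCap = existing_cap_mw + cap_size_mw * (vCAP - vRETCAP)
    @assert eTotalCap == 600.0
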
diff --git a/src/model/core/fuel.jl b/src/model/core/fuel.jl
index b8a8395ff2..253cc50985 100644
--- a/src/model/core/fuel.jl
+++ b/src/model/core/fuel.jl
@@ -80,7 +80,7 @@ vMulFuels_{y, i, t} <= vPower_{y,t} \times MaxCofire_{i}
"""
function fuel!(EP::Model, inputs::Dict, setup::Dict)
println("Fuel Module")
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
T = inputs["T"] # Number of time steps (hours)
Z = inputs["Z"] # Number of zones
@@ -89,17 +89,17 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
HAS_FUEL = inputs["HAS_FUEL"]
MULTI_FUELS = inputs["MULTI_FUELS"]
SINGLE_FUEL = inputs["SINGLE_FUEL"]
-
+
fuels = inputs["fuels"]
fuel_costs = inputs["fuel_costs"]
omega = inputs["omega"]
NUM_FUEL = length(fuels)
-
+
# create variable for fuel consumption for output
# for resources that only use a single fuel
- @variable(EP, vFuel[y in SINGLE_FUEL, t = 1:T] >= 0)
- @variable(EP, vStartFuel[y in SINGLE_FUEL, t = 1:T] >= 0)
+ @variable(EP, vFuel[y in SINGLE_FUEL, t = 1:T]>=0)
+ @variable(EP, vStartFuel[y in SINGLE_FUEL, t = 1:T]>=0)
# for resources that use multi fuels
# vMulFuels[y, f, t]: y - resource ID; f - fuel ID; t: time
@@ -108,71 +108,76 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
heat_rates = inputs["HEAT_RATES"]
min_cofire = inputs["MIN_COFIRE"]
max_cofire = inputs["MAX_COFIRE"]
- min_cofire_start =inputs["MIN_COFIRE_START"]
- max_cofire_start =inputs["MAX_COFIRE_START"]
-
- COFIRE_MAX = [findall(g -> max_cofire_cols(g, tag=i) < 1, gen[MULTI_FUELS]) for i in 1:max_fuels]
- COFIRE_MAX_START = [findall(g -> max_cofire_start_cols(g, tag=i) < 1, gen[MULTI_FUELS]) for i in 1:max_fuels]
- COFIRE_MIN = [findall(g -> min_cofire_cols(g, tag=i) > 0, gen[MULTI_FUELS]) for i in 1:max_fuels]
- COFIRE_MIN_START = [findall(g -> min_cofire_start_cols(g, tag=i) > 0, gen[MULTI_FUELS]) for i in 1:max_fuels]
-
- @variable(EP, vMulFuels[y in MULTI_FUELS, i = 1:max_fuels, t = 1:T] >= 0)
- @variable(EP, vMulStartFuels[y in MULTI_FUELS, i = 1:max_fuels, t = 1:T] >= 0)
- end
+ min_cofire_start = inputs["MIN_COFIRE_START"]
+ max_cofire_start = inputs["MAX_COFIRE_START"]
+
+ COFIRE_MAX = [findall(g -> max_cofire_cols(g, tag = i) < 1, gen[MULTI_FUELS])
+ for i in 1:max_fuels]
+ COFIRE_MAX_START = [findall(g -> max_cofire_start_cols(g, tag = i) < 1,
+ gen[MULTI_FUELS]) for i in 1:max_fuels]
+ COFIRE_MIN = [findall(g -> min_cofire_cols(g, tag = i) > 0, gen[MULTI_FUELS])
+ for i in 1:max_fuels]
+ COFIRE_MIN_START = [findall(g -> min_cofire_start_cols(g, tag = i) > 0,
+ gen[MULTI_FUELS]) for i in 1:max_fuels]
+
+ @variable(EP, vMulFuels[y in MULTI_FUELS, i = 1:max_fuels, t = 1:T]>=0)
+ @variable(EP, vMulStartFuels[y in MULTI_FUELS, i = 1:max_fuels, t = 1:T]>=0)
+ end
### Expressions ####
# Fuel consumed on start-up (MMBTU or kMMBTU (scaled))
# if unit commitment is modelled
@expression(EP, eStartFuel[y in 1:G, t = 1:T],
if y in THERM_COMMIT
- (cap_size(gen[y]) * EP[:vSTART][y, t] *
- start_fuel_mmbtu_per_mw(gen[y]))
+ (cap_size(gen[y]) * EP[:vSTART][y, t] *
+ start_fuel_mmbtu_per_mw(gen[y]))
else
0
end)
-
+
# time-series fuel consumption by plant
@expression(EP, ePlantFuel_generation[y in 1:G, t = 1:T],
if y in SINGLE_FUEL # for single fuel plants
EP[:vFuel][y, t]
else # for multi fuel plants
- sum(EP[:vMulFuels][y, i, t] for i in 1:max_fuels)
+ sum(EP[:vMulFuels][y, i, t] for i in 1:max_fuels)
end)
@expression(EP, ePlantFuel_start[y in 1:G, t = 1:T],
if y in SINGLE_FUEL # for single fuel plants
EP[:vStartFuel][y, t]
else # for multi fuel plants
- sum(EP[:vMulStartFuels][y, i, t] for i in 1:max_fuels)
+ sum(EP[:vMulStartFuels][y, i, t] for i in 1:max_fuels)
end)
# for multi-fuel resources
# annual fuel consumption by plant and fuel type
if !isempty(MULTI_FUELS)
- @expression(EP, ePlantFuelConsumptionYear_multi_generation[y in MULTI_FUELS, i in 1:max_fuels],
+ @expression(EP,
+ ePlantFuelConsumptionYear_multi_generation[y in MULTI_FUELS, i in 1:max_fuels],
sum(omega[t] * EP[:vMulFuels][y, i, t] for t in 1:T))
- @expression(EP, ePlantFuelConsumptionYear_multi_start[y in MULTI_FUELS, i in 1:max_fuels],
+ @expression(EP,
+ ePlantFuelConsumptionYear_multi_start[y in MULTI_FUELS, i in 1:max_fuels],
sum(omega[t] * EP[:vMulStartFuels][y, i, t] for t in 1:T))
- @expression(EP, ePlantFuelConsumptionYear_multi[y in MULTI_FUELS, i in 1:max_fuels],
- EP[:ePlantFuelConsumptionYear_multi_generation][y, i] + EP[:ePlantFuelConsumptionYear_multi_start][y, i])
+ @expression(EP, ePlantFuelConsumptionYear_multi[y in MULTI_FUELS, i in 1:max_fuels],
+ EP[:ePlantFuelConsumptionYear_multi_generation][y,
+ i]+EP[:ePlantFuelConsumptionYear_multi_start][y, i])
end
# fuel_cost is in $/MMBTU (M$/billion BTU if scaled)
# vFuel and eStartFuel is MMBTU (or billion BTU if scaled)
# eCFuel_start or eCFuel_out is $ or Million$
-
+
# Start up fuel cost
# for multi-fuel resources
if !isempty(MULTI_FUELS)
# time-series fuel consumption costs by plant and fuel type during startup
- @expression(EP, eCFuelOut_multi_start[y in MULTI_FUELS , i in 1:max_fuels, t = 1:T],
- fuel_costs[fuel_cols(gen[y], tag=i)][t] * EP[:vMulStartFuels][y, i, t]
- )
+ @expression(EP, eCFuelOut_multi_start[y in MULTI_FUELS, i in 1:max_fuels, t = 1:T],
+ fuel_costs[fuel_cols(gen[y], tag = i)][t]*EP[:vMulStartFuels][y, i, t])
        # annual plant level fuel cost by fuel type during startup
- @expression(EP, ePlantCFuelOut_multi_start[y in MULTI_FUELS, i in 1:max_fuels],
+ @expression(EP, ePlantCFuelOut_multi_start[y in MULTI_FUELS, i in 1:max_fuels],
sum(omega[t] * EP[:eCFuelOut_multi_start][y, i, t] for t in 1:T))
-
end
- @expression(EP, eCFuelStart[y = 1:G, t = 1:T],
+ @expression(EP, eCFuelStart[y = 1:G, t = 1:T],
if y in SINGLE_FUEL
(fuel_costs[fuel(gen[y])][t] * EP[:vStartFuel][y, t])
else
@@ -180,44 +185,40 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
end)
# plant level start-up fuel cost for output
- @expression(EP, ePlantCFuelStart[y = 1:G],
+ @expression(EP, ePlantCFuelStart[y = 1:G],
sum(omega[t] * EP[:eCFuelStart][y, t] for t in 1:T))
# zonal level total fuel cost for output
- @expression(EP, eZonalCFuelStart[z = 1:Z],
- sum(EP[:ePlantCFuelStart][y] for y in resources_in_zone_by_rid(gen,z)))
+ @expression(EP, eZonalCFuelStart[z = 1:Z],
+ sum(EP[:ePlantCFuelStart][y] for y in resources_in_zone_by_rid(gen, z)))
# Fuel cost for power generation
# for multi-fuel resources
if !isempty(MULTI_FUELS)
# time-series fuel consumption costs by plant and fuel type during generation
- @expression(EP, eCFuelOut_multi[y in MULTI_FUELS , i in 1:max_fuels, t = 1:T],
- fuel_costs[fuel_cols(gen[y], tag=i)][t] * EP[:vMulFuels][y,i,t]
- )
+ @expression(EP, eCFuelOut_multi[y in MULTI_FUELS, i in 1:max_fuels, t = 1:T],
+ fuel_costs[fuel_cols(gen[y], tag = i)][t]*EP[:vMulFuels][y, i, t])
# annual plant level fuel cost by fuel type during generation
- @expression(EP, ePlantCFuelOut_multi[y in MULTI_FUELS, i in 1:max_fuels],
+ @expression(EP, ePlantCFuelOut_multi[y in MULTI_FUELS, i in 1:max_fuels],
sum(omega[t] * EP[:eCFuelOut_multi][y, i, t] for t in 1:T))
-
end
- @expression(EP, eCFuelOut[y = 1:G, t = 1:T],
+ @expression(EP, eCFuelOut[y = 1:G, t = 1:T],
if y in SINGLE_FUEL
(fuel_costs[fuel(gen[y])][t] * EP[:vFuel][y, t])
else
sum(EP[:eCFuelOut_multi][y, i, t] for i in 1:max_fuels)
end)
    # plant level fuel cost for output
- @expression(EP, ePlantCFuelOut[y = 1:G],
+ @expression(EP, ePlantCFuelOut[y = 1:G],
sum(omega[t] * EP[:eCFuelOut][y, t] for t in 1:T))
# zonal level total fuel cost for output
- @expression(EP, eZonalCFuelOut[z = 1:Z],
- sum(EP[:ePlantCFuelOut][y] for y in resources_in_zone_by_rid(gen,z)))
-
+ @expression(EP, eZonalCFuelOut[z = 1:Z],
+ sum(EP[:ePlantCFuelOut][y] for y in resources_in_zone_by_rid(gen, z)))
# system level total fuel cost for output
@expression(EP, eTotalCFuelOut, sum(eZonalCFuelOut[z] for z in 1:Z))
@expression(EP, eTotalCFuelStart, sum(eZonalCFuelStart[z] for z in 1:Z))
-
add_to_expression!(EP[:eObj], EP[:eTotalCFuelOut] + EP[:eTotalCFuelStart])
#fuel consumption (MMBTU or Billion BTU)
@@ -225,40 +226,43 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
if !isempty(MULTI_FUELS)
@expression(EP, eFuelConsumption_multi[f in 1:NUM_FUEL, t in 1:T],
sum((EP[:vMulFuels][y, i, t] + EP[:vMulStartFuels][y, i, t]) #i: fuel id
- for i in 1:max_fuels,
- y in intersect(resource_id.(gen[fuel_cols.(gen, tag=i) .== string(fuels[f])]), MULTI_FUELS))
- )
+ for i in 1:max_fuels,
+ y in intersect(resource_id.(gen[fuel_cols.(gen, tag = i) .== string(fuels[f])]),
+ MULTI_FUELS)))
end
@expression(EP, eFuelConsumption_single[f in 1:NUM_FUEL, t in 1:T],
- sum(EP[:vFuel][y, t] + EP[:eStartFuel][y,t]
+ sum(EP[:vFuel][y, t] + EP[:eStartFuel][y, t]
for y in intersect(resources_with_fuel(gen, fuels[f]), SINGLE_FUEL)))
-
+
@expression(EP, eFuelConsumption[f in 1:NUM_FUEL, t in 1:T],
if !isempty(MULTI_FUELS)
- eFuelConsumption_multi[f, t] + eFuelConsumption_single[f,t]
+ eFuelConsumption_multi[f, t] + eFuelConsumption_single[f, t]
else
- eFuelConsumption_single[f,t]
+ eFuelConsumption_single[f, t]
end)
@expression(EP, eFuelConsumptionYear[f in 1:NUM_FUEL],
sum(omega[t] * EP[:eFuelConsumption][f, t] for t in 1:T))
-
### Constraint ###
### only apply constraint to generators with fuel type other than None
- @constraint(EP, cFuelCalculation_single[y in intersect(SINGLE_FUEL, setdiff(HAS_FUEL, THERM_COMMIT)), t = 1:T],
- EP[:vFuel][y, t] - EP[:vP][y, t] * heat_rate_mmbtu_per_mwh(gen[y]) == 0)
+ @constraint(EP,
+ cFuelCalculation_single[y in intersect(SINGLE_FUEL, setdiff(HAS_FUEL, THERM_COMMIT)),
+ t = 1:T],
+ EP[:vFuel][y, t] - EP[:vP][y, t] * heat_rate_mmbtu_per_mwh(gen[y])==0)
if !isempty(MULTI_FUELS)
- @constraint(EP, cFuelCalculation_multi[y in intersect(MULTI_FUELS, setdiff(HAS_FUEL, THERM_COMMIT)), t = 1:T],
- sum(EP[:vMulFuels][y, i, t]/heat_rates[i][y] for i in 1:max_fuels) - EP[:vP][y, t] == 0
- )
+ @constraint(EP,
+ cFuelCalculation_multi[y in intersect(MULTI_FUELS,
+ setdiff(HAS_FUEL, THERM_COMMIT)),
+ t = 1:T],
+ sum(EP[:vMulFuels][y, i, t] / heat_rates[i][y] for i in 1:max_fuels) -
+ EP[:vP][y, t]==0)
end
-
- if !isempty(THERM_COMMIT)
+ if !isempty(THERM_COMMIT)
# Only apply piecewise fuel consumption to thermal generators in THERM_COMMIT_PWFU set
THERM_COMMIT_PWFU = inputs["THERM_COMMIT_PWFU"]
        # segment for piecewise fuel usage
@@ -270,61 +274,74 @@ function fuel!(EP::Model, inputs::Dict, setup::Dict)
segment_intercept(y, seg) = PWFU_data[y, intercept_cols[seg]]
segment_slope(y, seg) = PWFU_data[y, slope_cols[seg]]
# constraint for piecewise fuel consumption
- @constraint(EP, PiecewiseFuelUsage[y in THERM_COMMIT_PWFU, t = 1:T, seg in segs],
- EP[:vFuel][y, t] >= (EP[:vP][y, t] * segment_slope(y, seg) +
- EP[:vCOMMIT][y, t] * segment_intercept(y, seg)))
+ @constraint(EP,
+ PiecewiseFuelUsage[y in THERM_COMMIT_PWFU, t = 1:T, seg in segs],
+ EP[:vFuel][y,
+ t]>=(EP[:vP][y, t] * segment_slope(y, seg) +
+ EP[:vCOMMIT][y, t] * segment_intercept(y, seg)))
end
-
+
# constraint for fuel consumption at a constant heat rate
- @constraint(EP, FuelCalculationCommit_single[y in intersect(setdiff(THERM_COMMIT,THERM_COMMIT_PWFU), SINGLE_FUEL), t = 1:T],
- EP[:vFuel][y, t] - EP[:vP][y, t] * heat_rate_mmbtu_per_mwh(gen[y]) == 0)
+ @constraint(EP,
+ FuelCalculationCommit_single[y in intersect(setdiff(THERM_COMMIT,
+ THERM_COMMIT_PWFU),
+ SINGLE_FUEL),
+ t = 1:T],
+ EP[:vFuel][y, t] - EP[:vP][y, t] * heat_rate_mmbtu_per_mwh(gen[y])==0)
if !isempty(MULTI_FUELS)
- @constraint(EP, FuelCalculationCommit_multi[y in intersect(setdiff(THERM_COMMIT,THERM_COMMIT_PWFU), MULTI_FUELS), t = 1:T],
- sum(EP[:vMulFuels][y, i, t]/heat_rates[i][y] for i in 1:max_fuels) - EP[:vP][y, t] .== 0
- )
+ @constraint(EP,
+ FuelCalculationCommit_multi[y in intersect(setdiff(THERM_COMMIT,
+ THERM_COMMIT_PWFU),
+ MULTI_FUELS),
+ t = 1:T],
+ sum(EP[:vMulFuels][y, i, t] / heat_rates[i][y] for i in 1:max_fuels) -
+ EP[:vP][y, t].==0)
end
end
# constraints on start up fuel use
@constraint(EP, cStartFuel_single[y in intersect(THERM_COMMIT, SINGLE_FUEL), t = 1:T],
- EP[:vStartFuel][y, t] - (cap_size(gen[y]) * EP[:vSTART][y, t] * start_fuel_mmbtu_per_mw(gen[y])) .== 0
- )
+ EP[:vStartFuel][y, t] -
+ (cap_size(gen[y]) * EP[:vSTART][y, t] * start_fuel_mmbtu_per_mw(gen[y])).==0)
if !isempty(MULTI_FUELS)
- @constraint(EP, cStartFuel_multi[y in intersect(THERM_COMMIT, MULTI_FUELS), t = 1:T],
- sum(EP[:vMulStartFuels][y, i, t] for i in 1:max_fuels) - (cap_size(gen[y]) * EP[:vSTART][y, t] * start_fuel_mmbtu_per_mw(gen[y])) .== 0
- )
+ @constraint(EP,
+ cStartFuel_multi[y in intersect(THERM_COMMIT, MULTI_FUELS), t = 1:T],
+ sum(EP[:vMulStartFuels][y, i, t] for i in 1:max_fuels) -
+ (cap_size(gen[y]) * EP[:vSTART][y, t] * start_fuel_mmbtu_per_mw(gen[y])).==0)
end
# constraints on co-fire ratio of different fuels used by one generator
# for example,
# fuel2/heat rate >= min_cofire_level * total power
# fuel2/heat rate <= max_cofire_level * total power without retrofit
- if !isempty(MULTI_FUELS)
+ if !isempty(MULTI_FUELS)
for i in 1:max_fuels
# during power generation
            # co-fire constraints are left unnamed because they are created inside the loop
- @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MIN[i]), t = 1:T],
- EP[:vMulFuels][y, i, t] >= min_cofire[i][y] * EP[:ePlantFuel_generation][y,t]
- )
- @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MAX[i]), t = 1:T],
- EP[:vMulFuels][y, i, t] <= max_cofire[i][y] * EP[:ePlantFuel_generation][y,t]
- )
+ @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MIN[i]), t = 1:T],
+ EP[:vMulFuels][y,
+ i,
+ t]>=min_cofire[i][y] * EP[:ePlantFuel_generation][y, t])
+ @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MAX[i]), t = 1:T],
+ EP[:vMulFuels][y,
+ i,
+ t]<=max_cofire[i][y] * EP[:ePlantFuel_generation][y, t])
# startup
- @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MIN_START[i]), t = 1:T],
- EP[:vMulStartFuels][y, i, t] >= min_cofire_start[i][y] * EP[:ePlantFuel_start][y,t]
- )
- @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MAX_START[i]), t = 1:T],
- EP[:vMulStartFuels][y, i, t] <= max_cofire_start[i][y] * EP[:ePlantFuel_start][y,t]
- )
+ @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MIN_START[i]), t = 1:T],
+ EP[:vMulStartFuels][y,
+ i,
+ t]>=min_cofire_start[i][y] * EP[:ePlantFuel_start][y, t])
+ @constraint(EP, [y in intersect(MULTI_FUELS, COFIRE_MAX_START[i]), t = 1:T],
+ EP[:vMulStartFuels][y,
+ i,
+ t]<=max_cofire_start[i][y] * EP[:ePlantFuel_start][y, t])
end
end
return EP
end
-
function resources_with_fuel(rs::Vector{<:AbstractResource}, fuel_name::AbstractString)
condition::BitVector = fuel.(rs) .== fuel_name
return resource_id.(rs[condition])
end
-
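The multi-fuel block above ties each fuel's consumption to the plant's total fuel use through minimum and maximum co-firing shares, with a per-fuel heat rate linking fuel burn to power output. The following is a minimal standalone JuMP sketch of that pattern, not GenX code: the names `heat_rate`, `min_share`, `max_share` and all numbers are illustrative assumptions.

```julia
# Standalone sketch of co-firing share bounds and a heat-rate balance.
using JuMP, HiGHS

T, NFUELS = 3, 2
power_target = [80.0, 100.0, 60.0]      # MWh to produce in each hour
heat_rate = [9.0, 10.5]                 # MMBTU per MWh for each fuel
fuel_price = [2.0, 4.5]                 # $/MMBTU
min_share = [0.2, 0.0]                  # minimum share of total fuel per fuel
max_share = [1.0, 0.5]                  # maximum share of total fuel per fuel

m = Model(HiGHS.Optimizer)
set_silent(m)
@variable(m, fuel_use[i = 1:NFUELS, t = 1:T] >= 0)          # MMBTU of each fuel
@expression(m, total_fuel[t = 1:T], sum(fuel_use[i, t] for i in 1:NFUELS))
# Energy balance: fuel converted through each fuel's heat rate must cover output
@constraint(m, power[t = 1:T],
    sum(fuel_use[i, t] / heat_rate[i] for i in 1:NFUELS) == power_target[t])
# Co-firing bounds analogous to the unnamed constraints in the loop above
@constraint(m, cofire_min[i = 1:NFUELS, t = 1:T],
    fuel_use[i, t] >= min_share[i] * total_fuel[t])
@constraint(m, cofire_max[i = 1:NFUELS, t = 1:T],
    fuel_use[i, t] <= max_share[i] * total_fuel[t])
@objective(m, Min, sum(fuel_price[i] * fuel_use[i, t] for i in 1:NFUELS, t in 1:T))
optimize!(m)
value.(fuel_use)
```

With the cheaper fuel unconstrained, the solver burns it exclusively; lowering `max_share[1]` below 1 would force the plant to co-fire the second fuel.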
diff --git a/src/model/core/non_served_energy.jl b/src/model/core/non_served_energy.jl
index 4df302172f..0686a92cab 100644
--- a/src/model/core/non_served_energy.jl
+++ b/src/model/core/non_served_energy.jl
@@ -52,54 +52,61 @@ Additionally, total demand curtailed in each time step cannot exceed total deman
```
"""
function non_served_energy!(EP::Model, inputs::Dict, setup::Dict)
+ println("Non-served Energy Module")
- println("Non-served Energy Module")
+ T = inputs["T"] # Number of time steps
+ Z = inputs["Z"] # Number of zones
+ SEG = inputs["SEG"] # Number of demand curtailment segments
- T = inputs["T"] # Number of time steps
- Z = inputs["Z"] # Number of zones
- SEG = inputs["SEG"] # Number of demand curtailment segments
+ ### Variables ###
- ### Variables ###
+ # Non-served energy/curtailed demand in the segment "s" at hour "t" in zone "z"
+ @variable(EP, vNSE[s = 1:SEG, t = 1:T, z = 1:Z]>=0)
- # Non-served energy/curtailed demand in the segment "s" at hour "t" in zone "z"
- @variable(EP, vNSE[s=1:SEG,t=1:T,z=1:Z] >= 0);
+ ### Expressions ###
- ### Expressions ###
+ ## Objective Function Expressions ##
- ## Objective Function Expressions ##
+ # Cost of non-served energy/curtailed demand at hour "t" in zone "z"
+ @expression(EP,
+ eCNSE[s = 1:SEG, t = 1:T, z = 1:Z],
+ (inputs["omega"][t]*inputs["pC_D_Curtail"][s]*vNSE[s, t, z]))
- # Cost of non-served energy/curtailed demand at hour "t" in zone "z"
- @expression(EP, eCNSE[s=1:SEG,t=1:T,z=1:Z], (inputs["omega"][t]*inputs["pC_D_Curtail"][s]*vNSE[s,t,z]))
+ # Sum individual demand segment contributions to non-served energy costs to get total non-served energy costs
+ # Julia is fastest when summing over one row one column at a time
+ @expression(EP, eTotalCNSETS[t = 1:T, z = 1:Z], sum(eCNSE[s, t, z] for s in 1:SEG))
+ @expression(EP, eTotalCNSET[t = 1:T], sum(eTotalCNSETS[t, z] for z in 1:Z))
+ @expression(EP, eTotalCNSE, sum(eTotalCNSET[t] for t in 1:T))
- # Sum individual demand segment contributions to non-served energy costs to get total non-served energy costs
- # Julia is fastest when summing over one row one column at a time
- @expression(EP, eTotalCNSETS[t=1:T,z=1:Z], sum(eCNSE[s,t,z] for s in 1:SEG))
- @expression(EP, eTotalCNSET[t=1:T], sum(eTotalCNSETS[t,z] for z in 1:Z))
- @expression(EP, eTotalCNSE, sum(eTotalCNSET[t] for t in 1:T))
+ # Add total cost contribution of non-served energy/curtailed demand to the objective function
+ add_to_expression!(EP[:eObj], eTotalCNSE)
- # Add total cost contribution of non-served energy/curtailed demand to the objective function
- add_to_expression!(EP[:eObj], eTotalCNSE)
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceNse[t = 1:T, z = 1:Z], sum(vNSE[s, t, z] for s in 1:SEG))
- ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceNse[t=1:T, z=1:Z], sum(vNSE[s,t,z] for s=1:SEG))
+ # Add non-served energy/curtailed demand contribution to power balance expression
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNse)
- # Add non-served energy/curtailed demand contribution to power balance expression
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNse)
+ # Capacity Reserves Margin policy
+ if setup["CapacityReserveMargin"] > 0
+ if SEG >= 2
+ @expression(EP,
+ eCapResMarBalanceNSE[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(EP[:vNSE][s, t, z]
+ for s in 2:SEG, z in findall(x -> x != 0, inputs["dfCapRes"][:, res])))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceNSE)
+ end
+ end
- # Capacity Reserves Margin policy
- if setup["CapacityReserveMargin"] > 0
- if SEG >=2
- @expression(EP, eCapResMarBalanceNSE[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(EP[:vNSE][s,t,z] for s in 2:SEG, z in findall(x->x!=0,inputs["dfCapRes"][:,res])))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceNSE)
- end
- end
+    ### Constraints ###
- ### Constratints ###
-
- # Demand curtailed in each segment of curtailable demands cannot exceed maximum allowable share of demand
- @constraint(EP, cNSEPerSeg[s=1:SEG, t=1:T, z=1:Z], vNSE[s,t,z] <= inputs["pMax_D_Curtail"][s]*inputs["pD"][t,z])
-
- # Total demand curtailed in each time step (hourly) cannot exceed total demand
- @constraint(EP, cMaxNSE[t=1:T, z=1:Z], sum(vNSE[s,t,z] for s=1:SEG) <= inputs["pD"][t,z])
+ # Demand curtailed in each segment of curtailable demands cannot exceed maximum allowable share of demand
+ @constraint(EP,
+ cNSEPerSeg[s = 1:SEG, t = 1:T, z = 1:Z],
+ vNSE[s, t, z]<=inputs["pMax_D_Curtail"][s] * inputs["pD"][t, z])
+ # Total demand curtailed in each time step (hourly) cannot exceed total demand
+ @constraint(EP,
+ cMaxNSE[t = 1:T, z = 1:Z],
+ sum(vNSE[s, t, z] for s in 1:SEG)<=inputs["pD"][t, z])
end
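The two constraints above cap curtailment per segment at a share of demand and cap total curtailment at demand. Below is a small self-contained sketch of the same pattern with a fixed supply profile; all names and numbers are illustrative, not GenX inputs.

```julia
# Standalone sketch of segmented non-served energy with per-segment caps.
using JuMP, HiGHS

T, SEG = 3, 2
pD = [100.0, 120.0, 90.0]          # hourly demand (MWh)
seg_cost = [500.0, 2000.0]         # $/MWh of curtailment per segment
seg_max_share = [0.05, 1.0]        # max share of demand curtailable per segment
supply = [95.0, 110.0, 90.0]       # available generation (fixed here for brevity)

m = Model(HiGHS.Optimizer)
set_silent(m)
@variable(m, vNSE[s = 1:SEG, t = 1:T] >= 0)
# Each segment cannot exceed its allowable share of demand
@constraint(m, cNSEPerSeg[s = 1:SEG, t = 1:T],
    vNSE[s, t] <= seg_max_share[s] * pD[t])
# Total curtailment cannot exceed demand
@constraint(m, cMaxNSE[t = 1:T], sum(vNSE[s, t] for s in 1:SEG) <= pD[t])
# Curtailment makes up any shortfall between demand and supply
@constraint(m, balance[t = 1:T], supply[t] + sum(vNSE[s, t] for s in 1:SEG) >= pD[t])
@objective(m, Min, sum(seg_cost[s] * vNSE[s, t] for s in 1:SEG, t in 1:T))
optimize!(m)
value.(vNSE)   # cheap segment fills first, expensive segment only when needed
```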
diff --git a/src/model/core/operational_reserves.jl b/src/model/core/operational_reserves.jl
index ef26fbe01e..c77c8363a4 100644
--- a/src/model/core/operational_reserves.jl
+++ b/src/model/core/operational_reserves.jl
@@ -9,17 +9,16 @@
This function sets up reserve decisions and constraints, using the `operational_reserves_core!()` and `operational_reserves_contingency!()` functions.
"""
function operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
+ UCommit = setup["UCommit"]
- UCommit = setup["UCommit"]
-
- if inputs["pStatic_Contingency"] > 0 || (UCommit >= 1 && inputs["pDynamic_Contingency"] >= 1)
- operational_reserves_contingency!(EP, inputs, setup)
- end
+ if inputs["pStatic_Contingency"] > 0 ||
+ (UCommit >= 1 && inputs["pDynamic_Contingency"] >= 1)
+ operational_reserves_contingency!(EP, inputs, setup)
+ end
- operational_reserves_core!(EP, inputs, setup)
+ operational_reserves_core!(EP, inputs, setup)
end
-
@doc raw"""
operational_reserves_contingency!(EP::Model, inputs::Dict, setup::Dict)
@@ -68,71 +67,78 @@ Option 3 (dynamic commitment-based contingency) is expressed by the following se
where $M_y$ is a `big M' constant equal to the largest possible capacity that can be installed for generation cluster $y$, and $Contingency\_Aux_{y,z,t} \in [0,1]$ is a binary auxiliary variable that is forced by the second and third equations above to be 1 if the commitment state for that generation cluster $\nu_{y,z,t} > 0$ for any generator $y \in \mathcal{UC}$ and zone $z$ and time period $t$, and can be 0 otherwise. Note that this dynamic commitment-based contingency can only be specified if discrete unit commitment decisions are used (e.g. it will not work if relaxed unit commitment is used).
"""
function operational_reserves_contingency!(EP::Model, inputs::Dict, setup::Dict)
+ println("Operational Reserves Contingency Module")
- println("Operational Reserves Contingency Module")
-
- gen = inputs["RESOURCES"]
-
- T = inputs["T"] # Number of time steps (hours)
- UCommit = setup["UCommit"]
- COMMIT = inputs["COMMIT"]
-
- if UCommit >= 1
- pDynamic_Contingency = inputs["pDynamic_Contingency"]
- end
-
- ### Variables ###
-
- # NOTE: If Dynamic_Contingency == 0, then contingency is a fixed parameter equal the value specified in Operational_reserves.csv via pStatic_Contingency.
- if UCommit == 1 && pDynamic_Contingency == 1
- # Contingency = largest installed thermal unit
- @variable(EP, vLARGEST_CONTINGENCY >= 0)
- # Auxiliary variable that is 0 if vCAP = 0, 1 otherwise
- @variable(EP, vCONTINGENCY_AUX[y in COMMIT], Bin)
- elseif UCommit == 1 && pDynamic_Contingency == 2
- # Contingency = largest committed thermal unit in each time period
- @variable(EP, vLARGEST_CONTINGENCY[t=1:T] >= 0)
- # Auxiliary variable that is 0 if vCOMMIT = 0, 1 otherwise
- @variable(EP, vCONTINGENCY_AUX[y in COMMIT, t=1:T], Bin)
- end
-
- ### Expressions ###
- if UCommit == 1 && pDynamic_Contingency == 1
- # Largest contingency defined as largest installed generator
- println("Dynamic Contingency Type 1: Modeling the largest contingency as the largest installed generator")
- @expression(EP, eContingencyReq[t=1:T], vLARGEST_CONTINGENCY)
- elseif UCommit == 1 && pDynamic_Contingency == 2
- # Largest contingency defined for each hour as largest committed generator
- println("Dynamic Contingency Type 2: Modeling the largest contingency as the largest largest committed generator")
- @expression(EP, eContingencyReq[t=1:T], vLARGEST_CONTINGENCY[t])
- else
- # Largest contingency defined fixed as user-specifed static contingency in MW
- println("Static Contingency: Modeling the largest contingency as user-specifed static contingency")
- @expression(EP, eContingencyReq[t=1:T], inputs["pStatic_Contingency"])
- end
-
- ### Constraints ###
-
- # Dynamic contingency related constraints
- # option 1: ensures vLARGEST_CONTINGENCY is greater than the capacity of the largest installed generator
- if UCommit == 1 && pDynamic_Contingency == 1
- @constraint(EP, cContingency[y in COMMIT], vLARGEST_CONTINGENCY >= cap_size(gen[y])*vCONTINGENCY_AUX[y] )
- # Ensure vCONTINGENCY_AUX = 0 if total capacity = 0
- @constraint(EP, cContAux1[y in COMMIT], vCONTINGENCY_AUX[y] <= EP[:eTotalCap][y])
- # Ensure vCONTINGENCY_AUX = 1 if total capacity > 0
- @constraint(EP, cContAux2[y in COMMIT], EP[:eTotalCap][y] <= inputs["pContingency_BigM"][y]*vCONTINGENCY_AUX[y])
-
- # option 2: ensures vLARGEST_CONTINGENCY is greater than the capacity of the largest commited generator in each hour
- elseif UCommit == 1 && pDynamic_Contingency == 2
- @constraint(EP, cContingency[y in COMMIT, t=1:T], vLARGEST_CONTINGENCY[t] >= cap_size(gen[y])*vCONTINGENCY_AUX[y,t] )
- # Ensure vCONTINGENCY_AUX = 0 if vCOMMIT = 0
- @constraint(EP, cContAux[y in COMMIT, t=1:T], vCONTINGENCY_AUX[y,t] <= EP[:vCOMMIT][y,t])
- # Ensure vCONTINGENCY_AUX = 1 if vCOMMIT > 0
- @constraint(EP, cContAux2[y in COMMIT, t=1:T], EP[:vCOMMIT][y, t] <= inputs["pContingency_BigM"][y]*vCONTINGENCY_AUX[y,t])
- end
+ gen = inputs["RESOURCES"]
-end
+ T = inputs["T"] # Number of time steps (hours)
+ UCommit = setup["UCommit"]
+ COMMIT = inputs["COMMIT"]
+
+ if UCommit >= 1
+ pDynamic_Contingency = inputs["pDynamic_Contingency"]
+ end
+ ### Variables ###
+
+    # NOTE: If Dynamic_Contingency == 0, then contingency is a fixed parameter equal to the value specified in Operational_reserves.csv via pStatic_Contingency.
+ if UCommit == 1 && pDynamic_Contingency == 1
+ # Contingency = largest installed thermal unit
+ @variable(EP, vLARGEST_CONTINGENCY>=0)
+ # Auxiliary variable that is 0 if vCAP = 0, 1 otherwise
+ @variable(EP, vCONTINGENCY_AUX[y in COMMIT], Bin)
+ elseif UCommit == 1 && pDynamic_Contingency == 2
+ # Contingency = largest committed thermal unit in each time period
+ @variable(EP, vLARGEST_CONTINGENCY[t = 1:T]>=0)
+ # Auxiliary variable that is 0 if vCOMMIT = 0, 1 otherwise
+ @variable(EP, vCONTINGENCY_AUX[y in COMMIT, t = 1:T], Bin)
+ end
+
+ ### Expressions ###
+ if UCommit == 1 && pDynamic_Contingency == 1
+ # Largest contingency defined as largest installed generator
+ println("Dynamic Contingency Type 1: Modeling the largest contingency as the largest installed generator")
+ @expression(EP, eContingencyReq[t = 1:T], vLARGEST_CONTINGENCY)
+ elseif UCommit == 1 && pDynamic_Contingency == 2
+ # Largest contingency defined for each hour as largest committed generator
+ println("Dynamic Contingency Type 2: Modeling the largest contingency as the largest largest committed generator")
+ @expression(EP, eContingencyReq[t = 1:T], vLARGEST_CONTINGENCY[t])
+ else
+        # Largest contingency is fixed at the user-specified static contingency in MW
+        println("Static Contingency: Modeling the largest contingency as the user-specified static contingency")
+ @expression(EP, eContingencyReq[t = 1:T], inputs["pStatic_Contingency"])
+ end
+
+ ### Constraints ###
+
+ # Dynamic contingency related constraints
+ # option 1: ensures vLARGEST_CONTINGENCY is greater than the capacity of the largest installed generator
+ if UCommit == 1 && pDynamic_Contingency == 1
+ @constraint(EP,
+ cContingency[y in COMMIT],
+ vLARGEST_CONTINGENCY>=cap_size(gen[y]) * vCONTINGENCY_AUX[y])
+ # Ensure vCONTINGENCY_AUX = 0 if total capacity = 0
+ @constraint(EP, cContAux1[y in COMMIT], vCONTINGENCY_AUX[y]<=EP[:eTotalCap][y])
+ # Ensure vCONTINGENCY_AUX = 1 if total capacity > 0
+ @constraint(EP,
+ cContAux2[y in COMMIT],
+ EP[:eTotalCap][y]<=inputs["pContingency_BigM"][y] * vCONTINGENCY_AUX[y])
+
+    # option 2: ensures vLARGEST_CONTINGENCY is greater than the capacity of the largest committed generator in each hour
+ elseif UCommit == 1 && pDynamic_Contingency == 2
+ @constraint(EP,
+ cContingency[y in COMMIT, t = 1:T],
+ vLARGEST_CONTINGENCY[t]>=cap_size(gen[y]) * vCONTINGENCY_AUX[y, t])
+ # Ensure vCONTINGENCY_AUX = 0 if vCOMMIT = 0
+ @constraint(EP,
+ cContAux[y in COMMIT, t = 1:T],
+ vCONTINGENCY_AUX[y, t]<=EP[:vCOMMIT][y, t])
+ # Ensure vCONTINGENCY_AUX = 1 if vCOMMIT > 0
+ @constraint(EP,
+ cContAux2[y in COMMIT, t = 1:T],
+ EP[:vCOMMIT][y, t]<=inputs["pContingency_BigM"][y] * vCONTINGENCY_AUX[y, t])
+ end
+end
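The dynamic-contingency options above rely on a standard big-M linking trick: a binary auxiliary is forced to 0 when nothing is committed and to 1 when anything is committed, so the contingency variable must cover the unit size of every committed cluster. A compact standalone illustration follows (not GenX code; commitment levels and big-M values are fixed, made-up constants kept only for brevity).

```julia
# Standalone sketch of the big-M auxiliary linking for the largest contingency.
using JuMP, HiGHS

Y = 3                                  # number of committable clusters
cap_size = [200.0, 350.0, 500.0]       # MW per unit in each cluster
commit = [1.0, 2.0, 0.0]               # committed units (fixed here for brevity)
bigM = [10.0, 10.0, 10.0]              # upper bound on committed units per cluster

m = Model(HiGHS.Optimizer)
set_silent(m)
@variable(m, vLARGEST_CONTINGENCY >= 0)
@variable(m, vAUX[1:Y], Bin)
# Contingency must be at least the unit size of any committed cluster
@constraint(m, cContingency[y = 1:Y],
    vLARGEST_CONTINGENCY >= cap_size[y] * vAUX[y])
# vAUX = 0 if nothing is committed ...
@constraint(m, cAux1[y = 1:Y], vAUX[y] <= commit[y])
# ... and vAUX = 1 if anything is committed (big-M side)
@constraint(m, cAux2[y = 1:Y], commit[y] <= bigM[y] * vAUX[y])
@objective(m, Min, vLARGEST_CONTINGENCY)
optimize!(m)
value(vLARGEST_CONTINGENCY)            # 350.0: the largest committed unit size
```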
@doc raw"""
operational_reserves_core!(EP::Model, inputs::Dict, setup::Dict)
@@ -202,67 +208,82 @@ and $\epsilon^{demand}_{rsv}$ and $\epsilon^{vre}_{rsv}$ are parameters specifyi
"""
function operational_reserves_core!(EP::Model, inputs::Dict, setup::Dict)
- # DEV NOTE: After simplifying reserve changes are integrated/confirmed, should we revise such that reserves can be modeled without UC constraints on?
- # Is there a use case for economic dispatch constraints with reserves?
+    # DEV NOTE: After the simplifying reserve changes are integrated/confirmed, should we revise such that reserves can be modeled without UC constraints on?
+ # Is there a use case for economic dispatch constraints with reserves?
- println("Operational Reserves Core Module")
+ println("Operational Reserves Core Module")
- gen = inputs["RESOURCES"]
- UCommit = setup["UCommit"]
+ gen = inputs["RESOURCES"]
+ UCommit = setup["UCommit"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- REG = inputs["REG"]
- RSV = inputs["RSV"]
+ REG = inputs["REG"]
+ RSV = inputs["RSV"]
STOR_ALL = inputs["STOR_ALL"]
pDemand = inputs["pD"]
pP_Max(y, t) = inputs["pP_Max"][y, t]
- systemwide_hourly_demand = sum(pDemand, dims=2)
- must_run_vre_generation(t) = sum(pP_Max(y, t) * EP[:eTotalCap][y] for y in intersect(inputs["VRE"], inputs["MUST_RUN"]); init=0)
-
- ### Variables ###
-
- ## Integer Unit Commitment configuration for variables
-
- ## Decision variables for operational reserves
- @variable(EP, vREG[y in REG, t=1:T] >= 0) # Contribution to regulation (primary reserves), assumed to be symmetric (up & down directions equal)
- @variable(EP, vRSV[y in RSV, t=1:T] >= 0) # Contribution to operating reserves (secondary reserves or contingency reserves); only model upward reserve requirements
-
- # Storage techs have two pairs of auxilary variables to reflect contributions to regulation and reserves
- # when charging and discharging (primary variable becomes equal to sum of these auxilary variables)
- @variable(EP, vREG_discharge[y in intersect(STOR_ALL, REG), t=1:T] >= 0) # Contribution to regulation (primary reserves) (mirrored variable used for storage devices)
- @variable(EP, vRSV_discharge[y in intersect(STOR_ALL, RSV), t=1:T] >= 0) # Contribution to operating reserves (secondary reserves) (mirrored variable used for storage devices)
- @variable(EP, vREG_charge[y in intersect(STOR_ALL, REG), t=1:T] >= 0) # Contribution to regulation (primary reserves) (mirrored variable used for storage devices)
- @variable(EP, vRSV_charge[y in intersect(STOR_ALL, RSV), t=1:T] >= 0) # Contribution to operating reserves (secondary reserves) (mirrored variable used for storage devices)
-
- @variable(EP, vUNMET_RSV[t=1:T] >= 0) # Unmet operating reserves penalty/cost
-
- ### Expressions ###
- ## Total system reserve expressions
- # Regulation requirements as a percentage of demand and scheduled variable renewable energy production in each hour
- # Reg up and down requirements are symmetric
- @expression(EP, eRegReq[t=1:T], inputs["pReg_Req_Demand"] * systemwide_hourly_demand[t] +
- inputs["pReg_Req_VRE"] * must_run_vre_generation(t))
- # Operating reserve up / contingency reserve requirements as ˚a percentage of demand and scheduled variable renewable energy production in each hour
- # and the largest single contingency (generator or transmission line outage)
- @expression(EP, eRsvReq[t=1:T], inputs["pRsv_Req_Demand"] * systemwide_hourly_demand[t] +
- inputs["pRsv_Req_VRE"] * must_run_vre_generation(t))
+ systemwide_hourly_demand = sum(pDemand, dims = 2)
+ function must_run_vre_generation(t)
+ sum(pP_Max(y, t) * EP[:eTotalCap][y]
+ for y in intersect(inputs["VRE"], inputs["MUST_RUN"]);
+ init = 0)
+ end
- # N-1 contingency requirement is considered only if Unit Commitment is being modeled
- if UCommit >= 1 && (inputs["pDynamic_Contingency"] >= 1 || inputs["pStatic_Contingency"] > 0)
+ ### Variables ###
+
+ ## Integer Unit Commitment configuration for variables
+
+ ## Decision variables for operational reserves
+ @variable(EP, vREG[y in REG, t = 1:T]>=0) # Contribution to regulation (primary reserves), assumed to be symmetric (up & down directions equal)
+ @variable(EP, vRSV[y in RSV, t = 1:T]>=0) # Contribution to operating reserves (secondary reserves or contingency reserves); only model upward reserve requirements
+
+    # Storage techs have two pairs of auxiliary variables to reflect contributions to regulation and reserves
+    # when charging and discharging (primary variable becomes equal to sum of these auxiliary variables)
+ @variable(EP, vREG_discharge[y in intersect(STOR_ALL, REG), t = 1:T]>=0) # Contribution to regulation (primary reserves) (mirrored variable used for storage devices)
+ @variable(EP, vRSV_discharge[y in intersect(STOR_ALL, RSV), t = 1:T]>=0) # Contribution to operating reserves (secondary reserves) (mirrored variable used for storage devices)
+ @variable(EP, vREG_charge[y in intersect(STOR_ALL, REG), t = 1:T]>=0) # Contribution to regulation (primary reserves) (mirrored variable used for storage devices)
+ @variable(EP, vRSV_charge[y in intersect(STOR_ALL, RSV), t = 1:T]>=0) # Contribution to operating reserves (secondary reserves) (mirrored variable used for storage devices)
+
+ @variable(EP, vUNMET_RSV[t = 1:T]>=0) # Unmet operating reserves penalty/cost
+
+ ### Expressions ###
+ ## Total system reserve expressions
+ # Regulation requirements as a percentage of demand and scheduled variable renewable energy production in each hour
+ # Reg up and down requirements are symmetric
+ @expression(EP,
+ eRegReq[t = 1:T],
+ inputs["pReg_Req_Demand"] *
+ systemwide_hourly_demand[t]+
+ inputs["pReg_Req_VRE"] * must_run_vre_generation(t))
+    # Operating reserve up / contingency reserve requirements as a percentage of demand and scheduled variable renewable energy production in each hour
+ # and the largest single contingency (generator or transmission line outage)
+ @expression(EP,
+ eRsvReq[t = 1:T],
+ inputs["pRsv_Req_Demand"] *
+ systemwide_hourly_demand[t]+
+ inputs["pRsv_Req_VRE"] * must_run_vre_generation(t))
+
+ # N-1 contingency requirement is considered only if Unit Commitment is being modeled
+ if UCommit >= 1 &&
+ (inputs["pDynamic_Contingency"] >= 1 || inputs["pStatic_Contingency"] > 0)
add_to_expression!(EP[:eRsvReq], EP[:eContingencyReq])
- end
-
- ## Objective Function Expressions ##
+ end
- # Penalty for unmet operating reserves
- @expression(EP, eCRsvPen[t=1:T], inputs["omega"][t]*inputs["pC_Rsv_Penalty"]*vUNMET_RSV[t])
- @expression(EP, eTotalCRsvPen, sum(eCRsvPen[t] for t=1:T) +
- sum(reg_cost(gen[y])*vRSV[y,t] for y in RSV, t=1:T) +
- sum(rsv_cost(gen[y])*vREG[y,t] for y in REG, t=1:T) )
- add_to_expression!(EP[:eObj], eTotalCRsvPen)
+ ## Objective Function Expressions ##
+
+ # Penalty for unmet operating reserves
+ @expression(EP,
+ eCRsvPen[t = 1:T],
+ inputs["omega"][t]*inputs["pC_Rsv_Penalty"]*vUNMET_RSV[t])
+ @expression(EP,
+ eTotalCRsvPen,
+ sum(eCRsvPen[t] for t in 1:T)+
+ sum(reg_cost(gen[y]) * vRSV[y, t] for y in RSV, t in 1:T)+
+ sum(rsv_cost(gen[y]) * vREG[y, t] for y in REG, t in 1:T))
+ add_to_expression!(EP[:eObj], eTotalCRsvPen)
end
function operational_reserves_constraints!(EP, inputs)
@@ -283,9 +304,13 @@ function operational_reserves_constraints!(EP, inputs)
# contributing to regulation are assumed to contribute equal capacity to both up
# and down directions
if !isempty(REG)
- @constraint(EP, cReg[t=1:T], sum(vREG[y,t] for y in REG) >= eRegulationRequirement[t])
+ @constraint(EP,
+ cReg[t = 1:T],
+ sum(vREG[y, t] for y in REG)>=eRegulationRequirement[t])
end
if !isempty(RSV)
- @constraint(EP, cRsvReq[t=1:T], sum(vRSV[y,t] for y in RSV) + vUNMET_RSV[t] >= eReserveRequirement[t])
+ @constraint(EP,
+ cRsvReq[t = 1:T],
+ sum(vRSV[y, t] for y in RSV) + vUNMET_RSV[t]>=eReserveRequirement[t])
end
end
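The core reserve formulation above boils down to a requirement expression (a share of demand plus a share of variable renewable output) that must be covered by unit contributions plus a penalized unmet-reserve slack. Here is a self-contained toy version of that pattern; the names and data are made up, and the VRE term is omitted for brevity.

```julia
# Standalone sketch of a reserve requirement with a penalized unmet slack.
using JuMP, HiGHS

T, Y = 2, 2
demand = [1000.0, 1400.0]               # hourly demand (MW)
rsv_share = 0.05                        # reserve requirement as share of demand
headroom = [30.0, 30.0]                 # max reserve each unit can provide (MW)
penalty = 5000.0                        # $/MW of unmet reserves

m = Model(HiGHS.Optimizer)
set_silent(m)
@variable(m, 0 <= vRSV[y = 1:Y, t = 1:T] <= headroom[y])
@variable(m, vUNMET_RSV[t = 1:T] >= 0)
@expression(m, eRsvReq[t = 1:T], rsv_share * demand[t])
# Requirement met by unit contributions plus the unmet slack
@constraint(m, cRsvReq[t = 1:T],
    sum(vRSV[y, t] for y in 1:Y) + vUNMET_RSV[t] >= eRsvReq[t])
@objective(m, Min,
    sum(1.0 * vRSV[y, t] for y in 1:Y, t in 1:T) +
    penalty * sum(vUNMET_RSV[t] for t in 1:T))
optimize!(m)
value.(vUNMET_RSV)                      # [0.0, 10.0]: only hour 2 is short
```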
diff --git a/src/model/core/transmission/dcopf_transmission.jl b/src/model/core/transmission/dcopf_transmission.jl
index 1b2b853ddd..c833c532f2 100644
--- a/src/model/core/transmission/dcopf_transmission.jl
+++ b/src/model/core/transmission/dcopf_transmission.jl
@@ -23,32 +23,37 @@ Finally, we enforce the reference voltage phase angle constraint:
"""
function dcopf_transmission!(EP::Model, inputs::Dict, setup::Dict)
-
- println("DC-OPF Module")
-
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- L = inputs["L"] # Number of transmission lines
-
- ### DC-OPF variables ###
-
- # Voltage angle variables of each zone "z" at hour "t"
- @variable(EP, vANGLE[z=1:Z,t=1:T])
-
- ### DC-OPF constraints ###
-
- # Power flow constraint:: vFLOW = DC_OPF_coeff * (vANGLE[START_ZONE] - vANGLE[END_ZONE])
- @constraint(EP, cPOWER_FLOW_OPF[l=1:L, t=1:T], EP[:vFLOW][l,t] == inputs["pDC_OPF_coeff"][l] * sum(inputs["pNet_Map"][l,z] * vANGLE[z,t] for z=1:Z))
-
- # Bus angle limits (except slack bus)
- @constraints(EP, begin
- cANGLE_ub[l=1:L, t=1:T], sum(inputs["pNet_Map"][l,z] * vANGLE[z,t] for z=1:Z) <= inputs["Line_Angle_Limit"][l]
- cANGLE_lb[l=1:L, t=1:T], sum(inputs["pNet_Map"][l,z] * vANGLE[z,t] for z=1:Z) >= -inputs["Line_Angle_Limit"][l]
- end)
-
- # Slack Bus angle limit
- @constraint(EP, cANGLE_SLACK[t=1:T], vANGLE[1,t]== 0)
-
-
-
+ println("DC-OPF Module")
+
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ L = inputs["L"] # Number of transmission lines
+
+ ### DC-OPF variables ###
+
+ # Voltage angle variables of each zone "z" at hour "t"
+ @variable(EP, vANGLE[z = 1:Z, t = 1:T])
+
+ ### DC-OPF constraints ###
+
+    # Power flow constraint: vFLOW = DC_OPF_coeff * (vANGLE[START_ZONE] - vANGLE[END_ZONE])
+ @constraint(EP,
+ cPOWER_FLOW_OPF[l = 1:L, t = 1:T],
+ EP[:vFLOW][l,
+ t]==inputs["pDC_OPF_coeff"][l] *
+ sum(inputs["pNet_Map"][l, z] * vANGLE[z, t] for z in 1:Z))
+
+ # Bus angle limits (except slack bus)
+ @constraints(EP,
+ begin
+ cANGLE_ub[l = 1:L, t = 1:T],
+ sum(inputs["pNet_Map"][l, z] * vANGLE[z, t] for z in 1:Z) <=
+ inputs["Line_Angle_Limit"][l]
+ cANGLE_lb[l = 1:L, t = 1:T],
+ sum(inputs["pNet_Map"][l, z] * vANGLE[z, t] for z in 1:Z) >=
+ -inputs["Line_Angle_Limit"][l]
+ end)
+
+ # Slack Bus angle limit
+ @constraint(EP, cANGLE_SLACK[t = 1:T], vANGLE[1, t]==0)
end
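The DC-OPF block couples line flows to zonal voltage angles through a line-by-zone network map, bounds the angle difference on each line, and pins the slack-bus angle to zero. A standalone sketch of those three constraints follows; the topology, coefficients, and objective are illustrative assumptions, not GenX inputs.

```julia
# Standalone sketch of DC-OPF flow/angle coupling on a 3-zone, 2-line network.
using JuMP, HiGHS

Z, L, T = 3, 2, 1
net_map = [1.0 -1.0 0.0;               # line 1: zone 1 -> zone 2
           0.0 1.0 -1.0]               # line 2: zone 2 -> zone 3
dc_opf_coeff = [500.0, 400.0]          # MW per radian of angle difference
angle_limit = [0.4, 0.4]               # rad

m = Model(HiGHS.Optimizer)
set_silent(m)
@variable(m, vANGLE[z = 1:Z, t = 1:T])
@variable(m, vFLOW[l = 1:L, t = 1:T])
# Flow equals the line coefficient times the mapped angle difference
@constraint(m, cPowerFlow[l = 1:L, t = 1:T],
    vFLOW[l, t] == dc_opf_coeff[l] *
                   sum(net_map[l, z] * vANGLE[z, t] for z in 1:Z))
# Angle-difference limits in both directions
@constraint(m, cAngleUB[l = 1:L, t = 1:T],
    sum(net_map[l, z] * vANGLE[z, t] for z in 1:Z) <= angle_limit[l])
@constraint(m, cAngleLB[l = 1:L, t = 1:T],
    sum(net_map[l, z] * vANGLE[z, t] for z in 1:Z) >= -angle_limit[l])
# Reference (slack) bus angle fixed to zero
@constraint(m, cSlack[t = 1:T], vANGLE[1, t] == 0)
@objective(m, Max, vFLOW[1, 1])
optimize!(m)
value(vFLOW[1, 1])                     # 200.0 = 500 * 0.4
```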
diff --git a/src/model/core/transmission/investment_transmission.jl b/src/model/core/transmission/investment_transmission.jl
index 685d5d8046..813c06aab3 100644
--- a/src/model/core/transmission/investment_transmission.jl
+++ b/src/model/core/transmission/investment_transmission.jl
@@ -24,87 +24,90 @@
```
"""
function investment_transmission!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Investment Transmission Module")
-
- L = inputs["L"] # Number of transmission lines
- NetworkExpansion = setup["NetworkExpansion"]
- MultiStage = setup["MultiStage"]
-
- if NetworkExpansion == 1
- # Network lines and zones that are expandable have non-negative maximum reinforcement inputs
- EXPANSION_LINES = inputs["EXPANSION_LINES"]
- end
-
- ### Variables ###
-
- if MultiStage == 1
- @variable(EP, vTRANSMAX[l=1:L] >= 0)
- end
-
- if NetworkExpansion == 1
- # Transmission network capacity reinforcements per line
- @variable(EP, vNEW_TRANS_CAP[l in EXPANSION_LINES] >= 0)
- end
-
-
- ### Expressions ###
-
- if MultiStage == 1
- @expression(EP, eTransMax[l=1:L], vTRANSMAX[l])
- else
- @expression(EP, eTransMax[l=1:L], inputs["pTrans_Max"][l])
- end
-
- ## Transmission power flow and loss related expressions:
- # Total availabile maximum transmission capacity is the sum of existing maximum transmission capacity plus new transmission capacity
- if NetworkExpansion == 1
- @expression(EP, eAvail_Trans_Cap[l=1:L],
- if l in EXPANSION_LINES
- eTransMax[l] + vNEW_TRANS_CAP[l]
- else
- eTransMax[l] + EP[:vZERO]
- end
- )
- else
- @expression(EP, eAvail_Trans_Cap[l=1:L], eTransMax[l] + EP[:vZERO])
- end
-
- ## Objective Function Expressions ##
-
- if NetworkExpansion == 1
- @expression(EP, eTotalCNetworkExp, sum(vNEW_TRANS_CAP[l]*inputs["pC_Line_Reinforcement"][l] for l in EXPANSION_LINES))
-
- if MultiStage == 1
- # OPEX multiplier to count multiple years between two model stages
- # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
- # and we have already accounted for multiple years between stages for fixed costs.
- add_to_expression!(EP[:eObj], (1/inputs["OPEXMULT"]), eTotalCNetworkExp)
- else
- add_to_expression!(EP[:eObj], eTotalCNetworkExp)
- end
- end
-
- ## End Objective Function Expressions ##
-
- ### Constraints ###
-
- if MultiStage == 1
- # Linking constraint for existing transmission capacity
- @constraint(EP, cExistingTransCap[l=1:L], vTRANSMAX[l] == inputs["pTrans_Max"][l])
- end
-
-
- # If network expansion is used:
- if NetworkExpansion == 1
- # Transmission network related power flow and capacity constraints
- if MultiStage == 1
- # Constrain maximum possible flow for lines eligible for expansion regardless of previous expansions
- @constraint(EP, cMaxFlowPossible[l in EXPANSION_LINES], eAvail_Trans_Cap[l] <= inputs["pTrans_Max_Possible"][l])
- end
- # Constrain maximum single-stage line capacity reinforcement for lines eligible for expansion
- @constraint(EP, cMaxLineReinforcement[l in EXPANSION_LINES], vNEW_TRANS_CAP[l] <= inputs["pMax_Line_Reinforcement"][l])
- end
- #END network expansion contraints
+ println("Investment Transmission Module")
+
+ L = inputs["L"] # Number of transmission lines
+ NetworkExpansion = setup["NetworkExpansion"]
+ MultiStage = setup["MultiStage"]
+
+ if NetworkExpansion == 1
+ # Network lines and zones that are expandable have non-negative maximum reinforcement inputs
+ EXPANSION_LINES = inputs["EXPANSION_LINES"]
+ end
+
+ ### Variables ###
+
+ if MultiStage == 1
+ @variable(EP, vTRANSMAX[l = 1:L]>=0)
+ end
+
+ if NetworkExpansion == 1
+ # Transmission network capacity reinforcements per line
+ @variable(EP, vNEW_TRANS_CAP[l in EXPANSION_LINES]>=0)
+ end
+
+ ### Expressions ###
+
+ if MultiStage == 1
+ @expression(EP, eTransMax[l = 1:L], vTRANSMAX[l])
+ else
+ @expression(EP, eTransMax[l = 1:L], inputs["pTrans_Max"][l])
+ end
+
+ ## Transmission power flow and loss related expressions:
+    # Total available maximum transmission capacity is the sum of existing maximum transmission capacity plus new transmission capacity
+ if NetworkExpansion == 1
+ @expression(EP, eAvail_Trans_Cap[l = 1:L],
+ if l in EXPANSION_LINES
+ eTransMax[l] + vNEW_TRANS_CAP[l]
+ else
+ eTransMax[l] + EP[:vZERO]
+ end)
+ else
+ @expression(EP, eAvail_Trans_Cap[l = 1:L], eTransMax[l]+EP[:vZERO])
+ end
+
+ ## Objective Function Expressions ##
+
+ if NetworkExpansion == 1
+ @expression(EP,
+ eTotalCNetworkExp,
+ sum(vNEW_TRANS_CAP[l] * inputs["pC_Line_Reinforcement"][l]
+ for l in EXPANSION_LINES))
+
+ if MultiStage == 1
+ # OPEX multiplier to count multiple years between two model stages
+ # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
+ # and we have already accounted for multiple years between stages for fixed costs.
+ add_to_expression!(EP[:eObj], (1 / inputs["OPEXMULT"]), eTotalCNetworkExp)
+ else
+ add_to_expression!(EP[:eObj], eTotalCNetworkExp)
+ end
+ end
+
+ ## End Objective Function Expressions ##
+
+ ### Constraints ###
+
+ if MultiStage == 1
+ # Linking constraint for existing transmission capacity
+ @constraint(EP, cExistingTransCap[l = 1:L], vTRANSMAX[l]==inputs["pTrans_Max"][l])
+ end
+
+ # If network expansion is used:
+ if NetworkExpansion == 1
+ # Transmission network related power flow and capacity constraints
+ if MultiStage == 1
+ # Constrain maximum possible flow for lines eligible for expansion regardless of previous expansions
+ @constraint(EP,
+ cMaxFlowPossible[l in EXPANSION_LINES],
+ eAvail_Trans_Cap[l]<=inputs["pTrans_Max_Possible"][l])
+ end
+ # Constrain maximum single-stage line capacity reinforcement for lines eligible for expansion
+ @constraint(EP,
+ cMaxLineReinforcement[l in EXPANSION_LINES],
+ vNEW_TRANS_CAP[l]<=inputs["pMax_Line_Reinforcement"][l])
+ end
+    # END network expansion constraints
end
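The investment logic above amounts to: available capacity equals existing capacity plus (optionally) new reinforcement, reinforcement is capped per line, and its cost enters the objective. Below is a minimal standalone version of that accounting; all data are illustrative, and the simple "required capacity" constraint stands in for the flow constraints that use `eAvail_Trans_Cap` elsewhere in the model.

```julia
# Standalone sketch of transmission reinforcement with per-line limits and costs.
using JuMP, HiGHS

L = 2
existing_cap = [1000.0, 500.0]          # MW already in place
max_reinforcement = [400.0, 0.0]        # MW of allowed expansion per line
line_cost = [1.0e6, 1.2e6]              # $/MW of reinforcement
required_cap = [1200.0, 500.0]          # MW the plan must provide (stand-in for flows)

m = Model(HiGHS.Optimizer)
set_silent(m)
@variable(m, vNEW_TRANS_CAP[l = 1:L] >= 0)
# Available capacity = existing + new
@expression(m, eAvail_Trans_Cap[l = 1:L], existing_cap[l] + vNEW_TRANS_CAP[l])
# Per-line reinforcement limit
@constraint(m, cMaxReinforcement[l = 1:L],
    vNEW_TRANS_CAP[l] <= max_reinforcement[l])
@constraint(m, cNeed[l = 1:L], eAvail_Trans_Cap[l] >= required_cap[l])
@objective(m, Min, sum(line_cost[l] * vNEW_TRANS_CAP[l] for l in 1:L))
optimize!(m)
value.(vNEW_TRANS_CAP)                  # [200.0, 0.0]
```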
diff --git a/src/model/core/transmission/transmission.jl b/src/model/core/transmission/transmission.jl
index 12aa50cd85..342b2d7610 100644
--- a/src/model/core/transmission/transmission.jl
+++ b/src/model/core/transmission/transmission.jl
@@ -84,177 +84,235 @@ As with losses option 2, this segment-wise approximation of a quadratic loss fun
```
"""
function transmission!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Transmission Module")
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- L = inputs["L"] # Number of transmission lines
-
- UCommit = setup["UCommit"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
- EnergyShareRequirement = setup["EnergyShareRequirement"]
- IncludeLossesInESR = setup["IncludeLossesInESR"]
-
- ## sets and indices for transmission losses
- TRANS_LOSS_SEGS = inputs["TRANS_LOSS_SEGS"] # Number of segments used in piecewise linear approximations quadratic loss functions - can only take values of TRANS_LOSS_SEGS =1, 2
- LOSS_LINES = inputs["LOSS_LINES"] # Lines for which loss coefficients apply (are non-zero);
-
-
- ### Variables ###
-
- # Power flow on each transmission line "l" at hour "t"
- @variable(EP, vFLOW[l=1:L,t=1:T]);
-
- if (TRANS_LOSS_SEGS==1) #loss is a constant times absolute value of power flow
- # Positive and negative flow variables
- @variable(EP, vTAUX_NEG[l in LOSS_LINES,t=1:T] >= 0)
- @variable(EP, vTAUX_POS[l in LOSS_LINES,t=1:T] >= 0)
-
- if UCommit == 1
- # Single binary variable to ensure positive or negative flows only
- @variable(EP, vTAUX_POS_ON[l in LOSS_LINES,t=1:T],Bin)
- # Continuous variable representing product of binary variable (vTAUX_POS_ON) and avail transmission capacity
- @variable(EP, vPROD_TRANSCAP_ON[l in LOSS_LINES,t=1:T]>=0)
- end
- else # TRANS_LOSS_SEGS>1
- # Auxiliary variables for linear piecewise interpolation of quadratic losses
- @variable(EP, vTAUX_NEG[l in LOSS_LINES, s=0:TRANS_LOSS_SEGS, t=1:T] >= 0)
- @variable(EP, vTAUX_POS[l in LOSS_LINES, s=0:TRANS_LOSS_SEGS, t=1:T] >= 0)
- if UCommit == 1
- # Binary auxilary variables for each segment >1 to ensure segments fill in order
- @variable(EP, vTAUX_POS_ON[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], Bin)
- @variable(EP, vTAUX_NEG_ON[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], Bin)
- end
+ println("Transmission Module")
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ L = inputs["L"] # Number of transmission lines
+
+ UCommit = setup["UCommit"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
+ EnergyShareRequirement = setup["EnergyShareRequirement"]
+ IncludeLossesInESR = setup["IncludeLossesInESR"]
+
+ ## sets and indices for transmission losses
+    TRANS_LOSS_SEGS = inputs["TRANS_LOSS_SEGS"] # Number of segments used in piecewise linear approximations of quadratic loss functions - can only take values of TRANS_LOSS_SEGS = 1, 2
+ LOSS_LINES = inputs["LOSS_LINES"] # Lines for which loss coefficients apply (are non-zero);
+
+ ### Variables ###
+
+ # Power flow on each transmission line "l" at hour "t"
+ @variable(EP, vFLOW[l = 1:L, t = 1:T])
+
+ if (TRANS_LOSS_SEGS == 1) #loss is a constant times absolute value of power flow
+ # Positive and negative flow variables
+ @variable(EP, vTAUX_NEG[l in LOSS_LINES, t = 1:T]>=0)
+ @variable(EP, vTAUX_POS[l in LOSS_LINES, t = 1:T]>=0)
+
+ if UCommit == 1
+ # Single binary variable to ensure positive or negative flows only
+ @variable(EP, vTAUX_POS_ON[l in LOSS_LINES, t = 1:T], Bin)
+ # Continuous variable representing product of binary variable (vTAUX_POS_ON) and avail transmission capacity
+ @variable(EP, vPROD_TRANSCAP_ON[l in LOSS_LINES, t = 1:T]>=0)
+ end
+ else # TRANS_LOSS_SEGS>1
+ # Auxiliary variables for linear piecewise interpolation of quadratic losses
+ @variable(EP, vTAUX_NEG[l in LOSS_LINES, s = 0:TRANS_LOSS_SEGS, t = 1:T]>=0)
+ @variable(EP, vTAUX_POS[l in LOSS_LINES, s = 0:TRANS_LOSS_SEGS, t = 1:T]>=0)
+ if UCommit == 1
+            # Binary auxiliary variables for each segment >1 to ensure segments fill in order
+ @variable(EP,
+ vTAUX_POS_ON[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ Bin)
+ @variable(EP,
+ vTAUX_NEG_ON[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ Bin)
+ end
end
- # Transmission losses on each transmission line "l" at hour "t"
- @variable(EP, vTLOSS[l in LOSS_LINES,t=1:T] >= 0)
-
- ### Expressions ###
-
- ## Transmission power flow and loss related expressions:
-
- # Net power flow outgoing from zone "z" at hour "t" in MW
- @expression(EP, eNet_Export_Flows[z=1:Z,t=1:T], sum(inputs["pNet_Map"][l,z] * vFLOW[l,t] for l=1:L))
-
- # Losses from power flows into or out of zone "z" in MW
- @expression(EP, eLosses_By_Zone[z=1:Z,t=1:T], sum(abs(inputs["pNet_Map"][l,z]) * (1/2) *vTLOSS[l,t] for l in LOSS_LINES))
-
- ## Power Balance Expressions ##
-
- @expression(EP, ePowerBalanceNetExportFlows[t=1:T, z=1:Z],
- -eNet_Export_Flows[z,t])
- @expression(EP, ePowerBalanceLossesByZone[t=1:T, z=1:Z],
- -eLosses_By_Zone[z,t])
+ # Transmission losses on each transmission line "l" at hour "t"
+ @variable(EP, vTLOSS[l in LOSS_LINES, t = 1:T]>=0)
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceLossesByZone)
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNetExportFlows)
+ ### Expressions ###
- # Capacity Reserves Margin policy
- if CapacityReserveMargin > 0
- if Z > 1
- @expression(EP, eCapResMarBalanceTrans[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(inputs["dfTransCapRes_excl"][l,res] * inputs["dfDerateTransCapRes"][l,res]* EP[:vFLOW][l,t] for l in 1:L))
- add_similar_to_expression!(EP[:eCapResMarBalance], -eCapResMarBalanceTrans)
- end
- end
+ ## Transmission power flow and loss related expressions:
- ### Constraints ###
+ # Net power flow outgoing from zone "z" at hour "t" in MW
+ @expression(EP,
+ eNet_Export_Flows[z = 1:Z, t = 1:T],
+ sum(inputs["pNet_Map"][l, z] * vFLOW[l, t] for l in 1:L))
- ## Power flow and transmission (between zone) loss related constraints
+ # Losses from power flows into or out of zone "z" in MW
+ @expression(EP,
+ eLosses_By_Zone[z = 1:Z, t = 1:T],
+ sum(abs(inputs["pNet_Map"][l, z]) * (1 / 2) * vTLOSS[l, t] for l in LOSS_LINES))
- # Maximum power flows, power flow on each transmission line cannot exceed maximum capacity of the line at any hour "t"
- @constraints(EP, begin
- cMaxFlow_out[l=1:L, t=1:T], vFLOW[l,t] <= EP[:eAvail_Trans_Cap][l]
- cMaxFlow_in[l=1:L, t=1:T], vFLOW[l,t] >= -EP[:eAvail_Trans_Cap][l]
- end)
+ ## Power Balance Expressions ##
- # Transmission loss related constraints - linear losses as a function of absolute value
- if TRANS_LOSS_SEGS == 1
+ @expression(EP, ePowerBalanceNetExportFlows[t = 1:T, z = 1:Z],
+ -eNet_Export_Flows[z, t])
+ @expression(EP, ePowerBalanceLossesByZone[t = 1:T, z = 1:Z],
+ -eLosses_By_Zone[z, t])
- @constraints(EP, begin
- # Losses are alpha times absolute values
- cTLoss[l in LOSS_LINES, t=1:T], vTLOSS[l,t] == inputs["pPercent_Loss"][l]*(vTAUX_POS[l,t]+vTAUX_NEG[l,t])
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceLossesByZone)
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNetExportFlows)
- # Power flow is sum of positive and negative components
- cTAuxSum[l in LOSS_LINES, t=1:T], vTAUX_POS[l,t]-vTAUX_NEG[l,t] == vFLOW[l,t]
-
- # Sum of auxiliary flow variables in either direction cannot exceed maximum line flow capacity
- cTAuxLimit[l in LOSS_LINES, t=1:T], vTAUX_POS[l,t]+vTAUX_NEG[l,t] <= EP[:eAvail_Trans_Cap][l]
- end)
-
- if UCommit == 1
- # Constraints to limit phantom losses that can occur to avoid discrete cycling costs/opportunity costs due to min down
- @constraints(EP, begin
- cTAuxPosUB[l in LOSS_LINES, t=1:T], vTAUX_POS[l,t] <= vPROD_TRANSCAP_ON[l,t]
-
- # Either negative or positive flows are activated, not both
- cTAuxNegUB[l in LOSS_LINES, t=1:T], vTAUX_NEG[l,t] <= EP[:eAvail_Trans_Cap][l]-vPROD_TRANSCAP_ON[l,t]
-
- # McCormick representation of product of continuous and binary variable
- # (in this case, of: vPROD_TRANSCAP_ON[l,t] = EP[:eAvail_Trans_Cap][l] * vTAUX_POS_ON[l,t])
- # McCormick constraint 1
- [l in LOSS_LINES,t=1:T], vPROD_TRANSCAP_ON[l,t] <= inputs["pTrans_Max_Possible"][l]*vTAUX_POS_ON[l,t]
-
- # McCormick constraint 2
- [l in LOSS_LINES,t=1:T], vPROD_TRANSCAP_ON[l,t] <= EP[:eAvail_Trans_Cap][l]
-
- # McCormick constraint 3
- [l in LOSS_LINES,t=1:T], vPROD_TRANSCAP_ON[l,t] >= EP[:eAvail_Trans_Cap][l]-(1-vTAUX_POS_ON[l,t])*inputs["pTrans_Max_Possible"][l]
- end)
- end
-
- end # End if(TRANS_LOSS_SEGS == 1) block
-
- # When number of segments is greater than 1
- if (TRANS_LOSS_SEGS > 1)
- ## between zone transmission loss constraints
- # Losses are expressed as a piecewise approximation of a quadratic function of power flows across each line
- # Eq 1: Total losses are function of loss coefficient times the sum of auxilary segment variables across all segments of piecewise approximation
- # (Includes both positive domain and negative domain segments)
- @constraint(EP, cTLoss[l in LOSS_LINES, t=1:T], vTLOSS[l,t] ==
- (inputs["pTrans_Loss_Coef"][l]*sum((2*s-1)*(inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_POS[l,s,t] for s=1:TRANS_LOSS_SEGS)) +
- (inputs["pTrans_Loss_Coef"][l]*sum((2*s-1)*(inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_NEG[l,s,t] for s=1:TRANS_LOSS_SEGS)) )
- # Eq 2: Sum of auxilary segment variables (s >= 1) minus the "zero" segment (which allows values to go negative)
- # from both positive and negative domains must total the actual power flow across the line
- @constraints(EP, begin
- cTAuxSumPos[l in LOSS_LINES, t=1:T], sum(vTAUX_POS[l,s,t] for s=1:TRANS_LOSS_SEGS)-vTAUX_POS[l,0,t] == vFLOW[l,t]
- cTAuxSumNeg[l in LOSS_LINES, t=1:T], sum(vTAUX_NEG[l,s,t] for s=1:TRANS_LOSS_SEGS) - vTAUX_NEG[l,0,t] == -vFLOW[l,t]
- end)
- if UCommit == 0 || UCommit == 2
- # Eq 3: Each auxilary segment variables (s >= 1) must be less than the maximum power flow in the zone / number of segments
- @constraints(EP, begin
- cTAuxMaxPos[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], vTAUX_POS[l,s,t] <= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)
- cTAuxMaxNeg[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], vTAUX_NEG[l,s,t] <= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)
- end)
- else # Constraints that can be ommitted if problem is convex (i.e. if not using MILP unit commitment constraints)
- # Eqs 3-4: Ensure that auxilary segment variables do not exceed maximum value per segment and that they
- # "fill" in order: i.e. one segment cannot be non-zero unless prior segment is at it's maximum value
- # (These constraints are necessary to prevents phantom losses in MILP problems)
- @constraints(EP, begin
- cTAuxOrderPos1[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], vTAUX_POS[l,s,t] <= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_POS_ON[l,s,t]
- cTAuxOrderNeg1[l in LOSS_LINES, s=1:TRANS_LOSS_SEGS, t=1:T], vTAUX_NEG[l,s,t] <= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_NEG_ON[l,s,t]
- cTAuxOrderPos2[l in LOSS_LINES, s=1:(TRANS_LOSS_SEGS-1), t=1:T], vTAUX_POS[l,s,t] >= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_POS_ON[l,s+1,t]
- cTAuxOrderNeg2[l in LOSS_LINES, s=1:(TRANS_LOSS_SEGS-1), t=1:T], vTAUX_NEG[l,s,t] >= (inputs["pTrans_Max_Possible"][l]/TRANS_LOSS_SEGS)*vTAUX_NEG_ON[l,s+1,t]
- end)
-
- # Eq 5: Binary constraints to deal with absolute value of vFLOW.
- @constraints(EP, begin
- # If flow is positive, vTAUX_POS segment 0 must be zero; If flow is negative, vTAUX_POS segment 0 must be positive
- # (and takes on value of the full negative flow), forcing all vTAUX_POS other segments (s>=1) to be zero
- cTAuxSegmentZeroPos[l in LOSS_LINES, t=1:T], vTAUX_POS[l,0,t] <= inputs["pTrans_Max_Possible"][l]*(1-vTAUX_POS_ON[l,1,t])
-
- # If flow is negative, vTAUX_NEG segment 0 must be zero; If flow is positive, vTAUX_NEG segment 0 must be positive
- # (and takes on value of the full positive flow), forcing all other vTAUX_NEG segments (s>=1) to be zero
- cTAuxSegmentZeroNeg[l in LOSS_LINES, t=1:T], vTAUX_NEG[l,0,t] <= inputs["pTrans_Max_Possible"][l]*(1-vTAUX_NEG_ON[l,1,t])
- end)
- end
- end # End if(TRANS_LOSS_SEGS > 0) block
-
- # ESR Lossses
- if EnergyShareRequirement >= 1 && IncludeLossesInESR ==1
- @expression(EP, eESRTran[ESR=1:inputs["nESR"]],
- sum(inputs["dfESR"][z,ESR]*sum(inputs["omega"][t]*EP[:eLosses_By_Zone][z,t] for t in 1:T) for z=findall(x->x>0,inputs["dfESR"][:,ESR])))
- add_similar_to_expression!(EP[:eESR], -eESRTran)
- end
+ # Capacity Reserves Margin policy
+ if CapacityReserveMargin > 0
+ if Z > 1
+ @expression(EP,
+ eCapResMarBalanceTrans[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(inputs["dfTransCapRes_excl"][l, res] *
+ inputs["dfDerateTransCapRes"][l, res] * EP[:vFLOW][l, t] for l in 1:L))
+ add_similar_to_expression!(EP[:eCapResMarBalance], -eCapResMarBalanceTrans)
+ end
+ end
-end
\ No newline at end of file
+ ### Constraints ###
+
+ ## Power flow and transmission (between zone) loss related constraints
+
+    # Maximum power flows: power flow on each transmission line cannot exceed the maximum capacity of the line at any hour "t"
+ @constraints(EP,
+ begin
+ cMaxFlow_out[l = 1:L, t = 1:T], vFLOW[l, t] <= EP[:eAvail_Trans_Cap][l]
+ cMaxFlow_in[l = 1:L, t = 1:T], vFLOW[l, t] >= -EP[:eAvail_Trans_Cap][l]
+ end)
+
+ # Transmission loss related constraints - linear losses as a function of absolute value
+ if TRANS_LOSS_SEGS == 1
+ @constraints(EP,
+ begin
+ # Losses are alpha times absolute values
+ cTLoss[l in LOSS_LINES, t = 1:T],
+ vTLOSS[l, t] ==
+ inputs["pPercent_Loss"][l] * (vTAUX_POS[l, t] + vTAUX_NEG[l, t])
+
+ # Power flow is sum of positive and negative components
+ cTAuxSum[l in LOSS_LINES, t = 1:T],
+ vTAUX_POS[l, t] - vTAUX_NEG[l, t] == vFLOW[l, t]
+
+ # Sum of auxiliary flow variables in either direction cannot exceed maximum line flow capacity
+ cTAuxLimit[l in LOSS_LINES, t = 1:T],
+ vTAUX_POS[l, t] + vTAUX_NEG[l, t] <= EP[:eAvail_Trans_Cap][l]
+ end)
+
+ if UCommit == 1
+ # Constraints to limit phantom losses that can occur to avoid discrete cycling costs/opportunity costs due to min down
+ @constraints(EP,
+ begin
+ cTAuxPosUB[l in LOSS_LINES, t = 1:T],
+ vTAUX_POS[l, t] <= vPROD_TRANSCAP_ON[l, t]
+
+ # Either negative or positive flows are activated, not both
+ cTAuxNegUB[l in LOSS_LINES, t = 1:T],
+ vTAUX_NEG[l, t] <= EP[:eAvail_Trans_Cap][l] - vPROD_TRANSCAP_ON[l, t]
+
+ # McCormick representation of product of continuous and binary variable
+ # (in this case, of: vPROD_TRANSCAP_ON[l,t] = EP[:eAvail_Trans_Cap][l] * vTAUX_POS_ON[l,t])
+ # McCormick constraint 1
+ [l in LOSS_LINES, t = 1:T],
+ vPROD_TRANSCAP_ON[l, t] <=
+ inputs["pTrans_Max_Possible"][l] * vTAUX_POS_ON[l, t]
+
+ # McCormick constraint 2
+ [l in LOSS_LINES, t = 1:T],
+ vPROD_TRANSCAP_ON[l, t] <= EP[:eAvail_Trans_Cap][l]
+
+ # McCormick constraint 3
+ [l in LOSS_LINES, t = 1:T],
+ vPROD_TRANSCAP_ON[l, t] >=
+ EP[:eAvail_Trans_Cap][l] -
+ (1 - vTAUX_POS_ON[l, t]) * inputs["pTrans_Max_Possible"][l]
+ end)
+ end
+ end # End if(TRANS_LOSS_SEGS == 1) block
+
+ # When number of segments is greater than 1
+ if (TRANS_LOSS_SEGS > 1)
+ ## between zone transmission loss constraints
+ # Losses are expressed as a piecewise approximation of a quadratic function of power flows across each line
+        # Eq 1: Total losses are a function of the loss coefficient times the sum of auxiliary segment variables across all segments of the piecewise approximation
+ # (Includes both positive domain and negative domain segments)
+ @constraint(EP,
+ cTLoss[l in LOSS_LINES, t = 1:T],
+ vTLOSS[l,
+ t]==
+ (inputs["pTrans_Loss_Coef"][l] *
+ sum((2 * s - 1) * (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_POS[l, s, t] for s in 1:TRANS_LOSS_SEGS)) +
+ (inputs["pTrans_Loss_Coef"][l] *
+ sum((2 * s - 1) * (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_NEG[l, s, t] for s in 1:TRANS_LOSS_SEGS)))
+        # Eq 2: Sum of auxiliary segment variables (s >= 1) minus the "zero" segment (which allows values to go negative)
+ # from both positive and negative domains must total the actual power flow across the line
+ @constraints(EP,
+ begin
+ cTAuxSumPos[l in LOSS_LINES, t = 1:T],
+ sum(vTAUX_POS[l, s, t] for s in 1:TRANS_LOSS_SEGS) - vTAUX_POS[l, 0, t] ==
+ vFLOW[l, t]
+ cTAuxSumNeg[l in LOSS_LINES, t = 1:T],
+ sum(vTAUX_NEG[l, s, t] for s in 1:TRANS_LOSS_SEGS) - vTAUX_NEG[l, 0, t] ==
+ -vFLOW[l, t]
+ end)
+ if UCommit == 0 || UCommit == 2
+            # Eq 3: Each auxiliary segment variable (s >= 1) must be less than the maximum power flow in the zone / number of segments
+ @constraints(EP,
+ begin
+ cTAuxMaxPos[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ vTAUX_POS[l, s, t] <=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS)
+ cTAuxMaxNeg[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ vTAUX_NEG[l, s, t] <=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS)
+ end)
+        else # Constraints that can be omitted if the problem is convex (i.e. if not using MILP unit commitment constraints)
+            # Eqs 3-4: Ensure that auxiliary segment variables do not exceed maximum value per segment and that they
+            # "fill" in order: i.e. one segment cannot be non-zero unless the prior segment is at its maximum value
+            # (These constraints are necessary to prevent phantom losses in MILP problems)
+ @constraints(EP,
+ begin
+ cTAuxOrderPos1[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ vTAUX_POS[l, s, t] <=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_POS_ON[l, s, t]
+ cTAuxOrderNeg1[l in LOSS_LINES, s = 1:TRANS_LOSS_SEGS, t = 1:T],
+ vTAUX_NEG[l, s, t] <=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_NEG_ON[l, s, t]
+ cTAuxOrderPos2[l in LOSS_LINES, s = 1:(TRANS_LOSS_SEGS - 1), t = 1:T],
+ vTAUX_POS[l, s, t] >=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_POS_ON[l, s + 1, t]
+ cTAuxOrderNeg2[l in LOSS_LINES, s = 1:(TRANS_LOSS_SEGS - 1), t = 1:T],
+ vTAUX_NEG[l, s, t] >=
+ (inputs["pTrans_Max_Possible"][l] / TRANS_LOSS_SEGS) *
+ vTAUX_NEG_ON[l, s + 1, t]
+ end)
+
+ # Eq 5: Binary constraints to deal with absolute value of vFLOW.
+ @constraints(EP,
+ begin
+ # If flow is positive, vTAUX_POS segment 0 must be zero; If flow is negative, vTAUX_POS segment 0 must be positive
+ # (and takes on value of the full negative flow), forcing all vTAUX_POS other segments (s>=1) to be zero
+ cTAuxSegmentZeroPos[l in LOSS_LINES, t = 1:T],
+ vTAUX_POS[l, 0, t] <=
+ inputs["pTrans_Max_Possible"][l] * (1 - vTAUX_POS_ON[l, 1, t])
+
+ # If flow is negative, vTAUX_NEG segment 0 must be zero; If flow is positive, vTAUX_NEG segment 0 must be positive
+ # (and takes on value of the full positive flow), forcing all other vTAUX_NEG segments (s>=1) to be zero
+ cTAuxSegmentZeroNeg[l in LOSS_LINES, t = 1:T],
+ vTAUX_NEG[l, 0, t] <=
+ inputs["pTrans_Max_Possible"][l] * (1 - vTAUX_NEG_ON[l, 1, t])
+ end)
+ end
+    end # End if(TRANS_LOSS_SEGS > 1) block
+
+    # ESR Losses
+ if EnergyShareRequirement >= 1 && IncludeLossesInESR == 1
+ @expression(EP, eESRTran[ESR = 1:inputs["nESR"]],
+ sum(inputs["dfESR"][z, ESR] *
+ sum(inputs["omega"][t] * EP[:eLosses_By_Zone][z, t] for t in 1:T)
+ for z in findall(x -> x > 0, inputs["dfESR"][:, ESR])))
+ add_similar_to_expression!(EP[:eESR], -eESRTran)
+ end
+end
diff --git a/src/model/core/ucommit.jl b/src/model/core/ucommit.jl
index c85a03b31a..5db836a24b 100644
--- a/src/model/core/ucommit.jl
+++ b/src/model/core/ucommit.jl
@@ -23,52 +23,53 @@ The total cost of start-ups across all generators subject to unit commitment ($y
The sum of start-up costs is added to the objective function.
"""
function ucommit!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Unit Commitment Module")
-
- T = inputs["T"] # Number of time steps (hours)
- COMMIT = inputs["COMMIT"] # For not, thermal resources are the only ones eligible for Unit Committment
-
- ### Variables ###
-
- ## Decision variables for unit commitment
- # commitment state variable
- @variable(EP, vCOMMIT[y in COMMIT, t=1:T] >= 0)
- # startup event variable
- @variable(EP, vSTART[y in COMMIT, t=1:T] >= 0)
- # shutdown event variable
- @variable(EP, vSHUT[y in COMMIT, t=1:T] >= 0)
-
- ### Expressions ###
-
- ## Objective Function Expressions ##
-
- # Startup costs of "generation" for resource "y" during hour "t"
- @expression(EP, eCStart[y in COMMIT, t=1:T],(inputs["omega"][t]*inputs["C_Start"][y,t]*vSTART[y,t]))
-
- # Julia is fastest when summing over one row one column at a time
- @expression(EP, eTotalCStartT[t=1:T], sum(eCStart[y,t] for y in COMMIT))
- @expression(EP, eTotalCStart, sum(eTotalCStartT[t] for t=1:T))
-
- add_to_expression!(EP[:eObj], eTotalCStart)
-
- ### Constratints ###
- ## Declaration of integer/binary variables
- if setup["UCommit"] == 1 # Integer UC constraints
- for y in COMMIT
- set_integer.(vCOMMIT[y,:])
- set_integer.(vSTART[y,:])
- set_integer.(vSHUT[y,:])
- if y in inputs["RET_CAP"]
- set_integer(EP[:vRETCAP][y])
- end
- if y in inputs["NEW_CAP"]
- set_integer(EP[:vCAP][y])
- end
- if y in inputs["RETROFIT_CAP"]
- set_integer(EP[:vRETROFITCAP][y])
- end
- end
- end #END unit commitment configuration
- return EP
+ println("Unit Commitment Module")
+
+ T = inputs["T"] # Number of time steps (hours)
+    COMMIT = inputs["COMMIT"] # For now, thermal resources are the only ones eligible for Unit Commitment
+
+ ### Variables ###
+
+ ## Decision variables for unit commitment
+ # commitment state variable
+ @variable(EP, vCOMMIT[y in COMMIT, t = 1:T]>=0)
+ # startup event variable
+ @variable(EP, vSTART[y in COMMIT, t = 1:T]>=0)
+ # shutdown event variable
+ @variable(EP, vSHUT[y in COMMIT, t = 1:T]>=0)
+
+ ### Expressions ###
+
+ ## Objective Function Expressions ##
+
+ # Startup costs of "generation" for resource "y" during hour "t"
+ @expression(EP,
+ eCStart[y in COMMIT, t = 1:T],
+ (inputs["omega"][t]*inputs["C_Start"][y, t]*vSTART[y, t]))
+
+    # Julia is fastest when summing over one row or one column at a time
+ @expression(EP, eTotalCStartT[t = 1:T], sum(eCStart[y, t] for y in COMMIT))
+ @expression(EP, eTotalCStart, sum(eTotalCStartT[t] for t in 1:T))
+
+ add_to_expression!(EP[:eObj], eTotalCStart)
+
+    ### Constraints ###
+ ## Declaration of integer/binary variables
+ if setup["UCommit"] == 1 # Integer UC constraints
+ for y in COMMIT
+ set_integer.(vCOMMIT[y, :])
+ set_integer.(vSTART[y, :])
+ set_integer.(vSHUT[y, :])
+ if y in inputs["RET_CAP"]
+ set_integer(EP[:vRETCAP][y])
+ end
+ if y in inputs["NEW_CAP"]
+ set_integer(EP[:vCAP][y])
+ end
+ if y in inputs["RETROFIT_CAP"]
+ set_integer(EP[:vRETROFITCAP][y])
+ end
+ end
+ end #END unit commitment configuration
+ return EP
end
diff --git a/src/model/expression_manipulation.jl b/src/model/expression_manipulation.jl
index fe4fe6e7be..7e783eae55 100644
--- a/src/model/expression_manipulation.jl
+++ b/src/model/expression_manipulation.jl
@@ -25,7 +25,9 @@ This can lead to errors later if a method can only operate on expressions.
We don't currently have a method to do this with non-contiguous indexing.
"""
-function create_empty_expression!(EP::Model, exprname::Symbol, dims::NTuple{N, Int64}) where N
+function create_empty_expression!(EP::Model,
+ exprname::Symbol,
+ dims::NTuple{N, Int64}) where {N}
temp = Array{AffExpr}(undef, dims)
fill_with_zeros!(temp)
EP[exprname] = temp
@@ -49,7 +51,7 @@ end
Fill an array of expressions with zeros in-place.
"""
-function fill_with_zeros!(arr::AbstractArray{GenericAffExpr{C,T}, dims}) where {C,T,dims}
+function fill_with_zeros!(arr::AbstractArray{GenericAffExpr{C, T}, dims}) where {C, T, dims}
for i::Int64 in eachindex(IndexLinear(), arr)::Base.OneTo{Int64}
arr[i] = AffExpr(0.0)
end
@@ -64,7 +66,8 @@ Fill an array of expressions with the specified constant, in-place.
In the future we could expand this to non AffExpr, using GenericAffExpr
e.g. if we wanted to use Float32 instead of Float64
"""
-function fill_with_const!(arr::AbstractArray{GenericAffExpr{C,T}, dims}, con::Real) where {C,T,dims}
+function fill_with_const!(arr::AbstractArray{GenericAffExpr{C, T}, dims},
+ con::Real) where {C, T, dims}
for i in eachindex(arr)
arr[i] = AffExpr(con)
end
@@ -77,7 +80,7 @@ end
###### ###### ###### ###### ###### ######
#
function extract_time_series_to_expression(var::Matrix{VariableRef},
- set::AbstractVector{Int})
+ set::AbstractVector{Int})
TIME_DIM = 2
time_range = 1:size(var)[TIME_DIM]
@@ -87,8 +90,13 @@ function extract_time_series_to_expression(var::Matrix{VariableRef},
return expr
end
-function extract_time_series_to_expression(var::JuMP.Containers.DenseAxisArray{VariableRef, 2, Tuple{X, Base.OneTo{Int64}}, Y},
- set::AbstractVector{Int}) where {X, Y}
+function extract_time_series_to_expression(var::JuMP.Containers.DenseAxisArray{
+ VariableRef,
+ 2,
+ Tuple{X, Base.OneTo{Int64}},
+ Y,
+ },
+ set::AbstractVector{Int}) where {X, Y}
TIME_DIM = 2
time_range = var.axes[TIME_DIM]
@@ -104,7 +112,7 @@ end
###### ###### ###### ###### ###### ######
# Version for single element
-function add_similar_to_expression!(expr1::GenericAffExpr{C,T}, expr2::V) where {C,T,V}
+function add_similar_to_expression!(expr1::GenericAffExpr{C, T}, expr2::V) where {C, T, V}
add_to_expression!(expr1, expr2)
return nothing
end
@@ -116,7 +124,8 @@ Add an array of some type `V` to an array of expressions, in-place.
This will work on JuMP DenseContainers which do not have linear indexing from 1:length(arr).
However, the accessed parts of both arrays must have the same dimensions.
"""
-function add_similar_to_expression!(expr1::AbstractArray{GenericAffExpr{C,T}, dim1}, expr2::AbstractArray{V, dim2}) where {C,T,V,dim1,dim2}
+function add_similar_to_expression!(expr1::AbstractArray{GenericAffExpr{C, T}, dim1},
+ expr2::AbstractArray{V, dim2}) where {C, T, V, dim1, dim2}
# This is defined for Arrays of different dimensions
# despite the fact it will definitely throw an error
# because the error will tell the user / developer
@@ -134,7 +143,7 @@ end
###### ###### ###### ###### ###### ######
# Version for single element
-function add_term_to_expression!(expr1::GenericAffExpr{C,T}, expr2::V) where {C,T,V}
+function add_term_to_expression!(expr1::GenericAffExpr{C, T}, expr2::V) where {C, T, V}
add_to_expression!(expr1, expr2)
return nothing
end
@@ -145,7 +154,8 @@ end
Add an entry of type `V` to an array of expressions, in-place.
This will work on JuMP DenseContainers which do not have linear indexing from 1:length(arr).
"""
-function add_term_to_expression!(expr1::AbstractArray{GenericAffExpr{C,T}, dims}, expr2::V) where {C,T,V,dims}
+function add_term_to_expression!(expr1::AbstractArray{GenericAffExpr{C, T}, dims},
+ expr2::V) where {C, T, V, dims}
for i in eachindex(expr1)
add_to_expression!(expr1[i], expr2)
end
@@ -162,7 +172,8 @@ end
Check that two arrays have the same dimensions.
If not, return an error message which includes the dimensions of both arrays.
"""
-function check_sizes_match(expr1::AbstractArray{C, dim1}, expr2::AbstractArray{T, dim2}) where {C,T,dim1, dim2}
+function check_sizes_match(expr1::AbstractArray{C, dim1},
+ expr2::AbstractArray{T, dim2}) where {C, T, dim1, dim2}
# After testing, this appears to be just as fast as a method for Array{GenericAffExpr{C,T}, dims} or Array{AffExpr, dims}
if size(expr1) != size(expr2)
error("
@@ -181,7 +192,7 @@ as the method only works on the constituent types making up the GenericAffExpr,
Also, the default MethodError from add_to_expression! is sometime more informative than the error message here.
"""
function check_addable_to_expr(C::DataType, T::DataType)
- if !(hasmethod(add_to_expression!, (C,T)))
+ if !(hasmethod(add_to_expression!, (C, T)))
error("No method found for add_to_expression! with types $(C) and $(T)")
end
end
@@ -196,11 +207,11 @@ end
Sum an array of expressions into a single expression and return the result.
We're using errors from add_to_expression!() to check that the types are compatible.
"""
-function sum_expression(expr::AbstractArray{C, dims}) :: AffExpr where {C,dims}
+function sum_expression(expr::AbstractArray{C, dims})::AffExpr where {C, dims}
# check_addable_to_expr(C,C)
total = AffExpr(0.0)
for i in eachindex(expr)
add_to_expression!(total, expr[i])
end
return total
-end
\ No newline at end of file
+end
diff --git a/src/model/generate_model.jl b/src/model/generate_model.jl
index 677fc7b03d..ff16f66875 100644
--- a/src/model/generate_model.jl
+++ b/src/model/generate_model.jl
@@ -67,178 +67,181 @@ The power balance constraint of the model ensures that electricity demand is met
# Returns
- `Model`: The model object containing the entire optimization problem model to be solved by solve_model.jl
"""
-function generate_model(setup::Dict,inputs::Dict,OPTIMIZER::MOI.OptimizerWithAttributes)
+function generate_model(setup::Dict, inputs::Dict, OPTIMIZER::MOI.OptimizerWithAttributes)
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ ## Start pre-solve timer
+ presolver_start_time = time()
- ## Start pre-solve timer
- presolver_start_time = time()
+ # Generate Energy Portfolio (EP) Model
+ EP = Model(OPTIMIZER)
+ set_string_names_on_creation(EP, Bool(setup["EnableJuMPStringNames"]))
+ # Introduce dummy variable fixed to zero to ensure that expressions like eTotalCap,
+ # eTotalCapCharge, eTotalCapEnergy and eAvail_Trans_Cap all have a JuMP variable
+ @variable(EP, vZERO==0)
- # Generate Energy Portfolio (EP) Model
- EP = Model(OPTIMIZER)
- set_string_names_on_creation(EP, Bool(setup["EnableJuMPStringNames"]))
- # Introduce dummy variable fixed to zero to ensure that expressions like eTotalCap,
- # eTotalCapCharge, eTotalCapEnergy and eAvail_Trans_Cap all have a JuMP variable
- @variable(EP, vZERO == 0);
+ # Initialize Power Balance Expression
+ # Expression for "baseline" power balance constraint
+ create_empty_expression!(EP, :ePowerBalance, (T, Z))
+
+ # Initialize Objective Function Expression
+ EP[:eObj] = AffExpr(0.0)
+
+ create_empty_expression!(EP, :eGenerationByZone, (Z, T))
+
+ # Energy losses related to technologies
+ create_empty_expression!(EP, :eELOSSByZone, Z)
- # Initialize Power Balance Expression
- # Expression for "baseline" power balance constraint
- create_empty_expression!(EP, :ePowerBalance, (T, Z))
-
- # Initialize Objective Function Expression
- EP[:eObj] = AffExpr(0.0)
-
- create_empty_expression!(EP, :eGenerationByZone, (Z, T))
-
- # Energy losses related to technologies
- create_empty_expression!(EP, :eELOSSByZone, Z)
-
- # Initialize Capacity Reserve Margin Expression
- if setup["CapacityReserveMargin"] > 0
- create_empty_expression!(EP, :eCapResMarBalance, (inputs["NCapacityReserveMargin"], T))
- end
-
- # Energy Share Requirement
- if setup["EnergyShareRequirement"] >= 1
- create_empty_expression!(EP, :eESR, inputs["nESR"])
- end
-
- if setup["MinCapReq"] == 1
- create_empty_expression!(EP, :eMinCapRes, inputs["NumberOfMinCapReqs"])
- end
-
- if setup["MaxCapReq"] == 1
- create_empty_expression!(EP, :eMaxCapRes, inputs["NumberOfMaxCapReqs"])
- end
-
- # Infrastructure
- discharge!(EP, inputs, setup)
-
- non_served_energy!(EP, inputs, setup)
-
- investment_discharge!(EP, inputs, setup)
-
- if setup["UCommit"] > 0
- ucommit!(EP, inputs, setup)
- end
-
- fuel!(EP, inputs, setup)
-
- co2!(EP, inputs)
-
- if setup["OperationalReserves"] > 0
- operational_reserves!(EP, inputs, setup)
- end
-
- if Z > 1
- investment_transmission!(EP, inputs, setup)
- transmission!(EP, inputs, setup)
- end
-
- if Z > 1 && setup["DC_OPF"] != 0
- dcopf_transmission!(EP, inputs, setup)
- end
-
- # Technologies
- # Model constraints, variables, expression related to dispatchable renewable resources
-
- if !isempty(inputs["VRE"])
- curtailable_variable_renewable!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to non-dispatchable renewable resources
- if !isempty(inputs["MUST_RUN"])
- must_run!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to energy storage modeling
- if !isempty(inputs["STOR_ALL"])
- storage!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to reservoir hydropower resources
- if !isempty(inputs["HYDRO_RES"])
- hydro_res!(EP, inputs, setup)
- end
-
- if !isempty(inputs["ELECTROLYZER"])
- electrolyzer!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to reservoir hydropower resources with long duration storage
- if inputs["REP_PERIOD"] > 1 && !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
- hydro_inter_period_linkage!(EP, inputs)
- end
-
- # Model constraints, variables, expression related to demand flexibility resources
- if !isempty(inputs["FLEX"])
- flexible_demand!(EP, inputs, setup)
- end
- # Model constraints, variables, expression related to thermal resource technologies
- if !isempty(inputs["THERM_ALL"])
- thermal!(EP, inputs, setup)
- end
-
- # Model constraints, variables, expression related to retrofit technologies
- if !isempty(inputs["RETROFIT_OPTIONS"])
- EP = retrofit(EP, inputs)
- end
-
- # Model constraints, variables, expressions related to the co-located VRE-storage resources
- if !isempty(inputs["VRE_STOR"])
- vre_stor!(EP, inputs, setup)
- end
-
- # Policies
-
- if setup["OperationalReserves"] > 0
- operational_reserves_constraints!(EP, inputs)
- end
-
- # CO2 emissions limits
- if setup["CO2Cap"] > 0
- co2_cap!(EP, inputs, setup)
- end
-
- # Endogenous Retirements
- if setup["MultiStage"] > 0
- endogenous_retirement!(EP, inputs, setup)
- end
-
- # Energy Share Requirement
- if setup["EnergyShareRequirement"] >= 1
- energy_share_requirement!(EP, inputs, setup)
- end
-
- #Capacity Reserve Margin
- if setup["CapacityReserveMargin"] > 0
- cap_reserve_margin!(EP, inputs, setup)
- end
-
- if (setup["MinCapReq"] == 1)
- minimum_capacity_requirement!(EP, inputs, setup)
- end
-
- if setup["MaxCapReq"] == 1
- maximum_capacity_requirement!(EP, inputs, setup)
- end
-
- ## Define the objective function
- @objective(EP,Min, setup["ObjScale"] * EP[:eObj])
-
- ## Power balance constraints
- # demand = generation + storage discharge - storage charge - demand deferral + deferred demand satisfaction - demand curtailment (NSE)
- # + incoming power flows - outgoing power flows - flow losses - charge of heat storage + generation from NACC
- @constraint(EP, cPowerBalance[t=1:T, z=1:Z], EP[:ePowerBalance][t,z] == inputs["pD"][t,z])
-
- ## Record pre-solver time
- presolver_time = time() - presolver_start_time
- if setup["PrintModel"] == 1
- filepath = joinpath(pwd(), "YourModel.lp")
- JuMP.write_to_file(EP, filepath)
- println("Model Printed")
- end
+ # Initialize Capacity Reserve Margin Expression
+ if setup["CapacityReserveMargin"] > 0
+ create_empty_expression!(EP,
+ :eCapResMarBalance,
+ (inputs["NCapacityReserveMargin"], T))
+ end
+
+ # Energy Share Requirement
+ if setup["EnergyShareRequirement"] >= 1
+ create_empty_expression!(EP, :eESR, inputs["nESR"])
+ end
+
+ if setup["MinCapReq"] == 1
+ create_empty_expression!(EP, :eMinCapRes, inputs["NumberOfMinCapReqs"])
+ end
+
+ if setup["MaxCapReq"] == 1
+ create_empty_expression!(EP, :eMaxCapRes, inputs["NumberOfMaxCapReqs"])
+ end
+
+ # Infrastructure
+ discharge!(EP, inputs, setup)
+
+ non_served_energy!(EP, inputs, setup)
+
+ investment_discharge!(EP, inputs, setup)
+
+ if setup["UCommit"] > 0
+ ucommit!(EP, inputs, setup)
+ end
+
+ fuel!(EP, inputs, setup)
+
+ co2!(EP, inputs)
+
+ if setup["OperationalReserves"] > 0
+ operational_reserves!(EP, inputs, setup)
+ end
+
+ if Z > 1
+ investment_transmission!(EP, inputs, setup)
+ transmission!(EP, inputs, setup)
+ end
+
+ if Z > 1 && setup["DC_OPF"] != 0
+ dcopf_transmission!(EP, inputs, setup)
+ end
+
+ # Technologies
+ # Model constraints, variables, expression related to dispatchable renewable resources
+
+ if !isempty(inputs["VRE"])
+ curtailable_variable_renewable!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to non-dispatchable renewable resources
+ if !isempty(inputs["MUST_RUN"])
+ must_run!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to energy storage modeling
+ if !isempty(inputs["STOR_ALL"])
+ storage!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to reservoir hydropower resources
+ if !isempty(inputs["HYDRO_RES"])
+ hydro_res!(EP, inputs, setup)
+ end
+
+ if !isempty(inputs["ELECTROLYZER"])
+ electrolyzer!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to reservoir hydropower resources with long duration storage
+ if inputs["REP_PERIOD"] > 1 && !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
+ hydro_inter_period_linkage!(EP, inputs)
+ end
+
+ # Model constraints, variables, expression related to demand flexibility resources
+ if !isempty(inputs["FLEX"])
+ flexible_demand!(EP, inputs, setup)
+ end
+ # Model constraints, variables, expression related to thermal resource technologies
+ if !isempty(inputs["THERM_ALL"])
+ thermal!(EP, inputs, setup)
+ end
+
+ # Model constraints, variables, expression related to retrofit technologies
+ if !isempty(inputs["RETROFIT_OPTIONS"])
+ EP = retrofit(EP, inputs)
+ end
+
+ # Model constraints, variables, expressions related to the co-located VRE-storage resources
+ if !isempty(inputs["VRE_STOR"])
+ vre_stor!(EP, inputs, setup)
+ end
+
+ # Policies
+
+ if setup["OperationalReserves"] > 0
+ operational_reserves_constraints!(EP, inputs)
+ end
+
+ # CO2 emissions limits
+ if setup["CO2Cap"] > 0
+ co2_cap!(EP, inputs, setup)
+ end
+
+ # Endogenous Retirements
+ if setup["MultiStage"] > 0
+ endogenous_retirement!(EP, inputs, setup)
+ end
+
+ # Energy Share Requirement
+ if setup["EnergyShareRequirement"] >= 1
+ energy_share_requirement!(EP, inputs, setup)
+ end
+
+ #Capacity Reserve Margin
+ if setup["CapacityReserveMargin"] > 0
+ cap_reserve_margin!(EP, inputs, setup)
+ end
+
+ if (setup["MinCapReq"] == 1)
+ minimum_capacity_requirement!(EP, inputs, setup)
+ end
+
+ if setup["MaxCapReq"] == 1
+ maximum_capacity_requirement!(EP, inputs, setup)
+ end
+
+ ## Define the objective function
+ @objective(EP, Min, setup["ObjScale"]*EP[:eObj])
+
+ ## Power balance constraints
+ # demand = generation + storage discharge - storage charge - demand deferral + deferred demand satisfaction - demand curtailment (NSE)
+ # + incoming power flows - outgoing power flows - flow losses - charge of heat storage + generation from NACC
+ @constraint(EP,
+ cPowerBalance[t = 1:T, z = 1:Z],
+ EP[:ePowerBalance][t, z]==inputs["pD"][t, z])
+
+ ## Record pre-solver time
+ presolver_time = time() - presolver_start_time
+ if setup["PrintModel"] == 1
+ filepath = joinpath(pwd(), "YourModel.lp")
+ JuMP.write_to_file(EP, filepath)
+ println("Model Printed")
+ end
return EP
end
diff --git a/src/model/policies/cap_reserve_margin.jl b/src/model/policies/cap_reserve_margin.jl
index 5a6aa1ba1d..74052fabd4 100755
--- a/src/model/policies/cap_reserve_margin.jl
+++ b/src/model/policies/cap_reserve_margin.jl
@@ -57,23 +57,30 @@ The expressions establishing the capacity reserve margin contributions of each t
class are included in their respective technology modules.
"""
function cap_reserve_margin!(EP::Model, inputs::Dict, setup::Dict)
- # capacity reserve margin constraint
- T = inputs["T"]
- NCRM = inputs["NCapacityReserveMargin"]
- println("Capacity Reserve Margin Policies Module")
+ # capacity reserve margin constraint
+ T = inputs["T"]
+ NCRM = inputs["NCapacityReserveMargin"]
+ println("Capacity Reserve Margin Policies Module")
- # if input files are present, add capacity reserve margin slack variables
- if haskey(inputs, "dfCapRes_slack")
- @variable(EP,vCapResSlack[res=1:NCRM, t=1:T]>=0)
- add_similar_to_expression!(EP[:eCapResMarBalance], vCapResSlack)
+ # if input files are present, add capacity reserve margin slack variables
+ if haskey(inputs, "dfCapRes_slack")
+ @variable(EP, vCapResSlack[res = 1:NCRM, t = 1:T]>=0)
+ add_similar_to_expression!(EP[:eCapResMarBalance], vCapResSlack)
- @expression(EP, eCapResSlack_Year[res=1:NCRM], sum(EP[:vCapResSlack][res,t] * inputs["omega"][t] for t in 1:T))
- @expression(EP, eCCapResSlack[res=1:NCRM], inputs["dfCapRes_slack"][res,:PriceCap] * EP[:eCapResSlack_Year][res])
- @expression(EP, eCTotalCapResSlack, sum(EP[:eCCapResSlack][res] for res = 1:NCRM))
- add_to_expression!(EP[:eObj], eCTotalCapResSlack)
- end
+ @expression(EP,
+ eCapResSlack_Year[res = 1:NCRM],
+ sum(EP[:vCapResSlack][res, t] * inputs["omega"][t] for t in 1:T))
+ @expression(EP,
+ eCCapResSlack[res = 1:NCRM],
+ inputs["dfCapRes_slack"][res, :PriceCap]*EP[:eCapResSlack_Year][res])
+ @expression(EP, eCTotalCapResSlack, sum(EP[:eCCapResSlack][res] for res in 1:NCRM))
+ add_to_expression!(EP[:eObj], eCTotalCapResSlack)
+ end
- @constraint(EP, cCapacityResMargin[res=1:NCRM, t=1:T], EP[:eCapResMarBalance][res, t]
- >= sum(inputs["pD"][t,z] * (1 + inputs["dfCapRes"][z,res])
- for z=findall(x->x!=0,inputs["dfCapRes"][:,res])))
+ @constraint(EP,
+ cCapacityResMargin[res = 1:NCRM, t = 1:T],
+ EP[:eCapResMarBalance][res,
+ t]
+ >=sum(inputs["pD"][t, z] * (1 + inputs["dfCapRes"][z, res])
+ for z in findall(x -> x != 0, inputs["dfCapRes"][:, res])))
end
diff --git a/src/model/policies/co2_cap.jl b/src/model/policies/co2_cap.jl
index 252cb3a7f3..d14b69a265 100644
--- a/src/model/policies/co2_cap.jl
+++ b/src/model/policies/co2_cap.jl
@@ -66,54 +66,59 @@ Similarly, a generation based emission constraint is defined by setting the emis
Note that the generator-side rate-based constraint can be used to represent a fee-rebate (``feebate'') system: the dirty generators that emit above the bar ($\epsilon_{z,p,gen}^{maxCO_2}$) have to buy emission allowances from the emission regulator in the region $z$ where they are located; in the same vein, the clean generators get rebates from the emission regulator at an emission allowance price being the dual variable of the emissions rate constraint.
"""
function co2_cap!(EP::Model, inputs::Dict, setup::Dict)
-
- println("CO2 Policies Module")
-
- SEG = inputs["SEG"] # Number of lines
- T = inputs["T"] # Number of time steps (hours)
-
- ### Variable ###
- # if input files are present, add CO2 cap slack variables
- if haskey(inputs, "dfCO2Cap_slack")
- @variable(EP, vCO2Cap_slack[cap = 1:inputs["NCO2Cap"]]>=0)
-
- @expression(EP, eCCO2Cap_slack[cap = 1:inputs["NCO2Cap"]],
- inputs["dfCO2Cap_slack"][cap,:PriceCap] * EP[:vCO2Cap_slack][cap])
- @expression(EP, eCTotalCO2CapSlack,
- sum(EP[:eCCO2Cap_slack][cap] for cap = 1:inputs["NCO2Cap"]))
-
- add_to_expression!(EP[:eObj], eCTotalCO2CapSlack)
- else
- @variable(EP, vCO2Cap_slack[cap = 1:inputs["NCO2Cap"]]==0)
- end
-
- ### Constraints ###
-
- ## Mass-based: Emissions constraint in absolute emissions limit (tons)
- if setup["CO2Cap"] == 1
- @constraint(EP, cCO2Emissions_systemwide[cap=1:inputs["NCO2Cap"]],
- sum(inputs["omega"][t] * EP[:eEmissionsByZone][z,t] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]), t=1:T) -
- vCO2Cap_slack[cap] <=
- sum(inputs["dfMaxCO2"][z,cap] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]))
- )
-
- ## (fulfilled) demand + Rate-based: Emissions constraint in terms of rate (tons/MWh)
- elseif setup["CO2Cap"] == 2 ##This part moved to non_served_energy.jl
-
- @constraint(EP, cCO2Emissions_systemwide[cap=1:inputs["NCO2Cap"]],
- sum(inputs["omega"][t] * EP[:eEmissionsByZone][z,t] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]), t=1:T) -
- vCO2Cap_slack[cap] <=
- sum(inputs["dfMaxCO2Rate"][z,cap] * sum(inputs["omega"][t] * (inputs["pD"][t,z] - sum(EP[:vNSE][s,t,z] for s in 1:SEG)) for t=1:T) for z = findall(x->x==1, inputs["dfCO2CapZones"][:,cap])) +
- sum(inputs["dfMaxCO2Rate"][z,cap] * setup["StorageLosses"] * EP[:eELOSSByZone][z] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]))
- )
-
- ## Generation + Rate-based: Emissions constraint in terms of rate (tons/MWh)
- elseif (setup["CO2Cap"]==3)
- @constraint(EP, cCO2Emissions_systemwide[cap=1:inputs["NCO2Cap"]],
- sum(inputs["omega"][t] * EP[:eEmissionsByZone][z,t] for z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]), t=1:T) -
- vCO2Cap_slack[cap] <=
- sum(inputs["dfMaxCO2Rate"][z,cap] * inputs["omega"][t] * EP[:eGenerationByZone][z,t] for t=1:T, z=findall(x->x==1, inputs["dfCO2CapZones"][:,cap]))
- )
- end
-
+ println("CO2 Policies Module")
+
+ SEG = inputs["SEG"] # Number of lines
+ T = inputs["T"] # Number of time steps (hours)
+
+ ### Variable ###
+ # if input files are present, add CO2 cap slack variables
+ if haskey(inputs, "dfCO2Cap_slack")
+ @variable(EP, vCO2Cap_slack[cap = 1:inputs["NCO2Cap"]]>=0)
+
+ @expression(EP, eCCO2Cap_slack[cap = 1:inputs["NCO2Cap"]],
+ inputs["dfCO2Cap_slack"][cap, :PriceCap]*EP[:vCO2Cap_slack][cap])
+ @expression(EP, eCTotalCO2CapSlack,
+ sum(EP[:eCCO2Cap_slack][cap] for cap in 1:inputs["NCO2Cap"]))
+
+ add_to_expression!(EP[:eObj], eCTotalCO2CapSlack)
+ else
+ @variable(EP, vCO2Cap_slack[cap = 1:inputs["NCO2Cap"]]==0)
+ end
+
+ ### Constraints ###
+
+ ## Mass-based: Emissions constraint in absolute emissions limit (tons)
+ if setup["CO2Cap"] == 1
+ @constraint(EP, cCO2Emissions_systemwide[cap = 1:inputs["NCO2Cap"]],
+ sum(inputs["omega"][t] * EP[:eEmissionsByZone][z, t]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap]), t in 1:T) -
+ vCO2Cap_slack[cap]<=
+ sum(inputs["dfMaxCO2"][z, cap]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])))
+
+ ## (fulfilled) demand + Rate-based: Emissions constraint in terms of rate (tons/MWh)
+ elseif setup["CO2Cap"] == 2 ##This part moved to non_served_energy.jl
+ @constraint(EP, cCO2Emissions_systemwide[cap = 1:inputs["NCO2Cap"]],
+ sum(inputs["omega"][t] * EP[:eEmissionsByZone][z, t]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap]), t in 1:T) -
+ vCO2Cap_slack[cap]<=
+ sum(inputs["dfMaxCO2Rate"][z, cap] * sum(inputs["omega"][t] *
+ (inputs["pD"][t, z] - sum(EP[:vNSE][s, t, z] for s in 1:SEG))
+ for t in 1:T)
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])) +
+ sum(inputs["dfMaxCO2Rate"][z, cap] * setup["StorageLosses"] *
+ EP[:eELOSSByZone][z]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])))
+
+ ## Generation + Rate-based: Emissions constraint in terms of rate (tons/MWh)
+ elseif (setup["CO2Cap"] == 3)
+ @constraint(EP, cCO2Emissions_systemwide[cap = 1:inputs["NCO2Cap"]],
+ sum(inputs["omega"][t] * EP[:eEmissionsByZone][z, t]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap]), t in 1:T) -
+ vCO2Cap_slack[cap]<=
+ sum(inputs["dfMaxCO2Rate"][z, cap] * inputs["omega"][t] *
+ EP[:eGenerationByZone][z, t]
+ for t in 1:T, z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])))
+ end
end
diff --git a/src/model/policies/energy_share_requirement.jl b/src/model/policies/energy_share_requirement.jl
index 2c65aa61ee..a4e3225c08 100644
--- a/src/model/policies/energy_share_requirement.jl
+++ b/src/model/policies/energy_share_requirement.jl
@@ -24,22 +24,23 @@ In practice, most existing renewable portfolio standard policies do not account
However, with 100% RPS or CES policies enacted in several jurisdictions, policy makers may wish to include storage losses in the minimum energy share, as otherwise there will be a difference between total generation and total demand that will permit continued use of non-qualifying resources (e.g. emitting generators).
"""
function energy_share_requirement!(EP::Model, inputs::Dict, setup::Dict)
+ println("Energy Share Requirement Policies Module")
- println("Energy Share Requirement Policies Module")
-
- # if input files are present, add energy share requirement slack variables
- if haskey(inputs, "dfESR_slack")
- @variable(EP, vESR_slack[ESR=1:inputs["nESR"]]>=0)
- add_similar_to_expression!(EP[:eESR], vESR_slack)
+ # if input files are present, add energy share requirement slack variables
+ if haskey(inputs, "dfESR_slack")
+ @variable(EP, vESR_slack[ESR = 1:inputs["nESR"]]>=0)
+ add_similar_to_expression!(EP[:eESR], vESR_slack)
- @expression(EP, eCESRSlack[ESR=1:inputs["nESR"]], inputs["dfESR_slack"][ESR,:PriceCap] * EP[:vESR_slack][ESR])
- @expression(EP, eCTotalESRSlack, sum(EP[:eCESRSlack][ESR] for ESR = 1:inputs["nESR"]))
-
- add_to_expression!(EP[:eObj], eCTotalESRSlack)
- end
-
- ## Energy Share Requirements (minimum energy share from qualifying renewable resources) constraint
- @constraint(EP, cESRShare[ESR=1:inputs["nESR"]], EP[:eESR][ESR] >= 0)
+ @expression(EP,
+ eCESRSlack[ESR = 1:inputs["nESR"]],
+ inputs["dfESR_slack"][ESR, :PriceCap]*EP[:vESR_slack][ESR])
+ @expression(EP,
+ eCTotalESRSlack,
+ sum(EP[:eCESRSlack][ESR] for ESR in 1:inputs["nESR"]))
+ add_to_expression!(EP[:eObj], eCTotalESRSlack)
+ end
+ ## Energy Share Requirements (minimum energy share from qualifying renewable resources) constraint
+ @constraint(EP, cESRShare[ESR = 1:inputs["nESR"]], EP[:eESR][ESR]>=0)
end
diff --git a/src/model/policies/maximum_capacity_requirement.jl b/src/model/policies/maximum_capacity_requirement.jl
index c36d994b99..4f92aa4017 100644
--- a/src/model/policies/maximum_capacity_requirement.jl
+++ b/src/model/policies/maximum_capacity_requirement.jl
@@ -9,21 +9,25 @@ The maximum capacity requirement constraint allows for modeling maximum deployme
Note that $\epsilon_{y,z,p}^{MaxCapReq}$ is the eligiblity of a generator of technology $y$ in zone $z$ of requirement $p$ and will be equal to $1$ for eligible generators and will be zero for ineligible resources. The dual value of each maximum capacity constraint can be interpreted as the required payment (e.g. subsidy) per MW per year required to ensure adequate revenue for the qualifying resources.
"""
function maximum_capacity_requirement!(EP::Model, inputs::Dict, setup::Dict)
+ println("Maximum Capacity Requirement Module")
+ NumberOfMaxCapReqs = inputs["NumberOfMaxCapReqs"]
- println("Maximum Capacity Requirement Module")
- NumberOfMaxCapReqs = inputs["NumberOfMaxCapReqs"]
+ # if input files are present, add maximum capacity requirement slack variables
+ if haskey(inputs, "MaxCapPriceCap")
+ @variable(EP, vMaxCap_slack[maxcap = 1:NumberOfMaxCapReqs]>=0)
+ add_similar_to_expression!(EP[:eMaxCapRes], -vMaxCap_slack)
- # if input files are present, add maximum capacity requirement slack variables
- if haskey(inputs, "MaxCapPriceCap")
- @variable(EP, vMaxCap_slack[maxcap = 1:NumberOfMaxCapReqs]>=0)
- add_similar_to_expression!(EP[:eMaxCapRes], -vMaxCap_slack)
+ @expression(EP,
+ eCMaxCap_slack[maxcap = 1:NumberOfMaxCapReqs],
+ inputs["MaxCapPriceCap"][maxcap]*EP[:vMaxCap_slack][maxcap])
+ @expression(EP,
+ eTotalCMaxCapSlack,
+ sum(EP[:eCMaxCap_slack][maxcap] for maxcap in 1:NumberOfMaxCapReqs))
- @expression(EP, eCMaxCap_slack[maxcap = 1:NumberOfMaxCapReqs], inputs["MaxCapPriceCap"][maxcap] * EP[:vMaxCap_slack][maxcap])
- @expression(EP, eTotalCMaxCapSlack, sum(EP[:eCMaxCap_slack][maxcap] for maxcap = 1:NumberOfMaxCapReqs))
-
- add_to_expression!(EP[:eObj], eTotalCMaxCapSlack)
- end
-
- @constraint(EP, cZoneMaxCapReq[maxcap = 1:NumberOfMaxCapReqs], EP[:eMaxCapRes][maxcap] <= inputs["MaxCapReq"][maxcap])
+ add_to_expression!(EP[:eObj], eTotalCMaxCapSlack)
+ end
+ @constraint(EP,
+ cZoneMaxCapReq[maxcap = 1:NumberOfMaxCapReqs],
+ EP[:eMaxCapRes][maxcap]<=inputs["MaxCapReq"][maxcap])
end
diff --git a/src/model/policies/minimum_capacity_requirement.jl b/src/model/policies/minimum_capacity_requirement.jl
index c07b10821e..333c6b551d 100644
--- a/src/model/policies/minimum_capacity_requirement.jl
+++ b/src/model/policies/minimum_capacity_requirement.jl
@@ -15,22 +15,25 @@ Also note that co-located VRE and storage resources, there are three different c
requirements.
"""
function minimum_capacity_requirement!(EP::Model, inputs::Dict, setup::Dict)
+ println("Minimum Capacity Requirement Module")
+ NumberOfMinCapReqs = inputs["NumberOfMinCapReqs"]
- println("Minimum Capacity Requirement Module")
- NumberOfMinCapReqs = inputs["NumberOfMinCapReqs"]
+ # if input files are present, add minimum capacity requirement slack variables
+ if haskey(inputs, "MinCapPriceCap")
+ @variable(EP, vMinCap_slack[mincap = 1:NumberOfMinCapReqs]>=0)
+ add_similar_to_expression!(EP[:eMinCapRes], vMinCap_slack)
- # if input files are present, add minimum capacity requirement slack variables
- if haskey(inputs, "MinCapPriceCap")
- @variable(EP, vMinCap_slack[mincap = 1:NumberOfMinCapReqs]>=0)
- add_similar_to_expression!(EP[:eMinCapRes], vMinCap_slack)
-
- @expression(EP, eCMinCap_slack[mincap = 1:NumberOfMinCapReqs], inputs["MinCapPriceCap"][mincap] * EP[:vMinCap_slack][mincap])
- @expression(EP, eTotalCMinCapSlack, sum(EP[:eCMinCap_slack][mincap] for mincap = 1:NumberOfMinCapReqs))
-
- add_to_expression!(EP[:eObj], eTotalCMinCapSlack)
- end
-
- @constraint(EP, cZoneMinCapReq[mincap = 1:NumberOfMinCapReqs], EP[:eMinCapRes][mincap] >= inputs["MinCapReq"][mincap])
+ @expression(EP,
+ eCMinCap_slack[mincap = 1:NumberOfMinCapReqs],
+ inputs["MinCapPriceCap"][mincap]*EP[:vMinCap_slack][mincap])
+ @expression(EP,
+ eTotalCMinCapSlack,
+ sum(EP[:eCMinCap_slack][mincap] for mincap in 1:NumberOfMinCapReqs))
+ add_to_expression!(EP[:eObj], eTotalCMinCapSlack)
+ end
+ @constraint(EP,
+ cZoneMinCapReq[mincap = 1:NumberOfMinCapReqs],
+ EP[:eMinCapRes][mincap]>=inputs["MinCapReq"][mincap])
end
diff --git a/src/model/resources/curtailable_variable_renewable/curtailable_variable_renewable.jl b/src/model/resources/curtailable_variable_renewable/curtailable_variable_renewable.jl
index e86179f132..9b790789d6 100644
--- a/src/model/resources/curtailable_variable_renewable/curtailable_variable_renewable.jl
+++ b/src/model/resources/curtailable_variable_renewable/curtailable_variable_renewable.jl
@@ -14,41 +14,43 @@ The above constraint is defined as an inequality instead of an equality to allow
Note that if ```OperationalReserves=1``` indicating that frequency regulation and operating reserves are modeled, then this function calls ```curtailable_variable_renewable_operational_reserves!()```, which replaces the above constraints with a formulation inclusive of reserve provision.
"""
function curtailable_variable_renewable!(EP::Model, inputs::Dict, setup::Dict)
- ## Controllable variable renewable generators
- ### Option of modeling VRE generators with multiple availability profiles and capacity limits - Num_VRE_Bins in Vre.csv >1
- ## Default value of Num_VRE_Bins ==1
- println("Dispatchable Resources Module")
+ ## Controllable variable renewable generators
+ ### Option of modeling VRE generators with multiple availability profiles and capacity limits - Num_VRE_Bins in Vre.csv >1
+ ## Default value of Num_VRE_Bins ==1
+ println("Dispatchable Resources Module")
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- OperationalReserves = setup["OperationalReserves"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ OperationalReserves = setup["OperationalReserves"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- VRE = inputs["VRE"]
+ VRE = inputs["VRE"]
- VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
- VRE_NO_POWER_OUT = setdiff(VRE, VRE_POWER_OUT)
+ VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
+ VRE_NO_POWER_OUT = setdiff(VRE, VRE_POWER_OUT)
- ### Expressions ###
+ ### Expressions ###
- ## Power Balance Expressions ##
+ ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceDisp[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(VRE, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], EP[:ePowerBalanceDisp])
+ @expression(EP, ePowerBalanceDisp[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] for y in intersect(VRE, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], EP[:ePowerBalanceDisp])
- # Capacity Reserves Margin policy
- if CapacityReserveMargin > 0
- @expression(EP, eCapResMarBalanceVRE[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * EP[:eTotalCap][y] * inputs["pP_Max"][y,t] for y in VRE))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceVRE)
- end
+ # Capacity Reserves Margin policy
+ if CapacityReserveMargin > 0
+ @expression(EP,
+ eCapResMarBalanceVRE[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) * EP[:eTotalCap][y] *
+ inputs["pP_Max"][y, t] for y in VRE))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceVRE)
+ end
- ### Constraints ###
+ ### Constraints ###
if OperationalReserves == 1
# Constraints on power output and contribution to regulation and reserves
curtailable_variable_renewable_operational_reserves!(EP, inputs)
@@ -58,25 +60,28 @@ function curtailable_variable_renewable!(EP::Model, inputs::Dict, setup::Dict)
for y in VRE_POWER_OUT
# Define the set of generator indices corresponding to the different sites (or bins) of a particular VRE technology (E.g. wind or solar) in a particular zone.
# For example the wind resource in a particular region could be include three types of bins corresponding to different sites with unique interconnection, hourly capacity factor and maximim available capacity limits.
- VRE_BINS = intersect(resource_id.(gen[resource_id.(gen) .>= y]), resource_id.(gen[resource_id.(gen) .<= y+num_vre_bins(gen[y])-1]))
+ VRE_BINS = intersect(resource_id.(gen[resource_id.(gen) .>= y]),
+ resource_id.(gen[resource_id.(gen) .<= y + num_vre_bins(gen[y]) - 1]))
# Maximum power generated per hour by renewable generators must be less than
# sum of product of hourly capacity factor for each bin times its the bin installed capacity
# Note: inequality constraint allows curtailment of output below maximum level.
- @constraint(EP, [t=1:T], EP[:vP][y,t] <= sum(inputs["pP_Max"][yy,t]*EP[:eTotalCap][yy] for yy in VRE_BINS))
+ @constraint(EP,
+ [t = 1:T],
+ EP[:vP][y,
+ t]<=sum(inputs["pP_Max"][yy, t] * EP[:eTotalCap][yy] for yy in VRE_BINS))
end
end
- # Set power variables for all bins that are not being modeled for hourly output to be zero
- for y in VRE_NO_POWER_OUT
- fix.(EP[:vP][y,:], 0.0, force=true)
- end
- ##CO2 Polcy Module VRE Generation by zone
- @expression(EP, eGenerationByVRE[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(inputs["VRE"], resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByVRE)
-
+ # Set power variables for all bins that are not being modeled for hourly output to be zero
+ for y in VRE_NO_POWER_OUT
+ fix.(EP[:vP][y, :], 0.0, force = true)
+ end
+    ## CO2 Policy Module: VRE Generation by zone
+ @expression(EP, eGenerationByVRE[z = 1:Z, t = 1:T], # the unit is GW
+ sum(EP[:vP][y, t]
+ for y in intersect(inputs["VRE"], resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByVRE)
end
@doc raw"""
@@ -103,11 +108,11 @@ The amount of frequency regulation and operating reserves procured in each time
```
"""
function curtailable_variable_renewable_operational_reserves!(EP::Model, inputs::Dict)
- gen = inputs["RESOURCES"]
- T = inputs["T"]
+ gen = inputs["RESOURCES"]
+ T = inputs["T"]
VRE = inputs["VRE"]
- VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
+ VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
REG = intersect(VRE_POWER_OUT, inputs["REG"])
RSV = intersect(VRE_POWER_OUT, inputs["RSV"])
@@ -121,33 +126,37 @@ function curtailable_variable_renewable_operational_reserves!(EP::Model, inputs:
resources_in_bin(y) = UnitRange(y, y + num_vre_bins(gen[y]) - 1)
hourly_bin_capacity(y, t) = sum(hourly_capacity(yy, t) for yy in resources_in_bin(y))
- @constraint(EP, [y in REG, t in 1:T], vREG[y, t] <= reg_max(gen[y]) * hourly_bin_capacity(y, t))
- @constraint(EP, [y in RSV, t in 1:T], vRSV[y, t] <= rsv_max(gen[y]) * hourly_bin_capacity(y, t))
+ @constraint(EP,
+ [y in REG, t in 1:T],
+ vREG[y, t]<=reg_max(gen[y]) * hourly_bin_capacity(y, t))
+ @constraint(EP,
+ [y in RSV, t in 1:T],
+ vRSV[y, t]<=rsv_max(gen[y]) * hourly_bin_capacity(y, t))
expr = extract_time_series_to_expression(vP, VRE_POWER_OUT)
add_similar_to_expression!(expr[REG, :], -vREG[REG, :])
- @constraint(EP, [y in VRE_POWER_OUT, t in 1:T], expr[y, t] >= 0)
+ @constraint(EP, [y in VRE_POWER_OUT, t in 1:T], expr[y, t]>=0)
expr = extract_time_series_to_expression(vP, VRE_POWER_OUT)
add_similar_to_expression!(expr[REG, :], +vREG[REG, :])
add_similar_to_expression!(expr[RSV, :], +vRSV[RSV, :])
- @constraint(EP, [y in VRE_POWER_OUT, t in 1:T], expr[y, t] <= hourly_bin_capacity(y, t))
+ @constraint(EP, [y in VRE_POWER_OUT, t in 1:T], expr[y, t]<=hourly_bin_capacity(y, t))
end
function remove_operational_reserves_for_binned_vre_resources!(EP::Model, inputs::Dict)
gen = inputs["RESOURCES"]
VRE = inputs["VRE"]
- VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
+ VRE_POWER_OUT = intersect(VRE, ids_with_positive(gen, num_vre_bins))
REG = inputs["REG"]
RSV = inputs["RSV"]
VRE_NO_POWER_OUT = setdiff(VRE, VRE_POWER_OUT)
for y in intersect(VRE_NO_POWER_OUT, REG)
- fix.(EP[:vREG][y,:], 0.0, force=true)
- end
+ fix.(EP[:vREG][y, :], 0.0, force = true)
+ end
for y in intersect(VRE_NO_POWER_OUT, RSV)
- fix.(EP[:vRSV][y,:], 0.0, force=true)
- end
+ fix.(EP[:vRSV][y, :], 0.0, force = true)
+ end
end
diff --git a/src/model/resources/flexible_demand/flexible_demand.jl b/src/model/resources/flexible_demand/flexible_demand.jl
index 7562d4ac43..18bdbbe681 100644
--- a/src/model/resources/flexible_demand/flexible_demand.jl
+++ b/src/model/resources/flexible_demand/flexible_demand.jl
@@ -36,89 +36,101 @@ A similar constraints maximum time steps of demand advancement. This is done by
If $t$ is first time step of the year (or the first time step of the representative period), then the above two constraints are implemented to look back over the last n time steps, starting with the last time step of the year (or the last time step of the representative period). This time-wrapping implementation is similar to the time-wrapping implementations used for defining the storage balance constraints for hydropower reservoir resources and energy storage resources.
"""
function flexible_demand!(EP::Model, inputs::Dict, setup::Dict)
-## Flexible demand resources available during all hours and can be either delayed or advanced (virtual storage-shiftable demand) - DR ==1
+    ## Flexible demand resources are available during all hours and can be either delayed or advanced (virtual storage-shiftable demand) - DR ==1
-println("Flexible Demand Resources Module")
+ println("Flexible Demand Resources Module")
-T = inputs["T"] # Number of time steps (hours)
-Z = inputs["Z"] # Number of zones
-FLEX = inputs["FLEX"] # Set of flexible demand resources
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ FLEX = inputs["FLEX"] # Set of flexible demand resources
-gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
-hours_per_subperiod = inputs["hours_per_subperiod"] # Total number of hours per subperiod
+ hours_per_subperiod = inputs["hours_per_subperiod"] # Total number of hours per subperiod
-### Variables ###
+ ### Variables ###
-# Variable tracking total advanced (negative) or deferred (positive) demand for demand flex resource y in period t
-@variable(EP, vS_FLEX[y in FLEX, t=1:T]);
+ # Variable tracking total advanced (negative) or deferred (positive) demand for demand flex resource y in period t
+ @variable(EP, vS_FLEX[y in FLEX, t = 1:T])
-# Variable tracking demand deferred by demand flex resource y in period t
-@variable(EP, vCHARGE_FLEX[y in FLEX, t=1:T] >= 0);
+ # Variable tracking demand deferred by demand flex resource y in period t
+ @variable(EP, vCHARGE_FLEX[y in FLEX, t = 1:T]>=0)
-### Expressions ###
+ ### Expressions ###
-## Power Balance Expressions ##
-@expression(EP, ePowerBalanceDemandFlex[t=1:T, z=1:Z],
- sum(-EP[:vP][y,t]+EP[:vCHARGE_FLEX][y,t] for y in intersect(FLEX, resources_in_zone_by_rid(gen,z)))
-)
-add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceDemandFlex)
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceDemandFlex[t = 1:T, z = 1:Z],
+ sum(-EP[:vP][y, t] + EP[:vCHARGE_FLEX][y, t]
+ for y in intersect(FLEX, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceDemandFlex)
-# Capacity Reserves Margin policy
-if setup["CapacityReserveMargin"] > 0
- @expression(EP, eCapResMarBalanceFlex[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * (EP[:vCHARGE_FLEX][y,t] - EP[:vP][y,t]) for y in FLEX))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceFlex)
-end
-
-## Objective Function Expressions ##
-
-# Variable costs of "charging" for technologies "y" during hour "t" in zone "z"
-@expression(EP, eCVarFlex_in[y in FLEX,t=1:T], inputs["omega"][t]*var_om_cost_per_mwh_in(gen[y])*vCHARGE_FLEX[y,t])
-
-# Sum individual resource contributions to variable charging costs to get total variable charging costs
-@expression(EP, eTotalCVarFlexInT[t=1:T], sum(eCVarFlex_in[y,t] for y in FLEX))
-@expression(EP, eTotalCVarFlexIn, sum(eTotalCVarFlexInT[t] for t in 1:T))
-add_to_expression!(EP[:eObj], eTotalCVarFlexIn)
-
-### Constraints ###
-
-## Flexible demand is available only during specified hours with time delay or time advance (virtual storage-shiftable demand)
-for z in 1:Z
- # NOTE: Flexible demand operates by zone since capacity is now related to zone demand
- FLEX_Z = intersect(FLEX, resources_in_zone_by_rid(gen,z))
-
- @constraints(EP, begin
- # State of "charge" constraint (equals previous state + charge - discharge)
- # NOTE: no maximum energy "stored" or deferred for later hours
- # NOTE: Flexible_Demand_Energy_Eff corresponds to energy loss due to time shifting
- [y in FLEX_Z, t in 1:T], EP[:vS_FLEX][y,t] == EP[:vS_FLEX][y, hoursbefore(hours_per_subperiod, t, 1)] - flexible_demand_energy_eff(gen[y]) * EP[:vP][y,t] + EP[:vCHARGE_FLEX][y,t]
-
- # Maximum charging rate
- # NOTE: the maximum amount that can be shifted is given by hourly availability of the resource times the maximum capacity of the resource
- [y in FLEX_Z, t=1:T], EP[:vCHARGE_FLEX][y,t] <= inputs["pP_Max"][y,t]*EP[:eTotalCap][y]
- # NOTE: no maximum discharge rate unless constrained by other factors like transmission, etc.
- end)
-
-
- for y in FLEX_Z
-
- # Require deferred demands to be satisfied within the specified time delay
- max_flex_demand_delay = Int(floor(max_flexible_demand_delay(gen[y])))
-
- # Require advanced demands to be satisfied within the specified time period
- max_flex_demand_advance = Int(floor(max_flexible_demand_advance(gen[y])))
-
- @constraint(EP, [t in 1:T],
- # cFlexibleDemandDelay: Constraints looks forward over next n hours, where n = max_flexible_demand_delay
- sum(EP[:vP][y,e] for e=hoursafter(hours_per_subperiod, t, 1:max_flex_demand_delay)) >= EP[:vS_FLEX][y,t])
-
- @constraint(EP, [t in 1:T],
- # cFlexibleDemandAdvance: Constraint looks forward over next n hours, where n = max_flexible_demand_advance
- sum(EP[:vCHARGE_FLEX][y,e] for e=hoursafter(hours_per_subperiod, t, 1:max_flex_demand_advance)) >= -EP[:vS_FLEX][y,t])
+ # Capacity Reserves Margin policy
+ if setup["CapacityReserveMargin"] > 0
+ @expression(EP,
+ eCapResMarBalanceFlex[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) *
+ (EP[:vCHARGE_FLEX][y, t] - EP[:vP][y, t]) for y in FLEX))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceFlex)
+ end
+ ## Objective Function Expressions ##
+
+ # Variable costs of "charging" for technologies "y" during hour "t" in zone "z"
+ @expression(EP,
+ eCVarFlex_in[y in FLEX, t = 1:T],
+ inputs["omega"][t]*var_om_cost_per_mwh_in(gen[y])*vCHARGE_FLEX[y, t])
+
+ # Sum individual resource contributions to variable charging costs to get total variable charging costs
+ @expression(EP, eTotalCVarFlexInT[t = 1:T], sum(eCVarFlex_in[y, t] for y in FLEX))
+ @expression(EP, eTotalCVarFlexIn, sum(eTotalCVarFlexInT[t] for t in 1:T))
+ add_to_expression!(EP[:eObj], eTotalCVarFlexIn)
+
+ ### Constraints ###
+
+ ## Flexible demand is available only during specified hours with time delay or time advance (virtual storage-shiftable demand)
+ for z in 1:Z
+ # NOTE: Flexible demand operates by zone since capacity is now related to zone demand
+ FLEX_Z = intersect(FLEX, resources_in_zone_by_rid(gen, z))
+
+ @constraints(EP,
+ begin
+ # State of "charge" constraint (equals previous state + charge - discharge)
+ # NOTE: no maximum energy "stored" or deferred for later hours
+ # NOTE: Flexible_Demand_Energy_Eff corresponds to energy loss due to time shifting
+ [y in FLEX_Z, t in 1:T],
+ EP[:vS_FLEX][y, t] ==
+ EP[:vS_FLEX][y, hoursbefore(hours_per_subperiod, t, 1)] -
+ flexible_demand_energy_eff(gen[y]) * EP[:vP][y, t] +
+ EP[:vCHARGE_FLEX][y, t]
+
+ # Maximum charging rate
+ # NOTE: the maximum amount that can be shifted is given by hourly availability of the resource times the maximum capacity of the resource
+ [y in FLEX_Z, t = 1:T],
+ EP[:vCHARGE_FLEX][y, t] <= inputs["pP_Max"][y, t] * EP[:eTotalCap][y]
+ # NOTE: no maximum discharge rate unless constrained by other factors like transmission, etc.
+ end)
+
+ for y in FLEX_Z
+
+ # Require deferred demands to be satisfied within the specified time delay
+ max_flex_demand_delay = Int(floor(max_flexible_demand_delay(gen[y])))
+
+ # Require advanced demands to be satisfied within the specified time period
+ max_flex_demand_advance = Int(floor(max_flexible_demand_advance(gen[y])))
+
+ @constraint(EP, [t in 1:T],
+                # cFlexibleDemandDelay: Constraint looks forward over next n hours, where n = max_flexible_demand_delay
+ sum(EP[:vP][y, e]
+ for e in hoursafter(hours_per_subperiod, t, 1:max_flex_demand_delay))>=EP[:vS_FLEX][y,
+ t])
+
+ @constraint(EP, [t in 1:T],
+ # cFlexibleDemandAdvance: Constraint looks forward over next n hours, where n = max_flexible_demand_advance
+ sum(EP[:vCHARGE_FLEX][y, e]
+ for e in hoursafter(hours_per_subperiod, t, 1:max_flex_demand_advance))>=-EP[:vS_FLEX][y,
+ t])
+ end
end
-end
-return EP
+ return EP
end
-
diff --git a/src/model/resources/hydro/hydro_inter_period_linkage.jl b/src/model/resources/hydro/hydro_inter_period_linkage.jl
index 8ea3836047..5fdab6287d 100644
--- a/src/model/resources/hydro/hydro_inter_period_linkage.jl
+++ b/src/model/resources/hydro/hydro_inter_period_linkage.jl
@@ -44,55 +44,67 @@ Finally, the next constraint enforces that the initial storage level for each in
```
"""
function hydro_inter_period_linkage!(EP::Model, inputs::Dict)
-
- println("Long Duration Storage Module for Hydro Reservoir")
-
- gen = inputs["RESOURCES"]
-
- REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
-
- STOR_HYDRO_LONG_DURATION = inputs["STOR_HYDRO_LONG_DURATION"]
-
- hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
-
- dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
- NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
-
- MODELED_PERIODS_INDEX = 1:NPeriods
- REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!,:Rep_Period] .== MODELED_PERIODS_INDEX]
-
- ### Variables ###
-
- # Variables to define inter-period energy transferred between modeled periods
-
- # State of charge of storage at beginning of each modeled period n
- @variable(EP, vSOC_HYDROw[y in STOR_HYDRO_LONG_DURATION, n in MODELED_PERIODS_INDEX] >= 0)
-
- # Build up in storage inventory over each representative period w
- # Build up inventory can be positive or negative
- @variable(EP, vdSOC_HYDRO[y in STOR_HYDRO_LONG_DURATION, w=1:REP_PERIOD])
-
- ### Constraints ###
-
- # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
- # Modified initial state of storage for long-duration storage - initialize wth value carried over from last period
- # Alternative to cSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
- # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @constraint(EP, cHydroReservoirLongDurationStorageStart[w=1:REP_PERIOD, y in STOR_HYDRO_LONG_DURATION],
- EP[:vS_HYDRO][y,hours_per_subperiod*(w-1)+1] == (EP[:vS_HYDRO][y,hours_per_subperiod*w]-vdSOC_HYDRO[y,w])-(1/efficiency_down(gen[y])*EP[:vP][y,hours_per_subperiod*(w-1)+1])-EP[:vSPILL][y,hours_per_subperiod*(w-1)+1]+inputs["pP_Max"][y,hours_per_subperiod*(w-1)+1]*EP[:eTotalCap][y])
- # Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
- ## Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cHydroReservoirLongDurationStorage[y in STOR_HYDRO_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vSOC_HYDROw[y, mod1(r+1, NPeriods)] == vSOC_HYDROw[y,r] + vdSOC_HYDRO[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # Storage at beginning of each modeled period cannot exceed installed energy capacity
- @constraint(EP, cHydroReservoirLongDurationStorageUpper[y in STOR_HYDRO_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vSOC_HYDROw[y,r] <= hydro_energy_to_power_ratio(gen[y])*EP[:eTotalCap][y])
-
- # Initial storage level for representative periods must also adhere to sub-period storage inventory balance
- # Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cHydroReservoirLongDurationStorageSub[y in STOR_HYDRO_LONG_DURATION, r in REP_PERIODS_INDEX],
- vSOC_HYDROw[y,r] == EP[:vS_HYDRO][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]] - vdSOC_HYDRO[y,dfPeriodMap[r,:Rep_Period_Index]])
-
-
+ println("Long Duration Storage Module for Hydro Reservoir")
+
+ gen = inputs["RESOURCES"]
+
+ REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
+
+ STOR_HYDRO_LONG_DURATION = inputs["STOR_HYDRO_LONG_DURATION"]
+
+ hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
+
+ dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
+ NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
+
+ MODELED_PERIODS_INDEX = 1:NPeriods
+ REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!, :Rep_Period] .== MODELED_PERIODS_INDEX]
+
+ ### Variables ###
+
+ # Variables to define inter-period energy transferred between modeled periods
+
+ # State of charge of storage at beginning of each modeled period n
+ @variable(EP, vSOC_HYDROw[y in STOR_HYDRO_LONG_DURATION, n in MODELED_PERIODS_INDEX]>=0)
+
+ # Build up in storage inventory over each representative period w
+ # Build up inventory can be positive or negative
+ @variable(EP, vdSOC_HYDRO[y in STOR_HYDRO_LONG_DURATION, w = 1:REP_PERIOD])
+
+ ### Constraints ###
+
+ # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
+    # Modified initial state of storage for long-duration storage - initialize with value carried over from last period
+ # Alternative to cSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
+ # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
+ @constraint(EP,
+ cHydroReservoirLongDurationStorageStart[w = 1:REP_PERIOD,
+ y in STOR_HYDRO_LONG_DURATION],
+ EP[:vS_HYDRO][y,
+ hours_per_subperiod * (w - 1) + 1]==(EP[:vS_HYDRO][y, hours_per_subperiod * w] - vdSOC_HYDRO[y, w]) -
+ (1 / efficiency_down(gen[y]) * EP[:vP][y, hours_per_subperiod * (w - 1) + 1]) -
+ EP[:vSPILL][y, hours_per_subperiod * (w - 1) + 1] +
+ inputs["pP_Max"][y, hours_per_subperiod * (w - 1) + 1] * EP[:eTotalCap][y])
+ # Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
+ ## Multiply storage build up term from prior period with corresponding weight
+ @constraint(EP,
+ cHydroReservoirLongDurationStorage[y in STOR_HYDRO_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vSOC_HYDROw[y,
+ mod1(r + 1, NPeriods)]==vSOC_HYDROw[y, r] + vdSOC_HYDRO[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # Storage at beginning of each modeled period cannot exceed installed energy capacity
+ @constraint(EP,
+ cHydroReservoirLongDurationStorageUpper[y in STOR_HYDRO_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vSOC_HYDROw[y, r]<=hydro_energy_to_power_ratio(gen[y]) * EP[:eTotalCap][y])
+
+ # Initial storage level for representative periods must also adhere to sub-period storage inventory balance
+ # Initial storage = Final storage - change in storage inventory across representative period
+ @constraint(EP,
+ cHydroReservoirLongDurationStorageSub[y in STOR_HYDRO_LONG_DURATION,
+ r in REP_PERIODS_INDEX],
+ vSOC_HYDROw[y,
+ r]==EP[:vS_HYDRO][y, hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]] -
+ vdSOC_HYDRO[y, dfPeriodMap[r, :Rep_Period_Index]])
end
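
The linkage above chains the beginning-of-period inventory `vSOC_HYDROw` across modeled periods through the representative-period build-up `vdSOC_HYDRO`, with `mod1` wrapping the last modeled period back to the first. The standalone sketch below traces that recursion with hypothetical data (not GenX inputs or code) to show what the wrap-around term closes:

```julia
# Illustrative only: trace the recursion behind cHydroReservoirLongDurationStorage
# with made-up data (6 modeled periods mapped onto 2 representative periods).
rep_period_index = [1, 1, 2, 2, 1, 2]  # Period_Map: modeled period -> representative period
dSOC = [10.0, -4.0]                    # assumed net inventory build-up per representative period
NPeriods = length(rep_period_index)

SOC = zeros(NPeriods)
SOC[1] = 50.0                          # assumed start-of-horizon inventory
for r in 1:(NPeriods - 1)
    # SOC at start of period r+1 = SOC at start of r + build-up of r's representative period
    SOC[r + 1] = SOC[r] + dSOC[rep_period_index[r]]
end

# For r = NPeriods, mod1(r + 1, NPeriods) == 1, so the constraint links back to period 1.
# In the model this closes the cycle; here it just exposes the gap a solver must drive to zero.
gap = SOC[1] - (SOC[NPeriods] + dSOC[rep_period_index[NPeriods]])
println("start-of-period SOC: ", SOC)
println("cycle-closure gap the wrap-around constraint forces to zero: ", gap)
```
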
diff --git a/src/model/resources/hydro/hydro_res.jl b/src/model/resources/hydro/hydro_res.jl
index e9734ed975..ce9b2c69f5 100644
--- a/src/model/resources/hydro/hydro_res.jl
+++ b/src/model/resources/hydro/hydro_res.jl
@@ -61,24 +61,23 @@ In case the reservoir capacity is known ($y \in W^{cap}$), then an additional co
```
"""
function hydro_res!(EP::Model, inputs::Dict, setup::Dict)
+ println("Hydro Reservoir Core Resources Module")
- println("Hydro Reservoir Core Resources Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ p = inputs["hours_per_subperiod"] # total number of hours per subperiod
- p = inputs["hours_per_subperiod"] # total number of hours per subperiod
+    HYDRO_RES = inputs["HYDRO_RES"] # Set of all reservoir hydro resources, used for common constraints
+    HYDRO_RES_KNOWN_CAP = inputs["HYDRO_RES_KNOWN_CAP"] # Reservoir hydro resources modeled with known reservoir energy capacity
- HYDRO_RES = inputs["HYDRO_RES"] # Set of all reservoir hydro resources, used for common constraints
- HYDRO_RES_KNOWN_CAP = inputs["HYDRO_RES_KNOWN_CAP"] # Reservoir hydro resources modeled with unknown reservoir energy capacity
+ STOR_HYDRO_SHORT_DURATION = inputs["STOR_HYDRO_SHORT_DURATION"]
+ representative_periods = inputs["REP_PERIOD"]
- STOR_HYDRO_SHORT_DURATION = inputs["STOR_HYDRO_SHORT_DURATION"]
- representative_periods = inputs["REP_PERIOD"]
-
- START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ START_SUBPERIODS = inputs["START_SUBPERIODS"]
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
# These variables are used in the ramp-up and ramp-down expressions
reserves_term = @expression(EP, [y in HYDRO_RES, t in 1:T], 0)
@@ -88,81 +87,99 @@ function hydro_res!(EP::Model, inputs::Dict, setup::Dict)
HYDRO_RES_REG = intersect(HYDRO_RES, inputs["REG"]) # Set of reservoir hydro resources with regulation reserves
HYDRO_RES_RSV = intersect(HYDRO_RES, inputs["RSV"]) # Set of reservoir hydro resources with spinning reserves
regulation_term = @expression(EP, [y in HYDRO_RES, t in 1:T],
- y ∈ HYDRO_RES_REG ? EP[:vREG][y,t] - EP[:vREG][y, hoursbefore(p, t, 1)] : 0)
+ y ∈ HYDRO_RES_REG ? EP[:vREG][y, t] - EP[:vREG][y, hoursbefore(p, t, 1)] : 0)
reserves_term = @expression(EP, [y in HYDRO_RES, t in 1:T],
- y ∈ HYDRO_RES_RSV ? EP[:vRSV][y,t] : 0)
+ y ∈ HYDRO_RES_RSV ? EP[:vRSV][y, t] : 0)
+ end
+
+ ### Variables ###
+
+    # Reservoir hydro storage level of resource "y" at hour "t" [MWh] in zone "z" - unbounded above
+ @variable(EP, vS_HYDRO[y in HYDRO_RES, t = 1:T]>=0)
+
+ # Hydro reservoir overflow (water spill) variable
+ @variable(EP, vSPILL[y in HYDRO_RES, t = 1:T]>=0)
+
+ ### Expressions ###
+
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceHydroRes[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] for y in intersect(HYDRO_RES, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceHydroRes)
+
+ # Capacity Reserves Margin policy
+ if setup["CapacityReserveMargin"] > 0
+ @expression(EP,
+ eCapResMarBalanceHydro[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) * EP[:vP][y, t] for y in HYDRO_RES))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceHydro)
end
- ### Variables ###
-
- # Reservoir hydro storage level of resource "y" at hour "t" [MWh] on zone "z" - unbounded
- @variable(EP, vS_HYDRO[y in HYDRO_RES, t=1:T] >= 0);
-
- # Hydro reservoir overflow (water spill) variable
- @variable(EP, vSPILL[y in HYDRO_RES, t=1:T] >= 0)
-
- ### Expressions ###
-
- ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceHydroRes[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(HYDRO_RES, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceHydroRes)
-
- # Capacity Reserves Margin policy
- if setup["CapacityReserveMargin"] > 0
- @expression(EP, eCapResMarBalanceHydro[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * EP[:vP][y,t] for y in HYDRO_RES))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceHydro)
- end
-
- ### Constratints ###
-
- if representative_periods > 1 && !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
- CONSTRAINTSET = STOR_HYDRO_SHORT_DURATION
- else
- CONSTRAINTSET = HYDRO_RES
- end
-
- @constraint(EP, cHydroReservoirStart[y in CONSTRAINTSET,t in START_SUBPERIODS], EP[:vS_HYDRO][y,t] == EP[:vS_HYDRO][y, hoursbefore(p,t,1)]- (1/efficiency_down(gen[y])*EP[:vP][y,t]) - vSPILL[y,t] + inputs["pP_Max"][y,t]*EP[:eTotalCap][y])
-
- ### Constraints commmon to all reservoir hydro (y in set HYDRO_RES) ###
- @constraints(EP, begin
- ### NOTE: time coupling constraints in this block do not apply to first hour in each sample period;
- # Energy stored in reservoir at end of each other hour is equal to energy at end of prior hour less generation and spill and + inflows in the current hour
- # The ["pP_Max"][y,t] term here refers to inflows as a fraction of peak discharge power capacity.
- # DEV NOTE: Last inputs["pP_Max"][y,t] term above is inflows; currently part of capacity factors inputs in Generators_variability.csv but should be moved to its own Hydro_inflows.csv input in future.
-
- # Constraints for reservoir hydro
- cHydroReservoirInterior[y in HYDRO_RES, t in INTERIOR_SUBPERIODS], EP[:vS_HYDRO][y,t] == (EP[:vS_HYDRO][y, hoursbefore(p,t,1)] - (1/efficiency_down(gen[y])*EP[:vP][y,t]) - vSPILL[y,t] + inputs["pP_Max"][y,t]*EP[:eTotalCap][y])
-
- # Maximum ramp up and down
- cRampUp[y in HYDRO_RES, t in 1:T], EP[:vP][y,t] + regulation_term[y,t] + reserves_term[y,t] - EP[:vP][y, hoursbefore(p,t,1)] <= ramp_up_fraction(gen[y])*EP[:eTotalCap][y]
- cRampDown[y in HYDRO_RES, t in 1:T], EP[:vP][y, hoursbefore(p,t,1)] - EP[:vP][y,t] - regulation_term[y,t] + reserves_term[y, hoursbefore(p,t,1)] <= ramp_down_fraction(gen[y])*EP[:eTotalCap][y]
- # Minimum streamflow running requirements (power generation and spills must be >= min value) in all hours
- cHydroMinFlow[y in HYDRO_RES, t in 1:T], EP[:vP][y,t] + EP[:vSPILL][y,t] >= min_power(gen[y])*EP[:eTotalCap][y]
- # DEV NOTE: When creating new hydro inputs, should rename Min_Power with Min_flow or similar for clarity since this includes spilled water as well
-
- # Maximum discharging rate must be less than power rating OR available stored energy at start of hour, whichever is less
- # DEV NOTE: We do not currently account for hydro power plant outages - leave it for later to figure out if we should.
- # DEV NOTE (CONTD): If we defin pPMax as hourly availability of the plant and define inflows as a separate parameter, then notation will be consistent with its use for other resources
- cHydroMaxPower[y in HYDRO_RES, t in 1:T], EP[:vP][y,t] <= EP[:eTotalCap][y]
- cHydroMaxOutflow[y in HYDRO_RES, t in 1:T], EP[:vP][y,t] <= EP[:vS_HYDRO][y, hoursbefore(p,t,1)]
- end)
-
- ### Constraints to limit maximum energy in storage based on known limits on reservoir energy capacity (only for HYDRO_RES_KNOWN_CAP)
- # Maximum energy stored in reservoir must be less than energy capacity in all hours - only applied to HYDRO_RES_KNOWN_CAP
- @constraint(EP, cHydroMaxEnergy[y in HYDRO_RES_KNOWN_CAP, t in 1:T], EP[:vS_HYDRO][y,t] <= hydro_energy_to_power_ratio(gen[y])*EP[:eTotalCap][y])
-
- if setup["OperationalReserves"] == 1
- ### Reserve related constraints for reservoir hydro resources (y in HYDRO_RES), if used
- hydro_res_operational_reserves!(EP, inputs)
- end
- ##CO2 Polcy Module Hydro Res Generation by zone
- @expression(EP, eGenerationByHydroRes[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(HYDRO_RES, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByHydroRes)
+    ### Constraints ###
+
+ if representative_periods > 1 && !isempty(inputs["STOR_HYDRO_LONG_DURATION"])
+ CONSTRAINTSET = STOR_HYDRO_SHORT_DURATION
+ else
+ CONSTRAINTSET = HYDRO_RES
+ end
+ @constraint(EP,
+ cHydroReservoirStart[y in CONSTRAINTSET, t in START_SUBPERIODS],
+ EP[:vS_HYDRO][y,
+ t]==EP[:vS_HYDRO][y, hoursbefore(p, t, 1)] -
+ (1 / efficiency_down(gen[y]) * EP[:vP][y, t]) - vSPILL[y, t] +
+ inputs["pP_Max"][y, t] * EP[:eTotalCap][y])
+
+    ### Constraints common to all reservoir hydro (y in set HYDRO_RES) ###
+ @constraints(EP,
+ begin
+ ### NOTE: time coupling constraints in this block do not apply to first hour in each sample period;
+            # Energy stored in the reservoir at the end of each hour (other than the first hour of a subperiod) equals the energy at the end of the prior hour, less generation and spill, plus inflows in the current hour
+ # The ["pP_Max"][y,t] term here refers to inflows as a fraction of peak discharge power capacity.
+ # DEV NOTE: Last inputs["pP_Max"][y,t] term above is inflows; currently part of capacity factors inputs in Generators_variability.csv but should be moved to its own Hydro_inflows.csv input in future.
+
+ # Constraints for reservoir hydro
+ cHydroReservoirInterior[y in HYDRO_RES, t in INTERIOR_SUBPERIODS],
+ EP[:vS_HYDRO][y, t] == (EP[:vS_HYDRO][y, hoursbefore(p, t, 1)] -
+ (1 / efficiency_down(gen[y]) * EP[:vP][y, t]) - vSPILL[y, t] +
+ inputs["pP_Max"][y, t] * EP[:eTotalCap][y])
+
+ # Maximum ramp up and down
+ cRampUp[y in HYDRO_RES, t in 1:T],
+ EP[:vP][y, t] + regulation_term[y, t] + reserves_term[y, t] -
+ EP[:vP][y, hoursbefore(p, t, 1)] <=
+ ramp_up_fraction(gen[y]) * EP[:eTotalCap][y]
+ cRampDown[y in HYDRO_RES, t in 1:T],
+ EP[:vP][y, hoursbefore(p, t, 1)] - EP[:vP][y, t] - regulation_term[y, t] +
+ reserves_term[y, hoursbefore(p, t, 1)] <=
+ ramp_down_fraction(gen[y]) * EP[:eTotalCap][y]
+ # Minimum streamflow running requirements (power generation and spills must be >= min value) in all hours
+ cHydroMinFlow[y in HYDRO_RES, t in 1:T],
+ EP[:vP][y, t] + EP[:vSPILL][y, t] >= min_power(gen[y]) * EP[:eTotalCap][y]
+ # DEV NOTE: When creating new hydro inputs, should rename Min_Power with Min_flow or similar for clarity since this includes spilled water as well
+
+ # Maximum discharging rate must be less than power rating OR available stored energy at start of hour, whichever is less
+ # DEV NOTE: We do not currently account for hydro power plant outages - leave it for later to figure out if we should.
+            # DEV NOTE (CONTD): If we define pPMax as hourly availability of the plant and define inflows as a separate parameter, then notation will be consistent with its use for other resources
+ cHydroMaxPower[y in HYDRO_RES, t in 1:T], EP[:vP][y, t] <= EP[:eTotalCap][y]
+ cHydroMaxOutflow[y in HYDRO_RES, t in 1:T],
+ EP[:vP][y, t] <= EP[:vS_HYDRO][y, hoursbefore(p, t, 1)]
+ end)
+
+ ### Constraints to limit maximum energy in storage based on known limits on reservoir energy capacity (only for HYDRO_RES_KNOWN_CAP)
+ # Maximum energy stored in reservoir must be less than energy capacity in all hours - only applied to HYDRO_RES_KNOWN_CAP
+ @constraint(EP,
+ cHydroMaxEnergy[y in HYDRO_RES_KNOWN_CAP, t in 1:T],
+ EP[:vS_HYDRO][y, t]<=hydro_energy_to_power_ratio(gen[y]) * EP[:eTotalCap][y])
+
+ if setup["OperationalReserves"] == 1
+ ### Reserve related constraints for reservoir hydro resources (y in HYDRO_RES), if used
+ hydro_res_operational_reserves!(EP, inputs)
+ end
+    ## CO2 Policy Module Hydro Res Generation by zone
+ @expression(EP, eGenerationByHydroRes[z = 1:Z, t = 1:T], # the unit is GW
+ sum(EP[:vP][y, t] for y in intersect(HYDRO_RES, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByHydroRes)
end
@doc raw"""
@@ -195,19 +212,18 @@ r_{y,z, t} \leq \upsilon^{rsv}_{y,z}\times \Delta^{total}_{y,z}
```
"""
function hydro_res_operational_reserves!(EP::Model, inputs::Dict)
+ println("Hydro Reservoir Operational Reserves Module")
- println("Hydro Reservoir Operational Reserves Module")
-
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- HYDRO_RES = inputs["HYDRO_RES"]
+ HYDRO_RES = inputs["HYDRO_RES"]
REG = inputs["REG"]
RSV = inputs["RSV"]
- HYDRO_RES_REG = intersect(HYDRO_RES, REG) # Set of reservoir hydro resources with regulation reserves
- HYDRO_RES_RSV = intersect(HYDRO_RES, RSV) # Set of reservoir hydro resources with spinning reserves
+ HYDRO_RES_REG = intersect(HYDRO_RES, REG) # Set of reservoir hydro resources with regulation reserves
+ HYDRO_RES_RSV = intersect(HYDRO_RES, RSV) # Set of reservoir hydro resources with spinning reserves
vP = EP[:vP]
vREG = EP[:vREG]
@@ -224,9 +240,13 @@ function hydro_res_operational_reserves!(EP::Model, inputs::Dict)
S = HYDRO_RES_RSV
add_similar_to_expression!(max_up_reserves_lhs[S, :], vRSV[S, :])
- @constraint(EP, [y in HYDRO_RES, t in 1:T], max_up_reserves_lhs[y, t] <= eTotalCap[y])
- @constraint(EP, [y in HYDRO_RES, t in 1:T], max_dn_reserves_lhs[y, t] >= 0)
+ @constraint(EP, [y in HYDRO_RES, t in 1:T], max_up_reserves_lhs[y, t]<=eTotalCap[y])
+ @constraint(EP, [y in HYDRO_RES, t in 1:T], max_dn_reserves_lhs[y, t]>=0)
- @constraint(EP, [y in HYDRO_RES_REG, t in 1:T], vREG[y, t] <= reg_max(gen[y]) * eTotalCap[y])
- @constraint(EP, [y in HYDRO_RES_RSV, t in 1:T], vRSV[y, t] <= rsv_max(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in HYDRO_RES_REG, t in 1:T],
+ vREG[y, t]<=reg_max(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in HYDRO_RES_RSV, t in 1:T],
+ vRSV[y, t]<=rsv_max(gen[y]) * eTotalCap[y])
end
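
Both the start-of-subperiod balance and the ramp constraints above reach the preceding hour through `hoursbefore(p, t, b)`, which is expected to wrap around at subperiod boundaries. The stand-in below assumes that wrap-within-subperiod behavior for illustration only; it is not GenX's `hoursbefore` implementation:

```julia
# Assumed behavior only: the hour b hours before t, wrapping within the
# subperiod of length p that contains t (a stand-in, not GenX's hoursbefore).
function hours_before_sketch(p::Int, t::Int, b::Int)
    period_start = p * div(t - 1, p) + 1       # first hour of the subperiod containing t
    return period_start + mod(t - period_start - b, p)
end

p = 168                                         # one representative week
println(hours_before_sketch(p, 10, 1))          # 9:   interior hour, ordinary predecessor
println(hours_before_sketch(p, 169, 1))         # 336: first hour of week 2 wraps to its last hour
```

With this indexing, `cHydroReservoirInterior` reads the ordinary prior hour for interior hours, while the first hour of each subperiod is handled by `cHydroReservoirStart` (or by the long-duration linkage module above).
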
diff --git a/src/model/resources/hydrogen/electrolyzer.jl b/src/model/resources/hydrogen/electrolyzer.jl
index f14b9a8c38..bfd21d505d 100644
--- a/src/model/resources/hydrogen/electrolyzer.jl
+++ b/src/model/resources/hydrogen/electrolyzer.jl
@@ -78,99 +78,122 @@ This optional constraint (enabled by setting `HydrogenHourlyMatching==1` in `gen
 This constraint permits modeling of the 'three pillars' requirements for clean hydrogen supply of (1) new clean supply (if only new clean resources are designated as eligible), (2) that is deliverable to the electrolyzer (assuming co-location within the same modeled zone = deliverability), and (3) produced within the same hour as the electrolyzer consumes power (otherwise known as 'additionality/new supply', 'deliverability', and 'temporal matching requirements'). See Ricks, Xu & Jenkins (2023), ''Minimizing emissions from grid-based hydrogen production in the United States'' *Environ. Res. Lett.* 18 014025 [doi:10.1088/1748-9326/acacb5](https://iopscience.iop.org/article/10.1088/1748-9326/acacb5/meta) for more.
"""
function electrolyzer!(EP::Model, inputs::Dict, setup::Dict)
- println("Electrolyzer Resources Module")
+ println("Electrolyzer Resources Module")
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- ELECTROLYZERS = inputs["ELECTROLYZER"]
- STORAGE = inputs["STOR_ALL"]
+ ELECTROLYZERS = inputs["ELECTROLYZER"]
+ STORAGE = inputs["STOR_ALL"]
- p = inputs["hours_per_subperiod"] #total number of hours per subperiod
+ p = inputs["hours_per_subperiod"] #total number of hours per subperiod
- ### Variables ###
+ ### Variables ###
- # Electrical energy consumed by electrolyzer resource "y" at hour "t"
- @variable(EP, vUSE[y=ELECTROLYZERS, t in 1:T] >=0);
+ # Electrical energy consumed by electrolyzer resource "y" at hour "t"
+ @variable(EP, vUSE[y = ELECTROLYZERS, t in 1:T]>=0)
- ### Expressions ###
+ ### Expressions ###
- ## Power Balance Expressions ##
+ ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceElectrolyzers[t in 1:T, z in 1:Z],
- sum(EP[:vUSE][y,t] for y in intersect(ELECTROLYZERS, resources_in_zone_by_rid(gen,z))))
+ @expression(EP, ePowerBalanceElectrolyzers[t in 1:T, z in 1:Z],
+ sum(EP[:vUSE][y, t]
+ for y in intersect(ELECTROLYZERS, resources_in_zone_by_rid(gen, z))))
- # Electrolyzers consume electricity so their vUSE is subtracted from power balance
- EP[:ePowerBalance] -= ePowerBalanceElectrolyzers
+ # Electrolyzers consume electricity so their vUSE is subtracted from power balance
+ EP[:ePowerBalance] -= ePowerBalanceElectrolyzers
- # Capacity Reserves Margin policy
- ## Electrolyzers currently do not contribute to capacity reserve margin. Could allow them to contribute as a curtailable demand in future.
+ # Capacity Reserves Margin policy
+ ## Electrolyzers currently do not contribute to capacity reserve margin. Could allow them to contribute as a curtailable demand in future.
- ### Constraints ###
+ ### Constraints ###
- ### Maximum ramp up and down between consecutive hours (Constraints #1-2)
- @constraints(EP, begin
- ## Maximum ramp up between consecutive hours
- [y in ELECTROLYZERS, t in 1:T], EP[:vUSE][y,t] - EP[:vUSE][y, hoursbefore(p,t,1)] <= ramp_up_fraction(gen[y])*EP[:eTotalCap][y]
+ ### Maximum ramp up and down between consecutive hours (Constraints #1-2)
+ @constraints(EP,
+ begin
+ ## Maximum ramp up between consecutive hours
+ [y in ELECTROLYZERS, t in 1:T],
+ EP[:vUSE][y, t] - EP[:vUSE][y, hoursbefore(p, t, 1)] <=
+ ramp_up_fraction(gen[y]) * EP[:eTotalCap][y]
- ## Maximum ramp down between consecutive hours
- [y in ELECTROLYZERS, t in 1:T], EP[:vUSE][y, hoursbefore(p,t,1)] - EP[:vUSE][y,t] <= ramp_down_fraction(gen[y])*EP[:eTotalCap][y]
- end)
+ ## Maximum ramp down between consecutive hours
+ [y in ELECTROLYZERS, t in 1:T],
+ EP[:vUSE][y, hoursbefore(p, t, 1)] - EP[:vUSE][y, t] <=
+ ramp_down_fraction(gen[y]) * EP[:eTotalCap][y]
+ end)
- ### Minimum and maximum power output constraints (Constraints #3-4)
+ ### Minimum and maximum power output constraints (Constraints #3-4)
     # Electrolyzers currently do not contribute to operating reserves, so there is no
# special case (for OperationalReserves == 1) here.
# Could allow them to contribute as a curtailable demand in future.
+ @constraints(EP,
+ begin
+ # Minimum stable power generated per technology "y" at hour "t" Min_Power
+ [y in ELECTROLYZERS, t in 1:T],
+ EP[:vUSE][y, t] >= min_power(gen[y]) * EP[:eTotalCap][y]
+
+ # Maximum power generated per technology "y" at hour "t"
+ [y in ELECTROLYZERS, t in 1:T],
+ EP[:vUSE][y, t] <= inputs["pP_Max"][y, t] * EP[:eTotalCap][y]
+ end)
+
+ ### Minimum hydrogen production constraint (if any) (Constraint #5)
+ kt_to_t = 10^3
+ @constraint(EP,
+ cHydrogenMin[y in ELECTROLYZERS],
+ sum(inputs["omega"][t] * EP[:vUSE][y, t] / hydrogen_mwh_per_tonne(gen[y])
+ for t in 1:T)>=electrolyzer_min_kt(gen[y]) * kt_to_t)
+
+ ### Remove vP (electrolyzers do not produce power so vP = 0 for all periods)
@constraints(EP, begin
- # Minimum stable power generated per technology "y" at hour "t" Min_Power
- [y in ELECTROLYZERS, t in 1:T], EP[:vUSE][y,t] >= min_power(gen[y])*EP[:eTotalCap][y]
-
- # Maximum power generated per technology "y" at hour "t"
- [y in ELECTROLYZERS, t in 1:T], EP[:vUSE][y,t] <= inputs["pP_Max"][y,t]*EP[:eTotalCap][y]
+ [y in ELECTROLYZERS, t in 1:T], EP[:vP][y, t] == 0
end)
- ### Minimum hydrogen production constraint (if any) (Constraint #5)
- kt_to_t = 10^3
- @constraint(EP,
- cHydrogenMin[y in ELECTROLYZERS],
- sum(inputs["omega"][t] * EP[:vUSE][y,t] / hydrogen_mwh_per_tonne(gen[y]) for t=1:T) >= electrolyzer_min_kt(gen[y]) * kt_to_t
- )
-
- ### Remove vP (electrolyzers do not produce power so vP = 0 for all periods)
- @constraints(EP, begin
- [y in ELECTROLYZERS, t in 1:T], EP[:vP][y,t] == 0
- end)
-
- ### Hydrogen Hourly Supply Matching Constraint (Constraint #6) ###
- # Requires generation from qualified resources (indicated by Qualified_Hydrogen_Supply==1 in the resource .csv files)
- # from within the same zone as the electrolyzers are located to be >= hourly consumption from electrolyzers in the zone
- # (and any charging by qualified storage within the zone used to help increase electrolyzer utilization).
- if setup["HydrogenHourlyMatching"] == 1
- HYDROGEN_ZONES = unique(zone_id.(gen.Electrolyzer))
- QUALIFIED_SUPPLY = ids_with(gen, qualified_hydrogen_supply)
- @constraint(EP, cHourlyMatching[z in HYDROGEN_ZONES, t in 1:T],
- sum(EP[:vP][y,t] for y=intersect(resources_in_zone_by_rid(gen,z), QUALIFIED_SUPPLY)) >= sum(EP[:vUSE][y,t] for y=intersect(resources_in_zone_by_rid(gen,z), ELECTROLYZERS)) + sum(EP[:vCHARGE][y,t] for y=intersect(resources_in_zone_by_rid(gen,z), QUALIFIED_SUPPLY, STORAGE))
- )
- end
-
-
- ### Energy Share Requirement Policy ###
- # Since we're using vUSE to denote electrolyzer consumption, we subtract this from the eESR Energy Share Requirement balance to increase demand for clean resources if desired
- # Electrolyzer demand is only accounted for in an ESR that the electrolyzer resources is tagged in in Generates_data.csv (e.g. ESR_N > 0) and
- # a share of electrolyzer demand equal to df[y,:ESR_N] must be met by resources qualifying for ESR_N for each electrolyzer resource y.
- if setup["EnergyShareRequirement"] >= 1
- @expression(EP, eElectrolyzerESR[ESR in 1:inputs["nESR"]], sum(inputs["omega"][t]*EP[:vUSE][y,t] for y=intersect(ELECTROLYZERS, ids_with_policy(gen,esr,tag=ESR)), t in 1:T))
- EP[:eESR] -= eElectrolyzerESR
- end
-
- ### Objective Function ###
- # Subtract hydrogen revenue from objective function
- scale_factor = setup["ParameterScale"] == 1 ? 10^6 : 1 # If ParameterScale==1, costs are in millions of $
- @expression(EP, eHydrogenValue[y in ELECTROLYZERS, t in 1:T], (inputs["omega"][t] * EP[:vUSE][y,t] / hydrogen_mwh_per_tonne(gen[y]) * hydrogen_price_per_tonne(gen[y]) / scale_factor))
- @expression(EP, eTotalHydrogenValueT[t in 1:T], sum(eHydrogenValue[y,t] for y in ELECTROLYZERS))
- @expression(EP, eTotalHydrogenValue, sum(eTotalHydrogenValueT[t] for t in 1:T))
- EP[:eObj] -= eTotalHydrogenValue
-
+ ### Hydrogen Hourly Supply Matching Constraint (Constraint #6) ###
+ # Requires generation from qualified resources (indicated by Qualified_Hydrogen_Supply==1 in the resource .csv files)
+ # from within the same zone as the electrolyzers are located to be >= hourly consumption from electrolyzers in the zone
+ # (and any charging by qualified storage within the zone used to help increase electrolyzer utilization).
+ if setup["HydrogenHourlyMatching"] == 1
+ HYDROGEN_ZONES = unique(zone_id.(gen.Electrolyzer))
+ QUALIFIED_SUPPLY = ids_with(gen, qualified_hydrogen_supply)
+ @constraint(EP, cHourlyMatching[z in HYDROGEN_ZONES, t in 1:T],
+ sum(EP[:vP][y, t]
+ for y in intersect(resources_in_zone_by_rid(gen, z), QUALIFIED_SUPPLY))>=sum(EP[:vUSE][y,
+ t] for y in intersect(resources_in_zone_by_rid(gen,
+ z),
+ ELECTROLYZERS)) + sum(EP[:vCHARGE][y,
+ t] for y in intersect(resources_in_zone_by_rid(gen,
+ z),
+ QUALIFIED_SUPPLY,
+ STORAGE)))
+ end
+
+ ### Energy Share Requirement Policy ###
+ # Since we're using vUSE to denote electrolyzer consumption, we subtract this from the eESR Energy Share Requirement balance to increase demand for clean resources if desired
+    # Electrolyzer demand is only accounted for in an ESR that the electrolyzer resource is tagged with in the resource .csv files (e.g. ESR_N > 0), and
+ # a share of electrolyzer demand equal to df[y,:ESR_N] must be met by resources qualifying for ESR_N for each electrolyzer resource y.
+ if setup["EnergyShareRequirement"] >= 1
+ @expression(EP,
+ eElectrolyzerESR[ESR in 1:inputs["nESR"]],
+ sum(inputs["omega"][t] * EP[:vUSE][y, t]
+ for y in intersect(ELECTROLYZERS, ids_with_policy(gen, esr, tag = ESR)),
+ t in 1:T))
+ EP[:eESR] -= eElectrolyzerESR
+ end
+
+ ### Objective Function ###
+ # Subtract hydrogen revenue from objective function
+ scale_factor = setup["ParameterScale"] == 1 ? 10^6 : 1 # If ParameterScale==1, costs are in millions of $
+ @expression(EP,
+ eHydrogenValue[y in ELECTROLYZERS, t in 1:T],
+ (inputs["omega"][t] * EP[:vUSE][y, t] / hydrogen_mwh_per_tonne(gen[y]) *
+ hydrogen_price_per_tonne(gen[y])/scale_factor))
+ @expression(EP,
+ eTotalHydrogenValueT[t in 1:T],
+ sum(eHydrogenValue[y, t] for y in ELECTROLYZERS))
+ @expression(EP, eTotalHydrogenValue, sum(eTotalHydrogenValueT[t] for t in 1:T))
+ EP[:eObj] -= eTotalHydrogenValue
end
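
The minimum hydrogen production constraint converts weighted electricity use into tonnes of hydrogen via `hydrogen_mwh_per_tonne` and compares it with a requirement given in kilotonnes (hence `kt_to_t = 10^3`). A back-of-the-envelope check with hypothetical numbers (not GenX data or defaults):

```julia
# Hypothetical numbers only: verify the cHydrogenMin arithmetic by hand.
omega = fill(1.0, 8760)          # hour weights (a full, unweighted 8760-hour year)
vUSE = fill(120.0, 8760)         # assumed electrolyzer consumption in each hour [MWh]
mwh_per_tonne = 55.0             # assumed electricity required per tonne of H2
min_kt = 15.0                    # assumed electrolyzer_min_kt requirement
kt_to_t = 10^3

tonnes = sum(omega .* vUSE) / mwh_per_tonne
println("annual H2 production: ", round(tonnes, digits = 1), " t")
println("minimum required:     ", min_kt * kt_to_t, " t")
println("cHydrogenMin satisfied: ", tonnes >= min_kt * kt_to_t)
```
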
diff --git a/src/model/resources/maintenance.jl b/src/model/resources/maintenance.jl
index 1499fa09c8..37d9c0ce82 100644
--- a/src/model/resources/maintenance.jl
+++ b/src/model/resources/maintenance.jl
@@ -12,7 +12,7 @@ const MAINTENANCE_SHUT_VARS = "MaintenanceShutVariables"
"""
function resources_with_maintenance(df::DataFrame)::Vector{Int}
if "MAINT" in names(df)
- df[df.MAINT.>0, :R_ID]
+ df[df.MAINT .> 0, :R_ID]
else
Vector{Int}[]
end
@@ -58,13 +58,11 @@ end
maintenance_duration: length of a maintenance period
maintenance_begin_hours: collection of hours in which maintenance is allowed to start
"""
-function controlling_maintenance_start_hours(
- p::Int,
+function controlling_maintenance_start_hours(p::Int,
t::Int,
maintenance_duration::Int,
- maintenance_begin_hours,
-)
- controlled_hours = hoursbefore(p, t, 0:(maintenance_duration-1))
+ maintenance_begin_hours)
+ controlled_hours = hoursbefore(p, t, 0:(maintenance_duration - 1))
return intersect(controlled_hours, maintenance_begin_hours)
end
@@ -103,8 +101,7 @@ end
Creates maintenance-tracking variables and adds their Symbols to two Sets in `inputs`.
Adds constraints which act on the vCOMMIT-like variable.
"""
-function maintenance_formulation!(
- EP::Model,
+function maintenance_formulation!(EP::Model,
inputs::Dict,
resource_component::AbstractString,
r_id::Int,
@@ -114,9 +111,7 @@ function maintenance_formulation!(
cap::Float64,
vcommit::Symbol,
ecap::Symbol,
- integer_operational_unit_commitment::Bool,
-)
-
+ integer_operational_unit_commitment::Bool)
T = 1:inputs["T"]
hours_per_subperiod = inputs["hours_per_subperiod"]
@@ -132,14 +127,11 @@ function maintenance_formulation!(
maintenance_begin_hours = 1:maint_begin_cadence:T[end]
# create variables
- vMDOWN = EP[down] = @variable(EP, [t in T], base_name = down_name, lower_bound = 0)
- vMSHUT =
- EP[shut] = @variable(
- EP,
- [t in maintenance_begin_hours],
- base_name = shut_name,
- lower_bound = 0
- )
+ vMDOWN = EP[down] = @variable(EP, [t in T], base_name=down_name, lower_bound=0)
+ vMSHUT = EP[shut] = @variable(EP,
+ [t in maintenance_begin_hours],
+ base_name=shut_name,
+ lower_bound=0)
if integer_operational_unit_commitment
set_integer.(vMDOWN)
@@ -155,22 +147,20 @@ function maintenance_formulation!(
end)
# Plant is non-committed during maintenance
- @constraint(EP, [t in T], vMDOWN[t] + vcommit[y, t] <= ecap[y] / cap)
-
- controlling_hours(t) = controlling_maintenance_start_hours(
- hours_per_subperiod,
- t,
- maint_dur,
- maintenance_begin_hours,
- )
+ @constraint(EP, [t in T], vMDOWN[t] + vcommit[y, t]<=ecap[y] / cap)
+
+ function controlling_hours(t)
+ controlling_maintenance_start_hours(hours_per_subperiod,
+ t,
+ maint_dur,
+ maintenance_begin_hours)
+ end
# Plant is down for the required number of hours
- @constraint(EP, [t in T], vMDOWN[t] == sum(vMSHUT[controlling_hours(t)]))
+ @constraint(EP, [t in T], vMDOWN[t]==sum(vMSHUT[controlling_hours(t)]))
# Plant requires maintenance every (certain number of) year(s)
- @constraint(
- EP,
- sum(vMSHUT[t] for t in maintenance_begin_hours) >= ecap[y] / cap / maint_freq_years
- )
+ @constraint(EP,
+ sum(vMSHUT[t] for t in maintenance_begin_hours)>=ecap[y] / cap / maint_freq_years)
return
end
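
`controlling_maintenance_start_hours` picks out, for a given hour `t`, the eligible start hours whose maintenance window still covers `t`; the constraint `vMDOWN[t] == sum(vMSHUT[controlling_hours(t)])` then makes downtime at `t` follow directly from those start decisions. A simplified trace with hypothetical parameters and a simplified wrap-around helper standing in for `hoursbefore`:

```julia
# Illustrative only: which start hours control hour t under assumed parameters.
hoursbefore_sketch(p, t, b) = (s = p * div(t - 1, p) + 1; s + mod(t - s - b, p))

p = 168                               # subperiod length (one representative week)
maintenance_duration = 4              # hours a maintenance event keeps the plant down
maintenance_begin_hours = 1:6:p       # assumed cadence: events may begin every 6th hour

t = 26
covering = [hoursbefore_sketch(p, t, b) for b in 0:(maintenance_duration - 1)]
controlling = intersect(covering, maintenance_begin_hours)
println("hours whose start would cover t = $t: ", covering)     # [26, 25, 24, 23]
println("eligible start hours among them:      ", controlling)  # [25]
# So vMDOWN[26] == vMSHUT[25] in this toy case: the plant is down at hour 26
# exactly when a maintenance event started at hour 25.
```
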
diff --git a/src/model/resources/must_run/must_run.jl b/src/model/resources/must_run/must_run.jl
index a16efb1141..fddcba6258 100644
--- a/src/model/resources/must_run/must_run.jl
+++ b/src/model/resources/must_run/must_run.jl
@@ -13,40 +13,41 @@ For must-run resources ($y\in \mathcal{MR}$) output in each time period $t$ must
```
"""
function must_run!(EP::Model, inputs::Dict, setup::Dict)
+ println("Must-Run Resources Module")
- println("Must-Run Resources Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ G = inputs["G"] # Number of generators
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- G = inputs["G"] # Number of generators
+ MUST_RUN = inputs["MUST_RUN"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
- MUST_RUN = inputs["MUST_RUN"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ ### Expressions ###
- ### Expressions ###
+ ## Power Balance Expressions ##
- ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceNdisp[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] for y in intersect(MUST_RUN, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNdisp)
- @expression(EP, ePowerBalanceNdisp[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(MUST_RUN, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceNdisp)
+ # Capacity Reserves Margin policy
+ if CapacityReserveMargin > 0
+ @expression(EP,
+ eCapResMarBalanceMustRun[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) * EP[:eTotalCap][y] *
+ inputs["pP_Max"][y, t] for y in MUST_RUN))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceMustRun)
+ end
- # Capacity Reserves Margin policy
- if CapacityReserveMargin > 0
- @expression(EP, eCapResMarBalanceMustRun[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * EP[:eTotalCap][y] * inputs["pP_Max"][y,t] for y in MUST_RUN))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceMustRun)
- end
-
- ### Constratints ###
-
- @constraint(EP, [y in MUST_RUN, t=1:T], EP[:vP][y,t] == inputs["pP_Max"][y,t]*EP[:eTotalCap][y])
- ##CO2 Polcy Module Must Run Generation by zone
- @expression(EP, eGenerationByMustRun[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(MUST_RUN, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByMustRun)
+    ### Constraints ###
+ @constraint(EP,
+ [y in MUST_RUN, t = 1:T],
+ EP[:vP][y, t]==inputs["pP_Max"][y, t] * EP[:eTotalCap][y])
+    ## CO2 Policy Module Must Run Generation by zone
+ @expression(EP, eGenerationByMustRun[z = 1:Z, t = 1:T], # the unit is GW
+ sum(EP[:vP][y, t] for y in intersect(MUST_RUN, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByMustRun)
end
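
The must-run constraint simply pins each unit's output to its hourly availability times installed capacity. A toy JuMP sketch with hypothetical capacities and availability factors (no solver needed to inspect the constraints; not GenX inputs):

```julia
# Toy data, not GenX inputs: show the shape of the must-run fixing constraint.
using JuMP

T = 3
MUST_RUN = [1, 2]
pP_Max = [0.9 1.0 0.8;                 # hourly availability factor per resource
          0.5 0.5 0.5]
eTotalCap = [100.0, 40.0]              # installed capacity [MW]

m = Model()
@variable(m, vP[y in MUST_RUN, t in 1:T] >= 0)
@constraint(m, cMustRun[y in MUST_RUN, t in 1:T],
    vP[y, t] == pP_Max[y, t] * eTotalCap[y])
print(m)                               # vP[1,1] is fixed to 90, vP[2,3] to 20, etc.
```
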
diff --git a/src/model/resources/resources.jl b/src/model/resources/resources.jl
index 4b86f9ee7d..1e12375064 100644
--- a/src/model/resources/resources.jl
+++ b/src/model/resources/resources.jl
@@ -14,20 +14,20 @@ Possible values:
- :Electrolyzer
"""
const resource_types = (:Thermal,
- :Vre,
- :Hydro,
- :Storage,
- :MustRun,
- :FlexDemand,
- :VreStorage,
- :Electrolyzer)
+ :Vre,
+ :Hydro,
+ :Storage,
+ :MustRun,
+ :FlexDemand,
+ :VreStorage,
+ :Electrolyzer)
# Create composite types (structs) for each resource type in resource_types
for r in resource_types
let dict = :dict, r = r
@eval begin
- struct $r{names<:Symbol, T<:Any} <: AbstractResource
- $dict::Dict{names,T}
+ struct $r{names <: Symbol, T <: Any} <: AbstractResource
+ $dict::Dict{names, T}
end
Base.parent(r::$r) = getfield(r, $(QuoteNode(dict)))
end
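
Each resource struct generated above is a thin wrapper over a `Dict`, and (as the `setproperty!` and `get` hunks below show) attribute access is routed through that wrapped `Dict`, so values loaded from the resource input files become dot-accessible. A self-contained imitation of the pattern, not GenX's actual definitions, to make the mechanics concrete:

```julia
# Imitation only: a Dict-backed resource type with dot-syntax access forwarded to the Dict.
abstract type MyAbstractResource end

struct MyThermal{names <: Symbol, T <: Any} <: MyAbstractResource
    dict::Dict{names, T}
end
Base.parent(r::MyThermal) = getfield(r, :dict)
Base.getproperty(r::MyThermal, sym::Symbol) = parent(r)[sym]
Base.setproperty!(r::MyThermal, sym::Symbol, value) = setindex!(parent(r), value, sym)

gas = MyThermal(Dict{Symbol, Any}(:resource => "CT_gas", :id => 7, :existing_cap_mw => 250.0))
println(gas.resource, " has ", gas.existing_cap_mw, " MW")   # dot syntax reads the Dict
gas.existing_cap_mw = 300.0
println(parent(gas)[:existing_cap_mw])                       # 300.0
```

Using `getfield` inside `parent` avoids recursing back into the overridden `getproperty`, which is the same reason the real definition above goes through `getfield(r, :dict)`.
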
@@ -66,7 +66,9 @@ Allows to set the attribute `sym` of an `AbstractResource` object using dot synt
- `value`: The value to set for the attribute.
"""
-Base.setproperty!(r::AbstractResource, sym::Symbol, value) = setindex!(parent(r), value, sym)
+Base.setproperty!(r::AbstractResource, sym::Symbol, value) = setindex!(parent(r),
+ value,
+ sym)
"""
haskey(r::AbstractResource, sym::Symbol)
@@ -97,8 +99,8 @@ Retrieves the value of a specific attribute from an `AbstractResource` object. I
- The value of the attribute if it exists in the parent object, `default` otherwise.
"""
-function Base.get(r::AbstractResource, sym::Symbol, default)
- return haskey(r, sym) ? getproperty(r,sym) : default
+function Base.get(r::AbstractResource, sym::Symbol, default)
+ return haskey(r, sym) ? getproperty(r, sym) : default
end
"""
@@ -124,7 +126,7 @@ julia> vre_gen.zone
"""
function Base.getproperty(rs::Vector{<:AbstractResource}, sym::Symbol)
# if sym is Type then return a vector resources of that type
- if sym ∈ resource_types
+ if sym ∈ resource_types
res_type = eval(sym)
return Vector{res_type}(rs[isa.(rs, res_type)])
end
@@ -149,7 +151,7 @@ Set the attributes specified by `sym` to the corresponding values in `value` for
function Base.setproperty!(rs::Vector{<:AbstractResource}, sym::Symbol, value::Vector)
# if sym is a field of the resource then set that field for all resources
@assert length(rs) == length(value)
- for (r,v) in zip(rs, value)
+ for (r, v) in zip(rs, value)
setproperty!(r, sym, v)
end
return rs
@@ -172,7 +174,7 @@ Define dot syntax for setting the attributes specified by `sym` to the correspon
function Base.setindex!(rs::Vector{<:AbstractResource}, value::Vector, sym::Symbol)
# if sym is a field of the resource then set that field for all resources
@assert length(rs) == length(value)
- for (r,v) in zip(rs, value)
+ for (r, v) in zip(rs, value)
setproperty!(r, sym, v)
end
return rs
@@ -207,8 +209,8 @@ function Base.show(io::IO, r::AbstractResource)
value_length = length(resource_name(r)) + 3
println(io, "\nResource: $(r.resource) (id: $(r.id))")
println(io, repeat("-", key_length + value_length))
- for (k,v) in pairs(r)
- k,v = string(k), string(v)
+ for (k, v) in pairs(r)
+ k, v = string(k), string(v)
k = k * repeat(" ", key_length - length(k))
println(io, "$k | $v")
end
@@ -231,7 +233,6 @@ function attributes(r::AbstractResource)
return tuple(keys(parent(r))...)
end
-
"""
findall(f::Function, rs::Vector{<:AbstractResource})
@@ -254,7 +255,8 @@ julia> findall(r -> max_cap_mwh(r) != 0, gen.Storage)
50
```
"""
-Base.findall(f::Function, rs::Vector{<:AbstractResource}) = resource_id.(filter(r -> f(r), rs))
+Base.findall(f::Function, rs::Vector{<:AbstractResource}) = resource_id.(filter(r -> f(r),
+ rs))
"""
interface(name, default=default_zero, type=AbstractResource)
@@ -283,7 +285,7 @@ julia> max_cap_mw.(gen.Vre) # vectorized
9.848441999999999
```
"""
-macro interface(name, default=default_zero, type=AbstractResource)
+macro interface(name, default = default_zero, type = AbstractResource)
quote
function $(esc(name))(r::$(esc(type)))
return get(r, $(QuoteNode(name)), $(esc(default)))
@@ -314,7 +316,7 @@ julia> max_cap_mw(gen[3])
4.888236
```
"""
-function ids_with_positive(rs::Vector{T}, f::Function) where T <: AbstractResource
+function ids_with_positive(rs::Vector{T}, f::Function) where {T <: AbstractResource}
return findall(r -> f(r) > 0, rs)
end
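
`ids_with_positive` composes a getter with the overloaded `findall` above, so callers get resource ids back rather than vector positions. A stand-in sketch with NamedTuples in place of `AbstractResource` objects (hypothetical data, not the GenX implementation):

```julia
# Stand-in data: NamedTuples play the role of resources with an :id and a capacity field.
resources = [(id = 11, max_cap_mw = 0.0),
             (id = 23, max_cap_mw = 150.0),
             (id = 42, max_cap_mw = 75.0)]

max_cap_mw(r) = r.max_cap_mw
ids_with_positive_sketch(rs, f) = [r.id for r in rs if f(r) > 0]

println(ids_with_positive_sketch(resources, max_cap_mw))   # [23, 42] -- ids, not positions 2 and 3
```
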
@@ -341,13 +343,14 @@ julia> max_cap_mw(gen[3])
4.888236
```
"""
-function ids_with_positive(rs::Vector{T}, name::Symbol) where T <: AbstractResource
+function ids_with_positive(rs::Vector{T}, name::Symbol) where {T <: AbstractResource}
# if the getter function exists in GenX then use it, otherwise get the attribute directly
f = isdefined(GenX, name) ? getfield(GenX, name) : r -> getproperty(r, name)
return ids_with_positive(rs, f)
end
-function ids_with_positive(rs::Vector{T}, name::AbstractString) where T <: AbstractResource
+function ids_with_positive(rs::Vector{T},
+ name::AbstractString) where {T <: AbstractResource}
return ids_with_positive(rs, Symbol(lowercase(name)))
end
@@ -368,7 +371,7 @@ Function for finding resources in a vector `rs` where the attribute specified by
julia> ids_with_nonneg(gen, max_cap_mw)
```
"""
-function ids_with_nonneg(rs::Vector{T}, f::Function) where T <: AbstractResource
+function ids_with_nonneg(rs::Vector{T}, f::Function) where {T <: AbstractResource}
return findall(r -> f(r) >= 0, rs)
end
@@ -389,13 +392,13 @@ Function for finding resources in a vector `rs` where the attribute specified by
julia> ids_with_nonneg(gen, max_cap_mw)
```
"""
-function ids_with_nonneg(rs::Vector{T}, name::Symbol) where T <: AbstractResource
+function ids_with_nonneg(rs::Vector{T}, name::Symbol) where {T <: AbstractResource}
# if the getter function exists in GenX then use it, otherwise get the attribute directly
f = isdefined(GenX, name) ? getfield(GenX, name) : r -> getproperty(r, name)
return ids_with_nonneg(rs, f)
end
-function ids_with_nonneg(rs::Vector{T}, name::AbstractString) where T <: AbstractResource
+function ids_with_nonneg(rs::Vector{T}, name::AbstractString) where {T <: AbstractResource}
return ids_with_nonneg(rs, Symbol(lowercase(name)))
end
@@ -425,7 +428,9 @@ julia> existing_cap_mw(gen[21])
7.0773
```
"""
-function ids_with(rs::Vector{T}, f::Function, default=default_zero) where T <: AbstractResource
+function ids_with(rs::Vector{T},
+ f::Function,
+ default = default_zero) where {T <: AbstractResource}
return findall(r -> f(r) != default, rs)
end
@@ -454,13 +459,17 @@ julia> existing_cap_mw(gen[21])
7.0773
```
"""
-function ids_with(rs::Vector{T}, name::Symbol, default=default_zero) where T <: AbstractResource
+function ids_with(rs::Vector{T},
+ name::Symbol,
+ default = default_zero) where {T <: AbstractResource}
# if the getter function exists in GenX then use it, otherwise get the attribute directly
f = isdefined(GenX, name) ? getfield(GenX, name) : r -> getproperty(r, name)
return ids_with(rs, f, default)
end
-function ids_with(rs::Vector{T}, name::AbstractString, default=default_zero) where T <: AbstractResource
+function ids_with(rs::Vector{T},
+ name::AbstractString,
+ default = default_zero) where {T <: AbstractResource}
return ids_with(rs, Symbol(lowercase(name)), default)
end
@@ -477,8 +486,10 @@ Function for finding resources in a vector `rs` where the policy specified by `f
# Returns
- `ids (Vector{Int64})`: The vector of resource ids with a positive value for policy `f` and tag `tag`.
"""
-function ids_with_policy(rs::Vector{T}, f::Function; tag::Int64) where T <: AbstractResource
- return findall(r -> f(r, tag=tag) > 0, rs)
+function ids_with_policy(rs::Vector{T},
+ f::Function;
+ tag::Int64) where {T <: AbstractResource}
+ return findall(r -> f(r, tag = tag) > 0, rs)
end
"""
@@ -494,17 +505,21 @@ Function for finding resources in a vector `rs` where the policy specified by `n
# Returns
- `ids (Vector{Int64})`: The vector of resource ids with a positive value for policy `name` and tag `tag`.
"""
-function ids_with_policy(rs::Vector{T}, name::Symbol; tag::Int64) where T <: AbstractResource
+function ids_with_policy(rs::Vector{T},
+ name::Symbol;
+ tag::Int64) where {T <: AbstractResource}
# if the getter function exists in GenX then use it, otherwise get the attribute directly
if isdefined(GenX, name)
f = getfield(GenX, name)
- return ids_with_policy(rs, f, tag=tag)
+ return ids_with_policy(rs, f, tag = tag)
end
return findall(r -> getproperty(r, Symbol(string(name, "_$tag"))) > 0, rs)
end
-function ids_with_policy(rs::Vector{T}, name::AbstractString; tag::Int64) where T <: AbstractResource
- return ids_with_policy(rs, Symbol(lowercase(name)), tag=tag)
+function ids_with_policy(rs::Vector{T},
+ name::AbstractString;
+ tag::Int64) where {T <: AbstractResource}
+ return ids_with_policy(rs, Symbol(lowercase(name)), tag = tag)
end
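
`ids_with_policy` resolves a policy either through a GenX getter that accepts a `tag` keyword or, failing that, by reading the tag-suffixed attribute directly (e.g. `esr_2` for `name = :esr, tag = 2`). A hedged sketch of that fallback lookup with stand-in `Dict` resources (not GenX code):

```julia
# Stand-in resources: Dicts with an :id plus tag-suffixed policy columns.
resources = [Dict(:id => 1, :esr_1 => 0, :esr_2 => 1),
             Dict(:id => 2, :esr_1 => 1, :esr_2 => 0),
             Dict(:id => 3, :esr_1 => 1, :esr_2 => 1)]

function ids_with_policy_sketch(rs, name::Symbol; tag::Int)
    col = Symbol(string(name, "_$tag"))                 # e.g. :esr_2
    return [r[:id] for r in rs if get(r, col, 0) > 0]
end

println(ids_with_policy_sketch(resources, :esr, tag = 1))   # [2, 3]
println(ids_with_policy_sketch(resources, :esr, tag = 2))   # [1, 3]
```
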
"""
@@ -512,18 +527,18 @@ end
Default value for resource attributes.
"""
-const default_zero = 0
+const default_zero = 0
# INTERFACE FOR ALL RESOURCES
resource_name(r::AbstractResource) = r.resource
-resource_name(rs::Vector{T}) where T <: AbstractResource = rs.resource
+resource_name(rs::Vector{T}) where {T <: AbstractResource} = rs.resource
resource_id(r::AbstractResource)::Int64 = r.id
-resource_id(rs::Vector{T}) where T <: AbstractResource = resource_id.(rs)
+resource_id(rs::Vector{T}) where {T <: AbstractResource} = resource_id.(rs)
resource_type_mga(r::AbstractResource) = r.resource_type
zone_id(r::AbstractResource) = r.zone
-zone_id(rs::Vector{T}) where T <: AbstractResource = rs.zone
+zone_id(rs::Vector{T}) where {T <: AbstractResource} = rs.zone
# getter for boolean attributes (true or false) with validation
function new_build(r::AbstractResource)
@@ -551,7 +566,7 @@ function can_contribute_min_retirement(r::AbstractResource)
return Bool(get(r, :contribute_min_retirement, true))
end
-const default_minmax_cap = -1.
+const default_minmax_cap = -1.0
max_cap_mw(r::AbstractResource) = get(r, :max_cap_mw, default_minmax_cap)
min_cap_mw(r::AbstractResource) = get(r, :min_cap_mw, default_minmax_cap)
@@ -569,9 +584,13 @@ cap_size(r::AbstractResource) = get(r, :cap_size, default_zero)
num_vre_bins(r::AbstractResource) = get(r, :num_vre_bins, default_zero)
-hydro_energy_to_power_ratio(r::AbstractResource) = get(r, :hydro_energy_to_power_ratio, default_zero)
+function hydro_energy_to_power_ratio(r::AbstractResource)
+ get(r, :hydro_energy_to_power_ratio, default_zero)
+end
-qualified_hydrogen_supply(r::AbstractResource) = get(r, :qualified_hydrogen_supply, default_zero)
+function qualified_hydrogen_supply(r::AbstractResource)
+ get(r, :qualified_hydrogen_supply, default_zero)
+end
retrofit_id(r::AbstractResource)::String = get(r, :retrofit_id, "None")
function retrofit_efficiency(r::AbstractResource)
@@ -590,32 +609,58 @@ inv_cost_per_mwyr(r::AbstractResource) = get(r, :inv_cost_per_mwyr, default_zero
fixed_om_cost_per_mwyr(r::AbstractResource) = get(r, :fixed_om_cost_per_mwyr, default_zero)
var_om_cost_per_mwh(r::AbstractResource) = get(r, :var_om_cost_per_mwh, default_zero)
inv_cost_per_mwhyr(r::AbstractResource) = get(r, :inv_cost_per_mwhyr, default_zero)
-fixed_om_cost_per_mwhyr(r::AbstractResource) = get(r, :fixed_om_cost_per_mwhyr, default_zero)
-inv_cost_charge_per_mwyr(r::AbstractResource) = get(r, :inv_cost_charge_per_mwyr, default_zero)
-fixed_om_cost_charge_per_mwyr(r::AbstractResource) = get(r, :fixed_om_cost_charge_per_mwyr, default_zero)
+function fixed_om_cost_per_mwhyr(r::AbstractResource)
+ get(r, :fixed_om_cost_per_mwhyr, default_zero)
+end
+function inv_cost_charge_per_mwyr(r::AbstractResource)
+ get(r, :inv_cost_charge_per_mwyr, default_zero)
+end
+function fixed_om_cost_charge_per_mwyr(r::AbstractResource)
+ get(r, :fixed_om_cost_charge_per_mwyr, default_zero)
+end
start_cost_per_mw(r::AbstractResource) = get(r, :start_cost_per_mw, default_zero)
# fuel
fuel(r::AbstractResource) = get(r, :fuel, "None")
-start_fuel_mmbtu_per_mw(r::AbstractResource) = get(r, :start_fuel_mmbtu_per_mw, default_zero)
-heat_rate_mmbtu_per_mwh(r::AbstractResource) = get(r, :heat_rate_mmbtu_per_mwh, default_zero)
+function start_fuel_mmbtu_per_mw(r::AbstractResource)
+ get(r, :start_fuel_mmbtu_per_mw, default_zero)
+end
+function heat_rate_mmbtu_per_mwh(r::AbstractResource)
+ get(r, :heat_rate_mmbtu_per_mwh, default_zero)
+end
co2_capture_fraction(r::AbstractResource) = get(r, :co2_capture_fraction, default_zero)
-co2_capture_fraction_startup(r::AbstractResource) = get(r, :co2_capture_fraction_startup, default_zero)
-ccs_disposal_cost_per_metric_ton(r::AbstractResource) = get(r, :ccs_disposal_cost_per_metric_ton, default_zero)
+function co2_capture_fraction_startup(r::AbstractResource)
+ get(r, :co2_capture_fraction_startup, default_zero)
+end
+function ccs_disposal_cost_per_metric_ton(r::AbstractResource)
+ get(r, :ccs_disposal_cost_per_metric_ton, default_zero)
+end
biomass(r::AbstractResource) = get(r, :biomass, default_zero)
multi_fuels(r::AbstractResource) = get(r, :multi_fuels, default_zero)
-fuel_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag)), "None")
+fuel_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel", tag)), "None")
num_fuels(r::AbstractResource) = get(r, :num_fuels, default_zero)
-heat_rate_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("heat_rate",tag, "_mmbtu_per_mwh")), default_zero)
-max_cofire_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag, "_max_cofire_level")), 1)
-min_cofire_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag, "_min_cofire_level")), default_zero)
-max_cofire_start_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag, "_max_cofire_level_start")), 1)
-min_cofire_start_cols(r::AbstractResource; tag::Int64) = get(r, Symbol(string("fuel",tag, "_min_cofire_level_start")), default_zero)
+function heat_rate_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("heat_rate", tag, "_mmbtu_per_mwh")), default_zero)
+end
+function max_cofire_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("fuel", tag, "_max_cofire_level")), 1)
+end
+function min_cofire_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("fuel", tag, "_min_cofire_level")), default_zero)
+end
+function max_cofire_start_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("fuel", tag, "_max_cofire_level_start")), 1)
+end
+function min_cofire_start_cols(r::AbstractResource; tag::Int64)
+ get(r, Symbol(string("fuel", tag, "_min_cofire_level_start")), default_zero)
+end
# Reservoir hydro and storage
const default_percent = 1.0
-efficiency_up(r::T) where T <: Union{Hydro,Storage} = get(r, :eff_up, default_percent)
-efficiency_down(r::T) where T <: Union{Hydro,Storage} = get(r, :eff_down, default_percent)
+efficiency_up(r::T) where {T <: Union{Hydro, Storage}} = get(r, :eff_up, default_percent)
+function efficiency_down(r::T) where {T <: Union{Hydro, Storage}}
+ get(r, :eff_down, default_percent)
+end
# Ramp up and down
const VarPower = Union{Electrolyzer, Hydro, Thermal}
@@ -630,8 +675,12 @@ capital_recovery_period(r::Storage) = get(r, :capital_recovery_period, 15)
capital_recovery_period(r::AbstractResource) = get(r, :capital_recovery_period, 30)
tech_wacc(r::AbstractResource) = get(r, :wacc, default_zero)
min_retired_cap_mw(r::AbstractResource) = get(r, :min_retired_cap_mw, default_zero)
-min_retired_energy_cap_mw(r::AbstractResource) = get(r, :min_retired_energy_cap_mw, default_zero)
-min_retired_charge_cap_mw(r::AbstractResource) = get(r, :min_retired_charge_cap_mw, default_zero)
+function min_retired_energy_cap_mw(r::AbstractResource)
+ get(r, :min_retired_energy_cap_mw, default_zero)
+end
+function min_retired_charge_cap_mw(r::AbstractResource)
+ get(r, :min_retired_charge_cap_mw, default_zero)
+end
cum_min_retired_cap_mw(r::AbstractResource) = r.cum_min_retired_cap_mw
cum_min_retired_energy_cap_mw(r::AbstractResource) = r.cum_min_retired_energy_cap_mw
cum_min_retired_charge_cap_mw(r::AbstractResource) = r.cum_min_retired_charge_cap_mw
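
The two `capital_recovery_period` methods just above illustrate the dispatch-plus-default pattern used throughout this interface: the more specific method supplies a type-appropriate default (15 years for `Storage`, 30 otherwise), and `get` only falls back to that default when the attribute is absent. A toy imitation under stated assumptions (toy types and values, not GenX's):

```julia
# Toy types stand in for GenX's Storage / AbstractResource; values are illustrative.
abstract type ToyResource end
struct ToyStorage <: ToyResource
    dict::Dict{Symbol, Any}
end
struct ToyThermal <: ToyResource
    dict::Dict{Symbol, Any}
end
Base.get(r::ToyResource, k::Symbol, default) = get(getfield(r, :dict), k, default)

capital_recovery_period(r::ToyStorage) = get(r, :capital_recovery_period, 15)
capital_recovery_period(r::ToyResource) = get(r, :capital_recovery_period, 30)

println(capital_recovery_period(ToyStorage(Dict{Symbol, Any}())))   # 15 (Storage-specific default)
println(capital_recovery_period(ToyThermal(Dict{Symbol, Any}())))   # 30 (generic default)
println(capital_recovery_period(ToyStorage(Dict{Symbol, Any}(:capital_recovery_period => 20)))) # 20
```
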
@@ -643,45 +692,85 @@ mga(r::AbstractResource) = get(r, :mga, default_zero)
esr(r::AbstractResource; tag::Int64) = get(r, Symbol("esr_$tag"), default_zero)
min_cap(r::AbstractResource; tag::Int64) = get(r, Symbol("min_cap_$tag"), default_zero)
max_cap(r::AbstractResource; tag::Int64) = get(r, Symbol("max_cap_$tag"), default_zero)
-derating_factor(r::AbstractResource; tag::Int64) = get(r, Symbol("derating_factor_$tag"), default_zero)
+function derating_factor(r::AbstractResource; tag::Int64)
+ get(r, Symbol("derating_factor_$tag"), default_zero)
+end
# write_outputs
region(r::AbstractResource) = r.region
cluster(r::AbstractResource) = r.cluster
# UTILITY FUNCTIONS for working with resources
-is_LDS(rs::Vector{T}) where T <: AbstractResource = findall(r -> get(r, :lds, default_zero) == 1, rs)
-is_SDS(rs::Vector{T}) where T <: AbstractResource = findall(r -> get(r, :lds, default_zero) == 0, rs)
+function is_LDS(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> get(r, :lds, default_zero) == 1, rs)
+end
+function is_SDS(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> get(r, :lds, default_zero) == 0, rs)
+end
-ids_with_mga(rs::Vector{T}) where T <: AbstractResource = findall(r -> mga(r) == 1, rs)
+ids_with_mga(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> mga(r) == 1, rs)
-ids_with_fuel(rs::Vector{T}) where T <: AbstractResource = findall(r -> fuel(r) != "None", rs)
+function ids_with_fuel(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> fuel(r) != "None", rs)
+end
-ids_with_singlefuel(rs::Vector{T}) where T <: AbstractResource = findall(r -> multi_fuels(r) == 0, rs)
-ids_with_multifuels(rs::Vector{T}) where T <: AbstractResource = findall(r -> multi_fuels(r) == 1, rs)
+function ids_with_singlefuel(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> multi_fuels(r) == 0, rs)
+end
+function ids_with_multifuels(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> multi_fuels(r) == 1, rs)
+end
-is_buildable(rs::Vector{T}) where T <: AbstractResource = findall(r -> new_build(r) == true, rs)
-is_retirable(rs::Vector{T}) where T <: AbstractResource = findall(r -> can_retire(r) == true, rs)
-ids_can_retrofit(rs::Vector{T}) where T <: AbstractResource = findall(r -> can_retrofit(r) == true, rs)
-ids_retrofit_options(rs::Vector{T}) where T <: AbstractResource = findall(r -> is_retrofit_option(r) == true, rs)
+function is_buildable(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> new_build(r) == true, rs)
+end
+function is_retirable(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> can_retire(r) == true, rs)
+end
+function ids_can_retrofit(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> can_retrofit(r) == true, rs)
+end
+function ids_retrofit_options(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> is_retrofit_option(r) == true, rs)
+end
# Unit commitment
-ids_with_unit_commitment(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Thermal) && r.model == 1, rs)
+function ids_with_unit_commitment(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, Thermal) && r.model == 1, rs)
+end
# Without unit commitment
-no_unit_commitment(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Thermal) && r.model == 2, rs)
+function no_unit_commitment(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, Thermal) && r.model == 2, rs)
+end
# Operational Reserves
-ids_with_regulation_reserve_requirements(rs::Vector{T}) where T <: AbstractResource = findall(r -> reg_max(r) > 0, rs)
-ids_with_spinning_reserve_requirements(rs::Vector{T}) where T <: AbstractResource = findall(r -> rsv_max(r) > 0, rs)
+function ids_with_regulation_reserve_requirements(rs::Vector{
+ T,
+}) where {T <: AbstractResource}
+ findall(r -> reg_max(r) > 0, rs)
+end
+function ids_with_spinning_reserve_requirements(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> rsv_max(r) > 0, rs)
+end
# Maintenance
-ids_with_maintenance(rs::Vector{T}) where T <: AbstractResource = findall(r -> get(r, :maint, default_zero) == 1, rs)
+function ids_with_maintenance(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> get(r, :maint, default_zero) == 1, rs)
+end
maintenance_duration(r::AbstractResource) = get(r, :maintenance_duration, default_zero)
-maintenance_cycle_length_years(r::AbstractResource) = get(r, :maintenance_cycle_length_years, default_zero)
-maintenance_begin_cadence(r::AbstractResource) = get(r, :maintenance_begin_cadence, default_zero)
+function maintenance_cycle_length_years(r::AbstractResource)
+ get(r, :maintenance_cycle_length_years, default_zero)
+end
+function maintenance_begin_cadence(r::AbstractResource)
+ get(r, :maintenance_begin_cadence, default_zero)
+end
-ids_contribute_min_retirement(rs::Vector{T}) where T <: AbstractResource = findall(r -> can_contribute_min_retirement(r) == true, rs)
-ids_not_contribute_min_retirement(rs::Vector{T}) where T <: AbstractResource = findall(r -> can_contribute_min_retirement(r) == false, rs)
+function ids_contribute_min_retirement(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> can_contribute_min_retirement(r) == true, rs)
+end
+function ids_not_contribute_min_retirement(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> can_contribute_min_retirement(r) == false, rs)
+end
# STORAGE interface
"""
@@ -689,14 +778,18 @@ ids_not_contribute_min_retirement(rs::Vector{T}) where T <: AbstractResource = f
Returns the indices of all storage resources in the vector `rs`.
"""
-storage(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Storage), rs)
+storage(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, Storage), rs)
self_discharge(r::Storage) = r.self_disch
min_duration(r::Storage) = r.min_duration
max_duration(r::Storage) = r.max_duration
var_om_cost_per_mwh_in(r::Storage) = get(r, :var_om_cost_per_mwh_in, default_zero)
-symmetric_storage(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Storage) && r.model == 1, rs)
-asymmetric_storage(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Storage) && r.model == 2, rs)
+function symmetric_storage(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, Storage) && r.model == 1, rs)
+end
+function asymmetric_storage(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, Storage) && r.model == 2, rs)
+end
# HYDRO interface
"""
@@ -704,7 +797,7 @@ asymmetric_storage(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa
Returns the indices of all hydro resources in the vector `rs`.
"""
-hydro(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Hydro), rs)
+hydro(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, Hydro), rs)
# THERMAL interface
"""
@@ -712,10 +805,12 @@ hydro(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Hydro), rs
Returns the indices of all thermal resources in the vector `rs`.
"""
-thermal(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Thermal), rs)
+thermal(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, Thermal), rs)
up_time(r::Thermal) = get(r, :up_time, default_zero)
down_time(r::Thermal) = get(r, :down_time, default_zero)
-pwfu_fuel_usage_zero_load_mmbtu_per_h(r::Thermal) = get(r, :pwfu_fuel_usage_zero_load_mmbtu_per_h, default_zero)
+function pwfu_fuel_usage_zero_load_mmbtu_per_h(r::Thermal)
+ get(r, :pwfu_fuel_usage_zero_load_mmbtu_per_h, default_zero)
+end
# VRE interface
"""
@@ -723,7 +818,7 @@ pwfu_fuel_usage_zero_load_mmbtu_per_h(r::Thermal) = get(r, :pwfu_fuel_usage_zero
Returns the indices of all Vre resources in the vector `rs`.
"""
-vre(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Vre), rs)
+vre(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, Vre), rs)
# ELECTROLYZER interface
"""
@@ -731,7 +826,9 @@ vre(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Vre), rs)
Returns the indices of all electrolyzer resources in the vector `rs`.
"""
-electrolyzer(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,Electrolyzer), rs)
+electrolyzer(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r,
+ Electrolyzer),
+ rs)
electrolyzer_min_kt(r::Electrolyzer) = r.electrolyzer_min_kt
hydrogen_mwh_per_tonne(r::Electrolyzer) = r.hydrogen_mwh_per_tonne
hydrogen_price_per_tonne(r::Electrolyzer) = r.hydrogen_price_per_tonne
@@ -742,7 +839,8 @@ hydrogen_price_per_tonne(r::Electrolyzer) = r.hydrogen_price_per_tonne
Returns the indices of all flexible demand resources in the vector `rs`.
"""
-flex_demand(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,FlexDemand), rs)
+flex_demand(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, FlexDemand),
+ rs)
flexible_demand_energy_eff(r::FlexDemand) = r.flexible_demand_energy_eff
max_flexible_demand_delay(r::FlexDemand) = r.max_flexible_demand_delay
max_flexible_demand_advance(r::FlexDemand) = r.max_flexible_demand_advance
@@ -754,7 +852,7 @@ var_om_cost_per_mwh_in(r::FlexDemand) = get(r, :var_om_cost_per_mwh_in, default_
Returns the indices of all must-run resources in the vector `rs`.
"""
-must_run(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,MustRun), rs)
+must_run(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, MustRun), rs)
# VRE_STOR interface
"""
@@ -762,7 +860,7 @@ must_run(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,MustRun
Returns the indices of all VRE_STOR resources in the vector `rs`.
"""
-vre_stor(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage), rs)
+vre_stor(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, VreStorage), rs)
technology(r::VreStorage) = r.technology
self_discharge(r::VreStorage) = r.self_disch
@@ -771,154 +869,200 @@ self_discharge(r::VreStorage) = r.self_disch
Returns the indices of all co-located solar resources in the vector `rs`.
"""
-solar(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.solar != 0, rs)
+solar(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, VreStorage) &&
+ r.solar != 0,
+ rs)
"""
wind(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located wind resources in the vector `rs`.
"""
-wind(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.wind != 0, rs)
+wind(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r, VreStorage) &&
+ r.wind != 0,
+ rs)
"""
storage_dc_discharge(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located storage resources in the vector `rs` that discharge DC.
"""
-storage_dc_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_discharge >= 1, rs)
-storage_sym_dc_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_discharge == 1, rs)
-storage_asym_dc_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_discharge == 2, rs)
+storage_dc_discharge(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r,
+ VreStorage) && r.stor_dc_discharge >= 1,
+ rs)
+function storage_sym_dc_discharge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_dc_discharge == 1, rs)
+end
+function storage_asym_dc_discharge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_dc_discharge == 2, rs)
+end
"""
storage_dc_charge(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located storage resources in the vector `rs` that charge DC.
"""
-storage_dc_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_charge >= 1, rs)
-storage_sym_dc_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_charge == 1, rs)
-storage_asym_dc_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_dc_charge == 2, rs)
+storage_dc_charge(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r,
+ VreStorage) && r.stor_dc_charge >= 1,
+ rs)
+function storage_sym_dc_charge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_dc_charge == 1, rs)
+end
+function storage_asym_dc_charge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_dc_charge == 2, rs)
+end
"""
storage_ac_discharge(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located storage resources in the vector `rs` that discharge AC.
"""
-storage_ac_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_discharge >= 1, rs)
-storage_sym_ac_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_discharge == 1, rs)
-storage_asym_ac_discharge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_discharge == 2, rs)
+storage_ac_discharge(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r,
+ VreStorage) && r.stor_ac_discharge >= 1,
+ rs)
+function storage_sym_ac_discharge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_ac_discharge == 1, rs)
+end
+function storage_asym_ac_discharge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_ac_discharge == 2, rs)
+end
"""
storage_ac_charge(rs::Vector{T}) where T <: AbstractResource
Returns the indices of all co-located storage resources in the vector `rs` that charge AC.
"""
-storage_ac_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_charge >= 1, rs)
-storage_sym_ac_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_charge == 1, rs)
-storage_asym_ac_charge(rs::Vector{T}) where T <: AbstractResource = findall(r -> isa(r,VreStorage) && r.stor_ac_charge == 2, rs)
+storage_ac_charge(rs::Vector{T}) where {T <: AbstractResource} = findall(r -> isa(r,
+ VreStorage) && r.stor_ac_charge >= 1,
+ rs)
+function storage_sym_ac_charge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_ac_charge == 1, rs)
+end
+function storage_asym_ac_charge(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> isa(r, VreStorage) && r.stor_ac_charge == 2, rs)
+end
-is_LDS_VRE_STOR(rs::Vector{T}) where T <: AbstractResource = findall(r -> get(r, :lds_vre_stor, default_zero) != 0, rs)
+function is_LDS_VRE_STOR(rs::Vector{T}) where {T <: AbstractResource}
+ findall(r -> get(r, :lds_vre_stor, default_zero) != 0, rs)
+end
# loop over the above attributes and define function interfaces for each one
-for attr in (:existing_cap_solar_mw,
- :existing_cap_wind_mw,
- :existing_cap_inverter_mw,
- :existing_cap_charge_dc_mw,
- :existing_cap_charge_ac_mw,
- :existing_cap_discharge_dc_mw,
- :existing_cap_discharge_ac_mw)
+for attr in (:existing_cap_solar_mw,
+ :existing_cap_wind_mw,
+ :existing_cap_inverter_mw,
+ :existing_cap_charge_dc_mw,
+ :existing_cap_charge_ac_mw,
+ :existing_cap_discharge_dc_mw,
+ :existing_cap_discharge_ac_mw)
@eval @interface $attr
end
-for attr in (:max_cap_solar_mw,
- :max_cap_wind_mw,
- :max_cap_inverter_mw,
- :max_cap_charge_dc_mw,
- :max_cap_charge_ac_mw,
- :max_cap_discharge_dc_mw,
- :max_cap_discharge_ac_mw,
- :min_cap_solar_mw,
- :min_cap_wind_mw,
- :min_cap_inverter_mw,
- :min_cap_charge_dc_mw,
- :min_cap_charge_ac_mw,
- :min_cap_discharge_dc_mw,
- :min_cap_discharge_ac_mw,
- :inverter_ratio_solar,
- :inverter_ratio_wind,)
+for attr in (:max_cap_solar_mw,
+ :max_cap_wind_mw,
+ :max_cap_inverter_mw,
+ :max_cap_charge_dc_mw,
+ :max_cap_charge_ac_mw,
+ :max_cap_discharge_dc_mw,
+ :max_cap_discharge_ac_mw,
+ :min_cap_solar_mw,
+ :min_cap_wind_mw,
+ :min_cap_inverter_mw,
+ :min_cap_charge_dc_mw,
+ :min_cap_charge_ac_mw,
+ :min_cap_discharge_dc_mw,
+ :min_cap_discharge_ac_mw,
+ :inverter_ratio_solar,
+ :inverter_ratio_wind)
@eval @interface $attr default_minmax_cap
end
for attr in (:etainverter,
- :inv_cost_inverter_per_mwyr,
- :inv_cost_solar_per_mwyr,
- :inv_cost_wind_per_mwyr,
- :inv_cost_discharge_dc_per_mwyr,
- :inv_cost_charge_dc_per_mwyr,
- :inv_cost_discharge_ac_per_mwyr,
- :inv_cost_charge_ac_per_mwyr,
- :fixed_om_inverter_cost_per_mwyr,
- :fixed_om_solar_cost_per_mwyr,
- :fixed_om_wind_cost_per_mwyr,
- :fixed_om_cost_discharge_dc_per_mwyr,
- :fixed_om_cost_charge_dc_per_mwyr,
- :fixed_om_cost_discharge_ac_per_mwyr,
- :fixed_om_cost_charge_ac_per_mwyr,
- :var_om_cost_per_mwh_solar,
- :var_om_cost_per_mwh_wind,
- :var_om_cost_per_mwh_charge_dc,
- :var_om_cost_per_mwh_discharge_dc,
- :var_om_cost_per_mwh_charge_ac,
- :var_om_cost_per_mwh_discharge_ac,
- :eff_up_ac,
- :eff_down_ac,
- :eff_up_dc,
- :eff_down_dc,
- :power_to_energy_ac,
- :power_to_energy_dc)
+ :inv_cost_inverter_per_mwyr,
+ :inv_cost_solar_per_mwyr,
+ :inv_cost_wind_per_mwyr,
+ :inv_cost_discharge_dc_per_mwyr,
+ :inv_cost_charge_dc_per_mwyr,
+ :inv_cost_discharge_ac_per_mwyr,
+ :inv_cost_charge_ac_per_mwyr,
+ :fixed_om_inverter_cost_per_mwyr,
+ :fixed_om_solar_cost_per_mwyr,
+ :fixed_om_wind_cost_per_mwyr,
+ :fixed_om_cost_discharge_dc_per_mwyr,
+ :fixed_om_cost_charge_dc_per_mwyr,
+ :fixed_om_cost_discharge_ac_per_mwyr,
+ :fixed_om_cost_charge_ac_per_mwyr,
+ :var_om_cost_per_mwh_solar,
+ :var_om_cost_per_mwh_wind,
+ :var_om_cost_per_mwh_charge_dc,
+ :var_om_cost_per_mwh_discharge_dc,
+ :var_om_cost_per_mwh_charge_ac,
+ :var_om_cost_per_mwh_discharge_ac,
+ :eff_up_ac,
+ :eff_down_ac,
+ :eff_up_dc,
+ :eff_down_dc,
+ :power_to_energy_ac,
+ :power_to_energy_dc)
@eval @interface $attr default_zero VreStorage
end
# Multistage
for attr in (:capital_recovery_period_dc,
- :capital_recovery_period_solar,
- :capital_recovery_period_wind,
- :capital_recovery_period_charge_dc,
- :capital_recovery_period_discharge_dc,
- :capital_recovery_period_charge_ac,
- :capital_recovery_period_discharge_ac,
- :tech_wacc_dc,
- :tech_wacc_solar,
- :tech_wacc_wind,
- :tech_wacc_charge_dc,
- :tech_wacc_discharge_dc,
- :tech_wacc_charge_ac,
- :tech_wacc_discharge_ac)
+ :capital_recovery_period_solar,
+ :capital_recovery_period_wind,
+ :capital_recovery_period_charge_dc,
+ :capital_recovery_period_discharge_dc,
+ :capital_recovery_period_charge_ac,
+ :capital_recovery_period_discharge_ac,
+ :tech_wacc_dc,
+ :tech_wacc_solar,
+ :tech_wacc_wind,
+ :tech_wacc_charge_dc,
+ :tech_wacc_discharge_dc,
+ :tech_wacc_charge_ac,
+ :tech_wacc_discharge_ac)
@eval @interface $attr default_zero VreStorage
end
# Endogenous retirement
-for attr in (:min_retired_cap_inverter_mw,
- :min_retired_cap_solar_mw,
- :min_retired_cap_wind_mw,
- :min_retired_cap_discharge_dc_mw,
- :min_retired_cap_charge_dc_mw,
- :min_retired_cap_discharge_ac_mw,
- :min_retired_cap_charge_ac_mw,)
- @eval @interface $attr default_zero
- cum_attr = Symbol("cum_"*String(attr))
- @eval @interface $cum_attr default_zero
+for attr in (:min_retired_cap_inverter_mw,
+ :min_retired_cap_solar_mw,
+ :min_retired_cap_wind_mw,
+ :min_retired_cap_discharge_dc_mw,
+ :min_retired_cap_charge_dc_mw,
+ :min_retired_cap_discharge_ac_mw,
+ :min_retired_cap_charge_ac_mw)
+ @eval @interface $attr default_zero
+ cum_attr = Symbol("cum_" * String(attr))
+ @eval @interface $cum_attr default_zero
end
## policies
# co-located storage
-esr_vrestor(r::AbstractResource; tag::Int64) = get(r, Symbol("esr_vrestor_$tag"), default_zero)
-min_cap_stor(r::AbstractResource; tag::Int64) = get(r, Symbol("min_cap_stor_$tag"), default_zero)
-max_cap_stor(r::AbstractResource; tag::Int64) = get(r, Symbol("max_cap_stor_$tag"), default_zero)
+function esr_vrestor(r::AbstractResource; tag::Int64)
+ get(r, Symbol("esr_vrestor_$tag"), default_zero)
+end
+function min_cap_stor(r::AbstractResource; tag::Int64)
+ get(r, Symbol("min_cap_stor_$tag"), default_zero)
+end
+function max_cap_stor(r::AbstractResource; tag::Int64)
+ get(r, Symbol("max_cap_stor_$tag"), default_zero)
+end
# vre part
-min_cap_solar(r::AbstractResource; tag::Int64) = get(r, Symbol("min_cap_solar_$tag"), default_zero)
-max_cap_solar(r::AbstractResource; tag::Int64) = get(r, Symbol("max_cap_solar_$tag"), default_zero)
-min_cap_wind(r::AbstractResource; tag::Int64) = get(r, Symbol("min_cap_wind_$tag"), default_zero)
-max_cap_wind(r::AbstractResource; tag::Int64) = get(r, Symbol("max_cap_wind_$tag"), default_zero)
+function min_cap_solar(r::AbstractResource; tag::Int64)
+ get(r, Symbol("min_cap_solar_$tag"), default_zero)
+end
+function max_cap_solar(r::AbstractResource; tag::Int64)
+ get(r, Symbol("max_cap_solar_$tag"), default_zero)
+end
+function min_cap_wind(r::AbstractResource; tag::Int64)
+ get(r, Symbol("min_cap_wind_$tag"), default_zero)
+end
+function max_cap_wind(r::AbstractResource; tag::Int64)
+ get(r, Symbol("max_cap_wind_$tag"), default_zero)
+end
## Utility functions for working with resources
in_zone(r::AbstractResource, zone::Int) = zone_id(r) == zone
-resources_in_zone(rs::Vector{<:AbstractResource}, zone::Int) = filter(r -> in_zone(r, zone), rs)
+function resources_in_zone(rs::Vector{<:AbstractResource}, zone::Int)
+ filter(r -> in_zone(r, zone), rs)
+end
@doc raw"""
resources_in_zone_by_rid(rs::Vector{<:AbstractResource}, zone::Int)
@@ -940,7 +1084,8 @@ Find R_ID's of resources with retrofit cluster id `cluster_id`.
# Returns
- `Vector{Int64}`: The vector of resource ids in the retrofit cluster.
"""
-function resources_in_retrofit_cluster_by_rid(rs::Vector{<:AbstractResource}, cluster_id::String)
+function resources_in_retrofit_cluster_by_rid(rs::Vector{<:AbstractResource},
+ cluster_id::String)
return resource_id.(rs[retrofit_id.(rs) .== cluster_id])
end
@@ -959,7 +1104,8 @@ Find the resource with `name` in the vector `rs`.
function resource_by_name(rs::Vector{<:AbstractResource}, name::AbstractString)
r_id = findfirst(r -> resource_name(r) == name, rs)
# check that the resource exists
- isnothing(r_id) && error("Resource $name not found in resource data. \nHint: Make sure that the resource names in input files match the ones in the \"resource\" folder.\n")
+ isnothing(r_id) &&
+ error("Resource $name not found in resource data. \nHint: Make sure that the resource names in input files match the ones in the \"resource\" folder.\n")
return rs[r_id]
end
@@ -976,7 +1122,7 @@ function validate_boolean_attribute(r::AbstractResource, attr::Symbol)
attr_value = get(r, attr, 0)
if attr_value != 0 && attr_value != 1
error("Attribute $attr in resource $(resource_name(r)) must be boolean." *
- "The only valid values are {0,1}, not $attr_value.")
+ "The only valid values are {0,1}, not $attr_value.")
end
end
@@ -991,7 +1137,7 @@ Find the resource ids of the retrofit units in the vector `rs` where all retrofi
# Returns
- `Vector{Int64}`: The vector of resource ids.
"""
-function ids_with_all_options_contributing(rs::Vector{T}) where T <: AbstractResource
+function ids_with_all_options_contributing(rs::Vector{T}) where {T <: AbstractResource}
# select resources that can retrofit
units_can_retrofit = ids_can_retrofit(rs)
# check if all retrofit options in the retrofit cluster of each retrofit resource contribute to min retirement
@@ -1011,10 +1157,13 @@ Check if all retrofit options in the retrofit cluster of the retrofit resource `
# Returns
- `Bool`: True if all retrofit options contribute to min retirement, otherwise false.
"""
-function has_all_options_contributing(retrofit_res::AbstractResource, rs::Vector{T}) where T <: AbstractResource
+function has_all_options_contributing(retrofit_res::AbstractResource,
+ rs::Vector{T}) where {T <: AbstractResource}
retro_id = retrofit_id(retrofit_res)
- return isempty(intersect(resources_in_retrofit_cluster_by_rid(rs, retro_id), ids_retrofit_options(rs), ids_not_contribute_min_retirement(rs)))
-end
+ return isempty(intersect(resources_in_retrofit_cluster_by_rid(rs, retro_id),
+ ids_retrofit_options(rs),
+ ids_not_contribute_min_retirement(rs)))
+end
"""
ids_with_all_options_not_contributing(rs::Vector{T}) where T <: AbstractResource
@@ -1027,11 +1176,12 @@ Find the resource ids of the retrofit units in the vector `rs` where all retrofi
# Returns
- `Vector{Int64}`: The vector of resource ids.
"""
-function ids_with_all_options_not_contributing(rs::Vector{T}) where T <: AbstractResource
+function ids_with_all_options_not_contributing(rs::Vector{T}) where {T <: AbstractResource}
# select resources that can retrofit
units_can_retrofit = ids_can_retrofit(rs)
# check if all retrofit options in the retrofit cluster of each retrofit resource contribute to min retirement
- condition::Vector{Bool} = has_all_options_not_contributing.(rs[units_can_retrofit], Ref(rs))
+ condition::Vector{Bool} = has_all_options_not_contributing.(rs[units_can_retrofit],
+ Ref(rs))
return units_can_retrofit[condition]
end
@@ -1047,7 +1197,10 @@ Check if all retrofit options in the retrofit cluster of the retrofit resource `
# Returns
- `Bool`: True if all retrofit options do not contribute to min retirement, otherwise false.
"""
-function has_all_options_not_contributing(retrofit_res::AbstractResource, rs::Vector{T}) where T <: AbstractResource
+function has_all_options_not_contributing(retrofit_res::AbstractResource,
+ rs::Vector{T}) where {T <: AbstractResource}
retro_id = retrofit_id(retrofit_res)
- return isempty(intersect(resources_in_retrofit_cluster_by_rid(rs, retro_id), ids_retrofit_options(rs), ids_contribute_min_retirement(rs)))
-end
\ No newline at end of file
+ return isempty(intersect(resources_in_retrofit_cluster_by_rid(rs, retro_id),
+ ids_retrofit_options(rs),
+ ids_contribute_min_retirement(rs)))
+end
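The accessor functions reformatted above all share one pattern: they return integer indices into the resource vector rather than the resources themselves, so the same index can address both `gen` and the JuMP variable containers used later in this patch. A toy, self-contained analogue of that pattern (the `Toy` struct and `toy_symmetric` are illustrative only, not part of GenX):

```julia
# Minimal analogue of the index-based interface pattern, assuming nothing
# about GenX internals: a predicate over a vector, returning positions.
struct Toy
    model::Int
end
toy_symmetric(rs::Vector{Toy}) = findall(r -> r.model == 1, rs)

rs = [Toy(1), Toy(2), Toy(1)]
@assert toy_symmetric(rs) == [1, 3]   # indices, usable as rs[idx] or to index a variable container
```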
diff --git a/src/model/resources/retrofits/retrofits.jl b/src/model/resources/retrofits/retrofits.jl
index d29d13fe33..6920dffd79 100644
--- a/src/model/resources/retrofits/retrofits.jl
+++ b/src/model/resources/retrofits/retrofits.jl
@@ -17,25 +17,40 @@ where ${RETROFIT}$ represents the set of all retrofit IDs (clusters) in the mode
"""
function retrofit(EP::Model, inputs::Dict)
-
- println("Retrofit Resources Module")
-
- gen = inputs["RESOURCES"]
-
- COMMIT = inputs["COMMIT"] # Set of all resources subject to unit commitment
- RETROFIT_CAP = inputs["RETROFIT_CAP"] # Set of all resources being retrofitted
- RETROFIT_OPTIONS = inputs["RETROFIT_OPTIONS"] # Set of all resources being created
- RETROFIT_IDS = inputs["RETROFIT_IDS"] # Set of unique IDs for retrofit resources
-
- @expression(EP,eRetrofittedCapByRetroId[id in RETROFIT_IDS],
- sum(cap_size(gen[y]) * EP[:vRETROFITCAP][y] for y in intersect(RETROFIT_CAP, COMMIT, resources_in_retrofit_cluster_by_rid(gen,id)); init=0)
- + sum(EP[:vRETROFITCAP][y] for y in setdiff(intersect(RETROFIT_CAP, resources_in_retrofit_cluster_by_rid(gen,id)), COMMIT); init=0))
-
- @expression(EP,eRetrofitCapByRetroId[id in RETROFIT_IDS],
- sum(cap_size(gen[y]) * EP[:vCAP][y] * (1/retrofit_efficiency(gen[y])) for y in intersect(RETROFIT_OPTIONS, COMMIT, resources_in_retrofit_cluster_by_rid(gen,id)); init=0)
- + sum(EP[:vCAP][y] * (1/retrofit_efficiency(gen[y])) for y in setdiff(intersect(RETROFIT_OPTIONS, resources_in_retrofit_cluster_by_rid(gen,id)), COMMIT); init=0))
-
- @constraint(EP, cRetrofitCapacity[id in RETROFIT_IDS], eRetrofittedCapByRetroId[id] == eRetrofitCapByRetroId[id])
-
- return EP
+ println("Retrofit Resources Module")
+
+ gen = inputs["RESOURCES"]
+
+ COMMIT = inputs["COMMIT"] # Set of all resources subject to unit commitment
+ RETROFIT_CAP = inputs["RETROFIT_CAP"] # Set of all resources being retrofitted
+ RETROFIT_OPTIONS = inputs["RETROFIT_OPTIONS"] # Set of all resources being created
+ RETROFIT_IDS = inputs["RETROFIT_IDS"] # Set of unique IDs for retrofit resources
+
+ @expression(EP, eRetrofittedCapByRetroId[id in RETROFIT_IDS],
+ sum(cap_size(gen[y]) * EP[:vRETROFITCAP][y] for y in intersect(RETROFIT_CAP,
+ COMMIT,
+ resources_in_retrofit_cluster_by_rid(gen, id));
+ init = 0)
+ +sum(EP[:vRETROFITCAP][y] for y in setdiff(intersect(RETROFIT_CAP,
+ resources_in_retrofit_cluster_by_rid(gen, id)),
+ COMMIT);
+ init = 0))
+
+ @expression(EP, eRetrofitCapByRetroId[id in RETROFIT_IDS],
+ sum(cap_size(gen[y]) * EP[:vCAP][y] * (1 / retrofit_efficiency(gen[y]))
+ for y in intersect(RETROFIT_OPTIONS,
+ COMMIT,
+ resources_in_retrofit_cluster_by_rid(gen, id));
+ init = 0)
+ +sum(EP[:vCAP][y] * (1 / retrofit_efficiency(gen[y]))
+ for y in setdiff(intersect(RETROFIT_OPTIONS,
+ resources_in_retrofit_cluster_by_rid(gen, id)),
+ COMMIT);
+ init = 0))
+
+ @constraint(EP,
+ cRetrofitCapacity[id in RETROFIT_IDS],
+ eRetrofittedCapByRetroId[id]==eRetrofitCapByRetroId[id])
+
+ return EP
end
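Written out, the constraint that `cRetrofitCapacity` above enforces for each retrofit cluster `id` is the following (the notation here is ours, not from the docstring: Ω_y is `cap_size(gen[y])`, η_y is `retrofit_efficiency(gen[y])`, and the sums split resources by membership in `COMMIT`):

```math
\sum_{y \in RC_{id} \cap COMMIT} \Omega_y \, vRETROFITCAP_y
  + \sum_{y \in RC_{id} \setminus COMMIT} vRETROFITCAP_y
  =
\sum_{y \in RO_{id} \cap COMMIT} \frac{\Omega_y \, vCAP_y}{\eta_y}
  + \sum_{y \in RO_{id} \setminus COMMIT} \frac{vCAP_y}{\eta_y}
```

where ``RC_{id}`` and ``RO_{id}`` are the members of `RETROFIT_CAP` and `RETROFIT_OPTIONS`, respectively, that lie in retrofit cluster `id`.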
diff --git a/src/model/resources/storage/investment_charge.jl b/src/model/resources/storage/investment_charge.jl
index 77f67f76bc..5f92ec684c 100644
--- a/src/model/resources/storage/investment_charge.jl
+++ b/src/model/resources/storage/investment_charge.jl
@@ -39,97 +39,105 @@ In addition, this function adds investment and fixed O&M related costs related t
```
"""
function investment_charge!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Charge Investment Module")
-
- gen = inputs["RESOURCES"]
-
- MultiStage = setup["MultiStage"]
-
- STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"] # Set of storage resources with asymmetric (separte) charge/discharge capacity components
-
- NEW_CAP_CHARGE = inputs["NEW_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
- RET_CAP_CHARGE = inputs["RET_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
-
- ### Variables ###
-
- ## Storage capacity built and retired for storage resources with independent charge and discharge power capacities (STOR=2)
-
- # New installed charge capacity of resource "y"
- @variable(EP, vCAPCHARGE[y in NEW_CAP_CHARGE] >= 0)
-
- # Retired charge capacity of resource "y" from existing capacity
- @variable(EP, vRETCAPCHARGE[y in RET_CAP_CHARGE] >= 0)
-
- if MultiStage == 1
- @variable(EP, vEXISTINGCAPCHARGE[y in STOR_ASYMMETRIC] >= 0);
- end
-
- ### Expressions ###
-
- if MultiStage == 1
- @expression(EP, eExistingCapCharge[y in STOR_ASYMMETRIC], vEXISTINGCAPCHARGE[y])
- else
- @expression(EP, eExistingCapCharge[y in STOR_ASYMMETRIC], existing_charge_cap_mw(gen[y]))
- end
-
- @expression(EP, eTotalCapCharge[y in STOR_ASYMMETRIC],
- if (y in intersect(NEW_CAP_CHARGE, RET_CAP_CHARGE))
- eExistingCapCharge[y] + EP[:vCAPCHARGE][y] - EP[:vRETCAPCHARGE][y]
- elseif (y in setdiff(NEW_CAP_CHARGE, RET_CAP_CHARGE))
- eExistingCapCharge[y] + EP[:vCAPCHARGE][y]
- elseif (y in setdiff(RET_CAP_CHARGE, NEW_CAP_CHARGE))
- eExistingCapCharge[y] - EP[:vRETCAPCHARGE][y]
- else
- eExistingCapCharge[y] + EP[:vZERO]
- end
- )
-
- ## Objective Function Expressions ##
-
- # Fixed costs for resource "y" = annuitized investment cost plus fixed O&M costs
- # If resource is not eligible for new charge capacity, fixed costs are only O&M costs
- @expression(EP, eCFixCharge[y in STOR_ASYMMETRIC],
- if y in NEW_CAP_CHARGE # Resources eligible for new charge capacity
- inv_cost_charge_per_mwyr(gen[y])*vCAPCHARGE[y] + fixed_om_cost_charge_per_mwyr(gen[y])*eTotalCapCharge[y]
- else
- fixed_om_cost_charge_per_mwyr(gen[y])*eTotalCapCharge[y]
- end
- )
-
- # Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixCharge, sum(EP[:eCFixCharge][y] for y in STOR_ASYMMETRIC))
-
- # Add term to objective function expression
- if MultiStage == 1
- # OPEX multiplier scales fixed costs to account for multiple years between two model stages
- # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
- # and we have already accounted for multiple years between stages for fixed costs.
- add_to_expression!(EP[:eObj], (1/inputs["OPEXMULT"]), eTotalCFixCharge)
- else
- add_to_expression!(EP[:eObj], eTotalCFixCharge)
- end
-
- ### Constratints ###
-
- if MultiStage == 1
- # Existing capacity variable is equal to existing capacity specified in the input file
- @constraint(EP, cExistingCapCharge[y in STOR_ASYMMETRIC], EP[:vEXISTINGCAPCHARGE][y] == existing_charge_cap_mw(gen[y]))
- end
-
- ## Constraints on retirements and capacity additions
- #Cannot retire more charge capacity than existing charge capacity
- @constraint(EP, cMaxRetCharge[y in RET_CAP_CHARGE], vRETCAPCHARGE[y] <= eExistingCapCharge[y])
-
- #Constraints on new built capacity
-
- # Constraint on maximum charge capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cMaxCapCharge[y in intersect(ids_with_positive(gen, max_charge_cap_mw), STOR_ASYMMETRIC)], eTotalCapCharge[y] <= max_charge_cap_mw(gen[y]))
-
- # Constraint on minimum charge capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cMinCapCharge[y in intersect(ids_with_positive(gen, min_charge_cap_mw), STOR_ASYMMETRIC)], eTotalCapCharge[y] >= min_charge_cap_mw(gen[y]))
-
-
+ println("Charge Investment Module")
+
+ gen = inputs["RESOURCES"]
+
+ MultiStage = setup["MultiStage"]
+
+    STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"] # Set of storage resources with asymmetric (separate) charge/discharge capacity components
+
+ NEW_CAP_CHARGE = inputs["NEW_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
+ RET_CAP_CHARGE = inputs["RET_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
+
+ ### Variables ###
+
+ ## Storage capacity built and retired for storage resources with independent charge and discharge power capacities (STOR=2)
+
+ # New installed charge capacity of resource "y"
+ @variable(EP, vCAPCHARGE[y in NEW_CAP_CHARGE]>=0)
+
+ # Retired charge capacity of resource "y" from existing capacity
+ @variable(EP, vRETCAPCHARGE[y in RET_CAP_CHARGE]>=0)
+
+ if MultiStage == 1
+ @variable(EP, vEXISTINGCAPCHARGE[y in STOR_ASYMMETRIC]>=0)
+ end
+
+ ### Expressions ###
+
+ if MultiStage == 1
+ @expression(EP, eExistingCapCharge[y in STOR_ASYMMETRIC], vEXISTINGCAPCHARGE[y])
+ else
+ @expression(EP,
+ eExistingCapCharge[y in STOR_ASYMMETRIC],
+ existing_charge_cap_mw(gen[y]))
+ end
+
+ @expression(EP, eTotalCapCharge[y in STOR_ASYMMETRIC],
+ if (y in intersect(NEW_CAP_CHARGE, RET_CAP_CHARGE))
+ eExistingCapCharge[y] + EP[:vCAPCHARGE][y] - EP[:vRETCAPCHARGE][y]
+ elseif (y in setdiff(NEW_CAP_CHARGE, RET_CAP_CHARGE))
+ eExistingCapCharge[y] + EP[:vCAPCHARGE][y]
+ elseif (y in setdiff(RET_CAP_CHARGE, NEW_CAP_CHARGE))
+ eExistingCapCharge[y] - EP[:vRETCAPCHARGE][y]
+ else
+ eExistingCapCharge[y] + EP[:vZERO]
+ end)
+
+ ## Objective Function Expressions ##
+
+ # Fixed costs for resource "y" = annuitized investment cost plus fixed O&M costs
+ # If resource is not eligible for new charge capacity, fixed costs are only O&M costs
+ @expression(EP, eCFixCharge[y in STOR_ASYMMETRIC],
+ if y in NEW_CAP_CHARGE # Resources eligible for new charge capacity
+ inv_cost_charge_per_mwyr(gen[y]) * vCAPCHARGE[y] +
+ fixed_om_cost_charge_per_mwyr(gen[y]) * eTotalCapCharge[y]
+ else
+ fixed_om_cost_charge_per_mwyr(gen[y]) * eTotalCapCharge[y]
+ end)
+
+ # Sum individual resource contributions to fixed costs to get total fixed costs
+ @expression(EP, eTotalCFixCharge, sum(EP[:eCFixCharge][y] for y in STOR_ASYMMETRIC))
+
+ # Add term to objective function expression
+ if MultiStage == 1
+ # OPEX multiplier scales fixed costs to account for multiple years between two model stages
+ # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
+ # and we have already accounted for multiple years between stages for fixed costs.
+ add_to_expression!(EP[:eObj], (1 / inputs["OPEXMULT"]), eTotalCFixCharge)
+ else
+ add_to_expression!(EP[:eObj], eTotalCFixCharge)
+ end
+
+    ### Constraints ###
+
+ if MultiStage == 1
+ # Existing capacity variable is equal to existing capacity specified in the input file
+ @constraint(EP,
+ cExistingCapCharge[y in STOR_ASYMMETRIC],
+ EP[:vEXISTINGCAPCHARGE][y]==existing_charge_cap_mw(gen[y]))
+ end
+
+ ## Constraints on retirements and capacity additions
+ #Cannot retire more charge capacity than existing charge capacity
+ @constraint(EP,
+ cMaxRetCharge[y in RET_CAP_CHARGE],
+ vRETCAPCHARGE[y]<=eExistingCapCharge[y])
+
+ #Constraints on new built capacity
+
+ # Constraint on maximum charge capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasibility
+ @constraint(EP,
+ cMaxCapCharge[y in intersect(ids_with_positive(gen, max_charge_cap_mw),
+ STOR_ASYMMETRIC)],
+ eTotalCapCharge[y]<=max_charge_cap_mw(gen[y]))
+
+ # Constraint on minimum charge capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasibility
+ @constraint(EP,
+ cMinCapCharge[y in intersect(ids_with_positive(gen, min_charge_cap_mw),
+ STOR_ASYMMETRIC)],
+ eTotalCapCharge[y]>=min_charge_cap_mw(gen[y]))
end
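The four-way branch in `eTotalCapCharge` (and the analogous one in `investment_energy.jl` below) reduces to simple bookkeeping: total capacity is existing capacity, plus additions where the resource may build, minus retirements where it may retire. A toy restatement with hypothetical numbers, not the GenX API:

```julia
# Toy bookkeeping behind eTotalCapCharge: existing + new (if buildable) - retired (if retirable).
total_cap(existing, new, retired; can_build = true, can_retire = true) =
    existing + (can_build ? new : 0.0) - (can_retire ? retired : 0.0)

@assert total_cap(100.0, 20.0, 5.0) == 115.0                     # in both NEW_CAP and RET_CAP sets
@assert total_cap(100.0, 20.0, 5.0; can_retire = false) == 120.0 # eligible for new capacity only
@assert total_cap(100.0, 20.0, 5.0; can_build = false) == 95.0   # eligible for retirement only
```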
diff --git a/src/model/resources/storage/investment_energy.jl b/src/model/resources/storage/investment_energy.jl
index 35757fca6b..af28ba15c2 100644
--- a/src/model/resources/storage/investment_energy.jl
+++ b/src/model/resources/storage/investment_energy.jl
@@ -42,97 +42,106 @@ In addition, this function adds investment and fixed O\&M related costs related
```
"""
function investment_energy!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Storage Investment Module")
-
- gen = inputs["RESOURCES"]
-
- MultiStage = setup["MultiStage"]
-
- STOR_ALL = inputs["STOR_ALL"] # Set of all storage resources
- NEW_CAP_ENERGY = inputs["NEW_CAP_ENERGY"] # Set of all storage resources eligible for new energy capacity
- RET_CAP_ENERGY = inputs["RET_CAP_ENERGY"] # Set of all storage resources eligible for energy capacity retirements
-
- ### Variables ###
-
- ## Energy storage reservoir capacity (MWh capacity) built/retired for storage with variable power to energy ratio (STOR=1 or STOR=2)
-
- # New installed energy capacity of resource "y"
- @variable(EP, vCAPENERGY[y in NEW_CAP_ENERGY] >= 0)
-
- # Retired energy capacity of resource "y" from existing capacity
- @variable(EP, vRETCAPENERGY[y in RET_CAP_ENERGY] >= 0)
-
- if MultiStage == 1
- @variable(EP, vEXISTINGCAPENERGY[y in STOR_ALL] >= 0);
- end
-
- ### Expressions ###
-
- if MultiStage == 1
- @expression(EP, eExistingCapEnergy[y in STOR_ALL], vEXISTINGCAPENERGY[y])
- else
- @expression(EP, eExistingCapEnergy[y in STOR_ALL], existing_cap_mwh(gen[y]))
- end
-
- @expression(EP, eTotalCapEnergy[y in STOR_ALL],
- if (y in intersect(NEW_CAP_ENERGY, RET_CAP_ENERGY))
- eExistingCapEnergy[y] + EP[:vCAPENERGY][y] - EP[:vRETCAPENERGY][y]
- elseif (y in setdiff(NEW_CAP_ENERGY, RET_CAP_ENERGY))
- eExistingCapEnergy[y] + EP[:vCAPENERGY][y]
- elseif (y in setdiff(RET_CAP_ENERGY, NEW_CAP_ENERGY))
- eExistingCapEnergy[y] - EP[:vRETCAPENERGY][y]
- else
- eExistingCapEnergy[y] + EP[:vZERO]
- end
- )
-
- ## Objective Function Expressions ##
-
- # Fixed costs for resource "y" = annuitized investment cost plus fixed O&M costs
- # If resource is not eligible for new energy capacity, fixed costs are only O&M costs
- @expression(EP, eCFixEnergy[y in STOR_ALL],
- if y in NEW_CAP_ENERGY # Resources eligible for new capacity
- inv_cost_per_mwhyr(gen[y])*vCAPENERGY[y] + fixed_om_cost_per_mwhyr(gen[y])*eTotalCapEnergy[y]
- else
- fixed_om_cost_per_mwhyr(gen[y])*eTotalCapEnergy[y]
- end
- )
-
- # Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixEnergy, sum(EP[:eCFixEnergy][y] for y in STOR_ALL))
-
- # Add term to objective function expression
- if MultiStage == 1
- # OPEX multiplier scales fixed costs to account for multiple years between two model stages
- # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
- # and we have already accounted for multiple years between stages for fixed costs.
- add_to_expression!(EP[:eObj], (1/inputs["OPEXMULT"]), eTotalCFixEnergy)
- else
- add_to_expression!(EP[:eObj], eTotalCFixEnergy)
- end
-
- ### Constraints ###
-
- if MultiStage == 1
- @constraint(EP, cExistingCapEnergy[y in STOR_ALL], EP[:vEXISTINGCAPENERGY][y] == existing_cap_mwh(gen[y]))
- end
-
- ## Constraints on retirements and capacity additions
- # Cannot retire more energy capacity than existing energy capacity
- @constraint(EP, cMaxRetEnergy[y in RET_CAP_ENERGY], vRETCAPENERGY[y] <= eExistingCapEnergy[y])
-
- ## Constraints on new built energy capacity
- # Constraint on maximum energy capacity (if applicable) [set input to -1 if no constraint on maximum energy capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MWh is >= Max_Cap_MWh and lead to infeasabilty
- @constraint(EP, cMaxCapEnergy[y in intersect(ids_with_positive(gen, max_cap_mwh), STOR_ALL)], eTotalCapEnergy[y] <= max_cap_mwh(gen[y]))
-
- # Constraint on minimum energy capacity (if applicable) [set input to -1 if no constraint on minimum energy apacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MWh is <= Min_Cap_MWh and lead to infeasabilty
- @constraint(EP, cMinCapEnergy[y in intersect(ids_with_positive(gen, min_cap_mwh), STOR_ALL)], eTotalCapEnergy[y] >= min_cap_mwh(gen[y]))
-
- # Max and min constraints on energy storage capacity built (as proportion to discharge power capacity)
- @constraint(EP, cMinCapEnergyDuration[y in STOR_ALL], EP[:eTotalCapEnergy][y] >= min_duration(gen[y]) * EP[:eTotalCap][y])
- @constraint(EP, cMaxCapEnergyDuration[y in STOR_ALL], EP[:eTotalCapEnergy][y] <= max_duration(gen[y]) * EP[:eTotalCap][y])
-
+ println("Storage Investment Module")
+
+ gen = inputs["RESOURCES"]
+
+ MultiStage = setup["MultiStage"]
+
+ STOR_ALL = inputs["STOR_ALL"] # Set of all storage resources
+ NEW_CAP_ENERGY = inputs["NEW_CAP_ENERGY"] # Set of all storage resources eligible for new energy capacity
+ RET_CAP_ENERGY = inputs["RET_CAP_ENERGY"] # Set of all storage resources eligible for energy capacity retirements
+
+ ### Variables ###
+
+ ## Energy storage reservoir capacity (MWh capacity) built/retired for storage with variable power to energy ratio (STOR=1 or STOR=2)
+
+ # New installed energy capacity of resource "y"
+ @variable(EP, vCAPENERGY[y in NEW_CAP_ENERGY]>=0)
+
+ # Retired energy capacity of resource "y" from existing capacity
+ @variable(EP, vRETCAPENERGY[y in RET_CAP_ENERGY]>=0)
+
+ if MultiStage == 1
+ @variable(EP, vEXISTINGCAPENERGY[y in STOR_ALL]>=0)
+ end
+
+ ### Expressions ###
+
+ if MultiStage == 1
+ @expression(EP, eExistingCapEnergy[y in STOR_ALL], vEXISTINGCAPENERGY[y])
+ else
+ @expression(EP, eExistingCapEnergy[y in STOR_ALL], existing_cap_mwh(gen[y]))
+ end
+
+ @expression(EP, eTotalCapEnergy[y in STOR_ALL],
+ if (y in intersect(NEW_CAP_ENERGY, RET_CAP_ENERGY))
+ eExistingCapEnergy[y] + EP[:vCAPENERGY][y] - EP[:vRETCAPENERGY][y]
+ elseif (y in setdiff(NEW_CAP_ENERGY, RET_CAP_ENERGY))
+ eExistingCapEnergy[y] + EP[:vCAPENERGY][y]
+ elseif (y in setdiff(RET_CAP_ENERGY, NEW_CAP_ENERGY))
+ eExistingCapEnergy[y] - EP[:vRETCAPENERGY][y]
+ else
+ eExistingCapEnergy[y] + EP[:vZERO]
+ end)
+
+ ## Objective Function Expressions ##
+
+ # Fixed costs for resource "y" = annuitized investment cost plus fixed O&M costs
+ # If resource is not eligible for new energy capacity, fixed costs are only O&M costs
+ @expression(EP, eCFixEnergy[y in STOR_ALL],
+ if y in NEW_CAP_ENERGY # Resources eligible for new capacity
+ inv_cost_per_mwhyr(gen[y]) * vCAPENERGY[y] +
+ fixed_om_cost_per_mwhyr(gen[y]) * eTotalCapEnergy[y]
+ else
+ fixed_om_cost_per_mwhyr(gen[y]) * eTotalCapEnergy[y]
+ end)
+
+ # Sum individual resource contributions to fixed costs to get total fixed costs
+ @expression(EP, eTotalCFixEnergy, sum(EP[:eCFixEnergy][y] for y in STOR_ALL))
+
+ # Add term to objective function expression
+ if MultiStage == 1
+ # OPEX multiplier scales fixed costs to account for multiple years between two model stages
+ # We divide by OPEXMULT since we are going to multiply the entire objective function by this term later,
+ # and we have already accounted for multiple years between stages for fixed costs.
+ add_to_expression!(EP[:eObj], (1 / inputs["OPEXMULT"]), eTotalCFixEnergy)
+ else
+ add_to_expression!(EP[:eObj], eTotalCFixEnergy)
+ end
+
+ ### Constraints ###
+
+ if MultiStage == 1
+ @constraint(EP,
+ cExistingCapEnergy[y in STOR_ALL],
+ EP[:vEXISTINGCAPENERGY][y]==existing_cap_mwh(gen[y]))
+ end
+
+ ## Constraints on retirements and capacity additions
+ # Cannot retire more energy capacity than existing energy capacity
+ @constraint(EP,
+ cMaxRetEnergy[y in RET_CAP_ENERGY],
+ vRETCAPENERGY[y]<=eExistingCapEnergy[y])
+
+ ## Constraints on new built energy capacity
+ # Constraint on maximum energy capacity (if applicable) [set input to -1 if no constraint on maximum energy capacity]
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MWh is >= Max_Cap_MWh and lead to infeasibility
+ @constraint(EP,
+ cMaxCapEnergy[y in intersect(ids_with_positive(gen, max_cap_mwh), STOR_ALL)],
+ eTotalCapEnergy[y]<=max_cap_mwh(gen[y]))
+
+    # Constraint on minimum energy capacity (if applicable) [set input to -1 if no constraint on minimum energy capacity]
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MWh is <= Min_Cap_MWh and lead to infeasibility
+ @constraint(EP,
+ cMinCapEnergy[y in intersect(ids_with_positive(gen, min_cap_mwh), STOR_ALL)],
+ eTotalCapEnergy[y]>=min_cap_mwh(gen[y]))
+
+ # Max and min constraints on energy storage capacity built (as proportion to discharge power capacity)
+ @constraint(EP,
+ cMinCapEnergyDuration[y in STOR_ALL],
+ EP[:eTotalCapEnergy][y]>=min_duration(gen[y]) * EP[:eTotalCap][y])
+ @constraint(EP,
+ cMaxCapEnergyDuration[y in STOR_ALL],
+ EP[:eTotalCapEnergy][y]<=max_duration(gen[y]) * EP[:eTotalCap][y])
end
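The last two constraints added above couple energy and power capacity through storage duration: the built energy capacity must sit between `min_duration` and `max_duration` hours of the discharge power capacity. A quick numeric sanity check with made-up values:

```julia
# Hypothetical numbers only: 200 MWh on 50 MW is a 4-hour battery, which satisfies
# the duration window enforced by cMinCapEnergyDuration / cMaxCapEnergyDuration.
power_cap_mw = 50.0
energy_cap_mwh = 200.0
min_dur, max_dur = 1.0, 10.0
@assert min_dur * power_cap_mw <= energy_cap_mwh <= max_dur * power_cap_mw
```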
diff --git a/src/model/resources/storage/long_duration_storage.jl b/src/model/resources/storage/long_duration_storage.jl
index f88b22b0f6..1708332784 100644
--- a/src/model/resources/storage/long_duration_storage.jl
+++ b/src/model/resources/storage/long_duration_storage.jl
@@ -58,92 +58,119 @@ If the capacity reserve margin constraint is enabled, a similar set of constrain
All other constraints are identical to those used to track the actual state of charge, except with the new variables $Q^{CRM}_{o,z,n}$ and $\Delta Q^{CRM}_{o,z,n}$ used in place of $Q_{o,z,n}$ and $\Delta Q_{o,z,n}$, respectively.
"""
function long_duration_storage!(EP::Model, inputs::Dict, setup::Dict)
-
- println("Long Duration Storage Module")
-
- gen = inputs["RESOURCES"]
-
- CapacityReserveMargin = setup["CapacityReserveMargin"]
-
- REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
-
- STOR_LONG_DURATION = inputs["STOR_LONG_DURATION"]
-
- hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
-
- dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
- NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
-
- MODELED_PERIODS_INDEX = 1:NPeriods
- REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!,:Rep_Period] .== MODELED_PERIODS_INDEX]
-
- ### Variables ###
-
- # Variables to define inter-period energy transferred between modeled periods
-
- # State of charge of storage at beginning of each modeled period n
- @variable(EP, vSOCw[y in STOR_LONG_DURATION, n in MODELED_PERIODS_INDEX] >= 0)
-
- # Build up in storage inventory over each representative period w
- # Build up inventory can be positive or negative
- @variable(EP, vdSOC[y in STOR_LONG_DURATION, w=1:REP_PERIOD])
-
- if CapacityReserveMargin > 0
- # State of charge held in reserve for storage at beginning of each modeled period n
- @variable(EP, vCAPRES_socw[y in STOR_LONG_DURATION, n in MODELED_PERIODS_INDEX] >= 0)
-
- # Build up in storage inventory held in reserve over each representative period w
- # Build up inventory can be positive or negative
- @variable(EP, vCAPRES_dsoc[y in STOR_LONG_DURATION, w=1:REP_PERIOD])
- end
-
- ### Constraints ###
-
- # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
- # Modified initial state of storage for long-duration storage - initialize wth value carried over from last period
- # Alternative to cSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
- # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @constraint(EP, cSoCBalLongDurationStorageStart[w=1:REP_PERIOD, y in STOR_LONG_DURATION],
- EP[:vS][y,hours_per_subperiod*(w-1)+1] == (1-self_discharge(gen[y]))*(EP[:vS][y,hours_per_subperiod*w]-vdSOC[y,w])
- -(1/efficiency_down(gen[y])*EP[:vP][y,hours_per_subperiod*(w-1)+1])+(efficiency_up(gen[y])*EP[:vCHARGE][y,hours_per_subperiod*(w-1)+1]))
-
- # Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
- ## Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cSoCBalLongDurationStorage[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vSOCw[y, mod1(r+1, NPeriods)] == vSOCw[y,r] + vdSOC[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # Storage at beginning of each modeled period cannot exceed installed energy capacity
- @constraint(EP, cSoCBalLongDurationStorageUpper[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vSOCw[y,r] <= EP[:eTotalCapEnergy][y])
-
- # Initial storage level for representative periods must also adhere to sub-period storage inventory balance
- # Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cSoCBalLongDurationStorageSub[y in STOR_LONG_DURATION, r in REP_PERIODS_INDEX],
- vSOCw[y,r] == EP[:vS][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]] - vdSOC[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # Capacity Reserve Margin policy
- if CapacityReserveMargin > 0
- # LDES Constraints for storage held in reserve
-
- # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
- # Modified initial virtual state of storage for long-duration storage - initialize wth value carried over from last period
- # Alternative to cVSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
- # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @constraint(EP, cVSoCBalLongDurationStorageStart[w=1:REP_PERIOD, y in STOR_LONG_DURATION],
- EP[:vCAPRES_socinreserve][y,hours_per_subperiod*(w-1)+1] == (1-self_discharge(gen[y]))*(EP[:vCAPRES_socinreserve][y,hours_per_subperiod*w]-vCAPRES_dsoc[y,w])
- +(1/efficiency_down(gen[y])*EP[:vCAPRES_discharge][y,hours_per_subperiod*(w-1)+1])-(efficiency_up(gen[y])*EP[:vCAPRES_charge][y,hours_per_subperiod*(w-1)+1]))
-
- # Storage held in reserve at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
- ## Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cVSoCBalLongDurationStorage[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX],
- vCAPRES_socw[y,mod1(r+1, NPeriods)] == vCAPRES_socw[y,r] + vCAPRES_dsoc[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # Initial reserve storage level for representative periods must also adhere to sub-period storage inventory balance
- # Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cVSoCBalLongDurationStorageSub[y in STOR_LONG_DURATION, r in REP_PERIODS_INDEX],
- vCAPRES_socw[y,r] == EP[:vCAPRES_socinreserve][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]] - vCAPRES_dsoc[y,dfPeriodMap[r,:Rep_Period_Index]])
-
- # energy held in reserve at the beginning of each modeled period acts as a lower bound on the total energy held in storage
- @constraint(EP, cSOCMinCapResLongDurationStorage[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX], vSOCw[y,r] >= vCAPRES_socw[y,r])
- end
+ println("Long Duration Storage Module")
+
+ gen = inputs["RESOURCES"]
+
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
+
+ REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
+
+ STOR_LONG_DURATION = inputs["STOR_LONG_DURATION"]
+
+ hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
+
+ dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
+ NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
+
+ MODELED_PERIODS_INDEX = 1:NPeriods
+ REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!, :Rep_Period] .== MODELED_PERIODS_INDEX]
+
+ ### Variables ###
+
+ # Variables to define inter-period energy transferred between modeled periods
+
+ # State of charge of storage at beginning of each modeled period n
+ @variable(EP, vSOCw[y in STOR_LONG_DURATION, n in MODELED_PERIODS_INDEX]>=0)
+
+ # Build up in storage inventory over each representative period w
+ # Build up inventory can be positive or negative
+ @variable(EP, vdSOC[y in STOR_LONG_DURATION, w = 1:REP_PERIOD])
+
+ if CapacityReserveMargin > 0
+ # State of charge held in reserve for storage at beginning of each modeled period n
+ @variable(EP, vCAPRES_socw[y in STOR_LONG_DURATION, n in MODELED_PERIODS_INDEX]>=0)
+
+ # Build up in storage inventory held in reserve over each representative period w
+ # Build up inventory can be positive or negative
+ @variable(EP, vCAPRES_dsoc[y in STOR_LONG_DURATION, w = 1:REP_PERIOD])
+ end
+
+ ### Constraints ###
+
+ # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
+    # Modified initial state of storage for long-duration storage - initialize with value carried over from last period
+ # Alternative to cSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
+ # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
+ @constraint(EP,
+ cSoCBalLongDurationStorageStart[w = 1:REP_PERIOD, y in STOR_LONG_DURATION],
+ EP[:vS][y,
+ hours_per_subperiod * (w - 1) + 1]==(1 - self_discharge(gen[y])) *
+ (EP[:vS][y, hours_per_subperiod * w] - vdSOC[y, w])
+ -
+ (1 / efficiency_down(gen[y]) * EP[:vP][y, hours_per_subperiod * (w - 1) + 1]) +
+ (efficiency_up(gen[y]) * EP[:vCHARGE][y, hours_per_subperiod * (w - 1) + 1]))
+
+ # Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
+ ## Multiply storage build up term from prior period with corresponding weight
+ @constraint(EP,
+ cSoCBalLongDurationStorage[y in STOR_LONG_DURATION, r in MODELED_PERIODS_INDEX],
+ vSOCw[y,
+ mod1(r + 1, NPeriods)]==vSOCw[y, r] + vdSOC[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # Storage at beginning of each modeled period cannot exceed installed energy capacity
+ @constraint(EP,
+ cSoCBalLongDurationStorageUpper[y in STOR_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vSOCw[y, r]<=EP[:eTotalCapEnergy][y])
+
+ # Initial storage level for representative periods must also adhere to sub-period storage inventory balance
+ # Initial storage = Final storage - change in storage inventory across representative period
+ @constraint(EP,
+ cSoCBalLongDurationStorageSub[y in STOR_LONG_DURATION, r in REP_PERIODS_INDEX],
+ vSOCw[y,
+ r]==EP[:vS][y, hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]] -
+ vdSOC[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # Capacity Reserve Margin policy
+ if CapacityReserveMargin > 0
+ # LDES Constraints for storage held in reserve
+
+ # Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
+        # Modified initial virtual state of storage for long-duration storage - initialize with value carried over from last period
+ # Alternative to cVSoCBalStart constraint which is included when not modeling operations wrapping and long duration storage
+ # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
+ @constraint(EP,
+ cVSoCBalLongDurationStorageStart[w = 1:REP_PERIOD, y in STOR_LONG_DURATION],
+ EP[:vCAPRES_socinreserve][y,
+ hours_per_subperiod * (w - 1) + 1]==(1 - self_discharge(gen[y])) *
+ (EP[:vCAPRES_socinreserve][y, hours_per_subperiod * w] - vCAPRES_dsoc[y, w])
+ +
+ (1 / efficiency_down(gen[y]) *
+ EP[:vCAPRES_discharge][y, hours_per_subperiod * (w - 1) + 1]) -
+ (efficiency_up(gen[y]) *
+ EP[:vCAPRES_charge][y, hours_per_subperiod * (w - 1) + 1]))
+
+ # Storage held in reserve at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
+ ## Multiply storage build up term from prior period with corresponding weight
+ @constraint(EP,
+ cVSoCBalLongDurationStorage[y in STOR_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vCAPRES_socw[y,
+ mod1(r + 1, NPeriods)]==vCAPRES_socw[y, r] + vCAPRES_dsoc[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # Initial reserve storage level for representative periods must also adhere to sub-period storage inventory balance
+ # Initial storage = Final storage - change in storage inventory across representative period
+ @constraint(EP,
+ cVSoCBalLongDurationStorageSub[y in STOR_LONG_DURATION, r in REP_PERIODS_INDEX],
+ vCAPRES_socw[y,
+ r]==EP[:vCAPRES_socinreserve][y,
+ hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]] - vCAPRES_dsoc[y, dfPeriodMap[r, :Rep_Period_Index]])
+
+ # energy held in reserve at the beginning of each modeled period acts as a lower bound on the total energy held in storage
+ @constraint(EP,
+ cSOCMinCapResLongDurationStorage[y in STOR_LONG_DURATION,
+ r in MODELED_PERIODS_INDEX],
+ vSOCw[y, r]>=vCAPRES_socw[y, r])
+ end
end
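The inter-period balance `cSoCBalLongDurationStorage` chains modeled periods in a ring: the state of charge at the start of period `r + 1` equals the state at the start of period `r` plus the net build-up `vdSOC` of its representative period, with `mod1` wrapping the last period back to the first. The wrap-around indexing in isolation:

```julia
# mod1-based wrap-around used when chaining modeled periods, shown on its own.
NPeriods = 4
next_period(r) = mod1(r + 1, NPeriods)

@assert next_period(3) == 4
@assert next_period(NPeriods) == 1   # the period after the last is period 1 again
```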
diff --git a/src/model/resources/storage/storage.jl b/src/model/resources/storage/storage.jl
index baa03217aa..ed6c0630ce 100644
--- a/src/model/resources/storage/storage.jl
+++ b/src/model/resources/storage/storage.jl
@@ -129,55 +129,65 @@ Finally, the constraints on maximum discharge rate are replaced by the following
The above reserve related constraints are established by ```storage_all_operational_reserves!()``` in ```storage_all.jl```
"""
function storage!(EP::Model, inputs::Dict, setup::Dict)
+ println("Storage Resources Module")
+ gen = inputs["RESOURCES"]
+ T = inputs["T"]
+ STOR_ALL = inputs["STOR_ALL"]
- println("Storage Resources Module")
- gen = inputs["RESOURCES"]
- T = inputs["T"]
- STOR_ALL = inputs["STOR_ALL"]
-
- p = inputs["hours_per_subperiod"]
+ p = inputs["hours_per_subperiod"]
rep_periods = inputs["REP_PERIOD"]
- EnergyShareRequirement = setup["EnergyShareRequirement"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
- IncludeLossesInESR = setup["IncludeLossesInESR"]
- StorageVirtualDischarge = setup["StorageVirtualDischarge"]
-
- if !isempty(STOR_ALL)
- investment_energy!(EP, inputs, setup)
- storage_all!(EP, inputs, setup)
+ EnergyShareRequirement = setup["EnergyShareRequirement"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
+ IncludeLossesInESR = setup["IncludeLossesInESR"]
+ StorageVirtualDischarge = setup["StorageVirtualDischarge"]
- # Include Long Duration Storage only when modeling representative periods and long-duration storage
- if rep_periods > 1 && !isempty(inputs["STOR_LONG_DURATION"])
- long_duration_storage!(EP, inputs, setup)
- end
- end
+ if !isempty(STOR_ALL)
+ investment_energy!(EP, inputs, setup)
+ storage_all!(EP, inputs, setup)
- if !isempty(inputs["STOR_ASYMMETRIC"])
- investment_charge!(EP, inputs, setup)
- storage_asymmetric!(EP, inputs, setup)
- end
+ # Include Long Duration Storage only when modeling representative periods and long-duration storage
+ if rep_periods > 1 && !isempty(inputs["STOR_LONG_DURATION"])
+ long_duration_storage!(EP, inputs, setup)
+ end
+ end
- if !isempty(inputs["STOR_SYMMETRIC"])
- storage_symmetric!(EP, inputs, setup)
- end
+ if !isempty(inputs["STOR_ASYMMETRIC"])
+ investment_charge!(EP, inputs, setup)
+ storage_asymmetric!(EP, inputs, setup)
+ end
- # ESR Lossses
- if EnergyShareRequirement >= 1
- if IncludeLossesInESR == 1
- @expression(EP, eESRStor[ESR=1:inputs["nESR"]], sum(inputs["dfESR"][z,ESR]*sum(EP[:eELOSS][y] for y in intersect(resources_in_zone_by_rid(gen,z),STOR_ALL)) for z=findall(x->x>0,inputs["dfESR"][:,ESR])))
- add_similar_to_expression!(EP[:eESR], -eESRStor)
- end
- end
+ if !isempty(inputs["STOR_SYMMETRIC"])
+ storage_symmetric!(EP, inputs, setup)
+ end
- # Capacity Reserves Margin policy
- if CapacityReserveMargin > 0
- @expression(EP, eCapResMarBalanceStor[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * (EP[:vP][y,t] - EP[:vCHARGE][y,t]) for y in STOR_ALL))
- if StorageVirtualDischarge > 0
- @expression(EP, eCapResMarBalanceStorVirtual[res=1:inputs["NCapacityReserveMargin"], t=1:T], sum(derating_factor(gen[y], tag=res) * (EP[:vCAPRES_discharge][y,t] - EP[:vCAPRES_charge][y,t]) for y in STOR_ALL))
- add_similar_to_expression!(eCapResMarBalanceStor,eCapResMarBalanceStorVirtual)
- end
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceStor)
- end
+    # ESR Losses
+ if EnergyShareRequirement >= 1
+ if IncludeLossesInESR == 1
+ @expression(EP,
+ eESRStor[ESR = 1:inputs["nESR"]],
+ sum(inputs["dfESR"][z, ESR] * sum(EP[:eELOSS][y]
+ for y in intersect(resources_in_zone_by_rid(gen, z), STOR_ALL))
+ for z in findall(x -> x > 0, inputs["dfESR"][:, ESR])))
+ add_similar_to_expression!(EP[:eESR], -eESRStor)
+ end
+ end
+ # Capacity Reserves Margin policy
+ if CapacityReserveMargin > 0
+ @expression(EP,
+ eCapResMarBalanceStor[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ sum(derating_factor(gen[y], tag = res) * (EP[:vP][y, t] - EP[:vCHARGE][y, t])
+ for y in STOR_ALL))
+ if StorageVirtualDischarge > 0
+ @expression(EP,
+ eCapResMarBalanceStorVirtual[res = 1:inputs["NCapacityReserveMargin"],
+ t = 1:T],
+ sum(derating_factor(gen[y], tag = res) *
+ (EP[:vCAPRES_discharge][y, t] - EP[:vCAPRES_charge][y, t])
+ for y in STOR_ALL))
+ add_similar_to_expression!(eCapResMarBalanceStor, eCapResMarBalanceStorVirtual)
+ end
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceStor)
+ end
end
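When the capacity reserve margin policy is active, each storage cluster's contribution in hour `t` is its derated net discharge, with the virtual charge/discharge terms added only when `StorageVirtualDischarge` is on. A toy numeric check (values are illustrative, not from any GenX case):

```julia
# Derated net contribution of one storage cluster to the reserve margin balance,
# mirroring eCapResMarBalanceStor (+ the optional virtual terms).
derating = 0.95
contribution(p, charge; virt_dis = 0.0, virt_chg = 0.0) =
    derating * ((p - charge) + (virt_dis - virt_chg))

@assert contribution(10.0, 2.0) == 0.95 * 8.0
@assert contribution(10.0, 2.0; virt_dis = 1.0) == 0.95 * 9.0
```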
diff --git a/src/model/resources/storage/storage_all.jl b/src/model/resources/storage/storage_all.jl
index 13c433235c..cc002c78f5 100644
--- a/src/model/resources/storage/storage_all.jl
+++ b/src/model/resources/storage/storage_all.jl
@@ -4,155 +4,203 @@
Sets up variables and constraints common to all storage resources. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_all!(EP::Model, inputs::Dict, setup::Dict)
- # Setup variables, constraints, and expressions common to all storage resources
- println("Storage Core Resources Module")
+ # Setup variables, constraints, and expressions common to all storage resources
+ println("Storage Core Resources Module")
- gen = inputs["RESOURCES"]
- OperationalReserves = setup["OperationalReserves"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ gen = inputs["RESOURCES"]
+ OperationalReserves = setup["OperationalReserves"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
+
+ virtual_discharge_cost = inputs["VirtualChargeDischargeCost"]
+
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+
+ STOR_ALL = inputs["STOR_ALL"]
+ STOR_SHORT_DURATION = inputs["STOR_SHORT_DURATION"]
+ representative_periods = inputs["REP_PERIOD"]
+
+ START_SUBPERIODS = inputs["START_SUBPERIODS"]
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
- virtual_discharge_cost = inputs["VirtualChargeDischargeCost"]
+ hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ ### Variables ###
- STOR_ALL = inputs["STOR_ALL"]
- STOR_SHORT_DURATION = inputs["STOR_SHORT_DURATION"]
- representative_periods = inputs["REP_PERIOD"]
+ # Storage level of resource "y" at hour "t" [MWh] on zone "z" - unbounded
+ @variable(EP, vS[y in STOR_ALL, t = 1:T]>=0)
- START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ # Energy withdrawn from grid by resource "y" at hour "t" [MWh] on zone "z"
+ @variable(EP, vCHARGE[y in STOR_ALL, t = 1:T]>=0)
- hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
-
- ### Variables ###
-
- # Storage level of resource "y" at hour "t" [MWh] on zone "z" - unbounded
- @variable(EP, vS[y in STOR_ALL, t=1:T] >= 0);
-
- # Energy withdrawn from grid by resource "y" at hour "t" [MWh] on zone "z"
- @variable(EP, vCHARGE[y in STOR_ALL, t=1:T] >= 0);
-
- if CapacityReserveMargin > 0
- # Virtual discharge contributing to capacity reserves at timestep t for storage cluster y
- @variable(EP, vCAPRES_discharge[y in STOR_ALL, t=1:T] >= 0)
-
- # Virtual charge contributing to capacity reserves at timestep t for storage cluster y
- @variable(EP, vCAPRES_charge[y in STOR_ALL, t=1:T] >= 0)
-
- # Total state of charge being held in reserve at timestep t for storage cluster y
- @variable(EP, vCAPRES_socinreserve[y in STOR_ALL, t=1:T] >= 0)
- end
-
- ### Expressions ###
-
- # Energy losses related to technologies (increase in effective demand)
- @expression(EP, eELOSS[y in STOR_ALL], sum(inputs["omega"][t]*EP[:vCHARGE][y,t] for t in 1:T) - sum(inputs["omega"][t]*EP[:vP][y,t] for t in 1:T))
-
- ## Objective Function Expressions ##
-
- #Variable costs of "charging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_in[y in STOR_ALL,t=1:T], inputs["omega"][t]*var_om_cost_per_mwh_in(gen[y])*vCHARGE[y,t])
-
- # Sum individual resource contributions to variable charging costs to get total variable charging costs
- @expression(EP, eTotalCVarInT[t=1:T], sum(eCVar_in[y,t] for y in STOR_ALL))
- @expression(EP, eTotalCVarIn, sum(eTotalCVarInT[t] for t in 1:T))
- add_to_expression!(EP[:eObj], eTotalCVarIn)
-
- if CapacityReserveMargin > 0
- #Variable costs of "virtual charging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_in_virtual[y in STOR_ALL,t=1:T], inputs["omega"][t]*virtual_discharge_cost*vCAPRES_charge[y,t])
- @expression(EP, eTotalCVarInT_virtual[t=1:T], sum(eCVar_in_virtual[y,t] for y in STOR_ALL))
- @expression(EP, eTotalCVarIn_virtual, sum(eTotalCVarInT_virtual[t] for t in 1:T))
- EP[:eObj] += eTotalCVarIn_virtual
-
- #Variable costs of "virtual discharging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_out_virtual[y in STOR_ALL,t=1:T], inputs["omega"][t]*virtual_discharge_cost*vCAPRES_discharge[y,t])
- @expression(EP, eTotalCVarOutT_virtual[t=1:T], sum(eCVar_out_virtual[y,t] for y in STOR_ALL))
- @expression(EP, eTotalCVarOut_virtual, sum(eTotalCVarOutT_virtual[t] for t in 1:T))
- EP[:eObj] += eTotalCVarOut_virtual
- end
-
- ## Power Balance Expressions ##
-
- # Term to represent net dispatch from storage in any period
- @expression(EP, ePowerBalanceStor[t=1:T, z=1:Z],
- sum(EP[:vP][y,t]-EP[:vCHARGE][y,t] for y in intersect(resources_in_zone_by_rid(gen,z),STOR_ALL))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceStor)
-
- ### Constraints ###
-
- ## Storage energy capacity and state of charge related constraints:
-
- # Links state of charge in first time step with decisions in last time step of each subperiod
- # We use a modified formulation of this constraint (cSoCBalLongDurationStorageStart) when operations wrapping and long duration storage are being modeled
- if representative_periods > 1 && !isempty(inputs["STOR_LONG_DURATION"])
- CONSTRAINTSET = STOR_SHORT_DURATION
- else
- CONSTRAINTSET = STOR_ALL
- end
- @constraint(EP, cSoCBalStart[t in START_SUBPERIODS, y in CONSTRAINTSET], EP[:vS][y,t] ==
- EP[:vS][y,t+hours_per_subperiod-1] - (1/efficiency_down(gen[y]) * EP[:vP][y,t])
- + (efficiency_up(gen[y])*EP[:vCHARGE][y,t]) - (self_discharge(gen[y]) * EP[:vS][y,t+hours_per_subperiod-1]))
-
- @constraints(EP, begin
-
- # Maximum energy stored must be less than energy capacity
- [y in STOR_ALL, t in 1:T], EP[:vS][y,t] <= EP[:eTotalCapEnergy][y]
-
- # energy stored for the next hour
- cSoCBalInterior[t in INTERIOR_SUBPERIODS, y in STOR_ALL], EP[:vS][y,t] ==
- EP[:vS][y,t-1]-(1/efficiency_down(gen[y])*EP[:vP][y,t])+(efficiency_up(gen[y])*EP[:vCHARGE][y,t])-(self_discharge(gen[y])*EP[:vS][y,t-1])
- end)
-
- # Storage discharge and charge power (and reserve contribution) related constraints:
- if OperationalReserves == 1
- storage_all_operational_reserves!(EP, inputs, setup)
- else
- if CapacityReserveMargin > 0
- # Note: maximum charge rate is also constrained by maximum charge power capacity, but as this differs by storage type,
- # this constraint is set in functions below for each storage type
-
- # Maximum discharging rate must be less than power rating OR available stored energy in the prior period, whichever is less
- # wrapping from end of sample period to start of sample period for energy capacity constraint
- @constraints(EP, begin
- [y in STOR_ALL, t=1:T], EP[:vP][y,t] + EP[:vCAPRES_discharge][y,t] <= EP[:eTotalCap][y]
- [y in STOR_ALL, t=1:T], EP[:vP][y,t] + EP[:vCAPRES_discharge][y,t] <= EP[:vS][y, hoursbefore(hours_per_subperiod,t,1)]*efficiency_down(gen[y])
- end)
- else
- @constraints(EP, begin
- [y in STOR_ALL, t=1:T], EP[:vP][y,t] <= EP[:eTotalCap][y]
- [y in STOR_ALL, t=1:T], EP[:vP][y,t] <= EP[:vS][y, hoursbefore(hours_per_subperiod,t,1)]*efficiency_down(gen[y])
- end)
- end
- end
-
- # From CO2 Policy module
- expr = @expression(EP, [z=1:Z], sum(EP[:eELOSS][y] for y in intersect(STOR_ALL, resources_in_zone_by_rid(gen,z))))
- add_similar_to_expression!(EP[:eELOSSByZone], expr)
-
- # Capacity Reserve Margin policy
- if CapacityReserveMargin > 0
- # Constraints governing energy held in reserve when storage makes virtual capacity reserve margin contributions:
-
- # Links energy held in reserve in first time step with decisions in last time step of each subperiod
- # We use a modified formulation of this constraint (cVSoCBalLongDurationStorageStart) when operations wrapping and long duration storage are being modeled
- @constraint(EP, cVSoCBalStart[t in START_SUBPERIODS, y in CONSTRAINTSET], EP[:vCAPRES_socinreserve][y,t] ==
- EP[:vCAPRES_socinreserve][y,t+hours_per_subperiod-1] + (1/efficiency_down(gen[y]) * EP[:vCAPRES_discharge][y,t])
- - (efficiency_up(gen[y])*EP[:vCAPRES_charge][y,t]) - (self_discharge(gen[y]) * EP[:vCAPRES_socinreserve][y,t+hours_per_subperiod-1]))
-
- # energy held in reserve for the next hour
- @constraint(EP, cVSoCBalInterior[t in INTERIOR_SUBPERIODS, y in STOR_ALL], EP[:vCAPRES_socinreserve][y,t] ==
- EP[:vCAPRES_socinreserve][y,t-1]+(1/efficiency_down(gen[y])*EP[:vCAPRES_discharge][y,t])-(efficiency_up(gen[y])*EP[:vCAPRES_charge][y,t])-(self_discharge(gen[y])*EP[:vCAPRES_socinreserve][y,t-1]))
-
- # energy held in reserve acts as a lower bound on the total energy held in storage
- @constraint(EP, cSOCMinCapRes[t in 1:T, y in STOR_ALL], EP[:vS][y,t] >= EP[:vCAPRES_socinreserve][y,t])
- end
+ if CapacityReserveMargin > 0
+ # Virtual discharge contributing to capacity reserves at timestep t for storage cluster y
+ @variable(EP, vCAPRES_discharge[y in STOR_ALL, t = 1:T]>=0)
+
+ # Virtual charge contributing to capacity reserves at timestep t for storage cluster y
+ @variable(EP, vCAPRES_charge[y in STOR_ALL, t = 1:T]>=0)
+
+ # Total state of charge being held in reserve at timestep t for storage cluster y
+ @variable(EP, vCAPRES_socinreserve[y in STOR_ALL, t = 1:T]>=0)
+ end
+
+ ### Expressions ###
+
+ # Energy losses related to technologies (increase in effective demand)
+ @expression(EP,
+ eELOSS[y in STOR_ALL],
+ sum(inputs["omega"][t] * EP[:vCHARGE][y, t] for t in 1:T)-sum(inputs["omega"][t] *
+ EP[:vP][y, t]
+ for t in 1:T))
+
+ ## Objective Function Expressions ##
+
+ #Variable costs of "charging" for technologies "y" during hour "t" in zone "z"
+ @expression(EP,
+ eCVar_in[y in STOR_ALL, t = 1:T],
+ inputs["omega"][t]*var_om_cost_per_mwh_in(gen[y])*vCHARGE[y, t])
+
+ # Sum individual resource contributions to variable charging costs to get total variable charging costs
+ @expression(EP, eTotalCVarInT[t = 1:T], sum(eCVar_in[y, t] for y in STOR_ALL))
+ @expression(EP, eTotalCVarIn, sum(eTotalCVarInT[t] for t in 1:T))
+ add_to_expression!(EP[:eObj], eTotalCVarIn)
+
+ if CapacityReserveMargin > 0
+ #Variable costs of "virtual charging" for technologies "y" during hour "t" in zone "z"
+ @expression(EP,
+ eCVar_in_virtual[y in STOR_ALL, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*vCAPRES_charge[y, t])
+ @expression(EP,
+ eTotalCVarInT_virtual[t = 1:T],
+ sum(eCVar_in_virtual[y, t] for y in STOR_ALL))
+ @expression(EP, eTotalCVarIn_virtual, sum(eTotalCVarInT_virtual[t] for t in 1:T))
+ EP[:eObj] += eTotalCVarIn_virtual
+
+ #Variable costs of "virtual discharging" for technologies "y" during hour "t" in zone "z"
+ @expression(EP,
+ eCVar_out_virtual[y in STOR_ALL, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*vCAPRES_discharge[y, t])
+ @expression(EP,
+ eTotalCVarOutT_virtual[t = 1:T],
+ sum(eCVar_out_virtual[y, t] for y in STOR_ALL))
+ @expression(EP, eTotalCVarOut_virtual, sum(eTotalCVarOutT_virtual[t] for t in 1:T))
+ EP[:eObj] += eTotalCVarOut_virtual
+ end
+
+ ## Power Balance Expressions ##
+
+ # Term to represent net dispatch from storage in any period
+ @expression(EP, ePowerBalanceStor[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] - EP[:vCHARGE][y, t]
+ for y in intersect(resources_in_zone_by_rid(gen, z), STOR_ALL)))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceStor)
+
+ ### Constraints ###
+
+ ## Storage energy capacity and state of charge related constraints:
+
+ # Links state of charge in first time step with decisions in last time step of each subperiod
+ # We use a modified formulation of this constraint (cSoCBalLongDurationStorageStart) when operations wrapping and long duration storage are being modeled
+ if representative_periods > 1 && !isempty(inputs["STOR_LONG_DURATION"])
+ CONSTRAINTSET = STOR_SHORT_DURATION
+ else
+ CONSTRAINTSET = STOR_ALL
+ end
+ @constraint(EP,
+ cSoCBalStart[t in START_SUBPERIODS, y in CONSTRAINTSET],
+ EP[:vS][y,
+ t]==
+ EP[:vS][y, t + hours_per_subperiod - 1] -
+ (1 / efficiency_down(gen[y]) * EP[:vP][y, t])
+ +
+ (efficiency_up(gen[y]) * EP[:vCHARGE][y, t]) -
+ (self_discharge(gen[y]) * EP[:vS][y, t + hours_per_subperiod - 1]))
+
+ @constraints(EP,
+ begin
+
+ # Maximum energy stored must be less than energy capacity
+ [y in STOR_ALL, t in 1:T], EP[:vS][y, t] <= EP[:eTotalCapEnergy][y]
+
+ # energy stored for the next hour
+ cSoCBalInterior[t in INTERIOR_SUBPERIODS, y in STOR_ALL],
+ EP[:vS][y, t] ==
+ EP[:vS][y, t - 1] - (1 / efficiency_down(gen[y]) * EP[:vP][y, t]) +
+ (efficiency_up(gen[y]) * EP[:vCHARGE][y, t]) -
+ (self_discharge(gen[y]) * EP[:vS][y, t - 1])
+ end)
+
+ # Storage discharge and charge power (and reserve contribution) related constraints:
+ if OperationalReserves == 1
+ storage_all_operational_reserves!(EP, inputs, setup)
+ else
+ if CapacityReserveMargin > 0
+ # Note: maximum charge rate is also constrained by maximum charge power capacity, but as this differs by storage type,
+ # this constraint is set in functions below for each storage type
+
+ # Maximum discharging rate must be less than power rating OR available stored energy in the prior period, whichever is less
+ # wrapping from end of sample period to start of sample period for energy capacity constraint
+ @constraints(EP,
+ begin
+ [y in STOR_ALL, t = 1:T],
+ EP[:vP][y, t] + EP[:vCAPRES_discharge][y, t] <= EP[:eTotalCap][y]
+ [y in STOR_ALL, t = 1:T],
+ EP[:vP][y, t] + EP[:vCAPRES_discharge][y, t] <=
+ EP[:vS][y, hoursbefore(hours_per_subperiod, t, 1)] *
+ efficiency_down(gen[y])
+ end)
+ else
+ @constraints(EP,
+ begin
+ [y in STOR_ALL, t = 1:T], EP[:vP][y, t] <= EP[:eTotalCap][y]
+ [y in STOR_ALL, t = 1:T],
+ EP[:vP][y, t] <=
+ EP[:vS][y, hoursbefore(hours_per_subperiod, t, 1)] *
+ efficiency_down(gen[y])
+ end)
+ end
+ end
+
+ # From CO2 Policy module
+ expr = @expression(EP,
+ [z = 1:Z],
+ sum(EP[:eELOSS][y] for y in intersect(STOR_ALL, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eELOSSByZone], expr)
+
+ # Capacity Reserve Margin policy
+ if CapacityReserveMargin > 0
+ # Constraints governing energy held in reserve when storage makes virtual capacity reserve margin contributions:
+
+ # Links energy held in reserve in first time step with decisions in last time step of each subperiod
+ # We use a modified formulation of this constraint (cVSoCBalLongDurationStorageStart) when operations wrapping and long duration storage are being modeled
+ @constraint(EP,
+ cVSoCBalStart[t in START_SUBPERIODS, y in CONSTRAINTSET],
+ EP[:vCAPRES_socinreserve][y,
+ t]==
+ EP[:vCAPRES_socinreserve][y, t + hours_per_subperiod - 1] +
+ (1 / efficiency_down(gen[y]) * EP[:vCAPRES_discharge][y, t])
+ -
+ (efficiency_up(gen[y]) * EP[:vCAPRES_charge][y, t]) - (self_discharge(gen[y]) *
+ EP[:vCAPRES_socinreserve][y, t + hours_per_subperiod - 1]))
+
+ # energy held in reserve for the next hour
+ @constraint(EP,
+ cVSoCBalInterior[t in INTERIOR_SUBPERIODS, y in STOR_ALL],
+ EP[:vCAPRES_socinreserve][y,
+ t]==
+ EP[:vCAPRES_socinreserve][y, t - 1] +
+ (1 / efficiency_down(gen[y]) * EP[:vCAPRES_discharge][y, t]) -
+ (efficiency_up(gen[y]) * EP[:vCAPRES_charge][y, t]) -
+ (self_discharge(gen[y]) * EP[:vCAPRES_socinreserve][y, t - 1]))
+
+ # energy held in reserve acts as a lower bound on the total energy held in storage
+ @constraint(EP,
+ cSOCMinCapRes[t in 1:T, y in STOR_ALL],
+ EP[:vS][y, t]>=EP[:vCAPRES_socinreserve][y, t])
+ end
end
function storage_all_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
-
gen = inputs["RESOURCES"]
T = inputs["T"]
p = inputs["hours_per_subperiod"]
@@ -176,27 +224,35 @@ function storage_all_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
eTotalCap = EP[:eTotalCap]
eTotalCapEnergy = EP[:eTotalCapEnergy]
- # Maximum storage contribution to reserves is a specified fraction of installed capacity
- @constraint(EP, [y in STOR_REG, t in 1:T], vREG[y, t] <= reg_max(gen[y]) * eTotalCap[y])
- @constraint(EP, [y in STOR_RSV, t in 1:T], vRSV[y, t] <= rsv_max(gen[y]) * eTotalCap[y])
+ # Maximum storage contribution to reserves is a specified fraction of installed capacity
+ @constraint(EP, [y in STOR_REG, t in 1:T], vREG[y, t]<=reg_max(gen[y]) * eTotalCap[y])
+ @constraint(EP, [y in STOR_RSV, t in 1:T], vRSV[y, t]<=rsv_max(gen[y]) * eTotalCap[y])
- # Actual contribution to regulation and reserves is sum of auxilary variables for portions contributed during charging and discharging
- @constraint(EP, [y in STOR_REG, t in 1:T], vREG[y, t] == vREG_charge[y, t] + vREG_discharge[y, t])
- @constraint(EP, [y in STOR_RSV, t in 1:T], vRSV[y, t] == vRSV_charge[y, t] + vRSV_discharge[y, t])
+    # Actual contribution to regulation and reserves is sum of auxiliary variables for portions contributed during charging and discharging
+ @constraint(EP,
+ [y in STOR_REG, t in 1:T],
+ vREG[y, t]==vREG_charge[y, t] + vREG_discharge[y, t])
+ @constraint(EP,
+ [y in STOR_RSV, t in 1:T],
+ vRSV[y, t]==vRSV_charge[y, t] + vRSV_discharge[y, t])
# Maximum charging rate plus contribution to reserves up must be greater than zero
# Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
expr = extract_time_series_to_expression(vCHARGE, STOR_ALL)
add_similar_to_expression!(expr[STOR_REG, :], -vREG_charge[STOR_REG, :])
add_similar_to_expression!(expr[STOR_RSV, :], -vRSV_charge[STOR_RSV, :])
- @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t] >= 0)
+ @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t]>=0)
# Maximum discharging rate and contribution to reserves down must be greater than zero
# Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
- @constraint(EP, [y in STOR_REG, t in 1:T], vP[y, t] - vREG_discharge[y, t] >= 0)
+ @constraint(EP, [y in STOR_REG, t in 1:T], vP[y, t] - vREG_discharge[y, t]>=0)
# Maximum charging rate plus contribution to regulation down must be less than available storage capacity
- @constraint(EP, [y in STOR_REG, t in 1:T], efficiency_up(gen[y])*(vCHARGE[y, t]+vREG_charge[y, t]) <= eTotalCapEnergy[y]-vS[y, hoursbefore(p,t,1)])
+ @constraint(EP,
+ [y in STOR_REG, t in 1:T],
+ efficiency_up(gen[y]) *
+ (vCHARGE[y, t] +
+ vREG_charge[y, t])<=eTotalCapEnergy[y] - vS[y, hoursbefore(p, t, 1)])
# Note: maximum charge rate is also constrained by maximum charge power capacity, but as this differs by storage type,
# this constraint is set in functions below for each storage type
@@ -208,7 +264,9 @@ function storage_all_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
add_similar_to_expression!(expr[STOR_ALL, :], vCAPRES_discharge[STOR_ALL, :])
end
# Maximum discharging rate and contribution to reserves up must be less than power rating
- @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t] <= eTotalCap[y])
+ @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t]<=eTotalCap[y])
# Maximum discharging rate and contribution to reserves up must be less than available stored energy in prior period
- @constraint(EP, [y in STOR_ALL, t in 1:T], expr[y, t] <= vS[y, hoursbefore(p,t,1)] * efficiency_down(gen[y]))
+ @constraint(EP,
+ [y in STOR_ALL, t in 1:T],
+ expr[y, t]<=vS[y, hoursbefore(p, t, 1)] * efficiency_down(gen[y]))
end
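For readers skimming the reformatted `storage_all!` above, its core is the state-of-charge balance that wraps each subperiod back onto itself. A minimal self-contained sketch of that wrap-around balance (single resource, invented efficiencies; not part of the patch):

```julia
# Sketch only: one storage resource over one representative period, with a simple
# wrap-around index in place of GenX's hoursbefore().
using JuMP

T = 24                                    # hours in the subperiod
eta_up, eta_dn, sd = 0.95, 0.95, 0.001    # hypothetical charge/discharge efficiency, self-discharge

prev(t) = t == 1 ? T : t - 1              # hour before t, wrapping hour 1 back to hour T

model = Model()
@variable(model, vS[1:T] >= 0)            # state of charge
@variable(model, vP[1:T] >= 0)            # discharge
@variable(model, vCHARGE[1:T] >= 0)       # charge

# Same structure as cSoCBalStart / cSoCBalInterior above
@constraint(model, [t = 1:T],
    vS[t] == vS[prev(t)] - vP[t] / eta_dn + eta_up * vCHARGE[t] - sd * vS[prev(t)])
```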
diff --git a/src/model/resources/storage/storage_asymmetric.jl b/src/model/resources/storage/storage_asymmetric.jl
index f77fe0fa23..8554d129e8 100644
--- a/src/model/resources/storage/storage_asymmetric.jl
+++ b/src/model/resources/storage/storage_asymmetric.jl
@@ -4,34 +4,37 @@
Sets up variables and constraints specific to storage resources with asymmetric charge and discharge capacities. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_asymmetric!(EP::Model, inputs::Dict, setup::Dict)
- # Set up additional variables, constraints, and expressions associated with storage resources with asymmetric charge & discharge capacity
- # (e.g. most chemical, thermal, and mechanical storage options with distinct charge & discharge components/processes)
- # STOR = 2 corresponds to storage with distinct power and energy capacity decisions and distinct charge and discharge power capacity decisions/ratings
+ # Set up additional variables, constraints, and expressions associated with storage resources with asymmetric charge & discharge capacity
+ # (e.g. most chemical, thermal, and mechanical storage options with distinct charge & discharge components/processes)
+ # STOR = 2 corresponds to storage with distinct power and energy capacity decisions and distinct charge and discharge power capacity decisions/ratings
- println("Storage Resources with Asmymetric Charge/Discharge Capacity Module")
+    println("Storage Resources with Asymmetric Charge/Discharge Capacity Module")
- OperationalReserves = setup["OperationalReserves"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ OperationalReserves = setup["OperationalReserves"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"]
+ STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"]
- ### Constraints ###
-
- # Storage discharge and charge power (and reserve contribution) related constraints for symmetric storage resources:
- if OperationalReserves == 1
- storage_asymmetric_operational_reserves!(EP, inputs, setup)
- else
- if CapacityReserveMargin > 0
- # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than charge power rating
- @constraint(EP, [y in STOR_ASYMMETRIC, t in 1:T], EP[:vCHARGE][y,t] + EP[:vCAPRES_charge][y,t] <= EP[:eTotalCapCharge][y])
- else
- # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than charge power rating
- @constraint(EP, [y in STOR_ASYMMETRIC, t in 1:T], EP[:vCHARGE][y,t] <= EP[:eTotalCapCharge][y])
- end
- end
+ ### Constraints ###
+ # Storage discharge and charge power (and reserve contribution) related constraints for symmetric storage resources:
+ if OperationalReserves == 1
+ storage_asymmetric_operational_reserves!(EP, inputs, setup)
+ else
+ if CapacityReserveMargin > 0
+ # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than charge power rating
+ @constraint(EP,
+ [y in STOR_ASYMMETRIC, t in 1:T],
+ EP[:vCHARGE][y, t] + EP[:vCAPRES_charge][y, t]<=EP[:eTotalCapCharge][y])
+ else
+ # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than charge power rating
+ @constraint(EP,
+ [y in STOR_ASYMMETRIC, t in 1:T],
+ EP[:vCHARGE][y, t]<=EP[:eTotalCapCharge][y])
+ end
+ end
end
@doc raw"""
@@ -40,12 +43,11 @@ end
Sets up variables and constraints specific to storage resources with asymmetric charge and discharge capacities when reserves are modeled. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_asymmetric_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
+ T = inputs["T"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"] > 0
- T = inputs["T"]
- CapacityReserveMargin = setup["CapacityReserveMargin"] > 0
-
- STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"]
- STOR_ASYM_REG = intersect(STOR_ASYMMETRIC, inputs["REG"]) # Set of asymmetric storage resources with REG reserves
+ STOR_ASYMMETRIC = inputs["STOR_ASYMMETRIC"]
+ STOR_ASYM_REG = intersect(STOR_ASYMMETRIC, inputs["REG"]) # Set of asymmetric storage resources with REG reserves
vCHARGE = EP[:vCHARGE]
vREG_charge = EP[:vREG_charge]
@@ -55,7 +57,8 @@ function storage_asymmetric_operational_reserves!(EP::Model, inputs::Dict, setup
add_similar_to_expression!(expr[STOR_ASYM_REG, :], vREG_charge[STOR_ASYM_REG, :])
if CapacityReserveMargin
vCAPRES_charge = EP[:vCAPRES_charge]
- add_similar_to_expression!(expr[STOR_ASYMMETRIC, :], vCAPRES_charge[STOR_ASYMMETRIC, :])
+ add_similar_to_expression!(expr[STOR_ASYMMETRIC, :],
+ vCAPRES_charge[STOR_ASYMMETRIC, :])
end
- @constraint(EP, [y in STOR_ASYMMETRIC, t in 1:T], expr[y, t] <= eTotalCapCharge[y])
+ @constraint(EP, [y in STOR_ASYMMETRIC, t in 1:T], expr[y, t]<=eTotalCapCharge[y])
end
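The asymmetric-storage hunk above boils down to a single cap on the charging side, with the virtual-charge term included only when a capacity reserve margin is active. A hedged toy version (invented rating; not part of the patch):

```julia
# Sketch only: charge-rate cap for asymmetric storage; total_cap_charge is a
# made-up rating standing in for EP[:eTotalCapCharge].
using JuMP

T = 24
total_cap_charge = 100.0                   # MW, hypothetical
capacity_reserve_margin = true             # toggle mirroring CapacityReserveMargin > 0

model = Model()
@variable(model, vCHARGE[1:T] >= 0)
@variable(model, vCAPRES_charge[1:T] >= 0)

if capacity_reserve_margin
    # physical plus virtual charging share the charge power rating
    @constraint(model, [t = 1:T], vCHARGE[t] + vCAPRES_charge[t] <= total_cap_charge)
else
    @constraint(model, [t = 1:T], vCHARGE[t] <= total_cap_charge)
end
```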
diff --git a/src/model/resources/storage/storage_symmetric.jl b/src/model/resources/storage/storage_symmetric.jl
index 3ac73f2ed2..3c20d2368b 100644
--- a/src/model/resources/storage/storage_symmetric.jl
+++ b/src/model/resources/storage/storage_symmetric.jl
@@ -4,40 +4,44 @@
Sets up variables and constraints specific to storage resources with symmetric charge and discharge capacities. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_symmetric!(EP::Model, inputs::Dict, setup::Dict)
- # Set up additional variables, constraints, and expressions associated with storage resources with symmetric charge & discharge capacity
- # (e.g. most electrochemical batteries that use same components for charge & discharge)
- # STOR = 1 corresponds to storage with distinct power and energy capacity decisions but symmetric charge/discharge power ratings
+ # Set up additional variables, constraints, and expressions associated with storage resources with symmetric charge & discharge capacity
+ # (e.g. most electrochemical batteries that use same components for charge & discharge)
+ # STOR = 1 corresponds to storage with distinct power and energy capacity decisions but symmetric charge/discharge power ratings
- println("Storage Resources with Symmetric Charge/Discharge Capacity Module")
+ println("Storage Resources with Symmetric Charge/Discharge Capacity Module")
- OperationalReserves = setup["OperationalReserves"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ OperationalReserves = setup["OperationalReserves"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- STOR_SYMMETRIC = inputs["STOR_SYMMETRIC"]
+ STOR_SYMMETRIC = inputs["STOR_SYMMETRIC"]
- ### Constraints ###
-
- # Storage discharge and charge power (and reserve contribution) related constraints for symmetric storage resources:
- if OperationalReserves == 1
- storage_symmetric_operational_reserves!(EP, inputs, setup)
- else
- if CapacityReserveMargin > 0
- @constraints(EP, begin
- # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than symmetric power rating
- # Max simultaneous charge and discharge cannot be greater than capacity
- [y in STOR_SYMMETRIC, t in 1:T], EP[:vP][y,t]+EP[:vCHARGE][y,t]+EP[:vCAPRES_discharge][y,t]+EP[:vCAPRES_charge][y,t] <= EP[:eTotalCap][y]
- end)
- else
- @constraints(EP, begin
- # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than symmetric power rating
- # Max simultaneous charge and discharge cannot be greater than capacity
- [y in STOR_SYMMETRIC, t in 1:T], EP[:vP][y,t]+EP[:vCHARGE][y,t] <= EP[:eTotalCap][y]
- end)
- end
- end
+ ### Constraints ###
+ # Storage discharge and charge power (and reserve contribution) related constraints for symmetric storage resources:
+ if OperationalReserves == 1
+ storage_symmetric_operational_reserves!(EP, inputs, setup)
+ else
+ if CapacityReserveMargin > 0
+ @constraints(EP,
+ begin
+ # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than symmetric power rating
+ # Max simultaneous charge and discharge cannot be greater than capacity
+ [y in STOR_SYMMETRIC, t in 1:T],
+ EP[:vP][y, t] + EP[:vCHARGE][y, t] + EP[:vCAPRES_discharge][y, t] +
+ EP[:vCAPRES_charge][y, t] <= EP[:eTotalCap][y]
+ end)
+ else
+ @constraints(EP,
+ begin
+ # Maximum charging rate (including virtual charging to move energy held in reserve back to available storage) must be less than symmetric power rating
+ # Max simultaneous charge and discharge cannot be greater than capacity
+ [y in STOR_SYMMETRIC, t in 1:T],
+ EP[:vP][y, t] + EP[:vCHARGE][y, t] <= EP[:eTotalCap][y]
+ end)
+ end
+ end
end
@doc raw"""
@@ -46,14 +50,13 @@ end
Sets up variables and constraints specific to storage resources with symmetric charge and discharge capacities when reserves are modeled. See ```storage()``` in ```storage.jl``` for description of constraints.
"""
function storage_symmetric_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
+ T = inputs["T"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"] > 0
- T = inputs["T"]
- CapacityReserveMargin = setup["CapacityReserveMargin"] > 0
-
- SYMMETRIC = inputs["STOR_SYMMETRIC"]
+ SYMMETRIC = inputs["STOR_SYMMETRIC"]
- REG = intersect(SYMMETRIC, inputs["REG"])
- RSV = intersect(SYMMETRIC, inputs["RSV"])
+ REG = intersect(SYMMETRIC, inputs["REG"])
+ RSV = intersect(SYMMETRIC, inputs["RSV"])
vP = EP[:vP]
vCHARGE = EP[:vCHARGE]
@@ -65,7 +68,7 @@ function storage_symmetric_operational_reserves!(EP::Model, inputs::Dict, setup:
# Maximum charging rate plus contribution to regulation down must be less than symmetric power rating
# Max simultaneous charge and discharge rates cannot be greater than symmetric charge/discharge capacity
- expr = @expression(EP, [y in SYMMETRIC, t in 1:T], vP[y, t] + vCHARGE[y, t])
+ expr = @expression(EP, [y in SYMMETRIC, t in 1:T], vP[y, t]+vCHARGE[y, t])
add_similar_to_expression!(expr[REG, :], vREG_charge[REG, :])
add_similar_to_expression!(expr[REG, :], vREG_discharge[REG, :])
add_similar_to_expression!(expr[RSV, :], vRSV_discharge[RSV, :])
@@ -75,5 +78,5 @@ function storage_symmetric_operational_reserves!(EP::Model, inputs::Dict, setup:
add_similar_to_expression!(expr[SYMMETRIC, :], vCAPRES_charge[SYMMETRIC, :])
add_similar_to_expression!(expr[SYMMETRIC, :], vCAPRES_discharge[SYMMETRIC, :])
end
- @constraint(EP, [y in SYMMETRIC, t in 1:T], expr[y, t] <= eTotalCap[y])
+ @constraint(EP, [y in SYMMETRIC, t in 1:T], expr[y, t]<=eTotalCap[y])
end
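Similarly, the symmetric-storage constraints above all hang off one shared power rating. A simplified sketch that lumps the charge- and discharge-side reserve terms into a single variable each (invented rating; not part of the patch):

```julia
# Sketch only: symmetric storage shares one rating across charge, discharge, and
# upward reserve commitments; total_cap is hypothetical.
using JuMP

T = 24
total_cap = 50.0                           # MW, hypothetical symmetric rating

model = Model()
@variable(model, vP[1:T] >= 0)             # discharge
@variable(model, vCHARGE[1:T] >= 0)        # charge
@variable(model, vREG[1:T] >= 0)           # regulation contribution (lumped)
@variable(model, vRSV[1:T] >= 0)           # spinning-reserve contribution (lumped)

@constraint(model, [t = 1:T], vP[t] + vCHARGE[t] + vREG[t] + vRSV[t] <= total_cap)
```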
diff --git a/src/model/resources/thermal/thermal.jl b/src/model/resources/thermal/thermal.jl
index 894c2da2c0..ef5f9df385 100644
--- a/src/model/resources/thermal/thermal.jl
+++ b/src/model/resources/thermal/thermal.jl
@@ -4,46 +4,38 @@ The thermal module creates decision variables, expressions, and constraints rela
This module uses the following 'helper' functions in separate files: ```thermal_commit()``` for thermal resources subject to unit commitment decisions and constraints (if any) and ```thermal_no_commit()``` for thermal resources not subject to unit commitment (if any).
"""
function thermal!(EP::Model, inputs::Dict, setup::Dict)
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- THERM_COMMIT = inputs["THERM_COMMIT"]
- THERM_NO_COMMIT = inputs["THERM_NO_COMMIT"]
- THERM_ALL = inputs["THERM_ALL"]
+ THERM_COMMIT = inputs["THERM_COMMIT"]
+ THERM_NO_COMMIT = inputs["THERM_NO_COMMIT"]
+ THERM_ALL = inputs["THERM_ALL"]
- if !isempty(THERM_COMMIT)
- thermal_commit!(EP, inputs, setup)
- end
+ if !isempty(THERM_COMMIT)
+ thermal_commit!(EP, inputs, setup)
+ end
- if !isempty(THERM_NO_COMMIT)
- thermal_no_commit!(EP, inputs, setup)
- end
- ##CO2 Polcy Module Thermal Generation by zone
- @expression(EP, eGenerationByThermAll[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(inputs["THERM_ALL"], resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByThermAll)
+ if !isempty(THERM_NO_COMMIT)
+ thermal_no_commit!(EP, inputs, setup)
+ end
+    ##CO2 Policy Module Thermal Generation by zone
+ @expression(EP, eGenerationByThermAll[z = 1:Z, t = 1:T], # the unit is GW
+ sum(EP[:vP][y, t]
+ for y in intersect(inputs["THERM_ALL"], resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:eGenerationByZone], eGenerationByThermAll)
- # Capacity Reserves Margin policy
- if setup["CapacityReserveMargin"] > 0
+ # Capacity Reserves Margin policy
+ if setup["CapacityReserveMargin"] > 0
ncapres = inputs["NCapacityReserveMargin"]
@expression(EP, eCapResMarBalanceThermal[capres in 1:ncapres, t in 1:T],
- sum(derating_factor(gen[y], tag=capres) * EP[:eTotalCap][y] for y in THERM_ALL))
- add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceThermal)
+ sum(derating_factor(gen[y], tag = capres) * EP[:eTotalCap][y] for y in THERM_ALL))
+ add_similar_to_expression!(EP[:eCapResMarBalance], eCapResMarBalanceThermal)
MAINT = ids_with_maintenance(gen)
if !isempty(intersect(MAINT, THERM_COMMIT))
thermal_maintenance_capacity_reserve_margin_adjustment!(EP, inputs)
end
- end
-#=
- ##CO2 Polcy Module Thermal Generation by zone
- @expression(EP, eGenerationByThermAll[z=1:Z, t=1:T], # the unit is GW
- sum(EP[:vP][y,t] for y in intersect(inputs["THERM_ALL"], resources_in_zone_by_rid(gen,z)))
- )
- EP[:eGenerationByZone] += eGenerationByThermAll
- =# ##From main
+ end
end
-
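The thermal wrapper above mainly dispatches to the commit / no-commit submodules and adds a derated capacity-reserve contribution. A toy version of that contribution, with made-up factors standing in for `derating_factor(gen[y], tag = capres)` (not part of the patch):

```julia
# Sketch only: thermal capacity-reserve contribution as derated installed capacity.
using JuMP

T, n_therm = 24, 3
derate = [0.90, 0.85, 0.95]                # hypothetical derating factors

model = Model()
@variable(model, eTotalCap[1:n_therm] >= 0)

@expression(model, eCapResMarBalanceThermal[t = 1:T],
    sum(derate[y] * eTotalCap[y] for y in 1:n_therm))
```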
diff --git a/src/model/resources/thermal/thermal_commit.jl b/src/model/resources/thermal/thermal_commit.jl
index 84e3e7020d..347bdfbed5 100644
--- a/src/model/resources/thermal/thermal_commit.jl
+++ b/src/model/resources/thermal/thermal_commit.jl
@@ -125,20 +125,19 @@ Like with the ramping constraints, the minimum up and down constraint time also
It is recommended that users of GenX use subperiods longer than the longest min up/down time when modeling UC. Otherwise, the model will report an error.
"""
function thermal_commit!(EP::Model, inputs::Dict, setup::Dict)
+ println("Thermal (Unit Commitment) Resources Module")
- println("Thermal (Unit Commitment) Resources Module")
-
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- p = inputs["hours_per_subperiod"] #total number of hours per subperiod
+ p = inputs["hours_per_subperiod"] #total number of hours per subperiod
- THERM_COMMIT = inputs["THERM_COMMIT"]
+ THERM_COMMIT = inputs["THERM_COMMIT"]
- ### Expressions ###
+ ### Expressions ###
# These variables are used in the ramp-up and ramp-down expressions
reserves_term = @expression(EP, [y in THERM_COMMIT, t in 1:T], 0)
@@ -148,76 +147,97 @@ function thermal_commit!(EP::Model, inputs::Dict, setup::Dict)
THERM_COMMIT_REG = intersect(THERM_COMMIT, inputs["REG"]) # Set of thermal resources with regulation reserves
THERM_COMMIT_RSV = intersect(THERM_COMMIT, inputs["RSV"]) # Set of thermal resources with spinning reserves
regulation_term = @expression(EP, [y in THERM_COMMIT, t in 1:T],
- y ∈ THERM_COMMIT_REG ? EP[:vREG][y,t] - EP[:vREG][y, hoursbefore(p, t, 1)] : 0)
+ y ∈ THERM_COMMIT_REG ? EP[:vREG][y, t] - EP[:vREG][y, hoursbefore(p, t, 1)] : 0)
reserves_term = @expression(EP, [y in THERM_COMMIT, t in 1:T],
- y ∈ THERM_COMMIT_RSV ? EP[:vRSV][y,t] : 0)
+ y ∈ THERM_COMMIT_RSV ? EP[:vRSV][y, t] : 0)
+ end
+
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceThermCommit[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t] for y in intersect(THERM_COMMIT, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceThermCommit)
+
+ ### Constraints ###
+
+ ### Capacitated limits on unit commitment decision variables (Constraints #1-3)
+ @constraints(EP,
+ begin
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vCOMMIT][y, t] <= EP[:eTotalCap][y] / cap_size(gen[y])
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vSTART][y, t] <= EP[:eTotalCap][y] / cap_size(gen[y])
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vSHUT][y, t] <= EP[:eTotalCap][y] / cap_size(gen[y])
+ end)
+
+ # Commitment state constraint linking startup and shutdown decisions (Constraint #4)
+ @constraints(EP,
+ begin
+ [y in THERM_COMMIT, t in 1:T],
+ EP[:vCOMMIT][y, t] ==
+ EP[:vCOMMIT][y, hoursbefore(p, t, 1)] + EP[:vSTART][y, t] - EP[:vSHUT][y, t]
+ end)
+
+ ### Maximum ramp up and down between consecutive hours (Constraints #5-6)
+
+ ## For Start Hours
+ # Links last time step with first time step, ensuring position in hour 1 is within eligible ramp of final hour position
+ # rampup constraints
+ @constraint(EP, [y in THERM_COMMIT, t in 1:T],
+ EP[:vP][y, t] - EP[:vP][y, hoursbefore(p, t, 1)] + regulation_term[y, t] +
+ reserves_term[y, t]<=ramp_up_fraction(gen[y]) * cap_size(gen[y]) *
+ (EP[:vCOMMIT][y, t] - EP[:vSTART][y, t])
+ +
+ min(inputs["pP_Max"][y, t],
+ max(min_power(gen[y]), ramp_up_fraction(gen[y]))) * cap_size(gen[y]) * EP[:vSTART][y, t]
+ -
+ min_power(gen[y]) * cap_size(gen[y]) * EP[:vSHUT][y, t])
+
+ # rampdown constraints
+ @constraint(EP, [y in THERM_COMMIT, t in 1:T],
+ EP[:vP][y, hoursbefore(p, t, 1)] - EP[:vP][y, t] - regulation_term[y, t] +
+ reserves_term[y,
+ hoursbefore(p, t, 1)]<=ramp_down_fraction(gen[y]) * cap_size(gen[y]) *
+ (EP[:vCOMMIT][y, t] - EP[:vSTART][y, t])
+ -
+ min_power(gen[y]) * cap_size(gen[y]) * EP[:vSTART][y, t]
+ +
+ min(inputs["pP_Max"][y, t],
+ max(min_power(gen[y]), ramp_down_fraction(gen[y]))) * cap_size(gen[y]) * EP[:vSHUT][y, t])
+
+ ### Minimum and maximum power output constraints (Constraints #7-8)
+ if setup["OperationalReserves"] == 1
+ # If modeling with regulation and reserves, constraints are established by thermal_commit_operational_reserves() function below
+ thermal_commit_operational_reserves!(EP, inputs)
+ else
+ @constraints(EP,
+ begin
+ # Minimum stable power generated per technology "y" at hour "t" > Min power
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vP][y, t] >= min_power(gen[y]) * cap_size(gen[y]) * EP[:vCOMMIT][y, t]
+
+ # Maximum power generated per technology "y" at hour "t" < Max power
+ [y in THERM_COMMIT, t = 1:T],
+ EP[:vP][y, t] <=
+ inputs["pP_Max"][y, t] * cap_size(gen[y]) * EP[:vCOMMIT][y, t]
+ end)
end
- ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceThermCommit[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(THERM_COMMIT, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceThermCommit)
-
- ### Constraints ###
-
- ### Capacitated limits on unit commitment decision variables (Constraints #1-3)
- @constraints(EP, begin
- [y in THERM_COMMIT, t=1:T], EP[:vCOMMIT][y,t] <= EP[:eTotalCap][y]/cap_size(gen[y])
- [y in THERM_COMMIT, t=1:T], EP[:vSTART][y,t] <= EP[:eTotalCap][y]/cap_size(gen[y])
- [y in THERM_COMMIT, t=1:T], EP[:vSHUT][y,t] <= EP[:eTotalCap][y]/cap_size(gen[y])
- end)
-
- # Commitment state constraint linking startup and shutdown decisions (Constraint #4)
- @constraints(EP, begin
- [y in THERM_COMMIT, t in 1:T], EP[:vCOMMIT][y,t] == EP[:vCOMMIT][y, hoursbefore(p, t, 1)] + EP[:vSTART][y,t] - EP[:vSHUT][y,t]
- end)
-
- ### Maximum ramp up and down between consecutive hours (Constraints #5-6)
-
- ## For Start Hours
- # Links last time step with first time step, ensuring position in hour 1 is within eligible ramp of final hour position
- # rampup constraints
- @constraint(EP,[y in THERM_COMMIT, t in 1:T],
- EP[:vP][y,t] - EP[:vP][y, hoursbefore(p, t, 1)] + regulation_term[y,t] + reserves_term[y,t] <= ramp_up_fraction(gen[y])*cap_size(gen[y])*(EP[:vCOMMIT][y,t]-EP[:vSTART][y,t])
- + min(inputs["pP_Max"][y,t],max(min_power(gen[y]),ramp_up_fraction(gen[y])))*cap_size(gen[y])*EP[:vSTART][y,t]
- - min_power(gen[y])*cap_size(gen[y])*EP[:vSHUT][y,t])
-
- # rampdown constraints
- @constraint(EP,[y in THERM_COMMIT, t in 1:T],
- EP[:vP][y, hoursbefore(p,t,1)] - EP[:vP][y,t] - regulation_term[y,t] + reserves_term[y, hoursbefore(p,t,1)] <= ramp_down_fraction(gen[y])*cap_size(gen[y])*(EP[:vCOMMIT][y,t]-EP[:vSTART][y,t])
- - min_power(gen[y])*cap_size(gen[y])*EP[:vSTART][y,t]
- + min(inputs["pP_Max"][y,t],max(min_power(gen[y]),ramp_down_fraction(gen[y])))*cap_size(gen[y])*EP[:vSHUT][y,t])
-
-
- ### Minimum and maximum power output constraints (Constraints #7-8)
- if setup["OperationalReserves"] == 1
- # If modeling with regulation and reserves, constraints are established by thermal_commit_operational_reserves() function below
- thermal_commit_operational_reserves!(EP, inputs)
- else
- @constraints(EP, begin
- # Minimum stable power generated per technology "y" at hour "t" > Min power
- [y in THERM_COMMIT, t=1:T], EP[:vP][y,t] >= min_power(gen[y])*cap_size(gen[y])*EP[:vCOMMIT][y,t]
-
- # Maximum power generated per technology "y" at hour "t" < Max power
- [y in THERM_COMMIT, t=1:T], EP[:vP][y,t] <= inputs["pP_Max"][y,t]*cap_size(gen[y])*EP[:vCOMMIT][y,t]
- end)
- end
-
- ### Minimum up and down times (Constraints #9-10)
- Up_Time = zeros(Int, G)
- Up_Time[THERM_COMMIT] .= Int.(floor.(up_time.(gen[THERM_COMMIT])))
- @constraint(EP, [y in THERM_COMMIT, t in 1:T],
- EP[:vCOMMIT][y,t] >= sum(EP[:vSTART][y, u] for u in hoursbefore(p, t, 0:(Up_Time[y] - 1)))
- )
-
- Down_Time = zeros(Int, G)
- Down_Time[THERM_COMMIT] .= Int.(floor.(down_time.(gen[THERM_COMMIT])))
- @constraint(EP, [y in THERM_COMMIT, t in 1:T],
- EP[:eTotalCap][y]/cap_size(gen[y])-EP[:vCOMMIT][y,t] >= sum(EP[:vSHUT][y, u] for u in hoursbefore(p, t, 0:(Down_Time[y] - 1)))
- )
-
- ## END Constraints for thermal units subject to integer (discrete) unit commitment decisions
+ ### Minimum up and down times (Constraints #9-10)
+ Up_Time = zeros(Int, G)
+ Up_Time[THERM_COMMIT] .= Int.(floor.(up_time.(gen[THERM_COMMIT])))
+ @constraint(EP, [y in THERM_COMMIT, t in 1:T],
+ EP[:vCOMMIT][y,
+ t]>=sum(EP[:vSTART][y, u] for u in hoursbefore(p, t, 0:(Up_Time[y] - 1))))
+
+ Down_Time = zeros(Int, G)
+ Down_Time[THERM_COMMIT] .= Int.(floor.(down_time.(gen[THERM_COMMIT])))
+ @constraint(EP, [y in THERM_COMMIT, t in 1:T],
+ EP[:eTotalCap][y] / cap_size(gen[y]) -
+ EP[:vCOMMIT][y,
+ t]>=sum(EP[:vSHUT][y, u] for u in hoursbefore(p, t, 0:(Down_Time[y] - 1))))
+
+ ## END Constraints for thermal units subject to integer (discrete) unit commitment decisions
if !isempty(ids_with_maintenance(gen))
maintenance_formulation_thermal_commit!(EP, inputs, setup)
end
@@ -266,39 +286,46 @@ When modeling frequency regulation and spinning reserves contributions, thermal
"""
function thermal_commit_operational_reserves!(EP::Model, inputs::Dict)
+ println("Thermal Commit Operational Reserves Module")
- println("Thermal Commit Operational Reserves Module")
-
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
+ T = inputs["T"] # Number of time steps (hours)
- THERM_COMMIT = inputs["THERM_COMMIT"]
+ THERM_COMMIT = inputs["THERM_COMMIT"]
- REG = intersect(THERM_COMMIT, inputs["REG"]) # Set of thermal resources with regulation reserves
- RSV = intersect(THERM_COMMIT, inputs["RSV"]) # Set of thermal resources with spinning reserves
+ REG = intersect(THERM_COMMIT, inputs["REG"]) # Set of thermal resources with regulation reserves
+ RSV = intersect(THERM_COMMIT, inputs["RSV"]) # Set of thermal resources with spinning reserves
vP = EP[:vP]
vREG = EP[:vREG]
vRSV = EP[:vRSV]
- commit(y,t) = cap_size(gen[y]) * EP[:vCOMMIT][y,t]
- max_power(y,t) = inputs["pP_Max"][y,t]
+ commit(y, t) = cap_size(gen[y]) * EP[:vCOMMIT][y, t]
+ max_power(y, t) = inputs["pP_Max"][y, t]
# Maximum regulation and reserve contributions
- @constraint(EP, [y in REG, t in 1:T], vREG[y, t] <= max_power(y, t) * reg_max(gen[y]) * commit(y, t))
- @constraint(EP, [y in RSV, t in 1:T], vRSV[y, t] <= max_power(y, t) * rsv_max(gen[y]) * commit(y, t))
+ @constraint(EP,
+ [y in REG, t in 1:T],
+ vREG[y, t]<=max_power(y, t) * reg_max(gen[y]) * commit(y, t))
+ @constraint(EP,
+ [y in RSV, t in 1:T],
+ vRSV[y, t]<=max_power(y, t) * rsv_max(gen[y]) * commit(y, t))
# Minimum stable power generated per technology "y" at hour "t" and contribution to regulation must be > min power
expr = extract_time_series_to_expression(vP, THERM_COMMIT)
add_similar_to_expression!(expr[REG, :], -vREG[REG, :])
- @constraint(EP, [y in THERM_COMMIT, t in 1:T], expr[y, t] >= min_power(gen[y]) * commit(y, t))
+ @constraint(EP,
+ [y in THERM_COMMIT, t in 1:T],
+ expr[y, t]>=min_power(gen[y]) * commit(y, t))
# Maximum power generated per technology "y" at hour "t" and contribution to regulation and reserves up must be < max power
expr = extract_time_series_to_expression(vP, THERM_COMMIT)
add_similar_to_expression!(expr[REG, :], vREG[REG, :])
add_similar_to_expression!(expr[RSV, :], vRSV[RSV, :])
- @constraint(EP, [y in THERM_COMMIT, t in 1:T], expr[y, t] <= max_power(y, t) * commit(y, t))
+ @constraint(EP,
+ [y in THERM_COMMIT, t in 1:T],
+ expr[y, t]<=max_power(y, t) * commit(y, t))
end
@doc raw"""
@@ -307,12 +334,11 @@ end
Creates maintenance variables and constraints for thermal-commit plants.
"""
function maintenance_formulation_thermal_commit!(EP::Model, inputs::Dict, setup::Dict)
-
@info "Maintenance Module for Thermal plants"
ensure_maintenance_variable_records!(inputs)
gen = inputs["RESOURCES"]
-
+
by_rid(rid, sym) = by_rid_res(rid, sym, gen)
MAINT = ids_with_maintenance(gen)
@@ -331,16 +357,16 @@ function maintenance_formulation_thermal_commit!(EP::Model, inputs::Dict, setup:
for y in MAINT
maintenance_formulation!(EP,
- inputs,
- resource_component(y),
- y,
- maint_begin_cadence(y),
- maint_dur(y),
- maint_freq(y),
- cap(y),
- vcommit,
- ecap,
- integer_operational_unit_committment)
+ inputs,
+ resource_component(y),
+ y,
+ maint_begin_cadence(y),
+ maint_dur(y),
+ maint_freq(y),
+ cap(y),
+ vcommit,
+ ecap,
+ integer_operational_unit_committment)
end
end
@@ -350,7 +376,7 @@ end
Eliminates the contribution of a plant to the capacity reserve margin while it is down for maintenance.
"""
function thermal_maintenance_capacity_reserve_margin_adjustment!(EP::Model,
- inputs::Dict)
+ inputs::Dict)
gen = inputs["RESOURCES"]
T = inputs["T"] # Number of time steps (hours)
@@ -360,18 +386,22 @@ function thermal_maintenance_capacity_reserve_margin_adjustment!(EP::Model,
applicable_resources = intersect(MAINT, THERM_COMMIT)
maint_adj = @expression(EP, [capres in 1:ncapres, t in 1:T],
- sum(thermal_maintenance_capacity_reserve_margin_adjustment(EP, inputs, y, capres, t) for y in applicable_resources))
+ sum(thermal_maintenance_capacity_reserve_margin_adjustment(EP,
+ inputs,
+ y,
+ capres,
+ t) for y in applicable_resources))
add_similar_to_expression!(EP[:eCapResMarBalance], maint_adj)
end
function thermal_maintenance_capacity_reserve_margin_adjustment(EP::Model,
- inputs::Dict,
- y::Int,
- capres::Int,
- t)
+ inputs::Dict,
+ y::Int,
+ capres::Int,
+ t)
gen = inputs["RESOURCES"]
resource_component = resource_name(gen[y])
- capresfactor = derating_factor(gen[y], tag=capres)
+ capresfactor = derating_factor(gen[y], tag = capres)
cap = cap_size(gen[y])
down_var = EP[Symbol(maintenance_down_name(resource_component))]
return -capresfactor * down_var[t] * cap
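The minimum up/down-time constraints reformatted above count starts (or shutdowns) over a trailing window that wraps around the subperiod. A minimal sketch of the up-time version, with a hypothetical window length and `mod1` playing the role of `hoursbefore` (not part of the patch):

```julia
# Sketch only: committed units must cover all starts within the trailing Up_Time window.
using JuMP

T, up_time = 24, 4                          # hours in subperiod, min up time (hypothetical)
wrap(t) = mod1(t, T)                        # fold indices back into 1:T

model = Model()
@variable(model, vCOMMIT[1:T] >= 0)
@variable(model, vSTART[1:T] >= 0)

@constraint(model, [t = 1:T],
    vCOMMIT[t] >= sum(vSTART[wrap(t - u)] for u in 0:(up_time - 1)))
```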
diff --git a/src/model/resources/thermal/thermal_no_commit.jl b/src/model/resources/thermal/thermal_no_commit.jl
index 975a8c67f3..1a75eb0980 100644
--- a/src/model/resources/thermal/thermal_no_commit.jl
+++ b/src/model/resources/thermal/thermal_no_commit.jl
@@ -42,53 +42,59 @@ When not modeling regulation and reserves, thermal units not subject to unit com
(See Constraints 3-4 in the code)
"""
function thermal_no_commit!(EP::Model, inputs::Dict, setup::Dict)
+ println("Thermal (No Unit Commitment) Resources Module")
- println("Thermal (No Unit Commitment) Resources Module")
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
-
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
-
- p = inputs["hours_per_subperiod"] #total number of hours per subperiod
-
- THERM_NO_COMMIT = inputs["THERM_NO_COMMIT"]
-
- ### Expressions ###
-
- ## Power Balance Expressions ##
- @expression(EP, ePowerBalanceThermNoCommit[t=1:T, z=1:Z],
- sum(EP[:vP][y,t] for y in intersect(THERM_NO_COMMIT, resources_in_zone_by_rid(gen,z)))
- )
- add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceThermNoCommit)
-
- ### Constraints ###
-
- ### Maximum ramp up and down between consecutive hours (Constraints #1-2)
- @constraints(EP, begin
-
- ## Maximum ramp up between consecutive hours
- [y in THERM_NO_COMMIT, t in 1:T], EP[:vP][y,t] - EP[:vP][y, hoursbefore(p,t,1)] <= ramp_up_fraction(gen[y])*EP[:eTotalCap][y]
-
- ## Maximum ramp down between consecutive hours
- [y in THERM_NO_COMMIT, t in 1:T], EP[:vP][y, hoursbefore(p,t,1)] - EP[:vP][y,t] <= ramp_down_fraction(gen[y])*EP[:eTotalCap][y]
- end)
-
- ### Minimum and maximum power output constraints (Constraints #3-4)
- if setup["OperationalReserves"] == 1
- # If modeling with regulation and reserves, constraints are established by thermal_no_commit_operational_reserves() function below
- thermal_no_commit_operational_reserves!(EP, inputs)
- else
- @constraints(EP, begin
- # Minimum stable power generated per technology "y" at hour "t" Min_Power
- [y in THERM_NO_COMMIT, t=1:T], EP[:vP][y,t] >= min_power(gen[y])*EP[:eTotalCap][y]
-
- # Maximum power generated per technology "y" at hour "t"
- [y in THERM_NO_COMMIT, t=1:T], EP[:vP][y,t] <= inputs["pP_Max"][y,t]*EP[:eTotalCap][y]
- end)
-
- end
- # END Constraints for thermal resources not subject to unit commitment
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+
+ p = inputs["hours_per_subperiod"] #total number of hours per subperiod
+
+ THERM_NO_COMMIT = inputs["THERM_NO_COMMIT"]
+
+ ### Expressions ###
+
+ ## Power Balance Expressions ##
+ @expression(EP, ePowerBalanceThermNoCommit[t = 1:T, z = 1:Z],
+ sum(EP[:vP][y, t]
+ for y in intersect(THERM_NO_COMMIT, resources_in_zone_by_rid(gen, z))))
+ add_similar_to_expression!(EP[:ePowerBalance], ePowerBalanceThermNoCommit)
+
+ ### Constraints ###
+
+ ### Maximum ramp up and down between consecutive hours (Constraints #1-2)
+ @constraints(EP,
+ begin
+
+ ## Maximum ramp up between consecutive hours
+ [y in THERM_NO_COMMIT, t in 1:T],
+ EP[:vP][y, t] - EP[:vP][y, hoursbefore(p, t, 1)] <=
+ ramp_up_fraction(gen[y]) * EP[:eTotalCap][y]
+
+ ## Maximum ramp down between consecutive hours
+ [y in THERM_NO_COMMIT, t in 1:T],
+ EP[:vP][y, hoursbefore(p, t, 1)] - EP[:vP][y, t] <=
+ ramp_down_fraction(gen[y]) * EP[:eTotalCap][y]
+ end)
+
+ ### Minimum and maximum power output constraints (Constraints #3-4)
+ if setup["OperationalReserves"] == 1
+ # If modeling with regulation and reserves, constraints are established by thermal_no_commit_operational_reserves() function below
+ thermal_no_commit_operational_reserves!(EP, inputs)
+ else
+ @constraints(EP,
+ begin
+ # Minimum stable power generated per technology "y" at hour "t" Min_Power
+ [y in THERM_NO_COMMIT, t = 1:T],
+ EP[:vP][y, t] >= min_power(gen[y]) * EP[:eTotalCap][y]
+
+ # Maximum power generated per technology "y" at hour "t"
+ [y in THERM_NO_COMMIT, t = 1:T],
+ EP[:vP][y, t] <= inputs["pP_Max"][y, t] * EP[:eTotalCap][y]
+ end)
+ end
+ # END Constraints for thermal resources not subject to unit commitment
end
@doc raw"""
@@ -135,10 +141,9 @@ When modeling regulation and spinning reserves, thermal units not subject to uni
Note there are multiple versions of these constraints in the code to avoid creating unnecessary constraints and decision variables for thermal units unable to provide regulation and/or reserve contributions due to input parameters (e.g. ```Reg_Max=0``` and/or ```RSV_Max=0```).
"""
function thermal_no_commit_operational_reserves!(EP::Model, inputs::Dict)
+ println("Thermal No Commit Reserves Module")
- println("Thermal No Commit Reserves Module")
-
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
T = inputs["T"] # Number of time steps (hours)
@@ -152,20 +157,28 @@ function thermal_no_commit_operational_reserves!(EP::Model, inputs::Dict)
vRSV = EP[:vRSV]
eTotalCap = EP[:eTotalCap]
- max_power(y,t) = inputs["pP_Max"][y,t]
+ max_power(y, t) = inputs["pP_Max"][y, t]
# Maximum regulation and reserve contributions
- @constraint(EP, [y in REG, t in 1:T], vREG[y, t] <= max_power(y, t) * reg_max(gen[y]) * eTotalCap[y])
- @constraint(EP, [y in RSV, t in 1:T], vRSV[y, t] <= max_power(y, t) * rsv_max(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in REG, t in 1:T],
+ vREG[y, t]<=max_power(y, t) * reg_max(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in RSV, t in 1:T],
+ vRSV[y, t]<=max_power(y, t) * rsv_max(gen[y]) * eTotalCap[y])
# Minimum stable power generated per technology "y" at hour "t" and contribution to regulation must be > min power
expr = extract_time_series_to_expression(vP, THERM_NO_COMMIT)
add_similar_to_expression!(expr[REG, :], -vREG[REG, :])
- @constraint(EP, [y in THERM_NO_COMMIT, t in 1:T], expr[y, t] >= min_power(gen[y]) * eTotalCap[y])
+ @constraint(EP,
+ [y in THERM_NO_COMMIT, t in 1:T],
+ expr[y, t]>=min_power(gen[y]) * eTotalCap[y])
# Maximum power generated per technology "y" at hour "t" and contribution to regulation and reserves up must be < max power
expr = extract_time_series_to_expression(vP, THERM_NO_COMMIT)
add_similar_to_expression!(expr[REG, :], vREG[REG, :])
add_similar_to_expression!(expr[RSV, :], vRSV[RSV, :])
- @constraint(EP, [y in THERM_NO_COMMIT, t in 1:T], expr[y, t] <= max_power(y, t) * eTotalCap[y])
+ @constraint(EP,
+ [y in THERM_NO_COMMIT, t in 1:T],
+ expr[y, t]<=max_power(y, t) * eTotalCap[y])
end
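Finally, the non-UC thermal hunks above pair a ramp-up and a ramp-down limit around the same wrap-around index. A toy standalone version with invented capacity and ramp fractions (not part of the patch):

```julia
# Sketch only: ramp limits for one non-UC thermal unit over a wrapped subperiod.
using JuMP

T = 24
ramp_up, ramp_dn = 0.3, 0.3                 # hypothetical ramp fractions per hour
cap = 200.0                                 # MW, hypothetical
prev(t) = t == 1 ? T : t - 1                # wrap hour 1 back to hour T

model = Model()
@variable(model, 0 <= vP[1:T] <= cap)

@constraints(model, begin
    [t = 1:T], vP[t] - vP[prev(t)] <= ramp_up * cap
    [t = 1:T], vP[prev(t)] - vP[t] <= ramp_dn * cap
end)
```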
diff --git a/src/model/resources/vre_stor/vre_stor.jl b/src/model/resources/vre_stor/vre_stor.jl
index 7ad0a07dd1..911fdca66b 100644
--- a/src/model/resources/vre_stor/vre_stor.jl
+++ b/src/model/resources/vre_stor/vre_stor.jl
@@ -79,69 +79,70 @@ The second constraint with both capacity reserve margins and operating reserves
The rest of the constraints are dependent upon specific configurable components within the module and are listed below.
"""
function vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
- println("VRE-Storage Module")
+ println("VRE-Storage Module")
### LOAD DATA ###
# Load generators dataframe, sets, and time periods
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
# Load VRE-storage inputs
- VRE_STOR = inputs["VRE_STOR"] # Set of VRE-STOR generators (indices)
+ VRE_STOR = inputs["VRE_STOR"] # Set of VRE-STOR generators (indices)
gen_VRE_STOR = gen.VreStorage # Set of VRE-STOR generators (objects)
SOLAR = inputs["VS_SOLAR"] # Set of VRE-STOR generators with solar-component
DC = inputs["VS_DC"] # Set of VRE-STOR generators with inverter-component
WIND = inputs["VS_WIND"] # Set of VRE-STOR generators with wind-component
STOR = inputs["VS_STOR"] # Set of VRE-STOR generators with storage-component
NEW_CAP = intersect(VRE_STOR, inputs["NEW_CAP"]) # Set of VRE-STOR generators eligible for new buildout
-
+
# Policy flags
EnergyShareRequirement = setup["EnergyShareRequirement"]
- CapacityReserveMargin = setup["CapacityReserveMargin"]
+ CapacityReserveMargin = setup["CapacityReserveMargin"]
MinCapReq = setup["MinCapReq"]
MaxCapReq = setup["MaxCapReq"]
IncludeLossesInESR = setup["IncludeLossesInESR"]
OperationalReserves = setup["OperationalReserves"]
-
+
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
### VARIABLES ARE DEFINED IN RESPECTIVE MODULES ###
-
- ### EXPRESSIONS ###
+
+ ### EXPRESSIONS ###
## 1. Objective Function Expressions ##
# Separate grid costs
@expression(EP, eCGrid[y in VRE_STOR],
if y in NEW_CAP # Resources eligible for new capacity
- inv_cost_per_mwyr(gen[y])*EP[:vCAP][y] + fixed_om_cost_per_mwyr(gen[y])*EP[:eTotalCap][y]
+ inv_cost_per_mwyr(gen[y]) * EP[:vCAP][y] +
+ fixed_om_cost_per_mwyr(gen[y]) * EP[:eTotalCap][y]
else
- fixed_om_cost_per_mwyr(gen[y])*EP[:eTotalCap][y]
- end
- )
+ fixed_om_cost_per_mwyr(gen[y]) * EP[:eTotalCap][y]
+ end)
@expression(EP, eTotalCGrid, sum(eCGrid[y] for y in VRE_STOR))
- ## 2. Power Balance Expressions ##
+ ## 2. Power Balance Expressions ##
# Note: The subtraction of the charging component can be found in STOR function
- @expression(EP, ePowerBalance_VRE_STOR[t=1:T, z=1:Z], JuMP.AffExpr())
- for t=1:T, z=1:Z
+ @expression(EP, ePowerBalance_VRE_STOR[t = 1:T, z = 1:Z], JuMP.AffExpr())
+ for t in 1:T, z in 1:Z
if !isempty(resources_in_zone_by_rid(gen_VRE_STOR, z))
- ePowerBalance_VRE_STOR[t,z] += sum(EP[:vP][y,t] for y=resources_in_zone_by_rid(gen_VRE_STOR, z))
+ ePowerBalance_VRE_STOR[t, z] += sum(EP[:vP][y, t]
+ for y in resources_in_zone_by_rid(gen_VRE_STOR,
+ z))
end
end
## 3. Module Expressions ##
# Inverter AC Balance
- @expression(EP, eInvACBalance[y in VRE_STOR, t=1:T], JuMP.AffExpr())
+ @expression(EP, eInvACBalance[y in VRE_STOR, t = 1:T], JuMP.AffExpr())
# Grid Exports
- @expression(EP, eGridExport[y in VRE_STOR, t=1:T], JuMP.AffExpr())
+ @expression(EP, eGridExport[y in VRE_STOR, t = 1:T], JuMP.AffExpr())
### COMPONENT MODULE CONSTRAINTS ###
@@ -169,87 +170,111 @@ function vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Energy Share Requirement
if EnergyShareRequirement >= 1
- @expression(EP, eESRVREStor[ESR=1:inputs["nESR"]],
- sum(inputs["omega"][t]*esr_vrestor(gen[y],tag=ESR)*EP[:vP_SOLAR][y,t]*by_rid(y,:etainverter)
- for y=intersect(SOLAR, ids_with_policy(gen, esr_vrestor, tag=ESR)), t=1:T)
- + sum(inputs["omega"][t]*esr_vrestor(gen[y],tag=ESR)*EP[:vP_WIND][y,t]
- for y=intersect(WIND, ids_with_policy(gen, esr_vrestor, tag=ESR)), t=1:T))
+ @expression(EP, eESRVREStor[ESR = 1:inputs["nESR"]],
+ sum(inputs["omega"][t] * esr_vrestor(gen[y], tag = ESR) * EP[:vP_SOLAR][y, t] *
+ by_rid(y, :etainverter)
+ for y in intersect(SOLAR, ids_with_policy(gen, esr_vrestor, tag = ESR)),
+ t in 1:T)
+ +sum(inputs["omega"][t] * esr_vrestor(gen[y], tag = ESR) * EP[:vP_WIND][y, t]
+ for y in intersect(WIND, ids_with_policy(gen, esr_vrestor, tag = ESR)),
+ t in 1:T))
EP[:eESR] += eESRVREStor
if IncludeLossesInESR == 1
- @expression(EP, eESRVREStorLosses[ESR=1:inputs["nESR"]],
- sum(inputs["dfESR"][z,ESR]*sum(EP[:eELOSS_VRE_STOR][y]
- for y=intersect(STOR, resources_in_zone_by_rid(gen_VRE_STOR, z))) for z=findall(x->x>0,inputs["dfESR"][:,ESR])))
+ @expression(EP, eESRVREStorLosses[ESR = 1:inputs["nESR"]],
+ sum(inputs["dfESR"][z, ESR] * sum(EP[:eELOSS_VRE_STOR][y]
+ for y in intersect(STOR, resources_in_zone_by_rid(gen_VRE_STOR, z)))
+ for z in findall(x -> x > 0, inputs["dfESR"][:, ESR])))
EP[:eESR] -= eESRVREStorLosses
end
end
# Minimum Capacity Requirement
if MinCapReq == 1
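+        # Solar counts at its AC-equivalent rating (etainverter * DC capacity); wind counts at its AC rating
+        # Storage counts at its discharge power rating: explicit discharge capacity (asymmetric) or power-to-energy ratio * energy capacity (symmetric)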
- @expression(EP, eMinCapResSolar[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(by_rid(y,:etainverter)*EP[:eTotalCap_SOLAR][y] for y in intersect(SOLAR, ids_with_policy(gen_VRE_STOR, min_cap_solar, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResSolar
+ @expression(EP, eMinCapResSolar[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(by_rid(y, :etainverter) * EP[:eTotalCap_SOLAR][y] for y in intersect(SOLAR,
+ ids_with_policy(gen_VRE_STOR, min_cap_solar, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResSolar
- @expression(EP, eMinCapResWind[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(EP[:eTotalCap_WIND][y] for y in intersect(WIND, ids_with_policy(gen_VRE_STOR, min_cap_wind, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResWind
+ @expression(EP, eMinCapResWind[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(EP[:eTotalCap_WIND][y] for y in intersect(WIND,
+ ids_with_policy(gen_VRE_STOR, min_cap_wind, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResWind
if !isempty(inputs["VS_ASYM_AC_DISCHARGE"])
- @expression(EP, eMinCapResACDis[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(EP[:eTotalCapDischarge_AC][y] for y in intersect(inputs["VS_ASYM_AC_DISCHARGE"], ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResACDis
+ @expression(EP, eMinCapResACDis[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(EP[:eTotalCapDischarge_AC][y]
+ for y in intersect(inputs["VS_ASYM_AC_DISCHARGE"],
+ ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResACDis
end
if !isempty(inputs["VS_ASYM_DC_DISCHARGE"])
- @expression(EP, eMinCapResDCDis[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(EP[:eTotalCapDischarge_DC][y] for y in intersect(inputs["VS_ASYM_DC_DISCHARGE"], ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResDCDis
+ @expression(EP, eMinCapResDCDis[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(EP[:eTotalCapDischarge_DC][y]
+ for y in intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResDCDis
end
if !isempty(inputs["VS_SYM_AC"])
- @expression(EP, eMinCapResACStor[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(by_rid(y,:power_to_energy_ac)*EP[:eTotalCap_STOR][y] for y in intersect(inputs["VS_SYM_AC"], ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResACStor
+ @expression(EP, eMinCapResACStor[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(by_rid(y, :power_to_energy_ac) * EP[:eTotalCap_STOR][y]
+ for y in intersect(inputs["VS_SYM_AC"],
+ ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResACStor
end
if !isempty(inputs["VS_SYM_DC"])
- @expression(EP, eMinCapResDCStor[mincap = 1:inputs["NumberOfMinCapReqs"]],
- sum(by_rid(y,:power_to_energy_dc)*EP[:eTotalCap_STOR][y] for y in intersect(inputs["VS_SYM_DC"], ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap))))
- EP[:eMinCapRes] += eMinCapResDCStor
+ @expression(EP, eMinCapResDCStor[mincap = 1:inputs["NumberOfMinCapReqs"]],
+ sum(by_rid(y, :power_to_energy_dc) * EP[:eTotalCap_STOR][y]
+ for y in intersect(inputs["VS_SYM_DC"],
+ ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap))))
+ EP[:eMinCapRes] += eMinCapResDCStor
end
end
# Maximum Capacity Requirement
if MaxCapReq == 1
- @expression(EP, eMaxCapResSolar[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(by_rid(y,:etainverter)*EP[:eTotalCap_SOLAR][y] for y in intersect(SOLAR, ids_with_policy(gen_VRE_STOR, max_cap_solar, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResSolar
+ @expression(EP, eMaxCapResSolar[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(by_rid(y, :etainverter) * EP[:eTotalCap_SOLAR][y] for y in intersect(SOLAR,
+ ids_with_policy(gen_VRE_STOR, max_cap_solar, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResSolar
- @expression(EP, eMaxCapResWind[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(EP[:eTotalCap_WIND][y] for y in intersect(WIND, ids_with_policy(gen_VRE_STOR, max_cap_wind, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResWind
+ @expression(EP, eMaxCapResWind[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(EP[:eTotalCap_WIND][y] for y in intersect(WIND,
+ ids_with_policy(gen_VRE_STOR, max_cap_wind, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResWind
if !isempty(inputs["VS_ASYM_AC_DISCHARGE"])
- @expression(EP, eMaxCapResACDis[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(EP[:eTotalCapDischarge_AC][y] for y in intersect(inputs["VS_ASYM_AC_DISCHARGE"], ids_with_policy(gen_VRE_STOR, max_cap_stor, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResACDis
+ @expression(EP, eMaxCapResACDis[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(EP[:eTotalCapDischarge_AC][y]
+ for y in intersect(inputs["VS_ASYM_AC_DISCHARGE"],
+ ids_with_policy(gen_VRE_STOR, max_cap_stor, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResACDis
end
if !isempty(inputs["VS_ASYM_DC_DISCHARGE"])
- @expression(EP, eMaxCapResDCDis[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(EP[:eTotalCapDischarge_DC][y] for y in intersect(inputs["VS_ASYM_DC_DISCHARGE"], ids_with_policy(gen_VRE_STOR, max_cap_stor, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResDCDis
+ @expression(EP, eMaxCapResDCDis[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(EP[:eTotalCapDischarge_DC][y]
+ for y in intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ ids_with_policy(gen_VRE_STOR, max_cap_stor, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResDCDis
end
if !isempty(inputs["VS_SYM_AC"])
- @expression(EP, eMaxCapResACStor[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(by_rid(y,:power_to_energy_ac)*EP[:eTotalCap_STOR][y] for y in intersect(inputs["VS_SYM_AC"], ids_with_policy(gen_VRE_STOR, max_cap_stor, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResACStor
+ @expression(EP, eMaxCapResACStor[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(by_rid(y, :power_to_energy_ac) * EP[:eTotalCap_STOR][y]
+ for y in intersect(inputs["VS_SYM_AC"],
+ ids_with_policy(gen_VRE_STOR, max_cap_stor, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResACStor
end
if !isempty(inputs["VS_SYM_DC"])
- @expression(EP, eMaxCapResDCStor[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
- sum(by_rid(y,:power_to_energy_dc)*EP[:eTotalCap_STOR][y] for y in intersect(inputs["VS_SYM_DC"], ids_with_policy(gen_VRE_STOR, max_cap_stor, tag=maxcap))))
- EP[:eMaxCapRes] += eMaxCapResDCStor
+ @expression(EP, eMaxCapResDCStor[maxcap = 1:inputs["NumberOfMaxCapReqs"]],
+ sum(by_rid(y, :power_to_energy_dc) * EP[:eTotalCap_STOR][y]
+ for y in intersect(inputs["VS_SYM_DC"],
+ ids_with_policy(gen_VRE_STOR, max_cap_stor, tag = maxcap))))
+ EP[:eMaxCapRes] += eMaxCapResDCStor
end
end
@@ -269,33 +294,49 @@ function vre_stor!(EP::Model, inputs::Dict, setup::Dict)
### CONSTRAINTS ###
# Constraint 1: Energy Balance Constraint
- @constraint(EP, cEnergyBalance[y in VRE_STOR, t=1:T],
- EP[:vP][y,t] == eInvACBalance[y,t])
-
+ @constraint(EP, cEnergyBalance[y in VRE_STOR, t = 1:T],
+ EP[:vP][y, t]==eInvACBalance[y, t])
+
# Constraint 2: Grid Export/Import Maximum
- @constraint(EP, cGridExport[y in VRE_STOR, t=1:T],
- EP[:vP][y,t] + eGridExport[y,t] <= EP[:eTotalCap][y])
-
+ @constraint(EP, cGridExport[y in VRE_STOR, t = 1:T],
+ EP[:vP][y, t] + eGridExport[y, t]<=EP[:eTotalCap][y])
+
# Constraint 3: Inverter Export/Import Maximum (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cInverterExport[y in DC, t=1:T], EP[:eInverterExport][y,t] <= EP[:eTotalCap_DC][y])
+ @constraint(EP,
+ cInverterExport[y in DC, t = 1:T],
+ EP[:eInverterExport][y, t]<=EP[:eTotalCap_DC][y])
# Constraint 4: PV Generation (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cSolarGenMaxS[y in SOLAR, t=1:T], EP[:eSolarGenMaxS][y,t] <= inputs["pP_Max_Solar"][y,t]*EP[:eTotalCap_SOLAR][y])
+ @constraint(EP,
+ cSolarGenMaxS[y in SOLAR, t = 1:T],
+ EP[:eSolarGenMaxS][y, t]<=inputs["pP_Max_Solar"][y, t] * EP[:eTotalCap_SOLAR][y])
# Constraint 5: Wind Generation (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cWindGenMaxW[y in WIND, t=1:T], EP[:eWindGenMaxW][y,t] <= inputs["pP_Max_Wind"][y,t]*EP[:eTotalCap_WIND][y])
+ @constraint(EP,
+ cWindGenMaxW[y in WIND, t = 1:T],
+ EP[:eWindGenMaxW][y, t]<=inputs["pP_Max_Wind"][y, t] * EP[:eTotalCap_WIND][y])
# Constraint 6: Symmetric Storage Resources (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cChargeDischargeMaxDC[y in inputs["VS_SYM_DC"], t=1:T],
- EP[:eChargeDischargeMaxDC][y,t] <= by_rid(y,:power_to_energy_dc)*EP[:eTotalCap_STOR][y])
- @constraint(EP, cChargeDischargeMaxAC[y in inputs["VS_SYM_AC"], t=1:T],
- EP[:eChargeDischargeMaxAC][y,t] <= by_rid(y,:power_to_energy_ac)*EP[:eTotalCap_STOR][y])
+ @constraint(EP, cChargeDischargeMaxDC[y in inputs["VS_SYM_DC"], t = 1:T],
+ EP[:eChargeDischargeMaxDC][y,
+ t]<=by_rid(y, :power_to_energy_dc) * EP[:eTotalCap_STOR][y])
+ @constraint(EP, cChargeDischargeMaxAC[y in inputs["VS_SYM_AC"], t = 1:T],
+ EP[:eChargeDischargeMaxAC][y,
+ t]<=by_rid(y, :power_to_energy_ac) * EP[:eTotalCap_STOR][y])
# Constraint 7: Asymmetric Storage Resources (implemented in main module due to potential capacity reserve margin and operating reserve constraints)
- @constraint(EP, cVreStorMaxDischargingDC[y in inputs["VS_ASYM_DC_DISCHARGE"], t=1:T], EP[:eVreStorMaxDischargingDC][y,t] <= EP[:eTotalCapDischarge_DC][y])
- @constraint(EP, cVreStorMaxChargingDC[y in inputs["VS_ASYM_DC_CHARGE"], t=1:T], EP[:eVreStorMaxChargingDC][y,t] <= EP[:eTotalCapCharge_DC][y])
- @constraint(EP, cVreStorMaxDischargingAC[y in inputs["VS_ASYM_AC_DISCHARGE"], t=1:T], EP[:eVreStorMaxDischargingAC][y,t] <= EP[:eTotalCapDischarge_AC][y])
- @constraint(EP, cVreStorMaxChargingAC[y in inputs["VS_ASYM_AC_CHARGE"], t=1:T], EP[:eVreStorMaxChargingAC][y,t] <= EP[:eTotalCapCharge_AC][y])
+ @constraint(EP,
+ cVreStorMaxDischargingDC[y in inputs["VS_ASYM_DC_DISCHARGE"], t = 1:T],
+ EP[:eVreStorMaxDischargingDC][y, t]<=EP[:eTotalCapDischarge_DC][y])
+ @constraint(EP,
+ cVreStorMaxChargingDC[y in inputs["VS_ASYM_DC_CHARGE"], t = 1:T],
+ EP[:eVreStorMaxChargingDC][y, t]<=EP[:eTotalCapCharge_DC][y])
+ @constraint(EP,
+ cVreStorMaxDischargingAC[y in inputs["VS_ASYM_AC_DISCHARGE"], t = 1:T],
+ EP[:eVreStorMaxDischargingAC][y, t]<=EP[:eTotalCapDischarge_AC][y])
+ @constraint(EP,
+ cVreStorMaxChargingAC[y in inputs["VS_ASYM_AC_CHARGE"], t = 1:T],
+ EP[:eVreStorMaxChargingAC][y, t]<=EP[:eTotalCapCharge_AC][y])
end
@doc raw"""
@@ -371,7 +412,6 @@ In addition, this function adds investment and fixed O&M related costs related t
```
"""
function inverter_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Inverter Module")
### LOAD DATA ###
@@ -382,7 +422,7 @@ function inverter_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
RET_CAP_DC = inputs["RET_CAP_DC"]
gen = inputs["RESOURCES"]
gen_VRE_STOR = gen.VreStorage
-
+
MultiStage = setup["MultiStage"]
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
@@ -396,72 +436,73 @@ function inverter_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGDCCAP[y in DC] >= 0);
+ @variable(EP, vEXISTINGDCCAP[y in DC]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapDC[y in DC], vEXISTINGDCCAP[y])
- else
- @expression(EP, eExistingCapDC[y in DC], by_rid(y,:existing_cap_inverter_mw))
- end
+ @expression(EP, eExistingCapDC[y in DC], vEXISTINGDCCAP[y])
+ else
+ @expression(EP, eExistingCapDC[y in DC], by_rid(y, :existing_cap_inverter_mw))
+ end
# 1. Total inverter capacity
@expression(EP, eTotalCap_DC[y in DC],
- if (y in intersect(NEW_CAP_DC, RET_CAP_DC)) # Resources eligible for new capacity and retirements
- eExistingCapDC[y] + EP[:vDCCAP][y] - EP[:vRETDCCAP][y]
- elseif (y in setdiff(NEW_CAP_DC, RET_CAP_DC)) # Resources eligible for only new capacity
- eExistingCapDC[y] + EP[:vDCCAP][y]
- elseif (y in setdiff(RET_CAP_DC, NEW_CAP_DC)) # Resources eligible for only capacity retirements
- eExistingCapDC[y] - EP[:vRETDCCAP][y]
- else
- eExistingCapDC[y]
- end
- )
+ if (y in intersect(NEW_CAP_DC, RET_CAP_DC)) # Resources eligible for new capacity and retirements
+ eExistingCapDC[y] + EP[:vDCCAP][y] - EP[:vRETDCCAP][y]
+ elseif (y in setdiff(NEW_CAP_DC, RET_CAP_DC)) # Resources eligible for only new capacity
+ eExistingCapDC[y] + EP[:vDCCAP][y]
+ elseif (y in setdiff(RET_CAP_DC, NEW_CAP_DC)) # Resources eligible for only capacity retirements
+ eExistingCapDC[y] - EP[:vRETDCCAP][y]
+ else
+ eExistingCapDC[y]
+ end)
# 2. Objective function additions
# Fixed costs for inverter component (if resource is not eligible for new inverter capacity, fixed costs are only O&M costs)
@expression(EP, eCFixDC[y in DC],
if y in NEW_CAP_DC # Resources eligible for new capacity
- by_rid(y,:inv_cost_inverter_per_mwyr)*vDCCAP[y] + by_rid(y,:fixed_om_inverter_cost_per_mwyr)*eTotalCap_DC[y]
+ by_rid(y, :inv_cost_inverter_per_mwyr) * vDCCAP[y] +
+ by_rid(y, :fixed_om_inverter_cost_per_mwyr) * eTotalCap_DC[y]
else
- by_rid(y,:fixed_om_inverter_cost_per_mwyr)*eTotalCap_DC[y]
- end
- )
-
+ by_rid(y, :fixed_om_inverter_cost_per_mwyr) * eTotalCap_DC[y]
+ end)
+
# Sum individual resource contributions
@expression(EP, eTotalCFixDC, sum(eCFixDC[y] for y in DC))
if MultiStage == 1
- EP[:eObj] += eTotalCFixDC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixDC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixDC
end
# 3. Inverter exports expression
- @expression(EP, eInverterExport[y in DC, t=1:T], JuMP.AffExpr())
+ @expression(EP, eInverterExport[y in DC, t = 1:T], JuMP.AffExpr())
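+    # Populated with DC-side flows (solar output, DC charge/discharge) by the solar and storage modules; capped at eTotalCap_DC by cInverterExport in the main module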
### CONSTRAINTS ###
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapDC[y in DC], EP[:vEXISTINGDCCAP][y] == by_rid(y,:existing_cap_inverter_mw))
+ @constraint(EP,
+ cExistingCapDC[y in DC],
+ EP[:vEXISTINGDCCAP][y]==by_rid(y, :existing_cap_inverter_mw))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more capacity than existing capacity for VRE-STOR technologies
- @constraint(EP, cMaxRet_DC[y=RET_CAP_DC], vRETDCCAP[y] <= eExistingCapDC[y])
+ @constraint(EP, cMaxRet_DC[y = RET_CAP_DC], vRETDCCAP[y]<=eExistingCapDC[y])
# Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- @constraint(EP, cMaxCap_DC[y in ids_with_nonneg(gen_VRE_STOR, max_cap_inverter_mw)],
- eTotalCap_DC[y] <= by_rid(y,:max_cap_inverter_mw))
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasibility
+ @constraint(EP, cMaxCap_DC[y in ids_with_nonneg(gen_VRE_STOR, max_cap_inverter_mw)],
+ eTotalCap_DC[y]<=by_rid(y, :max_cap_inverter_mw))
# Constraint on Minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- @constraint(EP, cMinCap_DC[y in ids_with_positive(gen_VRE_STOR, min_cap_inverter_mw)],
- eTotalCap_DC[y] >= by_rid(y,:min_cap_inverter_mw))
+ @constraint(EP, cMinCap_DC[y in ids_with_positive(gen_VRE_STOR, min_cap_inverter_mw)],
+ eTotalCap_DC[y]>=by_rid(y, :min_cap_inverter_mw))
# Constraint 2: Inverter Exports Maximum: see main module because capacity reserve margin/operating reserves may alter constraint
end
@@ -530,7 +571,6 @@ In addition, this function adds investment, fixed O&M, and variable O&M costs re
```
"""
function solar_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Solar Module")
### LOAD DATA ###
@@ -554,91 +594,94 @@ function solar_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
vSOLARCAP[y in NEW_CAP_SOLAR] >= 0 # New installed solar capacity [MW DC]
# Solar-component generation [MWh]
- vP_SOLAR[y in SOLAR, t=1:T] >= 0
+ vP_SOLAR[y in SOLAR, t = 1:T] >= 0
end)
if MultiStage == 1
- @variable(EP, vEXISTINGSOLARCAP[y in SOLAR] >= 0);
+ @variable(EP, vEXISTINGSOLARCAP[y in SOLAR]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapSolar[y in SOLAR], vEXISTINGSOLARCAP[y])
- else
- @expression(EP, eExistingCapSolar[y in SOLAR], by_rid(y,:existing_cap_solar_mw))
- end
+ @expression(EP, eExistingCapSolar[y in SOLAR], vEXISTINGSOLARCAP[y])
+ else
+ @expression(EP, eExistingCapSolar[y in SOLAR], by_rid(y, :existing_cap_solar_mw))
+ end
# 1. Total solar capacity
@expression(EP, eTotalCap_SOLAR[y in SOLAR],
- if (y in intersect(NEW_CAP_SOLAR, RET_CAP_SOLAR)) # Resources eligible for new capacity and retirements
- eExistingCapSolar[y] + EP[:vSOLARCAP][y] - EP[:vRETSOLARCAP][y]
- elseif (y in setdiff(NEW_CAP_SOLAR, RET_CAP_SOLAR)) # Resources eligible for only new capacity
- eExistingCapSolar[y] + EP[:vSOLARCAP][y]
- elseif (y in setdiff(RET_CAP_SOLAR, NEW_CAP_SOLAR)) # Resources eligible for only capacity retirements
- eExistingCapSolar[y] - EP[:vRETSOLARCAP][y]
- else
- eExistingCapSolar[y]
- end
- )
+ if (y in intersect(NEW_CAP_SOLAR, RET_CAP_SOLAR)) # Resources eligible for new capacity and retirements
+ eExistingCapSolar[y] + EP[:vSOLARCAP][y] - EP[:vRETSOLARCAP][y]
+ elseif (y in setdiff(NEW_CAP_SOLAR, RET_CAP_SOLAR)) # Resources eligible for only new capacity
+ eExistingCapSolar[y] + EP[:vSOLARCAP][y]
+ elseif (y in setdiff(RET_CAP_SOLAR, NEW_CAP_SOLAR)) # Resources eligible for only capacity retirements
+ eExistingCapSolar[y] - EP[:vRETSOLARCAP][y]
+ else
+ eExistingCapSolar[y]
+ end)
# 2. Objective function additions
# Fixed costs for solar resources (if resource is not eligible for new solar capacity, fixed costs are only O&M costs)
@expression(EP, eCFixSolar[y in SOLAR],
if y in NEW_CAP_SOLAR # Resources eligible for new capacity
- by_rid(y,:inv_cost_solar_per_mwyr)*vSOLARCAP[y] + by_rid(y,:fixed_om_solar_cost_per_mwyr)*eTotalCap_SOLAR[y]
+ by_rid(y, :inv_cost_solar_per_mwyr) * vSOLARCAP[y] +
+ by_rid(y, :fixed_om_solar_cost_per_mwyr) * eTotalCap_SOLAR[y]
else
- by_rid(y,:fixed_om_solar_cost_per_mwyr)*eTotalCap_SOLAR[y]
- end
- )
+ by_rid(y, :fixed_om_solar_cost_per_mwyr) * eTotalCap_SOLAR[y]
+ end)
@expression(EP, eTotalCFixSolar, sum(eCFixSolar[y] for y in SOLAR))
if MultiStage == 1
- EP[:eObj] += eTotalCFixSolar/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixSolar / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixSolar
end
# Variable costs of "generation" for solar resource "y" during hour "t"
- @expression(EP, eCVarOutSolar[y in SOLAR, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_solar)*by_rid(y,:etainverter)*EP[:vP_SOLAR][y,t])
- @expression(EP, eTotalCVarOutSolar, sum(eCVarOutSolar[y,t] for y in SOLAR, t=1:T))
+ @expression(EP, eCVarOutSolar[y in SOLAR, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_solar)*by_rid(y, :etainverter)*
+ EP[:vP_SOLAR][y, t])
+ @expression(EP, eTotalCVarOutSolar, sum(eCVarOutSolar[y, t] for y in SOLAR, t in 1:T))
EP[:eObj] += eTotalCVarOutSolar
# 3. Inverter Balance, PV Generation Maximum
- @expression(EP, eSolarGenMaxS[y in SOLAR, t=1:T], JuMP.AffExpr())
- for y in SOLAR, t=1:T
- EP[:eInvACBalance][y,t] += by_rid(y,:etainverter)*EP[:vP_SOLAR][y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*EP[:vP_SOLAR][y,t]
- eSolarGenMaxS[y,t] += EP[:vP_SOLAR][y,t]
+ @expression(EP, eSolarGenMaxS[y in SOLAR, t = 1:T], JuMP.AffExpr())
+ for y in SOLAR, t in 1:T
+ EP[:eInvACBalance][y, t] += by_rid(y, :etainverter) * EP[:vP_SOLAR][y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * EP[:vP_SOLAR][y, t]
+ eSolarGenMaxS[y, t] += EP[:vP_SOLAR][y, t]
end
### CONSTRAINTS ###
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapSolar[y in SOLAR], EP[:vEXISTINGSOLARCAP][y] == by_rid(y,:existing_cap_solar_mw))
- end
+ @constraint(EP,
+ cExistingCapSolar[y in SOLAR],
+ EP[:vEXISTINGSOLARCAP][y]==by_rid(y, :existing_cap_solar_mw))
+ end
# Constraints 1: Retirements and capacity additions
# Cannot retire more capacity than existing capacity for VRE-STOR technologies
- @constraint(EP, cMaxRet_Solar[y=RET_CAP_SOLAR], vRETSOLARCAP[y] <= eExistingCapSolar[y])
+ @constraint(EP, cMaxRet_Solar[y = RET_CAP_SOLAR], vRETSOLARCAP[y]<=eExistingCapSolar[y])
# Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- @constraint(EP, cMaxCap_Solar[y in ids_with_nonneg(gen_VRE_STOR, max_cap_solar_mw)],
- eTotalCap_SOLAR[y] <= by_rid(y,:max_cap_solar_mw))
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasibility
+ @constraint(EP, cMaxCap_Solar[y in ids_with_nonneg(gen_VRE_STOR, max_cap_solar_mw)],
+ eTotalCap_SOLAR[y]<=by_rid(y, :max_cap_solar_mw))
# Constraint on Minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- @constraint(EP, cMinCap_Solar[y in ids_with_positive(gen_VRE_STOR, min_cap_solar_mw)],
- eTotalCap_SOLAR[y] >= by_rid(y,:min_cap_solar_mw))
+ @constraint(EP, cMinCap_Solar[y in ids_with_positive(gen_VRE_STOR, min_cap_solar_mw)],
+ eTotalCap_SOLAR[y]>=by_rid(y, :min_cap_solar_mw))
# Constraint 2: PV Generation: see main module because operating reserves may alter constraint
# Constraint 3: Inverter Ratio between solar capacity and grid
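+    # Note: ties total solar (DC) capacity to inverter capacity, eTotalCap_DC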
- @constraint(EP, cInverterRatio_Solar[y in ids_with_positive(gen_VRE_STOR, inverter_ratio_solar)],
- EP[:eTotalCap_SOLAR][y] == by_rid(y,:inverter_ratio_solar)*EP[:eTotalCap_DC][y])
+ @constraint(EP,
+ cInverterRatio_Solar[y in ids_with_positive(gen_VRE_STOR, inverter_ratio_solar)],
+ EP[:eTotalCap_SOLAR][y]==by_rid(y, :inverter_ratio_solar) * EP[:eTotalCap_DC][y])
end
@doc raw"""
@@ -705,7 +748,6 @@ In addition, this function adds investment, fixed O&M, and variable O&M costs re
```
"""
function wind_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Wind Module")
### LOAD DATA ###
@@ -729,89 +771,93 @@ function wind_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
vWINDCAP[y in NEW_CAP_WIND] >= 0 # New installed wind capacity [MW AC]
# Wind-component generation [MWh]
- vP_WIND[y in WIND, t=1:T] >= 0
+ vP_WIND[y in WIND, t = 1:T] >= 0
end)
if MultiStage == 1
- @variable(EP, vEXISTINGWINDCAP[y in WIND] >= 0);
- end
+ @variable(EP, vEXISTINGWINDCAP[y in WIND]>=0)
+ end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapWind[y in WIND], vEXISTINGWINDCAP[y])
- else
- @expression(EP, eExistingCapWind[y in WIND], by_rid(y,:existing_cap_wind_mw))
- end
+ @expression(EP, eExistingCapWind[y in WIND], vEXISTINGWINDCAP[y])
+ else
+ @expression(EP, eExistingCapWind[y in WIND], by_rid(y, :existing_cap_wind_mw))
+ end
# 1. Total wind capacity
@expression(EP, eTotalCap_WIND[y in WIND],
- if (y in intersect(NEW_CAP_WIND, RET_CAP_WIND)) # Resources eligible for new capacity and retirements
- eExistingCapWind[y] + EP[:vWINDCAP][y] - EP[:vRETWINDCAP][y]
- elseif (y in setdiff(NEW_CAP_WIND, RET_CAP_WIND)) # Resources eligible for only new capacity
- eExistingCapWind[y] + EP[:vWINDCAP][y]
- elseif (y in setdiff(RET_CAP_WIND, NEW_CAP_WIND)) # Resources eligible for only capacity retirements
- eExistingCapWind[y] - EP[:vRETWINDCAP][y]
- else
- eExistingCapWind[y]
- end
- )
+ if (y in intersect(NEW_CAP_WIND, RET_CAP_WIND)) # Resources eligible for new capacity and retirements
+ eExistingCapWind[y] + EP[:vWINDCAP][y] - EP[:vRETWINDCAP][y]
+ elseif (y in setdiff(NEW_CAP_WIND, RET_CAP_WIND)) # Resources eligible for only new capacity
+ eExistingCapWind[y] + EP[:vWINDCAP][y]
+ elseif (y in setdiff(RET_CAP_WIND, NEW_CAP_WIND)) # Resources eligible for only capacity retirements
+ eExistingCapWind[y] - EP[:vRETWINDCAP][y]
+ else
+ eExistingCapWind[y]
+ end)
# 2. Objective function additions
# Fixed costs for wind resources (if resource is not eligible for new wind capacity, fixed costs are only O&M costs)
@expression(EP, eCFixWind[y in WIND],
if y in NEW_CAP_WIND # Resources eligible for new capacity
- by_rid(y,:inv_cost_wind_per_mwyr)*vWINDCAP[y] + by_rid(y,:fixed_om_wind_cost_per_mwyr)*eTotalCap_WIND[y]
+ by_rid(y, :inv_cost_wind_per_mwyr) * vWINDCAP[y] +
+ by_rid(y, :fixed_om_wind_cost_per_mwyr) * eTotalCap_WIND[y]
else
- by_rid(y,:fixed_om_wind_cost_per_mwyr)*eTotalCap_WIND[y]
- end
- )
+ by_rid(y, :fixed_om_wind_cost_per_mwyr) * eTotalCap_WIND[y]
+ end)
@expression(EP, eTotalCFixWind, sum(eCFixWind[y] for y in WIND))
if MultiStage == 1
- EP[:eObj] += eTotalCFixWind/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixWind / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixWind
end
# Variable costs of "generation" for wind resource "y" during hour "t"
- @expression(EP, eCVarOutWind[y in WIND, t=1:T], inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_wind)*EP[:vP_WIND][y,t])
- @expression(EP, eTotalCVarOutWind, sum(eCVarOutWind[y,t] for y in WIND, t=1:T))
+ @expression(EP,
+ eCVarOutWind[y in WIND, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_wind)*EP[:vP_WIND][y, t])
+ @expression(EP, eTotalCVarOutWind, sum(eCVarOutWind[y, t] for y in WIND, t in 1:T))
EP[:eObj] += eTotalCVarOutWind
# 3. Inverter Balance, Wind Generation Maximum
- @expression(EP, eWindGenMaxW[y in WIND, t=1:T], JuMP.AffExpr())
- for y in WIND, t=1:T
- EP[:eInvACBalance][y,t] += EP[:vP_WIND][y,t]
- eWindGenMaxW[y,t] += EP[:vP_WIND][y,t]
+ @expression(EP, eWindGenMaxW[y in WIND, t = 1:T], JuMP.AffExpr())
+ for y in WIND, t in 1:T
+ EP[:eInvACBalance][y, t] += EP[:vP_WIND][y, t]
+ eWindGenMaxW[y, t] += EP[:vP_WIND][y, t]
end
### CONSTRAINTS ###
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapWind[y in WIND], EP[:vEXISTINGWINDCAP][y] == by_rid(y,:existing_cap_wind_mw))
- end
+ @constraint(EP,
+ cExistingCapWind[y in WIND],
+ EP[:vEXISTINGWINDCAP][y]==by_rid(y, :existing_cap_wind_mw))
+ end
# Constraints 1: Retirements and capacity additions
# Cannot retire more capacity than existing capacity for VRE-STOR technologies
- @constraint(EP, cMaxRet_Wind[y=RET_CAP_WIND], vRETWINDCAP[y] <= eExistingCapWind[y])
+ @constraint(EP, cMaxRet_Wind[y = RET_CAP_WIND], vRETWINDCAP[y]<=eExistingCapWind[y])
# Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- @constraint(EP, cMaxCap_Wind[y in ids_with_nonneg(gen_VRE_STOR, max_cap_wind_mw)],
- eTotalCap_WIND[y] <= by_rid(y,:max_cap_wind_mw))
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasibility
+ @constraint(EP, cMaxCap_Wind[y in ids_with_nonneg(gen_VRE_STOR, max_cap_wind_mw)],
+ eTotalCap_WIND[y]<=by_rid(y, :max_cap_wind_mw))
# Constraint on Minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- @constraint(EP, cMinCap_Wind[y in ids_with_positive(gen_VRE_STOR, min_cap_wind_mw)],
- eTotalCap_WIND[y] >= by_rid(y,:min_cap_wind_mw))
+ @constraint(EP, cMinCap_Wind[y in ids_with_positive(gen_VRE_STOR, min_cap_wind_mw)],
+ eTotalCap_WIND[y]>=by_rid(y, :min_cap_wind_mw))
# Constraint 2: Wind Generation: see main module because capacity reserve margin/operating reserves may alter constraint
# Constraint 3: Inverter Ratio between wind capacity and grid
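+    # Note: ties total wind (AC) capacity to total grid connection capacity, eTotalCap (the solar ratio above is taken against inverter capacity instead)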
- @constraint(EP, cInverterRatio_Wind[y in ids_with_positive(gen_VRE_STOR, inverter_ratio_wind)],
- EP[:eTotalCap_WIND][y] == by_rid(y,:inverter_ratio_wind)*EP[:eTotalCap][y])
+ @constraint(EP,
+ cInverterRatio_Wind[y in ids_with_positive(gen_VRE_STOR, inverter_ratio_wind)],
+ EP[:eTotalCap_WIND][y]==by_rid(y, :inverter_ratio_wind) * EP[:eTotalCap][y])
end
@doc raw"""
@@ -941,12 +987,11 @@ In addition, this function adds investment, fixed O&M, and variable O&M costs re
```
"""
function stor_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Storage Module")
### LOAD DATA ###
- T = inputs["T"]
+ T = inputs["T"]
Z = inputs["Z"]
gen = inputs["RESOURCES"]
@@ -964,8 +1009,8 @@ function stor_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
VS_LDS = inputs["VS_LDS"]
START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
- hours_per_subperiod = inputs["hours_per_subperiod"] # total number of hours per subperiod
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ hours_per_subperiod = inputs["hours_per_subperiod"] # total number of hours per subperiod
rep_periods = inputs["REP_PERIOD"]
MultiStage = setup["MultiStage"]
@@ -978,104 +1023,110 @@ function stor_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Storage energy capacity
vCAPENERGY_VS[y in NEW_CAP_STOR] >= 0 # Energy storage reservoir capacity (MWh capacity) built for VRE storage [MWh]
vRETCAPENERGY_VS[y in RET_CAP_STOR] >= 0 # Energy storage reservoir capacity retired for VRE storage [MWh]
-
+
# State of charge variable
- vS_VRE_STOR[y in STOR, t=1:T] >= 0 # Storage level of resource "y" at hour "t" [MWh] on zone "z"
+ vS_VRE_STOR[y in STOR, t = 1:T] >= 0 # Storage level of resource "y" at hour "t" [MWh] on zone "z"
# DC-battery discharge [MWh]
- vP_DC_DISCHARGE[y in DC_DISCHARGE, t=1:T] >= 0
+ vP_DC_DISCHARGE[y in DC_DISCHARGE, t = 1:T] >= 0
# DC-battery charge [MWh]
- vP_DC_CHARGE[y in DC_CHARGE, t=1:T] >= 0
+ vP_DC_CHARGE[y in DC_CHARGE, t = 1:T] >= 0
# AC-battery discharge [MWh]
- vP_AC_DISCHARGE[y in AC_DISCHARGE, t=1:T] >= 0
+ vP_AC_DISCHARGE[y in AC_DISCHARGE, t = 1:T] >= 0
# AC-battery charge [MWh]
- vP_AC_CHARGE[y in AC_CHARGE, t=1:T] >= 0
+ vP_AC_CHARGE[y in AC_CHARGE, t = 1:T] >= 0
# Grid-interfacing charge (Energy withdrawn from grid by resource VRE_STOR at hour "t") [MWh]
- vCHARGE_VRE_STOR[y in STOR, t=1:T] >= 0
+ vCHARGE_VRE_STOR[y in STOR, t = 1:T] >= 0
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPENERGY_VS[y in STOR] >= 0);
- end
+ @variable(EP, vEXISTINGCAPENERGY_VS[y in STOR]>=0)
+ end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapEnergy_VS[y in STOR], vEXISTINGCAPENERGY_VS[y])
- else
- @expression(EP, eExistingCapEnergy_VS[y in STOR], existing_cap_mwh(gen[y]))
- end
+ @expression(EP, eExistingCapEnergy_VS[y in STOR], vEXISTINGCAPENERGY_VS[y])
+ else
+ @expression(EP, eExistingCapEnergy_VS[y in STOR], existing_cap_mwh(gen[y]))
+ end
# 1. Total storage energy capacity
@expression(EP, eTotalCap_STOR[y in STOR],
- if (y in intersect(NEW_CAP_STOR, RET_CAP_STOR)) # Resources eligible for new capacity and retirements
- eExistingCapEnergy_VS[y] + EP[:vCAPENERGY_VS][y] - EP[:vRETCAPENERGY_VS][y]
- elseif (y in setdiff(NEW_CAP_STOR, RET_CAP_STOR)) # Resources eligible for only new capacity
- eExistingCapEnergy_VS[y] + EP[:vCAPENERGY_VS][y]
- elseif (y in setdiff(RET_CAP_STOR, NEW_CAP_STOR)) # Resources eligible for only capacity retirements
- eExistingCapEnergy_VS[y] - EP[:vRETCAPENERGY_VS][y]
- else
- eExistingCapEnergy_VS[y]
- end
- )
+ if (y in intersect(NEW_CAP_STOR, RET_CAP_STOR)) # Resources eligible for new capacity and retirements
+ eExistingCapEnergy_VS[y] + EP[:vCAPENERGY_VS][y] - EP[:vRETCAPENERGY_VS][y]
+ elseif (y in setdiff(NEW_CAP_STOR, RET_CAP_STOR)) # Resources eligible for only new capacity
+ eExistingCapEnergy_VS[y] + EP[:vCAPENERGY_VS][y]
+ elseif (y in setdiff(RET_CAP_STOR, NEW_CAP_STOR)) # Resources eligible for only capacity retirements
+ eExistingCapEnergy_VS[y] - EP[:vRETCAPENERGY_VS][y]
+ else
+ eExistingCapEnergy_VS[y]
+ end)
# 2. Objective function additions
# Fixed costs for storage resources (if resource is not eligible for new energy capacity, fixed costs are only O&M costs)
- @expression(EP, eCFixEnergy_VS[y in STOR],
+ @expression(EP, eCFixEnergy_VS[y in STOR],
if y in NEW_CAP_STOR # Resources eligible for new capacity
- inv_cost_per_mwhyr(gen[y])*vCAPENERGY_VS[y] + fixed_om_cost_per_mwhyr(gen[y])*eTotalCap_STOR[y]
+ inv_cost_per_mwhyr(gen[y]) * vCAPENERGY_VS[y] +
+ fixed_om_cost_per_mwhyr(gen[y]) * eTotalCap_STOR[y]
else
- fixed_om_cost_per_mwhyr(gen[y])*eTotalCap_STOR[y]
- end
- )
+ fixed_om_cost_per_mwhyr(gen[y]) * eTotalCap_STOR[y]
+ end)
@expression(EP, eTotalCFixStor, sum(eCFixEnergy_VS[y] for y in STOR))
if MultiStage == 1
- EP[:eObj] += eTotalCFixStor/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixStor / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixStor
end
# Variable costs of charging DC for VRE-STOR resources "y" during hour "t"
- @expression(EP, eCVar_Charge_DC[y in DC_CHARGE, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_charge_dc)*EP[:vP_DC_CHARGE][y,t]/by_rid(y,:etainverter))
+ @expression(EP, eCVar_Charge_DC[y in DC_CHARGE, t = 1:T],
+ inputs["omega"][t] * by_rid(y, :var_om_cost_per_mwh_charge_dc) *
+ EP[:vP_DC_CHARGE][y, t]/by_rid(y, :etainverter))
# Variable costs of discharging DC for VRE-STOR resources "y" during hour "t"
- @expression(EP, eCVar_Discharge_DC[y in DC_DISCHARGE, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_discharge_dc)*by_rid(y,:etainverter)*EP[:vP_DC_DISCHARGE][y,t])
+ @expression(EP, eCVar_Discharge_DC[y in DC_DISCHARGE, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_discharge_dc)*
+ by_rid(y, :etainverter)*EP[:vP_DC_DISCHARGE][y, t])
# Variable costs of charging AC for VRE-STOR resources "y" during hour "t"
- @expression(EP, eCVar_Charge_AC[y in AC_CHARGE, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_charge_ac)*EP[:vP_AC_CHARGE][y,t])
+ @expression(EP, eCVar_Charge_AC[y in AC_CHARGE, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_charge_ac)*
+ EP[:vP_AC_CHARGE][y, t])
# Variable costs of discharging AC for VRE-STOR resources "y" during hour "t"
- @expression(EP, eCVar_Discharge_AC[y in AC_DISCHARGE, t=1:T],
- inputs["omega"][t]*by_rid(y,:var_om_cost_per_mwh_discharge_ac)*EP[:vP_AC_DISCHARGE][y,t])
+ @expression(EP, eCVar_Discharge_AC[y in AC_DISCHARGE, t = 1:T],
+ inputs["omega"][t]*by_rid(y, :var_om_cost_per_mwh_discharge_ac)*
+ EP[:vP_AC_DISCHARGE][y, t])
# Sum individual resource contributions
- @expression(EP, eTotalCVarStor, sum(eCVar_Charge_DC[y,t] for y in DC_CHARGE, t=1:T)
- + sum(eCVar_Discharge_DC[y,t] for y in DC_DISCHARGE, t=1:T)
- + sum(eCVar_Charge_AC[y,t] for y in AC_CHARGE, t=1:T)
- + sum(eCVar_Discharge_AC[y,t] for y in AC_CHARGE, t=1:T))
+ @expression(EP,
+ eTotalCVarStor,
+ sum(eCVar_Charge_DC[y, t] for y in DC_CHARGE, t in 1:T)
+ +sum(eCVar_Discharge_DC[y, t] for y in DC_DISCHARGE, t in 1:T)
+ +sum(eCVar_Charge_AC[y, t] for y in AC_CHARGE, t in 1:T)
+        +sum(eCVar_Discharge_AC[y, t] for y in AC_DISCHARGE, t in 1:T))
EP[:eObj] += eTotalCVarStor
# 3. Inverter & Power Balance, SoC Expressions
# Check for rep_periods > 1 & LDS=1
- if rep_periods > 1 && !isempty(VS_LDS)
- CONSTRAINTSET = inputs["VS_nonLDS"]
- else
- CONSTRAINTSET = STOR
- end
+ if rep_periods > 1 && !isempty(VS_LDS)
+ CONSTRAINTSET = inputs["VS_nonLDS"]
+ else
+ CONSTRAINTSET = STOR
+ end
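+    # With multiple representative periods, LDS resources are excluded here; lds_vre_stor!() links their state of charge across periods instead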
# SoC expressions
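+    # Start-of-subperiod hours wrap around to the last hour of the same subperiod (t + hours_per_subperiod - 1); interior hours reference t - 1; both are net of self-discharge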
@expression(EP, eSoCBalStart_VRE_STOR[y in CONSTRAINTSET, t in START_SUBPERIODS],
- vS_VRE_STOR[y,t+hours_per_subperiod-1] - self_discharge(gen[y])*vS_VRE_STOR[y,t+hours_per_subperiod-1])
+ vS_VRE_STOR[y,
+ t + hours_per_subperiod - 1]-self_discharge(gen[y]) * vS_VRE_STOR[y, t + hours_per_subperiod - 1])
@expression(EP, eSoCBalInterior_VRE_STOR[y in STOR, t in INTERIOR_SUBPERIODS],
- vS_VRE_STOR[y,t-1] - self_discharge(gen[y])*vS_VRE_STOR[y,t-1])
+ vS_VRE_STOR[y, t - 1]-self_discharge(gen[y]) * vS_VRE_STOR[y, t - 1])
# Expression for energy losses related to technologies (increase in effective demand)
@expression(EP, eELOSS_VRE_STOR[y in STOR], JuMP.AffExpr())
@@ -1085,117 +1136,134 @@ function stor_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
AC_CHARGE_CONSTRAINTSET = intersect(CONSTRAINTSET, AC_CHARGE)
for t in START_SUBPERIODS
for y in DC_DISCHARGE_CONSTRAINTSET
- eSoCBalStart_VRE_STOR[y,t] -= EP[:vP_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eSoCBalStart_VRE_STOR[y, t] -= EP[:vP_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE_CONSTRAINTSET
- eSoCBalStart_VRE_STOR[y,t] += by_rid(y,:eff_up_dc)*EP[:vP_DC_CHARGE][y,t]
+ eSoCBalStart_VRE_STOR[y, t] += by_rid(y, :eff_up_dc) * EP[:vP_DC_CHARGE][y, t]
end
for y in AC_DISCHARGE_CONSTRAINTSET
- eSoCBalStart_VRE_STOR[y,t] -= EP[:vP_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eSoCBalStart_VRE_STOR[y, t] -= EP[:vP_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE_CONSTRAINTSET
- eSoCBalStart_VRE_STOR[y,t] += by_rid(y,:eff_up_ac)*EP[:vP_AC_CHARGE][y,t]
+ eSoCBalStart_VRE_STOR[y, t] += by_rid(y, :eff_up_ac) * EP[:vP_AC_CHARGE][y, t]
end
end
for y in DC_DISCHARGE
- EP[:eELOSS_VRE_STOR][y] -= sum(inputs["omega"][t]*vP_DC_DISCHARGE[y,t]*by_rid(y,:etainverter) for t=1:T)
- for t=1:T
- EP[:eInvACBalance][y,t] += by_rid(y,:etainverter)*vP_DC_DISCHARGE[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vP_DC_DISCHARGE[y,t]
+ EP[:eELOSS_VRE_STOR][y] -= sum(inputs["omega"][t] * vP_DC_DISCHARGE[y, t] *
+ by_rid(y, :etainverter) for t in 1:T)
+ for t in 1:T
+ EP[:eInvACBalance][y, t] += by_rid(y, :etainverter) * vP_DC_DISCHARGE[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vP_DC_DISCHARGE[y, t]
end
for t in INTERIOR_SUBPERIODS
- eSoCBalInterior_VRE_STOR[y,t] -= EP[:vP_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eSoCBalInterior_VRE_STOR[y, t] -= EP[:vP_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
end
for y in DC_CHARGE
- EP[:eELOSS_VRE_STOR][y] += sum(inputs["omega"][t]*vP_DC_CHARGE[y,t]/by_rid(y,:etainverter) for t=1:T)
- for t=1:T
- EP[:eInvACBalance][y,t] -= vP_DC_CHARGE[y,t]/by_rid(y,:etainverter)
- EP[:eInverterExport][y,t] += vP_DC_CHARGE[y,t]/by_rid(y,:etainverter)
+ EP[:eELOSS_VRE_STOR][y] += sum(inputs["omega"][t] * vP_DC_CHARGE[y, t] /
+ by_rid(y, :etainverter) for t in 1:T)
+ for t in 1:T
+ EP[:eInvACBalance][y, t] -= vP_DC_CHARGE[y, t] / by_rid(y, :etainverter)
+ EP[:eInverterExport][y, t] += vP_DC_CHARGE[y, t] / by_rid(y, :etainverter)
end
for t in INTERIOR_SUBPERIODS
- eSoCBalInterior_VRE_STOR[y,t] += by_rid(y,:eff_up_dc)*EP[:vP_DC_CHARGE][y,t]
+ eSoCBalInterior_VRE_STOR[y, t] += by_rid(y, :eff_up_dc) *
+ EP[:vP_DC_CHARGE][y, t]
end
end
for y in AC_DISCHARGE
- EP[:eELOSS_VRE_STOR][y] -= sum(inputs["omega"][t]*vP_AC_DISCHARGE[y,t] for t=1:T)
- for t=1:T
- EP[:eInvACBalance][y,t] += vP_AC_DISCHARGE[y,t]
+ EP[:eELOSS_VRE_STOR][y] -= sum(inputs["omega"][t] * vP_AC_DISCHARGE[y, t]
+ for t in 1:T)
+ for t in 1:T
+ EP[:eInvACBalance][y, t] += vP_AC_DISCHARGE[y, t]
end
for t in INTERIOR_SUBPERIODS
- eSoCBalInterior_VRE_STOR[y,t] -= EP[:vP_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eSoCBalInterior_VRE_STOR[y, t] -= EP[:vP_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
end
for y in AC_CHARGE
- EP[:eELOSS_VRE_STOR][y] += sum(inputs["omega"][t]*vP_AC_CHARGE[y,t] for t=1:T)
- for t=1:T
- EP[:eInvACBalance][y,t] -= vP_AC_CHARGE[y,t]
+ EP[:eELOSS_VRE_STOR][y] += sum(inputs["omega"][t] * vP_AC_CHARGE[y, t] for t in 1:T)
+ for t in 1:T
+ EP[:eInvACBalance][y, t] -= vP_AC_CHARGE[y, t]
end
for t in INTERIOR_SUBPERIODS
- eSoCBalInterior_VRE_STOR[y,t] += by_rid(y,:eff_up_ac)*EP[:vP_AC_CHARGE][y,t]
+ eSoCBalInterior_VRE_STOR[y, t] += by_rid(y, :eff_up_ac) *
+ EP[:vP_AC_CHARGE][y, t]
end
end
- for y in STOR, t=1:T
- EP[:eInvACBalance][y,t] += vCHARGE_VRE_STOR[y,t]
- EP[:eGridExport][y,t] += vCHARGE_VRE_STOR[y,t]
+ for y in STOR, t in 1:T
+ EP[:eInvACBalance][y, t] += vCHARGE_VRE_STOR[y, t]
+ EP[:eGridExport][y, t] += vCHARGE_VRE_STOR[y, t]
end
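+    # Grid charging (vCHARGE_VRE_STOR) adds to the AC-side balance and grid export above and is subtracted from the zonal power balance below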
- for z in 1:Z, t=1:T
- if !isempty(resources_in_zone_by_rid(gen_VRE_STOR,z))
- EP[:ePowerBalance_VRE_STOR][t, z] -= sum(vCHARGE_VRE_STOR[y,t] for y=intersect(resources_in_zone_by_rid(gen_VRE_STOR,z),STOR))
+ for z in 1:Z, t in 1:T
+ if !isempty(resources_in_zone_by_rid(gen_VRE_STOR, z))
+ EP[:ePowerBalance_VRE_STOR][t, z] -= sum(vCHARGE_VRE_STOR[y, t]
+ for y in intersect(resources_in_zone_by_rid(gen_VRE_STOR,
+ z),
+ STOR))
end
end
# 4. Energy Share Requirement & CO2 Policy Module
# From CO2 Policy module
- @expression(EP, eELOSSByZone_VRE_STOR[z=1:Z],
- sum(EP[:eELOSS_VRE_STOR][y] for y in intersect(resources_in_zone_by_rid(gen_VRE_STOR,z),STOR)))
+ @expression(EP, eELOSSByZone_VRE_STOR[z = 1:Z],
+ sum(EP[:eELOSS_VRE_STOR][y]
+ for y in intersect(resources_in_zone_by_rid(gen_VRE_STOR, z), STOR)))
add_similar_to_expression!(EP[:eELOSSByZone], eELOSSByZone_VRE_STOR)
### CONSTRAINTS ###
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapEnergy_VS[y in STOR], EP[:vEXISTINGCAPENERGY_VS][y] == existing_cap_mwh(gen[y]))
- end
+ @constraint(EP,
+ cExistingCapEnergy_VS[y in STOR],
+ EP[:vEXISTINGCAPENERGY_VS][y]==existing_cap_mwh(gen[y]))
+ end
# Constraints 1: Retirements and capacity additions
# Cannot retire more capacity than existing capacity for VRE-STOR technologies
- @constraint(EP, cMaxRet_Stor[y=RET_CAP_STOR], vRETCAPENERGY_VS[y] <= eExistingCapEnergy_VS[y])
+ @constraint(EP,
+ cMaxRet_Stor[y = RET_CAP_STOR],
+ vRETCAPENERGY_VS[y]<=eExistingCapEnergy_VS[y])
# Constraint on maximum capacity (if applicable) [set input to -1 if no constraint on maximum capacity]
- # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasabilty
- @constraint(EP, cMaxCap_Stor[y in intersect(ids_with_nonneg(gen, max_cap_mwh), STOR)],
- eTotalCap_STOR[y] <= max_cap_mwh(gen[y]))
+    # DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is >= Max_Cap_MW and lead to infeasibility
+ @constraint(EP, cMaxCap_Stor[y in intersect(ids_with_nonneg(gen, max_cap_mwh), STOR)],
+ eTotalCap_STOR[y]<=max_cap_mwh(gen[y]))
# Constraint on minimum capacity (if applicable) [set input to -1 if no constraint on minimum capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Cap_MW is <= Min_Cap_MW and lead to infeasabilty
- @constraint(EP, cMinCap_Stor[y in intersect(ids_with_positive(gen, min_cap_mwh), STOR)],
- eTotalCap_STOR[y] >= min_cap_mwh(gen[y]))
+ @constraint(EP, cMinCap_Stor[y in intersect(ids_with_positive(gen, min_cap_mwh), STOR)],
+ eTotalCap_STOR[y]>=min_cap_mwh(gen[y]))
# Constraint 2: SOC Maximum
- @constraint(EP, cSOCMax[y in STOR, t=1:T], vS_VRE_STOR[y,t] <= eTotalCap_STOR[y])
+ @constraint(EP, cSOCMax[y in STOR, t = 1:T], vS_VRE_STOR[y, t]<=eTotalCap_STOR[y])
# Constraint 3: State of Charge (energy stored for the next hour)
@constraint(EP, cSoCBalStart_VRE_STOR[y in CONSTRAINTSET, t in START_SUBPERIODS],
- vS_VRE_STOR[y,t] == eSoCBalStart_VRE_STOR[y,t])
- @constraint(EP, cSoCBalInterior_VRE_STOR[y in STOR, t in INTERIOR_SUBPERIODS],
- vS_VRE_STOR[y,t] == eSoCBalInterior_VRE_STOR[y,t])
+ vS_VRE_STOR[y, t]==eSoCBalStart_VRE_STOR[y, t])
+ @constraint(EP, cSoCBalInterior_VRE_STOR[y in STOR, t in INTERIOR_SUBPERIODS],
+ vS_VRE_STOR[y, t]==eSoCBalInterior_VRE_STOR[y, t])
### SYMMETRIC RESOURCE CONSTRAINTS ###
if !isempty(VS_SYM_DC)
# Constraint 4: Charging + Discharging DC Maximum: see main module because capacity reserve margin/operating reserves may alter constraint
- @expression(EP, eChargeDischargeMaxDC[y in VS_SYM_DC, t=1:T],
- EP[:vP_DC_DISCHARGE][y,t] + EP[:vP_DC_CHARGE][y,t])
+ @expression(EP, eChargeDischargeMaxDC[y in VS_SYM_DC, t = 1:T],
+ EP[:vP_DC_DISCHARGE][y, t]+EP[:vP_DC_CHARGE][y, t])
end
if !isempty(VS_SYM_AC)
# Constraint 4: Charging + Discharging AC Maximum: see main module because capacity reserve margin/operating reserves may alter constraint
- @expression(EP, eChargeDischargeMaxAC[y in VS_SYM_AC, t=1:T],
- EP[:vP_AC_DISCHARGE][y,t] + EP[:vP_AC_CHARGE][y,t])
+ @expression(EP, eChargeDischargeMaxAC[y in VS_SYM_AC, t = 1:T],
+ EP[:vP_AC_DISCHARGE][y, t]+EP[:vP_AC_CHARGE][y, t])
end
### ASYMMETRIC RESOURCE MODULE ###
@@ -1234,7 +1302,6 @@ The rest of the long duration energy storage constraints are copied and applied
long duration energy storage resources are further elaborated upon in ```vre_stor_capres!()```.
"""
function lds_vre_stor!(EP::Model, inputs::Dict)
-
println("VRE-STOR LDS Module")
### LOAD DATA ###
@@ -1244,11 +1311,11 @@ function lds_vre_stor!(EP::Model, inputs::Dict)
gen_VRE_STOR = gen.VreStorage
REP_PERIOD = inputs["REP_PERIOD"] # Number of representative periods
- dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
- NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
+ dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
+ NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
hours_per_subperiod = inputs["hours_per_subperiod"] #total number of hours per subperiod
- MODELED_PERIODS_INDEX = 1:NPeriods
- REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!,:Rep_Period] .== MODELED_PERIODS_INDEX]
+ MODELED_PERIODS_INDEX = 1:NPeriods
+ REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!, :Rep_Period] .== MODELED_PERIODS_INDEX]
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
@@ -1259,57 +1326,73 @@ function lds_vre_stor!(EP::Model, inputs::Dict)
vSOCw_VRE_STOR[y in VS_LDS, n in MODELED_PERIODS_INDEX] >= 0
# Build up in storage inventory over each representative period w (can be pos or neg)
- vdSOC_VRE_STOR[y in VS_LDS, w=1:REP_PERIOD]
+ vdSOC_VRE_STOR[y in VS_LDS, w = 1:REP_PERIOD]
end)
### EXPRESSIONS ###
# Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @expression(EP, eVreStorSoCBalLongDurationStorageStart[y in VS_LDS, w=1:REP_PERIOD],
- (1-self_discharge(gen[y])) * (EP[:vS_VRE_STOR][y,hours_per_subperiod*w]-EP[:vdSOC_VRE_STOR][y,w]))
-
+ @expression(EP, eVreStorSoCBalLongDurationStorageStart[y in VS_LDS, w = 1:REP_PERIOD],
+ (1 -
+ self_discharge(gen[y]))*(EP[:vS_VRE_STOR][y, hours_per_subperiod * w] -
+ EP[:vdSOC_VRE_STOR][y, w]))
+
DC_DISCHARGE_CONSTRAINTSET = intersect(inputs["VS_STOR_DC_DISCHARGE"], VS_LDS)
DC_CHARGE_CONSTRAINTSET = intersect(inputs["VS_STOR_DC_CHARGE"], VS_LDS)
AC_DISCHARGE_CONSTRAINTSET = intersect(inputs["VS_STOR_AC_DISCHARGE"], VS_LDS)
AC_CHARGE_CONSTRAINTSET = intersect(inputs["VS_STOR_AC_CHARGE"], VS_LDS)
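+    # Adjust the start-of-period balance by charging/discharging in the first hour of each representative period, t = hours_per_subperiod*(w-1) + 1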
- for w=1:REP_PERIOD
+ for w in 1:REP_PERIOD
for y in DC_DISCHARGE_CONSTRAINTSET
- EP[:eVreStorSoCBalLongDurationStorageStart][y,w] -= EP[:vP_DC_DISCHARGE][y,hours_per_subperiod*(w-1)+1]/by_rid(y,:eff_down_dc)
+ EP[:eVreStorSoCBalLongDurationStorageStart][y, w] -= EP[:vP_DC_DISCHARGE][y,
+ hours_per_subperiod * (w - 1) + 1] / by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE_CONSTRAINTSET
- EP[:eVreStorSoCBalLongDurationStorageStart][y,w] += by_rid(y,:eff_up_dc)*EP[:vP_DC_CHARGE][y,hours_per_subperiod*(w-1)+1]
+ EP[:eVreStorSoCBalLongDurationStorageStart][y, w] += by_rid(y, :eff_up_dc) *
+ EP[:vP_DC_CHARGE][y,
+ hours_per_subperiod * (w - 1) + 1]
end
for y in AC_DISCHARGE_CONSTRAINTSET
- EP[:eVreStorSoCBalLongDurationStorageStart][y,w] -= EP[:vP_AC_DISCHARGE][y,hours_per_subperiod*(w-1)+1]/by_rid(y,:eff_down_ac)
+ EP[:eVreStorSoCBalLongDurationStorageStart][y, w] -= EP[:vP_AC_DISCHARGE][y,
+ hours_per_subperiod * (w - 1) + 1] / by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE_CONSTRAINTSET
- EP[:eVreStorSoCBalLongDurationStorageStart][y,w] += by_rid(y,:eff_up_ac)*EP[:vP_AC_CHARGE][y,hours_per_subperiod*(w-1)+1]
+ EP[:eVreStorSoCBalLongDurationStorageStart][y, w] += by_rid(y, :eff_up_ac) *
+ EP[:vP_AC_CHARGE][y,
+ hours_per_subperiod * (w - 1) + 1]
end
end
### CONSTRAINTS ###
# Constraint 1: Link the state of charge between the start of periods for LDS resources
- @constraint(EP, cVreStorSoCBalLongDurationStorageStart[y in VS_LDS, w=1:REP_PERIOD],
- EP[:vS_VRE_STOR][y,hours_per_subperiod*(w-1)+1] == EP[:eVreStorSoCBalLongDurationStorageStart][y,w])
+ @constraint(EP, cVreStorSoCBalLongDurationStorageStart[y in VS_LDS, w = 1:REP_PERIOD],
+ EP[:vS_VRE_STOR][y,
+ hours_per_subperiod * (w - 1) + 1]==EP[:eVreStorSoCBalLongDurationStorageStart][y, w])
# Constraint 2: Storage at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
# Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cVreStorSoCBalLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
- EP[:vSOCw_VRE_STOR][y,mod1(r+1, NPeriods)] == EP[:vSOCw_VRE_STOR][y,r] + EP[:vdSOC_VRE_STOR][y,dfPeriodMap[r,:Rep_Period_Index]])
+ @constraint(EP,
+ cVreStorSoCBalLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
+ EP[:vSOCw_VRE_STOR][y,
+ mod1(r + 1, NPeriods)]==EP[:vSOCw_VRE_STOR][y, r] +
+ EP[:vdSOC_VRE_STOR][y, dfPeriodMap[r, :Rep_Period_Index]])
# Constraint 3: Storage at beginning of each modeled period cannot exceed installed energy capacity
- @constraint(EP, cVreStorSoCBalLongDurationStorageUpper[y in VS_LDS, r in MODELED_PERIODS_INDEX],
- EP[:vSOCw_VRE_STOR][y,r] <= EP[:eTotalCap_STOR][y])
+ @constraint(EP,
+ cVreStorSoCBalLongDurationStorageUpper[y in VS_LDS, r in MODELED_PERIODS_INDEX],
+ EP[:vSOCw_VRE_STOR][y, r]<=EP[:eTotalCap_STOR][y])
# Constraint 4: Initial storage level for representative periods must also adhere to sub-period storage inventory balance
# Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cVreStorSoCBalLongDurationStorageSub[y in VS_LDS, r in REP_PERIODS_INDEX],
- EP[:vSOCw_VRE_STOR][y,r] == EP[:vS_VRE_STOR][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]]
- - EP[:vdSOC_VRE_STOR][y,dfPeriodMap[r,:Rep_Period_Index]])
+ @constraint(EP,
+ cVreStorSoCBalLongDurationStorageSub[y in VS_LDS, r in REP_PERIODS_INDEX],
+ EP[:vSOCw_VRE_STOR][y,
+ r]==EP[:vS_VRE_STOR][y, hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]]
+ -
+ EP[:vdSOC_VRE_STOR][y, dfPeriodMap[r, :Rep_Period_Index]])
end
@doc raw"""
@@ -1437,7 +1520,6 @@ In addition, this function adds investment and fixed O&M costs related to charge
```
"""
function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Charge Investment Module")
### LOAD INPUTS ###
@@ -1464,8 +1546,11 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
if !isempty(VS_ASYM_DC_DISCHARGE)
- MAX_DC_DISCHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_discharge_dc_mw), VS_ASYM_DC_DISCHARGE)
- MIN_DC_DISCHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_discharge_dc_mw), VS_ASYM_DC_DISCHARGE)
+ MAX_DC_DISCHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_discharge_dc_mw),
+ VS_ASYM_DC_DISCHARGE)
+ MIN_DC_DISCHARGE = intersect(ids_with_positive(gen_VRE_STOR,
+ min_cap_discharge_dc_mw),
+ VS_ASYM_DC_DISCHARGE)
### VARIABLES ###
@variables(EP, begin
@@ -1474,47 +1559,53 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPDISCHARGEDC[y in VS_ASYM_DC_DISCHARGE] >= 0);
+ @variable(EP, vEXISTINGCAPDISCHARGEDC[y in VS_ASYM_DC_DISCHARGE]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE], vEXISTINGCAPDISCHARGEDC[y])
+ @expression(EP,
+ eExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE],
+ vEXISTINGCAPDISCHARGEDC[y])
else
- @expression(EP, eExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE], by_rid(y,:existing_cap_discharge_dc_mw))
+ @expression(EP,
+ eExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE],
+ by_rid(y, :existing_cap_discharge_dc_mw))
end
# 1. Total storage discharge DC capacity
@expression(EP, eTotalCapDischarge_DC[y in VS_ASYM_DC_DISCHARGE],
if (y in intersect(NEW_CAP_DISCHARGE_DC, RET_CAP_DISCHARGE_DC))
- eExistingCapDischargeDC[y] + EP[:vCAPDISCHARGE_DC][y] - EP[:vRETCAPDISCHARGE_DC][y]
+ eExistingCapDischargeDC[y] + EP[:vCAPDISCHARGE_DC][y] -
+ EP[:vRETCAPDISCHARGE_DC][y]
elseif (y in setdiff(NEW_CAP_DISCHARGE_DC, RET_CAP_DISCHARGE_DC))
eExistingCapDischargeDC[y] + EP[:vCAPDISCHARGE_DC][y]
elseif (y in setdiff(RET_CAP_DISCHARGE_DC, NEW_CAP_DISCHARGE_DC))
eExistingCapDischargeDC[y] - EP[:vRETCAPDISCHARGE_DC][y]
else
eExistingCapDischargeDC[y]
- end
- )
+ end)
# 2. Objective Function Additions
# If resource is not eligible for new discharge DC capacity, fixed costs are only O&M costs
@expression(EP, eCFixDischarge_DC[y in VS_ASYM_DC_DISCHARGE],
if y in NEW_CAP_DISCHARGE_DC # Resources eligible for new discharge DC capacity
- by_rid(y,:inv_cost_discharge_dc_per_mwyr)*vCAPDISCHARGE_DC[y] + by_rid(y,:fixed_om_cost_discharge_dc_per_mwyr)*eTotalCapDischarge_DC[y]
+ by_rid(y, :inv_cost_discharge_dc_per_mwyr) * vCAPDISCHARGE_DC[y] +
+ by_rid(y, :fixed_om_cost_discharge_dc_per_mwyr) * eTotalCapDischarge_DC[y]
else
- by_rid(y,:fixed_om_cost_discharge_dc_per_mwyr)*eTotalCapDischarge_DC[y]
- end
- )
-
+ by_rid(y, :fixed_om_cost_discharge_dc_per_mwyr) * eTotalCapDischarge_DC[y]
+ end)
+
# Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixDischarge_DC, sum(EP[:eCFixDischarge_DC][y] for y in VS_ASYM_DC_DISCHARGE))
+ @expression(EP,
+ eTotalCFixDischarge_DC,
+ sum(EP[:eCFixDischarge_DC][y] for y in VS_ASYM_DC_DISCHARGE))
if MultiStage == 1
- EP[:eObj] += eTotalCFixDischarge_DC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixDischarge_DC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixDischarge_DC
end
@@ -1523,29 +1614,41 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE], EP[:vEXISTINGCAPDISCHARGEDC][y] == by_rid(y,:existing_cap_discharge_dc_mw))
+ @constraint(EP,
+ cExistingCapDischargeDC[y in VS_ASYM_DC_DISCHARGE],
+ EP[:vEXISTINGCAPDISCHARGEDC][y]==by_rid(y, :existing_cap_discharge_dc_mw))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more discharge DC capacity than existing discharge capacity
- @constraint(EP, cVreStorMaxRetDischargeDC[y in RET_CAP_DISCHARGE_DC], vRETCAPDISCHARGE_DC[y] <= eExistingCapDischargeDC[y])
+ @constraint(EP,
+ cVreStorMaxRetDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ vRETCAPDISCHARGE_DC[y]<=eExistingCapDischargeDC[y])
# Constraint on maximum discharge DC capacity (if applicable) [set input to -1 if no constraint on maximum discharge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMaxCapDischargeDC[y in MAX_DC_DISCHARGE], eTotalCapDischarge_DC[y] <= by_rid(y,:Max_Cap_Discharge_DC_MW))
+ @constraint(EP,
+ cVreStorMaxCapDischargeDC[y in MAX_DC_DISCHARGE],
+            eTotalCapDischarge_DC[y]<=by_rid(y, :max_cap_discharge_dc_mw))
# Constraint on minimum discharge DC capacity (if applicable) [set input to -1 if no constraint on minimum discharge capacity]
# DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasabilty
- @constraint(EP, cVreStorMinCapDischargeDC[y in MIN_DC_DISCHARGE], eTotalCapDischarge_DC[y] >= by_rid(y,:Min_Cap_Discharge_DC_MW))
+ @constraint(EP,
+ cVreStorMinCapDischargeDC[y in MIN_DC_DISCHARGE],
+            eTotalCapDischarge_DC[y]>=by_rid(y, :min_cap_discharge_dc_mw))
# Constraint 2: Maximum discharging must be less than discharge power rating
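+        # The cap itself (eVreStorMaxDischargingDC <= eTotalCapDischarge_DC) is enforced in the main module, where capacity reserve margin and operating reserve terms may be added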
- @expression(EP, eVreStorMaxDischargingDC[y in VS_ASYM_DC_DISCHARGE, t=1:T], JuMP.AffExpr())
- for y in VS_ASYM_DC_DISCHARGE, t=1:T
- eVreStorMaxDischargingDC[y,t] += EP[:vP_DC_DISCHARGE][y,t]
+ @expression(EP,
+ eVreStorMaxDischargingDC[y in VS_ASYM_DC_DISCHARGE, t = 1:T],
+ JuMP.AffExpr())
+ for y in VS_ASYM_DC_DISCHARGE, t in 1:T
+ eVreStorMaxDischargingDC[y, t] += EP[:vP_DC_DISCHARGE][y, t]
end
end
-
+
if !isempty(VS_ASYM_DC_CHARGE)
- MAX_DC_CHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_charge_dc_mw), VS_ASYM_DC_CHARGE)
- MIN_DC_CHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_charge_dc_mw), VS_ASYM_DC_CHARGE)
+ MAX_DC_CHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_charge_dc_mw),
+ VS_ASYM_DC_CHARGE)
+ MIN_DC_CHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_charge_dc_mw),
+ VS_ASYM_DC_CHARGE)
### VARIABLES ###
@variables(EP, begin
@@ -1554,16 +1657,20 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPCHARGEDC[y in VS_ASYM_DC_CHARGE] >= 0);
+ @variable(EP, vEXISTINGCAPCHARGEDC[y in VS_ASYM_DC_CHARGE]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapChargeDC[y in VS_ASYM_DC_CHARGE], vEXISTINGCAPCHARGEDC[y])
+ @expression(EP,
+ eExistingCapChargeDC[y in VS_ASYM_DC_CHARGE],
+ vEXISTINGCAPCHARGEDC[y])
else
- @expression(EP, eExistingCapChargeDC[y in VS_ASYM_DC_CHARGE], by_rid(y,:existing_cap_charge_dc_mw))
+ @expression(EP,
+ eExistingCapChargeDC[y in VS_ASYM_DC_CHARGE],
+ by_rid(y, :existing_cap_charge_dc_mw))
end
# 1. Total storage charge DC capacity
@@ -1576,25 +1683,26 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
eExistingCapChargeDC[y] - EP[:vRETCAPCHARGE_DC][y]
else
eExistingCapChargeDC[y]
- end
- )
+ end)
# 2. Objective Function Additions
# If resource is not eligible for new charge DC capacity, fixed costs are only O&M costs
@expression(EP, eCFixCharge_DC[y in VS_ASYM_DC_CHARGE],
if y in NEW_CAP_CHARGE_DC # Resources eligible for new charge DC capacity
- by_rid(y,:inv_cost_charge_dc_per_mwyr)*vCAPCHARGE_DC[y] + by_rid(y,:fixed_om_cost_charge_dc_per_mwyr)*eTotalCapCharge_DC[y]
+ by_rid(y, :inv_cost_charge_dc_per_mwyr) * vCAPCHARGE_DC[y] +
+ by_rid(y, :fixed_om_cost_charge_dc_per_mwyr) * eTotalCapCharge_DC[y]
else
- by_rid(y,:fixed_om_cost_charge_dc_per_mwyr)*eTotalCapCharge_DC[y]
- end
- )
-
+ by_rid(y, :fixed_om_cost_charge_dc_per_mwyr) * eTotalCapCharge_DC[y]
+ end)
+
# Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixCharge_DC, sum(EP[:eCFixCharge_DC][y] for y in VS_ASYM_DC_CHARGE))
+ @expression(EP,
+ eTotalCFixCharge_DC,
+ sum(EP[:eCFixCharge_DC][y] for y in VS_ASYM_DC_CHARGE))
if MultiStage == 1
- EP[:eObj] += eTotalCFixCharge_DC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixCharge_DC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixCharge_DC
end
@@ -1603,29 +1711,42 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapChargeDC[y in VS_ASYM_DC_CHARGE], EP[:vEXISTINGCAPCHARGEDC][y] == by_rid(y,:Existing_Cap_Charge_DC_MW))
+ @constraint(EP,
+ cExistingCapChargeDC[y in VS_ASYM_DC_CHARGE],
+ EP[:vEXISTINGCAPCHARGEDC][y]==by_rid(y, :Existing_Cap_Charge_DC_MW))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more charge DC capacity than existing charge capacity
- @constraint(EP, cVreStorMaxRetChargeDC[y in RET_CAP_CHARGE_DC], vRETCAPCHARGE_DC[y] <= eExistingCapChargeDC[y])
+ @constraint(EP,
+ cVreStorMaxRetChargeDC[y in RET_CAP_CHARGE_DC],
+ vRETCAPCHARGE_DC[y]<=eExistingCapChargeDC[y])
# Constraint on maximum charge DC capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
        # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasibility
- @constraint(EP, cVreStorMaxCapChargeDC[y in MAX_DC_CHARGE], eTotalCapCharge_DC[y] <= by_rid(y,:max_cap_charge_dc_mw))
+ @constraint(EP,
+ cVreStorMaxCapChargeDC[y in MAX_DC_CHARGE],
+ eTotalCapCharge_DC[y]<=by_rid(y, :max_cap_charge_dc_mw))
# Constraint on minimum charge DC capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
        # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasibility
- @constraint(EP, cVreStorMinCapChargeDC[y in MIN_DC_CHARGE], eTotalCapCharge_DC[y] >= by_rid(y,:min_cap_charge_dc_mw))
+ @constraint(EP,
+ cVreStorMinCapChargeDC[y in MIN_DC_CHARGE],
+ eTotalCapCharge_DC[y]>=by_rid(y, :min_cap_charge_dc_mw))
# Constraint 2: Maximum charging must be less than charge power rating
- @expression(EP, eVreStorMaxChargingDC[y in VS_ASYM_DC_CHARGE, t=1:T], JuMP.AffExpr())
- for y in VS_ASYM_DC_CHARGE, t=1:T
- eVreStorMaxChargingDC[y,t] += EP[:vP_DC_CHARGE][y,t]
+ @expression(EP,
+ eVreStorMaxChargingDC[y in VS_ASYM_DC_CHARGE, t = 1:T],
+ JuMP.AffExpr())
+ for y in VS_ASYM_DC_CHARGE, t in 1:T
+ eVreStorMaxChargingDC[y, t] += EP[:vP_DC_CHARGE][y, t]
end
end
if !isempty(VS_ASYM_AC_DISCHARGE)
- MAX_AC_DISCHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_discharge_ac_mw), VS_ASYM_AC_DISCHARGE)
- MIN_AC_DISCHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_discharge_ac_mw), VS_ASYM_AC_DISCHARGE)
+ MAX_AC_DISCHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_discharge_ac_mw),
+ VS_ASYM_AC_DISCHARGE)
+ MIN_AC_DISCHARGE = intersect(ids_with_positive(gen_VRE_STOR,
+ min_cap_discharge_ac_mw),
+ VS_ASYM_AC_DISCHARGE)
### VARIABLES ###
@variables(EP, begin
@@ -1634,47 +1755,53 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPDISCHARGEAC[y in VS_ASYM_AC_DISCHARGE] >= 0);
+ @variable(EP, vEXISTINGCAPDISCHARGEAC[y in VS_ASYM_AC_DISCHARGE]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE], vEXISTINGCAPDISCHARGEAC[y])
+ @expression(EP,
+ eExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE],
+ vEXISTINGCAPDISCHARGEAC[y])
else
- @expression(EP, eExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE], by_rid(y,:existing_cap_discharge_ac_mw))
+ @expression(EP,
+ eExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE],
+ by_rid(y, :existing_cap_discharge_ac_mw))
end
# 1. Total storage discharge AC capacity
@expression(EP, eTotalCapDischarge_AC[y in VS_ASYM_AC_DISCHARGE],
if (y in intersect(NEW_CAP_DISCHARGE_AC, RET_CAP_DISCHARGE_AC))
- eExistingCapDischargeAC[y] + EP[:vCAPDISCHARGE_AC][y] - EP[:vRETCAPDISCHARGE_AC][y]
+ eExistingCapDischargeAC[y] + EP[:vCAPDISCHARGE_AC][y] -
+ EP[:vRETCAPDISCHARGE_AC][y]
elseif (y in setdiff(NEW_CAP_DISCHARGE_AC, RET_CAP_DISCHARGE_AC))
eExistingCapDischargeAC[y] + EP[:vCAPDISCHARGE_AC][y]
elseif (y in setdiff(RET_CAP_DISCHARGE_AC, NEW_CAP_DISCHARGE_AC))
eExistingCapDischargeAC[y] - EP[:vRETCAPDISCHARGE_AC][y]
else
eExistingCapDischargeAC[y]
- end
- )
+ end)
# 2. Objective Function Additions
# If resource is not eligible for new discharge AC capacity, fixed costs are only O&M costs
@expression(EP, eCFixDischarge_AC[y in VS_ASYM_AC_DISCHARGE],
if y in NEW_CAP_DISCHARGE_AC # Resources eligible for new discharge AC capacity
- by_rid(y,:inv_cost_discharge_ac_per_mwyr)*vCAPDISCHARGE_AC[y] + by_rid(y,:fixed_om_cost_discharge_ac_per_mwyr)*eTotalCapDischarge_AC[y]
+ by_rid(y, :inv_cost_discharge_ac_per_mwyr) * vCAPDISCHARGE_AC[y] +
+ by_rid(y, :fixed_om_cost_discharge_ac_per_mwyr) * eTotalCapDischarge_AC[y]
else
- by_rid(y,:fixed_om_cost_discharge_ac_per_mwyr)*eTotalCapDischarge_AC[y]
- end
- )
-
+ by_rid(y, :fixed_om_cost_discharge_ac_per_mwyr) * eTotalCapDischarge_AC[y]
+ end)
+
# Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixDischarge_AC, sum(EP[:eCFixDischarge_AC][y] for y in VS_ASYM_AC_DISCHARGE))
+ @expression(EP,
+ eTotalCFixDischarge_AC,
+ sum(EP[:eCFixDischarge_AC][y] for y in VS_ASYM_AC_DISCHARGE))
if MultiStage == 1
- EP[:eObj] += eTotalCFixDischarge_AC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixDischarge_AC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixDischarge_AC
end
@@ -1683,29 +1810,41 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE], EP[:vEXISTINGCAPDISCHARGEAC][y] == by_rid(y,:existing_cap_discharge_ac_mw))
+ @constraint(EP,
+ cExistingCapDischargeAC[y in VS_ASYM_AC_DISCHARGE],
+ EP[:vEXISTINGCAPDISCHARGEAC][y]==by_rid(y, :existing_cap_discharge_ac_mw))
end
# Constraints 1: Retirements and capacity additions
        # Cannot retire more discharge AC capacity than existing discharge capacity
- @constraint(EP, cVreStorMaxRetDischargeAC[y in RET_CAP_DISCHARGE_AC], vRETCAPDISCHARGE_AC[y] <= eExistingCapDischargeAC[y])
+ @constraint(EP,
+ cVreStorMaxRetDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ vRETCAPDISCHARGE_AC[y]<=eExistingCapDischargeAC[y])
        # Constraint on maximum discharge AC capacity (if applicable) [set input to -1 if no constraint on maximum discharge capacity]
        # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasibility
- @constraint(EP, cVreStorMaxCapDischargeAC[y in MAX_AC_DISCHARGE], eTotalCapDischarge_AC[y] <= by_rid(y,:max_cap_discharge_ac_mw))
+ @constraint(EP,
+ cVreStorMaxCapDischargeAC[y in MAX_AC_DISCHARGE],
+ eTotalCapDischarge_AC[y]<=by_rid(y, :max_cap_discharge_ac_mw))
        # Constraint on minimum discharge AC capacity (if applicable) [set input to -1 if no constraint on minimum discharge capacity]
        # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasibility
- @constraint(EP, cVreStorMinCapDischargeAC[y in MIN_AC_DISCHARGE], eTotalCapDischarge_AC[y] >= by_rid(y,:min_cap_discharge_ac_mw))
+ @constraint(EP,
+ cVreStorMinCapDischargeAC[y in MIN_AC_DISCHARGE],
+ eTotalCapDischarge_AC[y]>=by_rid(y, :min_cap_discharge_ac_mw))
# Constraint 2: Maximum discharging rate must be less than discharge power rating
- @expression(EP, eVreStorMaxDischargingAC[y in VS_ASYM_AC_DISCHARGE, t=1:T], JuMP.AffExpr())
- for y in VS_ASYM_AC_DISCHARGE, t=1:T
- eVreStorMaxDischargingAC[y,t] += EP[:vP_AC_DISCHARGE][y,t]
+ @expression(EP,
+ eVreStorMaxDischargingAC[y in VS_ASYM_AC_DISCHARGE, t = 1:T],
+ JuMP.AffExpr())
+ for y in VS_ASYM_AC_DISCHARGE, t in 1:T
+ eVreStorMaxDischargingAC[y, t] += EP[:vP_AC_DISCHARGE][y, t]
end
end
if !isempty(VS_ASYM_AC_CHARGE)
- MAX_AC_CHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_charge_ac_mw), VS_ASYM_AC_CHARGE)
- MIN_AC_CHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_charge_ac_mw), VS_ASYM_AC_CHARGE)
+ MAX_AC_CHARGE = intersect(ids_with_nonneg(gen_VRE_STOR, max_cap_charge_ac_mw),
+ VS_ASYM_AC_CHARGE)
+ MIN_AC_CHARGE = intersect(ids_with_positive(gen_VRE_STOR, min_cap_charge_ac_mw),
+ VS_ASYM_AC_CHARGE)
### VARIABLES ###
@variables(EP, begin
@@ -1714,16 +1853,20 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
end)
if MultiStage == 1
- @variable(EP, vEXISTINGCAPCHARGEAC[y in VS_ASYM_AC_CHARGE] >= 0);
+ @variable(EP, vEXISTINGCAPCHARGEAC[y in VS_ASYM_AC_CHARGE]>=0)
end
### EXPRESSIONS ###
# 0. Multistage existing capacity definition
if MultiStage == 1
- @expression(EP, eExistingCapChargeAC[y in VS_ASYM_AC_CHARGE], vEXISTINGCAPCHARGEAC[y])
+ @expression(EP,
+ eExistingCapChargeAC[y in VS_ASYM_AC_CHARGE],
+ vEXISTINGCAPCHARGEAC[y])
else
- @expression(EP, eExistingCapChargeAC[y in VS_ASYM_AC_CHARGE], by_rid(y,:existing_cap_charge_ac_mw))
+ @expression(EP,
+ eExistingCapChargeAC[y in VS_ASYM_AC_CHARGE],
+ by_rid(y, :existing_cap_charge_ac_mw))
end
# 1. Total storage charge AC capacity
@@ -1736,25 +1879,26 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
eExistingCapChargeAC[y] - EP[:vRETCAPCHARGE_AC][y]
else
eExistingCapChargeAC[y]
- end
- )
+ end)
# 2. Objective Function Additions
# If resource is not eligible for new charge AC capacity, fixed costs are only O&M costs
@expression(EP, eCFixCharge_AC[y in VS_ASYM_AC_CHARGE],
if y in NEW_CAP_CHARGE_AC # Resources eligible for new charge AC capacity
- by_rid(y,:inv_cost_charge_ac_per_mwyr)*vCAPCHARGE_AC[y] + by_rid(y,:fixed_om_cost_charge_ac_per_mwyr)*eTotalCapCharge_AC[y]
+ by_rid(y, :inv_cost_charge_ac_per_mwyr) * vCAPCHARGE_AC[y] +
+ by_rid(y, :fixed_om_cost_charge_ac_per_mwyr) * eTotalCapCharge_AC[y]
else
- by_rid(y,:fixed_om_cost_charge_ac_per_mwyr)*eTotalCapCharge_AC[y]
- end
- )
-
+ by_rid(y, :fixed_om_cost_charge_ac_per_mwyr) * eTotalCapCharge_AC[y]
+ end)
+
# Sum individual resource contributions to fixed costs to get total fixed costs
- @expression(EP, eTotalCFixCharge_AC, sum(EP[:eCFixCharge_AC][y] for y in VS_ASYM_AC_CHARGE))
+ @expression(EP,
+ eTotalCFixCharge_AC,
+ sum(EP[:eCFixCharge_AC][y] for y in VS_ASYM_AC_CHARGE))
if MultiStage == 1
- EP[:eObj] += eTotalCFixCharge_AC/inputs["OPEXMULT"]
+ EP[:eObj] += eTotalCFixCharge_AC / inputs["OPEXMULT"]
else
EP[:eObj] += eTotalCFixCharge_AC
end
@@ -1763,23 +1907,33 @@ function investment_charge_vre_stor!(EP::Model, inputs::Dict, setup::Dict)
# Constraint 0: Existing capacity variable is equal to existing capacity specified in the input file
if MultiStage == 1
- @constraint(EP, cExistingCapChargeAC[y in VS_ASYM_AC_CHARGE], EP[:vEXISTINGCAPCHARGEAC][y] == by_rid(y,:existing_cap_charge_ac_mw))
+ @constraint(EP,
+ cExistingCapChargeAC[y in VS_ASYM_AC_CHARGE],
+ EP[:vEXISTINGCAPCHARGEAC][y]==by_rid(y, :existing_cap_charge_ac_mw))
end
# Constraints 1: Retirements and capacity additions
# Cannot retire more charge AC capacity than existing charge capacity
- @constraint(EP, cVreStorMaxRetChargeAC[y in RET_CAP_CHARGE_AC], vRETCAPCHARGE_AC[y] <= eExistingCapChargeAC[y])
+ @constraint(EP,
+ cVreStorMaxRetChargeAC[y in RET_CAP_CHARGE_AC],
+ vRETCAPCHARGE_AC[y]<=eExistingCapChargeAC[y])
# Constraint on maximum charge AC capacity (if applicable) [set input to -1 if no constraint on maximum charge capacity]
        # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is >= Max_Charge_Cap_MWh and lead to infeasibility
- @constraint(EP, cVreStorMaxCapChargeAC[y in MAX_AC_CHARGE], eTotalCapCharge_AC[y] <= by_rid(y,:max_cap_charge_ac_mw))
+ @constraint(EP,
+ cVreStorMaxCapChargeAC[y in MAX_AC_CHARGE],
+ eTotalCapCharge_AC[y]<=by_rid(y, :max_cap_charge_ac_mw))
# Constraint on minimum charge AC capacity (if applicable) [set input to -1 if no constraint on minimum charge capacity]
        # DEV NOTE: This constraint may be violated in some cases where Existing_Charge_Cap_MW is <= Min_Charge_Cap_MWh and lead to infeasibility
- @constraint(EP, cVreStorMinCapChargeAC[y in MIN_AC_CHARGE], eTotalCapCharge_AC[y] >= by_rid(y,:min_cap_charge_ac_mw))
+ @constraint(EP,
+ cVreStorMinCapChargeAC[y in MIN_AC_CHARGE],
+ eTotalCapCharge_AC[y]>=by_rid(y, :min_cap_charge_ac_mw))
# Constraint 2: Maximum charging rate must be less than charge power rating
- @expression(EP, eVreStorMaxChargingAC[y in VS_ASYM_AC_CHARGE, t=1:T], JuMP.AffExpr())
- for y in VS_ASYM_AC_CHARGE, t=1:T
- eVreStorMaxChargingAC[y,t] += EP[:vP_AC_CHARGE][y,t]
+ @expression(EP,
+ eVreStorMaxChargingAC[y in VS_ASYM_AC_CHARGE, t = 1:T],
+ JuMP.AffExpr())
+ for y in VS_ASYM_AC_CHARGE, t in 1:T
+ eVreStorMaxChargingAC[y, t] += EP[:vP_AC_CHARGE][y, t]
end
end
end
@@ -1858,7 +2012,6 @@ All other constraints are identical to those used to track the actual state of c
state of charge, build up storage inventory and state of charge at the beginning of each period.
"""
function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Capacity Reserve Margin Module")
### LOAD DATA ###
@@ -1880,32 +2033,32 @@ function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
VS_LDS = inputs["VS_LDS"]
START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
- hours_per_subperiod = inputs["hours_per_subperiod"] # total number of hours per subperiod
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ hours_per_subperiod = inputs["hours_per_subperiod"] # total number of hours per subperiod
rep_periods = inputs["REP_PERIOD"]
virtual_discharge_cost = inputs["VirtualChargeDischargeCost"]
StorageVirtualDischarge = setup["StorageVirtualDischarge"]
-
+
by_rid(rid, sym) = by_rid_res(rid, sym, gen_VRE_STOR)
-
+
### VARIABLES ###
@variables(EP, begin
# Virtual DC discharge contributing to capacity reserves at timestep t for VRE-storage cluster y
- vCAPRES_DC_DISCHARGE[y in DC_DISCHARGE, t=1:T] >= 0
+ vCAPRES_DC_DISCHARGE[y in DC_DISCHARGE, t = 1:T] >= 0
# Virtual AC discharge contributing to capacity reserves at timestep t for VRE-storage cluster y
- vCAPRES_AC_DISCHARGE[y in AC_DISCHARGE, t=1:T] >= 0
+ vCAPRES_AC_DISCHARGE[y in AC_DISCHARGE, t = 1:T] >= 0
# Virtual DC charge contributing to capacity reserves at timestep t for VRE-storage cluster y
- vCAPRES_DC_CHARGE[y in DC_CHARGE, t=1:T] >= 0
+ vCAPRES_DC_CHARGE[y in DC_CHARGE, t = 1:T] >= 0
# Virtual AC charge contributing to capacity reserves at timestep t for VRE-storage cluster y
- vCAPRES_AC_CHARGE[y in AC_CHARGE, t=1:T] >= 0
+ vCAPRES_AC_CHARGE[y in AC_CHARGE, t = 1:T] >= 0
# Total state of charge being held in reserve at timestep t for VRE-storage cluster y
- vCAPRES_VS_VRE_STOR[y in STOR, t=1:T] >= 0
+ vCAPRES_VS_VRE_STOR[y in STOR, t = 1:T] >= 0
end)
### EXPRESSIONS ###
@@ -1921,11 +2074,13 @@ function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
# Virtual State of Charge Expressions
@expression(EP, eVreStorVSoCBalStart[y in CONSTRAINTSET, t in START_SUBPERIODS],
- EP[:vCAPRES_VS_VRE_STOR][y,t+hours_per_subperiod-1]
- - self_discharge(gen[y])*EP[:vCAPRES_VS_VRE_STOR][y,t+hours_per_subperiod-1])
+ EP[:vCAPRES_VS_VRE_STOR][y,
+ t + hours_per_subperiod - 1]
+ -self_discharge(gen[y]) * EP[:vCAPRES_VS_VRE_STOR][y, t + hours_per_subperiod - 1])
@expression(EP, eVreStorVSoCBalInterior[y in STOR, t in INTERIOR_SUBPERIODS],
- EP[:vCAPRES_VS_VRE_STOR][y,t-1]
- - self_discharge(gen[y])*EP[:vCAPRES_VS_VRE_STOR][y,t-1])
+ EP[:vCAPRES_VS_VRE_STOR][y,
+ t - 1]
+ -self_discharge(gen[y]) * EP[:vCAPRES_VS_VRE_STOR][y, t - 1])
DC_DISCHARGE_CONSTRAINTSET = intersect(CONSTRAINTSET, DC_DISCHARGE)
DC_CHARGE_CONSTRAINTSET = intersect(CONSTRAINTSET, DC_CHARGE)
@@ -1933,132 +2088,184 @@ function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
AC_CHARGE_CONSTRAINTSET = intersect(CONSTRAINTSET, AC_CHARGE)
for t in START_SUBPERIODS
for y in DC_DISCHARGE_CONSTRAINTSET
- eVreStorVSoCBalStart[y,t] += EP[:vCAPRES_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eVreStorVSoCBalStart[y, t] += EP[:vCAPRES_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE_CONSTRAINTSET
- eVreStorVSoCBalStart[y,t] -= by_rid(y,:eff_up_dc)*EP[:vCAPRES_DC_CHARGE][y,t]
+ eVreStorVSoCBalStart[y, t] -= by_rid(y, :eff_up_dc) *
+ EP[:vCAPRES_DC_CHARGE][y, t]
end
for y in AC_DISCHARGE_CONSTRAINTSET
- eVreStorVSoCBalStart[y,t] += EP[:vCAPRES_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eVreStorVSoCBalStart[y, t] += EP[:vCAPRES_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE_CONSTRAINTSET
- eVreStorVSoCBalStart[y,t] -= by_rid(y,:eff_up_ac)*EP[:vCAPRES_AC_CHARGE][y,t]
+ eVreStorVSoCBalStart[y, t] -= by_rid(y, :eff_up_ac) *
+ EP[:vCAPRES_AC_CHARGE][y, t]
end
end
for t in INTERIOR_SUBPERIODS
for y in DC_DISCHARGE
- eVreStorVSoCBalInterior[y,t] += EP[:vCAPRES_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eVreStorVSoCBalInterior[y, t] += EP[:vCAPRES_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE
- eVreStorVSoCBalInterior[y,t] -= by_rid(y,:eff_up_dc)*EP[:vCAPRES_DC_CHARGE][y,t]
+ eVreStorVSoCBalInterior[y, t] -= by_rid(y, :eff_up_dc) *
+ EP[:vCAPRES_DC_CHARGE][y, t]
end
for y in AC_DISCHARGE
- eVreStorVSoCBalInterior[y,t] += EP[:vCAPRES_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eVreStorVSoCBalInterior[y, t] += EP[:vCAPRES_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE
- eVreStorVSoCBalInterior[y,t] -= by_rid(y,:eff_up_ac)*EP[:vCAPRES_AC_CHARGE][y,t]
+ eVreStorVSoCBalInterior[y, t] -= by_rid(y, :eff_up_ac) *
+ EP[:vCAPRES_AC_CHARGE][y, t]
end
end
# Inverter & grid connection export additions
- for t=1:T
+ for t in 1:T
for y in DC_DISCHARGE
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vCAPRES_DC_DISCHARGE[y,t]
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vCAPRES_DC_DISCHARGE[y,t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) *
+ vCAPRES_DC_DISCHARGE[y, t]
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vCAPRES_DC_DISCHARGE[y, t]
end
for y in DC_CHARGE
- EP[:eInverterExport][y,t] += vCAPRES_DC_CHARGE[y,t]/by_rid(y,:etainverter)
- EP[:eGridExport][y,t] += vCAPRES_DC_CHARGE[y,t]/by_rid(y,:etainverter)
+ EP[:eInverterExport][y, t] += vCAPRES_DC_CHARGE[y, t] / by_rid(y, :etainverter)
+ EP[:eGridExport][y, t] += vCAPRES_DC_CHARGE[y, t] / by_rid(y, :etainverter)
end
for y in AC_DISCHARGE
- EP[:eGridExport][y,t] += vCAPRES_AC_DISCHARGE[y,t]
+ EP[:eGridExport][y, t] += vCAPRES_AC_DISCHARGE[y, t]
end
for y in AC_CHARGE
- EP[:eGridExport][y,t] += vCAPRES_AC_CHARGE[y,t]
+ EP[:eGridExport][y, t] += vCAPRES_AC_CHARGE[y, t]
end
-
- # Asymmetric and symmetric storage contributions
+
+ # Asymmetric and symmetric storage contributions
for y in VS_ASYM_DC_DISCHARGE
- EP[:eVreStorMaxDischargingDC][y,t] += vCAPRES_DC_DISCHARGE[y,t]
+ EP[:eVreStorMaxDischargingDC][y, t] += vCAPRES_DC_DISCHARGE[y, t]
end
for y in VS_ASYM_AC_DISCHARGE
- EP[:eVreStorMaxDischargingAC][y,t] += vCAPRES_AC_DISCHARGE[y,t]
+ EP[:eVreStorMaxDischargingAC][y, t] += vCAPRES_AC_DISCHARGE[y, t]
end
for y in VS_ASYM_DC_CHARGE
- EP[:eVreStorMaxChargingDC][y,t] += vCAPRES_DC_CHARGE[y,t]
+ EP[:eVreStorMaxChargingDC][y, t] += vCAPRES_DC_CHARGE[y, t]
end
for y in VS_ASYM_AC_CHARGE
- EP[:eVreStorMaxChargingAC][y,t] += vCAPRES_AC_CHARGE[y,t]
+ EP[:eVreStorMaxChargingAC][y, t] += vCAPRES_AC_CHARGE[y, t]
end
for y in VS_SYM_DC
- EP[:eChargeDischargeMaxDC][y,t] += (vCAPRES_DC_DISCHARGE[y,t]
- + vCAPRES_DC_CHARGE[y,t])
+ EP[:eChargeDischargeMaxDC][y, t] += (vCAPRES_DC_DISCHARGE[y, t]
+ +
+ vCAPRES_DC_CHARGE[y, t])
end
for y in VS_SYM_AC
- EP[:eChargeDischargeMaxAC][y,t] += (vCAPRES_AC_DISCHARGE[y,t]
- + vCAPRES_AC_CHARGE[y,t])
+ EP[:eChargeDischargeMaxAC][y, t] += (vCAPRES_AC_DISCHARGE[y, t]
+ +
+ vCAPRES_AC_CHARGE[y, t])
end
end
### CONSTRAINTS ###
# Constraint 1: Links energy held in reserve in first time step with decisions in last time step of each subperiod
# We use a modified formulation of this constraint (cVSoCBalLongDurationStorageStart) when modeling multiple representative periods and long duration storage
- @constraint(EP, cVreStorVSoCBalStart[y in CONSTRAINTSET, t in START_SUBPERIODS],
- vCAPRES_VS_VRE_STOR[y,t] == eVreStorVSoCBalStart[y,t])
+ @constraint(EP, cVreStorVSoCBalStart[y in CONSTRAINTSET, t in START_SUBPERIODS],
+ vCAPRES_VS_VRE_STOR[y, t]==eVreStorVSoCBalStart[y, t])
# Energy held in reserve for the next hour
- @constraint(EP, cVreStorVSoCBalInterior[y in STOR, t in INTERIOR_SUBPERIODS],
- vCAPRES_VS_VRE_STOR[y,t] == eVreStorVSoCBalInterior[y,t])
+ @constraint(EP, cVreStorVSoCBalInterior[y in STOR, t in INTERIOR_SUBPERIODS],
+ vCAPRES_VS_VRE_STOR[y, t]==eVreStorVSoCBalInterior[y, t])
# Constraint 2: Energy held in reserve acts as a lower bound on the total energy held in storage
- @constraint(EP, cVreStorSOCMinCapRes[y in STOR, t=1:T], EP[:vS_VRE_STOR][y,t] >= vCAPRES_VS_VRE_STOR[y,t])
+ @constraint(EP,
+ cVreStorSOCMinCapRes[y in STOR, t = 1:T],
+ EP[:vS_VRE_STOR][y, t]>=vCAPRES_VS_VRE_STOR[y, t])
# Constraint 3: Add capacity reserve margin contributions from VRE-STOR resources to capacity reserve margin constraint
- @expression(EP, eCapResMarBalanceStor_VRE_STOR[res=1:inputs["NCapacityReserveMargin"], t=1:T],(
- sum(derating_factor(gen[y],tag=res)*by_rid(y,:etainverter)*inputs["pP_Max_Solar"][y,t]*EP[:eTotalCap_SOLAR][y] for y in inputs["VS_SOLAR"])
- + sum(derating_factor(gen[y],tag=res)*inputs["pP_Max_Wind"][y,t]*EP[:eTotalCap_WIND][y] for y in inputs["VS_WIND"])
- + sum(derating_factor(gen[y],tag=res)*by_rid(y,:etainverter)*(EP[:vP_DC_DISCHARGE][y,t]) for y in DC_DISCHARGE)
- + sum(derating_factor(gen[y],tag=res)*(EP[:vP_AC_DISCHARGE][y,t]) for y in AC_DISCHARGE)
- - sum(derating_factor(gen[y],tag=res)*(EP[:vP_DC_CHARGE][y,t])/by_rid(y,:etainverter) for y in DC_CHARGE)
- - sum(derating_factor(gen[y],tag=res)*(EP[:vP_AC_CHARGE][y,t]) for y in AC_CHARGE)))
+ @expression(EP,
+ eCapResMarBalanceStor_VRE_STOR[res = 1:inputs["NCapacityReserveMargin"], t = 1:T],
+ (sum(derating_factor(gen[y], tag = res) * by_rid(y, :etainverter) *
+ inputs["pP_Max_Solar"][y, t] * EP[:eTotalCap_SOLAR][y]
+ for y in inputs["VS_SOLAR"])
+ +
+ sum(derating_factor(gen[y], tag = res) * inputs["pP_Max_Wind"][y, t] *
+ EP[:eTotalCap_WIND][y] for y in inputs["VS_WIND"])
+ +
+ sum(derating_factor(gen[y], tag = res) * by_rid(y, :etainverter) *
+ (EP[:vP_DC_DISCHARGE][y, t]) for y in DC_DISCHARGE)
+ +
+ sum(derating_factor(gen[y], tag = res) * (EP[:vP_AC_DISCHARGE][y, t])
+ for y in AC_DISCHARGE)
+ -
+ sum(derating_factor(gen[y], tag = res) * (EP[:vP_DC_CHARGE][y, t]) /
+ by_rid(y, :etainverter) for y in DC_CHARGE)
+ -sum(derating_factor(gen[y], tag = res) * (EP[:vP_AC_CHARGE][y, t])
+ for y in AC_CHARGE)))
if StorageVirtualDischarge > 0
- @expression(EP, eCapResMarBalanceStor_VRE_STOR_Virtual[res=1:inputs["NCapacityReserveMargin"], t=1:T],(
- sum(derating_factor(gen[y],tag=res)*by_rid(y,:etainverter)*(vCAPRES_DC_DISCHARGE[y,t]) for y in DC_DISCHARGE)
- + sum(derating_factor(gen[y],tag=res)*(vCAPRES_AC_DISCHARGE[y,t]) for y in AC_DISCHARGE)
- - sum(derating_factor(gen[y],tag=res)*(vCAPRES_DC_CHARGE[y,t])/by_rid(y,:etainverter) for y in DC_CHARGE)
- - sum(derating_factor(gen[y],tag=res)*(vCAPRES_AC_CHARGE[y,t]) for y in AC_CHARGE)))
- add_similar_to_expression!(eCapResMarBalanceStor_VRE_STOR,eCapResMarBalanceStor_VRE_STOR_Virtual)
+ @expression(EP,
+ eCapResMarBalanceStor_VRE_STOR_Virtual[res = 1:inputs["NCapacityReserveMargin"],
+ t = 1:T],
+ (sum(derating_factor(gen[y], tag = res) * by_rid(y, :etainverter) *
+ (vCAPRES_DC_DISCHARGE[y, t]) for y in DC_DISCHARGE)
+ +
+ sum(derating_factor(gen[y], tag = res) * (vCAPRES_AC_DISCHARGE[y, t])
+ for y in AC_DISCHARGE)
+ -
+ sum(derating_factor(gen[y], tag = res) * (vCAPRES_DC_CHARGE[y, t]) /
+ by_rid(y, :etainverter) for y in DC_CHARGE)
+ -sum(derating_factor(gen[y], tag = res) * (vCAPRES_AC_CHARGE[y, t])
+ for y in AC_CHARGE)))
+ add_similar_to_expression!(eCapResMarBalanceStor_VRE_STOR,
+ eCapResMarBalanceStor_VRE_STOR_Virtual)
end
EP[:eCapResMarBalance] += EP[:eCapResMarBalanceStor_VRE_STOR]
### OBJECTIVE FUNCTION ADDITIONS ###
#Variable costs of DC "virtual charging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_Charge_DC_virtual[y in DC_CHARGE,t=1:T],
- inputs["omega"][t]*virtual_discharge_cost*vCAPRES_DC_CHARGE[y,t]/by_rid(y,:etainverter))
- @expression(EP, eTotalCVar_Charge_DC_T_virtual[t=1:T], sum(eCVar_Charge_DC_virtual[y,t] for y in DC_CHARGE))
- @expression(EP, eTotalCVar_Charge_DC_virtual, sum(eTotalCVar_Charge_DC_T_virtual[t] for t in 1:T))
+ @expression(EP, eCVar_Charge_DC_virtual[y in DC_CHARGE, t = 1:T],
+ inputs["omega"][t] * virtual_discharge_cost *
+ vCAPRES_DC_CHARGE[y, t]/by_rid(y, :etainverter))
+ @expression(EP,
+ eTotalCVar_Charge_DC_T_virtual[t = 1:T],
+ sum(eCVar_Charge_DC_virtual[y, t] for y in DC_CHARGE))
+ @expression(EP,
+ eTotalCVar_Charge_DC_virtual,
+ sum(eTotalCVar_Charge_DC_T_virtual[t] for t in 1:T))
EP[:eObj] += eTotalCVar_Charge_DC_virtual
#Variable costs of DC "virtual discharging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_Discharge_DC_virtual[y in DC_DISCHARGE,t=1:T],
- inputs["omega"][t]*virtual_discharge_cost*by_rid(y,:etainverter)*vCAPRES_DC_DISCHARGE[y,t])
- @expression(EP, eTotalCVar_Discharge_DC_T_virtual[t=1:T], sum(eCVar_Discharge_DC_virtual[y,t] for y in DC_DISCHARGE))
- @expression(EP, eTotalCVar_Discharge_DC_virtual, sum(eTotalCVar_Discharge_DC_T_virtual[t] for t in 1:T))
+ @expression(EP, eCVar_Discharge_DC_virtual[y in DC_DISCHARGE, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*by_rid(y, :etainverter)*
+ vCAPRES_DC_DISCHARGE[y, t])
+ @expression(EP,
+ eTotalCVar_Discharge_DC_T_virtual[t = 1:T],
+ sum(eCVar_Discharge_DC_virtual[y, t] for y in DC_DISCHARGE))
+ @expression(EP,
+ eTotalCVar_Discharge_DC_virtual,
+ sum(eTotalCVar_Discharge_DC_T_virtual[t] for t in 1:T))
EP[:eObj] += eTotalCVar_Discharge_DC_virtual
#Variable costs of AC "virtual charging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_Charge_AC_virtual[y in AC_CHARGE,t=1:T],
- inputs["omega"][t]*virtual_discharge_cost*vCAPRES_AC_CHARGE[y,t])
- @expression(EP, eTotalCVar_Charge_AC_T_virtual[t=1:T], sum(eCVar_Charge_AC_virtual[y,t] for y in AC_CHARGE))
- @expression(EP, eTotalCVar_Charge_AC_virtual, sum(eTotalCVar_Charge_AC_T_virtual[t] for t in 1:T))
+ @expression(EP, eCVar_Charge_AC_virtual[y in AC_CHARGE, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*vCAPRES_AC_CHARGE[y, t])
+ @expression(EP,
+ eTotalCVar_Charge_AC_T_virtual[t = 1:T],
+ sum(eCVar_Charge_AC_virtual[y, t] for y in AC_CHARGE))
+ @expression(EP,
+ eTotalCVar_Charge_AC_virtual,
+ sum(eTotalCVar_Charge_AC_T_virtual[t] for t in 1:T))
EP[:eObj] += eTotalCVar_Charge_AC_virtual
#Variable costs of AC "virtual discharging" for technologies "y" during hour "t" in zone "z"
- @expression(EP, eCVar_Discharge_AC_virtual[y in AC_DISCHARGE,t=1:T],
- inputs["omega"][t]*virtual_discharge_cost*vCAPRES_AC_DISCHARGE[y,t])
- @expression(EP, eTotalCVar_Discharge_AC_T_virtual[t=1:T], sum(eCVar_Discharge_AC_virtual[y,t] for y in AC_DISCHARGE))
- @expression(EP, eTotalCVar_Discharge_AC_virtual, sum(eTotalCVar_Discharge_AC_T_virtual[t] for t in 1:T))
+ @expression(EP, eCVar_Discharge_AC_virtual[y in AC_DISCHARGE, t = 1:T],
+ inputs["omega"][t]*virtual_discharge_cost*vCAPRES_AC_DISCHARGE[y, t])
+ @expression(EP,
+ eTotalCVar_Discharge_AC_T_virtual[t = 1:T],
+ sum(eCVar_Discharge_AC_virtual[y, t] for y in AC_DISCHARGE))
+ @expression(EP,
+ eTotalCVar_Discharge_AC_virtual,
+ sum(eTotalCVar_Discharge_AC_T_virtual[t] for t in 1:T))
EP[:eObj] += eTotalCVar_Discharge_AC_virtual
### LONG DURATION ENERGY STORAGE CAPACITY RESERVE MARGIN MODULE ###
@@ -2070,63 +2277,83 @@ function vre_stor_capres!(EP::Model, inputs::Dict, setup::Dict)
dfPeriodMap = inputs["Period_Map"] # Dataframe that maps modeled periods to representative periods
NPeriods = size(inputs["Period_Map"])[1] # Number of modeled periods
MODELED_PERIODS_INDEX = 1:NPeriods
- REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!,:Rep_Period] .== MODELED_PERIODS_INDEX]
+ REP_PERIODS_INDEX = MODELED_PERIODS_INDEX[dfPeriodMap[!, :Rep_Period] .== MODELED_PERIODS_INDEX]
### VARIABLES ###
- @variables(EP, begin
- # State of charge held in reserve for storage at beginning of each modeled period n
- vCAPCONTRSTOR_VSOCw_VRE_STOR[y in VS_LDS, n in MODELED_PERIODS_INDEX] >= 0
+ @variables(EP,
+ begin
+ # State of charge held in reserve for storage at beginning of each modeled period n
+ vCAPCONTRSTOR_VSOCw_VRE_STOR[y in VS_LDS, n in MODELED_PERIODS_INDEX] >= 0
- # Build up in storage inventory held in reserve over each representative period w (can be pos or neg)
- vCAPCONTRSTOR_VdSOC_VRE_STOR[y in VS_LDS, w=1:REP_PERIOD]
- end)
+ # Build up in storage inventory held in reserve over each representative period w (can be pos or neg)
+ vCAPCONTRSTOR_VdSOC_VRE_STOR[y in VS_LDS, w = 1:REP_PERIOD]
+ end)
### EXPRESSIONS ###
- @expression(EP, eVreStorVSoCBalLongDurationStorageStart[y in VS_LDS, w=1:REP_PERIOD],
- (1-self_discharge(gen[y]))*(EP[:vCAPRES_VS_VRE_STOR][y,hours_per_subperiod*w]-vCAPCONTRSTOR_VdSOC_VRE_STOR[y,w]))
-
+ @expression(EP,
+ eVreStorVSoCBalLongDurationStorageStart[y in VS_LDS, w = 1:REP_PERIOD],
+ (1 -
+ self_discharge(gen[y]))*(EP[:vCAPRES_VS_VRE_STOR][y, hours_per_subperiod * w] -
+ vCAPCONTRSTOR_VdSOC_VRE_STOR[y, w]))
+
DC_DISCHARGE_CONSTRAINTSET = intersect(DC_DISCHARGE, VS_LDS)
DC_CHARGE_CONSTRAINTSET = intersect(DC_CHARGE, VS_LDS)
AC_DISCHARGE_CONSTRAINTSET = intersect(AC_DISCHARGE, VS_LDS)
AC_CHARGE_CONSTRAINTSET = intersect(AC_CHARGE, VS_LDS)
- for w=1:REP_PERIOD
+ for w in 1:REP_PERIOD
for y in DC_DISCHARGE_CONSTRAINTSET
- eVreStorVSoCBalLongDurationStorageStart[y,w] += EP[:vCAPRES_DC_DISCHARGE][y,hours_per_subperiod*(w-1)+1]/by_rid(y,:eff_down_dc)
+ eVreStorVSoCBalLongDurationStorageStart[y, w] += EP[:vCAPRES_DC_DISCHARGE][y,
+ hours_per_subperiod * (w - 1) + 1] / by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE_CONSTRAINTSET
- eVreStorVSoCBalLongDurationStorageStart[y,w] -= by_rid(y,:eff_up_dc)*EP[:vCAPRES_DC_CHARGE][y,hours_per_subperiod*(w-1)+1]
+ eVreStorVSoCBalLongDurationStorageStart[y, w] -= by_rid(y, :eff_up_dc) *
+ EP[:vCAPRES_DC_CHARGE][y,
+ hours_per_subperiod * (w - 1) + 1]
end
for y in AC_DISCHARGE_CONSTRAINTSET
- eVreStorVSoCBalLongDurationStorageStart[y,w] += EP[:vCAPRES_AC_DISCHARGE][y,hours_per_subperiod*(w-1)+1]/by_rid(y,:eff_down_ac)
+ eVreStorVSoCBalLongDurationStorageStart[y, w] += EP[:vCAPRES_AC_DISCHARGE][y,
+ hours_per_subperiod * (w - 1) + 1] / by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE_CONSTRAINTSET
- eVreStorVSoCBalLongDurationStorageStart[y,w] -= by_rid(y,:eff_up_ac)*EP[:vCAPRES_AC_CHARGE][y,hours_per_subperiod*(w-1)+1]
+ eVreStorVSoCBalLongDurationStorageStart[y, w] -= by_rid(y, :eff_up_ac) *
+ EP[:vCAPRES_AC_CHARGE][y,
+ hours_per_subperiod * (w - 1) + 1]
end
end
### CONSTRAINTS ###
- # Constraint 1: Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
- # Modified initial virtual state of storage for long duration storage - initialize wth value carried over from last period
- # Alternative to cVSoCBalStart constraint which is included when modeling multiple representative periods and long duration storage
- # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
- @constraint(EP, cVreStorVSoCBalLongDurationStorageStart[y in VS_LDS, w=1:REP_PERIOD],
- EP[:vCAPRES_VS_VRE_STOR][y,hours_per_subperiod*(w-1)+1] == eVreStorVSoCBalLongDurationStorageStart[y,w])
+ # Constraint 1: Links last time step with first time step, ensuring position in hour 1 is within eligible change from final hour position
+        # Modified initial virtual state of storage for long duration storage - initialize with value carried over from last period
+ # Alternative to cVSoCBalStart constraint which is included when modeling multiple representative periods and long duration storage
+ # Note: tw_min = hours_per_subperiod*(w-1)+1; tw_max = hours_per_subperiod*w
+ @constraint(EP,
+ cVreStorVSoCBalLongDurationStorageStart[y in VS_LDS, w = 1:REP_PERIOD],
+ EP[:vCAPRES_VS_VRE_STOR][y,
+ hours_per_subperiod * (w - 1) + 1]==eVreStorVSoCBalLongDurationStorageStart[y, w])
# Constraint 2: Storage held in reserve at beginning of period w = storage at beginning of period w-1 + storage built up in period w (after n representative periods)
# Multiply storage build up term from prior period with corresponding weight
- @constraint(EP, cVreStorVSoCBalLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
- vCAPCONTRSTOR_VSOCw_VRE_STOR[y,mod1(r+1, NPeriods)] == vCAPCONTRSTOR_VSOCw_VRE_STOR[y,r] + vCAPCONTRSTOR_VdSOC_VRE_STOR[y,dfPeriodMap[r,:Rep_Period_Index]])
+ @constraint(EP,
+ cVreStorVSoCBalLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
+ vCAPCONTRSTOR_VSOCw_VRE_STOR[y,
+ mod1(r + 1, NPeriods)]==vCAPCONTRSTOR_VSOCw_VRE_STOR[y, r] +
+ vCAPCONTRSTOR_VdSOC_VRE_STOR[y, dfPeriodMap[r, :Rep_Period_Index]])
# Constraint 3: Initial reserve storage level for representative periods must also adhere to sub-period storage inventory balance
# Initial storage = Final storage - change in storage inventory across representative period
- @constraint(EP, cVreStorVSoCBalLongDurationStorageSub[y in VS_LDS, r in REP_PERIODS_INDEX],
- vCAPCONTRSTOR_VSOCw_VRE_STOR[y,r] == EP[:vCAPRES_VS_VRE_STOR][y,hours_per_subperiod*dfPeriodMap[r,:Rep_Period_Index]] - vCAPCONTRSTOR_VdSOC_VRE_STOR[y,dfPeriodMap[r,:Rep_Period_Index]])
+ @constraint(EP,
+ cVreStorVSoCBalLongDurationStorageSub[y in VS_LDS, r in REP_PERIODS_INDEX],
+ vCAPCONTRSTOR_VSOCw_VRE_STOR[y,
+ r]==EP[:vCAPRES_VS_VRE_STOR][y,
+ hours_per_subperiod * dfPeriodMap[r, :Rep_Period_Index]] - vCAPCONTRSTOR_VdSOC_VRE_STOR[y, dfPeriodMap[r, :Rep_Period_Index]])
# Constraint 4: Energy held in reserve at the beginning of each modeled period acts as a lower bound on the total energy held in storage
- @constraint(EP, cSOCMinCapResLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX], EP[:vSOCw_VRE_STOR][y,r] >= vCAPCONTRSTOR_VSOCw_VRE_STOR[y,r])
+ @constraint(EP,
+ cSOCMinCapResLongDurationStorage[y in VS_LDS, r in MODELED_PERIODS_INDEX],
+ EP[:vSOCw_VRE_STOR][y, r]>=vCAPCONTRSTOR_VSOCw_VRE_STOR[y, r])
end
end
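
The long-duration storage constraints above rest on two pieces of index arithmetic: representative period w occupies hours hours_per_subperiod*(w-1)+1 through hours_per_subperiod*w (the tw_min/tw_max note), and mod1(r + 1, NPeriods) chains the final modeled period back to the first. A minimal standalone sketch of that bookkeeping, using toy values that are illustrative only and not GenX inputs:

# Toy values; GenX reads these from its inputs dictionary.
hours_per_subperiod = 168   # hours in one representative period (one week here)
NPeriods = 52               # number of modeled periods

tw_min(w) = hours_per_subperiod * (w - 1) + 1   # first hour of representative period w
tw_max(w) = hours_per_subperiod * w             # last hour of representative period w
next_period(r) = mod1(r + 1, NPeriods)          # wraps the last period back to the first

@assert (tw_min(1), tw_max(1)) == (1, 168)
@assert tw_min(2) == 169
@assert next_period(1) == 2
@assert next_period(NPeriods) == 1   # the wrap-around used in cVreStorVSoCBalLongDurationStorage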
@@ -2212,15 +2439,14 @@ Lastly, if the co-located resource has a variable renewable energy component, th
```
"""
function vre_stor_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
-
println("VRE-STOR Operational Reserves Module")
### LOAD DATA & CREATE SETS ###
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
gen_VRE_STOR = gen.VreStorage
- T = inputs["T"]
+ T = inputs["T"]
VRE_STOR = inputs["VRE_STOR"]
STOR = inputs["VS_STOR"]
DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
@@ -2250,10 +2476,10 @@ function vre_stor_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
SOLAR_RSV = intersect(SOLAR, inputs["RSV"]) # Set of solar resources with RSV reserves
WIND_REG = intersect(WIND, inputs["REG"]) # Set of wind resources with REG reserves
WIND_RSV = intersect(WIND, inputs["RSV"]) # Set of wind resources with RSV reserves
-
- STOR_REG = intersect(STOR, inputs["REG"]) # Set of storage resources with REG reserves
- STOR_RSV = intersect(STOR, inputs["RSV"]) # Set of storage resources with RSV reserves
- STOR_REG_RSV_UNION = union(STOR_REG, STOR_RSV) # Set of storage resources with either or both REG and RSV reserves
+
+ STOR_REG = intersect(STOR, inputs["REG"]) # Set of storage resources with REG reserves
+ STOR_RSV = intersect(STOR, inputs["RSV"]) # Set of storage resources with RSV reserves
+ STOR_REG_RSV_UNION = union(STOR_REG, STOR_RSV) # Set of storage resources with either or both REG and RSV reserves
DC_DISCHARGE_REG = intersect(DC_DISCHARGE, STOR_REG) # Set of DC discharge resources with REG reserves
DC_DISCHARGE_RSV = intersect(DC_DISCHARGE, STOR_RSV) # Set of DC discharge resources with RSV reserves
AC_DISCHARGE_REG = intersect(AC_DISCHARGE, STOR_REG) # Set of AC discharge resources with REG reserves
@@ -2279,173 +2505,179 @@ function vre_stor_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
@variables(EP, begin
# Contribution to regulation (primary reserves), assumed to be symmetric (up & down directions equal)
- vREG_SOLAR[y in SOLAR_REG, t=1:T] >= 0
- vREG_WIND[y in WIND_REG, t=1:T] >= 0
- vREG_DC_Discharge[y in DC_DISCHARGE_REG, t=1:T] >= 0
- vREG_DC_Charge[y in DC_CHARGE_REG, t=1:T] >= 0
- vREG_AC_Discharge[y in AC_DISCHARGE_REG, t=1:T] >= 0
- vREG_AC_Charge[y in AC_CHARGE_REG, t=1:T] >= 0
+ vREG_SOLAR[y in SOLAR_REG, t = 1:T] >= 0
+ vREG_WIND[y in WIND_REG, t = 1:T] >= 0
+ vREG_DC_Discharge[y in DC_DISCHARGE_REG, t = 1:T] >= 0
+ vREG_DC_Charge[y in DC_CHARGE_REG, t = 1:T] >= 0
+ vREG_AC_Discharge[y in AC_DISCHARGE_REG, t = 1:T] >= 0
+ vREG_AC_Charge[y in AC_CHARGE_REG, t = 1:T] >= 0
# Contribution to operating reserves (secondary reserves or contingency reserves); only model upward reserve requirements
- vRSV_SOLAR[y in SOLAR_RSV, t=1:T] >= 0
- vRSV_WIND[y in WIND_RSV, t=1:T] >= 0
- vRSV_DC_Discharge[y in DC_DISCHARGE_RSV, t=1:T] >= 0
- vRSV_DC_Charge[y in DC_CHARGE_RSV, t=1:T] >= 0
- vRSV_AC_Discharge[y in AC_DISCHARGE_RSV, t=1:T] >= 0
- vRSV_AC_Charge[y in AC_CHARGE_RSV, t=1:T] >= 0
+ vRSV_SOLAR[y in SOLAR_RSV, t = 1:T] >= 0
+ vRSV_WIND[y in WIND_RSV, t = 1:T] >= 0
+ vRSV_DC_Discharge[y in DC_DISCHARGE_RSV, t = 1:T] >= 0
+ vRSV_DC_Charge[y in DC_CHARGE_RSV, t = 1:T] >= 0
+ vRSV_AC_Discharge[y in AC_DISCHARGE_RSV, t = 1:T] >= 0
+ vRSV_AC_Charge[y in AC_CHARGE_RSV, t = 1:T] >= 0
end)
### EXPRESSIONS ###
- @expression(EP, eVreStorRegOnlyBalance[y in VRE_STOR_REG, t=1:T], JuMP.AffExpr())
- @expression(EP, eVreStorRsvOnlyBalance[y in VRE_STOR_RSV, t=1:T], JuMP.AffExpr())
- @expression(EP, eDischargeDCMin[y in DC_DISCHARGE, t=1:T], JuMP.AffExpr())
- @expression(EP, eChargeDCMin[y in DC_CHARGE, t=1:T], JuMP.AffExpr())
- @expression(EP, eDischargeACMin[y in AC_DISCHARGE, t=1:T], JuMP.AffExpr())
- @expression(EP, eChargeACMin[y in AC_CHARGE, t=1:T], JuMP.AffExpr())
- @expression(EP, eChargeMax[y in STOR_REG_RSV_UNION, t=1:T], JuMP.AffExpr())
- @expression(EP, eDischargeMax[y in STOR_REG_RSV_UNION, t=1:T], JuMP.AffExpr())
+ @expression(EP, eVreStorRegOnlyBalance[y in VRE_STOR_REG, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eVreStorRsvOnlyBalance[y in VRE_STOR_RSV, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eDischargeDCMin[y in DC_DISCHARGE, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eChargeDCMin[y in DC_CHARGE, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eDischargeACMin[y in AC_DISCHARGE, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eChargeACMin[y in AC_CHARGE, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eChargeMax[y in STOR_REG_RSV_UNION, t = 1:T], JuMP.AffExpr())
+ @expression(EP, eDischargeMax[y in STOR_REG_RSV_UNION, t = 1:T], JuMP.AffExpr())
- for t=1:T
+ for t in 1:T
for y in DC_DISCHARGE
- eDischargeDCMin[y,t] += EP[:vP_DC_DISCHARGE][y,t]
- eDischargeMax[y,t] += EP[:vP_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eDischargeDCMin[y, t] += EP[:vP_DC_DISCHARGE][y, t]
+ eDischargeMax[y, t] += EP[:vP_DC_DISCHARGE][y, t] / by_rid(y, :eff_down_dc)
end
for y in DC_CHARGE
- eChargeDCMin[y,t] += EP[:vP_DC_CHARGE][y,t]
- eChargeMax[y,t] += by_rid(y,:eff_up_dc)*EP[:vP_DC_CHARGE][y,t]
+ eChargeDCMin[y, t] += EP[:vP_DC_CHARGE][y, t]
+ eChargeMax[y, t] += by_rid(y, :eff_up_dc) * EP[:vP_DC_CHARGE][y, t]
end
for y in AC_DISCHARGE
- eDischargeACMin[y,t] += EP[:vP_AC_DISCHARGE][y,t]
- eDischargeMax[y,t] += EP[:vP_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eDischargeACMin[y, t] += EP[:vP_AC_DISCHARGE][y, t]
+ eDischargeMax[y, t] += EP[:vP_AC_DISCHARGE][y, t] / by_rid(y, :eff_down_ac)
end
for y in AC_CHARGE
- eChargeACMin[y,t] += EP[:vP_AC_CHARGE][y,t]
- eChargeMax[y,t] += by_rid(y,:eff_up_ac)*EP[:vP_AC_CHARGE][y,t]
+ eChargeACMin[y, t] += EP[:vP_AC_CHARGE][y, t]
+ eChargeMax[y, t] += by_rid(y, :eff_up_ac) * EP[:vP_AC_CHARGE][y, t]
end
for y in SOLAR_REG
- eVreStorRegOnlyBalance[y,t] += by_rid(y,:etainverter)*vREG_SOLAR[y,t]
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vREG_SOLAR[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vREG_SOLAR[y,t]
- EP[:eSolarGenMaxS][y,t] += vREG_SOLAR[y,t]
+ eVreStorRegOnlyBalance[y, t] += by_rid(y, :etainverter) * vREG_SOLAR[y, t]
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vREG_SOLAR[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vREG_SOLAR[y, t]
+ EP[:eSolarGenMaxS][y, t] += vREG_SOLAR[y, t]
end
for y in SOLAR_RSV
- eVreStorRsvOnlyBalance[y,t] += by_rid(y,:etainverter)*vRSV_SOLAR[y,t]
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vRSV_SOLAR[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vRSV_SOLAR[y,t]
- EP[:eSolarGenMaxS][y,t] += vRSV_SOLAR[y,t]
+ eVreStorRsvOnlyBalance[y, t] += by_rid(y, :etainverter) * vRSV_SOLAR[y, t]
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vRSV_SOLAR[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vRSV_SOLAR[y, t]
+ EP[:eSolarGenMaxS][y, t] += vRSV_SOLAR[y, t]
end
for y in WIND_REG
- eVreStorRegOnlyBalance[y,t] += vREG_WIND[y,t]
- EP[:eGridExport][y,t] += vREG_WIND[y,t]
- EP[:eWindGenMaxW][y,t] += vREG_WIND[y,t]
+ eVreStorRegOnlyBalance[y, t] += vREG_WIND[y, t]
+ EP[:eGridExport][y, t] += vREG_WIND[y, t]
+ EP[:eWindGenMaxW][y, t] += vREG_WIND[y, t]
end
for y in WIND_RSV
- eVreStorRsvOnlyBalance[y,t] += vRSV_WIND[y,t]
- EP[:eGridExport][y,t] += vRSV_WIND[y,t]
- EP[:eWindGenMaxW][y,t] += vRSV_WIND[y,t]
+ eVreStorRsvOnlyBalance[y, t] += vRSV_WIND[y, t]
+ EP[:eGridExport][y, t] += vRSV_WIND[y, t]
+ EP[:eWindGenMaxW][y, t] += vRSV_WIND[y, t]
end
for y in DC_DISCHARGE_REG
- eVreStorRegOnlyBalance[y,t] += by_rid(y,:etainverter)*vREG_DC_Discharge[y,t]
- eDischargeDCMin[y,t] -= vREG_DC_Discharge[y,t]
- eDischargeMax[y,t] += EP[:vREG_DC_Discharge][y,t]/by_rid(y,:eff_down_dc)
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vREG_DC_Discharge[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vREG_DC_Discharge[y,t]
+ eVreStorRegOnlyBalance[y, t] += by_rid(y, :etainverter) *
+ vREG_DC_Discharge[y, t]
+ eDischargeDCMin[y, t] -= vREG_DC_Discharge[y, t]
+ eDischargeMax[y, t] += EP[:vREG_DC_Discharge][y, t] / by_rid(y, :eff_down_dc)
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vREG_DC_Discharge[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vREG_DC_Discharge[y, t]
end
for y in DC_DISCHARGE_RSV
- eVreStorRsvOnlyBalance[y,t] += by_rid(y,:etainverter)*vRSV_DC_Discharge[y,t]
- eDischargeMax[y,t] += EP[:vRSV_DC_Discharge][y,t]/by_rid(y,:eff_down_dc)
- EP[:eGridExport][y,t] += by_rid(y,:etainverter)*vRSV_DC_Discharge[y,t]
- EP[:eInverterExport][y,t] += by_rid(y,:etainverter)*vRSV_DC_Discharge[y,t]
+ eVreStorRsvOnlyBalance[y, t] += by_rid(y, :etainverter) *
+ vRSV_DC_Discharge[y, t]
+ eDischargeMax[y, t] += EP[:vRSV_DC_Discharge][y, t] / by_rid(y, :eff_down_dc)
+ EP[:eGridExport][y, t] += by_rid(y, :etainverter) * vRSV_DC_Discharge[y, t]
+ EP[:eInverterExport][y, t] += by_rid(y, :etainverter) * vRSV_DC_Discharge[y, t]
end
for y in DC_CHARGE_REG
- eVreStorRegOnlyBalance[y,t] += vREG_DC_Charge[y,t]/by_rid(y,:etainverter)
- eChargeDCMin[y,t] -= vREG_DC_Charge[y,t]
- eChargeMax[y,t] += by_rid(y,:eff_up_dc)*EP[:vREG_DC_Charge][y,t]
- EP[:eGridExport][y,t] += vREG_DC_Charge[y,t]/by_rid(y,:etainverter)
- EP[:eInverterExport][y,t] += vREG_DC_Charge[y,t]/by_rid(y,:etainverter)
+ eVreStorRegOnlyBalance[y, t] += vREG_DC_Charge[y, t] / by_rid(y, :etainverter)
+ eChargeDCMin[y, t] -= vREG_DC_Charge[y, t]
+ eChargeMax[y, t] += by_rid(y, :eff_up_dc) * EP[:vREG_DC_Charge][y, t]
+ EP[:eGridExport][y, t] += vREG_DC_Charge[y, t] / by_rid(y, :etainverter)
+ EP[:eInverterExport][y, t] += vREG_DC_Charge[y, t] / by_rid(y, :etainverter)
end
for y in DC_CHARGE_RSV
- eVreStorRsvOnlyBalance[y,t] += vRSV_DC_Charge[y,t]/by_rid(y,:etainverter)
- eChargeDCMin[y,t] -= vRSV_DC_Charge[y,t]
+ eVreStorRsvOnlyBalance[y, t] += vRSV_DC_Charge[y, t] / by_rid(y, :etainverter)
+ eChargeDCMin[y, t] -= vRSV_DC_Charge[y, t]
end
for y in AC_DISCHARGE_REG
- eVreStorRegOnlyBalance[y,t] += vREG_AC_Discharge[y,t]
- eDischargeACMin[y,t] -= vREG_AC_Discharge[y,t]
- eDischargeMax[y,t] += EP[:vREG_AC_Discharge][y,t]/by_rid(y,:eff_down_ac)
- EP[:eGridExport][y,t] += vREG_AC_Discharge[y,t]
+ eVreStorRegOnlyBalance[y, t] += vREG_AC_Discharge[y, t]
+ eDischargeACMin[y, t] -= vREG_AC_Discharge[y, t]
+ eDischargeMax[y, t] += EP[:vREG_AC_Discharge][y, t] / by_rid(y, :eff_down_ac)
+ EP[:eGridExport][y, t] += vREG_AC_Discharge[y, t]
end
for y in AC_DISCHARGE_RSV
- eVreStorRsvOnlyBalance[y,t] += vRSV_AC_Discharge[y,t]
- eDischargeMax[y,t] += EP[:vRSV_AC_Discharge][y,t]/by_rid(y,:eff_down_ac)
- EP[:eGridExport][y,t] += vRSV_AC_Discharge[y,t]
+ eVreStorRsvOnlyBalance[y, t] += vRSV_AC_Discharge[y, t]
+ eDischargeMax[y, t] += EP[:vRSV_AC_Discharge][y, t] / by_rid(y, :eff_down_ac)
+ EP[:eGridExport][y, t] += vRSV_AC_Discharge[y, t]
end
for y in AC_CHARGE_REG
- eVreStorRegOnlyBalance[y,t] += vREG_AC_Charge[y,t]
- eChargeACMin[y,t] -= vREG_AC_Charge[y,t]
- eChargeMax[y,t] += by_rid(y,:eff_down_ac)*EP[:vREG_AC_Charge][y,t]
- EP[:eGridExport][y,t] += vREG_AC_Charge[y,t]
+ eVreStorRegOnlyBalance[y, t] += vREG_AC_Charge[y, t]
+ eChargeACMin[y, t] -= vREG_AC_Charge[y, t]
+ eChargeMax[y, t] += by_rid(y, :eff_down_ac) * EP[:vREG_AC_Charge][y, t]
+ EP[:eGridExport][y, t] += vREG_AC_Charge[y, t]
end
for y in AC_CHARGE_RSV
- eVreStorRsvOnlyBalance[y,t] += vRSV_AC_Charge[y,t]
- eChargeACMin[y,t] -= vRSV_AC_Charge[y,t]
+ eVreStorRsvOnlyBalance[y, t] += vRSV_AC_Charge[y, t]
+ eChargeACMin[y, t] -= vRSV_AC_Charge[y, t]
end
for y in VS_SYM_DC_REG
- EP[:eChargeDischargeMaxDC][y,t] += (vREG_DC_Discharge[y,t]
- + vREG_DC_Charge[y,t])
+ EP[:eChargeDischargeMaxDC][y, t] += (vREG_DC_Discharge[y, t]
+ +
+ vREG_DC_Charge[y, t])
end
for y in VS_SYM_DC_RSV
- EP[:eChargeDischargeMaxDC][y,t] += vRSV_DC_Discharge[y,t]
+ EP[:eChargeDischargeMaxDC][y, t] += vRSV_DC_Discharge[y, t]
end
for y in VS_SYM_AC_REG
- EP[:eChargeDischargeMaxAC][y,t] += (vREG_AC_Discharge[y,t]
- + vREG_AC_Charge[y,t])
+ EP[:eChargeDischargeMaxAC][y, t] += (vREG_AC_Discharge[y, t]
+ +
+ vREG_AC_Charge[y, t])
end
for y in VS_SYM_AC_RSV
- EP[:eChargeDischargeMaxAC][y,t] += vRSV_AC_Discharge[y,t]
+ EP[:eChargeDischargeMaxAC][y, t] += vRSV_AC_Discharge[y, t]
end
for y in VS_ASYM_DC_DISCHARGE_REG
- EP[:eVreStorMaxDischargingDC][y,t] += vREG_DC_Discharge[y,t]
+ EP[:eVreStorMaxDischargingDC][y, t] += vREG_DC_Discharge[y, t]
end
for y in VS_ASYM_DC_DISCHARGE_RSV
- EP[:eVreStorMaxDischargingDC][y,t] += vRSV_DC_Discharge[y,t]
+ EP[:eVreStorMaxDischargingDC][y, t] += vRSV_DC_Discharge[y, t]
end
for y in VS_ASYM_DC_CHARGE_REG
- EP[:eVreStorMaxChargingDC][y,t] += vREG_DC_Charge[y,t]
+ EP[:eVreStorMaxChargingDC][y, t] += vREG_DC_Charge[y, t]
end
for y in VS_ASYM_AC_DISCHARGE_REG
- EP[:eVreStorMaxDischargingAC][y,t] += vREG_AC_Discharge[y,t]
+ EP[:eVreStorMaxDischargingAC][y, t] += vREG_AC_Discharge[y, t]
end
for y in VS_ASYM_AC_DISCHARGE_RSV
- EP[:eVreStorMaxDischargingAC][y,t] += vRSV_AC_Discharge[y,t]
+ EP[:eVreStorMaxDischargingAC][y, t] += vRSV_AC_Discharge[y, t]
end
for y in VS_ASYM_AC_CHARGE_REG
- EP[:eVreStorMaxChargingAC][y,t] += vREG_AC_Charge[y,t]
+ EP[:eVreStorMaxChargingAC][y, t] += vREG_AC_Charge[y, t]
end
end
if CapacityReserveMargin > 0
- for t=1:T
+ for t in 1:T
for y in DC_DISCHARGE
- eDischargeMax[y,t] += EP[:vCAPRES_DC_DISCHARGE][y,t]/by_rid(y,:eff_down_dc)
+ eDischargeMax[y, t] += EP[:vCAPRES_DC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_dc)
end
for y in AC_DISCHARGE
- eDischargeMax[y,t] += EP[:vCAPRES_AC_DISCHARGE][y,t]/by_rid(y,:eff_down_ac)
+ eDischargeMax[y, t] += EP[:vCAPRES_AC_DISCHARGE][y, t] /
+ by_rid(y, :eff_down_ac)
end
end
end
@@ -2454,88 +2686,121 @@ function vre_stor_operational_reserves!(EP::Model, inputs::Dict, setup::Dict)
# Frequency regulation and operating reserves for all co-located VRE-STOR resources
if !isempty(VRE_STOR_REG_RSV)
- @constraints(EP, begin
- # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
- [y in VRE_STOR_REG_RSV, t=1:T], EP[:vREG][y,t] <= reg_max(gen[y])*EP[:eTotalCap][y]
- [y in VRE_STOR_REG_RSV, t=1:T], EP[:vRSV][y,t] <= rsv_max(gen[y])*EP[:eTotalCap][y]
-
- # Actual contribution to regulation and reserves is sum of auxilary variables
- [y in VRE_STOR_REG_RSV, t=1:T], EP[:vREG][y,t] == eVreStorRegOnlyBalance[y,t]
- [y in VRE_STOR_REG_RSV, t=1:T], EP[:vRSV][y,t] == eVreStorRsvOnlyBalance[y,t]
- end)
+ @constraints(EP,
+ begin
+ # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
+ [y in VRE_STOR_REG_RSV, t = 1:T],
+ EP[:vREG][y, t] <= reg_max(gen[y]) * EP[:eTotalCap][y]
+ [y in VRE_STOR_REG_RSV, t = 1:T],
+ EP[:vRSV][y, t] <= rsv_max(gen[y]) * EP[:eTotalCap][y]
+
+                           # Actual contribution to regulation and reserves is sum of auxiliary variables
+ [y in VRE_STOR_REG_RSV, t = 1:T],
+ EP[:vREG][y, t] == eVreStorRegOnlyBalance[y, t]
+ [y in VRE_STOR_REG_RSV, t = 1:T],
+ EP[:vRSV][y, t] == eVreStorRsvOnlyBalance[y, t]
+ end)
end
if !isempty(VRE_STOR_REG_ONLY)
- @constraints(EP, begin
- # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
- [y in VRE_STOR_REG_ONLY, t=1:T], EP[:vREG][y,t] <= reg_max(gen[y])*EP[:eTotalCap][y]
-
- # Actual contribution to regulation is sum of auxilary variables
- [y in VRE_STOR_REG_ONLY, t=1:T], EP[:vREG][y,t] == eVreStorRegOnlyBalance[y,t]
- end)
+ @constraints(EP,
+ begin
+ # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
+ [y in VRE_STOR_REG_ONLY, t = 1:T],
+ EP[:vREG][y, t] <= reg_max(gen[y]) * EP[:eTotalCap][y]
+
+                           # Actual contribution to regulation is sum of auxiliary variables
+ [y in VRE_STOR_REG_ONLY, t = 1:T],
+ EP[:vREG][y, t] == eVreStorRegOnlyBalance[y, t]
+ end)
end
if !isempty(VRE_STOR_RSV_ONLY)
- @constraints(EP, begin
- # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
- [y in VRE_STOR_RSV_ONLY, t=1:T], EP[:vRSV][y,t] <= rsv_max(gen[y])*EP[:eTotalCap][y]
-
- # Actual contribution to reserves is sum of auxilary variables
- [y in VRE_STOR_RSV_ONLY, t=1:T], EP[:vRSV][y,t] == eVreStorRsvOnlyBalance[y,t]
- end)
+ @constraints(EP,
+ begin
+ # Maximum VRE-STOR contribution to reserves is a specified fraction of installed grid connection capacity
+ [y in VRE_STOR_RSV_ONLY, t = 1:T],
+ EP[:vRSV][y, t] <= rsv_max(gen[y]) * EP[:eTotalCap][y]
+
+                           # Actual contribution to reserves is sum of auxiliary variables
+ [y in VRE_STOR_RSV_ONLY, t = 1:T],
+ EP[:vRSV][y, t] == eVreStorRsvOnlyBalance[y, t]
+ end)
end
# Frequency regulation and operating reserves for VRE-STOR resources with a VRE component
if !isempty(SOLAR_REG)
- @constraints(EP, begin
- # Maximum generation and contribution to reserves up must be greater than zero
- [y in SOLAR_REG, t=1:T], EP[:vP_SOLAR][y,t] - EP[:vREG_SOLAR][y,t] >= 0
- end)
+ @constraints(EP,
+ begin
+ # Maximum generation and contribution to reserves up must be greater than zero
+ [y in SOLAR_REG, t = 1:T], EP[:vP_SOLAR][y, t] - EP[:vREG_SOLAR][y, t] >= 0
+ end)
end
if !isempty(WIND_REG)
- @constraints(EP, begin
- # Maximum generation and contribution to reserves up must be greater than zero
- [y in WIND_REG, t=1:T], EP[:vP_WIND][y,t] - EP[:vREG_WIND][y,t] >= 0
- end)
+ @constraints(EP,
+ begin
+ # Maximum generation and contribution to reserves up must be greater than zero
+ [y in WIND_REG, t = 1:T], EP[:vP_WIND][y, t] - EP[:vREG_WIND][y, t] >= 0
+ end)
end
# Frequency regulation and operating reserves for VRE-STOR resources with a storage component
if !isempty(STOR_REG_RSV_UNION)
- @constraints(EP, begin
- # Maximum DC charging rate plus contribution to reserves up must be greater than zero
- # Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
- [y in DC_CHARGE, t=1:T], eChargeDCMin[y,t] >= 0
-
- # Maximum AC charging rate plus contribution to reserves up must be greater than zero
- # Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
- [y in AC_CHARGE, t=1:T], eChargeACMin[y,t] >= 0
-
- # Maximum DC discharging rate and contribution to reserves down must be greater than zero
- # Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
- [y in DC_DISCHARGE, t=1:T], eDischargeDCMin[y,t] >= 0
-
- # Maximum AC discharging rate and contribution to reserves down must be greater than zero
- # Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
- [y in AC_DISCHARGE, t=1:T], eDischargeACMin[y,t] >= 0
-
- # Maximum charging rate plus contributions must be less than available storage capacity
- [y in STOR_REG_RSV_UNION, t=1:T], eChargeMax[y,t] <= EP[:eTotalCap_STOR][y]-EP[:vS_VRE_STOR][y, hoursbefore(p,t,1)]
-
- # Maximum discharging rate and contributions must be less than the available stored energy in prior period
- # wrapping from end of sample period to start of sample period for energy capacity constraint
- [y in STOR_REG_RSV_UNION, t=1:T], eDischargeMax[y,t] <= EP[:vS_VRE_STOR][y, hoursbefore(p,t,1)]
- end)
+ @constraints(EP,
+ begin
+ # Maximum DC charging rate plus contribution to reserves up must be greater than zero
+ # Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
+ [y in DC_CHARGE, t = 1:T], eChargeDCMin[y, t] >= 0
+
+ # Maximum AC charging rate plus contribution to reserves up must be greater than zero
+ # Note: when charging, reducing charge rate is contributing to upwards reserve & regulation as it drops net demand
+ [y in AC_CHARGE, t = 1:T], eChargeACMin[y, t] >= 0
+
+ # Maximum DC discharging rate and contribution to reserves down must be greater than zero
+ # Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
+ [y in DC_DISCHARGE, t = 1:T], eDischargeDCMin[y, t] >= 0
+
+ # Maximum AC discharging rate and contribution to reserves down must be greater than zero
+ # Note: when discharging, reducing discharge rate is contributing to downwards regulation as it drops net supply
+ [y in AC_DISCHARGE, t = 1:T], eDischargeACMin[y, t] >= 0
+
+ # Maximum charging rate plus contributions must be less than available storage capacity
+ [y in STOR_REG_RSV_UNION, t = 1:T],
+ eChargeMax[y, t] <=
+ EP[:eTotalCap_STOR][y] - EP[:vS_VRE_STOR][y, hoursbefore(p, t, 1)]
+
+ # Maximum discharging rate and contributions must be less than the available stored energy in prior period
+ # wrapping from end of sample period to start of sample period for energy capacity constraint
+ [y in STOR_REG_RSV_UNION, t = 1:T],
+ eDischargeMax[y, t] <= EP[:vS_VRE_STOR][y, hoursbefore(p, t, 1)]
+ end)
end
# Total system reserve constraints
- @expression(EP, eRegReqVreStor[t=1:T], inputs["pReg_Req_VRE"]*sum(inputs["pP_Max_Solar"][y,t]*EP[:eTotalCap_SOLAR][y]*by_rid(y, :etainverter) for y in SOLAR_REG)
- + inputs["pReg_Req_VRE"]*sum(inputs["pP_Max_Wind"][y,t]*EP[:eTotalCap_WIND][y] for y in WIND_REG))
- @expression(EP, eRsvReqVreStor[t=1:T], inputs["pRsv_Req_VRE"]*sum(inputs["pP_Max_Solar"][y,t]*EP[:eTotalCap_SOLAR][y]*by_rid(y, :etainverter) for y in SOLAR_RSV)
- + inputs["pRsv_Req_VRE"]*sum(inputs["pP_Max_Wind"][y,t]*EP[:eTotalCap_WIND][y] for y in WIND_RSV))
+ @expression(EP,
+ eRegReqVreStor[t = 1:T],
+ inputs["pReg_Req_VRE"] *
+ sum(inputs["pP_Max_Solar"][y, t] * EP[:eTotalCap_SOLAR][y] *
+ by_rid(y, :etainverter) for y in SOLAR_REG)
+ +inputs["pReg_Req_VRE"] *
+ sum(inputs["pP_Max_Wind"][y, t] * EP[:eTotalCap_WIND][y] for y in WIND_REG))
+ @expression(EP,
+ eRsvReqVreStor[t = 1:T],
+ inputs["pRsv_Req_VRE"] *
+ sum(inputs["pP_Max_Solar"][y, t] * EP[:eTotalCap_SOLAR][y] *
+ by_rid(y, :etainverter) for y in SOLAR_RSV)
+ +inputs["pRsv_Req_VRE"] *
+ sum(inputs["pP_Max_Wind"][y, t] * EP[:eTotalCap_WIND][y] for y in WIND_RSV))
if !isempty(VRE_STOR_REG)
- @constraint(EP, cRegVreStor[t=1:T], sum(EP[:vREG][y,t] for y in inputs["REG"]) >= EP[:eRegReq][t] + eRegReqVreStor[t])
+ @constraint(EP,
+ cRegVreStor[t = 1:T],
+ sum(EP[:vREG][y, t] for y in inputs["REG"])>=EP[:eRegReq][t] +
+ eRegReqVreStor[t])
end
if !isempty(VRE_STOR_RSV)
- @constraint(EP, cRsvReqVreStor[t=1:T], sum(EP[:vRSV][y,t] for y in inputs["RSV"]) + EP[:vUNMET_RSV][t] >= EP[:eRsvReq][t] + eRsvReqVreStor[t])
+ @constraint(EP,
+ cRsvReqVreStor[t = 1:T],
+ sum(EP[:vRSV][y, t] for y in inputs["RSV"]) +
+ EP[:vUNMET_RSV][t]>=EP[:eRsvReq][t] + eRsvReqVreStor[t])
end
end
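For context on the two requirement expressions reformatted above (eRegReqVreStor and eRsvReqVreStor): each hour's regulation and operating-reserve requirement scales with the forecasted output of the co-located solar and wind components. A minimal standalone sketch of that scaling, not part of the diff, with hypothetical names and numbers:

# Hypothetical illustration of the hourly regulation requirement contributed by a
# co-located solar component; none of these values come from GenX inputs.
pReg_Req_VRE = 0.05                # regulation requirement as a share of forecasted VRE output
cf_solar = [0.0, 0.3, 0.6]         # hourly solar capacity factors (pP_Max_Solar analogue)
cap_solar_mw = 100.0               # installed solar capacity behind the inverter
etainverter = 0.97                 # inverter efficiency
reg_req_mw = [pReg_Req_VRE * cf_solar[t] * cap_solar_mw * etainverter for t in 1:3]
# reg_req_mw ≈ [0.0, 1.455, 2.91]  MW of regulation to be procured in each hour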
diff --git a/src/model/solve_model.jl b/src/model/solve_model.jl
index 6519a8c863..e3713bc67b 100644
--- a/src/model/solve_model.jl
+++ b/src/model/solve_model.jl
@@ -11,28 +11,28 @@ nothing (modifies an existing-solved model in the memory). `solve()` must be run
"""
function fix_integers(jump_model::Model)
- ################################################################################
- ## function fix_integers()
- ##
- ## inputs: jump_model - a model object containing that has been previously solved.
- ##
- ## description: fixes the iteger variables ones the model has been solved in order
- ## to calculate approximations of dual variables
- ##
- ## returns: no result since it modifies an existing-solved model in the memory.
- ## solve() must be run again to solve and getdual veriables
- ##
- ################################################################################
- values = Dict(v => value(v) for v in all_variables(jump_model))
- for v in all_variables(jump_model)
- if is_integer(v)
- fix(v,values[v],force=true)
- unset_integer(v)
+ ################################################################################
+ ## function fix_integers()
+ ##
+    ## inputs: jump_model - a model object that has been previously solved.
+ ##
+    ## description: fixes the integer variables once the model has been solved in order
+ ## to calculate approximations of dual variables
+ ##
+    ##          returns: no result since it modifies an existing, already-solved model in memory.
+    ##          solve() must be run again to solve and get dual variables
+ ##
+ ################################################################################
+ values = Dict(v => value(v) for v in all_variables(jump_model))
+ for v in all_variables(jump_model)
+ if is_integer(v)
+ fix(v, values[v], force = true)
+ unset_integer(v)
elseif is_binary(v)
- fix(v,values[v],force=true)
- unset_binary(v)
+ fix(v, values[v], force = true)
+ unset_binary(v)
end
- end
+ end
end
@doc raw"""
@@ -48,62 +48,58 @@ Description: Solves and extracts solution variables for later processing
- `solver_time::Float64`: time taken to solve the model
"""
function solve_model(EP::Model, setup::Dict)
- ## Start solve timer
- solver_start_time = time()
- solver_time = time()
-
- ## Solve Model
- optimize!(EP)
-
- if has_values(EP)
-
- if has_duals(EP) # fully linear model
- println("LP solved for primal")
- else
- println("MILP solved for primal")
- end
-
- ## Record solver time
- solver_time = time() - solver_start_time
- elseif setup["ComputeConflicts"]==0
-
- @info "No model solution. You can try to set ComputeConflicts to 1 in the genx_settings.yml file to compute conflicting constraints."
-
- elseif setup["ComputeConflicts"]==1
-
- @info "No model solution. Trying to identify conflicting constriants..."
+ ## Start solve timer
+ solver_start_time = time()
+ solver_time = time()
+
+ ## Solve Model
+ optimize!(EP)
+
+ if has_values(EP)
+ if has_duals(EP) # fully linear model
+ println("LP solved for primal")
+ else
+ println("MILP solved for primal")
+ end
- try
- compute_conflict!(EP)
- catch e
- if isa(e, JuMP.ArgumentError)
- @warn "$(solver_name(EP)) does not support computing conflicting constraints. This is available using either Gurobi or CPLEX."
- solver_time = time() - solver_start_time
- return EP, solver_time
- else
- rethrow(e)
- end
- end
+ ## Record solver time
+ solver_time = time() - solver_start_time
+ elseif setup["ComputeConflicts"] == 0
+ @info "No model solution. You can try to set ComputeConflicts to 1 in the genx_settings.yml file to compute conflicting constraints."
+
+ elseif setup["ComputeConflicts"] == 1
+ @info "No model solution. Trying to identify conflicting constriants..."
+
+ try
+ compute_conflict!(EP)
+ catch e
+ if isa(e, JuMP.ArgumentError)
+ @warn "$(solver_name(EP)) does not support computing conflicting constraints. This is available using either Gurobi or CPLEX."
+ solver_time = time() - solver_start_time
+ return EP, solver_time
+ else
+ rethrow(e)
+ end
+ end
- list_of_conflicting_constraints = ConstraintRef[]
- if get_attribute(EP, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
- for (F, S) in list_of_constraint_types(EP)
- for con in all_constraints(EP, F, S)
- if get_attribute(con, MOI.ConstraintConflictStatus()) == MOI.IN_CONFLICT
- push!(list_of_conflicting_constraints, con)
- end
- end
- end
- display(list_of_conflicting_constraints)
- solver_time = time() - solver_start_time
- return EP, solver_time, list_of_conflicting_constraints
- else
- @info "Conflicts computation failed."
- solver_time = time() - solver_start_time
- return EP, solver_time, list_of_conflicting_constraints
- end
+ list_of_conflicting_constraints = ConstraintRef[]
+ if get_attribute(EP, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
+ for (F, S) in list_of_constraint_types(EP)
+ for con in all_constraints(EP, F, S)
+ if get_attribute(con, MOI.ConstraintConflictStatus()) == MOI.IN_CONFLICT
+ push!(list_of_conflicting_constraints, con)
+ end
+ end
+ end
+ display(list_of_conflicting_constraints)
+ solver_time = time() - solver_start_time
+ return EP, solver_time, list_of_conflicting_constraints
+ else
+ @info "Conflicts computation failed."
+ solver_time = time() - solver_start_time
+ return EP, solver_time, list_of_conflicting_constraints
+ end
+ end
- end
-
- return EP, solver_time
-end # END solve_model()
\ No newline at end of file
+ return EP, solver_time
+end # END solve_model()
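The reformatted fix_integers/solve_model pair implements a standard JuMP workflow: a MILP exposes no duals, so after the first solve the integer and binary variables are fixed at their optimal values and relaxed, and the resulting LP is re-solved to recover approximate duals. A minimal, self-contained sketch of that workflow follows; HiGHS is used purely as an example solver and the toy model is hypothetical:

using JuMP, HiGHS

m = Model(HiGHS.Optimizer)
set_silent(m)
@variable(m, x >= 0, Int)
@variable(m, y >= 0)
@constraint(m, demand, x + y >= 3.5)
@objective(m, Min, 2x + y)
optimize!(m)                          # MILP solve: duals are not available here

for v in all_variables(m)
    if is_integer(v)
        fix(v, value(v); force = true)    # pin the integer part of the solution
        unset_integer(v)
    end
end
optimize!(m)                          # re-solve the now-continuous LP
dual(demand)                          # approximate shadow price of the constraint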
diff --git a/src/model/utility.jl b/src/model/utility.jl
index 22e7329e9b..15e6841958 100644
--- a/src/model/utility.jl
+++ b/src/model/utility.jl
@@ -11,8 +11,8 @@ For example, if p = 10,
1 hour before t=11 is t=20
"""
function hoursbefore(p::Int, t::Int, b::Int)::Int
- period = div(t - 1, p)
- return period * p + mod1(t - b, p)
+ period = div(t - 1, p)
+ return period * p + mod1(t - b, p)
end
@doc raw"""
@@ -23,11 +23,10 @@ to allow for example b=1:3 to fetch a Vector{Int} of the three hours before
time index t.
"""
function hoursbefore(p::Int, t::Int, b::UnitRange{Int})::Vector{Int}
- period = div(t - 1, p)
- return period * p .+ mod1.(t .- b, p)
+ period = div(t - 1, p)
+ return period * p .+ mod1.(t .- b, p)
end
-
@doc raw"""
hoursafter(p::Int, t::Int, a::Int)
@@ -55,7 +54,6 @@ time index t.
function hoursafter(p::Int, t::Int, a::UnitRange{Int})::Vector{Int}
period = div(t - 1, p)
return period * p .+ mod1.(t .+ a, p)
-
end
@doc raw"""
@@ -64,7 +62,7 @@ end
This function checks if a column in a dataframe is all zeros.
"""
function is_nonzero(df::DataFrame, col::Symbol)::BitVector
- convert(BitVector, df[!, col] .> 0)::BitVector
+ convert(BitVector, df[!, col] .> 0)::BitVector
end
function is_nonzero(rs::Vector{<:AbstractResource}, col::Symbol)
@@ -82,4 +80,3 @@ function by_rid_res(rid::Integer, sym::Symbol, rs::Vector{<:AbstractResource})
f = isdefined(GenX, sym) ? getfield(GenX, sym) : x -> getproperty(x, sym)
return f(r)
end
-
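The indexing helpers above implement circular time arithmetic within a representative period of length p, so look-backs and look-aheads wrap around the period boundary. A quick standalone check of the behaviour described in the hoursbefore docstring (independent of GenX):

hb(p, t, b) = div(t - 1, p) * p + mod1(t - b, p)   # same arithmetic as hoursbefore(p, t, b)

hb(10, 11, 1)   # 20: one hour before the first hour of period 2 wraps to that period's last hour
hb(10, 15, 2)   # 13: two hours before t = 15, no wrapping needed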
diff --git a/src/multi_stage/configure_multi_stage_inputs.jl b/src/multi_stage/configure_multi_stage_inputs.jl
index 870d98cdca..0bd5d45928 100644
--- a/src/multi_stage/configure_multi_stage_inputs.jl
+++ b/src/multi_stage/configure_multi_stage_inputs.jl
@@ -21,29 +21,32 @@ NOTE: The inv\_costs\_yr and crp arrays must be the same length; values with the
 returns: array object containing overnight capital costs, the discounted sum of annual investment costs incurred within the model horizon.
"""
-function compute_overnight_capital_cost(settings_d::Dict,inv_costs_yr::Array,crp::Array, tech_wacc::Array)
-
- cur_stage = settings_d["CurStage"] # Current model
- num_stages = settings_d["NumStages"] # Total number of model stages
- stage_lens = settings_d["StageLengths"]
-
- # 1) For each resource, find the minimum of the capital recovery period and the end of the model horizon
- # Total time between the end of the final model stage and the start of the current stage
- model_yrs_remaining = sum(stage_lens[cur_stage:end])
-
- # We will sum annualized costs through the full capital recovery period or the end of planning horizon, whichever comes first
- payment_yrs_remaining = min.(crp, model_yrs_remaining)
-
- # KEY ASSUMPTION: Investment costs after the planning horizon are fully recoverable, so we don't need to include these costs
- # 2) Compute the present value of investment associated with capital recovery period within the model horizon - discounting to year 1 and not year 0
- # (Factor to adjust discounting to year 0 for capital cost is included in the discounting coefficient applied to all terms in the objective function value.)
- occ = zeros(length(inv_costs_yr))
- for i in 1:length(occ)
- occ[i] = sum(inv_costs_yr[i]/(1+tech_wacc[i]) .^ (p) for p=1:payment_yrs_remaining[i])
- end
+function compute_overnight_capital_cost(settings_d::Dict,
+ inv_costs_yr::Array,
+ crp::Array,
+ tech_wacc::Array)
+    cur_stage = settings_d["CurStage"] # Current model stage
+ num_stages = settings_d["NumStages"] # Total number of model stages
+ stage_lens = settings_d["StageLengths"]
+
+ # 1) For each resource, find the minimum of the capital recovery period and the end of the model horizon
+ # Total time between the end of the final model stage and the start of the current stage
+ model_yrs_remaining = sum(stage_lens[cur_stage:end])
+
+ # We will sum annualized costs through the full capital recovery period or the end of planning horizon, whichever comes first
+ payment_yrs_remaining = min.(crp, model_yrs_remaining)
+
+ # KEY ASSUMPTION: Investment costs after the planning horizon are fully recoverable, so we don't need to include these costs
+ # 2) Compute the present value of investment associated with capital recovery period within the model horizon - discounting to year 1 and not year 0
+ # (Factor to adjust discounting to year 0 for capital cost is included in the discounting coefficient applied to all terms in the objective function value.)
+ occ = zeros(length(inv_costs_yr))
+ for i in 1:length(occ)
+ occ[i] = sum(inv_costs_yr[i] / (1 + tech_wacc[i]) .^ (p)
+ for p in 1:payment_yrs_remaining[i])
+ end
- # 3) Return the overnight capital cost (discounted sum of annual investment costs incured within the model horizon)
- return occ
+    # 3) Return the overnight capital cost (discounted sum of annual investment costs incurred within the model horizon)
+ return occ
end
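To make the discounting inside compute_overnight_capital_cost concrete, here is a toy calculation (not part of the diff; all numbers are hypothetical):

inv_cost_yr = 100.0        # annualized investment cost, e.g. $/MW-yr
wacc = 0.05                # technology-specific discount rate
payment_yrs = 4            # min(capital recovery period, years left in the model horizon)
occ = sum(inv_cost_yr / (1 + wacc)^p for p in 1:payment_yrs)
# occ ≈ 354.6: present value, discounted to year 1, of the four annual payments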
@doc raw"""
@@ -67,91 +70,139 @@ inputs:
returns: dictionary containing updated model inputs, to be used in the generate\_model() method.
"""
-function configure_multi_stage_inputs(inputs_d::Dict, settings_d::Dict, NetworkExpansion::Int64)
-
+function configure_multi_stage_inputs(inputs_d::Dict,
+ settings_d::Dict,
+ NetworkExpansion::Int64)
gen = inputs_d["RESOURCES"]
- # Parameter inputs when multi-year discounting is activated
- cur_stage = settings_d["CurStage"]
- stage_len = settings_d["StageLengths"][cur_stage]
- wacc = settings_d["WACC"] # Interest Rate and also the discount rate unless specified other wise
- myopic = settings_d["Myopic"] == 1 # 1 if myopic (only one forward pass), 0 if full DDP
-
- # Define OPEXMULT here, include in inputs_dict[t] for use in dual_dynamic_programming.jl, transmission_multi_stage.jl, and investment_multi_stage.jl
- OPEXMULT = myopic ? 1 : sum([1/(1+wacc)^(i-1) for i in range(1,stop=stage_len)])
- inputs_d["OPEXMULT"] = OPEXMULT
-
- if !myopic ### Leave myopic costs in annualized form and do not scale OPEX costs
- # 1. Convert annualized investment costs incured within the model horizon into overnight capital costs
- # NOTE: Although the "yr" suffix is still in use in these parameter names, they no longer represent annualized costs but rather truncated overnight capital costs
- gen.inv_cost_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_per_mwyr.(gen), capital_recovery_period.(gen), tech_wacc.(gen))
- gen.inv_cost_per_mwhyr = compute_overnight_capital_cost(settings_d, inv_cost_per_mwhyr.(gen), capital_recovery_period.(gen), tech_wacc.(gen))
- gen.inv_cost_charge_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_charge_per_mwyr.(gen), capital_recovery_period.(gen), tech_wacc.(gen))
-
- # 2. Update fixed O&M costs to account for the possibility of more than 1 year between two model stages
- # NOTE: Although the "yr" suffix is still in use in these parameter names, they now represent total costs incured in each stage, which may be multiple years
- gen.fixed_om_cost_per_mwyr = fixed_om_cost_per_mwyr.(gen) .* OPEXMULT
- gen.fixed_om_cost_per_mwhyr = fixed_om_cost_per_mwhyr.(gen) .* OPEXMULT
- gen.fixed_om_cost_charge_per_mwyr = fixed_om_cost_charge_per_mwyr.(gen) .* OPEXMULT
-
- # Conduct 1. and 2. for any co-located VRE-STOR resources
- if !isempty(inputs_d["VRE_STOR"])
- gen_VRE_STOR = gen.VreStorage
- gen_VRE_STOR.inv_cost_inverter_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_inverter_per_mwyr.(gen_VRE_STOR), capital_recovery_period_dc.(gen_VRE_STOR), tech_wacc_dc.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_solar_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_solar_per_mwyr.(gen_VRE_STOR), capital_recovery_period_solar.(gen_VRE_STOR), tech_wacc_solar.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_wind_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_wind_per_mwyr.(gen_VRE_STOR), capital_recovery_period_wind.(gen_VRE_STOR), tech_wacc_wind.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_discharge_dc_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_discharge_dc_per_mwyr.(gen_VRE_STOR), capital_recovery_period_discharge_dc.(gen_VRE_STOR), tech_wacc_discharge_dc.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_charge_dc_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_charge_dc_per_mwyr.(gen_VRE_STOR), capital_recovery_period_charge_dc.(gen_VRE_STOR), tech_wacc_charge_dc.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_discharge_ac_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_discharge_ac_per_mwyr.(gen_VRE_STOR), capital_recovery_period_discharge_ac.(gen_VRE_STOR), tech_wacc_discharge_ac.(gen_VRE_STOR))
- gen_VRE_STOR.inv_cost_charge_ac_per_mwyr = compute_overnight_capital_cost(settings_d, inv_cost_charge_ac_per_mwyr.(gen_VRE_STOR), capital_recovery_period_charge_ac.(gen_VRE_STOR), tech_wacc_charge_ac.(gen_VRE_STOR))
-
- gen_VRE_STOR.fixed_om_inverter_cost_per_mwyr = fixed_om_inverter_cost_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_solar_cost_per_mwyr = fixed_om_solar_cost_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_wind_cost_per_mwyr = fixed_om_wind_cost_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_cost_discharge_dc_per_mwyr = fixed_om_cost_discharge_dc_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_cost_charge_dc_per_mwyr = fixed_om_cost_charge_dc_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_cost_discharge_ac_per_mwyr = fixed_om_cost_discharge_ac_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- gen_VRE_STOR.fixed_om_cost_charge_ac_per_mwyr = fixed_om_cost_charge_ac_per_mwyr.(gen_VRE_STOR) .* OPEXMULT
- end
- end
+ # Parameter inputs when multi-year discounting is activated
+ cur_stage = settings_d["CurStage"]
+ stage_len = settings_d["StageLengths"][cur_stage]
+    wacc = settings_d["WACC"] # Interest rate, and also the discount rate unless specified otherwise
+ myopic = settings_d["Myopic"] == 1 # 1 if myopic (only one forward pass), 0 if full DDP
+
+ # Define OPEXMULT here, include in inputs_dict[t] for use in dual_dynamic_programming.jl, transmission_multi_stage.jl, and investment_multi_stage.jl
+ OPEXMULT = myopic ? 1 :
+ sum([1 / (1 + wacc)^(i - 1) for i in range(1, stop = stage_len)])
+ inputs_d["OPEXMULT"] = OPEXMULT
+
+ if !myopic ### Leave myopic costs in annualized form and do not scale OPEX costs
+        # 1. Convert annualized investment costs incurred within the model horizon into overnight capital costs
+ # NOTE: Although the "yr" suffix is still in use in these parameter names, they no longer represent annualized costs but rather truncated overnight capital costs
+ gen.inv_cost_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_per_mwyr.(gen),
+ capital_recovery_period.(gen),
+ tech_wacc.(gen))
+ gen.inv_cost_per_mwhyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_per_mwhyr.(gen),
+ capital_recovery_period.(gen),
+ tech_wacc.(gen))
+ gen.inv_cost_charge_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_charge_per_mwyr.(gen),
+ capital_recovery_period.(gen),
+ tech_wacc.(gen))
+
+ # 2. Update fixed O&M costs to account for the possibility of more than 1 year between two model stages
+        # NOTE: Although the "yr" suffix is still in use in these parameter names, they now represent total costs incurred in each stage, which may span multiple years
+ gen.fixed_om_cost_per_mwyr = fixed_om_cost_per_mwyr.(gen) .* OPEXMULT
+ gen.fixed_om_cost_per_mwhyr = fixed_om_cost_per_mwhyr.(gen) .* OPEXMULT
+ gen.fixed_om_cost_charge_per_mwyr = fixed_om_cost_charge_per_mwyr.(gen) .* OPEXMULT
+
+ # Conduct 1. and 2. for any co-located VRE-STOR resources
+ if !isempty(inputs_d["VRE_STOR"])
+ gen_VRE_STOR = gen.VreStorage
+ gen_VRE_STOR.inv_cost_inverter_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_inverter_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_dc.(gen_VRE_STOR),
+ tech_wacc_dc.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_solar_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_solar_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_solar.(gen_VRE_STOR),
+ tech_wacc_solar.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_wind_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_wind_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_wind.(gen_VRE_STOR),
+ tech_wacc_wind.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_discharge_dc_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_discharge_dc_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_discharge_dc.(gen_VRE_STOR),
+ tech_wacc_discharge_dc.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_charge_dc_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_charge_dc_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_charge_dc.(gen_VRE_STOR),
+ tech_wacc_charge_dc.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_discharge_ac_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_discharge_ac_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_discharge_ac.(gen_VRE_STOR),
+ tech_wacc_discharge_ac.(gen_VRE_STOR))
+ gen_VRE_STOR.inv_cost_charge_ac_per_mwyr = compute_overnight_capital_cost(settings_d,
+ inv_cost_charge_ac_per_mwyr.(gen_VRE_STOR),
+ capital_recovery_period_charge_ac.(gen_VRE_STOR),
+ tech_wacc_charge_ac.(gen_VRE_STOR))
+
+ gen_VRE_STOR.fixed_om_inverter_cost_per_mwyr = fixed_om_inverter_cost_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_solar_cost_per_mwyr = fixed_om_solar_cost_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_wind_cost_per_mwyr = fixed_om_wind_cost_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_cost_discharge_dc_per_mwyr = fixed_om_cost_discharge_dc_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_cost_charge_dc_per_mwyr = fixed_om_cost_charge_dc_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_cost_discharge_ac_per_mwyr = fixed_om_cost_discharge_ac_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ gen_VRE_STOR.fixed_om_cost_charge_ac_per_mwyr = fixed_om_cost_charge_ac_per_mwyr.(gen_VRE_STOR) .*
+ OPEXMULT
+ end
+ end
retirable = is_retirable(gen)
- # TODO: ask Sam about this
+ # TODO: ask Sam about this
# Set of all resources eligible for capacity retirements
- inputs_d["RET_CAP"] = retirable
- # Set of all storage resources eligible for energy capacity retirements
- inputs_d["RET_CAP_ENERGY"] = intersect(retirable, inputs_d["STOR_ALL"])
- # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
- inputs_d["RET_CAP_CHARGE"] = intersect(retirable, inputs_d["STOR_ASYMMETRIC"])
- # Set of all co-located resources' components eligible for capacity retirements
- if !isempty(inputs_d["VRE_STOR"])
- inputs_d["RET_CAP_DC"] = intersect(retirable, inputs_d["VS_DC"])
- inputs_d["RET_CAP_SOLAR"] = intersect(retirable, inputs_d["VS_SOLAR"])
- inputs_d["RET_CAP_WIND"] = intersect(retirable, inputs_d["VS_WIND"])
- inputs_d["RET_CAP_STOR"] = intersect(retirable, inputs_d["VS_STOR"])
- inputs_d["RET_CAP_DISCHARGE_DC"] = intersect(retirable, inputs_d["VS_ASYM_DC_DISCHARGE"])
- inputs_d["RET_CAP_CHARGE_DC"] = intersect(retirable, inputs_d["VS_ASYM_DC_CHARGE"])
- inputs_d["RET_CAP_DISCHARGE_AC"] = intersect(retirable, inputs_d["VS_ASYM_AC_DISCHARGE"])
- inputs_d["RET_CAP_CHARGE_AC"] = intersect(retirable, inputs_d["VS_ASYM_AC_CHARGE"])
- end
-
- # Transmission
- if NetworkExpansion == 1 && inputs_d["Z"] > 1
-
- if !myopic ### Leave myopic costs in annualized form
- # 1. Convert annualized tramsmission investment costs incured within the model horizon into overnight capital costs
- inputs_d["pC_Line_Reinforcement"] = compute_overnight_capital_cost(settings_d,inputs_d["pC_Line_Reinforcement"],inputs_d["Capital_Recovery_Period_Trans"],inputs_d["transmission_WACC"])
- end
-
- # Scale max_allowed_reinforcement to allow for possibility of deploying maximum reinforcement in each investment stage
- inputs_d["pTrans_Max_Possible"] = inputs_d["pLine_Max_Flow_Possible_MW"]
+ inputs_d["RET_CAP"] = retirable
+ # Set of all storage resources eligible for energy capacity retirements
+ inputs_d["RET_CAP_ENERGY"] = intersect(retirable, inputs_d["STOR_ALL"])
+ # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
+ inputs_d["RET_CAP_CHARGE"] = intersect(retirable, inputs_d["STOR_ASYMMETRIC"])
+ # Set of all co-located resources' components eligible for capacity retirements
+ if !isempty(inputs_d["VRE_STOR"])
+ inputs_d["RET_CAP_DC"] = intersect(retirable, inputs_d["VS_DC"])
+ inputs_d["RET_CAP_SOLAR"] = intersect(retirable, inputs_d["VS_SOLAR"])
+ inputs_d["RET_CAP_WIND"] = intersect(retirable, inputs_d["VS_WIND"])
+ inputs_d["RET_CAP_STOR"] = intersect(retirable, inputs_d["VS_STOR"])
+ inputs_d["RET_CAP_DISCHARGE_DC"] = intersect(retirable,
+ inputs_d["VS_ASYM_DC_DISCHARGE"])
+ inputs_d["RET_CAP_CHARGE_DC"] = intersect(retirable, inputs_d["VS_ASYM_DC_CHARGE"])
+ inputs_d["RET_CAP_DISCHARGE_AC"] = intersect(retirable,
+ inputs_d["VS_ASYM_AC_DISCHARGE"])
+ inputs_d["RET_CAP_CHARGE_AC"] = intersect(retirable, inputs_d["VS_ASYM_AC_CHARGE"])
+ end
+
+ # Transmission
+ if NetworkExpansion == 1 && inputs_d["Z"] > 1
+ if !myopic ### Leave myopic costs in annualized form
+                # 1. Convert annualized transmission investment costs incurred within the model horizon into overnight capital costs
+ inputs_d["pC_Line_Reinforcement"] = compute_overnight_capital_cost(settings_d,
+ inputs_d["pC_Line_Reinforcement"],
+ inputs_d["Capital_Recovery_Period_Trans"],
+ inputs_d["transmission_WACC"])
+ end
+
+        # Scale max_allowed_reinforcement to allow for the possibility of deploying the maximum reinforcement in each investment stage
+ inputs_d["pTrans_Max_Possible"] = inputs_d["pLine_Max_Flow_Possible_MW"]
# Network lines and zones that are expandable have greater maximum possible line flow than the available capacity of the previous stage as well as available line reinforcement
- inputs_d["EXPANSION_LINES"] = findall((inputs_d["pLine_Max_Flow_Possible_MW"] .> inputs_d["pTrans_Max"]) .& (inputs_d["pMax_Line_Reinforcement"] .> 0))
- inputs_d["NO_EXPANSION_LINES"] = findall((inputs_d["pLine_Max_Flow_Possible_MW"] .<= inputs_d["pTrans_Max"]) .| (inputs_d["pMax_Line_Reinforcement"] .<= 0))
- # To-Do: Error Handling
- # 1.) Enforce that pLine_Max_Flow_Possible_MW for the first model stage be equal to (for transmission expansion to be disalowed) or greater (to allow transmission expansion) than pTrans_Max in inputs/inputs_p1
+ inputs_d["EXPANSION_LINES"] = findall((inputs_d["pLine_Max_Flow_Possible_MW"] .>
+ inputs_d["pTrans_Max"]) .&
+ (inputs_d["pMax_Line_Reinforcement"] .> 0))
+ inputs_d["NO_EXPANSION_LINES"] = findall((inputs_d["pLine_Max_Flow_Possible_MW"] .<=
+ inputs_d["pTrans_Max"]) .|
+ (inputs_d["pMax_Line_Reinforcement"] .<=
+ 0))
+ # To-Do: Error Handling
+            # 1.) Enforce that pLine_Max_Flow_Possible_MW for the first model stage be equal to (to disallow transmission expansion) or greater than (to allow transmission expansion) pTrans_Max in inputs/inputs_p1
end
return inputs_d
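One quantity in this file worth a worked example is OPEXMULT, which scales per-year fixed O&M costs so that a single non-myopic model stage accounts for every year it represents. A hypothetical case with a 5-year stage and a 4.5% discount rate:

wacc, stage_len = 0.045, 5
OPEXMULT = sum(1 / (1 + wacc)^(i - 1) for i in 1:stage_len)
# ≈ 4.59: annual fixed O&M costs are multiplied by this factor in non-myopic runs,
# while myopic runs keep OPEXMULT = 1 and leave costs annualized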
diff --git a/src/multi_stage/dual_dynamic_programming.jl b/src/multi_stage/dual_dynamic_programming.jl
index bb9f14df68..9703784473 100644
--- a/src/multi_stage/dual_dynamic_programming.jl
+++ b/src/multi_stage/dual_dynamic_programming.jl
@@ -132,7 +132,6 @@ returns:
* inputs\_d – Dictionary of inputs for each model stage, generated by the load\_inputs() method, modified by this method.
"""
function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
-
settings_d = setup["MultiStageSettingsDict"]
num_stages = settings_d["NumStages"] # Total number of investment planning stages
EPSILON = settings_d["ConvergenceTolerance"] # Tolerance
@@ -150,7 +149,7 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
# Step a.i) Initialize cost-to-go function for t = 1:num_stages
for t in 1:num_stages
- settings_d["CurStage"] = t;
+ settings_d["CurStage"] = t
models_d[t] = initialize_cost_to_go(settings_d, models_d[t], inputs_d[t])
end
@@ -162,7 +161,6 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
println("Solving First Stage Problem")
println("***********")
-
t = 1 # Stage = 1
solve_time_d = Dict()
ddp_prev_time = time() # Begin tracking time of each iteration
@@ -174,7 +172,6 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
# Step c.ii) If the relative difference between upper and lower bounds are small, break loop
while ((z_upper - z_lower) / z_lower > EPSILON)
-
ic = ic + 1 # Increase iteration counter by 1
if (ic > 10000)
@@ -207,21 +204,25 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
end
## Forward pass for t=2:num_stages
for t in 2:num_stages
-
println("***********")
println(string("Forward Pass t = ", t))
println("***********")
# Step d.i) Fix initial investments for model at time t given optimal solution for time t-1
- models_d[t] = fix_initial_investments(models_d[t-1], models_d[t], start_cap_d, inputs_d[t])
+ models_d[t] = fix_initial_investments(models_d[t - 1],
+ models_d[t],
+ start_cap_d,
+ inputs_d[t])
# Step d.ii) Fix capacity tracking variables for endogenous retirements
- models_d[t] = fix_capacity_tracking(models_d[t-1], models_d[t], cap_track_d, t)
+ models_d[t] = fix_capacity_tracking(models_d[t - 1],
+ models_d[t],
+ cap_track_d,
+ t)
# Step d.iii) Solve the model at time t
models_d[t], solve_time_d[t] = solve_model(models_d[t], setup)
inputs_d[t]["solve_time"] = solve_time_d[t]
-
end
### For the myopic solution, algorithm should terminate here after the first forward pass calculation and then move to Outputs writing.
@@ -242,7 +243,8 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
# Step e) Calculate the new upper bound
z_upper_temp = 0
for t in 1:num_stages
- z_upper_temp = z_upper_temp + (objective_value(models_d[t]) - value(models_d[t][:vALPHA]))
+ z_upper_temp = z_upper_temp +
+ (objective_value(models_d[t]) - value(models_d[t][:vALPHA]))
end
# If the upper bound decreased, set it as the new upper bound
@@ -254,17 +256,19 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
# Step f) Backward pass for t = num_stages:2
for t in num_stages:-1:2
-
println("***********")
println(string("Backward Pass t = ", t))
println("***********")
# Step f.i) Add a cut to the previous time step using information from the current time step
- models_d[t-1] = add_cut(models_d[t-1], models_d[t], start_cap_d, cap_track_d)
+ models_d[t - 1] = add_cut(models_d[t - 1],
+ models_d[t],
+ start_cap_d,
+ cap_track_d)
# Step f.ii) Solve the model with the additional cut at time t-1
- models_d[t-1], solve_time_d[t-1] = solve_model(models_d[t-1], setup)
- inputs_d[t-1]["solve_time"] = solve_time_d[t-1]
+ models_d[t - 1], solve_time_d[t - 1] = solve_model(models_d[t - 1], setup)
+ inputs_d[t - 1]["solve_time"] = solve_time_d[t - 1]
end
# Step g) Recalculate lower bound and go back to c)
@@ -283,7 +287,6 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
println(string("Lower Bound = ", z_lower))
println("***********")
-
### STEP I) One final forward pass to guarantee convergence
# Forward pass for t = 1:num_stages
t = 1 # update forward pass solution for the first stage
@@ -296,10 +299,13 @@ function run_ddp(models_d::Dict, setup::Dict, inputs_d::Dict)
println("***********")
# Step d.i) Fix initial investments for model at time t given optimal solution for time t-1
- models_d[t] = fix_initial_investments(models_d[t-1], models_d[t], start_cap_d, inputs_d[t])
+ models_d[t] = fix_initial_investments(models_d[t - 1],
+ models_d[t],
+ start_cap_d,
+ inputs_d[t])
# Step d.ii) Fix capacity tracking variables for endogenous retirements
- models_d[t] = fix_capacity_tracking(models_d[t-1], models_d[t], cap_track_d, t)
+ models_d[t] = fix_capacity_tracking(models_d[t - 1], models_d[t], cap_track_d, t)
# Step d.iii) Solve the model at time t
models_d[t], solve_time_d[t] = solve_model(models_d[t], setup)
@@ -325,20 +331,21 @@ inputs:
* outpath – String which represents the path to the Results directory.
* settings\_d - Dictionary containing settings configured in the GenX settings genx\_settings.yml file as well as the multi-stage settings file multi\_stage\_settings.yml.
"""
-function write_multi_stage_outputs(stats_d::Dict, outpath::String, settings_d::Dict, inputs_dict::Dict)
-
+function write_multi_stage_outputs(stats_d::Dict,
+ outpath::String,
+ settings_d::Dict,
+ inputs_dict::Dict)
multi_stage_settings_d = settings_d["MultiStageSettingsDict"]
write_multi_stage_capacities_discharge(outpath, multi_stage_settings_d)
write_multi_stage_capacities_charge(outpath, multi_stage_settings_d)
write_multi_stage_capacities_energy(outpath, multi_stage_settings_d)
if settings_d["NetworkExpansion"] == 1
- write_multi_stage_network_expansion(outpath, multi_stage_settings_d)
+ write_multi_stage_network_expansion(outpath, multi_stage_settings_d)
end
write_multi_stage_costs(outpath, multi_stage_settings_d, inputs_dict)
multi_stage_settings_d["Myopic"] == 0 && write_multi_stage_stats(outpath, stats_d)
write_multi_stage_settings(outpath, settings_d)
-
end
@doc raw"""
@@ -354,22 +361,24 @@ inputs:
returns: JuMP model with updated linking constraints.
"""
-function fix_initial_investments(EP_prev::Model, EP_cur::Model, start_cap_d::Dict, inputs_d::Dict)
-
- ALL_CAP = union(inputs_d["RET_CAP"],inputs_d["NEW_CAP"]) # Set of all resources subject to inter-stage capacity tracking
-
+function fix_initial_investments(EP_prev::Model,
+ EP_cur::Model,
+ start_cap_d::Dict,
+ inputs_d::Dict)
+ ALL_CAP = union(inputs_d["RET_CAP"], inputs_d["NEW_CAP"]) # Set of all resources subject to inter-stage capacity tracking
+
# start_cap_d dictionary contains the starting capacity expression name (e) as a key,
# and the associated linking constraint name (c) as a value
for (e, c) in start_cap_d
for y in keys(EP_cur[c])
- # Set the right hand side value of the linking initial capacity constraint in the current stage to the value of the available capacity variable solved for in the previous stages
- if c == :cExistingTransCap
+ # Set the right hand side value of the linking initial capacity constraint in the current stage to the value of the available capacity variable solved for in the previous stages
+ if c == :cExistingTransCap
+ set_normalized_rhs(EP_cur[c][y], value(EP_prev[e][y]))
+ else
+ if y[1] in ALL_CAP # extract resource integer index value from key
set_normalized_rhs(EP_cur[c][y], value(EP_prev[e][y]))
- else
- if y[1] in ALL_CAP # extract resource integer index value from key
- set_normalized_rhs(EP_cur[c][y], value(EP_prev[e][y]))
- end
end
+ end
end
end
return EP_cur
@@ -391,7 +400,10 @@ inputs:
returns: JuMP model with updated linking constraints.
"""
-function fix_capacity_tracking(EP_prev::Model, EP_cur::Model, cap_track_d::Dict, cur_stage::Int)
+function fix_capacity_tracking(EP_prev::Model,
+ EP_cur::Model,
+ cap_track_d::Dict,
+ cur_stage::Int)
# cap_track_d dictionary contains the endogenous retirement tracking array variable name (v) as a key,
# and the associated linking constraint name (c) as a value
@@ -407,7 +419,7 @@ function fix_capacity_tracking(EP_prev::Model, EP_cur::Model, cap_track_d::Dict,
# For all previous stages, set the right hand side value of the tracking constraint in the current
# stage to the value of the tracking constraint observed in the previous stage
- for p in 1:(cur_stage-1)
+ for p in 1:(cur_stage - 1)
            # Tracking newly built capacity over all previous stages
JuMP.set_normalized_rhs(EP_cur[c][i, p], value(EP_prev[v][i, p]))
# Tracking retired capacity over all previous stages
@@ -432,7 +444,6 @@ inputs:
returns: JuMP expression representing a sum of Benders cuts for linking capacity investment variables to be added to the cost-to-go function.
"""
function add_cut(EP_cur::Model, EP_next::Model, start_cap_d::Dict, cap_track_d::Dict)
-
next_obj_value = objective_value(EP_next) # Get the objective function value for the next investment planning stage
eRHS = @expression(EP_cur, 0) # Initialize RHS of cut to 0
@@ -480,7 +491,7 @@ function add_cut(EP_cur::Model, EP_next::Model, start_cap_d::Dict, cap_track_d::
end
# Add the cut to the model
- @constraint(EP_cur, EP_cur[:vALPHA] >= next_obj_value - eRHS)
+ @constraint(EP_cur, EP_cur[:vALPHA]>=next_obj_value - eRHS)
return EP_cur
end
@@ -505,8 +516,10 @@ inputs:
returns: JuMP expression representing a sum of Benders cuts for linking capacity investment variables to be added to the cost-to-go function.
"""
-function generate_cut_component_track(EP_cur::Model, EP_next::Model, var_name::Symbol, constr_name::Symbol)
-
+function generate_cut_component_track(EP_cur::Model,
+ EP_next::Model,
+ var_name::Symbol,
+ constr_name::Symbol)
next_dual_value = Float64[]
cur_inv_value = Float64[]
cur_inv_var = []
@@ -520,7 +533,8 @@ function generate_cut_component_track(EP_cur::Model, EP_next::Model, var_name::S
push!(cur_inv_var, EP_cur[var_name][y, p])
end
- eCutComponent = @expression(EP_cur, dot(next_dual_value, (cur_inv_value .- cur_inv_var)))
+ eCutComponent = @expression(EP_cur,
+ dot(next_dual_value, (cur_inv_value .- cur_inv_var)))
return eCutComponent
end
@@ -545,20 +559,22 @@ inputs:
returns: JuMP expression representing a sum of Benders cuts for linking capacity investment variables to be added to the cost-to-go function.
"""
-function generate_cut_component_inv(EP_cur::Model, EP_next::Model, expr_name::Symbol, constr_name::Symbol)
-
+function generate_cut_component_inv(EP_cur::Model,
+ EP_next::Model,
+ expr_name::Symbol,
+ constr_name::Symbol)
next_dual_value = Float64[]
cur_inv_value = Float64[]
cur_inv_var = []
for y in keys(EP_next[constr_name])
-
push!(next_dual_value, dual(EP_next[constr_name][y]))
push!(cur_inv_value, value(EP_cur[expr_name][y]))
push!(cur_inv_var, EP_cur[expr_name][y])
end
- eCutComponent = @expression(EP_cur, dot(next_dual_value, (cur_inv_value .- cur_inv_var)))
+ eCutComponent = @expression(EP_cur,
+ dot(next_dual_value, (cur_inv_value .- cur_inv_var)))
return eCutComponent
end
@@ -600,7 +616,6 @@ inputs:
returns: JuMP model with updated objective function.
"""
function initialize_cost_to_go(settings_d::Dict, EP::Model, inputs::Dict)
-
cur_stage = settings_d["CurStage"] # Current DDP Investment Planning Stage
stage_len = settings_d["StageLengths"][cur_stage]
wacc = settings_d["WACC"] # Interest Rate and also the discount rate unless specified other wise
@@ -616,10 +631,9 @@ function initialize_cost_to_go(settings_d::Dict, EP::Model, inputs::Dict)
else
DF = 1 / (1 + wacc)^(stage_len * (cur_stage - 1)) # Discount factor applied all to costs in each stage ###
# Initialize the cost-to-go variable
- @variable(EP, vALPHA >= 0)
- @objective(EP, Min, DF * OPEXMULT * EP[:eObj] + vALPHA)
+ @variable(EP, vALPHA>=0)
+ @objective(EP, Min, DF * OPEXMULT * EP[:eObj]+vALPHA)
end
return EP
-
end
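For readers following the run_ddp loop touched above: the algorithm alternates forward passes (which can tighten the upper bound) with backward passes that add Benders cuts (which raise the lower bound), stopping once the relative gap is within ConvergenceTolerance. A toy check of that stopping rule with made-up bounds:

z_lower, z_upper, EPSILON = 98.7, 99.1, 1e-3
converged = (z_upper - z_lower) / z_lower <= EPSILON
# false: the gap is ≈ 0.41%, above the 0.1% tolerance, so another forward/backward pass would run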
diff --git a/src/multi_stage/endogenous_retirement.jl b/src/multi_stage/endogenous_retirement.jl
index fca0ebb0bf..b88ac93e1d 100644
--- a/src/multi_stage/endogenous_retirement.jl
+++ b/src/multi_stage/endogenous_retirement.jl
@@ -12,113 +12,143 @@ inputs:
returns: An Int representing the model stage in before which the resource must retire due to endogenous lifetime retirements.
"""
function get_retirement_stage(cur_stage::Int, lifetime::Int, stage_lens::Array{Int, 1})
- years_from_start = sum(stage_lens[1:cur_stage]) # Years from start from the END of the current stage
- ret_years = years_from_start - lifetime # Difference between end of current stage and technology lifetime
- ret_stage = 0 # Compute the stage before which all newly built capacity must be retired by the end of the current stage
- while (ret_years - stage_lens[ret_stage+1] >= 0) & (ret_stage < cur_stage)
- ret_stage += 1
- ret_years -= stage_lens[ret_stage]
- end
+    years_from_start = sum(stage_lens[1:cur_stage]) # Years from the start of the model horizon to the END of the current stage
+ ret_years = years_from_start - lifetime # Difference between end of current stage and technology lifetime
+ ret_stage = 0 # Compute the stage before which all newly built capacity must be retired by the end of the current stage
+ while (ret_years - stage_lens[ret_stage + 1] >= 0) & (ret_stage < cur_stage)
+ ret_stage += 1
+ ret_years -= stage_lens[ret_stage]
+ end
return Int(ret_stage)
end
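A small worked example of get_retirement_stage as reformatted above, using hypothetical stage lengths:

stage_lens = [5, 5, 10]                   # three planning stages, 20 years in total
get_retirement_stage(3, 12, stage_lens)   # -> 1
# Capacity built in stage 1 (by year 5) reaches its 12-year lifetime by year 17,
# before the end of stage 3 (year 20), so stage-1 builds must be retired; stage-2
# builds (completed by year 10) would last until year 22 and are unaffected.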
-function update_cumulative_min_ret!(inputs_d::Dict,t::Int,Resource_Set::String,RetCap::Symbol)
-
- gen_name = "RESOURCES"
- CumRetCap = Symbol("cum_"*String(RetCap))
- # if the getter function exists in GenX then use it, otherwise get the attribute directly
- ret_cap_f = isdefined(GenX, RetCap) ? getfield(GenX, RetCap) : r -> getproperty(r, RetCap)
- cum_ret_cap_f = isdefined(GenX, CumRetCap) ? getfield(GenX, CumRetCap) : r -> getproperty(r, CumRetCap)
- if !isempty(inputs_d[1][Resource_Set])
- gen_t = inputs_d[t][gen_name]
- if t==1
- gen_t[CumRetCap] = ret_cap_f.(gen_t)
- else
- gen_t[CumRetCap] = cum_ret_cap_f.(inputs_d[t-1][gen_name]) + ret_cap_f.(gen_t)
- end
- end
+function update_cumulative_min_ret!(inputs_d::Dict,
+ t::Int,
+ Resource_Set::String,
+ RetCap::Symbol)
+ gen_name = "RESOURCES"
+ CumRetCap = Symbol("cum_" * String(RetCap))
+ # if the getter function exists in GenX then use it, otherwise get the attribute directly
+ ret_cap_f = isdefined(GenX, RetCap) ? getfield(GenX, RetCap) :
+ r -> getproperty(r, RetCap)
+ cum_ret_cap_f = isdefined(GenX, CumRetCap) ? getfield(GenX, CumRetCap) :
+ r -> getproperty(r, CumRetCap)
+ if !isempty(inputs_d[1][Resource_Set])
+ gen_t = inputs_d[t][gen_name]
+ if t == 1
+ gen_t[CumRetCap] = ret_cap_f.(gen_t)
+ else
+ gen_t[CumRetCap] = cum_ret_cap_f.(inputs_d[t - 1][gen_name]) + ret_cap_f.(gen_t)
+ end
+ end
end
-
-function compute_cumulative_min_retirements!(inputs_d::Dict,t::Int)
-
- mytab =[("G", :min_retired_cap_mw),
- ("STOR_ALL", :min_retired_energy_cap_mw),
- ("STOR_ASYMMETRIC", :min_retired_charge_cap_mw)];
-
- if !isempty(inputs_d[1]["VRE_STOR"])
- append!(mytab,[("VS_STOR", :min_retired_energy_cap_mw),
- ("VS_DC", :min_retired_cap_inverter_mw),
- ("VS_SOLAR", :min_retired_cap_solar_mw),
- ("VS_WIND", :min_retired_cap_wind_mw),
- ("VS_ASYM_DC_DISCHARGE", :min_retired_cap_discharge_dc_mw),
- ("VS_ASYM_DC_CHARGE", :min_retired_cap_charge_dc_mw),
- ("VS_ASYM_AC_DISCHARGE", :min_retired_cap_discharge_ac_mw),
- ("VS_ASYM_AC_CHARGE", :min_retired_cap_charge_ac_mw)])
-
- end
-
- for (Resource_Set,RetCap) in mytab
- update_cumulative_min_ret!(inputs_d,t,Resource_Set,RetCap)
- end
-
-
+function compute_cumulative_min_retirements!(inputs_d::Dict, t::Int)
+ mytab = [("G", :min_retired_cap_mw),
+ ("STOR_ALL", :min_retired_energy_cap_mw),
+ ("STOR_ASYMMETRIC", :min_retired_charge_cap_mw)]
+
+ if !isempty(inputs_d[1]["VRE_STOR"])
+ append!(mytab,
+ [("VS_STOR", :min_retired_energy_cap_mw),
+ ("VS_DC", :min_retired_cap_inverter_mw),
+ ("VS_SOLAR", :min_retired_cap_solar_mw),
+ ("VS_WIND", :min_retired_cap_wind_mw),
+ ("VS_ASYM_DC_DISCHARGE", :min_retired_cap_discharge_dc_mw),
+ ("VS_ASYM_DC_CHARGE", :min_retired_cap_charge_dc_mw),
+ ("VS_ASYM_AC_DISCHARGE", :min_retired_cap_discharge_ac_mw),
+ ("VS_ASYM_AC_CHARGE", :min_retired_cap_charge_ac_mw)])
+ end
+
+ for (Resource_Set, RetCap) in mytab
+ update_cumulative_min_ret!(inputs_d, t, Resource_Set, RetCap)
+ end
end
-
function endogenous_retirement!(EP::Model, inputs::Dict, setup::Dict)
- multi_stage_settings = setup["MultiStageSettingsDict"]
-
- println("Endogenous Retirement Module")
-
- num_stages = multi_stage_settings["NumStages"]
- cur_stage = multi_stage_settings["CurStage"]
- stage_lens = multi_stage_settings["StageLengths"]
-
- endogenous_retirement_discharge!(EP, inputs, num_stages, cur_stage, stage_lens)
-
- if !isempty(inputs["STOR_ALL"])
- endogenous_retirement_energy!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["STOR_ASYMMETRIC"])
- endogenous_retirement_charge!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VRE_STOR"])
- if !isempty(inputs["VS_DC"])
- endogenous_retirement_vre_stor_dc!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_SOLAR"])
- endogenous_retirement_vre_stor_solar!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_WIND"])
- endogenous_retirement_vre_stor_wind!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_STOR"])
- endogenous_retirement_vre_stor_stor!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_ASYM_DC_DISCHARGE"])
- endogenous_retirement_vre_stor_discharge_dc!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_ASYM_DC_CHARGE"])
- endogenous_retirement_vre_stor_charge_dc!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_ASYM_AC_DISCHARGE"])
- endogenous_retirement_vre_stor_discharge_ac!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
-
- if !isempty(inputs["VS_ASYM_AC_CHARGE"])
- endogenous_retirement_vre_stor_charge_ac!(EP, inputs, num_stages, cur_stage, stage_lens)
- end
- end
-
+ multi_stage_settings = setup["MultiStageSettingsDict"]
+
+ println("Endogenous Retirement Module")
+
+ num_stages = multi_stage_settings["NumStages"]
+ cur_stage = multi_stage_settings["CurStage"]
+ stage_lens = multi_stage_settings["StageLengths"]
+
+ endogenous_retirement_discharge!(EP, inputs, num_stages, cur_stage, stage_lens)
+
+ if !isempty(inputs["STOR_ALL"])
+ endogenous_retirement_energy!(EP, inputs, num_stages, cur_stage, stage_lens)
+ end
+
+ if !isempty(inputs["STOR_ASYMMETRIC"])
+ endogenous_retirement_charge!(EP, inputs, num_stages, cur_stage, stage_lens)
+ end
+
+ if !isempty(inputs["VRE_STOR"])
+ if !isempty(inputs["VS_DC"])
+ endogenous_retirement_vre_stor_dc!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_SOLAR"])
+ endogenous_retirement_vre_stor_solar!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_WIND"])
+ endogenous_retirement_vre_stor_wind!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_STOR"])
+ endogenous_retirement_vre_stor_stor!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_ASYM_DC_DISCHARGE"])
+ endogenous_retirement_vre_stor_discharge_dc!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_ASYM_DC_CHARGE"])
+ endogenous_retirement_vre_stor_charge_dc!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_ASYM_AC_DISCHARGE"])
+ endogenous_retirement_vre_stor_discharge_ac!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+
+ if !isempty(inputs["VS_ASYM_AC_CHARGE"])
+ endogenous_retirement_vre_stor_charge_ac!(EP,
+ inputs,
+ num_stages,
+ cur_stage,
+ stage_lens)
+ end
+ end
end
@doc raw"""
@@ -139,547 +169,753 @@ In other words, it is the largest index $r \in \{1, ..., (p-1)\}$ such that:
\end{aligned}
```
"""
-function endogenous_retirement_discharge!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (Discharge) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP = inputs["NEW_CAP"] # Set of all resources eligible for new capacity
- RET_CAP = inputs["RET_CAP"] # Set of all resources eligible for capacity retirements
- COMMIT = inputs["COMMIT"] # Set of all resources eligible for unit commitment
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACK[y in RET_CAP,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACK[y in RET_CAP,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCap[y in RET_CAP],
- if y in NEW_CAP
- EP[:vCAP][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCap[y in RET_CAP],
- if y in ids_with_all_options_contributing(gen)
- EP[:vRETCAP][y] + EP[:vRETROFITCAP][y]
- else
- EP[:vRETCAP][y]
- end
- )
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrack[y in RET_CAP], sum(EP[:vRETCAPTRACK][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrack[y in RET_CAP], sum(EP[:vCAPTRACK][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrack[y in RET_CAP],
- if y in COMMIT
- cum_min_retired_cap_mw(gen[y])/cap_size(gen[y])
- else
- cum_min_retired_cap_mw(gen[y])
- end
- )
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNew[y in RET_CAP], eNewCap[y] == vCAPTRACK[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrack[y in RET_CAP,p=1:(cur_stage-1)], vCAPTRACK[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNew[y in RET_CAP], eRetCap[y] == vRETCAPTRACK[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrack[y in RET_CAP,p=1:(cur_stage-1)], vRETCAPTRACK[y,p] == 0)
-
- # Create a slack variable for each resource that is not contributing to the retired capacity being tracked
- # This ensures that the model is able to satisfy the minimum retirement constraint
- RETROFIT_WITH_SLACK = ids_with_all_options_not_contributing(gen)
- if !isempty(RETROFIT_WITH_SLACK)
- @variable(EP, vslack_lifetime[y in RETROFIT_WITH_SLACK] >=0)
- @expression(EP, vslack_term, 2*maximum(inv_cost_per_mwyr.(gen))*sum(vslack_lifetime[y] for y in RETROFIT_WITH_SLACK; init=0))
- add_to_expression!(EP[:eObj], vslack_term)
- end
-
- @expression(EP,eLifetimeRetRHS[y in RET_CAP],
- if y in RETROFIT_WITH_SLACK
- eRetCapTrack[y] + vslack_lifetime[y]
- else
- eRetCapTrack[y]
- end
- )
-
- @constraint(EP, cLifetimeRet[y in RET_CAP], eNewCapTrack[y] + eMinRetCapTrack[y] <= eLifetimeRetRHS[y])
+function endogenous_retirement_discharge!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (Discharge) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP = inputs["NEW_CAP"] # Set of all resources eligible for new capacity
+ RET_CAP = inputs["RET_CAP"] # Set of all resources eligible for capacity retirements
+ COMMIT = inputs["COMMIT"] # Set of all resources eligible for unit commitment
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACK[y in RET_CAP, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACK[y in RET_CAP, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCap[y in RET_CAP],
+ if y in NEW_CAP
+ EP[:vCAP][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCap[y in RET_CAP],
+ if y in ids_with_all_options_contributing(gen)
+ EP[:vRETCAP][y] + EP[:vRETROFITCAP][y]
+ else
+ EP[:vRETCAP][y]
+ end)
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrack[y in RET_CAP],
+ sum(EP[:vRETCAPTRACK][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrack[y in RET_CAP],
+ sum(EP[:vCAPTRACK][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP, eMinRetCapTrack[y in RET_CAP],
+ if y in COMMIT
+ cum_min_retired_cap_mw(gen[y]) / cap_size(gen[y])
+ else
+ cum_min_retired_cap_mw(gen[y])
+ end)
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP, cCapTrackNew[y in RET_CAP], eNewCap[y]==vCAPTRACK[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP, cCapTrack[y in RET_CAP, p = 1:(cur_stage - 1)], vCAPTRACK[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP, cRetCapTrackNew[y in RET_CAP], eRetCap[y]==vRETCAPTRACK[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrack[y in RET_CAP, p = 1:(cur_stage - 1)],
+ vRETCAPTRACK[y, p]==0)
+
+ # Create a slack variable for each resource that is not contributing to the retired capacity being tracked
+ # This ensures that the model is able to satisfy the minimum retirement constraint
+ RETROFIT_WITH_SLACK = ids_with_all_options_not_contributing(gen)
+ if !isempty(RETROFIT_WITH_SLACK)
+ @variable(EP, vslack_lifetime[y in RETROFIT_WITH_SLACK]>=0)
+ @expression(EP,
+ vslack_term,
+ 2*maximum(inv_cost_per_mwyr.(gen))*
+ sum(vslack_lifetime[y] for y in RETROFIT_WITH_SLACK; init = 0))
+ add_to_expression!(EP[:eObj], vslack_term)
+ end
+
+ @expression(EP, eLifetimeRetRHS[y in RET_CAP],
+ if y in RETROFIT_WITH_SLACK
+ eRetCapTrack[y] + vslack_lifetime[y]
+ else
+ eRetCapTrack[y]
+ end)
+
+ @constraint(EP,
+ cLifetimeRet[y in RET_CAP],
+ eNewCapTrack[y] + eMinRetCapTrack[y]<=eLifetimeRetRHS[y])
end
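The cLifetimeRet constraint formatted above requires that cumulative tracked retirements (plus a slack term for retrofit-only resources) cover both the exogenous minimum retirements and all capacity built in stages whose lifetime has already expired. A toy feasibility check with hypothetical values:

new_cap_past_lifetime = 40.0   # MW built in stages past their lifetime (eNewCapTrack analogue)
min_retired = 10.0             # cumulative minimum retirement requirement in MW (eMinRetCapTrack)
retired_so_far = 55.0          # cumulative tracked retirements in MW (eRetCapTrack)
new_cap_past_lifetime + min_retired <= retired_so_far   # true -> the constraint holds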
-function endogenous_retirement_charge!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (Charge) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_CHARGE = inputs["NEW_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
- RET_CAP_CHARGE = inputs["RET_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKCHARGE[y in RET_CAP_CHARGE,p=1:num_stages] >= 0)
- @variable(EP, vRETCAPTRACKCHARGE[y in RET_CAP_CHARGE,p=1:num_stages] >= 0)
-
- ### Expressions ###
-
- @expression(EP, eNewCapCharge[y in RET_CAP_CHARGE],
- if y in NEW_CAP_CHARGE
- EP[:vCAPCHARGE][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapCharge[y in RET_CAP_CHARGE], EP[:vRETCAPCHARGE][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackCharge[y in RET_CAP_CHARGE], sum(EP[:vRETCAPTRACKCHARGE][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackCharge[y in RET_CAP_CHARGE], sum(EP[:vCAPTRACKCHARGE][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackCharge[y in RET_CAP_CHARGE], cum_min_retired_charge_cap_mw(gen[y]))
-
- ### Constratints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackChargeNew[y in RET_CAP_CHARGE], eNewCapCharge[y] == vCAPTRACKCHARGE[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackCharge[y in RET_CAP_CHARGE,p=1:(cur_stage-1)], vCAPTRACKCHARGE[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackChargeNew[y in RET_CAP_CHARGE], eRetCapCharge[y] == vRETCAPTRACKCHARGE[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackCharge[y in RET_CAP_CHARGE,p=1:(cur_stage-1)], vRETCAPTRACKCHARGE[y,p] == 0)
-
- @constraint(EP, cLifetimeRetCharge[y in RET_CAP_CHARGE], eNewCapTrackCharge[y] + eMinRetCapTrackCharge[y] <= eRetCapTrackCharge[y])
-
+function endogenous_retirement_charge!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (Charge) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_CHARGE = inputs["NEW_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for new charge capacity
+ RET_CAP_CHARGE = inputs["RET_CAP_CHARGE"] # Set of asymmetric charge/discharge storage resources eligible for charge capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKCHARGE[y in RET_CAP_CHARGE, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKCHARGE[y in RET_CAP_CHARGE, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapCharge[y in RET_CAP_CHARGE],
+ if y in NEW_CAP_CHARGE
+ EP[:vCAPCHARGE][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapCharge[y in RET_CAP_CHARGE], EP[:vRETCAPCHARGE][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackCharge[y in RET_CAP_CHARGE],
+ sum(EP[:vRETCAPTRACKCHARGE][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackCharge[y in RET_CAP_CHARGE],
+ sum(EP[:vCAPTRACKCHARGE][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackCharge[y in RET_CAP_CHARGE],
+ cum_min_retired_charge_cap_mw(gen[y]))
+
+    ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackChargeNew[y in RET_CAP_CHARGE],
+ eNewCapCharge[y]==vCAPTRACKCHARGE[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackCharge[y in RET_CAP_CHARGE, p = 1:(cur_stage - 1)],
+ vCAPTRACKCHARGE[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackChargeNew[y in RET_CAP_CHARGE],
+ eRetCapCharge[y]==vRETCAPTRACKCHARGE[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackCharge[y in RET_CAP_CHARGE, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKCHARGE[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetCharge[y in RET_CAP_CHARGE],
+ eNewCapTrackCharge[y] + eMinRetCapTrackCharge[y]<=eRetCapTrackCharge[y])
end
-function endogenous_retirement_energy!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (Energy) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_ENERGY = inputs["NEW_CAP_ENERGY"] # Set of all storage resources eligible for new energy capacity
- RET_CAP_ENERGY = inputs["RET_CAP_ENERGY"] # Set of all storage resources eligible for energy capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKENERGY[y in RET_CAP_ENERGY,p=1:num_stages] >= 0)
- @variable(EP, vRETCAPTRACKENERGY[y in RET_CAP_ENERGY,p=1:num_stages] >= 0)
-
- ### Expressions ###
-
- @expression(EP, eNewCapEnergy[y in RET_CAP_ENERGY],
- if y in NEW_CAP_ENERGY
- EP[:vCAPENERGY][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapEnergy[y in RET_CAP_ENERGY], EP[:vRETCAPENERGY][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackEnergy[y in RET_CAP_ENERGY], sum(EP[:vRETCAPTRACKENERGY][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackEnergy[y in RET_CAP_ENERGY], sum(EP[:vCAPTRACKENERGY][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackEnergy[y in RET_CAP_ENERGY], cum_min_retired_energy_cap_mw(gen[y]))
-
- ### Constratints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackEnergyNew[y in RET_CAP_ENERGY], eNewCapEnergy[y] == vCAPTRACKENERGY[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackEnergy[y in RET_CAP_ENERGY,p=1:(cur_stage-1)], vCAPTRACKENERGY[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackEnergyNew[y in RET_CAP_ENERGY], eRetCapEnergy[y] == vRETCAPTRACKENERGY[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackEnergy[y in RET_CAP_ENERGY,p=1:(cur_stage-1)], vRETCAPTRACKENERGY[y,p] == 0)
-
- @constraint(EP, cLifetimeRetEnergy[y in RET_CAP_ENERGY], eNewCapTrackEnergy[y] + eMinRetCapTrackEnergy[y] <= eRetCapTrackEnergy[y])
+function endogenous_retirement_energy!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (Energy) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_ENERGY = inputs["NEW_CAP_ENERGY"] # Set of all storage resources eligible for new energy capacity
+ RET_CAP_ENERGY = inputs["RET_CAP_ENERGY"] # Set of all storage resources eligible for energy capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKENERGY[y in RET_CAP_ENERGY, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKENERGY[y in RET_CAP_ENERGY, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapEnergy[y in RET_CAP_ENERGY],
+ if y in NEW_CAP_ENERGY
+ EP[:vCAPENERGY][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapEnergy[y in RET_CAP_ENERGY], EP[:vRETCAPENERGY][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackEnergy[y in RET_CAP_ENERGY],
+ sum(EP[:vRETCAPTRACKENERGY][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackEnergy[y in RET_CAP_ENERGY],
+ sum(EP[:vCAPTRACKENERGY][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackEnergy[y in RET_CAP_ENERGY],
+ cum_min_retired_energy_cap_mw(gen[y]))
+
+    ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackEnergyNew[y in RET_CAP_ENERGY],
+ eNewCapEnergy[y]==vCAPTRACKENERGY[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackEnergy[y in RET_CAP_ENERGY, p = 1:(cur_stage - 1)],
+ vCAPTRACKENERGY[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackEnergyNew[y in RET_CAP_ENERGY],
+ eRetCapEnergy[y]==vRETCAPTRACKENERGY[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackEnergy[y in RET_CAP_ENERGY, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKENERGY[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetEnergy[y in RET_CAP_ENERGY],
+ eNewCapTrackEnergy[y] + eMinRetCapTrackEnergy[y]<=eRetCapTrackEnergy[y])
end
-function endogenous_retirement_vre_stor_dc!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage DC) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_DC = inputs["NEW_CAP_DC"] # Set of all resources eligible for new capacity
- RET_CAP_DC = inputs["RET_CAP_DC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKDC[y in RET_CAP_DC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKDC[y in RET_CAP_DC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapDC[y in RET_CAP_DC],
- if y in NEW_CAP_DC
- EP[:vDCCAP][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapDC[y in RET_CAP_DC], EP[:vRETDCCAP][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackDC[y in RET_CAP_DC], sum(EP[:vRETCAPTRACKDC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackDC[y in RET_CAP_DC], sum(EP[:vCAPTRACKDC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackDC[y in RET_CAP_DC], cum_min_retired_cap_inverter_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewDC[y in RET_CAP_DC], eNewCapDC[y] == vCAPTRACKDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackDC[y in RET_CAP_DC,p=1:(cur_stage-1)], vCAPTRACKDC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewDC[y in RET_CAP_DC], eRetCapDC[y] == vRETCAPTRACKDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackDC[y in RET_CAP_DC,p=1:(cur_stage-1)], vRETCAPTRACKDC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetDC[y in RET_CAP_DC], eNewCapTrackDC[y] + eMinRetCapTrackDC[y] <= eRetCapTrackDC[y])
+function endogenous_retirement_vre_stor_dc!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage DC) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_DC = inputs["NEW_CAP_DC"] # Set of all resources eligible for new capacity
+ RET_CAP_DC = inputs["RET_CAP_DC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKDC[y in RET_CAP_DC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKDC[y in RET_CAP_DC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapDC[y in RET_CAP_DC],
+ if y in NEW_CAP_DC
+ EP[:vDCCAP][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapDC[y in RET_CAP_DC], EP[:vRETDCCAP][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackDC[y in RET_CAP_DC],
+ sum(EP[:vRETCAPTRACKDC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackDC[y in RET_CAP_DC],
+ sum(EP[:vCAPTRACKDC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackDC[y in RET_CAP_DC],
+ cum_min_retired_cap_inverter_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewDC[y in RET_CAP_DC],
+ eNewCapDC[y]==vCAPTRACKDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackDC[y in RET_CAP_DC, p = 1:(cur_stage - 1)],
+ vCAPTRACKDC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewDC[y in RET_CAP_DC],
+ eRetCapDC[y]==vRETCAPTRACKDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackDC[y in RET_CAP_DC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKDC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetDC[y in RET_CAP_DC],
+ eNewCapTrackDC[y] + eMinRetCapTrackDC[y]<=eRetCapTrackDC[y])
end
-function endogenous_retirement_vre_stor_solar!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Solar) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_SOLAR = inputs["NEW_CAP_SOLAR"] # Set of all resources eligible for new capacity
- RET_CAP_SOLAR = inputs["RET_CAP_SOLAR"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKSOLAR[y in RET_CAP_SOLAR,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKSOLAR[y in RET_CAP_SOLAR,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapSolar[y in RET_CAP_SOLAR],
- if y in NEW_CAP_SOLAR
- EP[:vSOLARCAP][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapSolar[y in RET_CAP_SOLAR], EP[:vRETSOLARCAP][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackSolar[y in RET_CAP_SOLAR], sum(EP[:vRETCAPTRACKSOLAR][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackSolar[y in RET_CAP_SOLAR], sum(EP[:vCAPTRACKSOLAR][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackSolar[y in RET_CAP_SOLAR], cum_min_retired_cap_solar_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewSolar[y in RET_CAP_SOLAR], eNewCapSolar[y] == vCAPTRACKSOLAR[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackSolar[y in RET_CAP_SOLAR,p=1:(cur_stage-1)], vCAPTRACKSOLAR[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewSolar[y in RET_CAP_SOLAR], eRetCapSolar[y] == vRETCAPTRACKSOLAR[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackSolar[y in RET_CAP_SOLAR,p=1:(cur_stage-1)], vRETCAPTRACKSOLAR[y,p] == 0)
-
- @constraint(EP, cLifetimeRetSolar[y in RET_CAP_SOLAR], eNewCapTrackSolar[y] + eMinRetCapTrackSolar[y] <= eRetCapTrackSolar[y])
+function endogenous_retirement_vre_stor_solar!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Solar) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_SOLAR = inputs["NEW_CAP_SOLAR"] # Set of all resources eligible for new capacity
+ RET_CAP_SOLAR = inputs["RET_CAP_SOLAR"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKSOLAR[y in RET_CAP_SOLAR, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKSOLAR[y in RET_CAP_SOLAR, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapSolar[y in RET_CAP_SOLAR],
+ if y in NEW_CAP_SOLAR
+ EP[:vSOLARCAP][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapSolar[y in RET_CAP_SOLAR], EP[:vRETSOLARCAP][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackSolar[y in RET_CAP_SOLAR],
+ sum(EP[:vRETCAPTRACKSOLAR][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackSolar[y in RET_CAP_SOLAR],
+ sum(EP[:vCAPTRACKSOLAR][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackSolar[y in RET_CAP_SOLAR],
+ cum_min_retired_cap_solar_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewSolar[y in RET_CAP_SOLAR],
+ eNewCapSolar[y]==vCAPTRACKSOLAR[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackSolar[y in RET_CAP_SOLAR, p = 1:(cur_stage - 1)],
+ vCAPTRACKSOLAR[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewSolar[y in RET_CAP_SOLAR],
+ eRetCapSolar[y]==vRETCAPTRACKSOLAR[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackSolar[y in RET_CAP_SOLAR, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKSOLAR[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetSolar[y in RET_CAP_SOLAR],
+ eNewCapTrackSolar[y] + eMinRetCapTrackSolar[y]<=eRetCapTrackSolar[y])
end
-function endogenous_retirement_vre_stor_wind!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Wind) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_WIND = inputs["NEW_CAP_WIND"] # Set of all resources eligible for new capacity
- RET_CAP_WIND = inputs["RET_CAP_WIND"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKWIND[y in RET_CAP_WIND,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKWIND[y in RET_CAP_WIND,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapWind[y in RET_CAP_WIND],
- if y in NEW_CAP_WIND
- EP[:vWINDCAP][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapWind[y in RET_CAP_WIND], EP[:vRETWINDCAP][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackWind[y in RET_CAP_WIND], sum(EP[:vRETCAPTRACKWIND][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackWind[y in RET_CAP_WIND], sum(EP[:vCAPTRACKWIND][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackWind[y in RET_CAP_WIND], cum_min_retired_cap_wind_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewWind[y in RET_CAP_WIND], eNewCapWind[y] == vCAPTRACKWIND[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackWind[y in RET_CAP_WIND,p=1:(cur_stage-1)], vCAPTRACKWIND[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewWind[y in RET_CAP_WIND], eRetCapWind[y] == vRETCAPTRACKWIND[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackWind[y in RET_CAP_WIND,p=1:(cur_stage-1)], vRETCAPTRACKWIND[y,p] == 0)
-
- @constraint(EP, cLifetimeRetWind[y in RET_CAP_WIND], eNewCapTrackWind[y] + eMinRetCapTrackWind[y] <= eRetCapTrackWind[y])
+function endogenous_retirement_vre_stor_wind!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Wind) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_WIND = inputs["NEW_CAP_WIND"] # Set of all resources eligible for new capacity
+ RET_CAP_WIND = inputs["RET_CAP_WIND"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKWIND[y in RET_CAP_WIND, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKWIND[y in RET_CAP_WIND, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapWind[y in RET_CAP_WIND],
+ if y in NEW_CAP_WIND
+ EP[:vWINDCAP][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapWind[y in RET_CAP_WIND], EP[:vRETWINDCAP][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackWind[y in RET_CAP_WIND],
+ sum(EP[:vRETCAPTRACKWIND][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackWind[y in RET_CAP_WIND],
+ sum(EP[:vCAPTRACKWIND][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackWind[y in RET_CAP_WIND],
+ cum_min_retired_cap_wind_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewWind[y in RET_CAP_WIND],
+ eNewCapWind[y]==vCAPTRACKWIND[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackWind[y in RET_CAP_WIND, p = 1:(cur_stage - 1)],
+ vCAPTRACKWIND[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewWind[y in RET_CAP_WIND],
+ eRetCapWind[y]==vRETCAPTRACKWIND[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackWind[y in RET_CAP_WIND, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKWIND[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetWind[y in RET_CAP_WIND],
+ eNewCapTrackWind[y] + eMinRetCapTrackWind[y]<=eRetCapTrackWind[y])
end
-function endogenous_retirement_vre_stor_stor!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Storage) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_STOR = inputs["NEW_CAP_STOR"] # Set of all resources eligible for new capacity
- RET_CAP_STOR = inputs["RET_CAP_STOR"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKENERGY_VS[y in RET_CAP_STOR,p=1:num_stages] >= 0)
- @variable(EP, vRETCAPTRACKENERGY_VS[y in RET_CAP_STOR,p=1:num_stages] >= 0)
-
- ### Expressions ###
-
- @expression(EP, eNewCapEnergy_VS[y in RET_CAP_STOR],
- if y in NEW_CAP_STOR
- EP[:vCAPENERGY_VS][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapEnergy_VS[y in RET_CAP_STOR], EP[:vRETCAPENERGY_VS][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackEnergy_VS[y in RET_CAP_STOR], sum(EP[:vRETCAPTRACKENERGY_VS][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackEnergy_VS[y in RET_CAP_STOR], sum(EP[:vCAPTRACKENERGY_VS][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackEnergy_VS[y in RET_CAP_STOR], cum_min_retired_energy_cap_mw(gen[y]))
-
- ### Constratints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackEnergyNew_VS[y in RET_CAP_STOR], eNewCapEnergy_VS[y] == vCAPTRACKENERGY_VS[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackEnergy_VS[y in RET_CAP_STOR,p=1:(cur_stage-1)], vCAPTRACKENERGY_VS[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackEnergyNew_VS[y in RET_CAP_STOR], eRetCapEnergy_VS[y] == vRETCAPTRACKENERGY_VS[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackEnergy_VS[y in RET_CAP_STOR,p=1:(cur_stage-1)], vRETCAPTRACKENERGY_VS[y,p] == 0)
-
- @constraint(EP, cLifetimeRetEnergy_VS[y in RET_CAP_STOR], eNewCapTrackEnergy_VS[y] + eMinRetCapTrackEnergy_VS[y] <= eRetCapTrackEnergy_VS[y])
+function endogenous_retirement_vre_stor_stor!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Storage) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_STOR = inputs["NEW_CAP_STOR"] # Set of all resources eligible for new capacity
+ RET_CAP_STOR = inputs["RET_CAP_STOR"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKENERGY_VS[y in RET_CAP_STOR, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKENERGY_VS[y in RET_CAP_STOR, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapEnergy_VS[y in RET_CAP_STOR],
+ if y in NEW_CAP_STOR
+ EP[:vCAPENERGY_VS][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapEnergy_VS[y in RET_CAP_STOR], EP[:vRETCAPENERGY_VS][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackEnergy_VS[y in RET_CAP_STOR],
+ sum(EP[:vRETCAPTRACKENERGY_VS][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackEnergy_VS[y in RET_CAP_STOR],
+ sum(EP[:vCAPTRACKENERGY_VS][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackEnergy_VS[y in RET_CAP_STOR],
+ cum_min_retired_energy_cap_mw(gen[y]))
+
+    ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackEnergyNew_VS[y in RET_CAP_STOR],
+ eNewCapEnergy_VS[y]==vCAPTRACKENERGY_VS[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackEnergy_VS[y in RET_CAP_STOR, p = 1:(cur_stage - 1)],
+ vCAPTRACKENERGY_VS[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackEnergyNew_VS[y in RET_CAP_STOR],
+ eRetCapEnergy_VS[y]==vRETCAPTRACKENERGY_VS[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackEnergy_VS[y in RET_CAP_STOR, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKENERGY_VS[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetEnergy_VS[y in RET_CAP_STOR],
+ eNewCapTrackEnergy_VS[y] + eMinRetCapTrackEnergy_VS[y]<=eRetCapTrackEnergy_VS[y])
end
-function endogenous_retirement_vre_stor_discharge_dc!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Discharge DC) Module")
-
- gen = inputs["RESOURCES"]
-
- NEW_CAP_DISCHARGE_DC = inputs["NEW_CAP_DISCHARGE_DC"] # Set of all resources eligible for new capacity
- RET_CAP_DISCHARGE_DC = inputs["RET_CAP_DISCHARGE_DC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKDISCHARGEDC[y in RET_CAP_DISCHARGE_DC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKDISCHARGEDC[y in RET_CAP_DISCHARGE_DC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapDischargeDC[y in RET_CAP_DISCHARGE_DC],
- if y in NEW_CAP_DISCHARGE_DC
- EP[:vCAPDISCHARGE_DC][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapDischargeDC[y in RET_CAP_DISCHARGE_DC], EP[:vRETCAPDISCHARGE_DC][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC], sum(EP[:vRETCAPTRACKDISCHARGEDC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC], sum(EP[:vCAPTRACKDISCHARGEDC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC], cum_min_retired_cap_discharge_dc_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewDischargeDC[y in RET_CAP_DISCHARGE_DC], eNewCapDischargeDC[y] == vCAPTRACKDISCHARGEDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC,p=1:(cur_stage-1)], vCAPTRACKDISCHARGEDC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewDischargeDC[y in RET_CAP_DISCHARGE_DC], eRetCapTrackDischargeDC[y] == vRETCAPTRACKDISCHARGEDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC,p=1:(cur_stage-1)], vRETCAPTRACKDISCHARGEDC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetDischargeDC[y in RET_CAP_DISCHARGE_DC], eNewCapTrackDischargeDC[y] + eMinRetCapTrackDischargeDC[y] <= eRetCapTrackDischargeDC[y])
+function endogenous_retirement_vre_stor_discharge_dc!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Discharge DC) Module")
+
+ gen = inputs["RESOURCES"]
+
+ NEW_CAP_DISCHARGE_DC = inputs["NEW_CAP_DISCHARGE_DC"] # Set of all resources eligible for new capacity
+ RET_CAP_DISCHARGE_DC = inputs["RET_CAP_DISCHARGE_DC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKDISCHARGEDC[y in RET_CAP_DISCHARGE_DC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKDISCHARGEDC[y in RET_CAP_DISCHARGE_DC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ if y in NEW_CAP_DISCHARGE_DC
+ EP[:vCAPDISCHARGE_DC][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP,
+ eRetCapDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ EP[:vRETCAPDISCHARGE_DC][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ sum(EP[:vRETCAPTRACKDISCHARGEDC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ sum(EP[:vCAPTRACKDISCHARGEDC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ cum_min_retired_cap_discharge_dc_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ eNewCapDischargeDC[y]==vCAPTRACKDISCHARGEDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC, p = 1:(cur_stage - 1)],
+ vCAPTRACKDISCHARGEDC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ eRetCapTrackDischargeDC[y]==vRETCAPTRACKDISCHARGEDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackDischargeDC[y in RET_CAP_DISCHARGE_DC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKDISCHARGEDC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetDischargeDC[y in RET_CAP_DISCHARGE_DC],
+ eNewCapTrackDischargeDC[y] +
+ eMinRetCapTrackDischargeDC[y]<=eRetCapTrackDischargeDC[y])
end
-function endogenous_retirement_vre_stor_charge_dc!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Charge DC) Module")
-
- gen = inputs["RESOURCES"]
- NEW_CAP_CHARGE_DC = inputs["NEW_CAP_CHARGE_DC"] # Set of all resources eligible for new capacity
- RET_CAP_CHARGE_DC = inputs["RET_CAP_CHARGE_DC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKCHARGEDC[y in RET_CAP_CHARGE_DC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKCHARGEDC[y in RET_CAP_CHARGE_DC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapChargeDC[y in RET_CAP_CHARGE_DC],
- if y in NEW_CAP_CHARGE_DC
- EP[:vCAPCHARGE_DC][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapChargeDC[y in RET_CAP_CHARGE_DC], EP[:vRETCAPCHARGE_DC][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC], sum(EP[:vRETCAPTRACKCHARGEDC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackChargeDC[y in RET_CAP_CHARGE_DC], sum(EP[:vCAPTRACKCHARGEDC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC], cum_min_retired_cap_charge_dc_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewChargeDC[y in RET_CAP_CHARGE_DC], eNewCapChargeDC[y] == vCAPTRACKCHARGEDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackChargeDC[y in RET_CAP_CHARGE_DC,p=1:(cur_stage-1)], vCAPTRACKCHARGEDC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewChargeDC[y in RET_CAP_CHARGE_DC], eRetCapTrackChargeDC[y] == vRETCAPTRACKCHARGEDC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC,p=1:(cur_stage-1)], vRETCAPTRACKCHARGEDC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetChargeDC[y in RET_CAP_CHARGE_DC], eNewCapTrackChargeDC[y] + eMinRetCapTrackChargeDC[y] <= eRetCapTrackChargeDC[y])
+function endogenous_retirement_vre_stor_charge_dc!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Charge DC) Module")
+
+ gen = inputs["RESOURCES"]
+ NEW_CAP_CHARGE_DC = inputs["NEW_CAP_CHARGE_DC"] # Set of all resources eligible for new capacity
+ RET_CAP_CHARGE_DC = inputs["RET_CAP_CHARGE_DC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKCHARGEDC[y in RET_CAP_CHARGE_DC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKCHARGEDC[y in RET_CAP_CHARGE_DC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapChargeDC[y in RET_CAP_CHARGE_DC],
+ if y in NEW_CAP_CHARGE_DC
+ EP[:vCAPCHARGE_DC][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapChargeDC[y in RET_CAP_CHARGE_DC], EP[:vRETCAPCHARGE_DC][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC],
+ sum(EP[:vRETCAPTRACKCHARGEDC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackChargeDC[y in RET_CAP_CHARGE_DC],
+ sum(EP[:vCAPTRACKCHARGEDC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC],
+ cum_min_retired_cap_charge_dc_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewChargeDC[y in RET_CAP_CHARGE_DC],
+ eNewCapChargeDC[y]==vCAPTRACKCHARGEDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackChargeDC[y in RET_CAP_CHARGE_DC, p = 1:(cur_stage - 1)],
+ vCAPTRACKCHARGEDC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewChargeDC[y in RET_CAP_CHARGE_DC],
+ eRetCapTrackChargeDC[y]==vRETCAPTRACKCHARGEDC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackChargeDC[y in RET_CAP_CHARGE_DC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKCHARGEDC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetChargeDC[y in RET_CAP_CHARGE_DC],
+ eNewCapTrackChargeDC[y] + eMinRetCapTrackChargeDC[y]<=eRetCapTrackChargeDC[y])
end
-function endogenous_retirement_vre_stor_discharge_ac!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Discharge AC) Module")
-
- gen = inputs["RESOURCES"]
- NEW_CAP_DISCHARGE_AC = inputs["NEW_CAP_DISCHARGE_AC"] # Set of all resources eligible for new capacity
- RET_CAP_DISCHARGE_AC = inputs["RET_CAP_DISCHARGE_AC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKDISCHARGEAC[y in RET_CAP_DISCHARGE_AC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKDISCHARGEAC[y in RET_CAP_DISCHARGE_AC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapDischargeAC[y in RET_CAP_DISCHARGE_AC],
- if y in NEW_CAP_DISCHARGE_AC
- EP[:vCAPDISCHARGE_AC][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapDischargeAC[y in RET_CAP_DISCHARGE_AC], EP[:vRETCAPDISCHARGE_AC][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC], sum(EP[:vRETCAPTRACKDISCHARGEAC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC], sum(EP[:vCAPTRACKDISCHARGEAC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC], cum_min_retired_cap_discharge_ac_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewDischargeAC[y in RET_CAP_DISCHARGE_AC], eNewCapDischargeAC[y] == vCAPTRACKDISCHARGEAC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC,p=1:(cur_stage-1)], vCAPTRACKDISCHARGEAC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewDischargeAC[y in RET_CAP_DISCHARGE_AC], eRetCapTrackDischargeAC[y] == vRETCAPTRACKDISCHARGEAC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC,p=1:(cur_stage-1)], vRETCAPTRACKDISCHARGEAC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetDischargeAC[y in RET_CAP_DISCHARGE_AC], eNewCapTrackDischargeAC[y] + eMinRetCapTrackDischargeAC[y] <= eRetCapTrackDischargeAC[y])
+function endogenous_retirement_vre_stor_discharge_ac!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Discharge AC) Module")
+
+ gen = inputs["RESOURCES"]
+ NEW_CAP_DISCHARGE_AC = inputs["NEW_CAP_DISCHARGE_AC"] # Set of all resources eligible for new capacity
+ RET_CAP_DISCHARGE_AC = inputs["RET_CAP_DISCHARGE_AC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKDISCHARGEAC[y in RET_CAP_DISCHARGE_AC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKDISCHARGEAC[y in RET_CAP_DISCHARGE_AC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ if y in NEW_CAP_DISCHARGE_AC
+ EP[:vCAPDISCHARGE_AC][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP,
+ eRetCapDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ EP[:vRETCAPDISCHARGE_AC][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ sum(EP[:vRETCAPTRACKDISCHARGEAC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ sum(EP[:vCAPTRACKDISCHARGEAC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ cum_min_retired_cap_discharge_ac_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ eNewCapDischargeAC[y]==vCAPTRACKDISCHARGEAC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC, p = 1:(cur_stage - 1)],
+ vCAPTRACKDISCHARGEAC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ eRetCapTrackDischargeAC[y]==vRETCAPTRACKDISCHARGEAC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackDischargeAC[y in RET_CAP_DISCHARGE_AC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKDISCHARGEAC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetDischargeAC[y in RET_CAP_DISCHARGE_AC],
+ eNewCapTrackDischargeAC[y] +
+ eMinRetCapTrackDischargeAC[y]<=eRetCapTrackDischargeAC[y])
end
-function endogenous_retirement_vre_stor_charge_ac!(EP::Model, inputs::Dict, num_stages::Int, cur_stage::Int, stage_lens::Array{Int, 1})
-
- println("Endogenous Retirement (VRE-Storage Charge AC) Module")
-
- gen = inputs["RESOURCES"]
- NEW_CAP_CHARGE_AC = inputs["NEW_CAP_CHARGE_AC"] # Set of all resources eligible for new capacity
- RET_CAP_CHARGE_AC = inputs["RET_CAP_CHARGE_AC"] # Set of all resources eligible for capacity retirements
-
- ### Variables ###
-
- # Keep track of all new and retired capacity from all stages
- @variable(EP, vCAPTRACKCHARGEAC[y in RET_CAP_CHARGE_AC,p=1:num_stages] >= 0 )
- @variable(EP, vRETCAPTRACKCHARGEAC[y in RET_CAP_CHARGE_AC,p=1:num_stages] >= 0 )
-
- ### Expressions ###
-
- @expression(EP, eNewCapChargeAC[y in RET_CAP_CHARGE_AC],
- if y in NEW_CAP_CHARGE_AC
- EP[:vCAPCHARGE_AC][y]
- else
- EP[:vZERO]
- end
- )
-
- @expression(EP, eRetCapChargeAC[y in RET_CAP_CHARGE_AC], EP[:vRETCAPCHARGE_AC][y])
-
- # Construct and add the endogenous retirement constraint expressions
- @expression(EP, eRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC], sum(EP[:vRETCAPTRACKCHARGEAC][y,p] for p=1:cur_stage))
- @expression(EP, eNewCapTrackChargeAC[y in RET_CAP_CHARGE_AC], sum(EP[:vCAPTRACKCHARGEAC][y,p] for p=1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
- @expression(EP, eMinRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC], cum_min_retired_cap_charge_ac_mw(gen[y]))
-
- ### Constraints ###
-
- # Keep track of newly built capacity from previous stages
- @constraint(EP, cCapTrackNewChargeAC[y in RET_CAP_CHARGE_AC], eNewCapChargeAC[y] == vCAPTRACKCHARGEAC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cCapTrackChargeAC[y in RET_CAP_CHARGE_AC,p=1:(cur_stage-1)], vCAPTRACKCHARGEAC[y,p] == 0)
-
- # Keep track of retired capacity from previous stages
- @constraint(EP, cRetCapTrackNewChargeAC[y in RET_CAP_CHARGE_AC], eRetCapTrackChargeAC[y] == vRETCAPTRACKCHARGEAC[y,cur_stage])
- # The RHS of this constraint will be updated in the forward pass
- @constraint(EP, cRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC,p=1:(cur_stage-1)], vRETCAPTRACKCHARGEAC[y,p] == 0)
-
- @constraint(EP, cLifetimeRetChargeAC[y in RET_CAP_CHARGE_AC], eNewCapTrackChargeAC[y] + eMinRetCapTrackChargeAC[y] <= eRetCapTrackChargeAC[y])
+function endogenous_retirement_vre_stor_charge_ac!(EP::Model,
+ inputs::Dict,
+ num_stages::Int,
+ cur_stage::Int,
+ stage_lens::Array{Int, 1})
+ println("Endogenous Retirement (VRE-Storage Charge AC) Module")
+
+ gen = inputs["RESOURCES"]
+ NEW_CAP_CHARGE_AC = inputs["NEW_CAP_CHARGE_AC"] # Set of all resources eligible for new capacity
+ RET_CAP_CHARGE_AC = inputs["RET_CAP_CHARGE_AC"] # Set of all resources eligible for capacity retirements
+
+ ### Variables ###
+
+ # Keep track of all new and retired capacity from all stages
+ @variable(EP, vCAPTRACKCHARGEAC[y in RET_CAP_CHARGE_AC, p = 1:num_stages]>=0)
+ @variable(EP, vRETCAPTRACKCHARGEAC[y in RET_CAP_CHARGE_AC, p = 1:num_stages]>=0)
+
+ ### Expressions ###
+
+ @expression(EP, eNewCapChargeAC[y in RET_CAP_CHARGE_AC],
+ if y in NEW_CAP_CHARGE_AC
+ EP[:vCAPCHARGE_AC][y]
+ else
+ EP[:vZERO]
+ end)
+
+ @expression(EP, eRetCapChargeAC[y in RET_CAP_CHARGE_AC], EP[:vRETCAPCHARGE_AC][y])
+
+ # Construct and add the endogenous retirement constraint expressions
+ @expression(EP,
+ eRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC],
+ sum(EP[:vRETCAPTRACKCHARGEAC][y, p] for p in 1:cur_stage))
+ @expression(EP,
+ eNewCapTrackChargeAC[y in RET_CAP_CHARGE_AC],
+ sum(EP[:vCAPTRACKCHARGEAC][y, p]
+ for p in 1:get_retirement_stage(cur_stage, lifetime(gen[y]), stage_lens)))
+ @expression(EP,
+ eMinRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC],
+ cum_min_retired_cap_charge_ac_mw(gen[y]))
+
+ ### Constraints ###
+
+ # Keep track of newly built capacity from previous stages
+ @constraint(EP,
+ cCapTrackNewChargeAC[y in RET_CAP_CHARGE_AC],
+ eNewCapChargeAC[y]==vCAPTRACKCHARGEAC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cCapTrackChargeAC[y in RET_CAP_CHARGE_AC, p = 1:(cur_stage - 1)],
+ vCAPTRACKCHARGEAC[y, p]==0)
+
+ # Keep track of retired capacity from previous stages
+ @constraint(EP,
+ cRetCapTrackNewChargeAC[y in RET_CAP_CHARGE_AC],
+ eRetCapTrackChargeAC[y]==vRETCAPTRACKCHARGEAC[y, cur_stage])
+ # The RHS of this constraint will be updated in the forward pass
+ @constraint(EP,
+ cRetCapTrackChargeAC[y in RET_CAP_CHARGE_AC, p = 1:(cur_stage - 1)],
+ vRETCAPTRACKCHARGEAC[y, p]==0)
+
+ @constraint(EP,
+ cLifetimeRetChargeAC[y in RET_CAP_CHARGE_AC],
+ eNewCapTrackChargeAC[y] + eMinRetCapTrackChargeAC[y]<=eRetCapTrackChargeAC[y])
end
diff --git a/src/multi_stage/write_multi_stage_capacities_charge.jl b/src/multi_stage/write_multi_stage_capacities_charge.jl
index b098cae598..a9d7f4cf11 100644
--- a/src/multi_stage/write_multi_stage_capacities_charge.jl
+++ b/src/multi_stage/write_multi_stage_capacities_charge.jl
@@ -9,7 +9,6 @@ inputs:
* settings\_d - Dictionary containing settings dictionary configured in the multi-stage settings file multi\_stage\_settings.yml.
"""
function write_multi_stage_capacities_charge(outpath::String, settings_d::Dict)
-
num_stages = settings_d["NumStages"] # Total number of investment planning stages
capacities_d = Dict()
@@ -19,7 +18,8 @@ function write_multi_stage_capacities_charge(outpath::String, settings_d::Dict)
end
# Set first column of DataFrame as resource names from the first stage
- df_cap = DataFrame(Resource=capacities_d[1][!, :Resource], Zone=capacities_d[1][!, :Zone])
+ df_cap = DataFrame(Resource = capacities_d[1][!, :Resource],
+ Zone = capacities_d[1][!, :Zone])
# Store starting capacities from the first stage
df_cap[!, Symbol("StartChargeCap_p1")] = capacities_d[1][!, :StartChargeCap]
@@ -30,5 +30,4 @@ function write_multi_stage_capacities_charge(outpath::String, settings_d::Dict)
end
CSV.write(joinpath(outpath, "capacities_charge_multi_stage.csv"), df_cap)
-
end
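
For reference, a small sketch of the wide, one-column-per-stage layout these write_multi_stage_* functions produce. The data, the EndChargeCap column name, and the local output path are hypothetical; the real function reads per-stage results from outpath and also stores the stage-1 starting capacities shown above.

using CSV, DataFrames

capacities_d = Dict(
    1 => DataFrame(Resource = ["solar_pv", "battery"], Zone = [1, 1],
        EndChargeCap = [0.0, 10.0]),
    2 => DataFrame(Resource = ["solar_pv", "battery"], Zone = [1, 1],
        EndChargeCap = [0.0, 25.0]))

df_cap = DataFrame(Resource = capacities_d[1][!, :Resource],
    Zone = capacities_d[1][!, :Zone])
for p in 1:2
    # One capacity column per planning stage, named with the stage index
    df_cap[!, Symbol("EndChargeCap_p$p")] = capacities_d[p][!, :EndChargeCap]
end
CSV.write("capacities_charge_multi_stage.csv", df_cap)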
diff --git a/src/multi_stage/write_multi_stage_capacities_discharge.jl b/src/multi_stage/write_multi_stage_capacities_discharge.jl
index b4a84f433f..0da02b7002 100644
--- a/src/multi_stage/write_multi_stage_capacities_discharge.jl
+++ b/src/multi_stage/write_multi_stage_capacities_discharge.jl
@@ -9,7 +9,6 @@ inputs:
* settings\_d - Dictionary containing settings dictionary configured in the multi-stage settings file multi\_stage\_settings.yml.
"""
function write_multi_stage_capacities_discharge(outpath::String, settings_d::Dict)
-
num_stages = settings_d["NumStages"] # Total number of investment planning stages
capacities_d = Dict()
@@ -19,7 +18,8 @@ function write_multi_stage_capacities_discharge(outpath::String, settings_d::Dic
end
# Set first column of DataFrame as resource names from the first stage
- df_cap = DataFrame(Resource=capacities_d[1][!, :Resource], Zone=capacities_d[1][!, :Zone])
+ df_cap = DataFrame(Resource = capacities_d[1][!, :Resource],
+ Zone = capacities_d[1][!, :Zone])
# Store starting capacities from the first stage
df_cap[!, Symbol("StartCap_p1")] = capacities_d[1][!, :StartCap]
@@ -30,5 +30,4 @@ function write_multi_stage_capacities_discharge(outpath::String, settings_d::Dic
end
CSV.write(joinpath(outpath, "capacities_multi_stage.csv"), df_cap)
-
end
diff --git a/src/multi_stage/write_multi_stage_capacities_energy.jl b/src/multi_stage/write_multi_stage_capacities_energy.jl
index b9d2d81849..9c7a5c1567 100644
--- a/src/multi_stage/write_multi_stage_capacities_energy.jl
+++ b/src/multi_stage/write_multi_stage_capacities_energy.jl
@@ -9,7 +9,6 @@ inputs:
* settings\_d - Dictionary containing settings dictionary configured in the multi-stage settings file multi\_stage\_settings.yml.
"""
function write_multi_stage_capacities_energy(outpath::String, settings_d::Dict)
-
num_stages = settings_d["NumStages"] # Total number of investment planning stages
capacities_d = Dict()
@@ -19,7 +18,8 @@ function write_multi_stage_capacities_energy(outpath::String, settings_d::Dict)
end
# Set first column of DataFrame as resource names from the first stage
- df_cap = DataFrame(Resource=capacities_d[1][!, :Resource], Zone=capacities_d[1][!, :Zone])
+ df_cap = DataFrame(Resource = capacities_d[1][!, :Resource],
+ Zone = capacities_d[1][!, :Zone])
# Store starting capacities from the first stage
df_cap[!, Symbol("StartEnergyCap_p1")] = capacities_d[1][!, :StartEnergyCap]
@@ -30,5 +30,4 @@ function write_multi_stage_capacities_energy(outpath::String, settings_d::Dict)
end
CSV.write(joinpath(outpath, "capacities_energy_multi_stage.csv"), df_cap)
-
end
diff --git a/src/multi_stage/write_multi_stage_costs.jl b/src/multi_stage/write_multi_stage_costs.jl
index dcc5533f27..0c229a8d35 100644
--- a/src/multi_stage/write_multi_stage_costs.jl
+++ b/src/multi_stage/write_multi_stage_costs.jl
@@ -9,7 +9,6 @@ inputs:
* settings\_d - Dictionary containing settings dictionary configured in the multi-stage settings file multi\_stage\_settings.yml.
"""
function write_multi_stage_costs(outpath::String, settings_d::Dict, inputs_dict::Dict)
-
num_stages = settings_d["NumStages"] # Total number of DDP stages
wacc = settings_d["WACC"] # Interest Rate and also the discount rate unless specified other wise
stage_lens = settings_d["StageLengths"]
@@ -24,7 +23,7 @@ function write_multi_stage_costs(outpath::String, settings_d::Dict, inputs_dict:
OPEXMULTS = [inputs_dict[j]["OPEXMULT"] for j in 1:num_stages] # Stage-wise OPEX multipliers to count multiple years between two model stages
# Set first column of DataFrame as resource names from the first stage
- df_costs = DataFrame(Costs=costs_d[1][!, :Costs])
+ df_costs = DataFrame(Costs = costs_d[1][!, :Costs])
# Store discounted total costs for each stage in a data frame
for p in 1:num_stages
@@ -39,13 +38,14 @@ function write_multi_stage_costs(outpath::String, settings_d::Dict, inputs_dict:
# For OPEX costs, apply additional discounting
for cost in ["cVar", "cNSE", "cStart", "cUnmetRsv"]
if cost in df_costs[!, :Costs]
- df_costs[df_costs[!, :Costs].==cost, 2:end] = transpose(OPEXMULTS) .* df_costs[df_costs[!, :Costs].==cost, 2:end]
+ df_costs[df_costs[!, :Costs] .== cost, 2:end] = transpose(OPEXMULTS) .*
+ df_costs[df_costs[!, :Costs] .== cost,
+ 2:end]
end
end
# Remove "cTotal" from results (as this includes Cost-to-Go)
- df_costs = df_costs[df_costs[!, :Costs].!="cTotal", :]
+ df_costs = df_costs[df_costs[!, :Costs] .!= "cTotal", :]
CSV.write(joinpath(outpath, "costs_multi_stage.csv"), df_costs)
-
end
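
The OPEX-discounting block above relies on DataFrames' broadcasted row filtering and in-place block assignment. A toy example of the same idiom, with made-up numbers and two stages:

using DataFrames, LinearAlgebra

df = DataFrame(Costs = ["cVar", "cFix", "cTotal"],
    p1 = [10.0, 5.0, 15.0],
    p2 = [8.0, 5.0, 13.0])
OPEXMULTS = [1.0, 2.5]            # stage-wise OPEX multipliers (hypothetical)

mask = df[!, :Costs] .== "cVar"   # Bool vector selecting the cVar row
# Scale every stage column of the selected row by its stage multiplier
df[mask, 2:end] = transpose(OPEXMULTS) .* df[mask, 2:end]
# Drop the aggregate row, mirroring the removal of "cTotal" above
df = df[df[!, :Costs] .!= "cTotal", :]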
diff --git a/src/multi_stage/write_multi_stage_network_expansion.jl b/src/multi_stage/write_multi_stage_network_expansion.jl
index 3b9808d29f..1a6ddc7015 100644
--- a/src/multi_stage/write_multi_stage_network_expansion.jl
+++ b/src/multi_stage/write_multi_stage_network_expansion.jl
@@ -19,11 +19,12 @@ function write_multi_stage_network_expansion(outpath::String, settings_d::Dict)
end
# Set first column of output DataFrame as line IDs
- df_trans_cap = DataFrame(Line=trans_capacities_d[1][!, :Line])
+ df_trans_cap = DataFrame(Line = trans_capacities_d[1][!, :Line])
# Store new transmission capacities for all stages
for p in 1:num_stages
- df_trans_cap[!, Symbol("New_Trans_Capacity_p$p")] = trans_capacities_d[p][!, :New_Trans_Capacity]
+ df_trans_cap[!, Symbol("New_Trans_Capacity_p$p")] = trans_capacities_d[p][!,
+ :New_Trans_Capacity]
end
CSV.write(joinpath(outpath, "network_expansion_multi_stage.csv"), df_trans_cap)
diff --git a/src/multi_stage/write_multi_stage_stats.jl b/src/multi_stage/write_multi_stage_stats.jl
index 75919d067f..b0c089a9d0 100644
--- a/src/multi_stage/write_multi_stage_stats.jl
+++ b/src/multi_stage/write_multi_stage_stats.jl
@@ -9,7 +9,6 @@ inputs:
* stats\_d – Dictionary which contains the run time, upper bound, and lower bound of each DDP iteration.
"""
function write_multi_stage_stats(outpath::String, stats_d::Dict)
-
times_a = stats_d["TIMES"] # Time (seconds) of each iteration
upper_bounds_a = stats_d["UPPER_BOUNDS"] # Upper bound of each iteration
lower_bounds_a = stats_d["LOWER_BOUNDS"] # Lower bound of each iteration
@@ -20,12 +19,11 @@ function write_multi_stage_stats(outpath::String, stats_d::Dict)
realtive_gap_a = (upper_bounds_a .- lower_bounds_a) ./ lower_bounds_a
# Construct dataframe where first column is iteration number, second is iteration time
- df_stats = DataFrame(Iteration_Number=iteration_count_a,
- Seconds=times_a,
- Upper_Bound=upper_bounds_a,
- Lower_Bound=lower_bounds_a,
- Relative_Gap=realtive_gap_a)
+ df_stats = DataFrame(Iteration_Number = iteration_count_a,
+ Seconds = times_a,
+ Upper_Bound = upper_bounds_a,
+ Lower_Bound = lower_bounds_a,
+ Relative_Gap = realtive_gap_a)
CSV.write(joinpath(outpath, "stats_multi_stage.csv"), df_stats)
-
end
diff --git a/src/time_domain_reduction/precluster.jl b/src/time_domain_reduction/precluster.jl
index b4ddb4df76..1d7352b8d4 100644
--- a/src/time_domain_reduction/precluster.jl
+++ b/src/time_domain_reduction/precluster.jl
@@ -45,4 +45,4 @@ function run_timedomainreduction_multistage!(case::AbstractString)
end
return
-end
\ No newline at end of file
+end
diff --git a/src/time_domain_reduction/time_domain_reduction.jl b/src/time_domain_reduction/time_domain_reduction.jl
index ca6b25ac6b..71b39419b3 100644
--- a/src/time_domain_reduction/time_domain_reduction.jl
+++ b/src/time_domain_reduction/time_domain_reduction.jl
@@ -18,7 +18,6 @@ using Distances
using CSV
using GenX
-
const SEED = 1234
@doc raw"""
@@ -51,8 +50,9 @@ function parse_data(myinputs)
ZONES = myinputs["R_ZONES"]
# DEMAND - Demand_data.csv
- demand_profiles = [ myinputs["pD"][:,l] for l in 1:size(myinputs["pD"],2) ]
- demand_col_names = [DEMAND_COLUMN_PREFIX()*string(l) for l in 1:size(demand_profiles)[1]]
+ demand_profiles = [myinputs["pD"][:, l] for l in 1:size(myinputs["pD"], 2)]
+ demand_col_names = [DEMAND_COLUMN_PREFIX() * string(l)
+ for l in 1:size(demand_profiles)[1]]
demand_zones = [l for l in 1:size(demand_profiles)[1]]
col_to_zone_map = Dict(demand_col_names .=> 1:length(demand_col_names))
@@ -64,15 +64,18 @@ function parse_data(myinputs)
wind_col_names = []
var_col_names = []
for r in 1:length(RESOURCE_ZONES)
- if occursin("PV", RESOURCE_ZONES[r]) || occursin("pv", RESOURCE_ZONES[r]) || occursin("Pv", RESOURCE_ZONES[r]) || occursin("Solar", RESOURCE_ZONES[r]) || occursin("SOLAR", RESOURCE_ZONES[r]) || occursin("solar", RESOURCE_ZONES[r])
+ if occursin("PV", RESOURCE_ZONES[r]) || occursin("pv", RESOURCE_ZONES[r]) ||
+ occursin("Pv", RESOURCE_ZONES[r]) || occursin("Solar", RESOURCE_ZONES[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES[r]) || occursin("solar", RESOURCE_ZONES[r])
push!(solar_col_names, RESOURCE_ZONES[r])
- push!(solar_profiles, myinputs["pP_Max"][r,:])
- elseif occursin("Wind", RESOURCE_ZONES[r]) || occursin("WIND", RESOURCE_ZONES[r]) || occursin("wind", RESOURCE_ZONES[r])
+ push!(solar_profiles, myinputs["pP_Max"][r, :])
+ elseif occursin("Wind", RESOURCE_ZONES[r]) || occursin("WIND", RESOURCE_ZONES[r]) ||
+ occursin("wind", RESOURCE_ZONES[r])
push!(wind_col_names, RESOURCE_ZONES[r])
- push!(wind_profiles, myinputs["pP_Max"][r,:])
+ push!(wind_profiles, myinputs["pP_Max"][r, :])
end
push!(var_col_names, RESOURCE_ZONES[r])
- push!(var_profiles, myinputs["pP_Max"][r,:])
+ push!(var_profiles, myinputs["pP_Max"][r, :])
col_to_zone_map[RESOURCE_ZONES[r]] = ZONES[r]
end
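
As an aside, the six chained occursin calls used to classify solar and wind columns (here and again in parse_multi_stage_data below) can each be expressed as one case-insensitive regex. This is only an alternative sketch, not what GenX does:

is_solar_name(name) = occursin(r"pv|solar"i, name)  # covers PV/Pv/pv, Solar/SOLAR/solar, and other case variants
is_wind_name(name) = occursin(r"wind"i, name)

is_solar_name("Utility_PV_Zone1")   # true
is_wind_name("LandbasedWind_Z2")    # true
is_solar_name("Battery_Z1")         # false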
@@ -82,15 +85,18 @@ function parse_data(myinputs)
AllFuelsConst = true
for f in 1:length(fuel_col_names)
push!(fuel_profiles, myinputs["fuel_costs"][fuel_col_names[f]])
- if AllFuelsConst && (minimum(myinputs["fuel_costs"][fuel_col_names[f]]) != maximum(myinputs["fuel_costs"][fuel_col_names[f]]))
+ if AllFuelsConst && (minimum(myinputs["fuel_costs"][fuel_col_names[f]]) !=
+ maximum(myinputs["fuel_costs"][fuel_col_names[f]]))
AllFuelsConst = false
end
end
all_col_names = [demand_col_names; var_col_names; fuel_col_names]
all_profiles = [demand_profiles..., var_profiles..., fuel_profiles...]
- return demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst
+ return demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names,
+ all_col_names,
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles,
+ all_profiles,
+ col_to_zone_map, AllFuelsConst
end
@doc raw"""
@@ -113,39 +119,46 @@ function parse_multi_stage_data(inputs_dict)
# [ REPLACE THIS with multi_stage_settings.yml StageLengths ]
# In case not all stages have the same length, check relative lengths
- stage_lengths = [ size(inputs_dict[t]["pD"][:,1],1) for t in 1:length(keys(inputs_dict)) ]
+ stage_lengths = [size(inputs_dict[t]["pD"][:, 1], 1)
+ for t in 1:length(keys(inputs_dict))]
total_length = sum(stage_lengths)
- relative_lengths = stage_lengths/total_length
+ relative_lengths = stage_lengths / total_length
# DEMAND - Demand_data.csv
- stage_demand_profiles = [ inputs_dict[t]["pD"][:,l] for t in 1:length(keys(inputs_dict)), l in 1:size(inputs_dict[1]["pD"],2) ]
- vector_lps = [stage_demand_profiles[:,l] for l in 1:size(inputs_dict[1]["pD"],2)]
- demand_profiles = [reduce(vcat,vector_lps[l]) for l in 1:size(inputs_dict[1]["pD"],2)]
- demand_col_names = [DEMAND_COLUMN_PREFIX()*string(l) for l in 1:size(demand_profiles)[1]]
+ stage_demand_profiles = [inputs_dict[t]["pD"][:, l]
+ for t in 1:length(keys(inputs_dict)),
+ l in 1:size(inputs_dict[1]["pD"], 2)]
+ vector_lps = [stage_demand_profiles[:, l] for l in 1:size(inputs_dict[1]["pD"], 2)]
+ demand_profiles = [reduce(vcat, vector_lps[l]) for l in 1:size(inputs_dict[1]["pD"], 2)]
+ demand_col_names = [DEMAND_COLUMN_PREFIX() * string(l)
+ for l in 1:size(demand_profiles)[1]]
demand_zones = [l for l in 1:size(demand_profiles)[1]]
col_to_zone_map = Dict(demand_col_names .=> 1:length(demand_col_names))
# CAPACITY FACTORS - Generators_variability.csv
for r in 1:length(RESOURCE_ZONES)
- if occursin("PV", RESOURCE_ZONES[r]) || occursin("pv", RESOURCE_ZONES[r]) || occursin("Pv", RESOURCE_ZONES[r]) || occursin("Solar", RESOURCE_ZONES[r]) || occursin("SOLAR", RESOURCE_ZONES[r]) || occursin("solar", RESOURCE_ZONES[r])
+ if occursin("PV", RESOURCE_ZONES[r]) || occursin("pv", RESOURCE_ZONES[r]) ||
+ occursin("Pv", RESOURCE_ZONES[r]) || occursin("Solar", RESOURCE_ZONES[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES[r]) || occursin("solar", RESOURCE_ZONES[r])
push!(solar_col_names, RESOURCE_ZONES[r])
pv_all_stages = []
for t in 1:length(keys(inputs_dict))
- pv_all_stages = vcat(pv_all_stages, inputs_dict[t]["pP_Max"][r,:])
+ pv_all_stages = vcat(pv_all_stages, inputs_dict[t]["pP_Max"][r, :])
end
push!(solar_profiles, pv_all_stages)
- elseif occursin("Wind", RESOURCE_ZONES[r]) || occursin("WIND", RESOURCE_ZONES[r]) || occursin("wind", RESOURCE_ZONES[r])
+ elseif occursin("Wind", RESOURCE_ZONES[r]) || occursin("WIND", RESOURCE_ZONES[r]) ||
+ occursin("wind", RESOURCE_ZONES[r])
push!(wind_col_names, RESOURCE_ZONES[r])
wind_all_stages = []
for t in 1:length(keys(inputs_dict))
- wind_all_stages = vcat(wind_all_stages, inputs_dict[t]["pP_Max"][r,:])
+ wind_all_stages = vcat(wind_all_stages, inputs_dict[t]["pP_Max"][r, :])
end
push!(wind_profiles, wind_all_stages)
end
push!(var_col_names, RESOURCE_ZONES[r])
var_all_stages = []
for t in 1:length(keys(inputs_dict))
- var_all_stages = vcat(var_all_stages, inputs_dict[t]["pP_Max"][r,:])
+ var_all_stages = vcat(var_all_stages, inputs_dict[t]["pP_Max"][r, :])
end
push!(var_profiles, var_all_stages)
col_to_zone_map[RESOURCE_ZONES[r]] = ZONES[r]
@@ -158,8 +171,10 @@ function parse_multi_stage_data(inputs_dict)
for f in 1:length(fuel_col_names)
fuel_all_stages = []
for t in 1:length(keys(inputs_dict))
- fuel_all_stages = vcat(fuel_all_stages, inputs_dict[t]["fuel_costs"][fuel_col_names[f]])
- if AllFuelsConst && (minimum(inputs_dict[t]["fuel_costs"][fuel_col_names[f]]) != maximum(inputs_dict[t]["fuel_costs"][fuel_col_names[f]]))
+ fuel_all_stages = vcat(fuel_all_stages,
+ inputs_dict[t]["fuel_costs"][fuel_col_names[f]])
+ if AllFuelsConst && (minimum(inputs_dict[t]["fuel_costs"][fuel_col_names[f]]) !=
+ maximum(inputs_dict[t]["fuel_costs"][fuel_col_names[f]]))
AllFuelsConst = false
end
end
@@ -168,9 +183,11 @@ function parse_multi_stage_data(inputs_dict)
all_col_names = [demand_col_names; var_col_names; fuel_col_names]
all_profiles = [demand_profiles..., var_profiles..., fuel_profiles...]
- return demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst, stage_lengths, total_length, relative_lengths
+ return demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names,
+ all_col_names,
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles,
+ all_profiles,
+ col_to_zone_map, AllFuelsConst, stage_lengths, total_length, relative_lengths
end
@doc raw"""
@@ -184,13 +201,16 @@ representation is within a given proportion of the "maximum" possible deviation.
"""
function check_condition(Threshold, R, OldColNames, ScalingMethod, TimestepsPerRepPeriod)
if ScalingMethod == "N"
- return maximum(R.costs)/(length(OldColNames)*TimestepsPerRepPeriod) < Threshold
+ return maximum(R.costs) / (length(OldColNames) * TimestepsPerRepPeriod) < Threshold
elseif ScalingMethod == "S"
- return maximum(R.costs)/(length(OldColNames)*TimestepsPerRepPeriod*4) < Threshold
+ return maximum(R.costs) / (length(OldColNames) * TimestepsPerRepPeriod * 4) <
+ Threshold
else
- println("INVALID Scaling Method ", ScalingMethod, " / Choose N for Normalization or S for Standardization. Proceeding with N.")
+ println("INVALID Scaling Method ",
+ ScalingMethod,
+ " / Choose N for Normalization or S for Standardization. Proceeding with N.")
end
- return maximum(R.costs)/(length(OldColNames)*TimestepsPerRepPeriod) < Threshold
+ return maximum(R.costs) / (length(OldColNames) * TimestepsPerRepPeriod) < Threshold
end
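The "N" branch of `check_condition` compares the worst per-period clustering cost, averaged over every profile column and timestep, against the user threshold. A minimal sketch with hypothetical numbers (`costs` stands in for the per-period `costs` field of a Clustering.jl result):

```julia
# Hypothetical values standing in for a clustering result's per-period costs.
costs = [0.8, 1.2, 0.9]
OldColNames = ["Demand_z1", "PV_z1", "Wind_z1"]
TimestepsPerRepPeriod = 168
Threshold = 0.05

# "N" branch: worst per-period cost spread over columns × timesteps.
passes = maximum(costs) / (length(OldColNames) * TimestepsPerRepPeriod) < Threshold
println(passes)   # true, since 1.2 / 504 ≈ 0.0024 < 0.05
```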
@doc raw"""
@@ -213,20 +233,28 @@ K-Means: [https://juliastats.org/Clustering.jl/dev/kmeans.html](https://juliasta
K-Medoids: [https://juliastats.org/Clustering.jl/stable/kmedoids.html](https://juliastats.org/Clustering.jl/stable/kmedoids.html)
"""
-function cluster(ClusterMethod, ClusteringInputDF, NClusters, nIters, v=false, random=true)
+function cluster(ClusterMethod,
+ ClusteringInputDF,
+ NClusters,
+ nIters,
+ v = false,
+ random = true)
if ClusterMethod == "kmeans"
- DistMatrix = pairwise(Euclidean(), Matrix(ClusteringInputDF), dims=2)
- R = kmeans(Matrix(ClusteringInputDF), NClusters, init=:kmcen)
+ DistMatrix = pairwise(Euclidean(), Matrix(ClusteringInputDF), dims = 2)
+ R = kmeans(Matrix(ClusteringInputDF), NClusters, init = :kmcen)
for i in 1:nIters
- if !random; Random.seed!(SEED); end
+ if !random
+ Random.seed!(SEED)
+ end
R_i = kmeans(Matrix(ClusteringInputDF), NClusters)
if R_i.totalcost < R.totalcost
R = R_i
end
- if v && (i % (nIters/10) == 0)
- println(string(i) * " : " * string(round(R_i.totalcost, digits=3)) * " " * string(round(R.totalcost, digits=3)) )
+ if v && (i % (nIters / 10) == 0)
+ println(string(i) * " : " * string(round(R_i.totalcost, digits = 3)) * " " *
+ string(round(R.totalcost, digits = 3)))
end
end
@@ -236,22 +264,26 @@ function cluster(ClusterMethod, ClusteringInputDF, NClusters, nIters, v=false, r
M = []
for i in 1:NClusters
- dists = [euclidean(Centers[:,i], ClusteringInputDF[!, j]) for j in 1:size(ClusteringInputDF, 2)]
- push!(M,argmin(dists))
+ dists = [euclidean(Centers[:, i], ClusteringInputDF[!, j])
+ for j in 1:size(ClusteringInputDF, 2)]
+ push!(M, argmin(dists))
end
elseif ClusterMethod == "kmedoids"
- DistMatrix = pairwise(Euclidean(), Matrix(ClusteringInputDF), dims=2)
- R = kmedoids(DistMatrix, NClusters, init=:kmcen)
+ DistMatrix = pairwise(Euclidean(), Matrix(ClusteringInputDF), dims = 2)
+ R = kmedoids(DistMatrix, NClusters, init = :kmcen)
for i in 1:nIters
- if !random; Random.seed!(SEED); end
+ if !random
+ Random.seed!(SEED)
+ end
R_i = kmedoids(DistMatrix, NClusters)
if R_i.totalcost < R.totalcost
R = R_i
end
- if v && (i % (nIters/10) == 0)
- println(string(i) * " : " * string(round(R_i.totalcost, digits=3)) * " " * string(round(R.totalcost, digits=3)) )
+ if v && (i % (nIters / 10) == 0)
+ println(string(i) * " : " * string(round(R_i.totalcost, digits = 3)) * " " *
+ string(round(R.totalcost, digits = 3)))
end
end
@@ -271,14 +303,16 @@ end
Remove and store the columns that do not vary during the period.
"""
-function RemoveConstCols(all_profiles, all_col_names, v=false)
+function RemoveConstCols(all_profiles, all_col_names, v = false)
ConstData = []
ConstIdx = []
ConstCols = []
for c in 1:length(all_col_names)
Const = minimum(all_profiles[c]) == maximum(all_profiles[c])
if Const
- if v println("Removing constant col: ", all_col_names[c]) end
+ if v
+ println("Removing constant col: ", all_col_names[c])
+ end
push!(ConstData, all_profiles[c])
push!(ConstCols, all_col_names[c])
push!(ConstIdx, c)
@@ -304,37 +338,59 @@ system to be included among the extreme periods. They would select
"""
function get_extreme_period(DF, GDF, profKey, typeKey, statKey,
- ConstCols, demand_col_names, solar_col_names, wind_col_names, v=false)
- if v println(profKey," ", typeKey," ", statKey) end
+ ConstCols, demand_col_names, solar_col_names, wind_col_names, v = false)
+ if v
+ println(profKey, " ", typeKey, " ", statKey)
+ end
if typeKey == "Integral"
if profKey == "Demand"
- (stat, group_idx) = get_integral_extreme(GDF, statKey, demand_col_names, ConstCols)
+ (stat, group_idx) = get_integral_extreme(GDF,
+ statKey,
+ demand_col_names,
+ ConstCols)
elseif profKey == "PV"
- (stat, group_idx) = get_integral_extreme(GDF, statKey, solar_col_names, ConstCols)
+ (stat, group_idx) = get_integral_extreme(GDF,
+ statKey,
+ solar_col_names,
+ ConstCols)
elseif profKey == "Wind"
- (stat, group_idx) = get_integral_extreme(GDF, statKey, wind_col_names, ConstCols)
+ (stat, group_idx) = get_integral_extreme(GDF,
+ statKey,
+ wind_col_names,
+ ConstCols)
else
- println("Error: Profile Key ", profKey, " is invalid. Choose `Demand', `PV' or `Wind'.")
+ println("Error: Profile Key ",
+ profKey,
+ " is invalid. Choose `Demand', `PV' or `Wind'.")
end
elseif typeKey == "Absolute"
if profKey == "Demand"
- (stat, group_idx) = get_absolute_extreme(DF, statKey, demand_col_names, ConstCols)
+ (stat, group_idx) = get_absolute_extreme(DF,
+ statKey,
+ demand_col_names,
+ ConstCols)
elseif profKey == "PV"
- (stat, group_idx) = get_absolute_extreme(DF, statKey, solar_col_names, ConstCols)
+ (stat, group_idx) = get_absolute_extreme(DF,
+ statKey,
+ solar_col_names,
+ ConstCols)
elseif profKey == "Wind"
(stat, group_idx) = get_absolute_extreme(DF, statKey, wind_col_names, ConstCols)
else
- println("Error: Profile Key ", profKey, " is invalid. Choose `Demand', `PV' or `Wind'.")
+ println("Error: Profile Key ",
+ profKey,
+ " is invalid. Choose `Demand', `PV' or `Wind'.")
end
- else
- println("Error: Type Key ", typeKey, " is invalid. Choose `Absolute' or `Integral'.")
- stat = 0
- group_idx = 0
- end
+ else
+ println("Error: Type Key ",
+ typeKey,
+ " is invalid. Choose `Absolute' or `Integral'.")
+ stat = 0
+ group_idx = 0
+ end
return (stat, group_idx)
end
-
@doc raw"""
get_integral_extreme(GDF, statKey, col_names, ConstCols)
@@ -345,9 +401,11 @@ summed over the period.
"""
function get_integral_extreme(GDF, statKey, col_names, ConstCols)
if statKey == "Max"
- (stat, stat_idx) = findmax( sum([GDF[!, Symbol(c)] for c in setdiff(col_names, ConstCols) ]) )
+ (stat, stat_idx) = findmax(sum([GDF[!, Symbol(c)]
+ for c in setdiff(col_names, ConstCols)]))
elseif statKey == "Min"
- (stat, stat_idx) = findmin( sum([GDF[!, Symbol(c)] for c in setdiff(col_names, ConstCols) ]) )
+ (stat, stat_idx) = findmin(sum([GDF[!, Symbol(c)]
+ for c in setdiff(col_names, ConstCols)]))
else
println("Error: Statistic Key ", statKey, " is invalid. Choose `Max' or `Min'.")
end
@@ -363,10 +421,12 @@ Get the period index of the single timestep with the minimum or maximum demand o
"""
function get_absolute_extreme(DF, statKey, col_names, ConstCols)
if statKey == "Max"
- (stat, stat_idx) = findmax( sum([DF[!, Symbol(c)] for c in setdiff(col_names, ConstCols) ]) )
+ (stat, stat_idx) = findmax(sum([DF[!, Symbol(c)]
+ for c in setdiff(col_names, ConstCols)]))
group_idx = DF.Group[stat_idx]
elseif statKey == "Min"
- (stat, stat_idx) = findmin( sum([DF[!, Symbol(c)] for c in setdiff(col_names, ConstCols) ]) )
+ (stat, stat_idx) = findmin(sum([DF[!, Symbol(c)]
+ for c in setdiff(col_names, ConstCols)]))
group_idx = DF.Group[stat_idx]
else
println("Error: Statistic Key ", statKey, " is invalid. Choose `Max' or `Min'.")
@@ -374,7 +434,6 @@ function get_absolute_extreme(DF, statKey, col_names, ConstCols)
return (stat, group_idx)
end
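To make the Integral/Absolute distinction used by these two helpers concrete, here is a hedged toy example with invented data (a single zone, so the cross-column sum in the real code collapses to one column):

```julia
using DataFrames

# Two 3-hour periods of demand for one hypothetical zone.
df = DataFrame(Group = [1, 1, 1, 2, 2, 2],
    Demand_z1 = [10.0, 12.0, 11.0, 20.0, 5.0, 6.0])
gdf = combine(groupby(df, :Group), :Demand_z1 => sum => :Demand_z1)

# "Integral" extreme: period 1 has the larger summed demand (33 vs 31).
println(findmax(gdf.Demand_z1))   # (33.0, 1)

# "Absolute" extreme: the single highest hour (20.0) falls in period 2.
stat, idx = findmax(df.Demand_z1)
println(df.Group[idx])            # 2
```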
-
@doc raw"""
scale_weights(W, H)
@@ -386,9 +445,11 @@ w_j \leftarrow H \cdot \frac{w_j}{\sum_i w_i} \: \: \: \forall w_j \in W
```
"""
-function scale_weights(W, H, v=false)
- if v println("Weights before scaling: ", W) end
- W = [ float(w)/sum(W) * H for w in W] # Scale to number of hours in input data
+function scale_weights(W, H, v = false)
+ if v
+ println("Weights before scaling: ", W)
+ end
+ W = [float(w) / sum(W) * H for w in W] # Scale to number of hours in input data
if v
println("Weights after scaling: ", W)
println("Sum of Updated Cluster Weights: ", sum(W))
@@ -396,7 +457,6 @@ function scale_weights(W, H, v=false)
return W
end
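The docstring formula above simply rescales the cluster weights proportionally so that they sum to H, the number of hours in the input data. A worked example with made-up weights:

```julia
# Hypothetical weights: three representative periods covering 10, 26 and 16
# candidate periods, rescaled to H = 52 weeks * 168 h = 8736 hours.
W = [10, 26, 16]
H = 52 * 168
scaled = [float(w) / sum(W) * H for w in W]
println(scaled)        # [1680.0, 4368.0, 2688.0]
println(sum(scaled))   # 8736.0 — exactly H, as the docstring formula requires
```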
-
@doc raw"""
get_demand_multipliers(ClusterOutputData, ModifiedData, M, W, DemandCols, TimestepsPerRepPeriod, NewColNames, NClusters, Ncols)
@@ -416,7 +476,16 @@ demand in timestep $i$ for representative period $m$ in zone $z$, $w_m$ is the w
hours that one hour in representative period $m$ represents in the original profile, and $k_z$ is the zonal demand multiplier returned by the function.
"""
-function get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols, TimestepsPerRepPeriod, NewColNames, NClusters, Ncols, v=false)
+function get_demand_multipliers(ClusterOutputData,
+ InputData,
+ M,
+ W,
+ DemandCols,
+ TimestepsPerRepPeriod,
+ NewColNames,
+ NClusters,
+ Ncols,
+ v = false)
# Compute original zonal total demands
zone_sums = Dict()
for demandcol in DemandCols
@@ -426,7 +495,9 @@ function get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols,
# Compute zonal demands per representative period
cluster_zone_sums = Dict()
for m in 1:NClusters
- clustered_lp_DF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols if (Symbol(NewColNames[i]) in DemandCols)) )
+ clustered_lp_DF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols
+ if (Symbol(NewColNames[i]) in DemandCols)))
cluster_zone_sums[m] = Dict()
for demandcol in DemandCols
cluster_zone_sums[m][demandcol] = sum(clustered_lp_DF[:, demandcol])
@@ -439,10 +510,20 @@ function get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols,
demand_mults = Dict()
for demandcol in DemandCols
for m in 1:NClusters
- weighted_cluster_zone_sums[demandcol] += (W[m]/(TimestepsPerRepPeriod))*cluster_zone_sums[m][demandcol]
+ weighted_cluster_zone_sums[demandcol] += (W[m] / (TimestepsPerRepPeriod)) *
+ cluster_zone_sums[m][demandcol]
+ end
+ demand_mults[demandcol] = zone_sums[demandcol] /
+ weighted_cluster_zone_sums[demandcol]
+ if v
+ println(demandcol,
+ ": ",
+ weighted_cluster_zone_sums[demandcol],
+ " vs. ",
+ zone_sums[demandcol],
+ " => ",
+ demand_mults[demandcol])
end
- demand_mults[demandcol] = zone_sums[demandcol]/weighted_cluster_zone_sums[demandcol]
- if v println(demandcol, ": ", weighted_cluster_zone_sums[demandcol], " vs. ", zone_sums[demandcol], " => ", demand_mults[demandcol]) end
end
# Zone-wise validation that scaled clustered demand equals original demand (Don't actually scale demand in this function)
@@ -453,20 +534,34 @@ function get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols,
if (NewColNames[i] in DemandCols)
# Uncomment this line if we decide to scale demand here instead of later. (Also remove "demand_mults[NewColNames[i]]*" term from new_zone_sums computation)
#ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] *= demand_mults[NewColNames[i]]
- println(" Scaling ", M[m], " (", NewColNames[i], ") : ", cluster_zone_sums[m][NewColNames[i]], " => ", demand_mults[NewColNames[i]]*sum(ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i]))
- new_zone_sums[NewColNames[i]] += (W[m]/(TimestepsPerRepPeriod))*demand_mults[NewColNames[i]]*sum(ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i])
+ println(" Scaling ",
+ M[m],
+ " (",
+ NewColNames[i],
+ ") : ",
+ cluster_zone_sums[m][NewColNames[i]],
+ " => ",
+ demand_mults[NewColNames[i]] *
+ sum(ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]))
+ new_zone_sums[NewColNames[i]] += (W[m] / (TimestepsPerRepPeriod)) *
+ demand_mults[NewColNames[i]] *
+ sum(ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)])
end
end
end
for demandcol in DemandCols
- println(demandcol, ": ", new_zone_sums[demandcol], " =?= ", zone_sums[demandcol])
+ println(demandcol,
+ ": ",
+ new_zone_sums[demandcol],
+ " =?= ",
+ zone_sums[demandcol])
end
end
return demand_mults
end
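The multiplier returned here is the ratio of the original zonal demand to the weight-expanded clustered demand. A single-zone sketch with invented numbers:

```julia
# Invented single-zone numbers, for illustration only.
zone_sum = 1_000_000.0                    # original annual zonal demand
W = [3000.0, 5736.0]                      # hours represented by each rep. period
TimestepsPerRepPeriod = 168
cluster_zone_sums = [17_000.0, 18_500.0]  # zonal demand inside each rep. period

weighted = sum((W[m] / TimestepsPerRepPeriod) * cluster_zone_sums[m] for m in 1:2)
k_z = zone_sum / weighted
println(k_z)   # ≈ 1.069 — later used to scale the clustered demand columns up
```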
-function update_deprecated_tdr_inputs!(setup::Dict{Any,Any})
+function update_deprecated_tdr_inputs!(setup::Dict{Any, Any})
if "LoadWeight" in keys(setup)
setup["DemandWeight"] = setup["LoadWeight"]
delete!(setup, "LoadWeight")
@@ -479,14 +574,13 @@ function update_deprecated_tdr_inputs!(setup::Dict{Any,Any})
extr_dict = setup[extreme_periods]
if "Load" in keys(extr_dict)
- extr_dict["Demand"] = extr_dict["Load"]
+ extr_dict["Demand"] = extr_dict["Load"]
delete!(extr_dict, "Load")
- @info "In time_domain_reduction_settings file the key Load is deprecated. Prefer Demand."
- end
+ @info "In time_domain_reduction_settings file the key Load is deprecated. Prefer Demand."
+ end
end
end
-
@doc raw"""
cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; random=true)
@@ -541,13 +635,21 @@ to separate Vre_and_stor_solar_variability.csv and Vre_and_stor_wind_variability
and wind profiles for co-located resources will be separated into different CSV files to be read by loading the inputs
after the clustering of the inputs has occurred.
"""
-function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; random=true)
- if v println(now()) end
+function cluster_inputs(inpath,
+ settings_path,
+ mysetup,
+ stage_id = -99,
+ v = false;
+ random = true)
+ if v
+ println(now())
+ end
##### Step 0: Load in settings and data
# Read time domain reduction settings file time_domain_reduction_settings.yml
- myTDRsetup = YAML.load(open(joinpath(settings_path,"time_domain_reduction_settings.yml")))
+ myTDRsetup = YAML.load(open(joinpath(settings_path,
+ "time_domain_reduction_settings.yml")))
update_deprecated_tdr_inputs!(myTDRsetup)
# Accept model parameters from the settings file time_domain_reduction_settings.yml
@@ -582,46 +684,55 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# Define a local version of the setup so that you can modify the mysetup["ParameterScale"] value to be zero in case it is 1
mysetup_local = copy(mysetup)
# If ParameterScale =1 then make it zero, since clustered inputs will be scaled prior to generating model
- mysetup_local["ParameterScale"]=0 # Performing cluster and report outputs in user-provided units
+ mysetup_local["ParameterScale"] = 0 # Performing cluster and report outputs in user-provided units
    # Define another local version of setup such that Multi-Stage Non-Concatenation TDR can iteratively read in the raw data
mysetup_MS = copy(mysetup)
- mysetup_MS["TimeDomainReduction"]=0
- mysetup_MS["DoNotReadPeriodMap"]=1
- mysetup_MS["ParameterScale"]=0
+ mysetup_MS["TimeDomainReduction"] = 0
+ mysetup_MS["DoNotReadPeriodMap"] = 1
+ mysetup_MS["ParameterScale"] = 0
if MultiStage == 1
- model_dict=Dict()
- inputs_dict=Dict()
+ model_dict = Dict()
+ inputs_dict = Dict()
for t in 1:NumStages
- # Step 0) Set Model Year
- mysetup["MultiStageSettingsDict"]["CurStage"] = t
+ # Step 0) Set Model Year
+ mysetup["MultiStageSettingsDict"]["CurStage"] = t
- # Step 1) Load Inputs
- global inpath_sub = string("$inpath/inputs/inputs_p",t)
+ # Step 1) Load Inputs
+ global inpath_sub = string("$inpath/inputs/inputs_p", t)
# this prevents doubled time domain reduction in stages past
# the first, even if the first stage is okay.
- prevent_doubled_timedomainreduction(joinpath(inpath_sub, mysetup["SystemFolder"]))
+ prevent_doubled_timedomainreduction(joinpath(inpath_sub,
+ mysetup["SystemFolder"]))
- inputs_dict[t] = load_inputs(mysetup_MS, inpath_sub)
+ inputs_dict[t] = load_inputs(mysetup_MS, inpath_sub)
- inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],mysetup["MultiStageSettingsDict"],mysetup["NetworkExpansion"])
+ inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],
+ mysetup["MultiStageSettingsDict"],
+ mysetup["NetworkExpansion"])
end
if MultiStageConcatenate == 1
- if v println("MultiStage with Concatenation") end
+ if v
+ println("MultiStage with Concatenation")
+ end
RESOURCE_ZONES = inputs_dict[1]["RESOURCE_ZONES"]
RESOURCES = inputs_dict[1]["RESOURCE_NAMES"]
ZONES = inputs_dict[1]["R_ZONES"]
# Parse input data into useful structures divided by type (demand, wind, solar, fuel, groupings thereof, etc.)
# TO DO LATER: Replace these with collections of col_names, profiles, zones
demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst, stage_lengths, total_length, relative_lengths = parse_multi_stage_data(inputs_dict)
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
+ col_to_zone_map, AllFuelsConst, stage_lengths, total_length, relative_lengths = parse_multi_stage_data(inputs_dict)
else # TDR each period individually
- if v println("MultiStage without Concatenation") end
- if v println("---> STAGE ", stage_id) end
+ if v
+ println("MultiStage without Concatenation")
+ end
+ if v
+ println("---> STAGE ", stage_id)
+ end
myinputs = inputs_dict[stage_id]
RESOURCE_ZONES = myinputs["RESOURCE_ZONES"]
RESOURCES = myinputs["RESOURCE_NAMES"]
@@ -629,32 +740,41 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# Parse input data into useful structures divided by type (demand, wind, solar, fuel, groupings thereof, etc.)
# TO DO LATER: Replace these with collections of col_names, profiles, zones
demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst = parse_data(myinputs)
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
+ col_to_zone_map, AllFuelsConst = parse_data(myinputs)
end
else
- if v println("Not MultiStage") end
- myinputs = load_inputs(mysetup_local,inpath)
+ if v
+ println("Not MultiStage")
+ end
+ myinputs = load_inputs(mysetup_local, inpath)
RESOURCE_ZONES = myinputs["RESOURCE_ZONES"]
RESOURCES = myinputs["RESOURCE_NAMES"]
ZONES = myinputs["R_ZONES"]
# Parse input data into useful structures divided by type (demand, wind, solar, fuel, groupings thereof, etc.)
# TO DO LATER: Replace these with collections of col_names, profiles, zones
demand_col_names, var_col_names, solar_col_names, wind_col_names, fuel_col_names, all_col_names,
- demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
- col_to_zone_map, AllFuelsConst = parse_data(myinputs)
+ demand_profiles, var_profiles, solar_profiles, wind_profiles, fuel_profiles, all_profiles,
+ col_to_zone_map, AllFuelsConst = parse_data(myinputs)
+ end
+ if v
+ println()
end
- if v println() end
# Remove Constant Columns - Add back later in final output
- all_profiles, all_col_names, ConstData, ConstCols, ConstIdx = RemoveConstCols(all_profiles, all_col_names, v)
+ all_profiles, all_col_names, ConstData, ConstCols, ConstIdx = RemoveConstCols(all_profiles,
+ all_col_names,
+ v)
# Determine whether or not to time domain reduce fuel profiles as well based on user choice and file structure (i.e., variable fuels in Fuels_data.csv)
IncludeFuel = true
- if (ClusterFuelPrices != 1) || (AllFuelsConst) IncludeFuel = false end
+ if (ClusterFuelPrices != 1) || (AllFuelsConst)
+ IncludeFuel = false
+ end
# Put it together!
- InputData = DataFrame( Dict( all_col_names[c]=>all_profiles[c] for c in 1:length(all_col_names) ) )
+ InputData = DataFrame(Dict(all_col_names[c] => all_profiles[c]
+ for c in 1:length(all_col_names)))
InputData = convert.(Float64, InputData)
if v
println("Demand (MW) and Capacity Factor Profiles: ")
@@ -666,27 +786,37 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
Nhours = nrow(InputData) # Timesteps
Ncols = length(NewColNames) - 1
-
##### Step 1: Normalize or standardize all demand, renewables, and fuel data / optionally scale with DemandWeight
# Normalize/standardize data based on user-provided method
if ScalingMethod == "N"
- normProfiles = [ StatsBase.transform(fit(UnitRangeTransform, InputData[:,c]; dims=1, unit=true), InputData[:,c]) for c in 1:length(OldColNames) ]
+ normProfiles = [StatsBase.transform(fit(UnitRangeTransform,
+ InputData[:, c];
+ dims = 1,
+ unit = true),
+ InputData[:, c]) for c in 1:length(OldColNames)]
elseif ScalingMethod == "S"
- normProfiles = [ StatsBase.transform(fit(ZScoreTransform, InputData[:,c]; dims=1), InputData[:,c]) for c in 1:length(OldColNames) ]
+ normProfiles = [StatsBase.transform(fit(ZScoreTransform, InputData[:, c]; dims = 1),
+ InputData[:, c]) for c in 1:length(OldColNames)]
else
println("ERROR InvalidScalingMethod: Use N for Normalization or S for Standardization.")
println("CONTINUING using 0->1 normalization...")
- normProfiles = [ StatsBase.transform(fit(UnitRangeTransform, InputData[:,c]; dims=1, unit=true), InputData[:,c]) for c in 1:length(OldColNames) ]
+ normProfiles = [StatsBase.transform(fit(UnitRangeTransform,
+ InputData[:, c];
+ dims = 1,
+ unit = true),
+ InputData[:, c]) for c in 1:length(OldColNames)]
end
# Compile newly normalized/standardized profiles
- AnnualTSeriesNormalized = DataFrame(Dict( OldColNames[c] => normProfiles[c] for c in 1:length(OldColNames) ))
+ AnnualTSeriesNormalized = DataFrame(Dict(OldColNames[c] => normProfiles[c]
+ for c in 1:length(OldColNames)))
    # Optional pre-scaling of demand in order to give it more preference in clustering algorithm
    if DemandWeight != 1 # If we want to value demand more/less than capacity factors. Assume nonnegative. DemandWeight=1 means no scaling.
for c in demand_col_names
- AnnualTSeriesNormalized[!, Symbol(c)] .= AnnualTSeriesNormalized[!, Symbol(c)] .* DemandWeight
+ AnnualTSeriesNormalized[!, Symbol(c)] .= AnnualTSeriesNormalized[!,
+ Symbol(c)] .* DemandWeight
end
end
@@ -696,121 +826,193 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
println()
end
-
##### STEP 2: Identify extreme periods in the model, Reshape data for clustering
# Total number of subperiods available in the dataset, where each subperiod length = TimestepsPerRepPeriod
- NumDataPoints = Nhours÷TimestepsPerRepPeriod # 364 weeks in 7 years
- if v println("Total Subperiods in the data set: ", NumDataPoints) end
- InputData[:, :Group] .= (1:Nhours) .÷ (TimestepsPerRepPeriod+0.0001) .+ 1 # Group col identifies the subperiod ID of each hour (e.g., all hours in week 2 have Group=2 if using TimestepsPerRepPeriod=168)
+ NumDataPoints = Nhours ÷ TimestepsPerRepPeriod # 364 weeks in 7 years
+ if v
+ println("Total Subperiods in the data set: ", NumDataPoints)
+ end
+ InputData[:, :Group] .= (1:Nhours) .÷ (TimestepsPerRepPeriod + 0.0001) .+ 1 # Group col identifies the subperiod ID of each hour (e.g., all hours in week 2 have Group=2 if using TimestepsPerRepPeriod=168)
# Group by period (e.g., week)
cgdf = combine(groupby(InputData, :Group), [c .=> sum for c in OldColNames])
- cgdf = cgdf[setdiff(1:end, NumDataPoints+1), :]
+ cgdf = cgdf[setdiff(1:end, NumDataPoints + 1), :]
rename!(cgdf, [:Group; Symbol.(OldColNames)])
# Extreme period identification based on user selection in time_domain_reduction_settings.yml
DemandExtremePeriod = false # Used when deciding whether or not to scale demand curves to equal original total demand
ExtremeWksList = []
if UseExtremePeriods == 1
- for profKey in keys(ExtPeriodSelections)
- for geoKey in keys(ExtPeriodSelections[profKey])
- for typeKey in keys(ExtPeriodSelections[profKey][geoKey])
- for statKey in keys(ExtPeriodSelections[profKey][geoKey][typeKey])
- if ExtPeriodSelections[profKey][geoKey][typeKey][statKey] == 1
- if profKey == "Demand"
- DemandExtremePeriod = true
- end
- if geoKey == "System"
- if v print(geoKey, " ") end
- (stat, group_idx) = get_extreme_period(InputData, cgdf, profKey, typeKey, statKey, ConstCols, demand_col_names, solar_col_names, wind_col_names, v)
- push!(ExtremeWksList, floor(Int, group_idx))
- if v println(group_idx, " : ", stat) end
- elseif geoKey == "Zone"
- for z in sort(unique(ZONES))
- z_cols = [k for (k,v) in col_to_zone_map if v==z]
- if profKey == "Demand" z_cols_type = intersect(z_cols, demand_col_names)
- elseif profKey == "PV" z_cols_type = intersect(z_cols, solar_col_names)
- elseif profKey == "Wind" z_cols_type = intersect(z_cols, wind_col_names)
- else z_cols_type = []
- end
- z_cols_type = setdiff(z_cols_type, ConstCols)
- if length(z_cols_type) > 0
- if v print(geoKey, " ") end
- (stat, group_idx) = get_extreme_period(select(InputData, [:Group; Symbol.(z_cols_type)]), select(cgdf, [:Group; Symbol.(z_cols_type)]), profKey, typeKey, statKey, ConstCols, z_cols_type, z_cols_type, z_cols_type, v)
- push!(ExtremeWksList, floor(Int, group_idx))
- if v println(group_idx, " : ", stat, "(", z, ")") end
- else
- if v println("Zone ", z, " has no time series profiles of type ", profKey) end
- end
- end
- else
- println("Error: Geography Key ", geoKey, " is invalid. Select `System' or `Zone'.")
- end
- end
- end
- end
- end
- end
- if v println(ExtremeWksList) end
- sort!(unique!(ExtremeWksList))
- if v println("Reduced to ", ExtremeWksList) end
+ for profKey in keys(ExtPeriodSelections)
+ for geoKey in keys(ExtPeriodSelections[profKey])
+ for typeKey in keys(ExtPeriodSelections[profKey][geoKey])
+ for statKey in keys(ExtPeriodSelections[profKey][geoKey][typeKey])
+ if ExtPeriodSelections[profKey][geoKey][typeKey][statKey] == 1
+ if profKey == "Demand"
+ DemandExtremePeriod = true
+ end
+ if geoKey == "System"
+ if v
+ print(geoKey, " ")
+ end
+ (stat, group_idx) = get_extreme_period(InputData,
+ cgdf,
+ profKey,
+ typeKey,
+ statKey,
+ ConstCols,
+ demand_col_names,
+ solar_col_names,
+ wind_col_names,
+ v)
+ push!(ExtremeWksList, floor(Int, group_idx))
+ if v
+ println(group_idx, " : ", stat)
+ end
+ elseif geoKey == "Zone"
+ for z in sort(unique(ZONES))
+ z_cols = [k for (k, v) in col_to_zone_map if v == z]
+ if profKey == "Demand"
+ z_cols_type = intersect(z_cols, demand_col_names)
+ elseif profKey == "PV"
+ z_cols_type = intersect(z_cols, solar_col_names)
+ elseif profKey == "Wind"
+ z_cols_type = intersect(z_cols, wind_col_names)
+ else
+ z_cols_type = []
+ end
+ z_cols_type = setdiff(z_cols_type, ConstCols)
+ if length(z_cols_type) > 0
+ if v
+ print(geoKey, " ")
+ end
+ (stat, group_idx) = get_extreme_period(select(InputData,
+ [:Group; Symbol.(z_cols_type)]),
+ select(cgdf, [:Group; Symbol.(z_cols_type)]),
+ profKey,
+ typeKey,
+ statKey,
+ ConstCols,
+ z_cols_type,
+ z_cols_type,
+ z_cols_type,
+ v)
+ push!(ExtremeWksList, floor(Int, group_idx))
+ if v
+ println(group_idx, " : ", stat, "(", z, ")")
+ end
+ else
+ if v
+ println("Zone ",
+ z,
+ " has no time series profiles of type ",
+ profKey)
+ end
+ end
+ end
+ else
+ println("Error: Geography Key ",
+ geoKey,
+ " is invalid. Select `System' or `Zone'.")
+ end
+ end
+ end
+ end
+ end
+ end
+ if v
+ println(ExtremeWksList)
+ end
+ sort!(unique!(ExtremeWksList))
+ if v
+ println("Reduced to ", ExtremeWksList)
+ end
end
### DATA MODIFICATION - Shifting InputData and Normalized InputData
# from 8760 (# hours) by n (# profiles) DF to
# 168*n (n period-stacked profiles) by 52 (# periods) DF
- DFsToConcat = [stack(InputData[isequal.(InputData.Group,w),:], OldColNames)[!,:value] for w in 1:NumDataPoints if w <= NumDataPoints ]
+ DFsToConcat = [stack(InputData[isequal.(InputData.Group, w), :], OldColNames)[!,
+ :value] for w in 1:NumDataPoints if w <= NumDataPoints]
ModifiedData = DataFrame(Dict(Symbol(i) => DFsToConcat[i] for i in 1:NumDataPoints))
- AnnualTSeriesNormalized[:, :Group] .= (1:Nhours) .÷ (TimestepsPerRepPeriod+0.0001) .+ 1
- DFsToConcatNorm = [stack(AnnualTSeriesNormalized[isequal.(AnnualTSeriesNormalized.Group,w),:], OldColNames)[!,:value] for w in 1:NumDataPoints if w <= NumDataPoints ]
- ModifiedDataNormalized = DataFrame(Dict(Symbol(i) => DFsToConcatNorm[i] for i in 1:NumDataPoints))
+ AnnualTSeriesNormalized[:, :Group] .= (1:Nhours) .÷ (TimestepsPerRepPeriod + 0.0001) .+
+ 1
+ DFsToConcatNorm = [stack(AnnualTSeriesNormalized[isequal.(AnnualTSeriesNormalized.Group,
+ w),
+ :],
+ OldColNames)[!,
+ :value] for w in 1:NumDataPoints if w <= NumDataPoints]
+ ModifiedDataNormalized = DataFrame(Dict(Symbol(i) => DFsToConcatNorm[i]
+ for i in 1:NumDataPoints))
# Remove extreme periods from normalized data before clustering
NClusters = MinPeriods
if UseExtremePeriods == 1
- if v println("Pre-removal: ", names(ModifiedDataNormalized)) end
- if v println("Extreme Periods: ", string.(ExtremeWksList)) end
+ if v
+ println("Pre-removal: ", names(ModifiedDataNormalized))
+ end
+ if v
+ println("Extreme Periods: ", string.(ExtremeWksList))
+ end
ClusteringInputDF = select(ModifiedDataNormalized, Not(string.(ExtremeWksList)))
- if v println("Post-removal: ", names(ClusteringInputDF)) end
+ if v
+ println("Post-removal: ", names(ClusteringInputDF))
+ end
NClusters -= length(ExtremeWksList)
else
ClusteringInputDF = ModifiedDataNormalized
end
-
##### STEP 3: Clustering
cluster_results = []
# Cluster once regardless of iteration decisions
- push!(cluster_results, cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
+ push!(cluster_results,
+ cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
# Iteratively add worst periods as extreme periods OR increment number of clusters k
# until threshold is met or maximum periods are added (If chosen in inputs)
if (Iterate == 1)
- while (!check_condition(Threshold, last(cluster_results)[1], OldColNames, ScalingMethod, TimestepsPerRepPeriod)) & ((length(ExtremeWksList)+NClusters) < MaxPeriods)
+ while (!check_condition(Threshold,
+ last(cluster_results)[1],
+ OldColNames,
+ ScalingMethod,
+ TimestepsPerRepPeriod)) & ((length(ExtremeWksList) + NClusters) < MaxPeriods)
if IterateMethod == "cluster"
- if v println("Adding a new Cluster! ") end
+ if v
+ println("Adding a new Cluster! ")
+ end
NClusters += 1
- push!(cluster_results, cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
+ push!(cluster_results,
+ cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
elseif (IterateMethod == "extreme") & (UseExtremePeriods == 1)
- if v println("Adding a new Extreme Period! ") end
+ if v
+ println("Adding a new Extreme Period! ")
+ end
worst_period_idx = get_worst_period_idx(last(cluster_results)[1])
removed_period = string(names(ClusteringInputDF)[worst_period_idx])
select!(ClusteringInputDF, Not(worst_period_idx))
push!(ExtremeWksList, parse(Int, removed_period))
- if v println(worst_period_idx, " (", removed_period, ") ", ExtremeWksList) end
- push!(cluster_results, cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
+ if v
+ println(worst_period_idx, " (", removed_period, ") ", ExtremeWksList)
+ end
+ push!(cluster_results,
+ cluster(ClusterMethod, ClusteringInputDF, NClusters, nReps, v, random))
elseif IterateMethod == "extreme"
- println("INVALID IterateMethod ", IterateMethod, " because UseExtremePeriods is off. Set to 1 if you wish to add extreme periods.")
+ println("INVALID IterateMethod ",
+ IterateMethod,
+ " because UseExtremePeriods is off. Set to 1 if you wish to add extreme periods.")
break
else
- println("INVALID IterateMethod ", IterateMethod, ". Choose 'cluster' or 'extreme'.")
+ println("INVALID IterateMethod ",
+ IterateMethod,
+ ". Choose 'cluster' or 'extreme'.")
break
end
end
- if v && (length(ExtremeWksList)+NClusters == MaxPeriods)
+ if v && (length(ExtremeWksList) + NClusters == MaxPeriods)
println("Stopped iterating by hitting the maximum number of periods.")
elseif v
println("Stopped by meeting the accuracy threshold.")
@@ -842,7 +1044,9 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# ClusterInputDF Reframing of Centers/Medoids (i.e., alphabetical as opposed to indices, same order)
M = [parse(Int64, string(names(ClusteringInputDF)[i])) for i in M]
- if v println("Fixed M: ", M) end
+ if v
+ println("Fixed M: ", M)
+ end
# ClusterInputDF Ordering of All Periods (i.e., alphabetical as opposed to indices)
A_Dict = Dict() # States index of representative period within M for each period a in A
@@ -855,7 +1059,9 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# Add extreme periods into the clustering result with # of occurences = 1 for each
ExtremeWksList = sort(ExtremeWksList)
if UseExtremePeriods == 1
- if v println("Extreme Periods: ", ExtremeWksList) end
+ if v
+ println("Extreme Periods: ", ExtremeWksList)
+ end
M = [M; ExtremeWksList]
A_idx = NClusters + 1
for w in ExtremeWksList
@@ -868,7 +1074,7 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
end
# Recreate A in numeric order (as opposed to ClusterInputDF order)
- A = [A_Dict[i] for i in 1:(length(A)+length(ExtremeWksList))]
+ A = [A_Dict[i] for i in 1:(length(A) + length(ExtremeWksList))]
N = W # Keep cluster version of weights stored as N, number of periods represented by RP
@@ -879,32 +1085,40 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
# SORT A W M in conjunction, chronologically by M, before handling them elsewhere to be consistent
# A points to an index of M. We need it to point to a new index of sorted M. Hence, AssignMap.
old_M = M
- df_sort = DataFrame( Weights = W, NumPeriodsRepresented = N, Rep_Period = M)
+ df_sort = DataFrame(Weights = W, NumPeriodsRepresented = N, Rep_Period = M)
sort!(df_sort, [:Rep_Period])
W = df_sort[!, :Weights]
N = df_sort[!, :NumPeriodsRepresented]
M = df_sort[!, :Rep_Period]
- AssignMap = Dict( i => findall(x->x==old_M[i], M)[1] for i in 1:length(M))
+ AssignMap = Dict(i => findall(x -> x == old_M[i], M)[1] for i in 1:length(M))
A = [AssignMap[a] for a in A]
# Make PeriodMap, maps each period to its representative period
PeriodMap = DataFrame(Period_Index = 1:length(A),
- Rep_Period = [M[a] for a in A],
- Rep_Period_Index = [a for a in A])
+ Rep_Period = [M[a] for a in A],
+ Rep_Period_Index = [a for a in A])
# Get Symbol-version of column names by type for later analysis
DemandCols = Symbol.(demand_col_names)
- VarCols = [Symbol(var_col_names[i]) for i in 1:length(var_col_names) ]
- FuelCols = [Symbol(fuel_col_names[i]) for i in 1:length(fuel_col_names) ]
- ConstCol_Syms = [Symbol(ConstCols[i]) for i in 1:length(ConstCols) ]
+ VarCols = [Symbol(var_col_names[i]) for i in 1:length(var_col_names)]
+ FuelCols = [Symbol(fuel_col_names[i]) for i in 1:length(fuel_col_names)]
+ ConstCol_Syms = [Symbol(ConstCols[i]) for i in 1:length(ConstCols)]
    # Cluster Output: The original data at the medoids/centers
- ClusterOutputData = ModifiedData[:,Symbol.(M)]
+ ClusterOutputData = ModifiedData[:, Symbol.(M)]
# Get zone-wise demand multipliers for later scaling in order for weighted-representative-total-zonal demand to equal original total-zonal demand
# (Only if we don't have demand-related extreme periods because we don't want to change peak demand periods)
if !DemandExtremePeriod
- demand_mults = get_demand_multipliers(ClusterOutputData, InputData, M, W, DemandCols, TimestepsPerRepPeriod, NewColNames, NClusters, Ncols)
+ demand_mults = get_demand_multipliers(ClusterOutputData,
+ InputData,
+ M,
+ W,
+ DemandCols,
+ TimestepsPerRepPeriod,
+ NewColNames,
+ NClusters,
+ Ncols)
end
# Reorganize Data by Demand, Solar, Wind, Fuel, and GrpWeight by Hour, Add Constant Data Back In
@@ -914,37 +1128,47 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
fpDFs = [] # Fuel Profile DataFrames - Just Fuel Profiles
for m in 1:NClusters
- rpDF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols) )
- gvDF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols if (Symbol(NewColNames[i]) in VarCols)) )
- dmDF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols if (Symbol(NewColNames[i]) in DemandCols)) )
- if IncludeFuel fpDF = DataFrame( Dict( NewColNames[i] => ClusterOutputData[!,m][TimestepsPerRepPeriod*(i-1)+1 : TimestepsPerRepPeriod*i] for i in 1:Ncols if (Symbol(NewColNames[i]) in FuelCols)) ) end
- if !IncludeFuel fpDF = DataFrame(Placeholder = 1:TimestepsPerRepPeriod) end
+ rpDF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols))
+ gvDF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols if (Symbol(NewColNames[i]) in VarCols)))
+ dmDF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols if (Symbol(NewColNames[i]) in DemandCols)))
+ if IncludeFuel
+ fpDF = DataFrame(Dict(NewColNames[i] => ClusterOutputData[!, m][(TimestepsPerRepPeriod * (i - 1) + 1):(TimestepsPerRepPeriod * i)]
+ for i in 1:Ncols if (Symbol(NewColNames[i]) in FuelCols)))
+ end
+ if !IncludeFuel
+ fpDF = DataFrame(Placeholder = 1:TimestepsPerRepPeriod)
+ end
# Add Constant Columns back in
for c in 1:length(ConstCols)
- rpDF[!,Symbol(ConstCols[c])] .= ConstData[c][1]
+ rpDF[!, Symbol(ConstCols[c])] .= ConstData[c][1]
if Symbol(ConstCols[c]) in VarCols
- gvDF[!,Symbol(ConstCols[c])] .= ConstData[c][1]
+ gvDF[!, Symbol(ConstCols[c])] .= ConstData[c][1]
elseif Symbol(ConstCols[c]) in FuelCols
- fpDF[!,Symbol(ConstCols[c])] .= ConstData[c][1]
+ fpDF[!, Symbol(ConstCols[c])] .= ConstData[c][1]
elseif Symbol(ConstCols[c]) in DemandCols
- dmDF[!,Symbol(ConstCols[c])] .= ConstData[c][1]
+ dmDF[!, Symbol(ConstCols[c])] .= ConstData[c][1]
end
end
- if !IncludeFuel select!(fpDF, Not(:Placeholder)) end
+ if !IncludeFuel
+ select!(fpDF, Not(:Placeholder))
+ end
# Scale Demand using previously identified multipliers
# Scale dmDF but not rpDF which compares to input data but is not written to file.
for demandcol in DemandCols
if demandcol ∉ ConstCol_Syms
if !DemandExtremePeriod
- dmDF[!,demandcol] .*= demand_mults[demandcol]
+ dmDF[!, demandcol] .*= demand_mults[demandcol]
end
end
end
- rpDF[!,:GrpWeight] .= W[m]
- rpDF[!,:Cluster] .= M[m]
+ rpDF[!, :GrpWeight] .= W[m]
+ rpDF[!, :Cluster] .= M[m]
push!(rpDFs, rpDF)
push!(gvDFs, gvDF)
push!(dmDFs, dmDF)
@@ -955,35 +1179,54 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
DMOutputData = vcat(dmDFs...) # Demand Profiles
FPOutputData = vcat(fpDFs...) # Fuel Profiles
-
##### Step 5: Evaluation
- InputDataTest = InputData[(InputData.Group .<= NumDataPoints*1.0), :]
+ InputDataTest = InputData[(InputData.Group .<= NumDataPoints * 1.0), :]
ClusterDataTest = vcat([rpDFs[a] for a in A]...) # To compare fairly, demand is not scaled here
- RMSE = Dict( c => rmse_score(InputDataTest[:, c], ClusterDataTest[:, c]) for c in OldColNames)
+ RMSE = Dict(c => rmse_score(InputDataTest[:, c], ClusterDataTest[:, c])
+ for c in OldColNames)
##### Step 6: Print to File
if MultiStage == 1
- if v print("Outputs: MultiStage") end
+ if v
+ print("Outputs: MultiStage")
+ end
if MultiStageConcatenate == 1
- if v println(" with Concatenation") end
- groups_per_stage = round.(Int, size(A,1)*relative_lengths)
- group_ranges = [if i == 1 1:groups_per_stage[1] else sum(groups_per_stage[1:i-1])+1:sum(groups_per_stage[1:i]) end for i in 1:size(relative_lengths,1)]
+ if v
+ println(" with Concatenation")
+ end
+ groups_per_stage = round.(Int, size(A, 1) * relative_lengths)
+ group_ranges = [if i == 1
+ 1:groups_per_stage[1]
+ else
+ (sum(groups_per_stage[1:(i - 1)]) + 1):sum(groups_per_stage[1:i])
+ end
+ for i in 1:size(relative_lengths, 1)]
Stage_Weights = Dict()
Stage_PeriodMaps = Dict()
Stage_Outfiles = Dict()
- SolarVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_solar_variability.csv")
- WindVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_wind_variability.csv")
+ SolarVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_solar_variability.csv")
+ WindVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_wind_variability.csv")
for per in 1:NumStages # Iterate over multi-stages
- mkpath(joinpath(inpath,"inputs","inputs_p$per", TimeDomainReductionFolder))
+ mkpath(joinpath(inpath,
+ "inputs",
+ "inputs_p$per",
+ TimeDomainReductionFolder))
# Stage-specific weights and mappings
cmap = countmap(A[group_ranges[per]]) # Count number of each rep. period in the planning stage
- weight_props = [ if i in keys(cmap) cmap[i]/N[i] else 0 end for i in 1:size(M,1) ] # Proportions of each rep. period associated with each planning stage
- Stage_Weights[per] = weight_props.*W # Total hours that each rep. period represents within the planning stage
- Stage_PeriodMaps[per] = PeriodMap[group_ranges[per],:]
- Stage_PeriodMaps[per][!,:Period_Index] = 1:(group_ranges[per][end]-group_ranges[per][1]+1)
+ weight_props = [if i in keys(cmap)
+ cmap[i] / N[i]
+ else
+ 0
+ end
+ for i in 1:size(M, 1)] # Proportions of each rep. period associated with each planning stage
+ Stage_Weights[per] = weight_props .* W # Total hours that each rep. period represents within the planning stage
+ Stage_PeriodMaps[per] = PeriodMap[group_ranges[per], :]
+ Stage_PeriodMaps[per][!, :Period_Index] = 1:(group_ranges[per][end] - group_ranges[per][1] + 1)
# Outfiles
Stage_Outfiles[per] = Dict()
Stage_Outfiles[per]["Demand"] = joinpath("inputs_p$per", Demand_Outfile)
@@ -992,239 +1235,349 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
Stage_Outfiles[per]["PMap"] = joinpath("inputs_p$per", PMap_Outfile)
Stage_Outfiles[per]["YAML"] = joinpath("inputs_p$per", YAML_Outfile)
if !isempty(inputs_dict[per]["VRE_STOR"])
- Stage_Outfiles[per]["GSolar"] = joinpath("inputs_p$per", SolarVar_Outfile)
+ Stage_Outfiles[per]["GSolar"] = joinpath("inputs_p$per",
+ SolarVar_Outfile)
Stage_Outfiles[per]["GWind"] = joinpath("inputs_p$per", WindVar_Outfile)
end
# Save output data to stage-specific locations
### TDR_Results/Demand_data_clustered.csv
- demand_in = get_demand_dataframe(joinpath(inpath, "inputs", "inputs_p$per"), mysetup["SystemFolder"])
- demand_in[!,:Sub_Weights] = demand_in[!,:Sub_Weights] * 1.
- demand_in[1:length(Stage_Weights[per]),:Sub_Weights] .= Stage_Weights[per]
- demand_in[!,:Rep_Periods][1] = length(Stage_Weights[per])
- demand_in[!,:Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
+ demand_in = get_demand_dataframe(joinpath(inpath, "inputs", "inputs_p$per"),
+ mysetup["SystemFolder"])
+ demand_in[!, :Sub_Weights] = demand_in[!, :Sub_Weights] * 1.0
+ demand_in[1:length(Stage_Weights[per]), :Sub_Weights] .= Stage_Weights[per]
+ demand_in[!, :Rep_Periods][1] = length(Stage_Weights[per])
+ demand_in[!, :Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
select!(demand_in, Not(DemandCols))
select!(demand_in, Not(:Time_Index))
- Time_Index_M = Union{Int64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- Time_Index_M[1:size(DMOutputData,1)] = 1:size(DMOutputData,1)
- demand_in[!,:Time_Index] .= Time_Index_M
+ Time_Index_M = Union{Int64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ Time_Index_M[1:size(DMOutputData, 1)] = 1:size(DMOutputData, 1)
+ demand_in[!, :Time_Index] .= Time_Index_M
for c in DemandCols
- new_col = Union{Float64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- new_col[1:size(DMOutputData,1)] = DMOutputData[!,c]
- demand_in[!,c] .= new_col
+ new_col = Union{Float64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ new_col[1:size(DMOutputData, 1)] = DMOutputData[!, c]
+ demand_in[!, c] .= new_col
end
- demand_in = demand_in[1:size(DMOutputData,1),:]
+ demand_in = demand_in[1:size(DMOutputData, 1), :]
- if v println("Writing demand file...") end
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["Demand"]), demand_in)
+ if v
+ println("Writing demand file...")
+ end
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["Demand"]),
+ demand_in)
### TDR_Results/Generators_variability.csv
# Reset column ordering, add time index, and solve duplicate column name trouble with CSV.write's header kwarg
- GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i] for i in 1:length(inputs_dict[1]["RESOURCE_NAMES"]))
+ GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i]
+ for i in 1:length(inputs_dict[1]["RESOURCE_NAMES"]))
GVColMap["Time_Index"] = "Time_Index"
GVOutputData = GVOutputData[!, Symbol.(RESOURCE_ZONES)]
- insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData,1))
+ insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData, 1))
NewGVColNames = [GVColMap[string(c)] for c in names(GVOutputData)]
- if v println("Writing resource file...") end
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GVar"]), GVOutputData, header=NewGVColNames)
+ if v
+ println("Writing resource file...")
+ end
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GVar"]),
+ GVOutputData,
+ header = NewGVColNames)
if !isempty(inputs_dict[per]["VRE_STOR"])
- gen_var = load_dataframe(joinpath(inpath, "inputs", Stage_Outfiles[per]["GVar"]))
-
+ gen_var = load_dataframe(joinpath(inpath,
+ "inputs",
+ Stage_Outfiles[per]["GVar"]))
+
# Find which indexes have solar PV/wind names
RESOURCE_ZONES_VRE_STOR = NewGVColNames
solar_col_names = []
wind_col_names = []
for r in 1:length(RESOURCE_ZONES_VRE_STOR)
- if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) || occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) || occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
- push!(solar_col_names,r)
+ if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ push!(solar_col_names, r)
end
- if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) || occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
push!(wind_col_names, r)
end
end
-
+
# Index into dataframe and output them
solar_var = gen_var[!, solar_col_names]
- solar_var[!, :Time_Index] = 1:size(solar_var,1)
+ solar_var[!, :Time_Index] = 1:size(solar_var, 1)
wind_var = gen_var[!, wind_col_names]
- wind_var[!, :Time_Index] = 1:size(wind_var,1)
-
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GSolar"]), solar_var)
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GWind"]), wind_var)
+ wind_var[!, :Time_Index] = 1:size(wind_var, 1)
+
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GSolar"]),
+ solar_var)
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["GWind"]),
+ wind_var)
end
### TDR_Results/Fuels_data.csv
- fuel_in = load_dataframe(joinpath(inpath, "inputs", "inputs_p$per", mysetup["SystemFolder"], "Fuels_data.csv"))
+ fuel_in = load_dataframe(joinpath(inpath,
+ "inputs",
+ "inputs_p$per",
+ mysetup["SystemFolder"],
+ "Fuels_data.csv"))
select!(fuel_in, Not(:Time_Index))
SepFirstRow = DataFrame(fuel_in[1, :])
NewFuelOutput = vcat(SepFirstRow, FPOutputData)
rename!(NewFuelOutput, FuelCols)
- insertcols!(NewFuelOutput, 1, :Time_Index => 0:size(NewFuelOutput,1)-1)
- if v println("Writing fuel profiles...") end
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["Fuel"]), NewFuelOutput)
+ insertcols!(NewFuelOutput, 1, :Time_Index => 0:(size(NewFuelOutput, 1) - 1))
+ if v
+ println("Writing fuel profiles...")
+ end
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["Fuel"]),
+ NewFuelOutput)
### TDR_Results/Period_map.csv
- if v println("Writing period map...") end
- CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["PMap"]), Stage_PeriodMaps[per])
+ if v
+ println("Writing period map...")
+ end
+ CSV.write(joinpath(inpath, "inputs", Stage_Outfiles[per]["PMap"]),
+ Stage_PeriodMaps[per])
### TDR_Results/time_domain_reduction_settings.yml
- if v println("Writing .yml settings...") end
- YAML.write_file(joinpath(inpath, "inputs", Stage_Outfiles[per]["YAML"]), myTDRsetup)
-
+ if v
+ println("Writing .yml settings...")
+ end
+ YAML.write_file(joinpath(inpath, "inputs", Stage_Outfiles[per]["YAML"]),
+ myTDRsetup)
end
else
- if v print("without Concatenation has not yet been fully implemented. ") end
- if v println("( STAGE ", stage_id, " )") end
- input_stage_directory = "inputs_p"*string(stage_id)
- mkpath(joinpath(inpath,"inputs",input_stage_directory, TimeDomainReductionFolder))
+ if v
+ print("without Concatenation has not yet been fully implemented. ")
+ end
+ if v
+ println("( STAGE ", stage_id, " )")
+ end
+ input_stage_directory = "inputs_p" * string(stage_id)
+ mkpath(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ TimeDomainReductionFolder))
### TDR_Results/Demand_data.csv
- demand_in = get_demand_dataframe(joinpath(inpath, "inputs", input_stage_directory, mysetup["SystemFolder"]))
- demand_in[!,:Sub_Weights] = demand_in[!,:Sub_Weights] * 1.
- demand_in[1:length(W),:Sub_Weights] .= W
- demand_in[!,:Rep_Periods][1] = length(W)
- demand_in[!,:Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
+ demand_in = get_demand_dataframe(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ mysetup["SystemFolder"]))
+ demand_in[!, :Sub_Weights] = demand_in[!, :Sub_Weights] * 1.0
+ demand_in[1:length(W), :Sub_Weights] .= W
+ demand_in[!, :Rep_Periods][1] = length(W)
+ demand_in[!, :Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
select!(demand_in, Not(DemandCols))
select!(demand_in, Not(:Time_Index))
- Time_Index_M = Union{Int64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- Time_Index_M[1:size(DMOutputData,1)] = 1:size(DMOutputData,1)
- demand_in[!,:Time_Index] .= Time_Index_M
+ Time_Index_M = Union{Int64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ Time_Index_M[1:size(DMOutputData, 1)] = 1:size(DMOutputData, 1)
+ demand_in[!, :Time_Index] .= Time_Index_M
for c in DemandCols
- new_col = Union{Float64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- new_col[1:size(DMOutputData,1)] = DMOutputData[!,c]
- demand_in[!,c] .= new_col
+ new_col = Union{Float64, Missings.Missing}[missing
+ for i in 1:size(demand_in, 1)]
+ new_col[1:size(DMOutputData, 1)] = DMOutputData[!, c]
+ demand_in[!, c] .= new_col
end
- demand_in = demand_in[1:size(DMOutputData,1),:]
+ demand_in = demand_in[1:size(DMOutputData, 1), :]
- if v println("Writing demand file...") end
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,Demand_Outfile), demand_in)
+ if v
+ println("Writing demand file...")
+ end
+ CSV.write(joinpath(inpath, "inputs", input_stage_directory, Demand_Outfile),
+ demand_in)
### TDR_Results/Generators_variability.csv
# Reset column ordering, add time index, and solve duplicate column name trouble with CSV.write's header kwarg
- GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i] for i in 1:length(myinputs["RESOURCE_NAMES"]))
+ GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i]
+ for i in 1:length(myinputs["RESOURCE_NAMES"]))
GVColMap["Time_Index"] = "Time_Index"
GVOutputData = GVOutputData[!, Symbol.(RESOURCE_ZONES)]
- insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData,1))
+ insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData, 1))
NewGVColNames = [GVColMap[string(c)] for c in names(GVOutputData)]
- if v println("Writing resource file...") end
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,GVar_Outfile), GVOutputData, header=NewGVColNames)
+ if v
+ println("Writing resource file...")
+ end
+ CSV.write(joinpath(inpath, "inputs", input_stage_directory, GVar_Outfile),
+ GVOutputData,
+ header = NewGVColNames)
# Break up VRE-storage components if needed
if !isempty(myinputs["VRE_STOR"])
- gen_var = load_dataframe(joinpath(inpath,"inputs",input_stage_directory,GVar_Outfile))
+ gen_var = load_dataframe(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ GVar_Outfile))
# Find which indexes have solar PV/wind names
RESOURCE_ZONES_VRE_STOR = NewGVColNames
solar_col_names = []
wind_col_names = []
for r in 1:length(RESOURCE_ZONES_VRE_STOR)
- if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) || occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) || occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
- push!(solar_col_names,r)
+ if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ push!(solar_col_names, r)
end
- if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) || occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
push!(wind_col_names, r)
end
end
# Index into dataframe and output them
solar_var = gen_var[!, solar_col_names]
- solar_var[!, :Time_Index] = 1:size(solar_var,1)
+ solar_var[!, :Time_Index] = 1:size(solar_var, 1)
wind_var = gen_var[!, wind_col_names]
- wind_var[!, :Time_Index] = 1:size(wind_var,1)
-
- SolarVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_solar_variability.csv")
- WindVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_wind_variability.csv")
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,SolarVar_Outfile), solar_var)
- CSV.write(joinpath(inpath,"inputs",input_stage_directory, WindVar_Outfile), wind_var)
+ wind_var[!, :Time_Index] = 1:size(wind_var, 1)
+
+ SolarVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_solar_variability.csv")
+ WindVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_wind_variability.csv")
+ CSV.write(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ SolarVar_Outfile),
+ solar_var)
+ CSV.write(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ WindVar_Outfile),
+ wind_var)
end
### TDR_Results/Fuels_data.csv
- fuel_in = load_dataframe(joinpath(inpath, "inputs", input_stage_directory, mysetup["SystemFolder"], "Fuels_data.csv"))
+ fuel_in = load_dataframe(joinpath(inpath,
+ "inputs",
+ input_stage_directory,
+ mysetup["SystemFolder"],
+ "Fuels_data.csv"))
select!(fuel_in, Not(:Time_Index))
SepFirstRow = DataFrame(fuel_in[1, :])
NewFuelOutput = vcat(SepFirstRow, FPOutputData)
rename!(NewFuelOutput, FuelCols)
- insertcols!(NewFuelOutput, 1, :Time_Index => 0:size(NewFuelOutput,1)-1)
- if v println("Writing fuel profiles...") end
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,Fuel_Outfile), NewFuelOutput)
+ insertcols!(NewFuelOutput, 1, :Time_Index => 0:(size(NewFuelOutput, 1) - 1))
+ if v
+ println("Writing fuel profiles...")
+ end
+ CSV.write(joinpath(inpath, "inputs", input_stage_directory, Fuel_Outfile),
+ NewFuelOutput)
### Period_map.csv
- if v println("Writing period map...") end
- CSV.write(joinpath(inpath,"inputs",input_stage_directory,PMap_Outfile), PeriodMap)
+ if v
+ println("Writing period map...")
+ end
+ CSV.write(joinpath(inpath, "inputs", input_stage_directory, PMap_Outfile),
+ PeriodMap)
### time_domain_reduction_settings.yml
- if v println("Writing .yml settings...") end
- YAML.write_file(joinpath(inpath,"inputs",input_stage_directory,YAML_Outfile), myTDRsetup)
+ if v
+ println("Writing .yml settings...")
+ end
+ YAML.write_file(joinpath(inpath, "inputs", input_stage_directory, YAML_Outfile),
+ myTDRsetup)
end
else
- if v println("Outputs: Single-Stage") end
+ if v
+ println("Outputs: Single-Stage")
+ end
mkpath(joinpath(inpath, TimeDomainReductionFolder))
### TDR_Results/Demand_data.csv
system_path = joinpath(inpath, mysetup["SystemFolder"])
demand_in = get_demand_dataframe(system_path)
- demand_in[!,:Sub_Weights] = demand_in[!,:Sub_Weights] * 1.
- demand_in[1:length(W),:Sub_Weights] .= W
- demand_in[!,:Rep_Periods][1] = length(W)
- demand_in[!,:Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
+ demand_in[!, :Sub_Weights] = demand_in[!, :Sub_Weights] * 1.0
+ demand_in[1:length(W), :Sub_Weights] .= W
+ demand_in[!, :Rep_Periods][1] = length(W)
+ demand_in[!, :Timesteps_per_Rep_Period][1] = TimestepsPerRepPeriod
select!(demand_in, Not(DemandCols))
select!(demand_in, Not(:Time_Index))
- Time_Index_M = Union{Int64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- Time_Index_M[1:size(DMOutputData,1)] = 1:size(DMOutputData,1)
- demand_in[!,:Time_Index] .= Time_Index_M
+ Time_Index_M = Union{Int64, Missings.Missing}[missing for i in 1:size(demand_in, 1)]
+ Time_Index_M[1:size(DMOutputData, 1)] = 1:size(DMOutputData, 1)
+ demand_in[!, :Time_Index] .= Time_Index_M
for c in DemandCols
- new_col = Union{Float64, Missings.Missing}[missing for i in 1:size(demand_in,1)]
- new_col[1:size(DMOutputData,1)] = DMOutputData[!,c]
- demand_in[!,c] .= new_col
+ new_col = Union{Float64, Missings.Missing}[missing for i in 1:size(demand_in, 1)]
+ new_col[1:size(DMOutputData, 1)] = DMOutputData[!, c]
+ demand_in[!, c] .= new_col
end
- demand_in = demand_in[1:size(DMOutputData,1),:]
+ demand_in = demand_in[1:size(DMOutputData, 1), :]
- if v println("Writing demand file...") end
+ if v
+ println("Writing demand file...")
+ end
CSV.write(joinpath(inpath, Demand_Outfile), demand_in)
### TDR_Results/Generators_variability.csv
# Reset column ordering, add time index, and solve duplicate column name trouble with CSV.write's header kwarg
- GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i] for i in 1:length(myinputs["RESOURCE_NAMES"]))
+ GVColMap = Dict(RESOURCE_ZONES[i] => RESOURCES[i]
+ for i in 1:length(myinputs["RESOURCE_NAMES"]))
GVColMap["Time_Index"] = "Time_Index"
GVOutputData = GVOutputData[!, Symbol.(RESOURCE_ZONES)]
- insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData,1))
+ insertcols!(GVOutputData, 1, :Time_Index => 1:size(GVOutputData, 1))
NewGVColNames = [GVColMap[string(c)] for c in names(GVOutputData)]
- if v println("Writing resource file...") end
- CSV.write(joinpath(inpath, GVar_Outfile), GVOutputData, header=NewGVColNames)
+ if v
+ println("Writing resource file...")
+ end
+ CSV.write(joinpath(inpath, GVar_Outfile), GVOutputData, header = NewGVColNames)
# Break up VRE-storage components if needed
if !isempty(myinputs["VRE_STOR"])
- gen_var = load_dataframe(joinpath(inpath,GVar_Outfile))
+ gen_var = load_dataframe(joinpath(inpath, GVar_Outfile))
# Find which indexes have solar PV/wind names
RESOURCE_ZONES_VRE_STOR = NewGVColNames
solar_col_names = []
wind_col_names = []
for r in 1:length(RESOURCE_ZONES_VRE_STOR)
- if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) || occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) || occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
- push!(solar_col_names,r)
+ if occursin("PV", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Pv", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("SOLAR", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("solar", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ push!(solar_col_names, r)
end
- if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) || occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) || occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
+ if occursin("Wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("WIND", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("wind", RESOURCE_ZONES_VRE_STOR[r]) ||
+ occursin("Time", RESOURCE_ZONES_VRE_STOR[r])
push!(wind_col_names, r)
end
end
# Index into dataframe and output them
solar_var = gen_var[!, solar_col_names]
- solar_var[!, :Time_Index] = 1:size(solar_var,1)
+ solar_var[!, :Time_Index] = 1:size(solar_var, 1)
wind_var = gen_var[!, wind_col_names]
- wind_var[!, :Time_Index] = 1:size(wind_var,1)
+ wind_var[!, :Time_Index] = 1:size(wind_var, 1)
- SolarVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_solar_variability.csv")
- WindVar_Outfile = joinpath(TimeDomainReductionFolder, "Vre_and_stor_wind_variability.csv")
+ SolarVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_solar_variability.csv")
+ WindVar_Outfile = joinpath(TimeDomainReductionFolder,
+ "Vre_and_stor_wind_variability.csv")
CSV.write(joinpath(inpath, SolarVar_Outfile), solar_var)
CSV.write(joinpath(inpath, WindVar_Outfile), wind_var)
end
@@ -1236,26 +1589,32 @@ function cluster_inputs(inpath, settings_path, mysetup, stage_id=-99, v=false; r
SepFirstRow = DataFrame(fuel_in[1, :])
NewFuelOutput = vcat(SepFirstRow, FPOutputData)
rename!(NewFuelOutput, FuelCols)
- insertcols!(NewFuelOutput, 1, :Time_Index => 0:size(NewFuelOutput,1)-1)
- if v println("Writing fuel profiles...") end
+ insertcols!(NewFuelOutput, 1, :Time_Index => 0:(size(NewFuelOutput, 1) - 1))
+ if v
+ println("Writing fuel profiles...")
+ end
CSV.write(joinpath(inpath, Fuel_Outfile), NewFuelOutput)
### TDR_Results/Period_map.csv
- if v println("Writing period map...") end
+ if v
+ println("Writing period map...")
+ end
CSV.write(joinpath(inpath, PMap_Outfile), PeriodMap)
### TDR_Results/time_domain_reduction_settings.yml
- if v println("Writing .yml settings...") end
+ if v
+ println("Writing .yml settings...")
+ end
YAML.write_file(joinpath(inpath, YAML_Outfile), myTDRsetup)
end
return Dict("OutputDF" => FinalOutputData,
- "InputDF" => ClusteringInputDF,
- "ColToZoneMap" => col_to_zone_map,
- "TDRsetup" => myTDRsetup,
- "ClusterObject" => R,
- "Assignments" => A,
- "Weights" => W,
- "Centers" => M,
- "RMSE" => RMSE)
+ "InputDF" => ClusteringInputDF,
+ "ColToZoneMap" => col_to_zone_map,
+ "TDRsetup" => myTDRsetup,
+ "ClusterObject" => R,
+ "Assignments" => A,
+ "Weights" => W,
+ "Centers" => M,
+ "RMSE" => RMSE)
end
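# Illustration only (editorial sketch, not part of the patch above): the chained
# occursin calls classify Generators_variability columns as solar or wind by keyword.
# An equivalent case-insensitive check on made-up column names, using a regex instead
# of enumerating every capitalization, would look like this:
example_cols = ["Zone1_Solar_PV_1", "Zone2_Wind_Onshore_1", "Time_Index"]
is_solar(name) = occursin(r"pv|solar|time"i, name)   # "time" keeps the Time_Index column
is_wind(name) = occursin(r"wind|time"i, name)
solar_idx = findall(is_solar, example_cols)          # -> [1, 3]
wind_idx = findall(is_wind, example_cols)            # -> [2, 3]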
diff --git a/src/write_outputs/capacity_reserve_margin/effective_capacity.jl b/src/write_outputs/capacity_reserve_margin/effective_capacity.jl
index 3e15d5ca66..60a70ecd86 100644
--- a/src/write_outputs/capacity_reserve_margin/effective_capacity.jl
+++ b/src/write_outputs/capacity_reserve_margin/effective_capacity.jl
@@ -7,21 +7,16 @@
Effective capacity in a capacity reserve margin zone for certain resources in the given timesteps.
"""
-function thermal_plant_effective_capacity(
- EP,
+function thermal_plant_effective_capacity(EP,
inputs,
resources::Vector{Int},
capres_zone::Int,
- timesteps::Vector{Int},
-)::Matrix{Float64}
- eff_cap =
- thermal_plant_effective_capacity.(
- Ref(EP),
- Ref(inputs),
- resources,
- Ref(capres_zone),
- Ref(timesteps),
- )
+ timesteps::Vector{Int})::Matrix{Float64}
+ eff_cap = thermal_plant_effective_capacity.(Ref(EP),
+ Ref(inputs),
+ resources,
+ Ref(capres_zone),
+ Ref(timesteps))
return reduce(hcat, eff_cap)
end
@@ -31,24 +26,26 @@ function thermal_plant_effective_capacity(EP::Model, inputs::Dict, y, capres_zon
return thermal_plant_effective_capacity(EP, inputs, y, capres_zone, timesteps)
end
-function thermal_plant_effective_capacity(
- EP::Model,
+function thermal_plant_effective_capacity(EP::Model,
inputs::Dict,
r_id::Int,
capres_zone::Int,
- timesteps::Vector{Int},
-)::Vector{Float64}
+ timesteps::Vector{Int})::Vector{Float64}
y = r_id
gen = inputs["RESOURCES"]
- capresfactor = derating_factor(gen[y], tag=capres_zone)
+ capresfactor = derating_factor(gen[y], tag = capres_zone)
eTotalCap = value.(EP[:eTotalCap][y])
effective_capacity = fill(capresfactor * eTotalCap, length(timesteps))
if has_maintenance(inputs) && y in ids_with_maintenance(gen)
- adjustment = thermal_maintenance_capacity_reserve_margin_adjustment(EP, inputs, y, capres_zone, timesteps)
- effective_capacity = effective_capacity .+ value.(adjustment)
- end
+ adjustment = thermal_maintenance_capacity_reserve_margin_adjustment(EP,
+ inputs,
+ y,
+ capres_zone,
+ timesteps)
+ effective_capacity = effective_capacity .+ value.(adjustment)
+ end
return effective_capacity
end
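# Illustration only: the broadcast above uses Ref(...) to hold EP, inputs, the zone and
# the timestep vector fixed while iterating only over the resource ids; reduce(hcat, ...)
# then stacks the per-plant vectors into a matrix. A standalone sketch of the Ref
# pattern with a toy function and toy data (no JuMP model involved):
per_resource(data, r, zone) = data[r] * zone          # stand-in for the per-plant method
data = [10.0, 20.0, 30.0]
resources = [1, 3]
vals = per_resource.(Ref(data), resources, Ref(2))    # Ref arguments are not iterated
# vals == [20.0, 60.0]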
diff --git a/src/write_outputs/capacity_reserve_margin/write_capacity_value.jl b/src/write_outputs/capacity_reserve_margin/write_capacity_value.jl
index 747bf5602b..2c902862d3 100644
--- a/src/write_outputs/capacity_reserve_margin/write_capacity_value.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_capacity_value.jl
@@ -1,16 +1,16 @@
function write_capacity_value(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
-
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- THERM_ALL = inputs["THERM_ALL"]
- VRE = inputs["VRE"]
- HYDRO_RES = inputs["HYDRO_RES"]
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- MUST_RUN = inputs["MUST_RUN"]
- VRE_STOR = inputs["VRE_STOR"]
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
+
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ THERM_ALL = inputs["THERM_ALL"]
+ VRE = inputs["VRE"]
+ HYDRO_RES = inputs["HYDRO_RES"]
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ MUST_RUN = inputs["MUST_RUN"]
+ VRE_STOR = inputs["VRE_STOR"]
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
eTotalCap = value.(EP[:eTotalCap])
@@ -18,88 +18,118 @@ function write_capacity_value(path::AbstractString, inputs::Dict, setup::Dict, E
minimum_plant_size = 1 # MW
large_plants = findall(>=(minimum_plant_size), eTotalCap * scale_factor)
- THERM_ALL_EX = intersect(THERM_ALL, large_plants)
- VRE_EX = intersect(VRE, large_plants)
- HYDRO_RES_EX = intersect(HYDRO_RES, large_plants)
- STOR_ALL_EX = intersect(STOR_ALL, large_plants)
- FLEX_EX = intersect(FLEX, large_plants)
- MUST_RUN_EX = intersect(MUST_RUN, large_plants)
- # Will only be activated if grid connection capacity exists (because may build standalone storage/VRE, which will only be telling by grid connection capacity)
- VRE_STOR_EX = intersect(VRE_STOR, large_plants)
- if !isempty(VRE_STOR_EX)
- DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
- DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
- VRE_STOR_STOR_EX = intersect(inputs["VS_STOR"], VRE_STOR_EX)
- DC_DISCHARGE_EX = intersect(DC_DISCHARGE, VRE_STOR_EX)
- AC_DISCHARGE_EX = intersect(inputs["VS_STOR_AC_DISCHARGE"], VRE_STOR_EX)
- DC_CHARGE_EX = intersect(DC_CHARGE, VRE_STOR_EX)
- AC_CHARGE_EX = intersect(inputs["VS_STOR_AC_CHARGE"], VRE_STOR_EX)
- end
-
- crm_derate(i, y::Vector{Int}) = derating_factor.(gen[y], tag=i)'
+ THERM_ALL_EX = intersect(THERM_ALL, large_plants)
+ VRE_EX = intersect(VRE, large_plants)
+ HYDRO_RES_EX = intersect(HYDRO_RES, large_plants)
+ STOR_ALL_EX = intersect(STOR_ALL, large_plants)
+ FLEX_EX = intersect(FLEX, large_plants)
+ MUST_RUN_EX = intersect(MUST_RUN, large_plants)
+    # Will only be activated if grid connection capacity exists (because standalone storage/VRE may be built, which is only indicated by the grid connection capacity)
+ VRE_STOR_EX = intersect(VRE_STOR, large_plants)
+ if !isempty(VRE_STOR_EX)
+ DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
+ DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
+ VRE_STOR_STOR_EX = intersect(inputs["VS_STOR"], VRE_STOR_EX)
+ DC_DISCHARGE_EX = intersect(DC_DISCHARGE, VRE_STOR_EX)
+ AC_DISCHARGE_EX = intersect(inputs["VS_STOR_AC_DISCHARGE"], VRE_STOR_EX)
+ DC_CHARGE_EX = intersect(DC_CHARGE, VRE_STOR_EX)
+ AC_CHARGE_EX = intersect(inputs["VS_STOR_AC_CHARGE"], VRE_STOR_EX)
+ end
+
+ crm_derate(i, y::Vector{Int}) = derating_factor.(gen[y], tag = i)'
max_power(t::Vector{Int}, y::Vector{Int}) = inputs["pP_Max"][y, t]'
total_cap(y::Vector{Int}) = eTotalCap[y]'
- dfCapValue = DataFrame()
- for i in 1:inputs["NCapacityReserveMargin"]
+ dfCapValue = DataFrame()
+ for i in 1:inputs["NCapacityReserveMargin"]
capvalue = zeros(T, G)
minimum_crm_price = 1 # $/MW
- riskyhour = findall(>=(minimum_crm_price), capacity_reserve_margin_price(EP, inputs, setup, i))
+ riskyhour = findall(>=(minimum_crm_price),
+ capacity_reserve_margin_price(EP, inputs, setup, i))
power(y::Vector{Int}) = value.(EP[:vP][y, riskyhour])'
- capvalue[riskyhour, THERM_ALL_EX] = thermal_plant_effective_capacity(EP, inputs, THERM_ALL_EX, i, riskyhour) ./ total_cap(THERM_ALL_EX)
+ capvalue[riskyhour, THERM_ALL_EX] = thermal_plant_effective_capacity(EP,
+ inputs,
+ THERM_ALL_EX,
+ i,
+ riskyhour) ./ total_cap(THERM_ALL_EX)
capvalue[riskyhour, VRE_EX] = crm_derate(i, VRE_EX) .* max_power(riskyhour, VRE_EX)
- capvalue[riskyhour, MUST_RUN_EX] = crm_derate(i, MUST_RUN_EX) .* max_power(riskyhour, MUST_RUN_EX)
+ capvalue[riskyhour, MUST_RUN_EX] = crm_derate(i, MUST_RUN_EX) .*
+ max_power(riskyhour, MUST_RUN_EX)
- capvalue[riskyhour, HYDRO_RES_EX] = crm_derate(i, HYDRO_RES_EX) .* power(HYDRO_RES_EX) ./ total_cap(HYDRO_RES_EX)
+ capvalue[riskyhour, HYDRO_RES_EX] = crm_derate(i, HYDRO_RES_EX) .*
+ power(HYDRO_RES_EX) ./ total_cap(HYDRO_RES_EX)
- if !isempty(STOR_ALL_EX)
+ if !isempty(STOR_ALL_EX)
charge = value.(EP[:vCHARGE][STOR_ALL_EX, riskyhour].data)'
capres_discharge = value.(EP[:vCAPRES_discharge][STOR_ALL_EX, riskyhour].data)'
capres_charge = value.(EP[:vCAPRES_charge][STOR_ALL_EX, riskyhour].data)'
- capvalue[riskyhour, STOR_ALL_EX] = crm_derate(i, STOR_ALL_EX) .* (power(STOR_ALL_EX) - charge + capres_discharge - capres_charge) ./ total_cap(STOR_ALL_EX)
- end
+ capvalue[riskyhour, STOR_ALL_EX] = crm_derate(i, STOR_ALL_EX) .*
+ (power(STOR_ALL_EX) - charge +
+ capres_discharge - capres_charge) ./
+ total_cap(STOR_ALL_EX)
+ end
- if !isempty(FLEX_EX)
+ if !isempty(FLEX_EX)
charge = value.(EP[:vCHARGE_FLEX][FLEX_EX, riskyhour].data)'
- capvalue[riskyhour, FLEX_EX] = crm_derate(i, FLEX_EX) .* (charge - power(FLEX_EX)) ./ total_cap(FLEX_EX)
- end
- if !isempty(VRE_STOR_EX)
- capres_dc_discharge = value.(EP[:vCAPRES_DC_DISCHARGE][DC_DISCHARGE, riskyhour].data)'
+ capvalue[riskyhour, FLEX_EX] = crm_derate(i, FLEX_EX) .*
+ (charge - power(FLEX_EX)) ./ total_cap(FLEX_EX)
+ end
+ if !isempty(VRE_STOR_EX)
+ capres_dc_discharge = value.(EP[:vCAPRES_DC_DISCHARGE][DC_DISCHARGE,
+ riskyhour].data)'
discharge_eff = etainverter.(gen[storage_dc_discharge(gen)])'
capvalue_dc_discharge = zeros(T, G)
- capvalue_dc_discharge[riskyhour, DC_DISCHARGE] = capres_dc_discharge .* discharge_eff
+ capvalue_dc_discharge[riskyhour, DC_DISCHARGE] = capres_dc_discharge .*
+ discharge_eff
capres_dc_charge = value.(EP[:vCAPRES_DC_CHARGE][DC_CHARGE, riskyhour].data)'
charge_eff = etainverter.(gen[storage_dc_charge(gen)])'
capvalue_dc_charge = zeros(T, G)
capvalue_dc_charge[riskyhour, DC_CHARGE] = capres_dc_charge ./ charge_eff
- capvalue[riskyhour, VRE_STOR_EX] = crm_derate(i, VRE_STOR_EX) .* power(VRE_STOR_EX) ./ total_cap(VRE_STOR_EX)
-
- charge_vre_stor = value.(EP[:vCHARGE_VRE_STOR][VRE_STOR_STOR_EX, riskyhour].data)'
- capvalue[riskyhour, VRE_STOR_STOR_EX] -= crm_derate(i, VRE_STOR_STOR_EX) .* charge_vre_stor ./ total_cap(VRE_STOR_STOR_EX)
-
- capvalue[riskyhour, DC_DISCHARGE_EX] += crm_derate(i, DC_DISCHARGE_EX) .* capvalue_dc_discharge[riskyhour, DC_DISCHARGE_EX] ./ total_cap(DC_DISCHARGE_EX)
- capres_ac_discharge = value.(EP[:vCAPRES_AC_DISCHARGE][AC_DISCHARGE_EX, riskyhour].data)'
- capvalue[riskyhour, AC_DISCHARGE_EX] += crm_derate(i, AC_DISCHARGE_EX) .* capres_ac_discharge ./ total_cap(AC_DISCHARGE_EX)
-
- capvalue[riskyhour, DC_CHARGE_EX] -= crm_derate(i, DC_CHARGE_EX) .* capvalue_dc_charge[riskyhour, DC_CHARGE_EX] ./ total_cap(DC_CHARGE_EX)
+ capvalue[riskyhour, VRE_STOR_EX] = crm_derate(i, VRE_STOR_EX) .*
+ power(VRE_STOR_EX) ./ total_cap(VRE_STOR_EX)
+
+ charge_vre_stor = value.(EP[:vCHARGE_VRE_STOR][VRE_STOR_STOR_EX,
+ riskyhour].data)'
+ capvalue[riskyhour, VRE_STOR_STOR_EX] -= crm_derate(i, VRE_STOR_STOR_EX) .*
+ charge_vre_stor ./
+ total_cap(VRE_STOR_STOR_EX)
+
+ capvalue[riskyhour, DC_DISCHARGE_EX] += crm_derate(i, DC_DISCHARGE_EX) .*
+ capvalue_dc_discharge[riskyhour,
+ DC_DISCHARGE_EX] ./ total_cap(DC_DISCHARGE_EX)
+ capres_ac_discharge = value.(EP[:vCAPRES_AC_DISCHARGE][AC_DISCHARGE_EX,
+ riskyhour].data)'
+ capvalue[riskyhour, AC_DISCHARGE_EX] += crm_derate(i, AC_DISCHARGE_EX) .*
+ capres_ac_discharge ./
+ total_cap(AC_DISCHARGE_EX)
+
+ capvalue[riskyhour, DC_CHARGE_EX] -= crm_derate(i, DC_CHARGE_EX) .*
+ capvalue_dc_charge[riskyhour,
+ DC_CHARGE_EX] ./ total_cap(DC_CHARGE_EX)
capres_ac_charge = value.(EP[:vCAPRES_AC_CHARGE][AC_CHARGE_EX, riskyhour].data)'
- capvalue[riskyhour, AC_CHARGE_EX] -= crm_derate(i, AC_CHARGE_EX) .* capres_ac_charge ./ total_cap(AC_CHARGE_EX)
- end
+ capvalue[riskyhour, AC_CHARGE_EX] -= crm_derate(i, AC_CHARGE_EX) .*
+ capres_ac_charge ./ total_cap(AC_CHARGE_EX)
+ end
capvalue = collect(transpose(capvalue))
- temp_dfCapValue = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones, Reserve = fill(Symbol("CapRes_$i"), G))
- temp_dfCapValue = hcat(temp_dfCapValue, DataFrame(capvalue, :auto))
- auxNew_Names = [Symbol("Resource"); Symbol("Zone"); Symbol("Reserve"); [Symbol("t$t") for t in 1:T]]
- rename!(temp_dfCapValue, auxNew_Names)
- append!(dfCapValue, temp_dfCapValue)
- end
+ temp_dfCapValue = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Reserve = fill(Symbol("CapRes_$i"), G))
+ temp_dfCapValue = hcat(temp_dfCapValue, DataFrame(capvalue, :auto))
+ auxNew_Names = [Symbol("Resource");
+ Symbol("Zone");
+ Symbol("Reserve");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(temp_dfCapValue, auxNew_Names)
+ append!(dfCapValue, temp_dfCapValue)
+ end
write_simple_csv(joinpath(path, "CapacityValue.csv"), dfCapValue)
end
@@ -117,9 +147,11 @@ be calculated only if `WriteShadowPrices` is activated.
Returns a vector, with units of $/MW
"""
-function capacity_reserve_margin_price(EP::Model, inputs::Dict, setup::Dict, capres_zone::Int)::Vector{Float64}
+function capacity_reserve_margin_price(EP::Model,
+ inputs::Dict,
+ setup::Dict,
+ capres_zone::Int)::Vector{Float64}
ω = inputs["omega"]
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
return dual.(EP[:cCapacityResMargin][capres_zone, :]) ./ ω * scale_factor
end
-
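# Illustration only: capacity_reserve_margin_price above converts the constraint duals
# into an hourly $/MW price by dividing by the hour weights omega, and write_capacity_value
# then keeps only the "risky" hours where that price reaches 1 $/MW. A sketch with
# made-up duals and uniform weights (no JuMP model involved):
duals = [0.0, 0.0, 100.0, 2.4, 384.0]
omega = fill(8.0, 5)                                    # hours represented by each timestep
crm_price = duals ./ omega                              # -> [0.0, 0.0, 12.5, 0.3, 48.0] $/MW
minimum_crm_price = 1.0
riskyhour = findall(>=(minimum_crm_price), crm_price)   # -> [3, 5]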
diff --git a/src/write_outputs/capacity_reserve_margin/write_reserve_margin.jl b/src/write_outputs/capacity_reserve_margin/write_reserve_margin.jl
index 6d2f8c2e80..1eeca0ef0e 100644
--- a/src/write_outputs/capacity_reserve_margin/write_reserve_margin.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_reserve_margin.jl
@@ -1,9 +1,9 @@
function write_reserve_margin(path::AbstractString, setup::Dict, EP::Model)
- temp_ResMar = dual.(EP[:cCapacityResMargin])
- if setup["ParameterScale"] == 1
- temp_ResMar = temp_ResMar * ModelScalingFactor # Convert from MillionUS$/GWh to US$/MWh
- end
- dfResMar = DataFrame(temp_ResMar, :auto)
- CSV.write(joinpath(path, "ReserveMargin.csv"), dfResMar)
- return nothing
+ temp_ResMar = dual.(EP[:cCapacityResMargin])
+ if setup["ParameterScale"] == 1
+ temp_ResMar = temp_ResMar * ModelScalingFactor # Convert from MillionUS$/GWh to US$/MWh
+ end
+ dfResMar = DataFrame(temp_ResMar, :auto)
+ CSV.write(joinpath(path, "ReserveMargin.csv"), dfResMar)
+ return nothing
end
diff --git a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_revenue.jl b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_revenue.jl
index 629cc76756..5036b0759c 100644
--- a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_revenue.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_revenue.jl
@@ -8,60 +8,94 @@ Function for reporting the capacity revenue earned by each generator listed in t
The last column is the total revenue received from all capacity reserve margin constraints.
As a reminder, GenX models the capacity reserve margin (aka capacity market) at the time-dependent level, and each constraint either stands for an overall market or a locality constraint.
"""
-function write_reserve_margin_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_reserve_margin_revenue(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
-
- gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
+ gen = inputs["RESOURCES"]
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- THERM_ALL = inputs["THERM_ALL"]
- VRE = inputs["VRE"]
- HYDRO_RES = inputs["HYDRO_RES"]
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- MUST_RUN = inputs["MUST_RUN"]
- VRE_STOR = inputs["VRE_STOR"]
- if !isempty(VRE_STOR)
- VRE_STOR_STOR = inputs["VS_STOR"]
- DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
- AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
- DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
- AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
- end
- dfResRevenue = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters)
- annual_sum = zeros(G)
- for i in 1:inputs["NCapacityReserveMargin"]
- weighted_price = capacity_reserve_margin_price(EP, inputs, setup, i) .* inputs["omega"]
- tempresrev = zeros(G)
- tempresrev[THERM_ALL] = thermal_plant_effective_capacity(EP, inputs, THERM_ALL, i)' * weighted_price
- tempresrev[VRE] = derating_factor.(gen.Vre, tag=i) .* (value.(EP[:eTotalCap][VRE])) .* (inputs["pP_Max"][VRE, :] * weighted_price)
- tempresrev[MUST_RUN] = derating_factor.(gen.MustRun, tag=i) .* (value.(EP[:eTotalCap][MUST_RUN])) .* (inputs["pP_Max"][MUST_RUN, :] * weighted_price)
- tempresrev[HYDRO_RES] = derating_factor.(gen.Hydro, tag=i) .* (value.(EP[:vP][HYDRO_RES, :]) * weighted_price)
- if !isempty(STOR_ALL)
- tempresrev[STOR_ALL] = derating_factor.(gen.Storage, tag=i) .* ((value.(EP[:vP][STOR_ALL, :]) - value.(EP[:vCHARGE][STOR_ALL, :]).data + value.(EP[:vCAPRES_discharge][STOR_ALL, :]).data - value.(EP[:vCAPRES_charge][STOR_ALL, :]).data) * weighted_price)
- end
- if !isempty(FLEX)
- tempresrev[FLEX] = derating_factor.(gen.FlexDemand, tag=i) .* ((value.(EP[:vCHARGE_FLEX][FLEX, :]).data - value.(EP[:vP][FLEX, :])) * weighted_price)
- end
- if !isempty(VRE_STOR)
- gen_VRE_STOR = gen.VreStorage
- tempresrev[VRE_STOR] = derating_factor.(gen_VRE_STOR, tag=i) .* ((value.(EP[:vP][VRE_STOR, :])) * weighted_price)
- tempresrev[VRE_STOR_STOR] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0) .| (gen_VRE_STOR.stor_dc_charge.!=0) .| (gen_VRE_STOR.stor_ac_discharge.!=0) .|(gen_VRE_STOR.stor_ac_charge.!=0)], tag=i) .* (value.(EP[:vCHARGE_VRE_STOR][VRE_STOR_STOR, :]).data * weighted_price)
- tempresrev[DC_DISCHARGE] .+= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)], tag=i) .* ((value.(EP[:vCAPRES_DC_DISCHARGE][DC_DISCHARGE, :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)])) * weighted_price)
- tempresrev[AC_DISCHARGE] .+= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_discharge.!=0)], tag=i) .* ((value.(EP[:vCAPRES_AC_DISCHARGE][AC_DISCHARGE, :]).data) * weighted_price)
- tempresrev[DC_CHARGE] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge.!=0)], tag=i) .* ((value.(EP[:vCAPRES_DC_CHARGE][DC_CHARGE, :]).data ./ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge.!=0)])) * weighted_price)
- tempresrev[AC_CHARGE] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_charge.!=0)], tag=i) .* ((value.(EP[:vCAPRES_AC_CHARGE][AC_CHARGE, :]).data) * weighted_price)
- end
- tempresrev *= scale_factor
- annual_sum .+= tempresrev
- dfResRevenue = hcat(dfResRevenue, DataFrame([tempresrev], [Symbol("CapRes_$i")]))
- end
- dfResRevenue.AnnualSum = annual_sum
- CSV.write(joinpath(path, "ReserveMarginRevenue.csv"), dfResRevenue)
- return dfResRevenue
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ THERM_ALL = inputs["THERM_ALL"]
+ VRE = inputs["VRE"]
+ HYDRO_RES = inputs["HYDRO_RES"]
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ MUST_RUN = inputs["MUST_RUN"]
+ VRE_STOR = inputs["VRE_STOR"]
+ if !isempty(VRE_STOR)
+ VRE_STOR_STOR = inputs["VS_STOR"]
+ DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
+ AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
+ DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
+ AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
+ end
+ dfResRevenue = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters)
+ annual_sum = zeros(G)
+ for i in 1:inputs["NCapacityReserveMargin"]
+ weighted_price = capacity_reserve_margin_price(EP, inputs, setup, i) .*
+ inputs["omega"]
+ tempresrev = zeros(G)
+ tempresrev[THERM_ALL] = thermal_plant_effective_capacity(EP,
+ inputs,
+ THERM_ALL,
+ i)' * weighted_price
+ tempresrev[VRE] = derating_factor.(gen.Vre, tag = i) .*
+ (value.(EP[:eTotalCap][VRE])) .*
+ (inputs["pP_Max"][VRE, :] * weighted_price)
+ tempresrev[MUST_RUN] = derating_factor.(gen.MustRun, tag = i) .*
+ (value.(EP[:eTotalCap][MUST_RUN])) .*
+ (inputs["pP_Max"][MUST_RUN, :] * weighted_price)
+ tempresrev[HYDRO_RES] = derating_factor.(gen.Hydro, tag = i) .*
+ (value.(EP[:vP][HYDRO_RES, :]) * weighted_price)
+ if !isempty(STOR_ALL)
+ tempresrev[STOR_ALL] = derating_factor.(gen.Storage, tag = i) .*
+ ((value.(EP[:vP][STOR_ALL, :]) -
+ value.(EP[:vCHARGE][STOR_ALL, :]).data +
+ value.(EP[:vCAPRES_discharge][STOR_ALL, :]).data -
+ value.(EP[:vCAPRES_charge][STOR_ALL, :]).data) *
+ weighted_price)
+ end
+ if !isempty(FLEX)
+ tempresrev[FLEX] = derating_factor.(gen.FlexDemand, tag = i) .*
+ ((value.(EP[:vCHARGE_FLEX][FLEX, :]).data -
+ value.(EP[:vP][FLEX, :])) * weighted_price)
+ end
+ if !isempty(VRE_STOR)
+ gen_VRE_STOR = gen.VreStorage
+ tempresrev[VRE_STOR] = derating_factor.(gen_VRE_STOR, tag = i) .*
+ ((value.(EP[:vP][VRE_STOR, :])) * weighted_price)
+ tempresrev[VRE_STOR_STOR] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0) .| (gen_VRE_STOR.stor_dc_charge .!= 0) .| (gen_VRE_STOR.stor_ac_discharge .!= 0) .| (gen_VRE_STOR.stor_ac_charge .!= 0)],
+ tag = i) .* (value.(EP[:vCHARGE_VRE_STOR][VRE_STOR_STOR,
+ :]).data * weighted_price)
+ tempresrev[DC_DISCHARGE] .+= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)],
+ tag = i) .* ((value.(EP[:vCAPRES_DC_DISCHARGE][DC_DISCHARGE,
+ :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)])) *
+ weighted_price)
+ tempresrev[AC_DISCHARGE] .+= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_discharge .!= 0)],
+ tag = i) .* ((value.(EP[:vCAPRES_AC_DISCHARGE][AC_DISCHARGE,
+ :]).data) * weighted_price)
+ tempresrev[DC_CHARGE] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge .!= 0)],
+ tag = i) .* ((value.(EP[:vCAPRES_DC_CHARGE][DC_CHARGE, :]).data ./
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge .!= 0)])) *
+ weighted_price)
+ tempresrev[AC_CHARGE] .-= derating_factor.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_charge .!= 0)],
+ tag = i) .* ((value.(EP[:vCAPRES_AC_CHARGE][AC_CHARGE, :]).data) *
+ weighted_price)
+ end
+ tempresrev *= scale_factor
+ annual_sum .+= tempresrev
+ dfResRevenue = hcat(dfResRevenue, DataFrame([tempresrev], [Symbol("CapRes_$i")]))
+ end
+ dfResRevenue.AnnualSum = annual_sum
+ CSV.write(joinpath(path, "ReserveMarginRevenue.csv"), dfResRevenue)
+ return dfResRevenue
end
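# Illustration only: every revenue term above has the same shape, a plant-by-timestep
# capacity contribution multiplied by the CRM price weighted with the hour weights omega.
# A toy version with two plants and three timesteps (made-up numbers, not model output):
omega = [8.0, 8.0, 8.0]                     # hours represented by each timestep
price = [0.0, 50.0, 120.0]                  # $/MW in each timestep
weighted_price = price .* omega             # price scaled to the hours it covers
eff_cap = [100.0 100.0 100.0;               # MW credited toward the CRM, plants x timesteps
            40.0  40.0   0.0]
annual_revenue = eff_cap * weighted_price   # -> [136000.0, 16000.0] $ per plant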
diff --git a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_slack.jl b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_slack.jl
index 99b0e9e0f6..c3f2ebf2c4 100644
--- a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_slack.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_slack.jl
@@ -1,10 +1,13 @@
-function write_reserve_margin_slack(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_reserve_margin_slack(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
NCRM = inputs["NCapacityReserveMargin"]
T = inputs["T"] # Number of time steps (hours)
- dfResMar_slack = DataFrame(CRM_Constraint = [Symbol("CapRes_$res") for res = 1:NCRM],
- AnnualSum = value.(EP[:eCapResSlack_Year]),
- Penalty = value.(EP[:eCCapResSlack]))
-
+ dfResMar_slack = DataFrame(CRM_Constraint = [Symbol("CapRes_$res") for res in 1:NCRM],
+ AnnualSum = value.(EP[:eCapResSlack_Year]),
+ Penalty = value.(EP[:eCCapResSlack]))
+
if setup["ParameterScale"] == 1
dfResMar_slack.AnnualSum .*= ModelScalingFactor # Convert GW to MW
dfResMar_slack.Penalty .*= ModelScalingFactor^2 # Convert Million $ to $
@@ -17,9 +20,11 @@ function write_reserve_margin_slack(path::AbstractString, inputs::Dict, setup::D
if setup["ParameterScale"] == 1
temp_ResMar_slack .*= ModelScalingFactor # Convert GW to MW
end
- dfResMar_slack = hcat(dfResMar_slack, DataFrame(temp_ResMar_slack, [Symbol("t$t") for t in 1:T]))
- CSV.write(joinpath(path, "ReserveMargin_prices_and_penalties.csv"), dftranspose(dfResMar_slack, false), writeheader=false)
+ dfResMar_slack = hcat(dfResMar_slack,
+ DataFrame(temp_ResMar_slack, [Symbol("t$t") for t in 1:T]))
+ CSV.write(joinpath(path, "ReserveMargin_prices_and_penalties.csv"),
+ dftranspose(dfResMar_slack, false),
+ writeheader = false)
end
return nothing
end
-
diff --git a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_w.jl b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_w.jl
index 00c273adfc..025f5cd4be 100644
--- a/src/write_outputs/capacity_reserve_margin/write_reserve_margin_w.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_reserve_margin_w.jl
@@ -1,13 +1,14 @@
function write_reserve_margin_w(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- #dfResMar dataframe with weights included for calculations
- dfResMar_w = DataFrame(Constraint = [Symbol("t$t") for t in 1:T])
- temp_ResMar_w = transpose(dual.(EP[:cCapacityResMargin]))./inputs["omega"]
- if setup["ParameterScale"] == 1
- temp_ResMar_w = temp_ResMar_w * ModelScalingFactor # Convert from MillionUS$/GWh to US$/MWh
- end
- dfResMar_w = hcat(dfResMar_w, DataFrame(temp_ResMar_w, :auto))
- auxNew_Names_res=[Symbol("Constraint"); [Symbol("CapRes_$i") for i in 1:inputs["NCapacityReserveMargin"]]]
- rename!(dfResMar_w,auxNew_Names_res)
- CSV.write(joinpath(path, "ReserveMargin_w.csv"), dfResMar_w)
-end
\ No newline at end of file
+ T = inputs["T"] # Number of time steps (hours)
+ #dfResMar dataframe with weights included for calculations
+ dfResMar_w = DataFrame(Constraint = [Symbol("t$t") for t in 1:T])
+ temp_ResMar_w = transpose(dual.(EP[:cCapacityResMargin])) ./ inputs["omega"]
+ if setup["ParameterScale"] == 1
+ temp_ResMar_w = temp_ResMar_w * ModelScalingFactor # Convert from MillionUS$/GWh to US$/MWh
+ end
+ dfResMar_w = hcat(dfResMar_w, DataFrame(temp_ResMar_w, :auto))
+ auxNew_Names_res = [Symbol("Constraint");
+ [Symbol("CapRes_$i") for i in 1:inputs["NCapacityReserveMargin"]]]
+ rename!(dfResMar_w, auxNew_Names_res)
+ CSV.write(joinpath(path, "ReserveMargin_w.csv"), dfResMar_w)
+end
diff --git a/src/write_outputs/capacity_reserve_margin/write_virtual_discharge.jl b/src/write_outputs/capacity_reserve_margin/write_virtual_discharge.jl
index 9a4d8308a6..1aa52623de 100644
--- a/src/write_outputs/capacity_reserve_margin/write_virtual_discharge.jl
+++ b/src/write_outputs/capacity_reserve_margin/write_virtual_discharge.jl
@@ -5,25 +5,25 @@ Function for writing the "virtual" discharge of each storage technology. Virtual
allow storage resources to contribute to the capacity reserve margin without actually discharging.
"""
function write_virtual_discharge(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ STOR_ALL = inputs["STOR_ALL"]
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- STOR_ALL = inputs["STOR_ALL"]
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
-
- resources = inputs["RESOURCE_NAMES"][STOR_ALL]
- zones = inputs["R_ZONES"][STOR_ALL]
- virtual_discharge = (value.(EP[:vCAPRES_discharge][STOR_ALL, :].data) - value.(EP[:vCAPRES_charge][STOR_ALL, :].data)) * scale_factor
+ resources = inputs["RESOURCE_NAMES"][STOR_ALL]
+ zones = inputs["R_ZONES"][STOR_ALL]
+ virtual_discharge = (value.(EP[:vCAPRES_discharge][STOR_ALL, :].data) -
+ value.(EP[:vCAPRES_charge][STOR_ALL, :].data)) * scale_factor
- dfVirtualDischarge = DataFrame(Resource = resources, Zone = zones)
- dfVirtualDischarge.AnnualSum .= virtual_discharge * inputs["omega"]
+ dfVirtualDischarge = DataFrame(Resource = resources, Zone = zones)
+ dfVirtualDischarge.AnnualSum .= virtual_discharge * inputs["omega"]
- filepath = joinpath(path, "virtual_discharge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfVirtualDischarge)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, virtual_discharge, dfVirtualDischarge)
- end
- return nothing
-end
+ filepath = joinpath(path, "virtual_discharge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfVirtualDischarge)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, virtual_discharge, dfVirtualDischarge)
+ end
+ return nothing
+end
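# Illustration only: "virtual" discharge above is the net of the virtual discharge and
# virtual charge decisions, so a unit that only virtually charges in an hour shows up as
# a negative contribution. Toy numbers for one storage unit over three timesteps:
vcap_discharge = [5.0, 0.0, 3.0]                     # MW virtually discharged
vcap_charge = [0.0, 4.0, 0.0]                        # MW virtually charged
virtual_discharge = vcap_discharge .- vcap_charge    # -> [5.0, -4.0, 3.0]
omega = [8.0, 8.0, 8.0]                              # hours represented by each timestep
annual_sum = virtual_discharge' * omega              # -> 32.0 MWh of net virtual discharge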
diff --git a/src/write_outputs/choose_output_dir.jl b/src/write_outputs/choose_output_dir.jl
index dc051f9881..2da796944d 100644
--- a/src/write_outputs/choose_output_dir.jl
+++ b/src/write_outputs/choose_output_dir.jl
@@ -5,11 +5,11 @@ Avoid overwriting (potentially important) existing results by appending to the d
Checks whether the suggested output directory already exists. While it does, it appends _1, _2, etc. until an unused name is found
"""
function choose_output_dir(pathinit::String)
- path = pathinit
- counter = 1
- while isdir(path)
- path = string(pathinit, "_", counter)
- counter += 1
- end
- return path
+ path = pathinit
+ counter = 1
+ while isdir(path)
+ path = string(pathinit, "_", counter)
+ counter += 1
+ end
+ return path
end
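# Illustration only: choose_output_dir never overwrites an existing results folder, it
# keeps appending a counter until the name is free. A quick sandboxed check of that
# behaviour, assuming the helper is in scope (it is internal to GenX, so in practice
# something like GenX.choose_output_dir in a dev session):
mktempdir() do tmp
    base = joinpath(tmp, "results")
    println(choose_output_dir(base))   # "results" does not exist yet -> returned as-is
    mkdir(base)
    println(choose_output_dir(base))   # now taken -> ".../results_1"
end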
diff --git a/src/write_outputs/co2_cap/write_co2_cap.jl b/src/write_outputs/co2_cap/write_co2_cap.jl
index fa8e479ec8..a78ddf2f23 100644
--- a/src/write_outputs/co2_cap/write_co2_cap.jl
+++ b/src/write_outputs/co2_cap/write_co2_cap.jl
@@ -5,19 +5,19 @@ Function for reporting carbon price associated with carbon cap constraints.
"""
function write_co2_cap(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- dfCO2Price = DataFrame(CO2_Cap = [Symbol("CO2_Cap_$cap") for cap = 1:inputs["NCO2Cap"]],
- CO2_Price = (-1) * (dual.(EP[:cCO2Emissions_systemwide])))
+ dfCO2Price = DataFrame(CO2_Cap = [Symbol("CO2_Cap_$cap") for cap in 1:inputs["NCO2Cap"]],
+ CO2_Price = (-1) * (dual.(EP[:cCO2Emissions_systemwide])))
if setup["ParameterScale"] == 1
dfCO2Price.CO2_Price .*= ModelScalingFactor # Convert Million$/kton to $/ton
end
- if haskey(inputs, "dfCO2Cap_slack")
- dfCO2Price[!,:CO2_Mass_Slack] = convert(Array{Float64}, value.(EP[:vCO2Cap_slack]))
- dfCO2Price[!,:CO2_Penalty] = convert(Array{Float64}, value.(EP[:eCCO2Cap_slack]))
- if setup["ParameterScale"] == 1
+ if haskey(inputs, "dfCO2Cap_slack")
+ dfCO2Price[!, :CO2_Mass_Slack] = convert(Array{Float64}, value.(EP[:vCO2Cap_slack]))
+ dfCO2Price[!, :CO2_Penalty] = convert(Array{Float64}, value.(EP[:eCCO2Cap_slack]))
+ if setup["ParameterScale"] == 1
dfCO2Price.CO2_Mass_Slack .*= ModelScalingFactor # Convert ktons to tons
dfCO2Price.CO2_Penalty .*= ModelScalingFactor^2 # Convert Million$ to $
- end
- end
+ end
+ end
CSV.write(joinpath(path, "CO2_prices_and_penalties.csv"), dfCO2Price)
diff --git a/src/write_outputs/dftranspose.jl b/src/write_outputs/dftranspose.jl
index e482a2a37b..21c8295899 100644
--- a/src/write_outputs/dftranspose.jl
+++ b/src/write_outputs/dftranspose.jl
@@ -18,10 +18,11 @@ FIXME: This is for DataFrames@0.20.2, as used in GenX.
Versions 0.21+ could use stack and unstack to make further changes while retaining the order
"""
function dftranspose(df::DataFrame, withhead::Bool)
- if withhead
- colnames = cat(:Row, Symbol.(df[!,1]), dims=1)
- return DataFrame([[names(df)]; collect.(eachrow(df))], colnames)
- else
- return DataFrame([[names(df)]; collect.(eachrow(df))], [:Row; Symbol.("x",axes(df, 1))])
- end
+ if withhead
+ colnames = cat(:Row, Symbol.(df[!, 1]), dims = 1)
+ return DataFrame([[names(df)]; collect.(eachrow(df))], colnames)
+ else
+ return DataFrame([[names(df)]; collect.(eachrow(df))],
+ [:Row; Symbol.("x", axes(df, 1))])
+ end
end # End dftranspose()
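# Illustration only: a quick look at what dftranspose returns (the helper is internal to
# GenX and assumed to be in scope here). With withhead = false the original column names
# end up in a :Row column and each original row becomes a column x1, x2, ...:
using DataFrames
df = DataFrame(Resource = ["solar_1", "wind_1"], AnnualSum = [10.0, 20.0])
tdf = dftranspose(df, false)
# tdf is a 2x3 DataFrame:
#   Row            x1           x2
#   "Resource"     "solar_1"    "wind_1"
#   "AnnualSum"    10.0         20.0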
diff --git a/src/write_outputs/energy_share_requirement/write_esr_prices.jl b/src/write_outputs/energy_share_requirement/write_esr_prices.jl
index fe1127bb42..e9cccc46ae 100644
--- a/src/write_outputs/energy_share_requirement/write_esr_prices.jl
+++ b/src/write_outputs/energy_share_requirement/write_esr_prices.jl
@@ -1,17 +1,17 @@
function write_esr_prices(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- dfESR = DataFrame(ESR_Price = convert(Array{Float64}, dual.(EP[:cESRShare])))
- if setup["ParameterScale"] == 1
- dfESR[!,:ESR_Price] = dfESR[!,:ESR_Price] * ModelScalingFactor # Converting MillionUS$/GWh to US$/MWh
- end
+ dfESR = DataFrame(ESR_Price = convert(Array{Float64}, dual.(EP[:cESRShare])))
+ if setup["ParameterScale"] == 1
+ dfESR[!, :ESR_Price] = dfESR[!, :ESR_Price] * ModelScalingFactor # Converting MillionUS$/GWh to US$/MWh
+ end
- if haskey(inputs, "dfESR_slack")
- dfESR[!,:ESR_AnnualSlack] = convert(Array{Float64}, value.(EP[:vESR_slack]))
- dfESR[!,:ESR_AnnualPenalty] = convert(Array{Float64}, value.(EP[:eCESRSlack]))
- if setup["ParameterScale"] == 1
- dfESR[!,:ESR_AnnualSlack] *= ModelScalingFactor # Converting GWh to MWh
- dfESR[!,:ESR_AnnualPenalty] *= (ModelScalingFactor^2) # Converting MillionUSD to USD
- end
- end
- CSV.write(joinpath(path, "ESR_prices_and_penalties.csv"), dfESR)
- return dfESR
+ if haskey(inputs, "dfESR_slack")
+ dfESR[!, :ESR_AnnualSlack] = convert(Array{Float64}, value.(EP[:vESR_slack]))
+ dfESR[!, :ESR_AnnualPenalty] = convert(Array{Float64}, value.(EP[:eCESRSlack]))
+ if setup["ParameterScale"] == 1
+ dfESR[!, :ESR_AnnualSlack] *= ModelScalingFactor # Converting GWh to MWh
+ dfESR[!, :ESR_AnnualPenalty] *= (ModelScalingFactor^2) # Converting MillionUSD to USD
+ end
+ end
+ CSV.write(joinpath(path, "ESR_prices_and_penalties.csv"), dfESR)
+ return dfESR
end
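# Illustration only: the ParameterScale conversions above are all powers of the same
# factor. With the scaled model, energy is carried in GWh and costs in million $, so a
# scaling factor of 10^3 (as the unit comments above imply) converts prices from
# MillionUS$/GWh to US$/MWh, quantities from GWh to MWh, and cost penalties from
# Million$ to $ via the squared factor. Worked numerically with made-up values:
scaling = 1.0e3
price_scaled = 0.05                  # 0.05 MillionUS$/GWh
price_scaled * scaling               # -> 50.0 US$/MWh
penalty_scaled = 2.5                 # 2.5 Million $
penalty_scaled * scaling^2           # -> 2.5e6 $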
diff --git a/src/write_outputs/energy_share_requirement/write_esr_revenue.jl b/src/write_outputs/energy_share_requirement/write_esr_revenue.jl
index c212caccf8..246853eaec 100644
--- a/src/write_outputs/energy_share_requirement/write_esr_revenue.jl
+++ b/src/write_outputs/energy_share_requirement/write_esr_revenue.jl
@@ -3,67 +3,82 @@
Function for reporting the renewable/clean credit revenue earned by each generator listed in the input file. GenX will print this file only when RPS/CES is modeled and the shadow price can be obtained from the solver. Each row corresponds to a generator, and each column starting from the 6th to the second last is the total revenue earned from each RPS constraint. The revenue is calculated as the total annual generation (if eligible for the corresponding constraint) multiplied by the RPS/CES price. The last column is the total revenue received from all constraints. The unit is \$.
"""
-function write_esr_revenue(path::AbstractString, inputs::Dict, setup::Dict, dfPower::DataFrame, dfESR::DataFrame, EP::Model)
- gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
- rid = resource_id.(gen)
+function write_esr_revenue(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ dfPower::DataFrame,
+ dfESR::DataFrame,
+ EP::Model)
+ gen = inputs["RESOURCES"]
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
+ rid = resource_id.(gen)
- dfESRRev = DataFrame(region = regions, Resource = inputs["RESOURCE_NAMES"], zone = zones, Cluster = clusters, R_ID = rid)
- G = inputs["G"]
- nESR = inputs["nESR"]
- weight = inputs["omega"]
+ dfESRRev = DataFrame(region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ zone = zones,
+ Cluster = clusters,
+ R_ID = rid)
+ G = inputs["G"]
+ nESR = inputs["nESR"]
+ weight = inputs["omega"]
# Load VRE-storage inputs
- VRE_STOR = inputs["VRE_STOR"] # Set of VRE-STOR generators (indices)
-
- if !isempty(VRE_STOR)
- gen_VRE_STOR = gen.VreStorage # Set of VRE-STOR generators (objects)
- SOLAR = inputs["VS_SOLAR"]
- WIND = inputs["VS_WIND"]
- SOLAR_ONLY = setdiff(SOLAR, WIND)
- WIND_ONLY = setdiff(WIND, SOLAR)
- SOLAR_WIND = intersect(SOLAR, WIND)
- end
+ VRE_STOR = inputs["VRE_STOR"] # Set of VRE-STOR generators (indices)
- for i in 1:nESR
- esr_col = Symbol("ESR_$i")
- price = dfESR[i, :ESR_Price]
- derated_annual_net_generation = dfPower[1:G,:AnnualSum] .* esr.(gen, tag=i)
- revenue = derated_annual_net_generation * price
- dfESRRev[!, esr_col] = revenue
+ if !isempty(VRE_STOR)
+ gen_VRE_STOR = gen.VreStorage # Set of VRE-STOR generators (objects)
+ SOLAR = inputs["VS_SOLAR"]
+ WIND = inputs["VS_WIND"]
+ SOLAR_ONLY = setdiff(SOLAR, WIND)
+ WIND_ONLY = setdiff(WIND, SOLAR)
+ SOLAR_WIND = intersect(SOLAR, WIND)
+ end
- if !isempty(VRE_STOR)
- if !isempty(SOLAR_ONLY)
- solar_resources = ((gen_VRE_STOR.wind.==0) .& (gen_VRE_STOR.solar.!=0))
- dfESRRev[SOLAR, esr_col] = (
- value.(EP[:vP_SOLAR][SOLAR, :]).data
- .* etainverter.(gen_VRE_STOR[solar_resources]) * weight
- ) .* esr_vrestor.(gen_VRE_STOR[solar_resources], tag=i) * price
- end
- if !isempty(WIND_ONLY)
- wind_resources = ((gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.==0))
- dfESRRev[WIND, esr_col] = (
- value.(EP[:vP_WIND][WIND, :]).data
- * weight
- ) .* esr_vrestor.(gen_VRE_STOR[wind_resources], tag=i) * price
- end
- if !isempty(SOLAR_WIND)
- solar_and_wind_resources = ((gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.!=0))
- dfESRRev[SOLAR_WIND, esr_col] = (
- (
- (value.(EP[:vP_WIND][SOLAR_WIND, :]).data * weight)
- .* esr_vrestor.(gen_VRE_STOR[solar_and_wind_resources], tag=i) * price
- ) + (
- value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data
- .* etainverter.(gen_VRE_STOR[solar_and_wind_resources])
- * weight
- ) .* esr_vrestor.(gen_VRE_STOR[solar_and_wind_resources], tag=i) * price
- )
- end
- end
- end
- dfESRRev.Total = sum(eachcol(dfESRRev[:, 6:nESR + 5]))
- CSV.write(joinpath(path, "ESR_Revenue.csv"), dfESRRev)
- return dfESRRev
-end
\ No newline at end of file
+ for i in 1:nESR
+ esr_col = Symbol("ESR_$i")
+ price = dfESR[i, :ESR_Price]
+ derated_annual_net_generation = dfPower[1:G, :AnnualSum] .* esr.(gen, tag = i)
+ revenue = derated_annual_net_generation * price
+ dfESRRev[!, esr_col] = revenue
+
+ if !isempty(VRE_STOR)
+ if !isempty(SOLAR_ONLY)
+ solar_resources = ((gen_VRE_STOR.wind .== 0) .& (gen_VRE_STOR.solar .!= 0))
+ dfESRRev[SOLAR, esr_col] = (value.(EP[:vP_SOLAR][SOLAR, :]).data
+ .*
+ etainverter.(gen_VRE_STOR[solar_resources]) *
+ weight) .*
+ esr_vrestor.(gen_VRE_STOR[solar_resources],
+ tag = i) * price
+ end
+ if !isempty(WIND_ONLY)
+ wind_resources = ((gen_VRE_STOR.wind .!= 0) .& (gen_VRE_STOR.solar .== 0))
+ dfESRRev[WIND, esr_col] = (value.(EP[:vP_WIND][WIND, :]).data
+ *
+ weight) .*
+ esr_vrestor.(gen_VRE_STOR[wind_resources],
+ tag = i) * price
+ end
+ if !isempty(SOLAR_WIND)
+ solar_and_wind_resources = ((gen_VRE_STOR.wind .!= 0) .&
+ (gen_VRE_STOR.solar .!= 0))
+ dfESRRev[SOLAR_WIND, esr_col] = (((value.(EP[:vP_WIND][SOLAR_WIND,
+ :]).data * weight)
+ .*
+ esr_vrestor.(gen_VRE_STOR[solar_and_wind_resources],
+ tag = i) * price) +
+ (value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data
+ .*
+ etainverter.(gen_VRE_STOR[solar_and_wind_resources])
+ *
+ weight) .*
+ esr_vrestor.(gen_VRE_STOR[solar_and_wind_resources],
+ tag = i) * price)
+ end
+ end
+ end
+ dfESRRev.Total = sum(eachcol(dfESRRev[:, 6:(nESR + 5)]))
+ CSV.write(joinpath(path, "ESR_Revenue.csv"), dfESRRev)
+ return dfESRRev
+end
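# Illustration only: the core ESR revenue calculation above is annual eligible generation
# times the ESR shadow price. A toy version with a made-up eligibility share (the esr tag
# in the resource data) and a made-up credit price:
using DataFrames
dfESRRev = DataFrame(Resource = ["solar_1", "gas_1"], AnnualSum = [5000.0, 8000.0])  # MWh
eligibility = [1.0, 0.0]             # share of generation credited under ESR_1
price = 25.0                         # $/MWh credit price
dfESRRev[!, :ESR_1] = dfESRRev.AnnualSum .* eligibility .* price
# -> ESR_1 column is [125000.0, 0.0]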
diff --git a/src/write_outputs/hydrogen/write_hourly_matching_prices.jl b/src/write_outputs/hydrogen/write_hourly_matching_prices.jl
index 393544e4e4..00d0ce3220 100644
--- a/src/write_outputs/hydrogen/write_hourly_matching_prices.jl
+++ b/src/write_outputs/hydrogen/write_hourly_matching_prices.jl
@@ -1,17 +1,25 @@
-function write_hourly_matching_prices(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+function write_hourly_matching_prices(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- ## Extract dual variables of constraints
- dfHourlyMatchPrices = DataFrame(Zone = 1:Z) # The unit is $/MWh
- # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of the constraint
- dfHourlyMatchPrices = hcat(dfHourlyMatchPrices, DataFrame(dual.(EP[:cHourlyMatching]).data./transpose(inputs["omega"])*scale_factor, :auto))
+ ## Extract dual variables of constraints
+ dfHourlyMatchPrices = DataFrame(Zone = 1:Z) # The unit is $/MWh
+ # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of the constraint
+ dfHourlyMatchPrices = hcat(dfHourlyMatchPrices,
+ DataFrame(dual.(EP[:cHourlyMatching]).data ./ transpose(inputs["omega"]) *
+ scale_factor,
+ :auto))
- auxNew_Names=[Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfHourlyMatchPrices,auxNew_Names)
+ auxNew_Names = [Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfHourlyMatchPrices, auxNew_Names)
- CSV.write(joinpath(path, "hourly_matching_prices.csv"), dftranspose(dfHourlyMatchPrices, false), header=false)
+ CSV.write(joinpath(path, "hourly_matching_prices.csv"),
+ dftranspose(dfHourlyMatchPrices, false),
+ header = false)
- return nothing
+ return nothing
end
diff --git a/src/write_outputs/hydrogen/write_hydrogen_prices.jl b/src/write_outputs/hydrogen/write_hydrogen_prices.jl
index 1d7d905491..5b3903a5a2 100644
--- a/src/write_outputs/hydrogen/write_hydrogen_prices.jl
+++ b/src/write_outputs/hydrogen/write_hydrogen_prices.jl
@@ -1,7 +1,8 @@
function write_hydrogen_prices(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- scale_factor = setup["ParameterScale"] == 1 ? 10^6 : 1 # If ParameterScale==1, costs are in millions of $
- dfHydrogenPrice = DataFrame(Hydrogen_Price_Per_Tonne = convert(Array{Float64}, dual.(EP[:cHydrogenMin])*scale_factor))
+ scale_factor = setup["ParameterScale"] == 1 ? 10^6 : 1 # If ParameterScale==1, costs are in millions of $
+ dfHydrogenPrice = DataFrame(Hydrogen_Price_Per_Tonne = convert(Array{Float64},
+ dual.(EP[:cHydrogenMin]) * scale_factor))
- CSV.write(joinpath(path, "hydrogen_prices.csv"), dfHydrogenPrice)
- return nothing
+ CSV.write(joinpath(path, "hydrogen_prices.csv"), dfHydrogenPrice)
+ return nothing
end
diff --git a/src/write_outputs/long_duration_storage/write_opwrap_lds_dstor.jl b/src/write_outputs/long_duration_storage/write_opwrap_lds_dstor.jl
index a5ce31ec7b..875d8e6f86 100644
--- a/src/write_outputs/long_duration_storage/write_opwrap_lds_dstor.jl
+++ b/src/write_outputs/long_duration_storage/write_opwrap_lds_dstor.jl
@@ -1,30 +1,32 @@
function write_opwrap_lds_dstor(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- ## Extract data frames from input dictionary
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+ ## Extract data frames from input dictionary
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- W = inputs["REP_PERIOD"] # Number of subperiods
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ W = inputs["REP_PERIOD"] # Number of subperiods
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- #Excess inventory of storage period built up during representative period w
- dfdStorage = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
- dsoc = zeros(G,W)
- for i in 1:G
- if i in inputs["STOR_LONG_DURATION"]
- dsoc[i,:] = value.(EP[:vdSOC])[i,:]
- end
- if !isempty(inputs["VRE_STOR"])
- if i in inputs["VS_LDS"]
- dsoc[i,:] = value.(EP[:vdSOC_VRE_STOR])[i,:]
- end
- end
- end
- if setup["ParameterScale"] == 1
- dsoc *= ModelScalingFactor
- end
-
- dfdStorage = hcat(dfdStorage, DataFrame(dsoc, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");[Symbol("w$t") for t in 1:W]]
- rename!(dfdStorage,auxNew_Names)
- CSV.write(joinpath(path, "dStorage.csv"), dftranspose(dfdStorage, false), header=false)
+ #Excess inventory of storage period built up during representative period w
+ dfdStorage = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
+ dsoc = zeros(G, W)
+ for i in 1:G
+ if i in inputs["STOR_LONG_DURATION"]
+ dsoc[i, :] = value.(EP[:vdSOC])[i, :]
+ end
+ if !isempty(inputs["VRE_STOR"])
+ if i in inputs["VS_LDS"]
+ dsoc[i, :] = value.(EP[:vdSOC_VRE_STOR])[i, :]
+ end
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dsoc *= ModelScalingFactor
+ end
+
+ dfdStorage = hcat(dfdStorage, DataFrame(dsoc, :auto))
+ auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("w$t") for t in 1:W]]
+ rename!(dfdStorage, auxNew_Names)
+ CSV.write(joinpath(path, "dStorage.csv"),
+ dftranspose(dfdStorage, false),
+ header = false)
end
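# Illustration only: several writers above assemble their headers by splicing Symbols,
# e.g. [Symbol("Resource"); Symbol("Zone"); [Symbol("w$t") for t in 1:W]], and then
# rename! the DataFrame before transposing and writing it. A minimal sketch of the idiom:
using DataFrames
W = 3
df = DataFrame(["stor_1" 1 0.1 0.2 0.3], :auto)   # one resource: zone plus W values
auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("w$t") for t in 1:W]]
rename!(df, auxNew_Names)
names(df)   # -> ["Resource", "Zone", "w1", "w2", "w3"]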
diff --git a/src/write_outputs/long_duration_storage/write_opwrap_lds_stor_init.jl b/src/write_outputs/long_duration_storage/write_opwrap_lds_stor_init.jl
index bf1bda48aa..81587b655a 100644
--- a/src/write_outputs/long_duration_storage/write_opwrap_lds_stor_init.jl
+++ b/src/write_outputs/long_duration_storage/write_opwrap_lds_stor_init.jl
@@ -1,30 +1,35 @@
-function write_opwrap_lds_stor_init(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- ## Extract data frames from input dictionary
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+function write_opwrap_lds_stor_init(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ ## Extract data frames from input dictionary
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- G = inputs["G"]
+ G = inputs["G"]
- # Initial level of storage in each modeled period
- NPeriods = size(inputs["Period_Map"])[1]
- dfStorageInit = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
- socw = zeros(G,NPeriods)
- for i in 1:G
- if i in inputs["STOR_LONG_DURATION"]
- socw[i,:] = value.(EP[:vSOCw])[i,:]
- end
- if !isempty(inputs["VRE_STOR"])
- if i in inputs["VS_LDS"]
- socw[i, :] = value.(EP[:vSOCw_VRE_STOR][i,:])
- end
- end
- end
- if setup["ParameterScale"] == 1
- socw *= ModelScalingFactor
- end
+ # Initial level of storage in each modeled period
+ NPeriods = size(inputs["Period_Map"])[1]
+ dfStorageInit = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
+ socw = zeros(G, NPeriods)
+ for i in 1:G
+ if i in inputs["STOR_LONG_DURATION"]
+ socw[i, :] = value.(EP[:vSOCw])[i, :]
+ end
+ if !isempty(inputs["VRE_STOR"])
+ if i in inputs["VS_LDS"]
+ socw[i, :] = value.(EP[:vSOCw_VRE_STOR][i, :])
+ end
+ end
+ end
+ if setup["ParameterScale"] == 1
+ socw *= ModelScalingFactor
+ end
- dfStorageInit = hcat(dfStorageInit, DataFrame(socw, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");[Symbol("n$t") for t in 1:NPeriods]]
- rename!(dfStorageInit,auxNew_Names)
- CSV.write(joinpath(path, "StorageInit.csv"), dftranspose(dfStorageInit, false), header=false)
+ dfStorageInit = hcat(dfStorageInit, DataFrame(socw, :auto))
+ auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("n$t") for t in 1:NPeriods]]
+ rename!(dfStorageInit, auxNew_Names)
+ CSV.write(joinpath(path, "StorageInit.csv"),
+ dftranspose(dfStorageInit, false),
+ header = false)
end
diff --git a/src/write_outputs/min_max_capacity_requirement/write_maximum_capacity_requirement.jl b/src/write_outputs/min_max_capacity_requirement/write_maximum_capacity_requirement.jl
index 24e3a7f4e6..997057955e 100644
--- a/src/write_outputs/min_max_capacity_requirement/write_maximum_capacity_requirement.jl
+++ b/src/write_outputs/min_max_capacity_requirement/write_maximum_capacity_requirement.jl
@@ -1,15 +1,19 @@
-function write_maximum_capacity_requirement(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_maximum_capacity_requirement(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
NumberOfMaxCapReqs = inputs["NumberOfMaxCapReqs"]
- dfMaxCapPrice = DataFrame(Constraint = [Symbol("MaxCapReq_$maxcap") for maxcap = 1:NumberOfMaxCapReqs],
- Price=-dual.(EP[:cZoneMaxCapReq]))
+ dfMaxCapPrice = DataFrame(Constraint = [Symbol("MaxCapReq_$maxcap")
+ for maxcap in 1:NumberOfMaxCapReqs],
+ Price = -dual.(EP[:cZoneMaxCapReq]))
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
dfMaxCapPrice.Price *= scale_factor
if haskey(inputs, "MaxCapPriceCap")
- dfMaxCapPrice[!,:Slack] = convert(Array{Float64}, value.(EP[:vMaxCap_slack]))
- dfMaxCapPrice[!,:Penalty] = convert(Array{Float64}, value.(EP[:eCMaxCap_slack]))
+ dfMaxCapPrice[!, :Slack] = convert(Array{Float64}, value.(EP[:vMaxCap_slack]))
+ dfMaxCapPrice[!, :Penalty] = convert(Array{Float64}, value.(EP[:eCMaxCap_slack]))
dfMaxCapPrice.Slack *= scale_factor # Convert GW to MW
dfMaxCapPrice.Penalty *= scale_factor^2 # Convert Million $ to $
end
diff --git a/src/write_outputs/min_max_capacity_requirement/write_minimum_capacity_requirement.jl b/src/write_outputs/min_max_capacity_requirement/write_minimum_capacity_requirement.jl
index a1bfe1d28d..346213ae61 100644
--- a/src/write_outputs/min_max_capacity_requirement/write_minimum_capacity_requirement.jl
+++ b/src/write_outputs/min_max_capacity_requirement/write_minimum_capacity_requirement.jl
@@ -1,15 +1,19 @@
-function write_minimum_capacity_requirement(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_minimum_capacity_requirement(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
NumberOfMinCapReqs = inputs["NumberOfMinCapReqs"]
- dfMinCapPrice = DataFrame(Constraint = [Symbol("MinCapReq_$mincap") for mincap = 1:NumberOfMinCapReqs],
- Price= dual.(EP[:cZoneMinCapReq]))
+ dfMinCapPrice = DataFrame(Constraint = [Symbol("MinCapReq_$mincap")
+ for mincap in 1:NumberOfMinCapReqs],
+ Price = dual.(EP[:cZoneMinCapReq]))
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
dfMinCapPrice.Price *= scale_factor # Convert Million $/GW to $/MW
if haskey(inputs, "MinCapPriceCap")
- dfMinCapPrice[!,:Slack] = convert(Array{Float64}, value.(EP[:vMinCap_slack]))
- dfMinCapPrice[!,:Penalty] = convert(Array{Float64}, value.(EP[:eCMinCap_slack]))
+ dfMinCapPrice[!, :Slack] = convert(Array{Float64}, value.(EP[:vMinCap_slack]))
+ dfMinCapPrice[!, :Penalty] = convert(Array{Float64}, value.(EP[:eCMinCap_slack]))
dfMinCapPrice.Slack *= scale_factor # Convert GW to MW
dfMinCapPrice.Penalty *= scale_factor^2 # Convert Million $ to $
end
diff --git a/src/write_outputs/reserves/write_operating_reserve_price_revenue.jl b/src/write_outputs/reserves/write_operating_reserve_price_revenue.jl
index 25a2b4a760..c3d4f389a4 100644
--- a/src/write_outputs/reserves/write_operating_reserve_price_revenue.jl
+++ b/src/write_outputs/reserves/write_operating_reserve_price_revenue.jl
@@ -7,36 +7,47 @@ Function for reporting the operating reserve and regulation revenue earned by ge
The last column is the total revenue received from all operating reserve and regulation constraints.
As a reminder, GenX models the operating reserve and regulation at the time-dependent level, and each constraint either stands for an overall market or a locality constraint.
"""
-function write_operating_reserve_regulation_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+function write_operating_reserve_regulation_revenue(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- gen = inputs["RESOURCES"]
- RSV = inputs["RSV"]
- REG = inputs["REG"]
+ gen = inputs["RESOURCES"]
+ RSV = inputs["RSV"]
+ REG = inputs["REG"]
regions = region.(gen)
clusters = cluster.(gen)
zones = zone_id.(gen)
names = inputs["RESOURCE_NAMES"]
- dfOpRsvRevenue = DataFrame(Region = regions[RSV], Resource = names[RSV], Zone = zones[RSV], Cluster = clusters[RSV], AnnualSum = Array{Float64}(undef, length(RSV)),)
- dfOpRegRevenue = DataFrame(Region = regions[REG], Resource = names[REG], Zone = zones[REG], Cluster = clusters[REG], AnnualSum = Array{Float64}(undef, length(REG)),)
-
- weighted_reg_price = operating_regulation_price(EP, inputs, setup)
- weighted_rsv_price = operating_reserve_price(EP, inputs, setup)
+ dfOpRsvRevenue = DataFrame(Region = regions[RSV],
+ Resource = names[RSV],
+ Zone = zones[RSV],
+ Cluster = clusters[RSV],
+ AnnualSum = Array{Float64}(undef, length(RSV)))
+ dfOpRegRevenue = DataFrame(Region = regions[REG],
+ Resource = names[REG],
+ Zone = zones[REG],
+ Cluster = clusters[REG],
+ AnnualSum = Array{Float64}(undef, length(REG)))
+
+ weighted_reg_price = operating_regulation_price(EP, inputs, setup)
+ weighted_rsv_price = operating_reserve_price(EP, inputs, setup)
- rsvrevenue = value.(EP[:vRSV][RSV, :].data) .* transpose(weighted_rsv_price)
- regrevenue = value.(EP[:vREG][REG, :].data) .* transpose(weighted_reg_price)
+ rsvrevenue = value.(EP[:vRSV][RSV, :].data) .* transpose(weighted_rsv_price)
+ regrevenue = value.(EP[:vREG][REG, :].data) .* transpose(weighted_reg_price)
- rsvrevenue *= scale_factor
- regrevenue *= scale_factor
+ rsvrevenue *= scale_factor
+ regrevenue *= scale_factor
- dfOpRsvRevenue.AnnualSum .= rsvrevenue * inputs["omega"]
- dfOpRegRevenue.AnnualSum .= regrevenue * inputs["omega"]
+ dfOpRsvRevenue.AnnualSum .= rsvrevenue * inputs["omega"]
+ dfOpRegRevenue.AnnualSum .= regrevenue * inputs["omega"]
- write_simple_csv(joinpath(path, "OperatingReserveRevenue.csv"), dfOpRsvRevenue)
- write_simple_csv(joinpath(path, "OperatingRegulationRevenue.csv"), dfOpRegRevenue)
- return dfOpRegRevenue, dfOpRsvRevenue
+ write_simple_csv(joinpath(path, "OperatingReserveRevenue.csv"), dfOpRsvRevenue)
+ write_simple_csv(joinpath(path, "OperatingRegulationRevenue.csv"), dfOpRegRevenue)
+ return dfOpRegRevenue, dfOpRsvRevenue
end
@doc raw"""
diff --git a/src/write_outputs/reserves/write_reg.jl b/src/write_outputs/reserves/write_reg.jl
index 4b984fcc14..7d7ca1efd6 100644
--- a/src/write_outputs/reserves/write_reg.jl
+++ b/src/write_outputs/reserves/write_reg.jl
@@ -1,20 +1,20 @@
function write_reg(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- REG = inputs["REG"]
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ REG = inputs["REG"]
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- resources = inputs["RESOURCE_NAMES"][REG]
- zones = inputs["R_ZONES"][REG]
- # Regulation contributions for each resource in each time step
- reg = value.(EP[:vREG][REG, :].data) * scale_factor
+ resources = inputs["RESOURCE_NAMES"][REG]
+ zones = inputs["R_ZONES"][REG]
+ # Regulation contributions for each resource in each time step
+ reg = value.(EP[:vREG][REG, :].data) * scale_factor
- dfReg = DataFrame(Resource = resources, Zone = zones)
- dfReg.AnnualSum = reg * inputs["omega"]
+ dfReg = DataFrame(Resource = resources, Zone = zones)
+ dfReg.AnnualSum = reg * inputs["omega"]
- filepath = joinpath(path, "reg.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfReg)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, reg, dfReg)
- end
- return nothing
+ filepath = joinpath(path, "reg.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfReg)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, reg, dfReg)
+ end
+ return nothing
end
diff --git a/src/write_outputs/reserves/write_rsv.jl b/src/write_outputs/reserves/write_rsv.jl
index ebfbca5725..f48b40c034 100644
--- a/src/write_outputs/reserves/write_rsv.jl
+++ b/src/write_outputs/reserves/write_rsv.jl
@@ -1,32 +1,37 @@
function write_rsv(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- RSV = inputs["RSV"]
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ T = inputs["T"] # Number of time steps (hours)
+ RSV = inputs["RSV"]
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- resources = inputs["RESOURCE_NAMES"][RSV]
- zones = inputs["R_ZONES"][RSV]
- rsv = value.(EP[:vRSV][RSV, :].data) * scale_factor
+ resources = inputs["RESOURCE_NAMES"][RSV]
+ zones = inputs["R_ZONES"][RSV]
+ rsv = value.(EP[:vRSV][RSV, :].data) * scale_factor
- dfRsv = DataFrame(Resource = resources, Zone = zones)
+ dfRsv = DataFrame(Resource = resources, Zone = zones)
- dfRsv.AnnualSum = rsv * inputs["omega"]
+ dfRsv.AnnualSum = rsv * inputs["omega"]
- if setup["WriteOutputs"] == "annual"
- write_annual(joinpath(path, "reg_dn.csv"), dfRsv)
- else # setup["WriteOutputs"] == "full"
- unmet_vec = value.(EP[:vUNMET_RSV]) * scale_factor
- total_unmet = sum(unmet_vec)
- dfRsv = hcat(dfRsv, DataFrame(rsv, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfRsv,auxNew_Names)
-
- total = DataFrame(["Total" 0 sum(dfRsv.AnnualSum) zeros(1, T)], :auto)
- unmet = DataFrame(["unmet" 0 total_unmet zeros(1, T)], :auto)
- total[!, 4:T+3] .= sum(rsv, dims = 1)
- unmet[!, 4:T+3] .= transpose(unmet_vec)
- rename!(total,auxNew_Names)
- rename!(unmet,auxNew_Names)
- dfRsv = vcat(dfRsv, unmet, total)
- CSV.write(joinpath(path, "reg_dn.csv"), dftranspose(dfRsv, false), writeheader=false)
- end
+ if setup["WriteOutputs"] == "annual"
+ write_annual(joinpath(path, "reg_dn.csv"), dfRsv)
+ else # setup["WriteOutputs"] == "full"
+ unmet_vec = value.(EP[:vUNMET_RSV]) * scale_factor
+ total_unmet = sum(unmet_vec)
+ dfRsv = hcat(dfRsv, DataFrame(rsv, :auto))
+ auxNew_Names = [Symbol("Resource");
+ Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfRsv, auxNew_Names)
+
+ total = DataFrame(["Total" 0 sum(dfRsv.AnnualSum) zeros(1, T)], :auto)
+ unmet = DataFrame(["unmet" 0 total_unmet zeros(1, T)], :auto)
+ total[!, 4:(T + 3)] .= sum(rsv, dims = 1)
+ unmet[!, 4:(T + 3)] .= transpose(unmet_vec)
+ rename!(total, auxNew_Names)
+ rename!(unmet, auxNew_Names)
+ dfRsv = vcat(dfRsv, unmet, total)
+ CSV.write(joinpath(path, "reg_dn.csv"),
+ dftranspose(dfRsv, false),
+ writeheader = false)
+ end
end
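
For readers scanning the reflowed `auxNew_Names` literal above: the semicolons splice the scalar symbols and the comprehension into one flat `Vector{Symbol}`. A small sketch with a hypothetical three-step horizon:

    T = 3                               # stand-in for inputs["T"]
    auxNew_Names = [Symbol("Resource"); Symbol("Zone"); Symbol("AnnualSum");
                    [Symbol("t$t") for t in 1:T]]
    # auxNew_Names == [:Resource, :Zone, :AnnualSum, :t1, :t2, :t3]
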
diff --git a/src/write_outputs/transmission/write_nw_expansion.jl b/src/write_outputs/transmission/write_nw_expansion.jl
index 973248950c..f89e1bfe1f 100644
--- a/src/write_outputs/transmission/write_nw_expansion.jl
+++ b/src/write_outputs/transmission/write_nw_expansion.jl
@@ -1,23 +1,23 @@
function write_nw_expansion(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- L = inputs["L"] # Number of transmission lines
+ L = inputs["L"] # Number of transmission lines
- # Transmission network reinforcements
- transcap = zeros(L)
- for i in 1:L
- if i in inputs["EXPANSION_LINES"]
- transcap[i] = value.(EP[:vNEW_TRANS_CAP][i])
- end
- end
+ # Transmission network reinforcements
+ transcap = zeros(L)
+ for i in 1:L
+ if i in inputs["EXPANSION_LINES"]
+ transcap[i] = value.(EP[:vNEW_TRANS_CAP][i])
+ end
+ end
- dfTransCap = DataFrame(
- Line = 1:L, New_Trans_Capacity = convert(Array{Float64}, transcap),
- Cost_Trans_Capacity = convert(Array{Float64}, transcap.*inputs["pC_Line_Reinforcement"]),
- )
+ dfTransCap = DataFrame(Line = 1:L,
+ New_Trans_Capacity = convert(Array{Float64}, transcap),
+ Cost_Trans_Capacity = convert(Array{Float64},
+ transcap .* inputs["pC_Line_Reinforcement"]))
- if setup["ParameterScale"] == 1
- dfTransCap.New_Trans_Capacity *= ModelScalingFactor # GW to MW
- dfTransCap.Cost_Trans_Capacity *= ModelScalingFactor^2 # MUSD to USD
- end
+ if setup["ParameterScale"] == 1
+ dfTransCap.New_Trans_Capacity *= ModelScalingFactor # GW to MW
+ dfTransCap.Cost_Trans_Capacity *= ModelScalingFactor^2 # MUSD to USD
+ end
- CSV.write(joinpath(path, "network_expansion.csv"), dfTransCap)
+ CSV.write(joinpath(path, "network_expansion.csv"), dfTransCap)
end
diff --git a/src/write_outputs/transmission/write_transmission_flows.jl b/src/write_outputs/transmission/write_transmission_flows.jl
index 74f6f779dc..8494a38511 100644
--- a/src/write_outputs/transmission/write_transmission_flows.jl
+++ b/src/write_outputs/transmission/write_transmission_flows.jl
@@ -1,25 +1,28 @@
-function write_transmission_flows(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- # Transmission related values
- T = inputs["T"] # Number of time steps (hours)
- L = inputs["L"] # Number of transmission lines
- # Power flows on transmission lines at each time step
- dfFlow = DataFrame(Line = 1:L)
- flow = value.(EP[:vFLOW])
- if setup["ParameterScale"] == 1
- flow *= ModelScalingFactor
- end
+function write_transmission_flows(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ # Transmission related values
+ T = inputs["T"] # Number of time steps (hours)
+ L = inputs["L"] # Number of transmission lines
+ # Power flows on transmission lines at each time step
+ dfFlow = DataFrame(Line = 1:L)
+ flow = value.(EP[:vFLOW])
+ if setup["ParameterScale"] == 1
+ flow *= ModelScalingFactor
+ end
- filepath = joinpath(path, "flow.csv")
- if setup["WriteOutputs"] == "annual"
- dfFlow.AnnualSum = flow * inputs["omega"]
- total = DataFrame(["Total" sum(dfFlow.AnnualSum)], [:Line, :AnnualSum])
- dfFlow = vcat(dfFlow, total)
- CSV.write(filepath, dfFlow)
- else # setup["WriteOutputs"] == "full"
- dfFlow = hcat(dfFlow, DataFrame(flow, :auto))
- auxNew_Names=[Symbol("Line");[Symbol("t$t") for t in 1:T]]
- rename!(dfFlow,auxNew_Names)
- CSV.write(filepath, dftranspose(dfFlow, false), writeheader=false)
- end
- return nothing
+ filepath = joinpath(path, "flow.csv")
+ if setup["WriteOutputs"] == "annual"
+ dfFlow.AnnualSum = flow * inputs["omega"]
+ total = DataFrame(["Total" sum(dfFlow.AnnualSum)], [:Line, :AnnualSum])
+ dfFlow = vcat(dfFlow, total)
+ CSV.write(filepath, dfFlow)
+ else # setup["WriteOutputs"] == "full"
+ dfFlow = hcat(dfFlow, DataFrame(flow, :auto))
+ auxNew_Names = [Symbol("Line"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfFlow, auxNew_Names)
+ CSV.write(filepath, dftranspose(dfFlow, false), writeheader = false)
+ end
+ return nothing
end
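
The `DataFrame(["Total" sum(dfFlow.AnnualSum)], [:Line, :AnnualSum])` call above builds a one-row totals frame from a 1x2 matrix literal (space-separated entries form a row). A minimal example with a made-up total:

    using DataFrames
    total = DataFrame(["Total" 123.4], [:Line, :AnnualSum])
    # 1x2 DataFrame: Line = "Total", AnnualSum = 123.4
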
diff --git a/src/write_outputs/transmission/write_transmission_losses.jl b/src/write_outputs/transmission/write_transmission_losses.jl
index 8f5bb51977..b82204acd0 100644
--- a/src/write_outputs/transmission/write_transmission_losses.jl
+++ b/src/write_outputs/transmission/write_transmission_losses.jl
@@ -1,29 +1,35 @@
-function write_transmission_losses(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- L = inputs["L"] # Number of transmission lines
- LOSS_LINES = inputs["LOSS_LINES"]
- # Power losses for transmission between zones at each time step
- dfTLosses = DataFrame(Line = 1:L)
- tlosses = zeros(L, T)
- tlosses[LOSS_LINES, :] = value.(EP[:vTLOSS][LOSS_LINES, :])
- if setup["ParameterScale"] == 1
- tlosses[LOSS_LINES, :] *= ModelScalingFactor
- end
+function write_transmission_losses(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ T = inputs["T"] # Number of time steps (hours)
+ L = inputs["L"] # Number of transmission lines
+ LOSS_LINES = inputs["LOSS_LINES"]
+ # Power losses for transmission between zones at each time step
+ dfTLosses = DataFrame(Line = 1:L)
+ tlosses = zeros(L, T)
+ tlosses[LOSS_LINES, :] = value.(EP[:vTLOSS][LOSS_LINES, :])
+ if setup["ParameterScale"] == 1
+ tlosses[LOSS_LINES, :] *= ModelScalingFactor
+ end
- dfTLosses.AnnualSum = tlosses * inputs["omega"]
-
- if setup["WriteOutputs"] == "annual"
- total = DataFrame(["Total" sum(dfTLosses.AnnualSum)], [:Line, :AnnualSum])
- dfTLosses = vcat(dfTLosses, total)
- CSV.write(joinpath(path, "tlosses.csv"), dfTLosses)
- else
- dfTLosses = hcat(dfTLosses, DataFrame(tlosses, :auto))
- auxNew_Names=[Symbol("Line");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfTLosses,auxNew_Names)
- total = DataFrame(["Total" sum(dfTLosses.AnnualSum) fill(0.0, (1,T))], auxNew_Names)
- total[:, 3:T+2] .= sum(tlosses, dims = 1)
- dfTLosses = vcat(dfTLosses, total)
- CSV.write(joinpath(path, "tlosses.csv"), dftranspose(dfTLosses, false), writeheader=false)
- end
- return nothing
+ dfTLosses.AnnualSum = tlosses * inputs["omega"]
+
+ if setup["WriteOutputs"] == "annual"
+ total = DataFrame(["Total" sum(dfTLosses.AnnualSum)], [:Line, :AnnualSum])
+ dfTLosses = vcat(dfTLosses, total)
+ CSV.write(joinpath(path, "tlosses.csv"), dfTLosses)
+ else
+ dfTLosses = hcat(dfTLosses, DataFrame(tlosses, :auto))
+ auxNew_Names = [Symbol("Line"); Symbol("AnnualSum"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfTLosses, auxNew_Names)
+ total = DataFrame(["Total" sum(dfTLosses.AnnualSum) fill(0.0, (1, T))],
+ auxNew_Names)
+ total[:, 3:(T + 2)] .= sum(tlosses, dims = 1)
+ dfTLosses = vcat(dfTLosses, total)
+ CSV.write(joinpath(path, "tlosses.csv"),
+ dftranspose(dfTLosses, false),
+ writeheader = false)
+ end
+ return nothing
end
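
One change in this hunk can look behavioral but is not: `3:T+2` and `3:(T + 2)` are the same range, because the range operator `:` binds more loosely than `+`; the formatter only adds parentheses for readability.

    T = 8760
    (3:T+2) == (3:(T + 2))    # true; both are the range 3:8762
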
diff --git a/src/write_outputs/ucommit/write_commit.jl b/src/write_outputs/ucommit/write_commit.jl
index 685ad53e0a..bf8e712640 100644
--- a/src/write_outputs/ucommit/write_commit.jl
+++ b/src/write_outputs/ucommit/write_commit.jl
@@ -1,15 +1,14 @@
function write_commit(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
-
- COMMIT = inputs["COMMIT"]
- T = inputs["T"]
+ COMMIT = inputs["COMMIT"]
+ T = inputs["T"]
- # Commitment state for each resource in each time step
- resources = inputs["RESOURCE_NAMES"][COMMIT]
- zones = inputs["R_ZONES"][COMMIT]
- commit = value.(EP[:vCOMMIT][COMMIT, :].data)
- dfCommit = DataFrame(Resource = resources, Zone = zones)
- dfCommit = hcat(dfCommit, DataFrame(commit, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfCommit,auxNew_Names)
- CSV.write(joinpath(path, "commit.csv"), dftranspose(dfCommit, false), header=false)
+ # Commitment state for each resource in each time step
+ resources = inputs["RESOURCE_NAMES"][COMMIT]
+ zones = inputs["R_ZONES"][COMMIT]
+ commit = value.(EP[:vCOMMIT][COMMIT, :].data)
+ dfCommit = DataFrame(Resource = resources, Zone = zones)
+ dfCommit = hcat(dfCommit, DataFrame(commit, :auto))
+ auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfCommit, auxNew_Names)
+ CSV.write(joinpath(path, "commit.csv"), dftranspose(dfCommit, false), header = false)
end
diff --git a/src/write_outputs/ucommit/write_shutdown.jl b/src/write_outputs/ucommit/write_shutdown.jl
index 56325b25f6..8a726a3367 100644
--- a/src/write_outputs/ucommit/write_shutdown.jl
+++ b/src/write_outputs/ucommit/write_shutdown.jl
@@ -1,19 +1,19 @@
function write_shutdown(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- # Operational decision variable states
- COMMIT = inputs["COMMIT"]
- zones = inputs["R_ZONES"][COMMIT]
- # Shutdown state for each resource in each time step
- shut = value.(EP[:vSHUT][COMMIT, :].data)
- resources = inputs["RESOURCE_NAMES"][COMMIT]
+ # Operational decision variable states
+ COMMIT = inputs["COMMIT"]
+ zones = inputs["R_ZONES"][COMMIT]
+ # Shutdown state for each resource in each time step
+ shut = value.(EP[:vSHUT][COMMIT, :].data)
+ resources = inputs["RESOURCE_NAMES"][COMMIT]
- dfShutdown = DataFrame(Resource = resources, Zone = zones)
- dfShutdown.AnnualSum = shut * inputs["omega"]
+ dfShutdown = DataFrame(Resource = resources, Zone = zones)
+ dfShutdown.AnnualSum = shut * inputs["omega"]
- filepath = joinpath(path, "shutdown.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfShutdown)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, shut, dfShutdown)
- end
- return nothing
+ filepath = joinpath(path, "shutdown.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfShutdown)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, shut, dfShutdown)
+ end
+ return nothing
end
diff --git a/src/write_outputs/ucommit/write_start.jl b/src/write_outputs/ucommit/write_start.jl
index 461d522a17..be23be46bd 100644
--- a/src/write_outputs/ucommit/write_start.jl
+++ b/src/write_outputs/ucommit/write_start.jl
@@ -1,19 +1,18 @@
function write_start(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ COMMIT = inputs["COMMIT"]
+ # Startup state for each resource in each time step
+ resources = inputs["RESOURCE_NAMES"][COMMIT]
+ zones = inputs["R_ZONES"][COMMIT]
- COMMIT = inputs["COMMIT"]
- # Startup state for each resource in each time step
- resources = inputs["RESOURCE_NAMES"][COMMIT]
- zones = inputs["R_ZONES"][COMMIT]
+ dfStart = DataFrame(Resource = resources, Zone = zones)
+ start = value.(EP[:vSTART][COMMIT, :].data)
+ dfStart.AnnualSum = start * inputs["omega"]
- dfStart = DataFrame(Resource = resources, Zone = zones)
- start = value.(EP[:vSTART][COMMIT, :].data)
- dfStart.AnnualSum = start * inputs["omega"]
-
- filepath = joinpath(path, "start.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfStart)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, start, dfStart)
- end
- return nothing
+ filepath = joinpath(path, "start.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfStart)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, start, dfStart)
+ end
+ return nothing
end
diff --git a/src/write_outputs/write_angles.jl b/src/write_outputs/write_angles.jl
index f638b37e52..b93870354f 100644
--- a/src/write_outputs/write_angles.jl
+++ b/src/write_outputs/write_angles.jl
@@ -4,17 +4,19 @@
Function for reporting the bus angles for each model zone and time step if the DC_OPF flag is activated
"""
function write_angles(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- dfAngles = DataFrame(Zone = 1:Z)
- angles = value.(EP[:vANGLE])
- dfAngles = hcat(dfAngles, DataFrame(angles, :auto))
+ dfAngles = DataFrame(Zone = 1:Z)
+ angles = value.(EP[:vANGLE])
+ dfAngles = hcat(dfAngles, DataFrame(angles, :auto))
- auxNew_Names=[Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfAngles,auxNew_Names)
+ auxNew_Names = [Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfAngles, auxNew_Names)
- ## Linear configuration final output
- CSV.write(joinpath(path, "angles.csv"), dftranspose(dfAngles, false), writeheader=false)
- return nothing
+ ## Linear configuration final output
+ CSV.write(joinpath(path, "angles.csv"),
+ dftranspose(dfAngles, false),
+ writeheader = false)
+ return nothing
end
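
The layout changes in these hunks (spaces around `=` in keyword arguments, one argument per line in long calls) are consistent with JuliaFormatter output under its SciML style. As a hedged sketch only, since the exact invocation used for this pull request is an assumption, such formatting can be reproduced with:

    using JuliaFormatter
    # Reformat one file in place with the SciML style (file path is illustrative).
    format("src/write_outputs/write_angles.jl", SciMLStyle())
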
diff --git a/src/write_outputs/write_capacity.jl b/src/write_outputs/write_capacity.jl
index f102ced874..99e4797ecc 100755
--- a/src/write_outputs/write_capacity.jl
+++ b/src/write_outputs/write_capacity.jl
@@ -4,129 +4,129 @@
 Function for writing the different capacities of the various generation technologies (starting or existing capacities, retired capacities, and new-built capacities).
"""
function write_capacity(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ gen = inputs["RESOURCES"]
- gen = inputs["RESOURCES"]
+ MultiStage = setup["MultiStage"]
- MultiStage = setup["MultiStage"]
-
- # Capacity decisions
- capdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["NEW_CAP"]
- if i in inputs["COMMIT"]
- capdischarge[i] = value(EP[:vCAP][i])*cap_size(gen[i])
- else
- capdischarge[i] = value(EP[:vCAP][i])
- end
- end
+ # Capacity decisions
+ capdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["NEW_CAP"]
+ if i in inputs["COMMIT"]
+ capdischarge[i] = value(EP[:vCAP][i]) * cap_size(gen[i])
+ else
+ capdischarge[i] = value(EP[:vCAP][i])
+ end
+ end
- retcapdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["RET_CAP"]
- if i in inputs["COMMIT"]
- retcapdischarge[i] = first(value.(EP[:vRETCAP][i]))*cap_size(gen[i])
- else
- retcapdischarge[i] = first(value.(EP[:vRETCAP][i]))
- end
- end
+ retcapdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["RET_CAP"]
+ if i in inputs["COMMIT"]
+ retcapdischarge[i] = first(value.(EP[:vRETCAP][i])) * cap_size(gen[i])
+ else
+ retcapdischarge[i] = first(value.(EP[:vRETCAP][i]))
+ end
+ end
- retrocapdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["RETROFIT_CAP"]
- if i in inputs["COMMIT"]
- retrocapdischarge[i] = first(value.(EP[:vRETROFITCAP][i])) * cap_size(gen[i])
- else
- retrocapdischarge[i] = first(value.(EP[:vRETROFITCAP][i]))
- end
- end
+ retrocapdischarge = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["RETROFIT_CAP"]
+ if i in inputs["COMMIT"]
+ retrocapdischarge[i] = first(value.(EP[:vRETROFITCAP][i])) * cap_size(gen[i])
+ else
+ retrocapdischarge[i] = first(value.(EP[:vRETROFITCAP][i]))
+ end
+ end
+ capacity_constraint_dual = zeros(size(inputs["RESOURCE_NAMES"]))
+ for y in ids_with_positive(gen, max_cap_mw)
+ capacity_constraint_dual[y] = -dual.(EP[:cMaxCap][y])
+ end
- capacity_constraint_dual = zeros(size(inputs["RESOURCE_NAMES"]))
- for y in ids_with_positive(gen, max_cap_mw)
- capacity_constraint_dual[y] = -dual.(EP[:cMaxCap][y])
- end
+ capcharge = zeros(size(inputs["RESOURCE_NAMES"]))
+ retcapcharge = zeros(size(inputs["RESOURCE_NAMES"]))
+ existingcapcharge = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["STOR_ASYMMETRIC"]
+ if i in inputs["NEW_CAP_CHARGE"]
+ capcharge[i] = value(EP[:vCAPCHARGE][i])
+ end
+ if i in inputs["RET_CAP_CHARGE"]
+ retcapcharge[i] = value(EP[:vRETCAPCHARGE][i])
+ end
+ existingcapcharge[i] = MultiStage == 1 ? value(EP[:vEXISTINGCAPCHARGE][i]) :
+ existing_charge_cap_mw(gen[i])
+ end
- capcharge = zeros(size(inputs["RESOURCE_NAMES"]))
- retcapcharge = zeros(size(inputs["RESOURCE_NAMES"]))
- existingcapcharge = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["STOR_ASYMMETRIC"]
- if i in inputs["NEW_CAP_CHARGE"]
- capcharge[i] = value(EP[:vCAPCHARGE][i])
- end
- if i in inputs["RET_CAP_CHARGE"]
- retcapcharge[i] = value(EP[:vRETCAPCHARGE][i])
- end
- existingcapcharge[i] = MultiStage == 1 ? value(EP[:vEXISTINGCAPCHARGE][i]) : existing_charge_cap_mw(gen[i])
- end
+ capenergy = zeros(size(inputs["RESOURCE_NAMES"]))
+ retcapenergy = zeros(size(inputs["RESOURCE_NAMES"]))
+ existingcapenergy = zeros(size(inputs["RESOURCE_NAMES"]))
+ for i in inputs["STOR_ALL"]
+ if i in inputs["NEW_CAP_ENERGY"]
+ capenergy[i] = value(EP[:vCAPENERGY][i])
+ end
+ if i in inputs["RET_CAP_ENERGY"]
+ retcapenergy[i] = value(EP[:vRETCAPENERGY][i])
+ end
+ existingcapenergy[i] = MultiStage == 1 ? value(EP[:vEXISTINGCAPENERGY][i]) :
+ existing_cap_mwh(gen[i])
+ end
+ if !isempty(inputs["VRE_STOR"])
+ for i in inputs["VS_STOR"]
+ if i in inputs["NEW_CAP_STOR"]
+ capenergy[i] = value(EP[:vCAPENERGY_VS][i])
+ end
+ if i in inputs["RET_CAP_STOR"]
+ retcapenergy[i] = value(EP[:vRETCAPENERGY_VS][i])
+ end
+ existingcapenergy[i] = existing_cap_mwh(gen[i]) # multistage functionality doesn't exist yet for VRE-storage resources
+ end
+ end
+ dfCap = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zone_id.(gen),
+ Retrofit_Id = retrofit_id.(gen),
+ StartCap = MultiStage == 1 ? value.(EP[:vEXISTINGCAP]) : existing_cap_mw.(gen),
+ RetCap = retcapdischarge[:],
+ RetroCap = retrocapdischarge[:], #### Need to change later
+ NewCap = capdischarge[:],
+ EndCap = value.(EP[:eTotalCap]),
+ CapacityConstraintDual = capacity_constraint_dual[:],
+ StartEnergyCap = existingcapenergy[:],
+ RetEnergyCap = retcapenergy[:],
+ NewEnergyCap = capenergy[:],
+ EndEnergyCap = existingcapenergy[:] - retcapenergy[:] + capenergy[:],
+ StartChargeCap = existingcapcharge[:],
+ RetChargeCap = retcapcharge[:],
+ NewChargeCap = capcharge[:],
+ EndChargeCap = existingcapcharge[:] - retcapcharge[:] + capcharge[:])
+ if setup["ParameterScale"] == 1
+ dfCap.StartCap = dfCap.StartCap * ModelScalingFactor
+ dfCap.RetCap = dfCap.RetCap * ModelScalingFactor
+ dfCap.RetroCap = dfCap.RetroCap * ModelScalingFactor
+ dfCap.NewCap = dfCap.NewCap * ModelScalingFactor
+ dfCap.EndCap = dfCap.EndCap * ModelScalingFactor
+ dfCap.CapacityConstraintDual = dfCap.CapacityConstraintDual * ModelScalingFactor
+ dfCap.StartEnergyCap = dfCap.StartEnergyCap * ModelScalingFactor
+ dfCap.RetEnergyCap = dfCap.RetEnergyCap * ModelScalingFactor
+ dfCap.NewEnergyCap = dfCap.NewEnergyCap * ModelScalingFactor
+ dfCap.EndEnergyCap = dfCap.EndEnergyCap * ModelScalingFactor
+ dfCap.StartChargeCap = dfCap.StartChargeCap * ModelScalingFactor
+ dfCap.RetChargeCap = dfCap.RetChargeCap * ModelScalingFactor
+ dfCap.NewChargeCap = dfCap.NewChargeCap * ModelScalingFactor
+ dfCap.EndChargeCap = dfCap.EndChargeCap * ModelScalingFactor
+ end
+ total = DataFrame(Resource = "Total", Zone = "n/a", Retrofit_Id = "n/a",
+ StartCap = sum(dfCap[!, :StartCap]), RetCap = sum(dfCap[!, :RetCap]),
+ NewCap = sum(dfCap[!, :NewCap]), EndCap = sum(dfCap[!, :EndCap]),
+ RetroCap = sum(dfCap[!, :RetroCap]),
+ CapacityConstraintDual = "n/a",
+ StartEnergyCap = sum(dfCap[!, :StartEnergyCap]),
+ RetEnergyCap = sum(dfCap[!, :RetEnergyCap]),
+ NewEnergyCap = sum(dfCap[!, :NewEnergyCap]),
+ EndEnergyCap = sum(dfCap[!, :EndEnergyCap]),
+ StartChargeCap = sum(dfCap[!, :StartChargeCap]),
+ RetChargeCap = sum(dfCap[!, :RetChargeCap]),
+ NewChargeCap = sum(dfCap[!, :NewChargeCap]),
+ EndChargeCap = sum(dfCap[!, :EndChargeCap]))
- capenergy = zeros(size(inputs["RESOURCE_NAMES"]))
- retcapenergy = zeros(size(inputs["RESOURCE_NAMES"]))
- existingcapenergy = zeros(size(inputs["RESOURCE_NAMES"]))
- for i in inputs["STOR_ALL"]
- if i in inputs["NEW_CAP_ENERGY"]
- capenergy[i] = value(EP[:vCAPENERGY][i])
- end
- if i in inputs["RET_CAP_ENERGY"]
- retcapenergy[i] = value(EP[:vRETCAPENERGY][i])
- end
- existingcapenergy[i] = MultiStage == 1 ? value(EP[:vEXISTINGCAPENERGY][i]) : existing_cap_mwh(gen[i])
- end
- if !isempty(inputs["VRE_STOR"])
- for i in inputs["VS_STOR"]
- if i in inputs["NEW_CAP_STOR"]
- capenergy[i] = value(EP[:vCAPENERGY_VS][i])
- end
- if i in inputs["RET_CAP_STOR"]
- retcapenergy[i] = value(EP[:vRETCAPENERGY_VS][i])
- end
- existingcapenergy[i] = existing_cap_mwh(gen[i]) # multistage functionality doesn't exist yet for VRE-storage resources
- end
- end
- dfCap = DataFrame(
- Resource = inputs["RESOURCE_NAMES"],
- Zone = zone_id.(gen),
- Retrofit_Id = retrofit_id.(gen),
- StartCap = MultiStage == 1 ? value.(EP[:vEXISTINGCAP]) : existing_cap_mw.(gen),
- RetCap = retcapdischarge[:],
- RetroCap = retrocapdischarge[:], #### Need to change later
- NewCap = capdischarge[:],
- EndCap = value.(EP[:eTotalCap]),
- CapacityConstraintDual = capacity_constraint_dual[:],
- StartEnergyCap = existingcapenergy[:],
- RetEnergyCap = retcapenergy[:],
- NewEnergyCap = capenergy[:],
- EndEnergyCap = existingcapenergy[:] - retcapenergy[:] + capenergy[:],
- StartChargeCap = existingcapcharge[:],
- RetChargeCap = retcapcharge[:],
- NewChargeCap = capcharge[:],
- EndChargeCap = existingcapcharge[:] - retcapcharge[:] + capcharge[:]
- )
- if setup["ParameterScale"] ==1
- dfCap.StartCap = dfCap.StartCap * ModelScalingFactor
- dfCap.RetCap = dfCap.RetCap * ModelScalingFactor
- dfCap.RetroCap = dfCap.RetroCap * ModelScalingFactor
- dfCap.NewCap = dfCap.NewCap * ModelScalingFactor
- dfCap.EndCap = dfCap.EndCap * ModelScalingFactor
- dfCap.CapacityConstraintDual = dfCap.CapacityConstraintDual * ModelScalingFactor
- dfCap.StartEnergyCap = dfCap.StartEnergyCap * ModelScalingFactor
- dfCap.RetEnergyCap = dfCap.RetEnergyCap * ModelScalingFactor
- dfCap.NewEnergyCap = dfCap.NewEnergyCap * ModelScalingFactor
- dfCap.EndEnergyCap = dfCap.EndEnergyCap * ModelScalingFactor
- dfCap.StartChargeCap = dfCap.StartChargeCap * ModelScalingFactor
- dfCap.RetChargeCap = dfCap.RetChargeCap * ModelScalingFactor
- dfCap.NewChargeCap = dfCap.NewChargeCap * ModelScalingFactor
- dfCap.EndChargeCap = dfCap.EndChargeCap * ModelScalingFactor
- end
- total = DataFrame(
- Resource = "Total", Zone = "n/a", Retrofit_Id = "n/a",
- StartCap = sum(dfCap[!,:StartCap]), RetCap = sum(dfCap[!,:RetCap]),
- NewCap = sum(dfCap[!,:NewCap]), EndCap = sum(dfCap[!,:EndCap]),
- RetroCap = sum(dfCap[!,:RetroCap]),
- CapacityConstraintDual = "n/a",
- StartEnergyCap = sum(dfCap[!,:StartEnergyCap]), RetEnergyCap = sum(dfCap[!,:RetEnergyCap]),
- NewEnergyCap = sum(dfCap[!,:NewEnergyCap]), EndEnergyCap = sum(dfCap[!,:EndEnergyCap]),
- StartChargeCap = sum(dfCap[!,:StartChargeCap]), RetChargeCap = sum(dfCap[!,:RetChargeCap]),
- NewChargeCap = sum(dfCap[!,:NewChargeCap]), EndChargeCap = sum(dfCap[!,:EndChargeCap])
- )
-
- dfCap = vcat(dfCap, total)
- CSV.write(joinpath(path, "capacity.csv"), dfCap)
- return dfCap
-end
\ No newline at end of file
+ dfCap = vcat(dfCap, total)
+ CSV.write(joinpath(path, "capacity.csv"), dfCap)
+ return dfCap
+end
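
Most of the capacity bookkeeping reformatted above follows one pattern: allocate a zero vector over all resources, then fill only the ids in a given set (new builds, retirements, retrofits). A self-contained sketch of that pattern with made-up numbers; none of these names are GenX API:

    n_resources = 5
    new_cap = zeros(n_resources)
    NEW_CAP = [2, 4]                        # ids allowed to build new capacity
    build = Dict(2 => 10.0, 4 => 3.5)       # stand-in for value(EP[:vCAP][i])
    for i in NEW_CAP
        new_cap[i] = build[i]               # untouched entries remain 0.0
    end
    new_cap                                 # [0.0, 10.0, 0.0, 3.5, 0.0]
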
diff --git a/src/write_outputs/write_capacityfactor.jl b/src/write_outputs/write_capacityfactor.jl
index 03c2a50e4b..d2b8e94f20 100644
--- a/src/write_outputs/write_capacityfactor.jl
+++ b/src/write_outputs/write_capacityfactor.jl
@@ -15,40 +15,62 @@ function write_capacityfactor(path::AbstractString, inputs::Dict, setup::Dict, E
ELECTROLYZER = inputs["ELECTROLYZER"]
VRE_STOR = inputs["VRE_STOR"]
- dfCapacityfactor = DataFrame(Resource=inputs["RESOURCE_NAMES"], Zone=zone_id.(gen), AnnualSum=zeros(G), Capacity=zeros(G), CapacityFactor=zeros(G))
+ dfCapacityfactor = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zone_id.(gen),
+ AnnualSum = zeros(G),
+ Capacity = zeros(G),
+ CapacityFactor = zeros(G))
scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
dfCapacityfactor.AnnualSum .= value.(EP[:vP]) * inputs["omega"] * scale_factor
dfCapacityfactor.Capacity .= value.(EP[:eTotalCap]) * scale_factor
if !isempty(VRE_STOR)
- SOLAR = setdiff(inputs["VS_SOLAR"],inputs["VS_WIND"])
- WIND = setdiff(inputs["VS_WIND"],inputs["VS_SOLAR"])
- SOLAR_WIND = intersect(inputs["VS_SOLAR"],inputs["VS_WIND"])
+ SOLAR = setdiff(inputs["VS_SOLAR"], inputs["VS_WIND"])
+ WIND = setdiff(inputs["VS_WIND"], inputs["VS_SOLAR"])
+ SOLAR_WIND = intersect(inputs["VS_SOLAR"], inputs["VS_WIND"])
gen_VRE_STOR = gen.VreStorage
if !isempty(SOLAR)
- dfCapacityfactor.AnnualSum[SOLAR] .= value.(EP[:vP_SOLAR][SOLAR, :]).data * inputs["omega"] * scale_factor
- dfCapacityfactor.Capacity[SOLAR] .= value.(EP[:eTotalCap_SOLAR][SOLAR]).data * scale_factor
+ dfCapacityfactor.AnnualSum[SOLAR] .= value.(EP[:vP_SOLAR][SOLAR, :]).data *
+ inputs["omega"] * scale_factor
+ dfCapacityfactor.Capacity[SOLAR] .= value.(EP[:eTotalCap_SOLAR][SOLAR]).data *
+ scale_factor
end
if !isempty(WIND)
- dfCapacityfactor.AnnualSum[WIND] .= value.(EP[:vP_WIND][WIND, :]).data * inputs["omega"] * scale_factor
- dfCapacityfactor.Capacity[WIND] .= value.(EP[:eTotalCap_WIND][WIND]).data * scale_factor
+ dfCapacityfactor.AnnualSum[WIND] .= value.(EP[:vP_WIND][WIND, :]).data *
+ inputs["omega"] * scale_factor
+ dfCapacityfactor.Capacity[WIND] .= value.(EP[:eTotalCap_WIND][WIND]).data *
+ scale_factor
end
if !isempty(SOLAR_WIND)
- dfCapacityfactor.AnnualSum[SOLAR_WIND] .= (value.(EP[:vP_WIND][SOLAR_WIND, :]).data
- + value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.!=0)])) * inputs["omega"] * scale_factor
- dfCapacityfactor.Capacity[SOLAR_WIND] .= (value.(EP[:eTotalCap_WIND][SOLAR_WIND]).data + value.(EP[:eTotalCap_SOLAR][SOLAR_WIND]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.!=0)])) * scale_factor
+ dfCapacityfactor.AnnualSum[SOLAR_WIND] .= (value.(EP[:vP_WIND][SOLAR_WIND,
+ :]).data
+ +
+ value.(EP[:vP_SOLAR][SOLAR_WIND,
+ :]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.wind .!= 0) .& (gen_VRE_STOR.solar .!= 0)])) *
+ inputs["omega"] * scale_factor
+ dfCapacityfactor.Capacity[SOLAR_WIND] .= (value.(EP[:eTotalCap_WIND][SOLAR_WIND]).data +
+ value.(EP[:eTotalCap_SOLAR][SOLAR_WIND]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.wind .!= 0) .& (gen_VRE_STOR.solar .!= 0)])) *
+ scale_factor
end
end
     # We only calculate the resulting capacity factor for resources with total capacity > 1 MW and total generation > 1 MWh
- EXISTING = intersect(findall(x -> x >= 1, dfCapacityfactor.AnnualSum), findall(x -> x >= 1, dfCapacityfactor.Capacity))
+ EXISTING = intersect(findall(x -> x >= 1, dfCapacityfactor.AnnualSum),
+ findall(x -> x >= 1, dfCapacityfactor.Capacity))
     # We calculate the capacity factor for thermal, VRE, hydro, and must-run resources, but not for storage and flexible demand
CF_GEN = intersect(union(THERM_ALL, VRE, HYDRO_RES, MUST_RUN, VRE_STOR), EXISTING)
- dfCapacityfactor.CapacityFactor[CF_GEN] .= (dfCapacityfactor.AnnualSum[CF_GEN] ./ dfCapacityfactor.Capacity[CF_GEN]) / sum(inputs["omega"][t] for t in 1:T)
+ dfCapacityfactor.CapacityFactor[CF_GEN] .= (dfCapacityfactor.AnnualSum[CF_GEN] ./
+ dfCapacityfactor.Capacity[CF_GEN]) /
+ sum(inputs["omega"][t] for t in 1:T)
# Capacity factor for electrolyzers is based on vUSE variable not vP
if (!isempty(ELECTROLYZER))
- dfCapacityfactor.AnnualSum[ELECTROLYZER] .= value.(EP[:vUSE][ELECTROLYZER, :]).data * inputs["omega"] * scale_factor
- dfCapacityfactor.CapacityFactor[ELECTROLYZER] .= (dfCapacityfactor.AnnualSum[ELECTROLYZER] ./ dfCapacityfactor.Capacity[ELECTROLYZER]) / sum(inputs["omega"][t] for t in 1:T)
+ dfCapacityfactor.AnnualSum[ELECTROLYZER] .= value.(EP[:vUSE][ELECTROLYZER,
+ :]).data * inputs["omega"] * scale_factor
+ dfCapacityfactor.CapacityFactor[ELECTROLYZER] .= (dfCapacityfactor.AnnualSum[ELECTROLYZER] ./
+ dfCapacityfactor.Capacity[ELECTROLYZER]) /
+ sum(inputs["omega"][t] for t in 1:T)
end
CSV.write(joinpath(path, "capacityfactor.csv"), dfCapacityfactor)
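
The capacity-factor expression above divides annual weighted generation by capacity and then by the total weighted hours. With toy numbers:

    annual_sum = 4380.0        # MWh over the weighted year
    capacity = 1.0             # MW
    total_hours = 8760.0       # sum(inputs["omega"][t] for t in 1:T)
    cf = (annual_sum / capacity) / total_hours    # 0.5
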
diff --git a/src/write_outputs/write_charge.jl b/src/write_outputs/write_charge.jl
index 74d00ad65a..1e0e835633 100644
--- a/src/write_outputs/write_charge.jl
+++ b/src/write_outputs/write_charge.jl
@@ -4,42 +4,44 @@
Function for writing the charging energy values of the different storage technologies.
"""
function write_charge(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- ELECTROLYZER = inputs["ELECTROLYZER"]
- VRE_STOR = inputs["VRE_STOR"]
- VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
-
- # Power withdrawn to charge each resource in each time step
- dfCharge = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones, AnnualSum = Array{Union{Missing,Float64}}(undef, G))
- charge = zeros(G,T)
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ ELECTROLYZER = inputs["ELECTROLYZER"]
+ VRE_STOR = inputs["VRE_STOR"]
+ VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- if !isempty(STOR_ALL)
- charge[STOR_ALL, :] = value.(EP[:vCHARGE][STOR_ALL, :]) * scale_factor
- end
- if !isempty(FLEX)
- charge[FLEX, :] = value.(EP[:vCHARGE_FLEX][FLEX, :]) * scale_factor
- end
- if !isempty(ELECTROLYZER)
- charge[ELECTROLYZER, :] = value.(EP[:vUSE][ELECTROLYZER, :]) * scale_factor
- end
- if !isempty(VS_STOR)
- charge[VS_STOR, :] = value.(EP[:vCHARGE_VRE_STOR][VS_STOR, :]) * scale_factor
- end
+ # Power withdrawn to charge each resource in each time step
+ dfCharge = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ AnnualSum = Array{Union{Missing, Float64}}(undef, G))
+ charge = zeros(G, T)
- dfCharge.AnnualSum .= charge * inputs["omega"]
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ if !isempty(STOR_ALL)
+ charge[STOR_ALL, :] = value.(EP[:vCHARGE][STOR_ALL, :]) * scale_factor
+ end
+ if !isempty(FLEX)
+ charge[FLEX, :] = value.(EP[:vCHARGE_FLEX][FLEX, :]) * scale_factor
+ end
+ if !isempty(ELECTROLYZER)
+ charge[ELECTROLYZER, :] = value.(EP[:vUSE][ELECTROLYZER, :]) * scale_factor
+ end
+ if !isempty(VS_STOR)
+ charge[VS_STOR, :] = value.(EP[:vCHARGE_VRE_STOR][VS_STOR, :]) * scale_factor
+ end
- filepath = joinpath(path, "charge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfCharge)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, charge, dfCharge)
- end
- return nothing
+ dfCharge.AnnualSum .= charge * inputs["omega"]
+
+ filepath = joinpath(path, "charge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfCharge)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, charge, dfCharge)
+ end
+ return nothing
end
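
The `charge * inputs["omega"]` expression above is a plain matrix-vector product: a G-by-T matrix of hourly values times the T-vector of hour weights gives one weighted annual total per resource. A two-resource, two-step sketch:

    charge = [1.0 2.0;          # resource 1, hours 1 and 2
              3.0 4.0]          # resource 2
    omega = [10.0, 20.0]        # hours represented by each model time step
    annual = charge * omega     # [1*10 + 2*20, 3*10 + 4*20] == [50.0, 110.0]
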
diff --git a/src/write_outputs/write_charging_cost.jl b/src/write_outputs/write_charging_cost.jl
index 7c2c84a812..00410b6b59 100644
--- a/src/write_outputs/write_charging_cost.jl
+++ b/src/write_outputs/write_charging_cost.jl
@@ -1,38 +1,46 @@
function write_charging_cost(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
+ gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- ELECTROLYZER = inputs["ELECTROLYZER"]
- VRE_STOR = inputs["VRE_STOR"]
- VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ ELECTROLYZER = inputs["ELECTROLYZER"]
+ VRE_STOR = inputs["VRE_STOR"]
+ VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
price = locational_marginal_price(EP, inputs, setup)
- dfChargingcost = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters, AnnualSum = Array{Float64}(undef, G),)
- chargecost = zeros(G, T)
- if !isempty(STOR_ALL)
- chargecost[STOR_ALL, :] .= (value.(EP[:vCHARGE][STOR_ALL, :]).data) .* transpose(price)[zone_id.(gen.Storage), :]
- end
- if !isempty(FLEX)
- chargecost[FLEX, :] .= value.(EP[:vP][FLEX, :]) .* transpose(price)[zone_id.(gen.FlexDemand), :]
- end
- if !isempty(ELECTROLYZER)
- chargecost[ELECTROLYZER, :] .= (value.(EP[:vUSE][ELECTROLYZER, :]).data) .* transpose(price)[zone_id.(gen.Electrolyzer), :]
- end
- if !isempty(VS_STOR)
- chargecost[VS_STOR, :] .= value.(EP[:vCHARGE_VRE_STOR][VS_STOR, :].data) .* transpose(price)[zone_id.(gen[VS_STOR]), :]
- end
- if setup["ParameterScale"] == 1
- chargecost *= ModelScalingFactor
- end
- dfChargingcost.AnnualSum .= chargecost * inputs["omega"]
- write_simple_csv(joinpath(path, "ChargingCost.csv"), dfChargingcost)
- return dfChargingcost
+ dfChargingcost = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters,
+ AnnualSum = Array{Float64}(undef, G))
+ chargecost = zeros(G, T)
+ if !isempty(STOR_ALL)
+ chargecost[STOR_ALL, :] .= (value.(EP[:vCHARGE][STOR_ALL, :]).data) .*
+ transpose(price)[zone_id.(gen.Storage), :]
+ end
+ if !isempty(FLEX)
+ chargecost[FLEX, :] .= value.(EP[:vP][FLEX, :]) .*
+ transpose(price)[zone_id.(gen.FlexDemand), :]
+ end
+ if !isempty(ELECTROLYZER)
+ chargecost[ELECTROLYZER, :] .= (value.(EP[:vUSE][ELECTROLYZER, :]).data) .*
+ transpose(price)[zone_id.(gen.Electrolyzer), :]
+ end
+ if !isempty(VS_STOR)
+ chargecost[VS_STOR, :] .= value.(EP[:vCHARGE_VRE_STOR][VS_STOR, :].data) .*
+ transpose(price)[zone_id.(gen[VS_STOR]), :]
+ end
+ if setup["ParameterScale"] == 1
+ chargecost *= ModelScalingFactor
+ end
+ dfChargingcost.AnnualSum .= chargecost * inputs["omega"]
+ write_simple_csv(joinpath(path, "ChargingCost.csv"), dfChargingcost)
+ return dfChargingcost
end
diff --git a/src/write_outputs/write_co2.jl b/src/write_outputs/write_co2.jl
index c737652323..050cf91c04 100644
--- a/src/write_outputs/write_co2.jl
+++ b/src/write_outputs/write_co2.jl
@@ -9,13 +9,17 @@ function write_co2(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
write_co2_capture_plant(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
end
-
-function write_co2_emissions_plant(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+function write_co2_emissions_plant(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
gen = inputs["RESOURCES"]
G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
# CO2 emissions by plant
- dfEmissions_plant = DataFrame(Resource=inputs["RESOURCE_NAMES"], Zone=zone_id.(gen), AnnualSum=zeros(G))
+ dfEmissions_plant = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zone_id.(gen),
+ AnnualSum = zeros(G))
emissions_plant = value.(EP[:eEmissionsByPlant])
if setup["ParameterScale"] == 1
@@ -26,7 +30,7 @@ function write_co2_emissions_plant(path::AbstractString, inputs::Dict, setup::Di
filepath = joinpath(path, "emissions_plant.csv")
if setup["WriteOutputs"] == "annual"
write_annual(filepath, dfEmissions_plant)
- else # setup["WriteOutputs"] == "full"
+ else # setup["WriteOutputs"] == "full"
write_fulltimeseries(filepath, emissions_plant, dfEmissions_plant)
end
return nothing
@@ -39,7 +43,9 @@ function write_co2_capture_plant(path::AbstractString, inputs::Dict, setup::Dict
T = inputs["T"] # Number of time steps (hours)
Z = inputs["Z"] # Number of zones
- dfCapturedEmissions_plant = DataFrame(Resource=inputs["RESOURCE_NAMES"][CCS], Zone=zone_id.(gen[CCS]), AnnualSum=zeros(length(CCS)))
+ dfCapturedEmissions_plant = DataFrame(Resource = inputs["RESOURCE_NAMES"][CCS],
+ Zone = zone_id.(gen[CCS]),
+ AnnualSum = zeros(length(CCS)))
if !isempty(CCS)
# Captured CO2 emissions by plant
emissions_captured_plant = (value.(EP[:eEmissionsCaptureByPlant]).data)
@@ -53,8 +59,10 @@ function write_co2_capture_plant(path::AbstractString, inputs::Dict, setup::Dict
if setup["WriteOutputs"] == "annual"
write_annual(filepath, dfCapturedEmissions_plant)
else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, emissions_captured_plant, dfCapturedEmissions_plant)
+ write_fulltimeseries(filepath,
+ emissions_captured_plant,
+ dfCapturedEmissions_plant)
end
return nothing
end
-end
\ No newline at end of file
+end
diff --git a/src/write_outputs/write_costs.jl b/src/write_outputs/write_costs.jl
index 8cbe60e5c9..055979a126 100644
--- a/src/write_outputs/write_costs.jl
+++ b/src/write_outputs/write_costs.jl
@@ -4,246 +4,310 @@
Function for writing the costs pertaining to the objective function (fixed, variable O&M etc.).
"""
function write_costs(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- ## Cost results
- gen = inputs["RESOURCES"]
- SEG = inputs["SEG"] # Number of lines
- Z = inputs["Z"] # Number of zones
- T = inputs["T"] # Number of time steps (hours)
- VRE_STOR = inputs["VRE_STOR"]
- ELECTROLYZER = inputs["ELECTROLYZER"]
-
- cost_list = ["cTotal", "cFix", "cVar", "cFuel" ,"cNSE", "cStart", "cUnmetRsv", "cNetworkExp", "cUnmetPolicyPenalty", "cCO2"]
- if !isempty(VRE_STOR)
- push!(cost_list, "cGridConnection")
- end
- if !isempty(ELECTROLYZER)
- push!(cost_list, "cHydrogenRevenue")
- end
- dfCost = DataFrame(Costs = cost_list)
-
- cVar = value(EP[:eTotalCVarOut])+ (!isempty(inputs["STOR_ALL"]) ? value(EP[:eTotalCVarIn]) : 0.0) + (!isempty(inputs["FLEX"]) ? value(EP[:eTotalCVarFlexIn]) : 0.0)
- cFix = value(EP[:eTotalCFix]) + (!isempty(inputs["STOR_ALL"]) ? value(EP[:eTotalCFixEnergy]) : 0.0) + (!isempty(inputs["STOR_ASYMMETRIC"]) ? value(EP[:eTotalCFixCharge]) : 0.0)
-
- cFuel = value.(EP[:eTotalCFuelOut])
-
- if !isempty(VRE_STOR)
- cFix += ((!isempty(inputs["VS_DC"]) ? value(EP[:eTotalCFixDC]) : 0.0) + (!isempty(inputs["VS_SOLAR"]) ? value(EP[:eTotalCFixSolar]) : 0.0) + (!isempty(inputs["VS_WIND"]) ? value(EP[:eTotalCFixWind]) : 0.0))
- cVar += ((!isempty(inputs["VS_SOLAR"]) ? value(EP[:eTotalCVarOutSolar]) : 0.0) + (!isempty(inputs["VS_WIND"]) ? value(EP[:eTotalCVarOutWind]) : 0.0))
- if !isempty(inputs["VS_STOR"])
- cFix += ((!isempty(inputs["VS_STOR"]) ? value(EP[:eTotalCFixStor]) : 0.0) + (!isempty(inputs["VS_ASYM_DC_CHARGE"]) ? value(EP[:eTotalCFixCharge_DC]) : 0.0) + (!isempty(inputs["VS_ASYM_DC_DISCHARGE"]) ? value(EP[:eTotalCFixDischarge_DC]) : 0.0) + (!isempty(inputs["VS_ASYM_AC_CHARGE"]) ? value(EP[:eTotalCFixCharge_AC]) : 0.0) + (!isempty(inputs["VS_ASYM_AC_DISCHARGE"]) ? value(EP[:eTotalCFixDischarge_AC]) : 0.0))
- cVar += (!isempty(inputs["VS_STOR"]) ? value(EP[:eTotalCVarStor]) : 0.0)
- end
- total_cost =[value(EP[:eObj]), cFix, cVar, cFuel, value(EP[:eTotalCNSE]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
- else
- total_cost = [value(EP[:eObj]), cFix, cVar, cFuel, value(EP[:eTotalCNSE]), 0.0, 0.0, 0.0, 0.0, 0.0]
- end
-
- if !isempty(ELECTROLYZER)
- push!(total_cost,(!isempty(inputs["ELECTROLYZER"]) ? -1*value(EP[:eTotalHydrogenValue]) : 0.0))
- end
-
- dfCost[!,Symbol("Total")] = total_cost
-
- if setup["ParameterScale"] == 1
- dfCost.Total *= ModelScalingFactor^2
- end
-
- if setup["UCommit"]>=1
- dfCost[6,2] = value(EP[:eTotalCStart]) + value(EP[:eTotalCFuelStart])
- end
-
- if setup["OperationalReserves"]==1
- dfCost[7,2] = value(EP[:eTotalCRsvPen])
- end
-
- if setup["NetworkExpansion"] == 1 && Z > 1
- dfCost[8,2] = value(EP[:eTotalCNetworkExp])
- end
-
- if haskey(inputs, "dfCapRes_slack")
- dfCost[9,2] += value(EP[:eCTotalCapResSlack])
- end
-
- if haskey(inputs, "dfESR_slack")
- dfCost[9,2] += value(EP[:eCTotalESRSlack])
- end
-
- if haskey(inputs, "dfCO2Cap_slack")
- dfCost[9,2] += value(EP[:eCTotalCO2CapSlack])
- end
-
- if haskey(inputs, "MinCapPriceCap")
- dfCost[9,2] += value(EP[:eTotalCMinCapSlack])
- end
-
- if !isempty(VRE_STOR)
- dfCost[!,2][11] = value(EP[:eTotalCGrid]) * (setup["ParameterScale"] == 1 ? ModelScalingFactor^2 : 1)
- end
-
- if any(co2_capture_fraction.(gen) .!= 0)
- dfCost[10,2] += value(EP[:eTotaleCCO2Sequestration])
- end
-
- if setup["ParameterScale"] == 1
- dfCost[6,2] *= ModelScalingFactor^2
- dfCost[7,2] *= ModelScalingFactor^2
- dfCost[8,2] *= ModelScalingFactor^2
- dfCost[9,2] *= ModelScalingFactor^2
- dfCost[10,2] *= ModelScalingFactor^2
- end
-
- for z in 1:Z
- tempCTotal = 0.0
- tempCFix = 0.0
- tempCVar = 0.0
- tempCFuel = 0.0
- tempCStart = 0.0
- tempCNSE = 0.0
- tempHydrogenValue = 0.0
- tempCCO2 = 0.0
-
- Y_ZONE = resources_in_zone_by_rid(gen,z)
- STOR_ALL_ZONE = intersect(inputs["STOR_ALL"], Y_ZONE)
- STOR_ASYMMETRIC_ZONE = intersect(inputs["STOR_ASYMMETRIC"], Y_ZONE)
- FLEX_ZONE = intersect(inputs["FLEX"], Y_ZONE)
- COMMIT_ZONE = intersect(inputs["COMMIT"], Y_ZONE)
- ELECTROLYZERS_ZONE = intersect(inputs["ELECTROLYZER"], Y_ZONE)
- CCS_ZONE = intersect(inputs["CCS"], Y_ZONE)
-
- eCFix = sum(value.(EP[:eCFix][Y_ZONE]))
- tempCFix += eCFix
- tempCTotal += eCFix
-
- tempCVar = sum(value.(EP[:eCVar_out][Y_ZONE,:]))
- tempCTotal += tempCVar
-
- tempCFuel = sum(value.(EP[:ePlantCFuelOut][Y_ZONE,:]))
- tempCTotal += tempCFuel
-
- if !isempty(STOR_ALL_ZONE)
- eCVar_in = sum(value.(EP[:eCVar_in][STOR_ALL_ZONE,:]))
- tempCVar += eCVar_in
- eCFixEnergy = sum(value.(EP[:eCFixEnergy][STOR_ALL_ZONE]))
- tempCFix += eCFixEnergy
- tempCTotal += eCVar_in + eCFixEnergy
- end
- if !isempty(STOR_ASYMMETRIC_ZONE)
- eCFixCharge = sum(value.(EP[:eCFixCharge][STOR_ASYMMETRIC_ZONE]))
- tempCFix += eCFixCharge
- tempCTotal += eCFixCharge
- end
- if !isempty(FLEX_ZONE)
- eCVarFlex_in = sum(value.(EP[:eCVarFlex_in][FLEX_ZONE,:]))
- tempCVar += eCVarFlex_in
- tempCTotal += eCVarFlex_in
- end
- if !isempty(VRE_STOR)
- gen_VRE_STOR = gen.VreStorage
- Y_ZONE_VRE_STOR = resources_in_zone_by_rid(gen_VRE_STOR, z)
-
- # Fixed Costs
- eCFix_VRE_STOR = 0.0
- SOLAR_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_SOLAR"])
- if !isempty(SOLAR_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixSolar][SOLAR_ZONE_VRE_STOR]))
- end
- WIND_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_WIND"])
- if !isempty(WIND_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixWind][WIND_ZONE_VRE_STOR]))
- end
- DC_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_DC"])
- if !isempty(DC_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixDC][DC_ZONE_VRE_STOR]))
- end
- STOR_ALL_ZONE_VRE_STOR = intersect(inputs["VS_STOR"], Y_ZONE_VRE_STOR)
- if !isempty(STOR_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixEnergy_VS][STOR_ALL_ZONE_VRE_STOR]))
- DC_CHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_DC_CHARGE"], Y_ZONE_VRE_STOR)
- if !isempty(DC_CHARGE_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixCharge_DC][DC_CHARGE_ALL_ZONE_VRE_STOR]))
- end
- DC_DISCHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_DC_DISCHARGE"], Y_ZONE_VRE_STOR)
- if !isempty(DC_DISCHARGE_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixDischarge_DC][DC_DISCHARGE_ALL_ZONE_VRE_STOR]))
- end
- AC_DISCHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_AC_DISCHARGE"], Y_ZONE_VRE_STOR)
- if !isempty(AC_DISCHARGE_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixDischarge_AC][AC_DISCHARGE_ALL_ZONE_VRE_STOR]))
- end
- AC_CHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_AC_CHARGE"], Y_ZONE_VRE_STOR)
- if !isempty(AC_CHARGE_ALL_ZONE_VRE_STOR)
- eCFix_VRE_STOR += sum(value.(EP[:eCFixCharge_AC][AC_CHARGE_ALL_ZONE_VRE_STOR]))
- end
- end
- tempCFix += eCFix_VRE_STOR
-
- # Variable Costs
- eCVar_VRE_STOR = 0.0
- if !isempty(SOLAR_ZONE_VRE_STOR)
- eCVar_VRE_STOR += sum(value.(EP[:eCVarOutSolar][SOLAR_ZONE_VRE_STOR,:]))
- end
- if !isempty(WIND_ZONE_VRE_STOR)
- eCVar_VRE_STOR += sum(value.(EP[:eCVarOutWind][WIND_ZONE_VRE_STOR, :]))
- end
- if !isempty(STOR_ALL_ZONE_VRE_STOR)
- vom_map = Dict(
- DC_CHARGE_ALL_ZONE_VRE_STOR => :eCVar_Charge_DC,
- DC_DISCHARGE_ALL_ZONE_VRE_STOR => :eCVar_Discharge_DC,
- AC_DISCHARGE_ALL_ZONE_VRE_STOR => :eCVar_Discharge_AC,
- AC_CHARGE_ALL_ZONE_VRE_STOR => :eCVar_Charge_AC
- )
- for (set, symbol) in vom_map
- if !isempty(set)
- eCVar_VRE_STOR += sum(value.(EP[symbol][set, :]))
- end
- end
- end
- tempCVar += eCVar_VRE_STOR
-
- # Total Added Costs
- tempCTotal += (eCFix_VRE_STOR + eCVar_VRE_STOR)
- end
-
- if setup["UCommit"] >= 1 && !isempty(COMMIT_ZONE)
- eCStart = sum(value.(EP[:eCStart][COMMIT_ZONE,:])) + sum(value.(EP[:ePlantCFuelStart][COMMIT_ZONE,:]))
- tempCStart += eCStart
- tempCTotal += eCStart
- end
-
- if !isempty(ELECTROLYZERS_ZONE)
- tempHydrogenValue = -1*sum(value.(EP[:eHydrogenValue][ELECTROLYZERS_ZONE,:]))
- tempCTotal += tempHydrogenValue
- end
-
-
- tempCNSE = sum(value.(EP[:eCNSE][:,:,z]))
- tempCTotal += tempCNSE
-
- # if any(dfGen.CO2_Capture_Fraction .!=0)
- if !isempty(CCS_ZONE)
- tempCCO2 = sum(value.(EP[:ePlantCCO2Sequestration][CCS_ZONE]))
- tempCTotal += tempCCO2
- end
-
- if setup["ParameterScale"] == 1
- tempCTotal *= ModelScalingFactor^2
- tempCFix *= ModelScalingFactor^2
- tempCVar *= ModelScalingFactor^2
- tempCFuel *= ModelScalingFactor^2
- tempCNSE *= ModelScalingFactor^2
- tempCStart *= ModelScalingFactor^2
- tempHydrogenValue *= ModelScalingFactor^2
- tempCCO2 *= ModelScalingFactor^2
- end
- temp_cost_list = [tempCTotal, tempCFix, tempCVar, tempCFuel,tempCNSE, tempCStart, "-", "-", "-", tempCCO2]
- if !isempty(VRE_STOR)
- push!(temp_cost_list, "-")
- end
- if !isempty(ELECTROLYZERS_ZONE)
- push!(temp_cost_list,tempHydrogenValue)
- end
-
- dfCost[!,Symbol("Zone$z")] = temp_cost_list
- end
- CSV.write(joinpath(path, "costs.csv"), dfCost)
+ ## Cost results
+ gen = inputs["RESOURCES"]
+ SEG = inputs["SEG"] # Number of lines
+ Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ VRE_STOR = inputs["VRE_STOR"]
+ ELECTROLYZER = inputs["ELECTROLYZER"]
+
+ cost_list = [
+ "cTotal",
+ "cFix",
+ "cVar",
+ "cFuel",
+ "cNSE",
+ "cStart",
+ "cUnmetRsv",
+ "cNetworkExp",
+ "cUnmetPolicyPenalty",
+ "cCO2",
+ ]
+ if !isempty(VRE_STOR)
+ push!(cost_list, "cGridConnection")
+ end
+ if !isempty(ELECTROLYZER)
+ push!(cost_list, "cHydrogenRevenue")
+ end
+ dfCost = DataFrame(Costs = cost_list)
+
+ cVar = value(EP[:eTotalCVarOut]) +
+ (!isempty(inputs["STOR_ALL"]) ? value(EP[:eTotalCVarIn]) : 0.0) +
+ (!isempty(inputs["FLEX"]) ? value(EP[:eTotalCVarFlexIn]) : 0.0)
+ cFix = value(EP[:eTotalCFix]) +
+ (!isempty(inputs["STOR_ALL"]) ? value(EP[:eTotalCFixEnergy]) : 0.0) +
+ (!isempty(inputs["STOR_ASYMMETRIC"]) ? value(EP[:eTotalCFixCharge]) : 0.0)
+
+ cFuel = value.(EP[:eTotalCFuelOut])
+
+ if !isempty(VRE_STOR)
+ cFix += ((!isempty(inputs["VS_DC"]) ? value(EP[:eTotalCFixDC]) : 0.0) +
+ (!isempty(inputs["VS_SOLAR"]) ? value(EP[:eTotalCFixSolar]) : 0.0) +
+ (!isempty(inputs["VS_WIND"]) ? value(EP[:eTotalCFixWind]) : 0.0))
+ cVar += ((!isempty(inputs["VS_SOLAR"]) ? value(EP[:eTotalCVarOutSolar]) : 0.0) +
+ (!isempty(inputs["VS_WIND"]) ? value(EP[:eTotalCVarOutWind]) : 0.0))
+ if !isempty(inputs["VS_STOR"])
+ cFix += ((!isempty(inputs["VS_STOR"]) ? value(EP[:eTotalCFixStor]) : 0.0) +
+ (!isempty(inputs["VS_ASYM_DC_CHARGE"]) ?
+ value(EP[:eTotalCFixCharge_DC]) : 0.0) +
+ (!isempty(inputs["VS_ASYM_DC_DISCHARGE"]) ?
+ value(EP[:eTotalCFixDischarge_DC]) : 0.0) +
+ (!isempty(inputs["VS_ASYM_AC_CHARGE"]) ?
+ value(EP[:eTotalCFixCharge_AC]) : 0.0) +
+ (!isempty(inputs["VS_ASYM_AC_DISCHARGE"]) ?
+ value(EP[:eTotalCFixDischarge_AC]) : 0.0))
+ cVar += (!isempty(inputs["VS_STOR"]) ? value(EP[:eTotalCVarStor]) : 0.0)
+ end
+ total_cost = [
+ value(EP[:eObj]),
+ cFix,
+ cVar,
+ cFuel,
+ value(EP[:eTotalCNSE]),
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ ]
+ else
+ total_cost = [
+ value(EP[:eObj]),
+ cFix,
+ cVar,
+ cFuel,
+ value(EP[:eTotalCNSE]),
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ ]
+ end
+
+ if !isempty(ELECTROLYZER)
+ push!(total_cost,
+ (!isempty(inputs["ELECTROLYZER"]) ? -1 * value(EP[:eTotalHydrogenValue]) : 0.0))
+ end
+
+ dfCost[!, Symbol("Total")] = total_cost
+
+ if setup["ParameterScale"] == 1
+ dfCost.Total *= ModelScalingFactor^2
+ end
+
+ if setup["UCommit"] >= 1
+ dfCost[6, 2] = value(EP[:eTotalCStart]) + value(EP[:eTotalCFuelStart])
+ end
+
+ if setup["OperationalReserves"] == 1
+ dfCost[7, 2] = value(EP[:eTotalCRsvPen])
+ end
+
+ if setup["NetworkExpansion"] == 1 && Z > 1
+ dfCost[8, 2] = value(EP[:eTotalCNetworkExp])
+ end
+
+ if haskey(inputs, "dfCapRes_slack")
+ dfCost[9, 2] += value(EP[:eCTotalCapResSlack])
+ end
+
+ if haskey(inputs, "dfESR_slack")
+ dfCost[9, 2] += value(EP[:eCTotalESRSlack])
+ end
+
+ if haskey(inputs, "dfCO2Cap_slack")
+ dfCost[9, 2] += value(EP[:eCTotalCO2CapSlack])
+ end
+
+ if haskey(inputs, "MinCapPriceCap")
+ dfCost[9, 2] += value(EP[:eTotalCMinCapSlack])
+ end
+
+ if !isempty(VRE_STOR)
+ dfCost[!, 2][11] = value(EP[:eTotalCGrid]) *
+ (setup["ParameterScale"] == 1 ? ModelScalingFactor^2 : 1)
+ end
+
+ if any(co2_capture_fraction.(gen) .!= 0)
+ dfCost[10, 2] += value(EP[:eTotaleCCO2Sequestration])
+ end
+
+ if setup["ParameterScale"] == 1
+ dfCost[6, 2] *= ModelScalingFactor^2
+ dfCost[7, 2] *= ModelScalingFactor^2
+ dfCost[8, 2] *= ModelScalingFactor^2
+ dfCost[9, 2] *= ModelScalingFactor^2
+ dfCost[10, 2] *= ModelScalingFactor^2
+ end
+
+ for z in 1:Z
+ tempCTotal = 0.0
+ tempCFix = 0.0
+ tempCVar = 0.0
+ tempCFuel = 0.0
+ tempCStart = 0.0
+ tempCNSE = 0.0
+ tempHydrogenValue = 0.0
+ tempCCO2 = 0.0
+
+ Y_ZONE = resources_in_zone_by_rid(gen, z)
+ STOR_ALL_ZONE = intersect(inputs["STOR_ALL"], Y_ZONE)
+ STOR_ASYMMETRIC_ZONE = intersect(inputs["STOR_ASYMMETRIC"], Y_ZONE)
+ FLEX_ZONE = intersect(inputs["FLEX"], Y_ZONE)
+ COMMIT_ZONE = intersect(inputs["COMMIT"], Y_ZONE)
+ ELECTROLYZERS_ZONE = intersect(inputs["ELECTROLYZER"], Y_ZONE)
+ CCS_ZONE = intersect(inputs["CCS"], Y_ZONE)
+
+ eCFix = sum(value.(EP[:eCFix][Y_ZONE]))
+ tempCFix += eCFix
+ tempCTotal += eCFix
+
+ tempCVar = sum(value.(EP[:eCVar_out][Y_ZONE, :]))
+ tempCTotal += tempCVar
+
+ tempCFuel = sum(value.(EP[:ePlantCFuelOut][Y_ZONE, :]))
+ tempCTotal += tempCFuel
+
+ if !isempty(STOR_ALL_ZONE)
+ eCVar_in = sum(value.(EP[:eCVar_in][STOR_ALL_ZONE, :]))
+ tempCVar += eCVar_in
+ eCFixEnergy = sum(value.(EP[:eCFixEnergy][STOR_ALL_ZONE]))
+ tempCFix += eCFixEnergy
+ tempCTotal += eCVar_in + eCFixEnergy
+ end
+ if !isempty(STOR_ASYMMETRIC_ZONE)
+ eCFixCharge = sum(value.(EP[:eCFixCharge][STOR_ASYMMETRIC_ZONE]))
+ tempCFix += eCFixCharge
+ tempCTotal += eCFixCharge
+ end
+ if !isempty(FLEX_ZONE)
+ eCVarFlex_in = sum(value.(EP[:eCVarFlex_in][FLEX_ZONE, :]))
+ tempCVar += eCVarFlex_in
+ tempCTotal += eCVarFlex_in
+ end
+ if !isempty(VRE_STOR)
+ gen_VRE_STOR = gen.VreStorage
+ Y_ZONE_VRE_STOR = resources_in_zone_by_rid(gen_VRE_STOR, z)
+
+ # Fixed Costs
+ eCFix_VRE_STOR = 0.0
+ SOLAR_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_SOLAR"])
+ if !isempty(SOLAR_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixSolar][SOLAR_ZONE_VRE_STOR]))
+ end
+ WIND_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_WIND"])
+ if !isempty(WIND_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixWind][WIND_ZONE_VRE_STOR]))
+ end
+ DC_ZONE_VRE_STOR = intersect(Y_ZONE_VRE_STOR, inputs["VS_DC"])
+ if !isempty(DC_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixDC][DC_ZONE_VRE_STOR]))
+ end
+ STOR_ALL_ZONE_VRE_STOR = intersect(inputs["VS_STOR"], Y_ZONE_VRE_STOR)
+ if !isempty(STOR_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixEnergy_VS][STOR_ALL_ZONE_VRE_STOR]))
+ DC_CHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_DC_CHARGE"],
+ Y_ZONE_VRE_STOR)
+ if !isempty(DC_CHARGE_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixCharge_DC][DC_CHARGE_ALL_ZONE_VRE_STOR]))
+ end
+ DC_DISCHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ Y_ZONE_VRE_STOR)
+ if !isempty(DC_DISCHARGE_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixDischarge_DC][DC_DISCHARGE_ALL_ZONE_VRE_STOR]))
+ end
+ AC_DISCHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_AC_DISCHARGE"],
+ Y_ZONE_VRE_STOR)
+ if !isempty(AC_DISCHARGE_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixDischarge_AC][AC_DISCHARGE_ALL_ZONE_VRE_STOR]))
+ end
+ AC_CHARGE_ALL_ZONE_VRE_STOR = intersect(inputs["VS_ASYM_AC_CHARGE"],
+ Y_ZONE_VRE_STOR)
+ if !isempty(AC_CHARGE_ALL_ZONE_VRE_STOR)
+ eCFix_VRE_STOR += sum(value.(EP[:eCFixCharge_AC][AC_CHARGE_ALL_ZONE_VRE_STOR]))
+ end
+ end
+ tempCFix += eCFix_VRE_STOR
+
+ # Variable Costs
+ eCVar_VRE_STOR = 0.0
+ if !isempty(SOLAR_ZONE_VRE_STOR)
+ eCVar_VRE_STOR += sum(value.(EP[:eCVarOutSolar][SOLAR_ZONE_VRE_STOR, :]))
+ end
+ if !isempty(WIND_ZONE_VRE_STOR)
+ eCVar_VRE_STOR += sum(value.(EP[:eCVarOutWind][WIND_ZONE_VRE_STOR, :]))
+ end
+ if !isempty(STOR_ALL_ZONE_VRE_STOR)
+ vom_map = Dict(DC_CHARGE_ALL_ZONE_VRE_STOR => :eCVar_Charge_DC,
+ DC_DISCHARGE_ALL_ZONE_VRE_STOR => :eCVar_Discharge_DC,
+ AC_DISCHARGE_ALL_ZONE_VRE_STOR => :eCVar_Discharge_AC,
+ AC_CHARGE_ALL_ZONE_VRE_STOR => :eCVar_Charge_AC)
+ for (set, symbol) in vom_map
+ if !isempty(set)
+ eCVar_VRE_STOR += sum(value.(EP[symbol][set, :]))
+ end
+ end
+ end
+ tempCVar += eCVar_VRE_STOR
+
+ # Total Added Costs
+ tempCTotal += (eCFix_VRE_STOR + eCVar_VRE_STOR)
+ end
+
+ if setup["UCommit"] >= 1 && !isempty(COMMIT_ZONE)
+ eCStart = sum(value.(EP[:eCStart][COMMIT_ZONE, :])) +
+ sum(value.(EP[:ePlantCFuelStart][COMMIT_ZONE, :]))
+ tempCStart += eCStart
+ tempCTotal += eCStart
+ end
+
+ if !isempty(ELECTROLYZERS_ZONE)
+ tempHydrogenValue = -1 * sum(value.(EP[:eHydrogenValue][ELECTROLYZERS_ZONE, :]))
+ tempCTotal += tempHydrogenValue
+ end
+
+ tempCNSE = sum(value.(EP[:eCNSE][:, :, z]))
+ tempCTotal += tempCNSE
+
+ # if any(dfGen.CO2_Capture_Fraction .!=0)
+ if !isempty(CCS_ZONE)
+ tempCCO2 = sum(value.(EP[:ePlantCCO2Sequestration][CCS_ZONE]))
+ tempCTotal += tempCCO2
+ end
+
+ if setup["ParameterScale"] == 1
+ tempCTotal *= ModelScalingFactor^2
+ tempCFix *= ModelScalingFactor^2
+ tempCVar *= ModelScalingFactor^2
+ tempCFuel *= ModelScalingFactor^2
+ tempCNSE *= ModelScalingFactor^2
+ tempCStart *= ModelScalingFactor^2
+ tempHydrogenValue *= ModelScalingFactor^2
+ tempCCO2 *= ModelScalingFactor^2
+ end
+ temp_cost_list = [
+ tempCTotal,
+ tempCFix,
+ tempCVar,
+ tempCFuel,
+ tempCNSE,
+ tempCStart,
+ "-",
+ "-",
+ "-",
+ tempCCO2,
+ ]
+ if !isempty(VRE_STOR)
+ push!(temp_cost_list, "-")
+ end
+ if !isempty(ELECTROLYZERS_ZONE)
+ push!(temp_cost_list, tempHydrogenValue)
+ end
+
+ dfCost[!, Symbol("Zone$z")] = temp_cost_list
+ end
+ CSV.write(joinpath(path, "costs.csv"), dfCost)
end
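
The write_costs block above multiplies each cost row by ModelScalingFactor^2 whenever setup["ParameterScale"] == 1, since both capacities and prices enter the model in scaled units. A minimal standalone sketch of that convention (the helper name scale_costs and the 10^3 factor are illustrative assumptions, not GenX API):

    # Illustrative sketch: when inputs are scaled to GW/GWh, cost terms (capacity x price)
    # carry the scaling factor squared, so they are multiplied back out before writing.
    const MODEL_SCALING_FACTOR = 1_000.0   # assumed value, for illustration only

    scale_costs(costs::Vector{Float64}, parameter_scale::Bool) =
        parameter_scale ? costs .* MODEL_SCALING_FACTOR^2 : costs

    scale_costs([1.0, 2.5], true)   # -> [1.0e6, 2.5e6]
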
diff --git a/src/write_outputs/write_curtailment.jl b/src/write_outputs/write_curtailment.jl
index 6cb151f448..8ee244f105 100644
--- a/src/write_outputs/write_curtailment.jl
+++ b/src/write_outputs/write_curtailment.jl
@@ -5,42 +5,59 @@ Function for writing the curtailment values of the different variable renewable
co-located).
"""
function write_curtailment(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- VRE = inputs["VRE"]
- dfCurtailment = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zone_id.(gen), AnnualSum = zeros(G))
- curtailment = zeros(G, T)
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- curtailment[VRE, :] = scale_factor * (value.(EP[:eTotalCap][VRE]) .* inputs["pP_Max"][VRE, :] .- value.(EP[:vP][VRE, :]))
-
- VRE_STOR = inputs["VRE_STOR"]
- if !isempty(VRE_STOR)
- SOLAR = setdiff(inputs["VS_SOLAR"],inputs["VS_WIND"])
- WIND = setdiff(inputs["VS_WIND"],inputs["VS_SOLAR"])
- SOLAR_WIND = intersect(inputs["VS_SOLAR"],inputs["VS_WIND"])
- gen_VRE_STOR = gen.VreStorage
- if !isempty(SOLAR)
- curtailment[SOLAR, :] = scale_factor * (value.(EP[:eTotalCap_SOLAR][SOLAR]).data .* inputs["pP_Max_Solar"][SOLAR, :] .- value.(EP[:vP_SOLAR][SOLAR, :]).data) .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar.!=0)])
- end
- if !isempty(WIND)
- curtailment[WIND, :] = scale_factor * (value.(EP[:eTotalCap_WIND][WIND]).data .* inputs["pP_Max_Wind"][WIND, :] .- value.(EP[:vP_WIND][WIND, :]).data)
- end
- if !isempty(SOLAR_WIND)
- curtailment[SOLAR_WIND, :] = scale_factor * ((value.(EP[:eTotalCap_SOLAR])[SOLAR_WIND].data
- .* inputs["pP_Max_Solar"][SOLAR_WIND, :] .- value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data)
- .* etainverter.(gen_VRE_STOR[((gen_VRE_STOR.wind.!=0) .& (gen_VRE_STOR.solar.!=0))])
- + (value.(EP[:eTotalCap_WIND][SOLAR_WIND]).data .* inputs["pP_Max_Wind"][SOLAR_WIND, :] .- value.(EP[:vP_WIND][SOLAR_WIND, :]).data))
- end
- end
+ gen = inputs["RESOURCES"]
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ VRE = inputs["VRE"]
+ dfCurtailment = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zone_id.(gen),
+ AnnualSum = zeros(G))
+ curtailment = zeros(G, T)
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ curtailment[VRE, :] = scale_factor *
+ (value.(EP[:eTotalCap][VRE]) .* inputs["pP_Max"][VRE, :] .-
+ value.(EP[:vP][VRE, :]))
- dfCurtailment.AnnualSum = curtailment * inputs["omega"]
+ VRE_STOR = inputs["VRE_STOR"]
+ if !isempty(VRE_STOR)
+ SOLAR = setdiff(inputs["VS_SOLAR"], inputs["VS_WIND"])
+ WIND = setdiff(inputs["VS_WIND"], inputs["VS_SOLAR"])
+ SOLAR_WIND = intersect(inputs["VS_SOLAR"], inputs["VS_WIND"])
+ gen_VRE_STOR = gen.VreStorage
+ if !isempty(SOLAR)
+ curtailment[SOLAR, :] = scale_factor *
+ (value.(EP[:eTotalCap_SOLAR][SOLAR]).data .*
+ inputs["pP_Max_Solar"][SOLAR, :] .-
+ value.(EP[:vP_SOLAR][SOLAR, :]).data) .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar .!= 0)])
+ end
+ if !isempty(WIND)
+ curtailment[WIND, :] = scale_factor * (value.(EP[:eTotalCap_WIND][WIND]).data .*
+ inputs["pP_Max_Wind"][WIND, :] .-
+ value.(EP[:vP_WIND][WIND, :]).data)
+ end
+ if !isempty(SOLAR_WIND)
+ curtailment[SOLAR_WIND, :] = scale_factor *
+ ((value.(EP[:eTotalCap_SOLAR])[SOLAR_WIND].data
+ .*
+ inputs["pP_Max_Solar"][SOLAR_WIND, :] .-
+ value.(EP[:vP_SOLAR][SOLAR_WIND, :]).data)
+ .*
+ etainverter.(gen_VRE_STOR[((gen_VRE_STOR.wind .!= 0) .& (gen_VRE_STOR.solar .!= 0))])
+ +
+ (value.(EP[:eTotalCap_WIND][SOLAR_WIND]).data .*
+ inputs["pP_Max_Wind"][SOLAR_WIND, :] .-
+ value.(EP[:vP_WIND][SOLAR_WIND, :]).data))
+ end
+ end
- filename = joinpath(path, "curtail.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filename, dfCurtailment)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filename, curtailment, dfCurtailment)
- end
- return nothing
+ dfCurtailment.AnnualSum = curtailment * inputs["omega"]
+
+ filename = joinpath(path, "curtail.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filename, dfCurtailment)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filename, curtailment, dfCurtailment)
+ end
+ return nothing
end
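
write_curtailment above computes curtailment as available potential (installed capacity times the hourly availability profile) minus realized dispatch, then weights the hours by omega for the annual sum. A toy version of the same arithmetic with made-up numbers:

    # Two resources, two hours; all numbers are illustrative.
    capacity = [100.0, 50.0]            # MW
    availability = [0.8 0.5; 1.0 0.9]   # capacity factor, resources x hours
    dispatch = [70.0 40.0; 45.0 45.0]   # realized output, resources x hours
    omega = [1.0, 1.0]                  # hour weights

    curtailment = capacity .* availability .- dispatch   # [10.0 10.0; 5.0 0.0]
    annual_curtailment = curtailment * omega             # [20.0, 5.0]
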
diff --git a/src/write_outputs/write_emissions.jl b/src/write_outputs/write_emissions.jl
index f4aaa00550..60758c085f 100644
--- a/src/write_outputs/write_emissions.jl
+++ b/src/write_outputs/write_emissions.jl
@@ -5,92 +5,123 @@ Function for reporting time-dependent CO$_2$ emissions by zone.
"""
function write_emissions(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
-
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- if (setup["WriteShadowPrices"]==1 || setup["UCommit"]==0 || (setup["UCommit"]==2 && (setup["OperationalReserves"]==0 || (setup["OperationalReserves"]>0 && inputs["pDynamic_Contingency"]==0)))) # fully linear model
- # CO2 emissions by zone
+ if (setup["WriteShadowPrices"] == 1 || setup["UCommit"] == 0 ||
+ (setup["UCommit"] == 2 && (setup["OperationalReserves"] == 0 ||
+ (setup["OperationalReserves"] > 0 && inputs["pDynamic_Contingency"] == 0)))) # fully linear model
+ # CO2 emissions by zone
- if setup["CO2Cap"]>=1
- # Dual variable of CO2 constraint = shadow price of CO2
- tempCO2Price = zeros(Z,inputs["NCO2Cap"])
- if has_duals(EP) == 1
- for cap in 1:inputs["NCO2Cap"]
- for z in findall(x->x==1, inputs["dfCO2CapZones"][:,cap])
- tempCO2Price[z,cap] = (-1) * dual.(EP[:cCO2Emissions_systemwide])[cap]
- # when scaled, The objective function is in unit of Million US$/kton, thus k$/ton, to get $/ton, multiply 1000
- tempCO2Price[z,cap] *= scale_factor
- end
- end
- end
- dfEmissions = hcat(DataFrame(Zone = 1:Z), DataFrame(tempCO2Price, :auto), DataFrame(AnnualSum = Array{Float64}(undef, Z)))
- auxNew_Names=[Symbol("Zone"); [Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]]; Symbol("AnnualSum")]
- rename!(dfEmissions,auxNew_Names)
- else
- dfEmissions = DataFrame(Zone = 1:Z, AnnualSum = Array{Float64}(undef, Z))
- end
+ if setup["CO2Cap"] >= 1
+ # Dual variable of CO2 constraint = shadow price of CO2
+ tempCO2Price = zeros(Z, inputs["NCO2Cap"])
+ if has_duals(EP) == 1
+ for cap in 1:inputs["NCO2Cap"]
+ for z in findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])
+ tempCO2Price[z, cap] = (-1) *
+ dual.(EP[:cCO2Emissions_systemwide])[cap]
+                        # When scaled, the objective function is in units of Million US$/kton (i.e., k$/ton); multiply by 1000 to get $/ton.
+ tempCO2Price[z, cap] *= scale_factor
+ end
+ end
+ end
+ dfEmissions = hcat(DataFrame(Zone = 1:Z),
+ DataFrame(tempCO2Price, :auto),
+ DataFrame(AnnualSum = Array{Float64}(undef, Z)))
+ auxNew_Names = [Symbol("Zone");
+ [Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];
+ Symbol("AnnualSum")]
+ rename!(dfEmissions, auxNew_Names)
+ else
+ dfEmissions = DataFrame(Zone = 1:Z, AnnualSum = Array{Float64}(undef, Z))
+ end
- emissions_by_zone = value.(EP[:eEmissionsByZone])
- for i in 1:Z
- dfEmissions[i,:AnnualSum] = sum(inputs["omega"] .* emissions_by_zone[i,:]) * scale_factor
- end
+ emissions_by_zone = value.(EP[:eEmissionsByZone])
+ for i in 1:Z
+ dfEmissions[i, :AnnualSum] = sum(inputs["omega"] .* emissions_by_zone[i, :]) *
+ scale_factor
+ end
- if setup["WriteOutputs"] == "annual"
- total = DataFrame(["Total" sum(dfEmissions.AnnualSum)], [:Zone;:AnnualSum])
- if setup["CO2Cap"]>=1
- total = DataFrame(["Total" zeros(1,inputs["NCO2Cap"]) sum(dfEmissions.AnnualSum)], [:Zone;[Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];:AnnualSum])
- end
- dfEmissions = vcat(dfEmissions, total)
- CSV.write(joinpath(path, "emissions.csv"), dfEmissions)
- else # setup["WriteOutputs"] == "full"
- dfEmissions = hcat(dfEmissions, DataFrame(emissions_by_zone * scale_factor, :auto))
- if setup["CO2Cap"]>=1
- auxNew_Names=[Symbol("Zone");[Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfEmissions,auxNew_Names)
- total = DataFrame(["Total" zeros(1,inputs["NCO2Cap"]) sum(dfEmissions[!,:AnnualSum]) fill(0.0, (1,T))], :auto)
- for t in 1:T
- total[:,t+inputs["NCO2Cap"]+2] .= sum(dfEmissions[:,Symbol("t$t")][1:Z])
- end
- else
- auxNew_Names=[Symbol("Zone"); Symbol("AnnualSum"); [Symbol("t$t") for t in 1:T]]
- rename!(dfEmissions,auxNew_Names)
- total = DataFrame(["Total" sum(dfEmissions[!,:AnnualSum]) fill(0.0, (1,T))], :auto)
- for t in 1:T
- total[:,t+2] .= sum(dfEmissions[:,Symbol("t$t")][1:Z])
- end
- end
- rename!(total,auxNew_Names)
- dfEmissions = vcat(dfEmissions, total)
- CSV.write(joinpath(path, "emissions.csv"), dftranspose(dfEmissions, false), writeheader=false)
- end
-## Aaron - Combined elseif setup["Dual_MIP"]==1 block with the first block since they were identical. Why do we have this third case? What is different about it?
- else
- # CO2 emissions by zone
- emissions_by_zone = value.(EP[:eEmissionsByZone])
- dfEmissions = hcat(DataFrame(Zone = 1:Z), DataFrame(AnnualSum = Array{Float64}(undef, Z)))
- for i in 1:Z
- dfEmissions[i,:AnnualSum] = sum(inputs["omega"] .* emissions_by_zone[i,:]) * scale_factor
- end
+ if setup["WriteOutputs"] == "annual"
+ total = DataFrame(["Total" sum(dfEmissions.AnnualSum)], [:Zone; :AnnualSum])
+ if setup["CO2Cap"] >= 1
+ total = DataFrame(["Total" zeros(1, inputs["NCO2Cap"]) sum(dfEmissions.AnnualSum)],
+ [:Zone;
+ [Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];
+ :AnnualSum])
+ end
+ dfEmissions = vcat(dfEmissions, total)
+ CSV.write(joinpath(path, "emissions.csv"), dfEmissions)
+        else # setup["WriteOutputs"] == "full"
+ dfEmissions = hcat(dfEmissions,
+ DataFrame(emissions_by_zone * scale_factor, :auto))
+ if setup["CO2Cap"] >= 1
+ auxNew_Names = [Symbol("Zone");
+ [Symbol("CO2_Price_$cap") for cap in 1:inputs["NCO2Cap"]];
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfEmissions, auxNew_Names)
+ total = DataFrame(["Total" zeros(1, inputs["NCO2Cap"]) sum(dfEmissions[!,
+ :AnnualSum]) fill(0.0, (1, T))],
+ :auto)
+ for t in 1:T
+ total[:, t + inputs["NCO2Cap"] + 2] .= sum(dfEmissions[:,
+ Symbol("t$t")][1:Z])
+ end
+ else
+ auxNew_Names = [Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfEmissions, auxNew_Names)
+ total = DataFrame(["Total" sum(dfEmissions[!, :AnnualSum]) fill(0.0,
+ (1, T))],
+ :auto)
+ for t in 1:T
+ total[:, t + 2] .= sum(dfEmissions[:, Symbol("t$t")][1:Z])
+ end
+ end
+ rename!(total, auxNew_Names)
+ dfEmissions = vcat(dfEmissions, total)
+ CSV.write(joinpath(path, "emissions.csv"),
+ dftranspose(dfEmissions, false),
+ writeheader = false)
+ end
+ ## Aaron - Combined elseif setup["Dual_MIP"]==1 block with the first block since they were identical. Why do we have this third case? What is different about it?
+ else
+ # CO2 emissions by zone
+ emissions_by_zone = value.(EP[:eEmissionsByZone])
+ dfEmissions = hcat(DataFrame(Zone = 1:Z),
+ DataFrame(AnnualSum = Array{Float64}(undef, Z)))
+ for i in 1:Z
+ dfEmissions[i, :AnnualSum] = sum(inputs["omega"] .* emissions_by_zone[i, :]) *
+ scale_factor
+ end
- if setup["WriteOutputs"] == "annual"
- total = DataFrame(["Total" sum(dfEmissions.AnnualSum)], [:Zone;:AnnualSum])
- dfEmissions = vcat(dfEmissions, total)
- CSV.write(joinpath(path, "emissions.csv"), dfEmissions)
- else # setup["WriteOutputs"] == "full"
- dfEmissions = hcat(dfEmissions, DataFrame(emissions_by_zone * scale_factor, :auto))
- auxNew_Names=[Symbol("Zone");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfEmissions,auxNew_Names)
- total = DataFrame(["Total" sum(dfEmissions[!,:AnnualSum]) fill(0.0, (1,T))], :auto)
- for t in 1:T
- total[:,t+2] .= sum(dfEmissions[:,Symbol("t$t")][1:Z])
- end
- rename!(total,auxNew_Names)
- dfEmissions = vcat(dfEmissions, total)
- CSV.write(joinpath(path, "emissions.csv"), dftranspose(dfEmissions, false), writeheader=false)
- end
- end
- return nothing
+ if setup["WriteOutputs"] == "annual"
+ total = DataFrame(["Total" sum(dfEmissions.AnnualSum)], [:Zone; :AnnualSum])
+ dfEmissions = vcat(dfEmissions, total)
+ CSV.write(joinpath(path, "emissions.csv"), dfEmissions)
+        else # setup["WriteOutputs"] == "full"
+ dfEmissions = hcat(dfEmissions,
+ DataFrame(emissions_by_zone * scale_factor, :auto))
+ auxNew_Names = [Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfEmissions, auxNew_Names)
+ total = DataFrame(["Total" sum(dfEmissions[!, :AnnualSum]) fill(0.0, (1, T))],
+ :auto)
+ for t in 1:T
+ total[:, t + 2] .= sum(dfEmissions[:, Symbol("t$t")][1:Z])
+ end
+ rename!(total, auxNew_Names)
+ dfEmissions = vcat(dfEmissions, total)
+ CSV.write(joinpath(path, "emissions.csv"),
+ dftranspose(dfEmissions, false),
+ writeheader = false)
+ end
+ end
+ return nothing
end
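
write_emissions above recovers the CO2 price as minus the dual of the system-wide emissions-cap constraint, then rescales it when ParameterScale is on. A toy two-generator dispatch LP showing that sign convention (HiGHS is assumed here only as an example LP solver; the numbers are made up):

    using JuMP, HiGHS

    m = Model(HiGHS.Optimizer)
    set_silent(m)
    @variable(m, 0 <= clean <= 60)      # MWh at 50 $/MWh, zero emissions
    @variable(m, 0 <= dirty <= 100)     # MWh at 20 $/MWh, 1 tCO2/MWh
    @constraint(m, demand, clean + dirty == 100)
    @constraint(m, co2cap, dirty <= 70)          # emissions cap in tonnes
    @objective(m, Min, 50clean + 20dirty)
    optimize!(m)

    co2_price = -dual(co2cap)   # 30.0 $/t: cost saved per tonne of cap relaxation
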
diff --git a/src/write_outputs/write_energy_revenue.jl b/src/write_outputs/write_energy_revenue.jl
index 92168c52f1..3e0834bd1e 100644
--- a/src/write_outputs/write_energy_revenue.jl
+++ b/src/write_outputs/write_energy_revenue.jl
@@ -4,26 +4,32 @@
Function for writing energy revenue from the different generation technologies.
"""
function write_energy_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
+ gen = inputs["RESOURCES"]
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
- FLEX = inputs["FLEX"]
- NONFLEX = setdiff(collect(1:G), FLEX)
- dfEnergyRevenue = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters, AnnualSum = Array{Float64}(undef, G),)
- energyrevenue = zeros(G, T)
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+ FLEX = inputs["FLEX"]
+ NONFLEX = setdiff(collect(1:G), FLEX)
+ dfEnergyRevenue = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters,
+ AnnualSum = Array{Float64}(undef, G))
+ energyrevenue = zeros(G, T)
price = locational_marginal_price(EP, inputs, setup)
- energyrevenue[NONFLEX, :] = value.(EP[:vP][NONFLEX, :]) .* transpose(price)[zone_id.(gen[NONFLEX]), :]
- if !isempty(FLEX)
- energyrevenue[FLEX, :] = value.(EP[:vCHARGE_FLEX][FLEX, :]).data .* transpose(price)[zone_id.(gen[FLEX]), :]
- end
- if setup["ParameterScale"] == 1
- energyrevenue *= ModelScalingFactor
- end
- dfEnergyRevenue.AnnualSum .= energyrevenue * inputs["omega"]
- write_simple_csv(joinpath(path, "EnergyRevenue.csv"), dfEnergyRevenue)
- return dfEnergyRevenue
+ energyrevenue[NONFLEX, :] = value.(EP[:vP][NONFLEX, :]) .*
+ transpose(price)[zone_id.(gen[NONFLEX]), :]
+ if !isempty(FLEX)
+ energyrevenue[FLEX, :] = value.(EP[:vCHARGE_FLEX][FLEX, :]).data .*
+ transpose(price)[zone_id.(gen[FLEX]), :]
+ end
+ if setup["ParameterScale"] == 1
+ energyrevenue *= ModelScalingFactor
+ end
+ dfEnergyRevenue.AnnualSum .= energyrevenue * inputs["omega"]
+ write_simple_csv(joinpath(path, "EnergyRevenue.csv"), dfEnergyRevenue)
+ return dfEnergyRevenue
end
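
write_energy_revenue above multiplies each resource's hourly injection (or charging, for FLEX resources) by the marginal price of its zone and weights by omega. The same arithmetic on toy arrays:

    dispatch = [10.0 20.0; 5.0 0.0]   # resources x hours (illustrative)
    price = [30.0 80.0]               # $/MWh in the resources' zone, per hour
    omega = [1.0, 1.0]                # hour weights

    revenue = dispatch .* price       # hourly revenue per resource
    annual_revenue = revenue * omega  # [1900.0, 150.0]
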
diff --git a/src/write_outputs/write_fuel_consumption.jl b/src/write_outputs/write_fuel_consumption.jl
index 7a661b9386..baff1301d6 100644
--- a/src/write_outputs/write_fuel_consumption.jl
+++ b/src/write_outputs/write_fuel_consumption.jl
@@ -4,57 +4,68 @@
Write fuel consumption of each power plant.
"""
function write_fuel_consumption(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
-
- write_fuel_consumption_plant(path::AbstractString,inputs::Dict, setup::Dict, EP::Model)
- if setup["WriteOutputs"] != "annual"
- write_fuel_consumption_ts(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- end
- write_fuel_consumption_tot(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ write_fuel_consumption_plant(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ if setup["WriteOutputs"] != "annual"
+ write_fuel_consumption_ts(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ end
+ write_fuel_consumption_tot(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
end
-function write_fuel_consumption_plant(path::AbstractString,inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
+function write_fuel_consumption_plant(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ gen = inputs["RESOURCES"]
+
+ HAS_FUEL = inputs["HAS_FUEL"]
+ MULTI_FUELS = inputs["MULTI_FUELS"]
- HAS_FUEL = inputs["HAS_FUEL"]
- MULTI_FUELS = inputs["MULTI_FUELS"]
+ # Fuel consumption cost by each resource, including start up fuel
+ dfPlantFuel = DataFrame(Resource = inputs["RESOURCE_NAMES"][HAS_FUEL],
+ Fuel = fuel.(gen[HAS_FUEL]),
+ Zone = zone_id.(gen[HAS_FUEL]),
+ AnnualSumCosts = zeros(length(HAS_FUEL)))
+ tempannualsum = value.(EP[:ePlantCFuelOut][HAS_FUEL]) +
+ value.(EP[:ePlantCFuelStart][HAS_FUEL])
- # Fuel consumption cost by each resource, including start up fuel
- dfPlantFuel = DataFrame(Resource = inputs["RESOURCE_NAMES"][HAS_FUEL],
- Fuel = fuel.(gen[HAS_FUEL]),
- Zone = zone_id.(gen[HAS_FUEL]),
- AnnualSumCosts = zeros(length(HAS_FUEL)))
- tempannualsum = value.(EP[:ePlantCFuelOut][HAS_FUEL]) + value.(EP[:ePlantCFuelStart][HAS_FUEL])
+ if !isempty(MULTI_FUELS)
+        fuel_cols_num = inputs["FUEL_COLS"] # TODO: rename it
+ max_fuels = inputs["MAX_NUM_FUELS"]
+ dfPlantFuel.Multi_Fuels = multi_fuels.(gen[HAS_FUEL])
+ for i in 1:max_fuels
+ tempannualsum_fuel_heat_multi_generation = zeros(length(HAS_FUEL))
+ tempannualsum_fuel_heat_multi_start = zeros(length(HAS_FUEL))
+ tempannualsum_fuel_heat_multi_total = zeros(length(HAS_FUEL))
+ tempannualsum_fuel_cost_multi = zeros(length(HAS_FUEL))
+ for g in MULTI_FUELS
+ tempannualsum_fuel_heat_multi_generation[findfirst(x -> x == g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi_generation][g,
+ i])
+ tempannualsum_fuel_heat_multi_start[findfirst(x -> x == g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi_start][g,
+ i])
+ tempannualsum_fuel_heat_multi_total[findfirst(x -> x == g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi][g,
+ i])
+ tempannualsum_fuel_cost_multi[findfirst(x -> x == g, HAS_FUEL)] = value.(EP[:ePlantCFuelOut_multi][g,
+ i]) + value.(EP[:ePlantCFuelOut_multi_start][g,
+ i])
+ end
+ if setup["ParameterScale"] == 1
+ tempannualsum_fuel_heat_multi_generation *= ModelScalingFactor
+ tempannualsum_fuel_heat_multi_start *= ModelScalingFactor
+ tempannualsum_fuel_heat_multi_total *= ModelScalingFactor
+ tempannualsum_fuel_cost_multi *= ModelScalingFactor^2
+ end
- if !isempty(MULTI_FUELS)
- fuel_cols_num = inputs["FUEL_COLS"] # TODO: rename it
- max_fuels = inputs["MAX_NUM_FUELS"]
- dfPlantFuel.Multi_Fuels = multi_fuels.(gen[HAS_FUEL])
- for i = 1:max_fuels
- tempannualsum_fuel_heat_multi_generation = zeros(length(HAS_FUEL))
- tempannualsum_fuel_heat_multi_start = zeros(length(HAS_FUEL))
- tempannualsum_fuel_heat_multi_total = zeros(length(HAS_FUEL))
- tempannualsum_fuel_cost_multi = zeros(length(HAS_FUEL))
- for g in MULTI_FUELS
- tempannualsum_fuel_heat_multi_generation[findfirst(x->x==g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi_generation][g,i])
- tempannualsum_fuel_heat_multi_start[findfirst(x->x==g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi_start][g,i])
- tempannualsum_fuel_heat_multi_total[findfirst(x->x==g, HAS_FUEL)] = value.(EP[:ePlantFuelConsumptionYear_multi][g,i])
- tempannualsum_fuel_cost_multi[findfirst(x->x==g, HAS_FUEL)] = value.(EP[:ePlantCFuelOut_multi][g,i]) + value.(EP[:ePlantCFuelOut_multi_start][g,i])
- end
- if setup["ParameterScale"] == 1
- tempannualsum_fuel_heat_multi_generation *= ModelScalingFactor
- tempannualsum_fuel_heat_multi_start *= ModelScalingFactor
- tempannualsum_fuel_heat_multi_total *= ModelScalingFactor
- tempannualsum_fuel_cost_multi *= ModelScalingFactor^2
- end
+ dfPlantFuel[!, fuel_cols_num[i]] = fuel_cols.(gen[HAS_FUEL], tag = i)
+ dfPlantFuel[!, Symbol(string(fuel_cols_num[i], "_AnnualSum_Fuel_HeatInput_Generation_MMBtu"))] = tempannualsum_fuel_heat_multi_generation
+ dfPlantFuel[!, Symbol(string(fuel_cols_num[i], "_AnnualSum_Fuel_HeatInput_Start_MMBtu"))] = tempannualsum_fuel_heat_multi_start
+ dfPlantFuel[!, Symbol(string(fuel_cols_num[i], "_AnnualSum_Fuel_HeatInput_Total_MMBtu"))] = tempannualsum_fuel_heat_multi_total
+ dfPlantFuel[!, Symbol(string(fuel_cols_num[i], "_AnnualSum_Fuel_Cost"))] = tempannualsum_fuel_cost_multi
+ end
+ end
- dfPlantFuel[!, fuel_cols_num[i]] = fuel_cols.(gen[HAS_FUEL], tag=i)
- dfPlantFuel[!, Symbol(string(fuel_cols_num[i],"_AnnualSum_Fuel_HeatInput_Generation_MMBtu"))] = tempannualsum_fuel_heat_multi_generation
- dfPlantFuel[!, Symbol(string(fuel_cols_num[i],"_AnnualSum_Fuel_HeatInput_Start_MMBtu"))] = tempannualsum_fuel_heat_multi_start
- dfPlantFuel[!, Symbol(string(fuel_cols_num[i],"_AnnualSum_Fuel_HeatInput_Total_MMBtu"))] = tempannualsum_fuel_heat_multi_total
- dfPlantFuel[!, Symbol(string(fuel_cols_num[i],"_AnnualSum_Fuel_Cost"))] = tempannualsum_fuel_cost_multi
- end
- end
-
if setup["ParameterScale"] == 1
tempannualsum *= ModelScalingFactor^2 #
end
@@ -62,34 +73,38 @@ function write_fuel_consumption_plant(path::AbstractString,inputs::Dict, setup::
CSV.write(joinpath(path, "Fuel_cost_plant.csv"), dfPlantFuel)
end
+function write_fuel_consumption_ts(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ T = inputs["T"] # Number of time steps (hours)
+ HAS_FUEL = inputs["HAS_FUEL"]
-function write_fuel_consumption_ts(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- HAS_FUEL = inputs["HAS_FUEL"]
-
- # Fuel consumption by each resource per time step, unit is MMBTU
- dfPlantFuel_TS = DataFrame(Resource = inputs["RESOURCE_NAMES"][HAS_FUEL])
- tempts = value.(EP[:ePlantFuel_generation] + EP[:ePlantFuel_start])[HAS_FUEL,:]
+ # Fuel consumption by each resource per time step, unit is MMBTU
+ dfPlantFuel_TS = DataFrame(Resource = inputs["RESOURCE_NAMES"][HAS_FUEL])
+ tempts = value.(EP[:ePlantFuel_generation] + EP[:ePlantFuel_start])[HAS_FUEL, :]
if setup["ParameterScale"] == 1
tempts *= ModelScalingFactor # kMMBTU to MMBTU
end
- dfPlantFuel_TS = hcat(dfPlantFuel_TS,
- DataFrame(tempts, [Symbol("t$t") for t in 1:T]))
- CSV.write(joinpath(path, "FuelConsumption_plant_MMBTU.csv"),
- dftranspose(dfPlantFuel_TS, false), header=false)
+ dfPlantFuel_TS = hcat(dfPlantFuel_TS,
+ DataFrame(tempts, [Symbol("t$t") for t in 1:T]))
+ CSV.write(joinpath(path, "FuelConsumption_plant_MMBTU.csv"),
+ dftranspose(dfPlantFuel_TS, false), header = false)
end
-
-function write_fuel_consumption_tot(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- # types of fuel
- fuel_types = inputs["fuels"]
- fuel_number = length(fuel_types)
- dfFuel = DataFrame(Fuel = fuel_types,
- AnnualSum = zeros(fuel_number))
- tempannualsum = value.(EP[:eFuelConsumptionYear])
+function write_fuel_consumption_tot(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ # types of fuel
+ fuel_types = inputs["fuels"]
+ fuel_number = length(fuel_types)
+ dfFuel = DataFrame(Fuel = fuel_types,
+ AnnualSum = zeros(fuel_number))
+ tempannualsum = value.(EP[:eFuelConsumptionYear])
if setup["ParameterScale"] == 1
tempannualsum *= ModelScalingFactor # billion MMBTU to MMBTU
end
- dfFuel.AnnualSum .+= tempannualsum
- CSV.write(joinpath(path,"FuelConsumption_total_MMBTU.csv"), dfFuel)
+ dfFuel.AnnualSum .+= tempannualsum
+ CSV.write(joinpath(path, "FuelConsumption_total_MMBTU.csv"), dfFuel)
end
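
write_fuel_consumption_plant above builds one group of columns per fuel slot by splicing the slot name into each column symbol. A small DataFrames sketch of that naming pattern (the resource name and the numbers are made up):

    using DataFrames

    df = DataFrame(Resource = ["dual_fuel_ct"])
    fuel_slots = ["Fuel1", "Fuel2"]
    heat_totals = [1.0e4, 2.0e3]   # MMBtu per slot, illustrative
    for (i, slot) in enumerate(fuel_slots)
        df[!, Symbol(string(slot, "_AnnualSum_Fuel_HeatInput_Total_MMBtu"))] = [heat_totals[i]]
    end
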
diff --git a/src/write_outputs/write_maintenance.jl b/src/write_outputs/write_maintenance.jl
index d00af7b696..f7a491828f 100644
--- a/src/write_outputs/write_maintenance.jl
+++ b/src/write_outputs/write_maintenance.jl
@@ -7,7 +7,7 @@ function write_simple_csv(filename::AbstractString, header::Vector, matrix)
write_simple_csv(filename, df)
end
-function prepare_timeseries_variables(EP::Model, set::Set{Symbol}, scale::Float64=1.0)
+function prepare_timeseries_variables(EP::Model, set::Set{Symbol}, scale::Float64 = 1.0)
# function to extract data from DenseAxisArray
data(var) = scale * value.(EP[var]).data
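
The value.(EP[var]).data idiom used here, and throughout the files above, strips the axis labels off a JuMP DenseAxisArray to leave a plain Array for CSV output. A minimal illustration (the axes are made up):

    using JuMP

    A = JuMP.Containers.DenseAxisArray(rand(2, 3), ["batt", "pump"], 1:3)
    A["batt", 2]   # axis-based indexing
    A.data         # the underlying 2x3 Matrix{Float64}, as used by prepare_timeseries_variables
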
diff --git a/src/write_outputs/write_net_revenue.jl b/src/write_outputs/write_net_revenue.jl
index 3b3beb0772..a647a81dbd 100644
--- a/src/write_outputs/write_net_revenue.jl
+++ b/src/write_outputs/write_net_revenue.jl
@@ -3,223 +3,288 @@
Function for writing net revenue of different generation technologies.
"""
-function write_net_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model, dfCap::DataFrame, dfESRRev::DataFrame, dfResRevenue::DataFrame, dfChargingcost::DataFrame, dfPower::DataFrame, dfEnergyRevenue::DataFrame, dfSubRevenue::DataFrame, dfRegSubRevenue::DataFrame, dfVreStor::DataFrame, dfOpRegRevenue::DataFrame, dfOpRsvRevenue::DataFrame)
-
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
- regions = region.(gen)
- clusters = cluster.(gen)
- rid = resource_id.(gen)
-
- G = inputs["G"] # Number of generators
- COMMIT = inputs["COMMIT"] # Thermal units for unit commitment
- STOR_ALL = inputs["STOR_ALL"]
-
- if setup["OperationalReserves"] >= 1
- RSV = inputs["RSV"] # Generators contributing to operating reserves
- REG = inputs["REG"] # Generators contributing to regulation
- end
-
- VRE_STOR = inputs["VRE_STOR"]
- CCS = inputs["CCS"]
- if !isempty(VRE_STOR)
- gen_VRE_STOR = gen.VreStorage
- VRE_STOR_LENGTH = size(inputs["VRE_STOR"])[1]
- SOLAR = inputs["VS_SOLAR"]
- WIND = inputs["VS_WIND"]
- DC = inputs["VS_DC"]
- DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
- AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
- DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
- AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
- # Should read in charge asymmetric capacities
- end
-
- # Create a NetRevenue dataframe
- dfNetRevenue = DataFrame(region = regions, Resource = inputs["RESOURCE_NAMES"], zone = zones, Cluster = clusters, R_ID = rid)
-
- # Add investment cost to the dataframe
- dfNetRevenue.Inv_cost_MW = inv_cost_per_mwyr.(gen) .* dfCap[1:G,:NewCap]
- dfNetRevenue.Inv_cost_MWh = inv_cost_per_mwhyr.(gen) .* dfCap[1:G,:NewEnergyCap]
- dfNetRevenue.Inv_cost_charge_MW = inv_cost_charge_per_mwyr.(gen) .* dfCap[1:G,:NewChargeCap]
- if !isempty(VRE_STOR)
- # Doesn't include charge capacities
- if !isempty(SOLAR)
- dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_solar_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH,:NewCapSolar]
- end
- if !isempty(DC)
- dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_inverter_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH,:NewCapDC]
- end
- if !isempty(WIND)
- dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_wind_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH,:NewCapWind]
- end
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.Inv_cost_MWh *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Inv_cost_MW *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Inv_cost_charge_MW *= ModelScalingFactor # converting Million US$ to US$
- end
-
- # Add operations and maintenance cost to the dataframe
- dfNetRevenue.Fixed_OM_cost_MW = fixed_om_cost_per_mwyr.(gen) .* dfCap[1:G,:EndCap]
- dfNetRevenue.Fixed_OM_cost_MWh = fixed_om_cost_per_mwhyr.(gen) .* dfCap[1:G,:EndEnergyCap]
- dfNetRevenue.Fixed_OM_cost_charge_MW = fixed_om_cost_charge_per_mwyr.(gen) .* dfCap[1:G, :EndChargeCap]
-
- dfNetRevenue.Var_OM_cost_out = var_om_cost_per_mwh.(gen) .* dfPower[1:G,:AnnualSum]
- if !isempty(VRE_STOR)
- if !isempty(SOLAR)
- dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_solar_cost_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH, :EndCapSolar]
- dfNetRevenue.Var_OM_cost_out[SOLAR] += var_om_cost_per_mwh_solar.(gen_VRE_STOR[(gen_VRE_STOR.solar.!=0)]) .* (value.(EP[:vP_SOLAR][SOLAR, :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar.!=0)]) * inputs["omega"])
- end
- if !isempty(WIND)
- dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_wind_cost_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH, :EndCapWind]
- dfNetRevenue.Var_OM_cost_out[WIND] += var_om_cost_per_mwh_wind.(gen_VRE_STOR[(gen_VRE_STOR.wind.!=0)]) .* (value.(EP[:vP_WIND][WIND, :]).data * inputs["omega"])
- end
- if !isempty(DC)
- dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_inverter_cost_per_mwyr.(gen_VRE_STOR) .* dfVreStor[1:VRE_STOR_LENGTH, :EndCapDC]
- end
- if !isempty(DC_DISCHARGE)
- dfNetRevenue.Var_OM_cost_out[DC_DISCHARGE] += var_om_cost_per_mwh_discharge_dc.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)]) .* (value.(EP[:vP_DC_DISCHARGE][DC_DISCHARGE, :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)]) * inputs["omega"])
- end
- if !isempty(AC_DISCHARGE)
- dfNetRevenue.Var_OM_cost_out[AC_DISCHARGE] += var_om_cost_per_mwh_discharge_ac.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_discharge.!=0)]) .* (value.(EP[:vP_AC_DISCHARGE][AC_DISCHARGE, :]).data * inputs["omega"])
- end
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.Fixed_OM_cost_MW *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Fixed_OM_cost_MWh *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Fixed_OM_cost_charge_MW *= ModelScalingFactor # converting Million US$ to US$
- dfNetRevenue.Var_OM_cost_out *= ModelScalingFactor # converting Million US$ to US$
- end
-
- # Add fuel cost to the dataframe
- dfNetRevenue.Fuel_cost = sum(value.(EP[:ePlantCFuelOut]), dims = 2)
- if setup["ParameterScale"] == 1
- dfNetRevenue.Fuel_cost *= ModelScalingFactor^2 # converting Million US$ to US$
- end
-
- # Add storage cost to the dataframe
- dfNetRevenue.Var_OM_cost_in = zeros(nrow(dfNetRevenue))
- if !isempty(STOR_ALL)
- dfNetRevenue.Var_OM_cost_in[STOR_ALL] = var_om_cost_per_mwh_in.(gen.Storage) .* ((value.(EP[:vCHARGE][STOR_ALL,:]).data) * inputs["omega"])
- end
- if !isempty(VRE_STOR)
- if !isempty(DC_CHARGE)
- dfNetRevenue.Var_OM_cost_in[DC_CHARGE] += var_om_cost_per_mwh_charge_dc.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge.!=0)]) .* (value.(EP[:vP_DC_CHARGE][DC_CHARGE, :]).data ./ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge.!=0)]) * inputs["omega"])
- end
- if !isempty(AC_CHARGE)
- dfNetRevenue.Var_OM_cost_in[AC_CHARGE] += var_om_cost_per_mwh_charge_ac.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_charge.!=0)]) .* (value.(EP[:vP_AC_CHARGE][AC_CHARGE, :]).data * inputs["omega"])
- end
- end
-
- if setup["ParameterScale"] == 1
- dfNetRevenue.Var_OM_cost_in *= ModelScalingFactor^2 # converting Million US$ to US$
- end
- # Add start-up cost to the dataframe
- dfNetRevenue.StartCost = zeros(nrow(dfNetRevenue))
- if setup["UCommit"]>=1 && !isempty(COMMIT)
- start_costs = vec(sum(value.(EP[:eCStart][COMMIT, :]).data, dims = 2))
- start_fuel_costs = vec(value.(EP[:ePlantCFuelStart][COMMIT]))
- dfNetRevenue.StartCost[COMMIT] .= start_costs + start_fuel_costs
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.StartCost *= ModelScalingFactor^2 # converting Million US$ to US$
- end
- # Add charge cost to the dataframe
- dfNetRevenue.Charge_cost = zeros(nrow(dfNetRevenue))
- if has_duals(EP)
- dfNetRevenue.Charge_cost = dfChargingcost[1:G,:AnnualSum] # Unit is confirmed to be US$
- end
-
- # Add CO2 releated sequestration cost or credit (e.g. 45 Q) to the dataframe
- dfNetRevenue.CO2SequestrationCost = zeros(nrow(dfNetRevenue))
- if any(co2_capture_fraction.(gen) .!= 0)
- dfNetRevenue.CO2SequestrationCost = zeros(G)
- dfNetRevenue[CCS, :CO2SequestrationCost] = value.(EP[:ePlantCCO2Sequestration]).data
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.CO2SequestrationCost *= ModelScalingFactor^2 # converting Million US$ to US$
- end
-
- # Add energy and subsidy revenue to the dataframe
- dfNetRevenue.EnergyRevenue = zeros(nrow(dfNetRevenue))
- dfNetRevenue.SubsidyRevenue = zeros(nrow(dfNetRevenue))
- if has_duals(EP)
- dfNetRevenue.EnergyRevenue = dfEnergyRevenue[1:G,:AnnualSum] # Unit is confirmed to be US$
- dfNetRevenue.SubsidyRevenue = dfSubRevenue[1:G,:SubsidyRevenue] # Unit is confirmed to be US$
- end
-
- # Add energy and subsidy revenue to the dataframe
- dfNetRevenue.OperatingReserveRevenue = zeros(nrow(dfNetRevenue))
- dfNetRevenue.OperatingRegulationRevenue = zeros(nrow(dfNetRevenue))
- if setup["OperationalReserves"] > 0 && has_duals(EP)
- dfNetRevenue.OperatingReserveRevenue[RSV] = dfOpRsvRevenue.AnnualSum # Unit is confirmed to be US$
- dfNetRevenue.OperatingRegulationRevenue[REG] = dfOpRegRevenue.AnnualSum # Unit is confirmed to be US$
- end
-
- # Add capacity revenue to the dataframe
- dfNetRevenue.ReserveMarginRevenue = zeros(nrow(dfNetRevenue))
- if setup["CapacityReserveMargin"] > 0 && has_duals(EP) # The unit is confirmed to be $
- dfNetRevenue.ReserveMarginRevenue = dfResRevenue[1:G,:AnnualSum]
- end
-
- # Add RPS/CES revenue to the dataframe
- dfNetRevenue.ESRRevenue = zeros(nrow(dfNetRevenue))
- if setup["EnergyShareRequirement"] > 0 && has_duals(EP) # The unit is confirmed to be $
- dfNetRevenue.ESRRevenue = dfESRRev[1:G,:Total]
- end
-
- # Calculate emissions cost
- dfNetRevenue.EmissionsCost = zeros(nrow(dfNetRevenue))
- if setup["CO2Cap"] >=1 && has_duals(EP)
- for cap in 1:inputs["NCO2Cap"]
- co2_cap_dual = dual(EP[:cCO2Emissions_systemwide][cap])
- CO2ZONES = findall(x->x==1, inputs["dfCO2CapZones"][:,cap])
- GEN_IN_ZONE = resource_id.(gen[[y in CO2ZONES for y in zone_id.(gen)]])
- if setup["CO2Cap"]==1 || setup["CO2Cap"]==2 # Mass-based or Demand + Rate-based
- # Cost = sum(sum(emissions for zone z * dual(CO2 constraint[cap]) for z in Z) for cap in setup["NCO2"])
- temp_vec = value.(EP[:eEmissionsByPlant][GEN_IN_ZONE, :]) * inputs["omega"]
- dfNetRevenue.EmissionsCost[GEN_IN_ZONE] += - co2_cap_dual * temp_vec
- elseif setup["CO2Cap"]==3 # Generation + Rate-based
- SET_WITH_MAXCO2RATE = union(inputs["THERM_ALL"],inputs["VRE"], inputs["VRE"],inputs["MUST_RUN"],inputs["HYDRO_RES"])
- Y = intersect(GEN_IN_ZONE, SET_WITH_MAXCO2RATE)
- temp_vec = (value.(EP[:eEmissionsByPlant][Y,:]) - (value.(EP[:vP][Y,:]) .* inputs["dfMaxCO2Rate"][zone_id.(gen[Y]), cap])) * inputs["omega"]
- dfNetRevenue.EmissionsCost[Y] += - co2_cap_dual * temp_vec
- end
- end
- if setup["ParameterScale"] == 1
- dfNetRevenue.EmissionsCost *= ModelScalingFactor^2 # converting Million US$ to US$
- end
- end
-
- # Add regional technology subsidy revenue to the dataframe
- dfNetRevenue.RegSubsidyRevenue = zeros(nrow(dfNetRevenue))
- if setup["MinCapReq"] >= 1 && has_duals(EP)# The unit is confirmed to be US$
- dfNetRevenue.RegSubsidyRevenue = dfRegSubRevenue[1:G,:SubsidyRevenue]
- end
-
- dfNetRevenue.Revenue = dfNetRevenue.EnergyRevenue
- .+ dfNetRevenue.SubsidyRevenue
- .+ dfNetRevenue.ReserveMarginRevenue
- .+ dfNetRevenue.ESRRevenue
- .+ dfNetRevenue.RegSubsidyRevenue
- .+ dfNetRevenue.OperatingReserveRevenue
- .+ dfNetRevenue.OperatingRegulationRevenue
-
- dfNetRevenue.Cost = (dfNetRevenue.Inv_cost_MW
- .+ dfNetRevenue.Inv_cost_MWh
- .+ dfNetRevenue.Inv_cost_charge_MW
- .+ dfNetRevenue.Fixed_OM_cost_MW
- .+ dfNetRevenue.Fixed_OM_cost_MWh
- .+ dfNetRevenue.Fixed_OM_cost_charge_MW
- .+ dfNetRevenue.Var_OM_cost_out
- .+ dfNetRevenue.Var_OM_cost_in
- .+ dfNetRevenue.Fuel_cost
- .+ dfNetRevenue.Charge_cost
- .+ dfNetRevenue.EmissionsCost
- .+ dfNetRevenue.StartCost
- .+ dfNetRevenue.CO2SequestrationCost)
- dfNetRevenue.Profit = dfNetRevenue.Revenue .- dfNetRevenue.Cost
-
- CSV.write(joinpath(path, "NetRevenue.csv"), dfNetRevenue)
+function write_net_revenue(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model,
+ dfCap::DataFrame,
+ dfESRRev::DataFrame,
+ dfResRevenue::DataFrame,
+ dfChargingcost::DataFrame,
+ dfPower::DataFrame,
+ dfEnergyRevenue::DataFrame,
+ dfSubRevenue::DataFrame,
+ dfRegSubRevenue::DataFrame,
+ dfVreStor::DataFrame,
+ dfOpRegRevenue::DataFrame,
+ dfOpRsvRevenue::DataFrame)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ rid = resource_id.(gen)
+
+ G = inputs["G"] # Number of generators
+    COMMIT = inputs["COMMIT"] # Thermal units for unit commitment
+ STOR_ALL = inputs["STOR_ALL"]
+
+ if setup["OperationalReserves"] >= 1
+        RSV = inputs["RSV"] # Generators contributing to operating reserves
+ REG = inputs["REG"] # Generators contributing to regulation
+ end
+
+ VRE_STOR = inputs["VRE_STOR"]
+ CCS = inputs["CCS"]
+ if !isempty(VRE_STOR)
+ gen_VRE_STOR = gen.VreStorage
+ VRE_STOR_LENGTH = size(inputs["VRE_STOR"])[1]
+ SOLAR = inputs["VS_SOLAR"]
+ WIND = inputs["VS_WIND"]
+ DC = inputs["VS_DC"]
+ DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
+ AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
+ DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
+ AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
+ # Should read in charge asymmetric capacities
+ end
+
+ # Create a NetRevenue dataframe
+ dfNetRevenue = DataFrame(region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ zone = zones,
+ Cluster = clusters,
+ R_ID = rid)
+
+ # Add investment cost to the dataframe
+ dfNetRevenue.Inv_cost_MW = inv_cost_per_mwyr.(gen) .* dfCap[1:G, :NewCap]
+ dfNetRevenue.Inv_cost_MWh = inv_cost_per_mwhyr.(gen) .* dfCap[1:G, :NewEnergyCap]
+ dfNetRevenue.Inv_cost_charge_MW = inv_cost_charge_per_mwyr.(gen) .*
+ dfCap[1:G, :NewChargeCap]
+ if !isempty(VRE_STOR)
+ # Doesn't include charge capacities
+ if !isempty(SOLAR)
+ dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_solar_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH, :NewCapSolar]
+ end
+ if !isempty(DC)
+ dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_inverter_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH, :NewCapDC]
+ end
+ if !isempty(WIND)
+ dfNetRevenue.Inv_cost_MW[VRE_STOR] += inv_cost_wind_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH, :NewCapWind]
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.Inv_cost_MWh *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Inv_cost_MW *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Inv_cost_charge_MW *= ModelScalingFactor # converting Million US$ to US$
+ end
+
+ # Add operations and maintenance cost to the dataframe
+ dfNetRevenue.Fixed_OM_cost_MW = fixed_om_cost_per_mwyr.(gen) .* dfCap[1:G, :EndCap]
+ dfNetRevenue.Fixed_OM_cost_MWh = fixed_om_cost_per_mwhyr.(gen) .*
+ dfCap[1:G, :EndEnergyCap]
+ dfNetRevenue.Fixed_OM_cost_charge_MW = fixed_om_cost_charge_per_mwyr.(gen) .*
+ dfCap[1:G, :EndChargeCap]
+
+ dfNetRevenue.Var_OM_cost_out = var_om_cost_per_mwh.(gen) .* dfPower[1:G, :AnnualSum]
+ if !isempty(VRE_STOR)
+ if !isempty(SOLAR)
+ dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_solar_cost_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH,
+ :EndCapSolar]
+ dfNetRevenue.Var_OM_cost_out[SOLAR] += var_om_cost_per_mwh_solar.(gen_VRE_STOR[(gen_VRE_STOR.solar .!= 0)]) .*
+ (value.(EP[:vP_SOLAR][SOLAR, :]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar .!= 0)]) *
+ inputs["omega"])
+ end
+ if !isempty(WIND)
+ dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_wind_cost_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH,
+ :EndCapWind]
+ dfNetRevenue.Var_OM_cost_out[WIND] += var_om_cost_per_mwh_wind.(gen_VRE_STOR[(gen_VRE_STOR.wind .!= 0)]) .*
+ (value.(EP[:vP_WIND][WIND, :]).data *
+ inputs["omega"])
+ end
+ if !isempty(DC)
+ dfNetRevenue.Fixed_OM_cost_MW[VRE_STOR] += fixed_om_inverter_cost_per_mwyr.(gen_VRE_STOR) .*
+ dfVreStor[1:VRE_STOR_LENGTH,
+ :EndCapDC]
+ end
+ if !isempty(DC_DISCHARGE)
+ dfNetRevenue.Var_OM_cost_out[DC_DISCHARGE] += var_om_cost_per_mwh_discharge_dc.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)]) .*
+ (value.(EP[:vP_DC_DISCHARGE][DC_DISCHARGE,
+ :]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)]) *
+ inputs["omega"])
+ end
+ if !isempty(AC_DISCHARGE)
+ dfNetRevenue.Var_OM_cost_out[AC_DISCHARGE] += var_om_cost_per_mwh_discharge_ac.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_discharge .!= 0)]) .*
+ (value.(EP[:vP_AC_DISCHARGE][AC_DISCHARGE,
+ :]).data * inputs["omega"])
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.Fixed_OM_cost_MW *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Fixed_OM_cost_MWh *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Fixed_OM_cost_charge_MW *= ModelScalingFactor # converting Million US$ to US$
+ dfNetRevenue.Var_OM_cost_out *= ModelScalingFactor # converting Million US$ to US$
+ end
+
+ # Add fuel cost to the dataframe
+ dfNetRevenue.Fuel_cost = sum(value.(EP[:ePlantCFuelOut]), dims = 2)
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.Fuel_cost *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+
+ # Add storage cost to the dataframe
+ dfNetRevenue.Var_OM_cost_in = zeros(nrow(dfNetRevenue))
+ if !isempty(STOR_ALL)
+ dfNetRevenue.Var_OM_cost_in[STOR_ALL] = var_om_cost_per_mwh_in.(gen.Storage) .*
+ ((value.(EP[:vCHARGE][STOR_ALL, :]).data) *
+ inputs["omega"])
+ end
+ if !isempty(VRE_STOR)
+ if !isempty(DC_CHARGE)
+ dfNetRevenue.Var_OM_cost_in[DC_CHARGE] += var_om_cost_per_mwh_charge_dc.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge .!= 0)]) .*
+ (value.(EP[:vP_DC_CHARGE][DC_CHARGE,
+ :]).data ./ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_charge .!= 0)]) *
+ inputs["omega"])
+ end
+ if !isempty(AC_CHARGE)
+ dfNetRevenue.Var_OM_cost_in[AC_CHARGE] += var_om_cost_per_mwh_charge_ac.(gen_VRE_STOR[(gen_VRE_STOR.stor_ac_charge .!= 0)]) .*
+ (value.(EP[:vP_AC_CHARGE][AC_CHARGE,
+ :]).data * inputs["omega"])
+ end
+ end
+
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.Var_OM_cost_in *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+ # Add start-up cost to the dataframe
+ dfNetRevenue.StartCost = zeros(nrow(dfNetRevenue))
+ if setup["UCommit"] >= 1 && !isempty(COMMIT)
+ start_costs = vec(sum(value.(EP[:eCStart][COMMIT, :]).data, dims = 2))
+ start_fuel_costs = vec(value.(EP[:ePlantCFuelStart][COMMIT]))
+ dfNetRevenue.StartCost[COMMIT] .= start_costs + start_fuel_costs
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.StartCost *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+ # Add charge cost to the dataframe
+ dfNetRevenue.Charge_cost = zeros(nrow(dfNetRevenue))
+ if has_duals(EP)
+ dfNetRevenue.Charge_cost = dfChargingcost[1:G, :AnnualSum] # Unit is confirmed to be US$
+ end
+
+    # Add CO2-related sequestration cost or credit (e.g., 45Q) to the dataframe
+ dfNetRevenue.CO2SequestrationCost = zeros(nrow(dfNetRevenue))
+ if any(co2_capture_fraction.(gen) .!= 0)
+ dfNetRevenue.CO2SequestrationCost = zeros(G)
+ dfNetRevenue[CCS, :CO2SequestrationCost] = value.(EP[:ePlantCCO2Sequestration]).data
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.CO2SequestrationCost *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+
+ # Add energy and subsidy revenue to the dataframe
+ dfNetRevenue.EnergyRevenue = zeros(nrow(dfNetRevenue))
+ dfNetRevenue.SubsidyRevenue = zeros(nrow(dfNetRevenue))
+ if has_duals(EP)
+ dfNetRevenue.EnergyRevenue = dfEnergyRevenue[1:G, :AnnualSum] # Unit is confirmed to be US$
+ dfNetRevenue.SubsidyRevenue = dfSubRevenue[1:G, :SubsidyRevenue] # Unit is confirmed to be US$
+ end
+
+ # Add energy and subsidy revenue to the dataframe
+ dfNetRevenue.OperatingReserveRevenue = zeros(nrow(dfNetRevenue))
+ dfNetRevenue.OperatingRegulationRevenue = zeros(nrow(dfNetRevenue))
+ if setup["OperationalReserves"] > 0 && has_duals(EP)
+ dfNetRevenue.OperatingReserveRevenue[RSV] = dfOpRsvRevenue.AnnualSum # Unit is confirmed to be US$
+ dfNetRevenue.OperatingRegulationRevenue[REG] = dfOpRegRevenue.AnnualSum # Unit is confirmed to be US$
+ end
+
+ # Add capacity revenue to the dataframe
+ dfNetRevenue.ReserveMarginRevenue = zeros(nrow(dfNetRevenue))
+ if setup["CapacityReserveMargin"] > 0 && has_duals(EP) # The unit is confirmed to be $
+ dfNetRevenue.ReserveMarginRevenue = dfResRevenue[1:G, :AnnualSum]
+ end
+
+ # Add RPS/CES revenue to the dataframe
+ dfNetRevenue.ESRRevenue = zeros(nrow(dfNetRevenue))
+ if setup["EnergyShareRequirement"] > 0 && has_duals(EP) # The unit is confirmed to be $
+ dfNetRevenue.ESRRevenue = dfESRRev[1:G, :Total]
+ end
+
+ # Calculate emissions cost
+ dfNetRevenue.EmissionsCost = zeros(nrow(dfNetRevenue))
+ if setup["CO2Cap"] >= 1 && has_duals(EP)
+ for cap in 1:inputs["NCO2Cap"]
+ co2_cap_dual = dual(EP[:cCO2Emissions_systemwide][cap])
+ CO2ZONES = findall(x -> x == 1, inputs["dfCO2CapZones"][:, cap])
+ GEN_IN_ZONE = resource_id.(gen[[y in CO2ZONES for y in zone_id.(gen)]])
+ if setup["CO2Cap"] == 1 || setup["CO2Cap"] == 2 # Mass-based or Demand + Rate-based
+ # Cost = sum(sum(emissions for zone z * dual(CO2 constraint[cap]) for z in Z) for cap in setup["NCO2"])
+ temp_vec = value.(EP[:eEmissionsByPlant][GEN_IN_ZONE, :]) * inputs["omega"]
+ dfNetRevenue.EmissionsCost[GEN_IN_ZONE] += -co2_cap_dual * temp_vec
+ elseif setup["CO2Cap"] == 3 # Generation + Rate-based
+                SET_WITH_MAXCO2RATE = union(inputs["THERM_ALL"],
+                    inputs["VRE"],
+                    inputs["MUST_RUN"],
+                    inputs["HYDRO_RES"])
+ Y = intersect(GEN_IN_ZONE, SET_WITH_MAXCO2RATE)
+ temp_vec = (value.(EP[:eEmissionsByPlant][Y, :]) -
+ (value.(EP[:vP][Y, :]) .*
+ inputs["dfMaxCO2Rate"][zone_id.(gen[Y]), cap])) *
+ inputs["omega"]
+ dfNetRevenue.EmissionsCost[Y] += -co2_cap_dual * temp_vec
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dfNetRevenue.EmissionsCost *= ModelScalingFactor^2 # converting Million US$ to US$
+ end
+ end
+
+ # Add regional technology subsidy revenue to the dataframe
+ dfNetRevenue.RegSubsidyRevenue = zeros(nrow(dfNetRevenue))
+    if setup["MinCapReq"] >= 1 && has_duals(EP) # The unit is confirmed to be US$
+ dfNetRevenue.RegSubsidyRevenue = dfRegSubRevenue[1:G, :SubsidyRevenue]
+ end
+
+    dfNetRevenue.Revenue = dfNetRevenue.EnergyRevenue .+
+                           dfNetRevenue.SubsidyRevenue .+
+                           dfNetRevenue.ReserveMarginRevenue .+
+                           dfNetRevenue.ESRRevenue .+
+                           dfNetRevenue.RegSubsidyRevenue .+
+                           dfNetRevenue.OperatingReserveRevenue .+
+                           dfNetRevenue.OperatingRegulationRevenue
+
+    dfNetRevenue.Cost = (dfNetRevenue.Inv_cost_MW .+
+                         dfNetRevenue.Inv_cost_MWh .+
+                         dfNetRevenue.Inv_cost_charge_MW .+
+                         dfNetRevenue.Fixed_OM_cost_MW .+
+                         dfNetRevenue.Fixed_OM_cost_MWh .+
+                         dfNetRevenue.Fixed_OM_cost_charge_MW .+
+                         dfNetRevenue.Var_OM_cost_out .+
+                         dfNetRevenue.Var_OM_cost_in .+
+                         dfNetRevenue.Fuel_cost .+
+                         dfNetRevenue.Charge_cost .+
+                         dfNetRevenue.EmissionsCost .+
+                         dfNetRevenue.StartCost .+
+                         dfNetRevenue.CO2SequestrationCost)
+ dfNetRevenue.Profit = dfNetRevenue.Revenue .- dfNetRevenue.Cost
+
+ CSV.write(joinpath(path, "NetRevenue.csv"), dfNetRevenue)
end
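
write_net_revenue above ends by combining the per-resource revenue and cost columns element-wise into Revenue, Cost, and Profit. The same pattern on a toy table:

    using DataFrames

    df = DataFrame(EnergyRevenue = [100.0, 50.0], SubsidyRevenue = [10.0, 0.0],
        Fixed_OM_cost_MW = [60.0, 20.0], Fuel_cost = [30.0, 5.0])
    df.Revenue = df.EnergyRevenue .+ df.SubsidyRevenue
    df.Cost = df.Fixed_OM_cost_MW .+ df.Fuel_cost
    df.Profit = df.Revenue .- df.Cost   # [20.0, 25.0]
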
diff --git a/src/write_outputs/write_nse.jl b/src/write_outputs/write_nse.jl
index 5d30dcc987..3cdc1104a7 100644
--- a/src/write_outputs/write_nse.jl
+++ b/src/write_outputs/write_nse.jl
@@ -4,33 +4,39 @@
Function for reporting non-served energy for every model zone, time step and cost-segment.
"""
function write_nse(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- SEG = inputs["SEG"] # Number of demand curtailment segments
- # Non-served energy/demand curtailment by segment in each time step
- dfNse = DataFrame(Segment = repeat(1:SEG, outer = Z), Zone = repeat(1:Z, inner = SEG), AnnualSum = zeros(SEG * Z))
- nse = zeros(SEG * Z, T)
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- for z in 1:Z
- nse[((z-1)*SEG+1):z*SEG, :] = value.(EP[:vNSE])[:, :, z] * scale_factor
- end
- dfNse.AnnualSum .= nse * inputs["omega"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ SEG = inputs["SEG"] # Number of demand curtailment segments
+ # Non-served energy/demand curtailment by segment in each time step
+ dfNse = DataFrame(Segment = repeat(1:SEG, outer = Z),
+ Zone = repeat(1:Z, inner = SEG),
+ AnnualSum = zeros(SEG * Z))
+ nse = zeros(SEG * Z, T)
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ for z in 1:Z
+ nse[((z - 1) * SEG + 1):(z * SEG), :] = value.(EP[:vNSE])[:, :, z] * scale_factor
+ end
+ dfNse.AnnualSum .= nse * inputs["omega"]
- if setup["WriteOutputs"] == "annual"
- total = DataFrame(["Total" 0 sum(dfNse[!,:AnnualSum])], [:Segment, :Zone, :AnnualSum])
- dfNse = vcat(dfNse, total)
- CSV.write(joinpath(path, "nse.csv"), dfNse)
- else # setup["WriteOutputs"] == "full"
- dfNse = hcat(dfNse, DataFrame(nse, :auto))
- auxNew_Names=[Symbol("Segment");Symbol("Zone");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfNse,auxNew_Names)
+ if setup["WriteOutputs"] == "annual"
+ total = DataFrame(["Total" 0 sum(dfNse[!, :AnnualSum])],
+ [:Segment, :Zone, :AnnualSum])
+ dfNse = vcat(dfNse, total)
+ CSV.write(joinpath(path, "nse.csv"), dfNse)
+ else # setup["WriteOutputs"] == "full"
+ dfNse = hcat(dfNse, DataFrame(nse, :auto))
+ auxNew_Names = [Symbol("Segment");
+ Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfNse, auxNew_Names)
- total = DataFrame(["Total" 0 sum(dfNse[!,:AnnualSum]) fill(0.0, (1,T))], :auto)
- total[:, 4:T+3] .= sum(nse, dims = 1)
- rename!(total,auxNew_Names)
- dfNse = vcat(dfNse, total)
+ total = DataFrame(["Total" 0 sum(dfNse[!, :AnnualSum]) fill(0.0, (1, T))], :auto)
+ total[:, 4:(T + 3)] .= sum(nse, dims = 1)
+ rename!(total, auxNew_Names)
+ dfNse = vcat(dfNse, total)
- CSV.write(joinpath(path, "nse.csv"), dftranspose(dfNse, false), writeheader=false)
- end
- return nothing
+ CSV.write(joinpath(path, "nse.csv"), dftranspose(dfNse, false), writeheader = false)
+ end
+ return nothing
end
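
write_nse above stacks the demand-curtailment segments of each zone into contiguous blocks of rows, so zone z occupies rows (z - 1) * SEG + 1 through z * SEG. A quick index sketch:

    SEG, Z = 2, 3
    rows(z) = ((z - 1) * SEG + 1):(z * SEG)
    rows(1), rows(2), rows(3)   # (1:2, 3:4, 5:6)
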
diff --git a/src/write_outputs/write_outputs.jl b/src/write_outputs/write_outputs.jl
index 8e88ec0dc1..2c374c0b35 100644
--- a/src/write_outputs/write_outputs.jl
+++ b/src/write_outputs/write_outputs.jl
@@ -11,389 +11,471 @@
Function for the entry-point for writing the different output files. From here, onward several other functions are called, each for writing specific output files, like costs, capacities, etc.
"""
function write_outputs(EP::Model, path::AbstractString, setup::Dict, inputs::Dict)
-
- if setup["OverwriteResults"] == 1
- # Overwrite existing results if dir exists
- # This is the default behaviour when there is no flag, to avoid breaking existing code
- if !(isdir(path))
- mkpath(path)
- end
- else
- # Find closest unused ouput directory name and create it
- path = choose_output_dir(path)
- mkpath(path)
- end
-
- # https://jump.dev/MathOptInterface.jl/v0.9.10/apireference/#MathOptInterface.TerminationStatusCode
- status = termination_status(EP)
-
- ## Check if solved sucessfully - time out is included
- if status != MOI.OPTIMAL && status != MOI.LOCALLY_SOLVED
- if status != MOI.TIME_LIMIT # Model failed to solve, so record solver status and exit
- write_status(path, inputs, setup, EP)
- return
- # Model reached timelimit but failed to find a feasible solution
- #### Aaron Schwartz - Not sure if the below condition is valid anymore. We should revisit ####
- elseif isnan(objective_value(EP))==true
- # Model failed to solve, so record solver status and exit
- write_status(path, inputs, setup, EP)
- return
- end
- end
-
- # Dict containing the list of outputs to write
- output_settings_d = setup["WriteOutputsSettingsDict"]
- write_settings_file(path, setup)
-
- output_settings_d["WriteStatus"] && write_status(path, inputs, setup, EP)
-
- # linearize and re-solve model if duals are not available but ShadowPrices are requested
- if !has_duals(EP) && setup["WriteShadowPrices"] == 1
- # function to fix integers and linearize problem
- fix_integers(EP)
- # re-solve statement for LP solution
- println("Solving LP solution for duals")
- set_silent(EP)
- optimize!(EP)
- end
-
- if output_settings_d["WriteCosts"]
- elapsed_time_costs = @elapsed write_costs(path, inputs, setup, EP)
- println("Time elapsed for writing costs is")
- println(elapsed_time_costs)
- end
-
- if output_settings_d["WriteCapacity"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_capacity = @elapsed dfCap = write_capacity(path, inputs, setup, EP)
- println("Time elapsed for writing capacity is")
- println(elapsed_time_capacity)
- end
-
- if output_settings_d["WritePower"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_power = @elapsed dfPower = write_power(path, inputs, setup, EP)
- println("Time elapsed for writing power is")
- println(elapsed_time_power)
- end
-
- if output_settings_d["WriteCharge"]
- elapsed_time_charge = @elapsed write_charge(path, inputs, setup, EP)
- println("Time elapsed for writing charge is")
- println(elapsed_time_charge)
- end
-
- if output_settings_d["WriteCapacityFactor"]
- elapsed_time_capacityfactor = @elapsed write_capacityfactor(path, inputs, setup, EP)
- println("Time elapsed for writing capacity factor is")
- println(elapsed_time_capacityfactor)
- end
-
- if output_settings_d["WriteStorage"]
- elapsed_time_storage = @elapsed write_storage(path, inputs, setup, EP)
- println("Time elapsed for writing storage is")
- println(elapsed_time_storage)
- end
-
- if output_settings_d["WriteCurtailment"]
- elapsed_time_curtailment = @elapsed write_curtailment(path, inputs, setup, EP)
- println("Time elapsed for writing curtailment is")
- println(elapsed_time_curtailment)
- end
-
- if output_settings_d["WriteNSE"]
- elapsed_time_nse = @elapsed write_nse(path, inputs, setup, EP)
- println("Time elapsed for writing nse is")
- println(elapsed_time_nse)
- end
-
- if output_settings_d["WritePowerBalance"]
- elapsed_time_power_balance = @elapsed write_power_balance(path, inputs, setup, EP)
- println("Time elapsed for writing power balance is")
- println(elapsed_time_power_balance)
- end
-
- if inputs["Z"] > 1
- if output_settings_d["WriteTransmissionFlows"]
- elapsed_time_flows = @elapsed write_transmission_flows(path, inputs, setup, EP)
- println("Time elapsed for writing transmission flows is")
- println(elapsed_time_flows)
- end
-
- if output_settings_d["WriteTransmissionLosses"]
- elapsed_time_losses = @elapsed write_transmission_losses(path, inputs, setup, EP)
- println("Time elapsed for writing transmission losses is")
- println(elapsed_time_losses)
- end
-
- if setup["NetworkExpansion"] == 1 && output_settings_d["WriteNWExpansion"]
- elapsed_time_expansion = @elapsed write_nw_expansion(path, inputs, setup, EP)
- println("Time elapsed for writing network expansion is")
- println(elapsed_time_expansion)
- end
- end
-
- if output_settings_d["WriteEmissions"]
- elapsed_time_emissions = @elapsed write_emissions(path, inputs, setup, EP)
- println("Time elapsed for writing emissions is")
- println(elapsed_time_emissions)
- end
-
- dfVreStor = DataFrame()
- if !isempty(inputs["VRE_STOR"])
- if output_settings_d["WriteVREStor"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_vrestor = @elapsed dfVreStor = write_vre_stor(path, inputs, setup, EP)
- println("Time elapsed for writing vre stor is")
- println(elapsed_time_vrestor)
- end
- VS_LDS = inputs["VS_LDS"]
- VS_STOR = inputs["VS_STOR"]
- else
- VS_LDS = []
- VS_STOR = []
- end
-
- if has_duals(EP) == 1
- if output_settings_d["WriteReliability"]
- elapsed_time_reliability = @elapsed write_reliability(path, inputs, setup, EP)
- println("Time elapsed for writing reliability is")
- println(elapsed_time_reliability)
- end
- if !isempty(inputs["STOR_ALL"]) || !isempty(VS_STOR)
- if output_settings_d["WriteStorageDual"]
- elapsed_time_stordual = @elapsed write_storagedual(path, inputs, setup, EP)
- println("Time elapsed for writing storage duals is")
- println(elapsed_time_stordual)
- end
- end
- end
-
- if setup["UCommit"] >= 1
- if output_settings_d["WriteCommit"]
- elapsed_time_commit = @elapsed write_commit(path, inputs, setup, EP)
- println("Time elapsed for writing commitment is")
- println(elapsed_time_commit)
- end
-
- if output_settings_d["WriteStart"]
- elapsed_time_start = @elapsed write_start(path, inputs, setup, EP)
- println("Time elapsed for writing startup is")
- println(elapsed_time_start)
- end
-
- if output_settings_d["WriteShutdown"]
- elapsed_time_shutdown = @elapsed write_shutdown(path, inputs, setup, EP)
- println("Time elapsed for writing shutdown is")
- println(elapsed_time_shutdown)
- end
-
- if setup["OperationalReserves"] == 1
- if output_settings_d["WriteReg"]
- elapsed_time_reg = @elapsed write_reg(path, inputs, setup, EP)
- println("Time elapsed for writing regulation is")
- println(elapsed_time_reg)
- end
-
- if output_settings_d["WriteRsv"]
- elapsed_time_rsv = @elapsed write_rsv(path, inputs, setup, EP)
- println("Time elapsed for writing reserves is")
- println(elapsed_time_rsv)
- end
- end
- end
-
- # Output additional variables related inter-period energy transfer via storage
- representative_periods = inputs["REP_PERIOD"]
- if representative_periods > 1 && (!isempty(inputs["STOR_LONG_DURATION"]) || !isempty(VS_LDS))
- if output_settings_d["WriteOpWrapLDSStorInit"]
- elapsed_time_lds_init = @elapsed write_opwrap_lds_stor_init(path, inputs, setup, EP)
- println("Time elapsed for writing lds init is")
- println(elapsed_time_lds_init)
- end
-
- if output_settings_d["WriteOpWrapLDSdStor"]
- elapsed_time_lds_dstor = @elapsed write_opwrap_lds_dstor(path, inputs, setup, EP)
- println("Time elapsed for writing lds dstor is")
- println(elapsed_time_lds_dstor)
- end
- end
-
- if output_settings_d["WriteFuelConsumption"]
- elapsed_time_fuel_consumption = @elapsed write_fuel_consumption(path, inputs, setup, EP)
- println("Time elapsed for writing fuel consumption is")
- println(elapsed_time_fuel_consumption)
- end
-
- if output_settings_d["WriteCO2"]
- elapsed_time_emissions = @elapsed write_co2(path, inputs, setup, EP)
- println("Time elapsed for writing co2 is")
- println(elapsed_time_emissions)
- end
-
- if has_maintenance(inputs) && output_settings_d["WriteMaintenance"]
- write_maintenance(path, inputs, EP)
- end
-
- #Write angles when DC_OPF is activated
- if setup["DC_OPF"] == 1 && output_settings_d["WriteAngles"]
- elapsed_time_angles = @elapsed write_angles(path, inputs, setup, EP)
- println("Time elapsed for writing angles is")
- println(elapsed_time_angles)
- end
-
- # Temporary! Suppress these outputs until we know that they are compatable with multi-stage modeling
- if setup["MultiStage"] == 0
- dfEnergyRevenue = DataFrame()
- dfChargingcost = DataFrame()
- dfSubRevenue = DataFrame()
- dfRegSubRevenue = DataFrame()
- if has_duals(EP) == 1
- if output_settings_d["WritePrice"]
- elapsed_time_price = @elapsed write_price(path, inputs, setup, EP)
- println("Time elapsed for writing price is")
- println(elapsed_time_price)
- end
-
- if output_settings_d["WriteEnergyRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_energy_rev = @elapsed dfEnergyRevenue = write_energy_revenue(path, inputs, setup, EP)
- println("Time elapsed for writing energy revenue is")
- println(elapsed_time_energy_rev)
- end
-
- if output_settings_d["WriteChargingCost"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_charging_cost = @elapsed dfChargingcost = write_charging_cost(path, inputs, setup, EP)
- println("Time elapsed for writing charging cost is")
- println(elapsed_time_charging_cost)
- end
-
- if output_settings_d["WriteSubsidyRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_subsidy = @elapsed dfSubRevenue, dfRegSubRevenue = write_subsidy_revenue(path, inputs, setup, EP)
- println("Time elapsed for writing subsidy is")
- println(elapsed_time_subsidy)
- end
- end
-
- if output_settings_d["WriteTimeWeights"]
- elapsed_time_time_weights = @elapsed write_time_weights(path, inputs)
- println("Time elapsed for writing time weights is")
- println(elapsed_time_time_weights)
- end
-
- dfESRRev = DataFrame()
- if setup["EnergyShareRequirement"] == 1 && has_duals(EP)
- dfESR = DataFrame()
- if output_settings_d["WriteESRPrices"] || output_settings_d["WriteESRRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_esr_prices = @elapsed dfESR = write_esr_prices(path, inputs, setup, EP)
- println("Time elapsed for writing esr prices is")
- println(elapsed_time_esr_prices)
- end
-
- if output_settings_d["WriteESRRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_esr_revenue = @elapsed dfESRRev = write_esr_revenue(path, inputs, setup, dfPower, dfESR, EP)
- println("Time elapsed for writing esr revenue is")
- println(elapsed_time_esr_revenue)
- end
-
- end
-
- dfResRevenue = DataFrame()
- if setup["CapacityReserveMargin"]==1 && has_duals(EP)
- if output_settings_d["WriteReserveMargin"]
- elapsed_time_reserve_margin = @elapsed write_reserve_margin(path, setup, EP)
- println("Time elapsed for writing reserve margin is")
- println(elapsed_time_reserve_margin)
- end
-
- if output_settings_d["WriteReserveMarginWithWeights"]
- elapsed_time_rsv_margin_w = @elapsed write_reserve_margin_w(path, inputs, setup, EP)
- println("Time elapsed for writing reserve margin with weights is")
- println(elapsed_time_rsv_margin_w)
- end
-
- if output_settings_d["WriteVirtualDischarge"]
- elapsed_time_virtual_discharge = @elapsed write_virtual_discharge(path, inputs, setup, EP)
- println("Time elapsed for writing virtual discharge is")
- println(elapsed_time_virtual_discharge)
- end
-
- if output_settings_d["WriteReserveMarginRevenue"] || output_settings_d["WriteNetRevenue"]
- elapsed_time_res_rev = @elapsed dfResRevenue = write_reserve_margin_revenue(path, inputs, setup, EP)
- println("Time elapsed for writing reserve revenue is")
- println(elapsed_time_res_rev)
- end
-
- if haskey(inputs, "dfCapRes_slack") && output_settings_d["WriteReserveMarginSlack"]
- elapsed_time_rsv_slack = @elapsed write_reserve_margin_slack(path, inputs, setup, EP)
- println("Time elapsed for writing reserve margin slack is")
- println(elapsed_time_rsv_slack)
- end
-
- if output_settings_d["WriteCapacityValue"]
- elapsed_time_cap_value = @elapsed write_capacity_value(path, inputs, setup, EP)
- println("Time elapsed for writing capacity value is")
- println(elapsed_time_cap_value)
- end
-
- end
-
- dfOpRegRevenue = DataFrame()
- dfOpRsvRevenue = DataFrame()
- if setup["OperationalReserves"]==1 && has_duals(EP)
- elapsed_time_op_res_rev = @elapsed dfOpRegRevenue, dfOpRsvRevenue = write_operating_reserve_regulation_revenue(path, inputs, setup, EP)
- println("Time elapsed for writing oerating reserve and regulation revenue is")
- println(elapsed_time_op_res_rev)
- end
-
- if setup["CO2Cap"]>0 && has_duals(EP) == 1 && output_settings_d["WriteCO2Cap"]
- elapsed_time_co2_cap = @elapsed write_co2_cap(path, inputs, setup, EP)
- println("Time elapsed for writing co2 cap is")
- println(elapsed_time_co2_cap)
- end
- if setup["MinCapReq"] == 1 && has_duals(EP) == 1 && output_settings_d["WriteMinCapReq"]
- elapsed_time_min_cap_req = @elapsed write_minimum_capacity_requirement(path, inputs, setup, EP)
- println("Time elapsed for writing minimum capacity requirement is")
- println(elapsed_time_min_cap_req)
- end
-
- if setup["MaxCapReq"] == 1 && has_duals(EP) == 1 && output_settings_d["WriteMaxCapReq"]
- elapsed_time_max_cap_req = @elapsed write_maximum_capacity_requirement(path, inputs, setup, EP)
- println("Time elapsed for writing maximum capacity requirement is")
- println(elapsed_time_max_cap_req)
- end
-
- if !isempty(inputs["ELECTROLYZER"]) && has_duals(EP)
- if output_settings_d["WriteHydrogenPrices"]
- elapsed_time_hydrogen_prices = @elapsed write_hydrogen_prices(path, inputs, setup, EP)
- println("Time elapsed for writing hydrogen prices is")
- println(elapsed_time_hydrogen_prices)
- end
- if setup["HydrogenHourlyMatching"] == 1 && output_settings_d["WriteHourlyMatchingPrices"]
- elapsed_time_hourly_matching_prices = @elapsed write_hourly_matching_prices(path, inputs, setup, EP)
- println("Time elapsed for writing hourly matching prices is")
- println(elapsed_time_hourly_matching_prices)
- end
- end
-
- if output_settings_d["WriteNetRevenue"]
- elapsed_time_net_rev = @elapsed write_net_revenue(path, inputs, setup, EP, dfCap, dfESRRev, dfResRevenue, dfChargingcost, dfPower, dfEnergyRevenue, dfSubRevenue, dfRegSubRevenue, dfVreStor, dfOpRegRevenue, dfOpRsvRevenue)
- println("Time elapsed for writing net revenue is")
- println(elapsed_time_net_rev)
- end
- end
- ## Print confirmation
- println("Wrote outputs to $path")
-
- return path
+ if setup["OverwriteResults"] == 1
+ # Overwrite existing results if dir exists
+ # This is the default behaviour when there is no flag, to avoid breaking existing code
+ if !(isdir(path))
+ mkpath(path)
+ end
+ else
+        # Find closest unused output directory name and create it
+ path = choose_output_dir(path)
+ mkpath(path)
+ end
+
+ # https://jump.dev/MathOptInterface.jl/v0.9.10/apireference/#MathOptInterface.TerminationStatusCode
+ status = termination_status(EP)
+
+    ## Check if solved successfully - time out is included
+ if status != MOI.OPTIMAL && status != MOI.LOCALLY_SOLVED
+ if status != MOI.TIME_LIMIT # Model failed to solve, so record solver status and exit
+ write_status(path, inputs, setup, EP)
+ return
+            # Model reached the time limit but failed to find a feasible solution
+ #### Aaron Schwartz - Not sure if the below condition is valid anymore. We should revisit ####
+ elseif isnan(objective_value(EP)) == true
+ # Model failed to solve, so record solver status and exit
+ write_status(path, inputs, setup, EP)
+ return
+ end
+ end
+
+ # Dict containing the list of outputs to write
+ output_settings_d = setup["WriteOutputsSettingsDict"]
+ write_settings_file(path, setup)
+
+ output_settings_d["WriteStatus"] && write_status(path, inputs, setup, EP)
+
+ # linearize and re-solve model if duals are not available but ShadowPrices are requested
+ if !has_duals(EP) && setup["WriteShadowPrices"] == 1
+ # function to fix integers and linearize problem
+ fix_integers(EP)
+ # re-solve statement for LP solution
+ println("Solving LP solution for duals")
+ set_silent(EP)
+ optimize!(EP)
+ end
+
+ if output_settings_d["WriteCosts"]
+ elapsed_time_costs = @elapsed write_costs(path, inputs, setup, EP)
+ println("Time elapsed for writing costs is")
+ println(elapsed_time_costs)
+ end
+
+ if output_settings_d["WriteCapacity"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_capacity = @elapsed dfCap = write_capacity(path, inputs, setup, EP)
+ println("Time elapsed for writing capacity is")
+ println(elapsed_time_capacity)
+ end
+
+ if output_settings_d["WritePower"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_power = @elapsed dfPower = write_power(path, inputs, setup, EP)
+ println("Time elapsed for writing power is")
+ println(elapsed_time_power)
+ end
+
+ if output_settings_d["WriteCharge"]
+ elapsed_time_charge = @elapsed write_charge(path, inputs, setup, EP)
+ println("Time elapsed for writing charge is")
+ println(elapsed_time_charge)
+ end
+
+ if output_settings_d["WriteCapacityFactor"]
+ elapsed_time_capacityfactor = @elapsed write_capacityfactor(path, inputs, setup, EP)
+ println("Time elapsed for writing capacity factor is")
+ println(elapsed_time_capacityfactor)
+ end
+
+ if output_settings_d["WriteStorage"]
+ elapsed_time_storage = @elapsed write_storage(path, inputs, setup, EP)
+ println("Time elapsed for writing storage is")
+ println(elapsed_time_storage)
+ end
+
+ if output_settings_d["WriteCurtailment"]
+ elapsed_time_curtailment = @elapsed write_curtailment(path, inputs, setup, EP)
+ println("Time elapsed for writing curtailment is")
+ println(elapsed_time_curtailment)
+ end
+
+ if output_settings_d["WriteNSE"]
+ elapsed_time_nse = @elapsed write_nse(path, inputs, setup, EP)
+ println("Time elapsed for writing nse is")
+ println(elapsed_time_nse)
+ end
+
+ if output_settings_d["WritePowerBalance"]
+ elapsed_time_power_balance = @elapsed write_power_balance(path, inputs, setup, EP)
+ println("Time elapsed for writing power balance is")
+ println(elapsed_time_power_balance)
+ end
+
+ if inputs["Z"] > 1
+ if output_settings_d["WriteTransmissionFlows"]
+ elapsed_time_flows = @elapsed write_transmission_flows(path, inputs, setup, EP)
+ println("Time elapsed for writing transmission flows is")
+ println(elapsed_time_flows)
+ end
+
+ if output_settings_d["WriteTransmissionLosses"]
+ elapsed_time_losses = @elapsed write_transmission_losses(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing transmission losses is")
+ println(elapsed_time_losses)
+ end
+
+ if setup["NetworkExpansion"] == 1 && output_settings_d["WriteNWExpansion"]
+ elapsed_time_expansion = @elapsed write_nw_expansion(path, inputs, setup, EP)
+ println("Time elapsed for writing network expansion is")
+ println(elapsed_time_expansion)
+ end
+ end
+
+ if output_settings_d["WriteEmissions"]
+ elapsed_time_emissions = @elapsed write_emissions(path, inputs, setup, EP)
+ println("Time elapsed for writing emissions is")
+ println(elapsed_time_emissions)
+ end
+
+ dfVreStor = DataFrame()
+ if !isempty(inputs["VRE_STOR"])
+ if output_settings_d["WriteVREStor"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_vrestor = @elapsed dfVreStor = write_vre_stor(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing vre stor is")
+ println(elapsed_time_vrestor)
+ end
+ VS_LDS = inputs["VS_LDS"]
+ VS_STOR = inputs["VS_STOR"]
+ else
+ VS_LDS = []
+ VS_STOR = []
+ end
+
+ if has_duals(EP) == 1
+ if output_settings_d["WriteReliability"]
+ elapsed_time_reliability = @elapsed write_reliability(path, inputs, setup, EP)
+ println("Time elapsed for writing reliability is")
+ println(elapsed_time_reliability)
+ end
+ if !isempty(inputs["STOR_ALL"]) || !isempty(VS_STOR)
+ if output_settings_d["WriteStorageDual"]
+ elapsed_time_stordual = @elapsed write_storagedual(path, inputs, setup, EP)
+ println("Time elapsed for writing storage duals is")
+ println(elapsed_time_stordual)
+ end
+ end
+ end
+
+ if setup["UCommit"] >= 1
+ if output_settings_d["WriteCommit"]
+ elapsed_time_commit = @elapsed write_commit(path, inputs, setup, EP)
+ println("Time elapsed for writing commitment is")
+ println(elapsed_time_commit)
+ end
+
+ if output_settings_d["WriteStart"]
+ elapsed_time_start = @elapsed write_start(path, inputs, setup, EP)
+ println("Time elapsed for writing startup is")
+ println(elapsed_time_start)
+ end
+
+ if output_settings_d["WriteShutdown"]
+ elapsed_time_shutdown = @elapsed write_shutdown(path, inputs, setup, EP)
+ println("Time elapsed for writing shutdown is")
+ println(elapsed_time_shutdown)
+ end
+
+ if setup["OperationalReserves"] == 1
+ if output_settings_d["WriteReg"]
+ elapsed_time_reg = @elapsed write_reg(path, inputs, setup, EP)
+ println("Time elapsed for writing regulation is")
+ println(elapsed_time_reg)
+ end
+
+ if output_settings_d["WriteRsv"]
+ elapsed_time_rsv = @elapsed write_rsv(path, inputs, setup, EP)
+ println("Time elapsed for writing reserves is")
+ println(elapsed_time_rsv)
+ end
+ end
+ end
+
+    # Output additional variables related to inter-period energy transfer via storage
+ representative_periods = inputs["REP_PERIOD"]
+ if representative_periods > 1 &&
+ (!isempty(inputs["STOR_LONG_DURATION"]) || !isempty(VS_LDS))
+ if output_settings_d["WriteOpWrapLDSStorInit"]
+ elapsed_time_lds_init = @elapsed write_opwrap_lds_stor_init(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing lds init is")
+ println(elapsed_time_lds_init)
+ end
+
+ if output_settings_d["WriteOpWrapLDSdStor"]
+ elapsed_time_lds_dstor = @elapsed write_opwrap_lds_dstor(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing lds dstor is")
+ println(elapsed_time_lds_dstor)
+ end
+ end
+
+ if output_settings_d["WriteFuelConsumption"]
+ elapsed_time_fuel_consumption = @elapsed write_fuel_consumption(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing fuel consumption is")
+ println(elapsed_time_fuel_consumption)
+ end
+
+ if output_settings_d["WriteCO2"]
+ elapsed_time_emissions = @elapsed write_co2(path, inputs, setup, EP)
+ println("Time elapsed for writing co2 is")
+ println(elapsed_time_emissions)
+ end
+
+ if has_maintenance(inputs) && output_settings_d["WriteMaintenance"]
+ write_maintenance(path, inputs, EP)
+ end
+
+    # Write angles when DC_OPF is activated
+ if setup["DC_OPF"] == 1 && output_settings_d["WriteAngles"]
+ elapsed_time_angles = @elapsed write_angles(path, inputs, setup, EP)
+ println("Time elapsed for writing angles is")
+ println(elapsed_time_angles)
+ end
+
+    # Temporary! Suppress these outputs until we know that they are compatible with multi-stage modeling
+ if setup["MultiStage"] == 0
+ dfEnergyRevenue = DataFrame()
+ dfChargingcost = DataFrame()
+ dfSubRevenue = DataFrame()
+ dfRegSubRevenue = DataFrame()
+ if has_duals(EP) == 1
+ if output_settings_d["WritePrice"]
+ elapsed_time_price = @elapsed write_price(path, inputs, setup, EP)
+ println("Time elapsed for writing price is")
+ println(elapsed_time_price)
+ end
+
+ if output_settings_d["WriteEnergyRevenue"] ||
+ output_settings_d["WriteNetRevenue"]
+ elapsed_time_energy_rev = @elapsed dfEnergyRevenue = write_energy_revenue(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing energy revenue is")
+ println(elapsed_time_energy_rev)
+ end
+
+ if output_settings_d["WriteChargingCost"] ||
+ output_settings_d["WriteNetRevenue"]
+ elapsed_time_charging_cost = @elapsed dfChargingcost = write_charging_cost(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing charging cost is")
+ println(elapsed_time_charging_cost)
+ end
+
+ if output_settings_d["WriteSubsidyRevenue"] ||
+ output_settings_d["WriteNetRevenue"]
+ elapsed_time_subsidy = @elapsed dfSubRevenue, dfRegSubRevenue = write_subsidy_revenue(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing subsidy is")
+ println(elapsed_time_subsidy)
+ end
+ end
+
+ if output_settings_d["WriteTimeWeights"]
+ elapsed_time_time_weights = @elapsed write_time_weights(path, inputs)
+ println("Time elapsed for writing time weights is")
+ println(elapsed_time_time_weights)
+ end
+
+ dfESRRev = DataFrame()
+ if setup["EnergyShareRequirement"] == 1 && has_duals(EP)
+ dfESR = DataFrame()
+ if output_settings_d["WriteESRPrices"] ||
+ output_settings_d["WriteESRRevenue"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_esr_prices = @elapsed dfESR = write_esr_prices(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing esr prices is")
+ println(elapsed_time_esr_prices)
+ end
+
+ if output_settings_d["WriteESRRevenue"] || output_settings_d["WriteNetRevenue"]
+ elapsed_time_esr_revenue = @elapsed dfESRRev = write_esr_revenue(path,
+ inputs,
+ setup,
+ dfPower,
+ dfESR,
+ EP)
+ println("Time elapsed for writing esr revenue is")
+ println(elapsed_time_esr_revenue)
+ end
+ end
+
+ dfResRevenue = DataFrame()
+ if setup["CapacityReserveMargin"] == 1 && has_duals(EP)
+ if output_settings_d["WriteReserveMargin"]
+ elapsed_time_reserve_margin = @elapsed write_reserve_margin(path, setup, EP)
+ println("Time elapsed for writing reserve margin is")
+ println(elapsed_time_reserve_margin)
+ end
+
+ if output_settings_d["WriteReserveMarginWithWeights"]
+ elapsed_time_rsv_margin_w = @elapsed write_reserve_margin_w(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing reserve margin with weights is")
+ println(elapsed_time_rsv_margin_w)
+ end
+
+ if output_settings_d["WriteVirtualDischarge"]
+ elapsed_time_virtual_discharge = @elapsed write_virtual_discharge(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing virtual discharge is")
+ println(elapsed_time_virtual_discharge)
+ end
+
+ if output_settings_d["WriteReserveMarginRevenue"] ||
+ output_settings_d["WriteNetRevenue"]
+ elapsed_time_res_rev = @elapsed dfResRevenue = write_reserve_margin_revenue(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing reserve revenue is")
+ println(elapsed_time_res_rev)
+ end
+
+ if haskey(inputs, "dfCapRes_slack") &&
+ output_settings_d["WriteReserveMarginSlack"]
+ elapsed_time_rsv_slack = @elapsed write_reserve_margin_slack(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing reserve margin slack is")
+ println(elapsed_time_rsv_slack)
+ end
+
+ if output_settings_d["WriteCapacityValue"]
+ elapsed_time_cap_value = @elapsed write_capacity_value(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing capacity value is")
+ println(elapsed_time_cap_value)
+ end
+ end
+
+ dfOpRegRevenue = DataFrame()
+ dfOpRsvRevenue = DataFrame()
+ if setup["OperationalReserves"] == 1 && has_duals(EP)
+ elapsed_time_op_res_rev = @elapsed dfOpRegRevenue, dfOpRsvRevenue = write_operating_reserve_regulation_revenue(path,
+ inputs,
+ setup,
+ EP)
+            println("Time elapsed for writing operating reserve and regulation revenue is")
+ println(elapsed_time_op_res_rev)
+ end
+
+ if setup["CO2Cap"] > 0 && has_duals(EP) == 1 && output_settings_d["WriteCO2Cap"]
+ elapsed_time_co2_cap = @elapsed write_co2_cap(path, inputs, setup, EP)
+ println("Time elapsed for writing co2 cap is")
+ println(elapsed_time_co2_cap)
+ end
+ if setup["MinCapReq"] == 1 && has_duals(EP) == 1 &&
+ output_settings_d["WriteMinCapReq"]
+ elapsed_time_min_cap_req = @elapsed write_minimum_capacity_requirement(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing minimum capacity requirement is")
+ println(elapsed_time_min_cap_req)
+ end
+
+ if setup["MaxCapReq"] == 1 && has_duals(EP) == 1 &&
+ output_settings_d["WriteMaxCapReq"]
+ elapsed_time_max_cap_req = @elapsed write_maximum_capacity_requirement(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing maximum capacity requirement is")
+ println(elapsed_time_max_cap_req)
+ end
+
+ if !isempty(inputs["ELECTROLYZER"]) && has_duals(EP)
+ if output_settings_d["WriteHydrogenPrices"]
+ elapsed_time_hydrogen_prices = @elapsed write_hydrogen_prices(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing hydrogen prices is")
+ println(elapsed_time_hydrogen_prices)
+ end
+ if setup["HydrogenHourlyMatching"] == 1 &&
+ output_settings_d["WriteHourlyMatchingPrices"]
+ elapsed_time_hourly_matching_prices = @elapsed write_hourly_matching_prices(path,
+ inputs,
+ setup,
+ EP)
+ println("Time elapsed for writing hourly matching prices is")
+ println(elapsed_time_hourly_matching_prices)
+ end
+ end
+
+ if output_settings_d["WriteNetRevenue"]
+ elapsed_time_net_rev = @elapsed write_net_revenue(path,
+ inputs,
+ setup,
+ EP,
+ dfCap,
+ dfESRRev,
+ dfResRevenue,
+ dfChargingcost,
+ dfPower,
+ dfEnergyRevenue,
+ dfSubRevenue,
+ dfRegSubRevenue,
+ dfVreStor,
+ dfOpRegRevenue,
+ dfOpRsvRevenue)
+ println("Time elapsed for writing net revenue is")
+ println(elapsed_time_net_rev)
+ end
+ end
+ ## Print confirmation
+ println("Wrote outputs to $path")
+
+ return path
end # END output()
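# Editorial sketch (not part of the patch): the pattern write_outputs repeats for every
# file is gate on a Write* flag, time the writer with @elapsed, then print the seconds.
# The flag name and writer below are hypothetical stand-ins for the real ones above.
output_settings_example = Dict("WriteExample" => true)
write_example(path) = open(io -> println(io, "ok"), joinpath(path, "example.csv"), "w")
if output_settings_example["WriteExample"]
    elapsed_time_example = @elapsed write_example(mktempdir())
    println("Time elapsed for writing example is")
    println(elapsed_time_example)
end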
-
"""
write_annual(fullpath::AbstractString, dfOut::DataFrame)
Internal function for writing annual outputs.
"""
function write_annual(fullpath::AbstractString, dfOut::DataFrame)
- push!(dfOut, ["Total" 0 sum(dfOut[!, :AnnualSum])])
- CSV.write(fullpath, dfOut)
- return nothing
+ push!(dfOut, ["Total" 0 sum(dfOut[!, :AnnualSum])])
+ CSV.write(fullpath, dfOut)
+ return nothing
end
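# Editorial sketch of the three-column frame write_annual expects (Resource, Zone,
# AnnualSum), so the pushed "Total" row lines up. Resource names and values are invented.
using DataFrames, CSV
df_example = DataFrame(Resource = ["solar", "wind"], Zone = [1, 1], AnnualSum = [100.0, 250.0])
push!(df_example, ["Total", 0, sum(df_example[!, :AnnualSum])])  # mirrors the push! above
CSV.write(joinpath(mktempdir(), "example_annual.csv"), df_example)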
"""
@@ -401,16 +483,23 @@ end
Internal function for writing full time series outputs. It wraps the common steps for creating the full time series output files.
"""
-function write_fulltimeseries(fullpath::AbstractString, dataOut::Matrix{Float64}, dfOut::DataFrame)
- T = size(dataOut, 2)
- dfOut = hcat(dfOut, DataFrame(dataOut, :auto))
- auxNew_Names = [Symbol("Resource");Symbol("Zone");Symbol("AnnualSum");[Symbol("t$t") for t in 1:T]]
- rename!(dfOut, auxNew_Names)
- total = DataFrame(["Total" 0 sum(dfOut[!, :AnnualSum]) fill(0.0, (1, T))], auxNew_Names)
- total[!, 4:T+3] .= sum(dataOut, dims=1)
- dfOut = vcat(dfOut, total)
- CSV.write(fullpath, dftranspose(dfOut, false), writeheader=false)
- return nothing
+function write_fulltimeseries(fullpath::AbstractString,
+ dataOut::Matrix{Float64},
+ dfOut::DataFrame)
+ T = size(dataOut, 2)
+ dfOut = hcat(dfOut, DataFrame(dataOut, :auto))
+ auxNew_Names = [Symbol("Resource");
+ Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfOut, auxNew_Names)
+ total = DataFrame(["Total" 0 sum(dfOut[!, :AnnualSum]) fill(0.0, (1, T))], auxNew_Names)
+ total[!, 4:(T + 3)] .= sum(dataOut, dims = 1)
+ dfOut = vcat(dfOut, total)
+ CSV.write(fullpath, dftranspose(dfOut, false), writeheader = false)
+ return nothing
end
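# Editorial sketch of the wide table write_fulltimeseries assembles before transposing:
# one row per resource with columns Resource, Zone, AnnualSum, t1..tT. Data are invented.
using DataFrames
data_example = [1.0 2.0 3.0; 4.0 5.0 6.0]  # 2 resources x T = 3 time steps
df_full = DataFrame(Resource = ["gas", "hydro"], Zone = [1, 2],
    AnnualSum = vec(sum(data_example, dims = 2)))
df_full = hcat(df_full, DataFrame(data_example, :auto))
rename!(df_full,
    [Symbol("Resource"); Symbol("Zone"); Symbol("AnnualSum");
     [Symbol("t$t") for t in 1:size(data_example, 2)]])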
-write_settings_file(path, setup) = YAML.write_file(joinpath(path, "run_settings.yml"), setup)
+function write_settings_file(path, setup)
+ YAML.write_file(joinpath(path, "run_settings.yml"), setup)
+end
diff --git a/src/write_outputs/write_power.jl b/src/write_outputs/write_power.jl
index 3be5e83bf3..30e14048be 100644
--- a/src/write_outputs/write_power.jl
+++ b/src/write_outputs/write_power.jl
@@ -4,26 +4,28 @@
Function for writing the power generated by each technology in operation.
"""
function write_power(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
- # Power injected by each resource in each time step
- dfPower = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones, AnnualSum = Array{Union{Missing,Float64}}(undef, G))
- power = value.(EP[:vP])
- if setup["ParameterScale"] == 1
- power *= ModelScalingFactor
- end
- dfPower.AnnualSum .= power * inputs["omega"]
+ # Power injected by each resource in each time step
+ dfPower = DataFrame(Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ AnnualSum = Array{Union{Missing, Float64}}(undef, G))
+ power = value.(EP[:vP])
+ if setup["ParameterScale"] == 1
+ power *= ModelScalingFactor
+ end
+ dfPower.AnnualSum .= power * inputs["omega"]
- filepath = joinpath(path, "power.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfPower)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, power, dfPower)
- end
+ filepath = joinpath(path, "power.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfPower)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, power, dfPower)
+ end
- return dfPower #Shouldn't this be return nothing
+ return dfPower #Shouldn't this be return nothing
end
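# Editorial sketch: dfPower.AnnualSum above is a weighted sum over time, i.e. the G x T
# power matrix times the length-T vector of hour weights omega. Numbers are invented.
power_example = [1.0 2.0; 3.0 4.0]                  # 2 resources x 2 representative hours
omega_example = [4380.0, 4380.0]                    # hours of the year carried by each time step
annual_sum_example = power_example * omega_example  # MWh per resource over the modeled year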
diff --git a/src/write_outputs/write_power_balance.jl b/src/write_outputs/write_power_balance.jl
index 627f3a4821..4661fb3faa 100644
--- a/src/write_outputs/write_power_balance.jl
+++ b/src/write_outputs/write_power_balance.jl
@@ -1,71 +1,94 @@
function write_power_balance(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
- SEG = inputs["SEG"] # Number of demand curtailment segments
- THERM_ALL = inputs["THERM_ALL"]
- VRE = inputs["VRE"]
- MUST_RUN = inputs["MUST_RUN"]
- HYDRO_RES = inputs["HYDRO_RES"]
- STOR_ALL = inputs["STOR_ALL"]
- FLEX = inputs["FLEX"]
- ELECTROLYZER = inputs["ELECTROLYZER"]
- VRE_STOR = inputs["VRE_STOR"]
- Com_list = ["Generation", "Storage_Discharge", "Storage_Charge",
- "Flexible_Demand_Defer", "Flexible_Demand_Stasify",
- "Demand_Response", "Nonserved_Energy",
- "Transmission_NetExport", "Transmission_Losses",
- "Demand"]
- if !isempty(ELECTROLYZER)
- push!(Com_list, "Electrolyzer_Consumption")
- end
- L = length(Com_list)
- dfPowerBalance = DataFrame(BalanceComponent = repeat(Com_list, outer = Z), Zone = repeat(1:Z, inner = L), AnnualSum = zeros(L * Z))
- powerbalance = zeros(Z * L, T) # following the same style of power/charge/storage/nse
- for z in 1:Z
- POWER_ZONE = intersect(resources_in_zone_by_rid(gen,z), union(THERM_ALL, VRE, MUST_RUN, HYDRO_RES))
- powerbalance[(z-1)*L+1, :] = sum(value.(EP[:vP][POWER_ZONE, :]), dims = 1)
- if !isempty(intersect(resources_in_zone_by_rid(gen,z), STOR_ALL))
- STOR_ALL_ZONE = intersect(resources_in_zone_by_rid(gen,z), STOR_ALL)
- powerbalance[(z-1)*L+2, :] = sum(value.(EP[:vP][STOR_ALL_ZONE, :]), dims = 1)
- powerbalance[(z-1)*L+3, :] = (-1) * sum((value.(EP[:vCHARGE][STOR_ALL_ZONE, :]).data), dims = 1)
- end
- if !isempty(intersect(resources_in_zone_by_rid(gen,z), VRE_STOR))
- VS_ALL_ZONE = intersect(resources_in_zone_by_rid(gen,z), inputs["VS_STOR"])
- powerbalance[(z-1)*L+2, :] = sum(value.(EP[:vP][VS_ALL_ZONE, :]), dims = 1)
- powerbalance[(z-1)*L+3, :] = (-1) * sum(value.(EP[:vCHARGE_VRE_STOR][VS_ALL_ZONE, :]).data, dims=1)
- end
- if !isempty(intersect(resources_in_zone_by_rid(gen,z), FLEX))
- FLEX_ZONE = intersect(resources_in_zone_by_rid(gen,z), FLEX)
- powerbalance[(z-1)*L+4, :] = sum((value.(EP[:vCHARGE_FLEX][FLEX_ZONE, :]).data), dims = 1)
- powerbalance[(z-1)*L+5, :] = (-1) * sum(value.(EP[:vP][FLEX_ZONE, :]), dims = 1)
- end
- if SEG > 1
- powerbalance[(z-1)*L+6, :] = sum(value.(EP[:vNSE][2:SEG, :, z]), dims = 1)
- end
- powerbalance[(z-1)*L+7, :] = value.(EP[:vNSE][1, :, z])
- if Z >= 2
- powerbalance[(z-1)*L+8, :] = (value.(EP[:ePowerBalanceNetExportFlows][:, z]))' # Transpose
- powerbalance[(z-1)*L+9, :] = -(value.(EP[:eLosses_By_Zone][z, :]))
- end
- powerbalance[(z-1)*L+10, :] = (((-1) * inputs["pD"][:, z]))' # Transpose
- if !isempty(ELECTROLYZER)
- ELECTROLYZER_ZONE = intersect(resources_in_zone_by_rid(gen,z), ELECTROLYZER)
- powerbalance[(z-1)*L+11, :] = (-1) * sum(value.(EP[:vUSE][ELECTROLYZER_ZONE, :].data), dims = 1)
- end
- end
- if setup["ParameterScale"] == 1
- powerbalance *= ModelScalingFactor
- end
- dfPowerBalance.AnnualSum .= powerbalance * inputs["omega"]
+ gen = inputs["RESOURCES"]
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
+ SEG = inputs["SEG"] # Number of demand curtailment segments
+ THERM_ALL = inputs["THERM_ALL"]
+ VRE = inputs["VRE"]
+ MUST_RUN = inputs["MUST_RUN"]
+ HYDRO_RES = inputs["HYDRO_RES"]
+ STOR_ALL = inputs["STOR_ALL"]
+ FLEX = inputs["FLEX"]
+ ELECTROLYZER = inputs["ELECTROLYZER"]
+ VRE_STOR = inputs["VRE_STOR"]
+ Com_list = ["Generation", "Storage_Discharge", "Storage_Charge",
+ "Flexible_Demand_Defer", "Flexible_Demand_Stasify",
+ "Demand_Response", "Nonserved_Energy",
+ "Transmission_NetExport", "Transmission_Losses",
+ "Demand"]
+ if !isempty(ELECTROLYZER)
+ push!(Com_list, "Electrolyzer_Consumption")
+ end
+ L = length(Com_list)
+ dfPowerBalance = DataFrame(BalanceComponent = repeat(Com_list, outer = Z),
+ Zone = repeat(1:Z, inner = L),
+ AnnualSum = zeros(L * Z))
+    powerbalance = zeros(Z * L, T) # following the same style as power/charge/storage/nse
+ for z in 1:Z
+ POWER_ZONE = intersect(resources_in_zone_by_rid(gen, z),
+ union(THERM_ALL, VRE, MUST_RUN, HYDRO_RES))
+ powerbalance[(z - 1) * L + 1, :] = sum(value.(EP[:vP][POWER_ZONE, :]), dims = 1)
+ if !isempty(intersect(resources_in_zone_by_rid(gen, z), STOR_ALL))
+ STOR_ALL_ZONE = intersect(resources_in_zone_by_rid(gen, z), STOR_ALL)
+ powerbalance[(z - 1) * L + 2, :] = sum(value.(EP[:vP][STOR_ALL_ZONE, :]),
+ dims = 1)
+ powerbalance[(z - 1) * L + 3, :] = (-1) *
+ sum((value.(EP[:vCHARGE][STOR_ALL_ZONE,
+ :]).data),
+ dims = 1)
+ end
+ if !isempty(intersect(resources_in_zone_by_rid(gen, z), VRE_STOR))
+ VS_ALL_ZONE = intersect(resources_in_zone_by_rid(gen, z), inputs["VS_STOR"])
+ powerbalance[(z - 1) * L + 2, :] = sum(value.(EP[:vP][VS_ALL_ZONE, :]),
+ dims = 1)
+ powerbalance[(z - 1) * L + 3, :] = (-1) *
+ sum(value.(EP[:vCHARGE_VRE_STOR][VS_ALL_ZONE,
+ :]).data,
+ dims = 1)
+ end
+ if !isempty(intersect(resources_in_zone_by_rid(gen, z), FLEX))
+ FLEX_ZONE = intersect(resources_in_zone_by_rid(gen, z), FLEX)
+ powerbalance[(z - 1) * L + 4, :] = sum((value.(EP[:vCHARGE_FLEX][FLEX_ZONE,
+ :]).data),
+ dims = 1)
+ powerbalance[(z - 1) * L + 5, :] = (-1) *
+ sum(value.(EP[:vP][FLEX_ZONE, :]), dims = 1)
+ end
+ if SEG > 1
+ powerbalance[(z - 1) * L + 6, :] = sum(value.(EP[:vNSE][2:SEG, :, z]), dims = 1)
+ end
+ powerbalance[(z - 1) * L + 7, :] = value.(EP[:vNSE][1, :, z])
+ if Z >= 2
+ powerbalance[(z - 1) * L + 8, :] = (value.(EP[:ePowerBalanceNetExportFlows][:,
+ z]))' # Transpose
+ powerbalance[(z - 1) * L + 9, :] = -(value.(EP[:eLosses_By_Zone][z, :]))
+ end
+ powerbalance[(z - 1) * L + 10, :] = (((-1) * inputs["pD"][:, z]))' # Transpose
+ if !isempty(ELECTROLYZER)
+ ELECTROLYZER_ZONE = intersect(resources_in_zone_by_rid(gen, z), ELECTROLYZER)
+ powerbalance[(z - 1) * L + 11, :] = (-1) *
+ sum(value.(EP[:vUSE][ELECTROLYZER_ZONE,
+ :].data),
+ dims = 1)
+ end
+ end
+ if setup["ParameterScale"] == 1
+ powerbalance *= ModelScalingFactor
+ end
+ dfPowerBalance.AnnualSum .= powerbalance * inputs["omega"]
- if setup["WriteOutputs"] == "annual"
- CSV.write(joinpath(path, "power_balance.csv"), dfPowerBalance)
- else # setup["WriteOutputs"] == "full"
- dfPowerBalance = hcat(dfPowerBalance, DataFrame(powerbalance, :auto))
- auxNew_Names = [Symbol("BalanceComponent"); Symbol("Zone"); Symbol("AnnualSum"); [Symbol("t$t") for t in 1:T]]
- rename!(dfPowerBalance,auxNew_Names)
- CSV.write(joinpath(path, "power_balance.csv"), dftranspose(dfPowerBalance, false), writeheader=false)
- end
- return nothing
+ if setup["WriteOutputs"] == "annual"
+ CSV.write(joinpath(path, "power_balance.csv"), dfPowerBalance)
+ else # setup["WriteOutputs"] == "full"
+ dfPowerBalance = hcat(dfPowerBalance, DataFrame(powerbalance, :auto))
+ auxNew_Names = [Symbol("BalanceComponent");
+ Symbol("Zone");
+ Symbol("AnnualSum");
+ [Symbol("t$t") for t in 1:T]]
+ rename!(dfPowerBalance, auxNew_Names)
+ CSV.write(joinpath(path, "power_balance.csv"),
+ dftranspose(dfPowerBalance, false),
+ writeheader = false)
+ end
+ return nothing
end
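# Editorial sketch of the row layout used above: the powerbalance matrix stacks L balance
# components per zone, so component k of zone z sits in row (z - 1) * L + k (toy sizes).
L_example, Z_example = 2, 3
rows_example = [(z - 1) * L_example + k for z in 1:Z_example, k in 1:L_example]
@assert sort(vec(rows_example)) == collect(1:(Z_example * L_example))  # each row used once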
diff --git a/src/write_outputs/write_price.jl b/src/write_outputs/write_price.jl
index 7d240a0f02..05943c8fa8 100644
--- a/src/write_outputs/write_price.jl
+++ b/src/write_outputs/write_price.jl
@@ -4,22 +4,24 @@
Function for reporting the marginal electricity price for each model zone and time step. The marginal electricity price is equal to the dual variable of the power balance constraint. If GenX is configured as a mixed integer linear program, then this output is only generated if the `WriteShadowPrices` flag is activated. If configured as a linear program (i.e., linearized unit commitment or economic dispatch), then this output is automatically available.
"""
function write_price(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- ## Extract dual variables of constraints
- # Electricity price: Dual variable of hourly power balance constraint = hourly price
- dfPrice = DataFrame(Zone = 1:Z) # The unit is $/MWh
- # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of generation
+ ## Extract dual variables of constraints
+ # Electricity price: Dual variable of hourly power balance constraint = hourly price
+ dfPrice = DataFrame(Zone = 1:Z) # The unit is $/MWh
+ # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of generation
price = locational_marginal_price(EP, inputs, setup)
- dfPrice = hcat(dfPrice, DataFrame(transpose(price), :auto))
+ dfPrice = hcat(dfPrice, DataFrame(transpose(price), :auto))
- auxNew_Names=[Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfPrice,auxNew_Names)
+ auxNew_Names = [Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfPrice, auxNew_Names)
- ## Linear configuration final output
- CSV.write(joinpath(path, "prices.csv"), dftranspose(dfPrice, false), writeheader=false)
- return nothing
+ ## Linear configuration final output
+ CSV.write(joinpath(path, "prices.csv"),
+ dftranspose(dfPrice, false),
+ writeheader = false)
+ return nothing
end
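# Editorial sketch of the relationship the docstring states: the hourly price is the dual
# of the power balance constraint divided by that hour's weight omega. Toy numbers only;
# locational_marginal_price performs this for every zone and time step.
balance_duals_example = [500.0, 750.0, 600.0]  # duals for three representative hours
omega_weights_example = [10.0, 15.0, 12.0]     # weight (hours represented) of each step
price_example = balance_duals_example ./ omega_weights_example  # $/MWh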
@doc raw"""
diff --git a/src/write_outputs/write_reliability.jl b/src/write_outputs/write_reliability.jl
index ce5cd34efd..876465f8b7 100644
--- a/src/write_outputs/write_reliability.jl
+++ b/src/write_outputs/write_reliability.jl
@@ -4,18 +4,20 @@
Function for reporting the dual variable of the maximum non-served energy constraint (the shadow price of the reliability constraint) for each model zone and time step.
"""
function write_reliability(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- T = inputs["T"] # Number of time steps (hours)
- Z = inputs["Z"] # Number of zones
+ T = inputs["T"] # Number of time steps (hours)
+ Z = inputs["Z"] # Number of zones
- # reliability: Dual variable of maximum NSE constraint = shadow value of reliability constraint
- dfReliability = DataFrame(Zone = 1:Z)
- # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of generation
- scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
- dfReliability = hcat(dfReliability, DataFrame(transpose(dual.(EP[:cMaxNSE])./inputs["omega"]*scale_factor), :auto))
+ # reliability: Dual variable of maximum NSE constraint = shadow value of reliability constraint
+ dfReliability = DataFrame(Zone = 1:Z)
+ # Dividing dual variable for each hour with corresponding hourly weight to retrieve marginal cost of generation
+ scale_factor = setup["ParameterScale"] == 1 ? ModelScalingFactor : 1
+ dfReliability = hcat(dfReliability,
+ DataFrame(transpose(dual.(EP[:cMaxNSE]) ./ inputs["omega"] * scale_factor), :auto))
- auxNew_Names=[Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfReliability,auxNew_Names)
-
- CSV.write(joinpath(path, "reliability.csv"), dftranspose(dfReliability, false), header=false)
+ auxNew_Names = [Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfReliability, auxNew_Names)
+ CSV.write(joinpath(path, "reliability.csv"),
+ dftranspose(dfReliability, false),
+ header = false)
end
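# Editorial sketch: write_reliability rescales the cMaxNSE duals the same way, dividing by
# the hourly weight and, when ParameterScale == 1, multiplying by ModelScalingFactor
# (assumed here to be 1e3). All values below are invented.
scale_factor_example = 1e3
nse_duals_example = [0.2, 0.0, 0.5]
omega_example_rel = [10.0, 10.0, 12.0]
reliability_price_example = nse_duals_example ./ omega_example_rel .* scale_factor_example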
diff --git a/src/write_outputs/write_status.jl b/src/write_outputs/write_status.jl
index 32b6eee760..8558a21a50 100644
--- a/src/write_outputs/write_status.jl
+++ b/src/write_outputs/write_status.jl
@@ -5,16 +5,17 @@ Function for writing the final solve status of the optimization problem solved.
"""
function write_status(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- # https://jump.dev/MathOptInterface.jl/v0.9.10/apireference/#MathOptInterface.TerminationStatusCode
- status = termination_status(EP)
+ # https://jump.dev/MathOptInterface.jl/v0.9.10/apireference/#MathOptInterface.TerminationStatusCode
+ status = termination_status(EP)
- # Note: Gurobi excludes constants from solver reported objective function value - MIPGap calculated may be erroneous
- if (setup["UCommit"] == 0 || setup["UCommit"] == 2)
- dfStatus = DataFrame(Status = status, Solve = inputs["solve_time"],
- Objval = objective_value(EP))
- else
- dfStatus = DataFrame(Status = status, Solve = inputs["solve_time"],
- Objval = objective_value(EP), Objbound= objective_bound(EP),FinalMIPGap =(objective_value(EP) -objective_bound(EP))/objective_value(EP) )
- end
- CSV.write(joinpath(path, "status.csv"),dfStatus)
+    # Note: Gurobi excludes constants from the solver-reported objective function value, so the calculated MIPGap may be erroneous
+ if (setup["UCommit"] == 0 || setup["UCommit"] == 2)
+ dfStatus = DataFrame(Status = status, Solve = inputs["solve_time"],
+ Objval = objective_value(EP))
+ else
+ dfStatus = DataFrame(Status = status, Solve = inputs["solve_time"],
+ Objval = objective_value(EP), Objbound = objective_bound(EP),
+ FinalMIPGap = (objective_value(EP) - objective_bound(EP)) / objective_value(EP))
+ end
+ CSV.write(joinpath(path, "status.csv"), dfStatus)
end
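# Editorial sketch of the FinalMIPGap column written above:
# (objective_value - objective_bound) / objective_value, shown with made-up numbers.
objval_example, objbound_example = 1.05e6, 1.02e6
final_mip_gap_example = (objval_example - objbound_example) / objval_example  # ~2.9% gap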
diff --git a/src/write_outputs/write_storage.jl b/src/write_outputs/write_storage.jl
index b8d2167dba..a34c470108 100644
--- a/src/write_outputs/write_storage.jl
+++ b/src/write_outputs/write_storage.jl
@@ -3,40 +3,40 @@
Function for writing the storage level (state of charge) of each storage technology in each time step, including hydro reservoirs and flexible demand resources.
"""
-function write_storage(path::AbstractString, inputs::Dict,setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
+function write_storage(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
- T = inputs["T"] # Number of time steps (hours)
- G = inputs["G"]
- STOR_ALL = inputs["STOR_ALL"]
- HYDRO_RES = inputs["HYDRO_RES"]
- FLEX = inputs["FLEX"]
- VRE_STOR = inputs["VRE_STOR"]
- VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
-
- # Storage level (state of charge) of each resource in each time step
- dfStorage = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
- storagevcapvalue = zeros(G,T)
+ T = inputs["T"] # Number of time steps (hours)
+ G = inputs["G"]
+ STOR_ALL = inputs["STOR_ALL"]
+ HYDRO_RES = inputs["HYDRO_RES"]
+ FLEX = inputs["FLEX"]
+ VRE_STOR = inputs["VRE_STOR"]
+ VS_STOR = !isempty(VRE_STOR) ? inputs["VS_STOR"] : []
- if !isempty(inputs["STOR_ALL"])
- storagevcapvalue[STOR_ALL, :] = value.(EP[:vS][STOR_ALL, :])
- end
- if !isempty(inputs["HYDRO_RES"])
- storagevcapvalue[HYDRO_RES, :] = value.(EP[:vS_HYDRO][HYDRO_RES, :])
- end
- if !isempty(inputs["FLEX"])
- storagevcapvalue[FLEX, :] = value.(EP[:vS_FLEX][FLEX, :])
- end
- if !isempty(VS_STOR)
- storagevcapvalue[VS_STOR, :] = value.(EP[:vS_VRE_STOR][VS_STOR, :])
- end
- if setup["ParameterScale"] == 1
- storagevcapvalue *= ModelScalingFactor
- end
+ # Storage level (state of charge) of each resource in each time step
+ dfStorage = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
+ storagevcapvalue = zeros(G, T)
- dfStorage = hcat(dfStorage, DataFrame(storagevcapvalue, :auto))
- auxNew_Names=[Symbol("Resource");Symbol("Zone");[Symbol("t$t") for t in 1:T]]
- rename!(dfStorage,auxNew_Names)
- CSV.write(joinpath(path, "storage.csv"), dftranspose(dfStorage, false), header=false)
+ if !isempty(inputs["STOR_ALL"])
+ storagevcapvalue[STOR_ALL, :] = value.(EP[:vS][STOR_ALL, :])
+ end
+ if !isempty(inputs["HYDRO_RES"])
+ storagevcapvalue[HYDRO_RES, :] = value.(EP[:vS_HYDRO][HYDRO_RES, :])
+ end
+ if !isempty(inputs["FLEX"])
+ storagevcapvalue[FLEX, :] = value.(EP[:vS_FLEX][FLEX, :])
+ end
+ if !isempty(VS_STOR)
+ storagevcapvalue[VS_STOR, :] = value.(EP[:vS_VRE_STOR][VS_STOR, :])
+ end
+ if setup["ParameterScale"] == 1
+ storagevcapvalue *= ModelScalingFactor
+ end
+
+ dfStorage = hcat(dfStorage, DataFrame(storagevcapvalue, :auto))
+ auxNew_Names = [Symbol("Resource"); Symbol("Zone"); [Symbol("t$t") for t in 1:T]]
+ rename!(dfStorage, auxNew_Names)
+ CSV.write(joinpath(path, "storage.csv"), dftranspose(dfStorage, false), header = false)
end
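# Editorial sketch of the fill-by-index pattern used above: one zeros(G, T) matrix is
# filled from several families of state-of-charge variables, each indexed by its own
# resource id set. Ids and values below are invented stand-ins.
G_example, T_example = 4, 3
STOR_IDS, HYDRO_IDS = [1, 2], [3]
soc_example = zeros(G_example, T_example)
soc_example[STOR_IDS, :] .= 5.0   # stands in for value.(EP[:vS][STOR_ALL, :])
soc_example[HYDRO_IDS, :] .= 2.0  # stands in for value.(EP[:vS_HYDRO][HYDRO_RES, :])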
diff --git a/src/write_outputs/write_storagedual.jl b/src/write_outputs/write_storagedual.jl
index 53a99f9603..a30414d4e7 100644
--- a/src/write_outputs/write_storagedual.jl
+++ b/src/write_outputs/write_storagedual.jl
@@ -4,60 +4,71 @@
Function for reporting the dual of the storage level (state of charge) balance constraint for each resource in each time step.
"""
function write_storagedual(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- zones = zone_id.(gen)
-
- G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
- T = inputs["T"] # Number of time steps (hours)
-
- START_SUBPERIODS = inputs["START_SUBPERIODS"]
- INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
- REP_PERIOD = inputs["REP_PERIOD"]
- STOR_ALL = inputs["STOR_ALL"]
- VRE_STOR = inputs["VRE_STOR"]
- if !isempty(VRE_STOR)
- VS_STOR = inputs["VS_STOR"]
- VS_LDS = inputs["VS_LDS"]
- VS_NONLDS = setdiff(VS_STOR, VS_LDS)
- end
-
- # # Dual of storage level (state of charge) balance of each resource in each time step
- dfStorageDual = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
- dual_values = zeros(G, T)
-
- # Loop over W separately hours_per_subperiod
- if !isempty(STOR_ALL)
- STOR_ALL_NONLDS = setdiff(STOR_ALL, inputs["STOR_LONG_DURATION"])
- STOR_ALL_LDS = intersect(STOR_ALL, inputs["STOR_LONG_DURATION"])
- dual_values[STOR_ALL, INTERIOR_SUBPERIODS] = (dual.(EP[:cSoCBalInterior][INTERIOR_SUBPERIODS, STOR_ALL]).data ./ inputs["omega"][INTERIOR_SUBPERIODS])'
- dual_values[STOR_ALL_NONLDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalStart][START_SUBPERIODS, STOR_ALL_NONLDS]).data ./ inputs["omega"][START_SUBPERIODS])'
- if !isempty(STOR_ALL_LDS)
- if inputs["REP_PERIOD"] > 1
- dual_values[STOR_ALL_LDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalLongDurationStorageStart][1:REP_PERIOD, STOR_ALL_LDS]).data ./ inputs["omega"][START_SUBPERIODS])'
- else
- dual_values[STOR_ALL_LDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalStart][START_SUBPERIODS, STOR_ALL_LDS]).data ./ inputs["omega"][START_SUBPERIODS])'
- end
- end
- end
-
- if !isempty(VRE_STOR)
- dual_values[VS_STOR, INTERIOR_SUBPERIODS] = ((dual.(EP[:cSoCBalInterior_VRE_STOR][VS_STOR, INTERIOR_SUBPERIODS]).data)' ./ inputs["omega"][INTERIOR_SUBPERIODS])'
- dual_values[VS_NONLDS, START_SUBPERIODS] = ((dual.(EP[:cSoCBalStart_VRE_STOR][VS_NONLDS, START_SUBPERIODS]).data)' ./ inputs["omega"][START_SUBPERIODS])'
- if !isempty(VS_LDS)
- if inputs["REP_PERIOD"] > 1
- dual_values[VS_LDS, START_SUBPERIODS] = ((dual.(EP[:cVreStorSoCBalLongDurationStorageStart][VS_LDS, 1:REP_PERIOD]).data)' ./ inputs["omega"][START_SUBPERIODS])'
- else
- dual_values[VS_LDS, START_SUBPERIODS] = ((dual.(EP[:cSoCBalStart_VRE_STOR][VS_LDS, START_SUBPERIODS]).data)' ./ inputs["omega"][START_SUBPERIODS])'
- end
- end
- end
-
- if setup["ParameterScale"] == 1
- dual_values *= ModelScalingFactor
- end
-
- dfStorageDual=hcat(dfStorageDual, DataFrame(dual_values, :auto))
- rename!(dfStorageDual,[Symbol("Resource");Symbol("Zone");[Symbol("t$t") for t in 1:T]])
-
- CSV.write(joinpath(path, "storagebal_duals.csv"), dftranspose(dfStorageDual, false), header=false)
+ gen = inputs["RESOURCES"]
+ zones = zone_id.(gen)
+
+ G = inputs["G"] # Number of resources (generators, storage, DR, and DERs)
+ T = inputs["T"] # Number of time steps (hours)
+
+ START_SUBPERIODS = inputs["START_SUBPERIODS"]
+ INTERIOR_SUBPERIODS = inputs["INTERIOR_SUBPERIODS"]
+ REP_PERIOD = inputs["REP_PERIOD"]
+ STOR_ALL = inputs["STOR_ALL"]
+ VRE_STOR = inputs["VRE_STOR"]
+ if !isempty(VRE_STOR)
+ VS_STOR = inputs["VS_STOR"]
+ VS_LDS = inputs["VS_LDS"]
+ VS_NONLDS = setdiff(VS_STOR, VS_LDS)
+ end
+
+    # Dual of storage level (state of charge) balance of each resource in each time step
+ dfStorageDual = DataFrame(Resource = inputs["RESOURCE_NAMES"], Zone = zones)
+ dual_values = zeros(G, T)
+
+ # Loop over W separately hours_per_subperiod
+ if !isempty(STOR_ALL)
+ STOR_ALL_NONLDS = setdiff(STOR_ALL, inputs["STOR_LONG_DURATION"])
+ STOR_ALL_LDS = intersect(STOR_ALL, inputs["STOR_LONG_DURATION"])
+ dual_values[STOR_ALL, INTERIOR_SUBPERIODS] = (dual.(EP[:cSoCBalInterior][INTERIOR_SUBPERIODS,
+ STOR_ALL]).data ./ inputs["omega"][INTERIOR_SUBPERIODS])'
+ dual_values[STOR_ALL_NONLDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalStart][START_SUBPERIODS,
+ STOR_ALL_NONLDS]).data ./ inputs["omega"][START_SUBPERIODS])'
+ if !isempty(STOR_ALL_LDS)
+ if inputs["REP_PERIOD"] > 1
+ dual_values[STOR_ALL_LDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalLongDurationStorageStart][1:REP_PERIOD,
+ STOR_ALL_LDS]).data ./ inputs["omega"][START_SUBPERIODS])'
+ else
+ dual_values[STOR_ALL_LDS, START_SUBPERIODS] = (dual.(EP[:cSoCBalStart][START_SUBPERIODS,
+ STOR_ALL_LDS]).data ./ inputs["omega"][START_SUBPERIODS])'
+ end
+ end
+ end
+
+ if !isempty(VRE_STOR)
+ dual_values[VS_STOR, INTERIOR_SUBPERIODS] = ((dual.(EP[:cSoCBalInterior_VRE_STOR][VS_STOR,
+ INTERIOR_SUBPERIODS]).data)' ./ inputs["omega"][INTERIOR_SUBPERIODS])'
+ dual_values[VS_NONLDS, START_SUBPERIODS] = ((dual.(EP[:cSoCBalStart_VRE_STOR][VS_NONLDS,
+ START_SUBPERIODS]).data)' ./ inputs["omega"][START_SUBPERIODS])'
+ if !isempty(VS_LDS)
+ if inputs["REP_PERIOD"] > 1
+ dual_values[VS_LDS, START_SUBPERIODS] = ((dual.(EP[:cVreStorSoCBalLongDurationStorageStart][VS_LDS,
+ 1:REP_PERIOD]).data)' ./ inputs["omega"][START_SUBPERIODS])'
+ else
+ dual_values[VS_LDS, START_SUBPERIODS] = ((dual.(EP[:cSoCBalStart_VRE_STOR][VS_LDS,
+ START_SUBPERIODS]).data)' ./ inputs["omega"][START_SUBPERIODS])'
+ end
+ end
+ end
+
+ if setup["ParameterScale"] == 1
+ dual_values *= ModelScalingFactor
+ end
+
+ dfStorageDual = hcat(dfStorageDual, DataFrame(dual_values, :auto))
+ rename!(dfStorageDual,
+ [Symbol("Resource"); Symbol("Zone"); [Symbol("t$t") for t in 1:T]])
+
+ CSV.write(joinpath(path, "storagebal_duals.csv"),
+ dftranspose(dfStorageDual, false),
+ header = false)
end
diff --git a/src/write_outputs/write_subsidy_revenue.jl b/src/write_outputs/write_subsidy_revenue.jl
index b7702cd747..5be5d66b48 100644
--- a/src/write_outputs/write_subsidy_revenue.jl
+++ b/src/write_outputs/write_subsidy_revenue.jl
@@ -4,98 +4,120 @@
Function for reporting the subsidy revenue earned if a generator-specific `Min_Cap` is provided in the input file, or if a generator is subject to a Minimum Capacity Requirement constraint. The unit is \$.
"""
function write_subsidy_revenue(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- regions = region.(gen)
- clusters = cluster.(gen)
- zones = zone_id.(gen)
- rid = resource_id.(gen)
-
- G = inputs["G"]
+ gen = inputs["RESOURCES"]
+ regions = region.(gen)
+ clusters = cluster.(gen)
+ zones = zone_id.(gen)
+ rid = resource_id.(gen)
- dfSubRevenue = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters, R_ID=rid, SubsidyRevenue = zeros(G))
- MIN_CAP = ids_with_positive(gen, min_cap_mw)
- if !isempty(inputs["VRE_STOR"])
- MIN_CAP_SOLAR = ids_with_positive(gen.VreStorage, min_cap_solar_mw)
- MIN_CAP_WIND = ids_with_positive(gen.VreStorage, min_cap_wind_mw)
- MIN_CAP_STOR = ids_with_positive(gen, min_cap_mwh)
- if !isempty(MIN_CAP_SOLAR)
- dfSubRevenue.SubsidyRevenue[MIN_CAP_SOLAR] .+= (value.(EP[:eTotalCap_SOLAR])[MIN_CAP_SOLAR]) .* (dual.(EP[:cMinCap_Solar][MIN_CAP_SOLAR])).data
- end
- if !isempty(MIN_CAP_WIND)
- dfSubRevenue.SubsidyRevenue[MIN_CAP_WIND] .+= (value.(EP[:eTotalCap_WIND])[MIN_CAP_WIND]) .* (dual.(EP[:cMinCap_Wind][MIN_CAP_WIND])).data
- end
- if !isempty(MIN_CAP_STOR)
- dfSubRevenue.SubsidyRevenue[MIN_CAP_STOR] .+= (value.(EP[:eTotalCap_STOR])[MIN_CAP_STOR]) .* (dual.(EP[:cMinCap_Stor][MIN_CAP_STOR])).data
- end
- end
- dfSubRevenue.SubsidyRevenue[MIN_CAP] .= (value.(EP[:eTotalCap])[MIN_CAP]) .* (dual.(EP[:cMinCap][MIN_CAP])).data
- ### calculating tech specific subsidy revenue
- dfRegSubRevenue = DataFrame(Region = regions, Resource = inputs["RESOURCE_NAMES"], Zone = zones, Cluster = clusters, R_ID=rid, SubsidyRevenue = zeros(G))
- if (setup["MinCapReq"] >= 1)
- for mincap in 1:inputs["NumberOfMinCapReqs"] # This key only exists if MinCapReq >= 1, so we can't get it at the top outside of this condition.
- MIN_CAP_GEN = ids_with_policy(gen, min_cap, tag=mincap)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN] .= dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN] + (value.(EP[:eTotalCap][MIN_CAP_GEN])) * (dual.(EP[:cZoneMinCapReq][mincap]))
- if !isempty(inputs["VRE_STOR"])
- gen_VRE_STOR = gen.VreStorage
- HAS_MIN_CAP_STOR = ids_with_policy(gen_VRE_STOR, min_cap_stor, tag=mincap)
- MIN_CAP_GEN_SOLAR = ids_with_policy(gen_VRE_STOR, min_cap_solar, tag=mincap)
- MIN_CAP_GEN_WIND = ids_with_policy(gen_VRE_STOR, min_cap_wind, tag=mincap)
- MIN_CAP_GEN_ASYM_DC_DIS = intersect(inputs["VS_ASYM_DC_DISCHARGE"], HAS_MIN_CAP_STOR)
- MIN_CAP_GEN_ASYM_AC_DIS = intersect(inputs["VS_ASYM_AC_DISCHARGE"], HAS_MIN_CAP_STOR)
- MIN_CAP_GEN_SYM_DC = intersect(inputs["VS_SYM_DC"], HAS_MIN_CAP_STOR)
- MIN_CAP_GEN_SYM_AC = intersect(inputs["VS_SYM_AC"], HAS_MIN_CAP_STOR)
- if !isempty(MIN_CAP_GEN_SOLAR)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SOLAR] .+= (
- (value.(EP[:eTotalCap_SOLAR][MIN_CAP_GEN_SOLAR]).data)
- .* etainverter.(gen[ids_with_policy(gen, min_cap_solar, tag=mincap)])
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_WIND)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_WIND] .+= (
- (value.(EP[:eTotalCap_WIND][MIN_CAP_GEN_WIND]).data)
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_ASYM_DC_DIS)
- MIN_CAP_GEN_ASYM_DC_DIS = intersect(inputs["VS_ASYM_DC_DISCHARGE"], HAS_MIN_CAP_STOR)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_ASYM_DC_DIS] .+= (
- (value.(EP[:eTotalCapDischarge_DC][MIN_CAP_GEN_ASYM_DC_DIS].data)
- .* etainverter.(gen_VRE_STOR[min_cap_stor.(gen_VRE_STOR, tag=mincap).==1 .& (gen_VRE_STOR.stor_dc_discharge.==2)]))
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_ASYM_AC_DIS)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_ASYM_AC_DIS] .+= (
- (value.(EP[:eTotalCapDischarge_AC][MIN_CAP_GEN_ASYM_AC_DIS]).data)
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_SYM_DC)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SYM_DC] .+= (
- (value.(EP[:eTotalCap_STOR][MIN_CAP_GEN_SYM_DC]).data
- .* power_to_energy_dc.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag=mincap).==1 .& (gen_VRE_STOR.stor_dc_discharge.==1))])
- .* etainverter.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag=mincap).==1 .& (gen_VRE_STOR.stor_dc_discharge.==1))]))
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- if !isempty(MIN_CAP_GEN_SYM_AC)
- dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SYM_AC] .+= (
- (value.(EP[:eTotalCap_STOR][MIN_CAP_GEN_SYM_AC]).data
- .* power_to_energy_ac.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag=mincap).==1 .& (gen_VRE_STOR.stor_ac_discharge.==1))]))
- * (dual.(EP[:cZoneMinCapReq][mincap]))
- )
- end
- end
- end
- end
+ G = inputs["G"]
- if setup["ParameterScale"] == 1
- dfSubRevenue.SubsidyRevenue *= ModelScalingFactor^2 #convert from Million US$ to US$
- dfRegSubRevenue.SubsidyRevenue *= ModelScalingFactor^2 #convert from Million US$ to US$
- end
+ dfSubRevenue = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters,
+ R_ID = rid,
+ SubsidyRevenue = zeros(G))
+ MIN_CAP = ids_with_positive(gen, min_cap_mw)
+ if !isempty(inputs["VRE_STOR"])
+ MIN_CAP_SOLAR = ids_with_positive(gen.VreStorage, min_cap_solar_mw)
+ MIN_CAP_WIND = ids_with_positive(gen.VreStorage, min_cap_wind_mw)
+ MIN_CAP_STOR = ids_with_positive(gen, min_cap_mwh)
+ if !isempty(MIN_CAP_SOLAR)
+ dfSubRevenue.SubsidyRevenue[MIN_CAP_SOLAR] .+= (value.(EP[:eTotalCap_SOLAR])[MIN_CAP_SOLAR]) .*
+ (dual.(EP[:cMinCap_Solar][MIN_CAP_SOLAR])).data
+ end
+ if !isempty(MIN_CAP_WIND)
+ dfSubRevenue.SubsidyRevenue[MIN_CAP_WIND] .+= (value.(EP[:eTotalCap_WIND])[MIN_CAP_WIND]) .*
+ (dual.(EP[:cMinCap_Wind][MIN_CAP_WIND])).data
+ end
+ if !isempty(MIN_CAP_STOR)
+ dfSubRevenue.SubsidyRevenue[MIN_CAP_STOR] .+= (value.(EP[:eTotalCap_STOR])[MIN_CAP_STOR]) .*
+ (dual.(EP[:cMinCap_Stor][MIN_CAP_STOR])).data
+ end
+ end
+ dfSubRevenue.SubsidyRevenue[MIN_CAP] .= (value.(EP[:eTotalCap])[MIN_CAP]) .*
+ (dual.(EP[:cMinCap][MIN_CAP])).data
+ ### calculating tech specific subsidy revenue
+ dfRegSubRevenue = DataFrame(Region = regions,
+ Resource = inputs["RESOURCE_NAMES"],
+ Zone = zones,
+ Cluster = clusters,
+ R_ID = rid,
+ SubsidyRevenue = zeros(G))
+ if (setup["MinCapReq"] >= 1)
+ for mincap in 1:inputs["NumberOfMinCapReqs"] # This key only exists if MinCapReq >= 1, so we can't get it at the top outside of this condition.
+ MIN_CAP_GEN = ids_with_policy(gen, min_cap, tag = mincap)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN] .= dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN] +
+ (value.(EP[:eTotalCap][MIN_CAP_GEN])) *
+ (dual.(EP[:cZoneMinCapReq][mincap]))
+ if !isempty(inputs["VRE_STOR"])
+ gen_VRE_STOR = gen.VreStorage
+ HAS_MIN_CAP_STOR = ids_with_policy(gen_VRE_STOR, min_cap_stor, tag = mincap)
+ MIN_CAP_GEN_SOLAR = ids_with_policy(gen_VRE_STOR,
+ min_cap_solar,
+ tag = mincap)
+ MIN_CAP_GEN_WIND = ids_with_policy(gen_VRE_STOR, min_cap_wind, tag = mincap)
+ MIN_CAP_GEN_ASYM_DC_DIS = intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ HAS_MIN_CAP_STOR)
+ MIN_CAP_GEN_ASYM_AC_DIS = intersect(inputs["VS_ASYM_AC_DISCHARGE"],
+ HAS_MIN_CAP_STOR)
+ MIN_CAP_GEN_SYM_DC = intersect(inputs["VS_SYM_DC"], HAS_MIN_CAP_STOR)
+ MIN_CAP_GEN_SYM_AC = intersect(inputs["VS_SYM_AC"], HAS_MIN_CAP_STOR)
+ if !isempty(MIN_CAP_GEN_SOLAR)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SOLAR] .+= ((value.(EP[:eTotalCap_SOLAR][MIN_CAP_GEN_SOLAR]).data)
+ .*
+ etainverter.(gen[ids_with_policy(gen,
+ min_cap_solar,
+ tag = mincap)])
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_WIND)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_WIND] .+= ((value.(EP[:eTotalCap_WIND][MIN_CAP_GEN_WIND]).data)
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_ASYM_DC_DIS)
+ MIN_CAP_GEN_ASYM_DC_DIS = intersect(inputs["VS_ASYM_DC_DISCHARGE"],
+ HAS_MIN_CAP_STOR)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_ASYM_DC_DIS] .+= ((value.(EP[:eTotalCapDischarge_DC][MIN_CAP_GEN_ASYM_DC_DIS].data)
+ .*
+ etainverter.(gen_VRE_STOR[min_cap_stor.(gen_VRE_STOR, tag = mincap) .== 1 .& (gen_VRE_STOR.stor_dc_discharge .== 2)]))
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_ASYM_AC_DIS)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_ASYM_AC_DIS] .+= ((value.(EP[:eTotalCapDischarge_AC][MIN_CAP_GEN_ASYM_AC_DIS]).data)
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_SYM_DC)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SYM_DC] .+= ((value.(EP[:eTotalCap_STOR][MIN_CAP_GEN_SYM_DC]).data
+ .*
+ power_to_energy_dc.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag = mincap) .== 1 .& (gen_VRE_STOR.stor_dc_discharge .== 1))])
+ .*
+ etainverter.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag = mincap) .== 1 .& (gen_VRE_STOR.stor_dc_discharge .== 1))]))
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ if !isempty(MIN_CAP_GEN_SYM_AC)
+ dfRegSubRevenue.SubsidyRevenue[MIN_CAP_GEN_SYM_AC] .+= ((value.(EP[:eTotalCap_STOR][MIN_CAP_GEN_SYM_AC]).data
+ .*
+ power_to_energy_ac.(gen_VRE_STOR[(min_cap_stor.(gen_VRE_STOR, tag = mincap) .== 1 .& (gen_VRE_STOR.stor_ac_discharge .== 1))]))
+ *
+ (dual.(EP[:cZoneMinCapReq][mincap])))
+ end
+ end
+ end
+ end
+ if setup["ParameterScale"] == 1
+ dfSubRevenue.SubsidyRevenue *= ModelScalingFactor^2 #convert from Million US$ to US$
+ dfRegSubRevenue.SubsidyRevenue *= ModelScalingFactor^2 #convert from Million US$ to US$
+ end
+
+ CSV.write(joinpath(path, "SubsidyRevenue.csv"), dfSubRevenue)
+ CSV.write(joinpath(path, "RegSubsidyRevenue.csv"), dfRegSubRevenue)
+ return dfSubRevenue, dfRegSubRevenue
end
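The subsidy-revenue entries written by this function are, in both the old and the reformatted code, the product of an installed-capacity expression and the dual of the corresponding minimum-capacity constraint, rescaled by ModelScalingFactor^2 when ParameterScale is on. A minimal JuMP sketch of that capacity-times-shadow-price relationship, using a toy one-resource model with HiGHS rather than anything from GenX:

    using JuMP, HiGHS

    EP = Model(HiGHS.Optimizer)
    set_silent(EP)
    @variable(EP, cap >= 0)                 # installed capacity, MW
    @constraint(EP, cMinCap, cap >= 10.0)   # a minimum-capacity requirement
    @objective(EP, Min, 50.0 * cap)         # annualized cost per MW-yr
    optimize!(EP)

    # subsidy revenue ~ capacity times the shadow price of the binding requirement
    subsidy_revenue = value(cap) * dual(cMinCap)   # 10 * 50 = 500 for this toy case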
diff --git a/src/write_outputs/write_time_weights.jl b/src/write_outputs/write_time_weights.jl
index 8f799478f0..b29bbdcb2f 100644
--- a/src/write_outputs/write_time_weights.jl
+++ b/src/write_outputs/write_time_weights.jl
@@ -1,6 +1,6 @@
function write_time_weights(path::AbstractString, inputs::Dict)
- T = inputs["T"] # Number of time steps (hours)
- # Save array of weights for each time period (when using time sampling)
- dfTimeWeights = DataFrame(Time=1:T, Weight=inputs["omega"])
- CSV.write(joinpath(path, "time_weights.csv"), dfTimeWeights)
+ T = inputs["T"] # Number of time steps (hours)
+ # Save array of weights for each time period (when using time sampling)
+ dfTimeWeights = DataFrame(Time = 1:T, Weight = inputs["omega"])
+ CSV.write(joinpath(path, "time_weights.csv"), dfTimeWeights)
end
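The omega vector written out here is what the output writers use to turn hourly results from representative periods into annual totals, for example the AnnualSum columns later in this diff. A small stand-alone sketch of that weighting, with made-up numbers in place of the model inputs:

    T = 8                          # a toy number of modeled hours
    omega = fill(8760 / T, T)      # each modeled hour stands in for 8760/T real hours
    x = rand(T)                    # some hourly quantity for one resource
    annual_sum = x' * omega        # weighted sum over the modeled hours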
diff --git a/src/write_outputs/write_vre_stor.jl b/src/write_outputs/write_vre_stor.jl
index 6f7e617ec1..ef261d5196 100644
--- a/src/write_outputs/write_vre_stor.jl
+++ b/src/write_outputs/write_vre_stor.jl
@@ -5,16 +5,16 @@ Function for writing the vre-storage specific files.
"""
function write_vre_stor(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- ### CAPACITY DECISIONS ###
- dfVreStor = write_vre_stor_capacity(path, inputs, setup, EP)
+ ### CAPACITY DECISIONS ###
+ dfVreStor = write_vre_stor_capacity(path, inputs, setup, EP)
- ### CHARGING DECISIONS ###
- write_vre_stor_charge(path, inputs, setup, EP)
+ ### CHARGING DECISIONS ###
+ write_vre_stor_charge(path, inputs, setup, EP)
- ### DISCHARGING DECISIONS ###
- write_vre_stor_discharge(path, inputs, setup, EP)
+ ### DISCHARGING DECISIONS ###
+ write_vre_stor_discharge(path, inputs, setup, EP)
- return dfVreStor
+ return dfVreStor
end
@doc raw"""
@@ -23,262 +23,289 @@ end
Function for writing the vre-storage capacities.
"""
function write_vre_stor_capacity(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- gen_VRE_STOR = gen.VreStorage
-
- VRE_STOR = inputs["VRE_STOR"]
- SOLAR = inputs["VS_SOLAR"]
- WIND = inputs["VS_WIND"]
- DC = inputs["VS_DC"]
- STOR = inputs["VS_STOR"]
- MultiStage = setup["MultiStage"]
- size_vrestor_resources = size(inputs["RESOURCE_NAMES_VRE_STOR"])
-
- # Solar capacity
- capsolar = zeros(size_vrestor_resources)
- retcapsolar = zeros(size_vrestor_resources)
- existingcapsolar = zeros(size_vrestor_resources)
-
- # Wind capacity
- capwind = zeros(size_vrestor_resources)
- retcapwind = zeros(size_vrestor_resources)
- existingcapwind = zeros(size_vrestor_resources)
-
- # Inverter capacity
- capdc = zeros(size_vrestor_resources)
- retcapdc = zeros(size_vrestor_resources)
- existingcapdc = zeros(size_vrestor_resources)
-
- # Grid connection capacity
- capgrid = zeros(size_vrestor_resources)
- retcapgrid = zeros(size_vrestor_resources)
- existingcapgrid = zeros(size_vrestor_resources)
-
- # Energy storage capacity
- capenergy = zeros(size_vrestor_resources)
- retcapenergy = zeros(size_vrestor_resources)
- existingcapenergy = zeros(size_vrestor_resources)
-
- # Charge storage capacity DC
- capchargedc = zeros(size_vrestor_resources)
- retcapchargedc = zeros(size_vrestor_resources)
- existingcapchargedc = zeros(size_vrestor_resources)
-
- # Charge storage capacity AC
- capchargeac = zeros(size_vrestor_resources)
- retcapchargeac = zeros(size_vrestor_resources)
- existingcapchargeac = zeros(size_vrestor_resources)
-
- # Discharge storage capacity DC
- capdischargedc = zeros(size_vrestor_resources)
- retcapdischargedc = zeros(size_vrestor_resources)
- existingcapdischargedc = zeros(size_vrestor_resources)
-
- # Discharge storage capacity AC
- capdischargeac = zeros(size_vrestor_resources)
- retcapdischargeac = zeros(size_vrestor_resources)
- existingcapdischargeac = zeros(size_vrestor_resources)
-
- j = 1
- for i in VRE_STOR
- existingcapgrid[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAP][i]) : existing_cap_mw(gen[i])
- if i in inputs["NEW_CAP"]
- capgrid[j] = value(EP[:vCAP][i])
- end
- if i in inputs["RET_CAP"]
- retcapgrid[j] = value(EP[:vRETCAP][i])
- end
-
- if i in SOLAR
- existingcapsolar[j] = MultiStage == 1 ? value(EP[:vEXISTINGSOLARCAP][i]) : existing_cap_solar_mw(gen_VRE_STOR[j])
- if i in inputs["NEW_CAP_SOLAR"]
- capsolar[j] = value(EP[:vSOLARCAP][i])
- end
- if i in inputs["RET_CAP_SOLAR"]
- retcapsolar[j] = first(value.(EP[:vRETSOLARCAP][i]))
- end
- end
-
- if i in WIND
- existingcapwind[j] = MultiStage == 1 ? value(EP[:vEXISTINGWINDCAP][i]) : existing_cap_wind_mw(gen_VRE_STOR[j])
- if i in inputs["NEW_CAP_WIND"]
- capwind[j] = value(EP[:vWINDCAP][i])
- end
- if i in inputs["RET_CAP_WIND"]
- retcapwind[j] = first(value.(EP[:vRETWINDCAP][i]))
- end
- end
-
- if i in DC
- existingcapdc[j] = MultiStage == 1 ? value(EP[:vEXISTINGDCCAP][i]) : existing_cap_inverter_mw(gen_VRE_STOR[j])
- if i in inputs["NEW_CAP_DC"]
- capdc[j] = value(EP[:vDCCAP][i])
- end
- if i in inputs["RET_CAP_DC"]
- retcapdc[j] = first(value.(EP[:vRETDCCAP][i]))
- end
- end
-
- if i in STOR
- existingcapenergy[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPENERGY_VS][i]) : existing_cap_mwh(gen[i])
- if i in inputs["NEW_CAP_STOR"]
- capenergy[j] = value(EP[:vCAPENERGY_VS][i])
- end
- if i in inputs["RET_CAP_STOR"]
- retcapenergy[j] = first(value.(EP[:vRETCAPENERGY_VS][i]))
- end
-
- if i in inputs["VS_ASYM_DC_CHARGE"]
- if i in inputs["NEW_CAP_CHARGE_DC"]
- capchargedc[j] = value(EP[:vCAPCHARGE_DC][i])
- end
- if i in inputs["RET_CAP_CHARGE_DC"]
- retcapchargedc[j] = value(EP[:vRETCAPCHARGE_DC][i])
- end
- existingcapchargedc[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPCHARGEDC][i]) : existing_cap_charge_dc_mw(gen_VRE_STOR[j])
- end
- if i in inputs["VS_ASYM_AC_CHARGE"]
- if i in inputs["NEW_CAP_CHARGE_AC"]
- capchargeac[j] = value(EP[:vCAPCHARGE_AC][i])
- end
- if i in inputs["RET_CAP_CHARGE_AC"]
- retcapchargeac[j] = value(EP[:vRETCAPCHARGE_AC][i])
- end
- existingcapchargeac[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPCHARGEAC][i]) : existing_cap_charge_ac_mw(gen_VRE_STOR[j])
- end
- if i in inputs["VS_ASYM_DC_DISCHARGE"]
- if i in inputs["NEW_CAP_DISCHARGE_DC"]
- capdischargedc[j] = value(EP[:vCAPDISCHARGE_DC][i])
- end
- if i in inputs["RET_CAP_DISCHARGE_DC"]
- retcapdischargedc[j] = value(EP[:vRETCAPDISCHARGE_DC][i])
- end
- existingcapdischargedc[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPDISCHARGEDC][i]) : existing_cap_discharge_dc_mw(gen_VRE_STOR[j])
- end
- if i in inputs["VS_ASYM_AC_DISCHARGE"]
- if i in inputs["NEW_CAP_DISCHARGE_AC"]
- capdischargeac[j] = value(EP[:vCAPDISCHARGE_AC][i])
- end
- if i in inputs["RET_CAP_DISCHARGE_AC"]
- retcapdischargeac[j] = value(EP[:vRETCAPDISCHARGE_AC][i])
- end
- existingcapdischargeac[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPDISCHARGEAC][i]) : existing_cap_discharge_ac_mw(gen_VRE_STOR[j])
- end
- end
- j += 1
- end
-
- technologies = resource_type_mga.(gen_VRE_STOR)
- clusters = cluster.(gen_VRE_STOR)
- zones = zone_id.(gen_VRE_STOR)
-
- dfCap = DataFrame(
- Resource = inputs["RESOURCE_NAMES_VRE_STOR"], Zone = zones, Resource_Type = technologies, Cluster=clusters,
- StartCapSolar = existingcapsolar[:],
- RetCapSolar = retcapsolar[:],
- NewCapSolar = capsolar[:],
- EndCapSolar = existingcapsolar[:] - retcapsolar[:] + capsolar[:],
- StartCapWind = existingcapwind[:],
- RetCapWind = retcapwind[:],
- NewCapWind = capwind[:],
- EndCapWind = existingcapwind[:] - retcapwind[:] + capwind[:],
- StartCapDC = existingcapdc[:],
- RetCapDC = retcapdc[:],
- NewCapDC = capdc[:],
- EndCapDC = existingcapdc[:] - retcapdc[:] + capdc[:],
- StartCapGrid = existingcapgrid[:],
- RetCapGrid = retcapgrid[:],
- NewCapGrid = capgrid[:],
- EndCapGrid = existingcapgrid[:] - retcapgrid[:] + capgrid[:],
- StartEnergyCap = existingcapenergy[:],
- RetEnergyCap = retcapenergy[:],
- NewEnergyCap = capenergy[:],
- EndEnergyCap = existingcapenergy[:] - retcapenergy[:] + capenergy[:],
- StartChargeDCCap = existingcapchargedc[:],
- RetChargeDCCap = retcapchargedc[:],
- NewChargeDCCap = capchargedc[:],
- EndChargeDCCap = existingcapchargedc[:] - retcapchargedc[:] + capchargedc[:],
- StartChargeACCap = existingcapchargeac[:],
- RetChargeACCap = retcapchargeac[:],
- NewChargeACCap = capchargeac[:],
- EndChargeACCap = existingcapchargeac[:] - retcapchargeac[:] + capchargeac[:],
- StartDischargeDCCap = existingcapdischargedc[:],
- RetDischargeDCCap = retcapdischargedc[:],
- NewDischargeDCCap = capdischargedc[:],
- EndDischargeDCCap = existingcapdischargedc[:] - retcapdischargedc[:] + capdischargedc[:],
- StartDischargeACCap = existingcapdischargeac[:],
- RetDischargeACCap = retcapdischargeac[:],
- NewDischargeACCap = capdischargeac[:],
- EndDischargeACCap = existingcapdischargeac[:] - retcapdischargeac[:] + capdischargeac[:]
- )
-
- if setup["ParameterScale"] ==1
- columns_to_scale = [
- :StartCapSolar,
- :RetCapSolar,
- :NewCapSolar,
- :EndCapSolar,
- :StartCapWind,
- :RetCapWind,
- :NewCapWind,
- :EndCapWind,
- :StartCapDC,
- :RetCapDC,
- :NewCapDC,
- :EndCapDC,
- :StartCapGrid,
- :RetCapGrid,
- :NewCapGrid,
- :EndCapGrid,
- :StartEnergyCap,
- :RetEnergyCap,
- :NewEnergyCap,
- :EndEnergyCap,
- :StartChargeACCap,
- :RetChargeACCap,
- :NewChargeACCap,
- :EndChargeACCap,
- :StartChargeDCCap,
- :RetChargeDCCap,
- :NewChargeDCCap,
- :EndChargeDCCap,
- :StartDischargeDCCap,
- :RetDischargeDCCap,
- :NewDischargeDCCap,
- :EndDischargeDCCap,
- :StartDischargeACCap,
- :RetDischargeACCap,
- :NewDischargeACCap,
- :EndDischargeACCap,
- ]
- dfCap[!, columns_to_scale] .*= ModelScalingFactor
- end
-
- total = DataFrame(
- Resource = "Total", Zone = "n/a", Resource_Type = "Total", Cluster= "n/a",
- StartCapSolar = sum(dfCap[!,:StartCapSolar]), RetCapSolar = sum(dfCap[!,:RetCapSolar]),
- NewCapSolar = sum(dfCap[!,:NewCapSolar]), EndCapSolar = sum(dfCap[!,:EndCapSolar]),
- StartCapWind = sum(dfCap[!,:StartCapWind]), RetCapWind = sum(dfCap[!,:RetCapWind]),
- NewCapWind = sum(dfCap[!,:NewCapWind]), EndCapWind = sum(dfCap[!,:EndCapWind]),
- StartCapDC = sum(dfCap[!,:StartCapDC]), RetCapDC = sum(dfCap[!,:RetCapDC]),
- NewCapDC = sum(dfCap[!,:NewCapDC]), EndCapDC = sum(dfCap[!,:EndCapDC]),
- StartCapGrid = sum(dfCap[!,:StartCapGrid]), RetCapGrid = sum(dfCap[!,:RetCapGrid]),
- NewCapGrid = sum(dfCap[!,:NewCapGrid]), EndCapGrid = sum(dfCap[!,:EndCapGrid]),
- StartEnergyCap = sum(dfCap[!,:StartEnergyCap]), RetEnergyCap = sum(dfCap[!,:RetEnergyCap]),
- NewEnergyCap = sum(dfCap[!,:NewEnergyCap]), EndEnergyCap = sum(dfCap[!,:EndEnergyCap]),
- StartChargeACCap = sum(dfCap[!,:StartChargeACCap]), RetChargeACCap = sum(dfCap[!,:RetChargeACCap]),
- NewChargeACCap = sum(dfCap[!,:NewChargeACCap]), EndChargeACCap = sum(dfCap[!,:EndChargeACCap]),
- StartChargeDCCap = sum(dfCap[!,:StartChargeDCCap]), RetChargeDCCap = sum(dfCap[!,:RetChargeDCCap]),
- NewChargeDCCap = sum(dfCap[!,:NewChargeDCCap]), EndChargeDCCap = sum(dfCap[!,:EndChargeDCCap]),
- StartDischargeDCCap = sum(dfCap[!,:StartDischargeDCCap]), RetDischargeDCCap = sum(dfCap[!,:RetDischargeDCCap]),
- NewDischargeDCCap = sum(dfCap[!,:NewDischargeDCCap]), EndDischargeDCCap = sum(dfCap[!,:EndDischargeDCCap]),
- StartDischargeACCap = sum(dfCap[!,:StartDischargeACCap]), RetDischargeACCap = sum(dfCap[!,:RetDischargeACCap]),
- NewDischargeACCap = sum(dfCap[!,:NewDischargeACCap]), EndDischargeACCap = sum(dfCap[!,:EndDischargeACCap])
- )
-
- dfCap = vcat(dfCap, total)
- CSV.write(joinpath(path, "vre_stor_capacity.csv"), dfCap)
- return dfCap
+ gen = inputs["RESOURCES"]
+ gen_VRE_STOR = gen.VreStorage
+
+ VRE_STOR = inputs["VRE_STOR"]
+ SOLAR = inputs["VS_SOLAR"]
+ WIND = inputs["VS_WIND"]
+ DC = inputs["VS_DC"]
+ STOR = inputs["VS_STOR"]
+ MultiStage = setup["MultiStage"]
+ size_vrestor_resources = size(inputs["RESOURCE_NAMES_VRE_STOR"])
+
+ # Solar capacity
+ capsolar = zeros(size_vrestor_resources)
+ retcapsolar = zeros(size_vrestor_resources)
+ existingcapsolar = zeros(size_vrestor_resources)
+
+ # Wind capacity
+ capwind = zeros(size_vrestor_resources)
+ retcapwind = zeros(size_vrestor_resources)
+ existingcapwind = zeros(size_vrestor_resources)
+
+ # Inverter capacity
+ capdc = zeros(size_vrestor_resources)
+ retcapdc = zeros(size_vrestor_resources)
+ existingcapdc = zeros(size_vrestor_resources)
+
+ # Grid connection capacity
+ capgrid = zeros(size_vrestor_resources)
+ retcapgrid = zeros(size_vrestor_resources)
+ existingcapgrid = zeros(size_vrestor_resources)
+
+ # Energy storage capacity
+ capenergy = zeros(size_vrestor_resources)
+ retcapenergy = zeros(size_vrestor_resources)
+ existingcapenergy = zeros(size_vrestor_resources)
+
+ # Charge storage capacity DC
+ capchargedc = zeros(size_vrestor_resources)
+ retcapchargedc = zeros(size_vrestor_resources)
+ existingcapchargedc = zeros(size_vrestor_resources)
+
+ # Charge storage capacity AC
+ capchargeac = zeros(size_vrestor_resources)
+ retcapchargeac = zeros(size_vrestor_resources)
+ existingcapchargeac = zeros(size_vrestor_resources)
+
+ # Discharge storage capacity DC
+ capdischargedc = zeros(size_vrestor_resources)
+ retcapdischargedc = zeros(size_vrestor_resources)
+ existingcapdischargedc = zeros(size_vrestor_resources)
+
+ # Discharge storage capacity AC
+ capdischargeac = zeros(size_vrestor_resources)
+ retcapdischargeac = zeros(size_vrestor_resources)
+ existingcapdischargeac = zeros(size_vrestor_resources)
+
+ j = 1
+ for i in VRE_STOR
+ existingcapgrid[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAP][i]) :
+ existing_cap_mw(gen[i])
+ if i in inputs["NEW_CAP"]
+ capgrid[j] = value(EP[:vCAP][i])
+ end
+ if i in inputs["RET_CAP"]
+ retcapgrid[j] = value(EP[:vRETCAP][i])
+ end
+
+ if i in SOLAR
+ existingcapsolar[j] = MultiStage == 1 ? value(EP[:vEXISTINGSOLARCAP][i]) :
+ existing_cap_solar_mw(gen_VRE_STOR[j])
+ if i in inputs["NEW_CAP_SOLAR"]
+ capsolar[j] = value(EP[:vSOLARCAP][i])
+ end
+ if i in inputs["RET_CAP_SOLAR"]
+ retcapsolar[j] = first(value.(EP[:vRETSOLARCAP][i]))
+ end
+ end
+
+ if i in WIND
+ existingcapwind[j] = MultiStage == 1 ? value(EP[:vEXISTINGWINDCAP][i]) :
+ existing_cap_wind_mw(gen_VRE_STOR[j])
+ if i in inputs["NEW_CAP_WIND"]
+ capwind[j] = value(EP[:vWINDCAP][i])
+ end
+ if i in inputs["RET_CAP_WIND"]
+ retcapwind[j] = first(value.(EP[:vRETWINDCAP][i]))
+ end
+ end
+
+ if i in DC
+ existingcapdc[j] = MultiStage == 1 ? value(EP[:vEXISTINGDCCAP][i]) :
+ existing_cap_inverter_mw(gen_VRE_STOR[j])
+ if i in inputs["NEW_CAP_DC"]
+ capdc[j] = value(EP[:vDCCAP][i])
+ end
+ if i in inputs["RET_CAP_DC"]
+ retcapdc[j] = first(value.(EP[:vRETDCCAP][i]))
+ end
+ end
+
+ if i in STOR
+ existingcapenergy[j] = MultiStage == 1 ? value(EP[:vEXISTINGCAPENERGY_VS][i]) :
+ existing_cap_mwh(gen[i])
+ if i in inputs["NEW_CAP_STOR"]
+ capenergy[j] = value(EP[:vCAPENERGY_VS][i])
+ end
+ if i in inputs["RET_CAP_STOR"]
+ retcapenergy[j] = first(value.(EP[:vRETCAPENERGY_VS][i]))
+ end
+
+ if i in inputs["VS_ASYM_DC_CHARGE"]
+ if i in inputs["NEW_CAP_CHARGE_DC"]
+ capchargedc[j] = value(EP[:vCAPCHARGE_DC][i])
+ end
+ if i in inputs["RET_CAP_CHARGE_DC"]
+ retcapchargedc[j] = value(EP[:vRETCAPCHARGE_DC][i])
+ end
+ existingcapchargedc[j] = MultiStage == 1 ?
+ value(EP[:vEXISTINGCAPCHARGEDC][i]) :
+ existing_cap_charge_dc_mw(gen_VRE_STOR[j])
+ end
+ if i in inputs["VS_ASYM_AC_CHARGE"]
+ if i in inputs["NEW_CAP_CHARGE_AC"]
+ capchargeac[j] = value(EP[:vCAPCHARGE_AC][i])
+ end
+ if i in inputs["RET_CAP_CHARGE_AC"]
+ retcapchargeac[j] = value(EP[:vRETCAPCHARGE_AC][i])
+ end
+ existingcapchargeac[j] = MultiStage == 1 ?
+ value(EP[:vEXISTINGCAPCHARGEAC][i]) :
+ existing_cap_charge_ac_mw(gen_VRE_STOR[j])
+ end
+ if i in inputs["VS_ASYM_DC_DISCHARGE"]
+ if i in inputs["NEW_CAP_DISCHARGE_DC"]
+ capdischargedc[j] = value(EP[:vCAPDISCHARGE_DC][i])
+ end
+ if i in inputs["RET_CAP_DISCHARGE_DC"]
+ retcapdischargedc[j] = value(EP[:vRETCAPDISCHARGE_DC][i])
+ end
+ existingcapdischargedc[j] = MultiStage == 1 ?
+ value(EP[:vEXISTINGCAPDISCHARGEDC][i]) :
+ existing_cap_discharge_dc_mw(gen_VRE_STOR[j])
+ end
+ if i in inputs["VS_ASYM_AC_DISCHARGE"]
+ if i in inputs["NEW_CAP_DISCHARGE_AC"]
+ capdischargeac[j] = value(EP[:vCAPDISCHARGE_AC][i])
+ end
+ if i in inputs["RET_CAP_DISCHARGE_AC"]
+ retcapdischargeac[j] = value(EP[:vRETCAPDISCHARGE_AC][i])
+ end
+ existingcapdischargeac[j] = MultiStage == 1 ?
+ value(EP[:vEXISTINGCAPDISCHARGEAC][i]) :
+ existing_cap_discharge_ac_mw(gen_VRE_STOR[j])
+ end
+ end
+ j += 1
+ end
+
+ technologies = resource_type_mga.(gen_VRE_STOR)
+ clusters = cluster.(gen_VRE_STOR)
+ zones = zone_id.(gen_VRE_STOR)
+
+ dfCap = DataFrame(Resource = inputs["RESOURCE_NAMES_VRE_STOR"], Zone = zones,
+ Resource_Type = technologies, Cluster = clusters,
+ StartCapSolar = existingcapsolar[:],
+ RetCapSolar = retcapsolar[:],
+ NewCapSolar = capsolar[:],
+ EndCapSolar = existingcapsolar[:] - retcapsolar[:] + capsolar[:],
+ StartCapWind = existingcapwind[:],
+ RetCapWind = retcapwind[:],
+ NewCapWind = capwind[:],
+ EndCapWind = existingcapwind[:] - retcapwind[:] + capwind[:],
+ StartCapDC = existingcapdc[:],
+ RetCapDC = retcapdc[:],
+ NewCapDC = capdc[:],
+ EndCapDC = existingcapdc[:] - retcapdc[:] + capdc[:],
+ StartCapGrid = existingcapgrid[:],
+ RetCapGrid = retcapgrid[:],
+ NewCapGrid = capgrid[:],
+ EndCapGrid = existingcapgrid[:] - retcapgrid[:] + capgrid[:],
+ StartEnergyCap = existingcapenergy[:],
+ RetEnergyCap = retcapenergy[:],
+ NewEnergyCap = capenergy[:],
+ EndEnergyCap = existingcapenergy[:] - retcapenergy[:] + capenergy[:],
+ StartChargeDCCap = existingcapchargedc[:],
+ RetChargeDCCap = retcapchargedc[:],
+ NewChargeDCCap = capchargedc[:],
+ EndChargeDCCap = existingcapchargedc[:] - retcapchargedc[:] + capchargedc[:],
+ StartChargeACCap = existingcapchargeac[:],
+ RetChargeACCap = retcapchargeac[:],
+ NewChargeACCap = capchargeac[:],
+ EndChargeACCap = existingcapchargeac[:] - retcapchargeac[:] + capchargeac[:],
+ StartDischargeDCCap = existingcapdischargedc[:],
+ RetDischargeDCCap = retcapdischargedc[:],
+ NewDischargeDCCap = capdischargedc[:],
+ EndDischargeDCCap = existingcapdischargedc[:] - retcapdischargedc[:] +
+ capdischargedc[:],
+ StartDischargeACCap = existingcapdischargeac[:],
+ RetDischargeACCap = retcapdischargeac[:],
+ NewDischargeACCap = capdischargeac[:],
+ EndDischargeACCap = existingcapdischargeac[:] - retcapdischargeac[:] +
+ capdischargeac[:])
+
+ if setup["ParameterScale"] == 1
+ columns_to_scale = [
+ :StartCapSolar,
+ :RetCapSolar,
+ :NewCapSolar,
+ :EndCapSolar,
+ :StartCapWind,
+ :RetCapWind,
+ :NewCapWind,
+ :EndCapWind,
+ :StartCapDC,
+ :RetCapDC,
+ :NewCapDC,
+ :EndCapDC,
+ :StartCapGrid,
+ :RetCapGrid,
+ :NewCapGrid,
+ :EndCapGrid,
+ :StartEnergyCap,
+ :RetEnergyCap,
+ :NewEnergyCap,
+ :EndEnergyCap,
+ :StartChargeACCap,
+ :RetChargeACCap,
+ :NewChargeACCap,
+ :EndChargeACCap,
+ :StartChargeDCCap,
+ :RetChargeDCCap,
+ :NewChargeDCCap,
+ :EndChargeDCCap,
+ :StartDischargeDCCap,
+ :RetDischargeDCCap,
+ :NewDischargeDCCap,
+ :EndDischargeDCCap,
+ :StartDischargeACCap,
+ :RetDischargeACCap,
+ :NewDischargeACCap,
+ :EndDischargeACCap,
+ ]
+ dfCap[!, columns_to_scale] .*= ModelScalingFactor
+ end
+
+ total = DataFrame(Resource = "Total", Zone = "n/a", Resource_Type = "Total",
+ Cluster = "n/a",
+ StartCapSolar = sum(dfCap[!, :StartCapSolar]),
+ RetCapSolar = sum(dfCap[!, :RetCapSolar]),
+ NewCapSolar = sum(dfCap[!, :NewCapSolar]),
+ EndCapSolar = sum(dfCap[!, :EndCapSolar]),
+ StartCapWind = sum(dfCap[!, :StartCapWind]),
+ RetCapWind = sum(dfCap[!, :RetCapWind]),
+ NewCapWind = sum(dfCap[!, :NewCapWind]), EndCapWind = sum(dfCap[!, :EndCapWind]),
+ StartCapDC = sum(dfCap[!, :StartCapDC]), RetCapDC = sum(dfCap[!, :RetCapDC]),
+ NewCapDC = sum(dfCap[!, :NewCapDC]), EndCapDC = sum(dfCap[!, :EndCapDC]),
+ StartCapGrid = sum(dfCap[!, :StartCapGrid]),
+ RetCapGrid = sum(dfCap[!, :RetCapGrid]),
+ NewCapGrid = sum(dfCap[!, :NewCapGrid]), EndCapGrid = sum(dfCap[!, :EndCapGrid]),
+ StartEnergyCap = sum(dfCap[!, :StartEnergyCap]),
+ RetEnergyCap = sum(dfCap[!, :RetEnergyCap]),
+ NewEnergyCap = sum(dfCap[!, :NewEnergyCap]),
+ EndEnergyCap = sum(dfCap[!, :EndEnergyCap]),
+ StartChargeACCap = sum(dfCap[!, :StartChargeACCap]),
+ RetChargeACCap = sum(dfCap[!, :RetChargeACCap]),
+ NewChargeACCap = sum(dfCap[!, :NewChargeACCap]),
+ EndChargeACCap = sum(dfCap[!, :EndChargeACCap]),
+ StartChargeDCCap = sum(dfCap[!, :StartChargeDCCap]),
+ RetChargeDCCap = sum(dfCap[!, :RetChargeDCCap]),
+ NewChargeDCCap = sum(dfCap[!, :NewChargeDCCap]),
+ EndChargeDCCap = sum(dfCap[!, :EndChargeDCCap]),
+ StartDischargeDCCap = sum(dfCap[!, :StartDischargeDCCap]),
+ RetDischargeDCCap = sum(dfCap[!, :RetDischargeDCCap]),
+ NewDischargeDCCap = sum(dfCap[!, :NewDischargeDCCap]),
+ EndDischargeDCCap = sum(dfCap[!, :EndDischargeDCCap]),
+ StartDischargeACCap = sum(dfCap[!, :StartDischargeACCap]),
+ RetDischargeACCap = sum(dfCap[!, :RetDischargeACCap]),
+ NewDischargeACCap = sum(dfCap[!, :NewDischargeACCap]),
+ EndDischargeACCap = sum(dfCap[!, :EndDischargeACCap]))
+
+ dfCap = vcat(dfCap, total)
+ CSV.write(joinpath(path, "vre_stor_capacity.csv"), dfCap)
+ return dfCap
end
@doc raw"""
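The capacity table above rescales a whole set of columns in place with the broadcasting idiom dfCap[!, columns_to_scale] .*= ModelScalingFactor. A minimal DataFrames.jl sketch of the same idiom, using a toy frame and a stand-in factor of 1000 rather than the package's constant:

    using DataFrames

    df = DataFrame(NewCapSolar = [1.2, 0.0], RetCapSolar = [0.3, 0.1], Resource = ["a", "b"])
    columns_to_scale = [:NewCapSolar, :RetCapSolar]
    df[!, columns_to_scale] .*= 1000.0   # rescale the numeric columns, leave Resource untouched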
@@ -287,43 +314,49 @@ end
Function for writing the vre-storage charging decision variables/expressions.
"""
function write_vre_stor_charge(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- gen_VRE_STOR = gen.VreStorage
- T = inputs["T"]
+ gen = inputs["RESOURCES"]
+ gen_VRE_STOR = gen.VreStorage
+ T = inputs["T"]
DC_CHARGE = inputs["VS_STOR_DC_CHARGE"]
AC_CHARGE = inputs["VS_STOR_AC_CHARGE"]
- # DC charging of battery dataframe
- if !isempty(DC_CHARGE)
- dfCharge_DC = DataFrame(Resource = inputs["RESOURCE_NAMES_DC_CHARGE"], Zone = inputs["ZONES_DC_CHARGE"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(DC_CHARGE)[1]))
- charge_dc = zeros(size(DC_CHARGE)[1], T)
- charge_dc = value.(EP[:vP_DC_CHARGE]).data ./ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)]) * (setup["ParameterScale"]==1 ? ModelScalingFactor : 1)
- dfCharge_DC.AnnualSum .= charge_dc * inputs["omega"]
-
-
- filepath = joinpath(path,"vre_stor_dc_charge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfCharge_DC)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, charge_dc, dfCharge_DC)
- end
- end
-
- # AC charging of battery dataframe
- if !isempty(AC_CHARGE)
- dfCharge_AC = DataFrame(Resource = inputs["RESOURCE_NAMES_AC_CHARGE"], Zone = inputs["ZONES_AC_CHARGE"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(AC_CHARGE)[1]))
- charge_ac = zeros(size(AC_CHARGE)[1], T)
- charge_ac = value.(EP[:vP_AC_CHARGE]).data * (setup["ParameterScale"]==1 ? ModelScalingFactor : 1)
- dfCharge_AC.AnnualSum .= charge_ac * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_ac_charge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfCharge_AC)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, charge_ac, dfCharge_AC)
- end
- end
- return nothing
+ # DC charging of battery dataframe
+ if !isempty(DC_CHARGE)
+ dfCharge_DC = DataFrame(Resource = inputs["RESOURCE_NAMES_DC_CHARGE"],
+ Zone = inputs["ZONES_DC_CHARGE"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(DC_CHARGE)[1]))
+ charge_dc = zeros(size(DC_CHARGE)[1], T)
+ charge_dc = value.(EP[:vP_DC_CHARGE]).data ./
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)]) *
+ (setup["ParameterScale"] == 1 ? ModelScalingFactor : 1)
+ dfCharge_DC.AnnualSum .= charge_dc * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_dc_charge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfCharge_DC)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, charge_dc, dfCharge_DC)
+ end
+ end
+
+ # AC charging of battery dataframe
+ if !isempty(AC_CHARGE)
+ dfCharge_AC = DataFrame(Resource = inputs["RESOURCE_NAMES_AC_CHARGE"],
+ Zone = inputs["ZONES_AC_CHARGE"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(AC_CHARGE)[1]))
+ charge_ac = zeros(size(AC_CHARGE)[1], T)
+ charge_ac = value.(EP[:vP_AC_CHARGE]).data *
+ (setup["ParameterScale"] == 1 ? ModelScalingFactor : 1)
+ dfCharge_AC.AnnualSum .= charge_ac * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_ac_charge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfCharge_AC)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, charge_ac, dfCharge_AC)
+ end
+ end
+ return nothing
end
@doc raw"""
@@ -331,81 +364,94 @@ end
Function for writing the vre-storage discharging decision variables/expressions.
"""
-function write_vre_stor_discharge(path::AbstractString, inputs::Dict, setup::Dict, EP::Model)
- gen = inputs["RESOURCES"]
- gen_VRE_STOR = gen.VreStorage
- T = inputs["T"]
- DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
+function write_vre_stor_discharge(path::AbstractString,
+ inputs::Dict,
+ setup::Dict,
+ EP::Model)
+ gen = inputs["RESOURCES"]
+ gen_VRE_STOR = gen.VreStorage
+ T = inputs["T"]
+ DC_DISCHARGE = inputs["VS_STOR_DC_DISCHARGE"]
AC_DISCHARGE = inputs["VS_STOR_AC_DISCHARGE"]
- WIND = inputs["VS_WIND"]
- SOLAR = inputs["VS_SOLAR"]
-
- # DC discharging of battery dataframe
- if !isempty(DC_DISCHARGE)
- dfDischarge_DC = DataFrame(Resource = inputs["RESOURCE_NAMES_DC_DISCHARGE"], Zone = inputs["ZONES_DC_DISCHARGE"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(DC_DISCHARGE)[1]))
- power_vre_stor = value.(EP[:vP_DC_DISCHARGE]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge.!=0)])
- if setup["ParameterScale"] == 1
- power_vre_stor *= ModelScalingFactor
- end
- dfDischarge_DC.AnnualSum .= power_vre_stor * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_dc_discharge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfDischarge_DC)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, power_vre_stor, dfDischarge_DC)
- end
- end
-
- # AC discharging of battery dataframe
- if !isempty(AC_DISCHARGE)
- dfDischarge_AC = DataFrame(Resource = inputs["RESOURCE_NAMES_AC_DISCHARGE"], Zone = inputs["ZONES_AC_DISCHARGE"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(AC_DISCHARGE)[1]))
- power_vre_stor = value.(EP[:vP_AC_DISCHARGE]).data
- if setup["ParameterScale"] == 1
- power_vre_stor *= ModelScalingFactor
- end
- dfDischarge_AC.AnnualSum .= power_vre_stor * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_ac_discharge.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfDischarge_AC)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, power_vre_stor, dfDischarge_AC)
- end
- end
-
- # Wind generation of co-located resource dataframe
- if !isempty(WIND)
- dfVP_VRE_STOR = DataFrame(Resource = inputs["RESOURCE_NAMES_WIND"], Zone = inputs["ZONES_WIND"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(WIND)[1]))
- vre_vre_stor = value.(EP[:vP_WIND]).data
- if setup["ParameterScale"] == 1
- vre_vre_stor *= ModelScalingFactor
- end
- dfVP_VRE_STOR.AnnualSum .= vre_vre_stor * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_wind_power.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfVP_VRE_STOR)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, vre_vre_stor, dfVP_VRE_STOR)
- end
- end
-
- # Solar generation of co-located resource dataframe
- if !isempty(SOLAR)
- dfVP_VRE_STOR = DataFrame(Resource = inputs["RESOURCE_NAMES_SOLAR"], Zone = inputs["ZONES_SOLAR"], AnnualSum = Array{Union{Missing,Float32}}(undef, size(SOLAR)[1]))
- vre_vre_stor = value.(EP[:vP_SOLAR]).data .* etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar.!=0)])
- if setup["ParameterScale"] == 1
- vre_vre_stor *= ModelScalingFactor
- end
- dfVP_VRE_STOR.AnnualSum .= vre_vre_stor * inputs["omega"]
-
- filepath = joinpath(path,"vre_stor_solar_power.csv")
- if setup["WriteOutputs"] == "annual"
- write_annual(filepath, dfVP_VRE_STOR)
- else # setup["WriteOutputs"] == "full"
- write_fulltimeseries(filepath, vre_vre_stor, dfVP_VRE_STOR)
- end
- end
- return nothing
+ WIND = inputs["VS_WIND"]
+ SOLAR = inputs["VS_SOLAR"]
+
+ # DC discharging of battery dataframe
+ if !isempty(DC_DISCHARGE)
+ dfDischarge_DC = DataFrame(Resource = inputs["RESOURCE_NAMES_DC_DISCHARGE"],
+ Zone = inputs["ZONES_DC_DISCHARGE"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(DC_DISCHARGE)[1]))
+ power_vre_stor = value.(EP[:vP_DC_DISCHARGE]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.stor_dc_discharge .!= 0)])
+ if setup["ParameterScale"] == 1
+ power_vre_stor *= ModelScalingFactor
+ end
+ dfDischarge_DC.AnnualSum .= power_vre_stor * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_dc_discharge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfDischarge_DC)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, power_vre_stor, dfDischarge_DC)
+ end
+ end
+
+ # AC discharging of battery dataframe
+ if !isempty(AC_DISCHARGE)
+ dfDischarge_AC = DataFrame(Resource = inputs["RESOURCE_NAMES_AC_DISCHARGE"],
+ Zone = inputs["ZONES_AC_DISCHARGE"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(AC_DISCHARGE)[1]))
+ power_vre_stor = value.(EP[:vP_AC_DISCHARGE]).data
+ if setup["ParameterScale"] == 1
+ power_vre_stor *= ModelScalingFactor
+ end
+ dfDischarge_AC.AnnualSum .= power_vre_stor * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_ac_discharge.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfDischarge_AC)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, power_vre_stor, dfDischarge_AC)
+ end
+ end
+
+ # Wind generation of co-located resource dataframe
+ if !isempty(WIND)
+ dfVP_VRE_STOR = DataFrame(Resource = inputs["RESOURCE_NAMES_WIND"],
+ Zone = inputs["ZONES_WIND"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(WIND)[1]))
+ vre_vre_stor = value.(EP[:vP_WIND]).data
+ if setup["ParameterScale"] == 1
+ vre_vre_stor *= ModelScalingFactor
+ end
+ dfVP_VRE_STOR.AnnualSum .= vre_vre_stor * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_wind_power.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfVP_VRE_STOR)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, vre_vre_stor, dfVP_VRE_STOR)
+ end
+ end
+
+ # Solar generation of co-located resource dataframe
+ if !isempty(SOLAR)
+ dfVP_VRE_STOR = DataFrame(Resource = inputs["RESOURCE_NAMES_SOLAR"],
+ Zone = inputs["ZONES_SOLAR"],
+ AnnualSum = Array{Union{Missing, Float32}}(undef, size(SOLAR)[1]))
+ vre_vre_stor = value.(EP[:vP_SOLAR]).data .*
+ etainverter.(gen_VRE_STOR[(gen_VRE_STOR.solar .!= 0)])
+ if setup["ParameterScale"] == 1
+ vre_vre_stor *= ModelScalingFactor
+ end
+ dfVP_VRE_STOR.AnnualSum .= vre_vre_stor * inputs["omega"]
+
+ filepath = joinpath(path, "vre_stor_solar_power.csv")
+ if setup["WriteOutputs"] == "annual"
+ write_annual(filepath, dfVP_VRE_STOR)
+ else # setup["WriteOutputs"] == "full"
+ write_fulltimeseries(filepath, vre_vre_stor, dfVP_VRE_STOR)
+ end
+ end
+ return nothing
end
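Throughout the charge, discharge, and solar blocks in this file, DC-side quantities are converted with the inverter efficiency before being written (multiplied for discharge and solar output, divided for charging), so the CSV files are reported on the AC side of the inverter. A toy illustration of that conversion, with made-up values standing in for etainverter and the model variables:

    eta_inverter = 0.96                 # assumed inverter efficiency
    p_dc = [5.0, 4.0, 0.0]              # MW discharged on the DC bus over three hours
    p_ac = p_dc .* eta_inverter         # MW delivered to the grid after inversion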
diff --git a/test/expression_manipulation_test.jl b/test/expression_manipulation_test.jl
index aae5d442ec..71891d80ac 100644
--- a/test/expression_manipulation_test.jl
+++ b/test/expression_manipulation_test.jl
@@ -3,11 +3,11 @@ using HiGHS
function setup_sum_model()
EP = Model(HiGHS.Optimizer)
- @variable(EP, x[i=1:100,j=1:4:200]>=0)
- @variable(EP, y[i=1:100,j=1:50]>=0)
- @expression(EP, eX[i=1:100,j=1:4:200], 2.0*x[i,j]+i+10.0*j)
- @expression(EP, eY[i=1:100,j=1:50], 3.0*y[i,j]+2*i+j)
- @expression(EP, eZ[i=1:100,j=1:50], 2.0*x[i,(j-1)*4+1] + 4.0*y[i,j])
+ @variable(EP, x[i = 1:100, j = 1:4:200]>=0)
+ @variable(EP, y[i = 1:100, j = 1:50]>=0)
+ @expression(EP, eX[i = 1:100, j = 1:4:200], 2.0*x[i, j]+i+10.0*j)
+ @expression(EP, eY[i = 1:100, j = 1:50], 3.0*y[i, j]+2*i+j)
+ @expression(EP, eZ[i = 1:100, j = 1:50], 2.0 * x[i, (j - 1) * 4 + 1]+4.0 * y[i, j])
return EP
end
@@ -61,21 +61,21 @@ function sum_combo_expr()
return true
end
-let
+let
EP = Model(HiGHS.Optimizer)
# Test fill_with_zeros!
- small_zeros_expr = Array{AffExpr,2}(undef,(2,3))
+ small_zeros_expr = Array{AffExpr, 2}(undef, (2, 3))
GenX.fill_with_zeros!(small_zeros_expr)
@test small_zeros_expr == AffExpr.([0.0 0.0 0.0; 0.0 0.0 0.0])
# Test fill_with_const!
- small_const_expr = Array{AffExpr,2}(undef,(3,2))
+ small_const_expr = Array{AffExpr, 2}(undef, (3, 2))
GenX.fill_with_const!(small_const_expr, 6.0)
@test small_const_expr == AffExpr.([6.0 6.0; 6.0 6.0; 6.0 6.0])
# Test create_empty_expression! with fill_with_const!
- large_dims = (2,10,20)
+ large_dims = (2, 10, 20)
GenX.create_empty_expression!(EP, :large_expr, large_dims)
@test all(EP[:large_expr] .== 0.0)
@@ -93,11 +93,12 @@ let
@test all(EP[:large_expr][:] .== 18.0)
# Test add_similar_to_expression! returns an error if the dimensions don't match
- GenX.create_empty_expression!(EP, :small_expr, (2,3))
- @test_throws ErrorException GenX.add_similar_to_expression!(EP[:large_expr], EP[:small_expr])
+ GenX.create_empty_expression!(EP, :small_expr, (2, 3))
+ @test_throws ErrorException GenX.add_similar_to_expression!(EP[:large_expr],
+ EP[:small_expr])
# Test we can add variables to an expression using add_similar_to_expression!
- @variable(EP, test_var[1:large_dims[1], 1:large_dims[2], 1:large_dims[3]] >= 0)
+ @variable(EP, test_var[1:large_dims[1], 1:large_dims[2], 1:large_dims[3]]>=0)
GenX.add_similar_to_expression!(EP[:large_expr], test_var)
@test EP[:large_expr][100] == test_var[100] + 18.0
@@ -117,7 +118,7 @@ let
@test sum_combo_expr() == true
# Test add_term_to_expression! for variable
- @variable(EP, single_var >= 0)
+ @variable(EP, single_var>=0)
GenX.add_term_to_expression!(EP[:large_expr], single_var)
@test EP[:large_expr][100] == test_var[100] + 22.0 + single_var
@@ -144,12 +145,12 @@ let
unregister(EP, :var_denseaxisarray)
end
- ###### ###### ###### ###### ###### ###### ######
- ###### ###### ###### ###### ###### ###### ######
- # Performance tests we can perhaps add later
- # These require the BenchmarkTests.jl package
- ###### ###### ###### ###### ###### ###### ######
- ###### ###### ###### ###### ###### ###### ######
+###### ###### ###### ###### ###### ###### ######
+###### ###### ###### ###### ###### ###### ######
+# Performance tests we can perhaps add later
+# These require the BenchmarkTests.jl package
+###### ###### ###### ###### ###### ###### ######
+###### ###### ###### ###### ###### ###### ######
# function test_performance(expr_dims)
# EP = Model(HiGHS.Optimizer)
@@ -165,4 +166,3 @@ end
# small_benchmark = test_performance((2,3))
# medium_benchmark = test_performance((2,10,20))
# large_benchmark = test_performance((2,20,40))
-
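The whitespace changes in this test file, like those in the rest of the diff, are mechanical formatter output rather than hand edits. A hedged sketch of reproducing the same pass locally, assuming JuliaFormatter.jl is installed and picks up the formatter settings committed at the repository root:

    using JuliaFormatter

    # format the working copy in place; returns true if nothing needed changing
    already_formatted = format(".")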
diff --git a/test/resource_test.jl b/test/resource_test.jl
index 2203af17e8..7824187bfa 100644
--- a/test/resource_test.jl
+++ b/test/resource_test.jl
@@ -3,109 +3,109 @@ let
check_resource = GenX.check_resource
therm = Resource(:Resource => "my_therm",
- :THERM => 1,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 0)
+ :THERM => 1,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 0)
stor_lds = Resource(:Resource => "stor_lds",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 1,
- :LDS => 1)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 1,
+ :LDS => 1)
hydro_lds = Resource(:Resource => "hydro_lds",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 1,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 1)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 1,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 1)
bad_lds = Resource(:Resource => "bad lds combo",
- :THERM => 0,
- :FLEX => 1,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 1)
+ :THERM => 0,
+ :FLEX => 1,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 1)
bad_none = Resource(:Resource => "none",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 0)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 0)
bad_twotypes = Resource(:Resource => "too many",
- :THERM => 1,
- :FLEX => 1,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 0)
+ :THERM => 1,
+ :FLEX => 1,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 0)
bad_multiple = Resource(:Resource => "multiple_bad",
- :THERM => 1,
- :FLEX => 1,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 0,
- :STOR => 0,
- :LDS => 1)
+ :THERM => 1,
+ :FLEX => 1,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 0,
+ :STOR => 0,
+ :LDS => 1)
# MUST_RUN units contribution to reserves
must_run = Resource(:Resource => "must_run",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 1,
- :STOR => 0,
- :LDS => 0,
- :Reg_Max => 0,
- :Rsv_Max => 0)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 1,
+ :STOR => 0,
+ :LDS => 0,
+ :Reg_Max => 0,
+ :Rsv_Max => 0)
bad_must_run = Resource(:Resource => "bad_must_run",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 1,
- :STOR => 0,
- :LDS => 0,
- :Reg_Max => 0.083333333,
- :Rsv_Max => 0.166666667)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 1,
+ :STOR => 0,
+ :LDS => 0,
+ :Reg_Max => 0.083333333,
+ :Rsv_Max => 0.166666667)
bad_mustrun_reg = Resource(:Resource => "bad_mustrun_reg",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 1,
- :STOR => 0,
- :LDS => 0,
- :Reg_Max => 0.083333333,
- :Rsv_Max => 0)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 1,
+ :STOR => 0,
+ :LDS => 0,
+ :Reg_Max => 0.083333333,
+ :Rsv_Max => 0)
bad_mustrun_rsv = Resource(:Resource => "bad_mustrun_rsv",
- :THERM => 0,
- :FLEX => 0,
- :HYDRO => 0,
- :VRE => 0,
- :MUST_RUN => 1,
- :STOR => 0,
- :LDS => 0,
- :Reg_Max => 0,
- :Rsv_Max => 0.166666667)
+ :THERM => 0,
+ :FLEX => 0,
+ :HYDRO => 0,
+ :VRE => 0,
+ :MUST_RUN => 1,
+ :STOR => 0,
+ :LDS => 0,
+ :Reg_Max => 0,
+ :Rsv_Max => 0.166666667)
function check_okay(resource)
e = check_resource(resource)
@@ -143,6 +143,4 @@ let
end
test_validate_bad(multiple_bad_resources)
-
-
end
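The fixtures above exercise GenX.check_resource with rows that have exactly one technology flag set (valid), none or two (invalid), LDS on a resource that is neither storage nor hydro (invalid), and MUST_RUN units with nonzero reserve limits (invalid). An illustrative restatement of those rules, inferred only from the test cases and not taken from GenX's implementation:

    # toy validity check over a Dict of flags; GenX's real check works on Resource objects
    function flags_look_valid(r::Dict{Symbol, <:Real})
        typeflags = (:THERM, :FLEX, :HYDRO, :VRE, :MUST_RUN, :STOR)
        n_types = count(f -> get(r, f, 0) == 1, typeflags)
        lds_ok = get(r, :LDS, 0) == 0 || get(r, :STOR, 0) == 1 || get(r, :HYDRO, 0) == 1
        mustrun_ok = get(r, :MUST_RUN, 0) == 0 ||
                     (get(r, :Reg_Max, 0) == 0 && get(r, :Rsv_Max, 0) == 0)
        return n_types == 1 && lds_ok && mustrun_ok
    end

    flags_look_valid(Dict(:THERM => 1))              # true, like "my_therm"
    flags_look_valid(Dict(:FLEX => 1, :LDS => 1))    # false, like "bad lds combo"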
diff --git a/test/runtests.jl b/test/runtests.jl
index 1722acabce..ff624f632e 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -4,7 +4,6 @@ using Logging
include("utilities.jl")
-
@testset "Expr manipulation" begin
include("expression_manipulation_test.jl")
end
@@ -16,7 +15,7 @@ if VERSION ≥ v"1.7"
end
# Test GenX modules
-@testset verbose = true "GenX modules" begin
+@testset verbose=true "GenX modules" begin
@testset "Three zones" begin
include("test_threezones.jl")
end
diff --git a/test/test_DCOPF.jl b/test/test_DCOPF.jl
index bbac42ff62..ca15bef686 100644
--- a/test/test_DCOPF.jl
+++ b/test/test_DCOPF.jl
@@ -8,11 +8,9 @@ obj_true = 395.171391
test_path = "DCOPF"
# Define test inputs
-genx_setup = Dict(
- "Trans_Loss_Segments" => 0,
+genx_setup = Dict("Trans_Loss_Segments" => 0,
"StorageLosses" => 0,
- "DC_OPF" => 1,
-)
+ "DC_OPF" => 1)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
@@ -23,11 +21,11 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
optimal_tol = round_from_tol!(optimal_tol, optimal_tol)
write_testlog(test_path, obj_test, optimal_tol, test_result)
-end # module TestDCOPF
\ No newline at end of file
+end # module TestDCOPF
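These case tests compare the solved objective against a hard-coded reference value, first converting the solver's relative optimality tolerance into an absolute one. A small numeric sketch of that comparison (only obj_true comes from the test above; the solver value and relative tolerance below are made up):

    obj_true = 395.171391                     # reference value from the test
    obj_test = 395.171392                     # pretend solver result
    optimal_tol_rel = 1e-8                    # pretend relative tolerance reported by the solver
    optimal_tol = optimal_tol_rel * obj_test  # convert to an absolute tolerance
    isapprox(obj_test, obj_true; atol = optimal_tol)  # the check behind @test obj_test≈obj_true atol=optimal_tol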
diff --git a/test/test_VRE_storage.jl b/test/test_VRE_storage.jl
index 10ce1b9d31..2765283f6e 100644
--- a/test/test_VRE_storage.jl
+++ b/test/test_VRE_storage.jl
@@ -7,8 +7,7 @@ obj_true = 92376.060123
test_path = "VRE_storage"
# Define test inputs
-genx_setup = Dict(
- "NetworkExpansion" => 1,
+genx_setup = Dict("NetworkExpansion" => 1,
"UCommit" => 2,
"CapacityReserveMargin" => 1,
"MinCapReq" => 1,
@@ -16,8 +15,7 @@ genx_setup = Dict(
"CO2Cap" => 1,
"StorageLosses" => 1,
"VirtualChargeDischargeCost" => 1,
- "ParameterScale" => 1,
-)
+ "ParameterScale" => 1)
# Run the case and get the objective value and tolerance
EP, inputs, _ = redirect_stdout(devnull) do
@@ -28,7 +26,7 @@ optimal_tol_rel = get_attribute(EP, "dual_feasibility_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_compute_conflicts.jl b/test/test_compute_conflicts.jl
index c8c4c88f72..e02780eec8 100644
--- a/test/test_compute_conflicts.jl
+++ b/test/test_compute_conflicts.jl
@@ -3,22 +3,22 @@ module TestConflicts
using Test
include(joinpath(@__DIR__, "utilities.jl"))
-test_path = joinpath(@__DIR__,"compute_conflicts");
+test_path = joinpath(@__DIR__, "compute_conflicts")
# Define test inputs
-genx_setup = Dict{Any,Any}(
- "Trans_Loss_Segments" => 1,
+genx_setup = Dict{Any, Any}("Trans_Loss_Segments" => 1,
"CO2Cap" => 1,
"StorageLosses" => 1,
"MaxCapReq" => 1,
- "ComputeConflicts" => 1
-)
+ "ComputeConflicts" => 1)
genxoutput = redirect_stdout(devnull) do
run_genx_case_conflict_testing(test_path, genx_setup)
end
-test_result = @test length(genxoutput)==2
-write_testlog(test_path,"Testing that the infeasible model is correctly handled",test_result)
+test_result = @test length(genxoutput) == 2
+write_testlog(test_path,
+ "Testing that the infeasible model is correctly handled",
+ test_result)
-end
\ No newline at end of file
+end
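run_genx_case_conflict_testing is wrapped in redirect_stdout(devnull) so the solver log does not clutter the test output; the same pattern appears in the other test files. A minimal stand-alone example of that wrapper with a hypothetical noisy function:

    noisy() = (println("lots of solver output ..."); 2)

    result = redirect_stdout(devnull) do
        noisy()             # printed text goes to devnull
    end

    result == 2             # the return value still comes back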
diff --git a/test/test_electrolyzer.jl b/test/test_electrolyzer.jl
index 426540eef1..7789751e86 100644
--- a/test/test_electrolyzer.jl
+++ b/test/test_electrolyzer.jl
@@ -8,13 +8,11 @@ obj_true = 6946.9618
test_path = "electrolyzer"
# Define test inputs
-genx_setup = Dict(
- "Trans_Loss_Segments" => 1,
+genx_setup = Dict("Trans_Loss_Segments" => 1,
"UCommit" => 2,
"StorageLosses" => 1,
"HydrogenHourlyMatching" => 1,
- "ParameterScale" => 1,
-)
+ "ParameterScale" => 1)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
@@ -26,7 +24,7 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_examples.jl b/test/test_examples.jl
index ed060f9ca1..a15b5ef17b 100644
--- a/test/test_examples.jl
+++ b/test/test_examples.jl
@@ -5,13 +5,12 @@ using GenX
include(joinpath(@__DIR__, "utilities.jl"))
-
# Test that the examples in the example_systems directory run without error
function test_examples()
base_path = Base.dirname(Base.dirname(pathof(GenX)))
examples_path = joinpath(base_path, "example_systems")
- examples_dir = readdir(examples_path, join=true)
+ examples_dir = readdir(examples_path, join = true)
for example_dir in examples_dir
if isdir(example_dir) && isfile(joinpath(example_dir, "Run.jl"))
@info "Running example in $example_dir"
@@ -25,4 +24,4 @@ end
test_examples()
end
-end # module
\ No newline at end of file
+end # module
diff --git a/test/test_load_resource_data.jl b/test/test_load_resource_data.jl
index fec45f8c8e..d03674fffe 100644
--- a/test/test_load_resource_data.jl
+++ b/test/test_load_resource_data.jl
@@ -11,32 +11,33 @@ struct InputsTrue
inputs_filename::AbstractString
end
-
function test_macro_interface(attr::Symbol, gen, dfGen)
f = getfield(GenX, attr)
@test f.(gen) == dfGen[!, attr]
end
function test_ids_with(attr::Symbol, gen, dfGen)
- @test GenX.ids_with(gen,attr) == dfGen[dfGen[!, attr] .!= 0, :r_id]
+ @test GenX.ids_with(gen, attr) == dfGen[dfGen[!, attr] .!= 0, :r_id]
end
function test_ids_with_nonneg(attr::Symbol, gen, dfGen)
- @test GenX.ids_with_nonneg(gen,attr) == dfGen[dfGen[!, attr] .>= 0, :r_id]
+ @test GenX.ids_with_nonneg(gen, attr) == dfGen[dfGen[!, attr] .>= 0, :r_id]
end
function test_ids_with_positive(attr::Symbol, gen, dfGen)
- @test GenX.ids_with_positive(gen,attr) == dfGen[dfGen[!, attr] .> 0, :r_id]
+ @test GenX.ids_with_positive(gen, attr) == dfGen[dfGen[!, attr] .> 0, :r_id]
end
-function prepare_inputs_true(test_path::AbstractString, in_filenames::InputsTrue, setup::Dict)
+function prepare_inputs_true(test_path::AbstractString,
+ in_filenames::InputsTrue,
+ setup::Dict)
gen_filename = in_filenames.gen_filename
inputs_filename = in_filenames.inputs_filename
dfGen = GenX.load_dataframe(joinpath(test_path, gen_filename))
- scale_factor = setup["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.
+ scale_factor = setup["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.0
GenX.rename!(dfGen, lowercase.(names(dfGen)))
GenX.scale_resources_data!(dfGen, scale_factor)
- dfGen[!,:r_id] = 1:size(dfGen,1)
+ dfGen[!, :r_id] = 1:size(dfGen, 1)
inputs_true = load(joinpath(test_path, inputs_filename))
return dfGen, inputs_true
end
@@ -88,27 +89,44 @@ function test_load_scaled_resources_data(gen, dfGen)
@test GenX.fuel.(gen) == dfGen.fuel
@test GenX.co2_capture_fraction.(gen) == dfGen.co2_capture_fraction
@test GenX.co2_capture_fraction_startup.(gen) == dfGen.co2_capture_fraction_startup
- @test GenX.ccs_disposal_cost_per_metric_ton.(gen) == dfGen.ccs_disposal_cost_per_metric_ton
+ @test GenX.ccs_disposal_cost_per_metric_ton.(gen) ==
+ dfGen.ccs_disposal_cost_per_metric_ton
@test GenX.biomass.(gen) == dfGen.biomass
## multi-fuel flags
- @test GenX.ids_with_fuel(gen) == dfGen[(dfGen[!,:fuel] .!= "None"),:r_id]
- @test GenX.ids_with_positive(gen, GenX.co2_capture_fraction) == dfGen[dfGen.co2_capture_fraction .>0,:r_id]
- @test GenX.ids_with_singlefuel(gen) == dfGen[dfGen.multi_fuels.!=1,:r_id]
- @test GenX.ids_with_multifuels(gen) == dfGen[dfGen.multi_fuels.==1,:r_id]
+ @test GenX.ids_with_fuel(gen) == dfGen[(dfGen[!, :fuel] .!= "None"), :r_id]
+ @test GenX.ids_with_positive(gen, GenX.co2_capture_fraction) ==
+ dfGen[dfGen.co2_capture_fraction .> 0, :r_id]
+ @test GenX.ids_with_singlefuel(gen) == dfGen[dfGen.multi_fuels .!= 1, :r_id]
+ @test GenX.ids_with_multifuels(gen) == dfGen[dfGen.multi_fuels .== 1, :r_id]
if !isempty(GenX.ids_with_multifuels(gen))
MULTI_FUELS = GenX.ids_with_multifuels(gen)
max_fuels = maximum(GenX.num_fuels.(gen))
for i in 1:max_fuels
- @test findall(g -> GenX.max_cofire_cols(g, tag=i) < 1, gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel",i, "_max_cofire_level"))].< 1, :][!, :r_id]
- @test findall(g -> GenX.max_cofire_start_cols(g, tag=i) < 1, gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel",i, "_max_cofire_level_start"))].< 1, :][!, :r_id]
- @test findall(g -> GenX.min_cofire_cols(g, tag=i) > 0, gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel",i, "_min_cofire_level"))].> 0, :][!, :r_id]
- @test findall(g -> GenX.min_cofire_start_cols(g, tag=i) > 0, gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel",i, "_min_cofire_level_start"))].> 0, :][!, :r_id]
- @test GenX.fuel_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i))]
- @test GenX.heat_rate_cols.(gen, tag=i) == dfGen[!,Symbol(string("heat_rate",i, "_mmbtu_per_mwh"))]
- @test GenX.max_cofire_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i, "_max_cofire_level"))]
- @test GenX.min_cofire_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i, "_min_cofire_level"))]
- @test GenX.max_cofire_start_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i, "_max_cofire_level_start"))]
- @test GenX.min_cofire_start_cols.(gen, tag=i) == dfGen[!,Symbol(string("fuel",i, "_min_cofire_level_start"))]
+ @test findall(g -> GenX.max_cofire_cols(g, tag = i) < 1, gen[MULTI_FUELS]) ==
+ dfGen[dfGen[!, Symbol(string("fuel", i, "_max_cofire_level"))] .< 1, :][!,
+ :r_id]
+ @test findall(g -> GenX.max_cofire_start_cols(g, tag = i) < 1,
+ gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel", i, "_max_cofire_level_start"))] .< 1,
+ :][!,
+ :r_id]
+ @test findall(g -> GenX.min_cofire_cols(g, tag = i) > 0, gen[MULTI_FUELS]) ==
+ dfGen[dfGen[!, Symbol(string("fuel", i, "_min_cofire_level"))] .> 0, :][!,
+ :r_id]
+ @test findall(g -> GenX.min_cofire_start_cols(g, tag = i) > 0,
+ gen[MULTI_FUELS]) == dfGen[dfGen[!, Symbol(string("fuel", i, "_min_cofire_level_start"))] .> 0,
+ :][!,
+ :r_id]
+ @test GenX.fuel_cols.(gen, tag = i) == dfGen[!, Symbol(string("fuel", i))]
+ @test GenX.heat_rate_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("heat_rate", i, "_mmbtu_per_mwh"))]
+ @test GenX.max_cofire_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("fuel", i, "_max_cofire_level"))]
+ @test GenX.min_cofire_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("fuel", i, "_min_cofire_level"))]
+ @test GenX.max_cofire_start_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("fuel", i, "_max_cofire_level_start"))]
+ @test GenX.min_cofire_start_cols.(gen, tag = i) ==
+ dfGen[!, Symbol(string("fuel", i, "_min_cofire_level_start"))]
end
end
@test GenX.ids_with_mga(gen) == dfGen[dfGen.mga .== 1, :r_id]
@@ -118,12 +136,12 @@ function test_load_scaled_resources_data(gen, dfGen)
end
function test_add_policies_to_resources(gen, dfGen)
- @test GenX.esr.(gen, tag=1) == dfGen.esr_1
- @test GenX.esr.(gen, tag=2) == dfGen.esr_2
- @test GenX.min_cap.(gen, tag=1) == dfGen.mincaptag_1
- @test GenX.min_cap.(gen, tag=2) == dfGen.mincaptag_2
- @test GenX.min_cap.(gen, tag=3) == dfGen.mincaptag_3
- @test GenX.derating_factor.(gen, tag=1) == dfGen.capres_1
+ @test GenX.esr.(gen, tag = 1) == dfGen.esr_1
+ @test GenX.esr.(gen, tag = 2) == dfGen.esr_2
+ @test GenX.min_cap.(gen, tag = 1) == dfGen.mincaptag_1
+ @test GenX.min_cap.(gen, tag = 2) == dfGen.mincaptag_2
+ @test GenX.min_cap.(gen, tag = 3) == dfGen.mincaptag_3
+ @test GenX.derating_factor.(gen, tag = 1) == dfGen.capres_1
end
function test_add_modules_to_resources(gen, dfGen)
@@ -136,7 +154,6 @@ function test_add_modules_to_resources(gen, dfGen)
end
function test_inputs_keys(inputs, inputs_true)
-
@test inputs["G"] == inputs_true["G"]
@test inputs["HYDRO_RES"] == inputs_true["HYDRO_RES"]
@@ -159,7 +176,7 @@ function test_inputs_keys(inputs, inputs_true)
@test inputs["THERM_NO_COMMIT"] == inputs_true["THERM_NO_COMMIT"]
@test inputs["COMMIT"] == inputs_true["COMMIT"]
@test inputs["C_Start"] == inputs_true["C_Start"]
-
+
@test Set(inputs["RET_CAP"]) == inputs_true["RET_CAP"]
@test Set(inputs["RET_CAP_CHARGE"]) == inputs_true["RET_CAP_CHARGE"]
@test Set(inputs["RET_CAP_ENERGY"]) == inputs_true["RET_CAP_ENERGY"]
@@ -167,14 +184,17 @@ function test_inputs_keys(inputs, inputs_true)
@test Set(inputs["NEW_CAP_ENERGY"]) == inputs_true["NEW_CAP_ENERGY"]
@test Set(inputs["NEW_CAP_CHARGE"]) == inputs_true["NEW_CAP_CHARGE"]
- if isempty(inputs["MULTI_FUELS"])
- @test string.(inputs["slope_cols"]) == lowercase.(string.(inputs_true["slope_cols"]))
- @test string.(inputs["intercept_cols"]) == lowercase.(string.(inputs_true["intercept_cols"]))
- @test inputs["PWFU_data"] == rename!(inputs_true["PWFU_data"], lowercase.(names(inputs_true["PWFU_data"])))
+ if isempty(inputs["MULTI_FUELS"])
+ @test string.(inputs["slope_cols"]) ==
+ lowercase.(string.(inputs_true["slope_cols"]))
+ @test string.(inputs["intercept_cols"]) ==
+ lowercase.(string.(inputs_true["intercept_cols"]))
+ @test inputs["PWFU_data"] ==
+ rename!(inputs_true["PWFU_data"], lowercase.(names(inputs_true["PWFU_data"])))
@test inputs["PWFU_Num_Segments"] == inputs_true["PWFU_Num_Segments"]
@test inputs["THERM_COMMIT_PWFU"] == inputs_true["THERM_COMMIT_PWFU"]
end
-
+
@test inputs["R_ZONES"] == inputs_true["R_ZONES"]
@test inputs["RESOURCE_ZONES"] == inputs_true["RESOURCE_ZONES"]
@test inputs["RESOURCE_NAMES"] == inputs_true["RESOURCES"]
@@ -183,7 +203,7 @@ end
function test_resource_specific_attributes(gen, dfGen, inputs)
@test GenX.is_buildable(gen) == dfGen[dfGen.new_build .== 1, :r_id]
@test GenX.is_retirable(gen) == dfGen[dfGen.can_retire .== 1, :r_id]
-
+
rs = GenX.ids_with_positive(gen, GenX.max_cap_mwh)
@test rs == dfGen[dfGen.max_cap_mwh .> 0, :r_id]
@test GenX.max_cap_mwh.(rs) == dfGen[dfGen.max_cap_mwh .> 0, :r_id]
@@ -192,7 +212,7 @@ function test_resource_specific_attributes(gen, dfGen, inputs)
@test GenX.max_charge_cap_mw.(rs) == dfGen[dfGen.max_charge_cap_mw .> 0, :r_id]
rs = GenX.ids_with_unit_commitment(gen)
@test rs == dfGen[dfGen.therm .== 1, :r_id]
- @test GenX.cap_size.(gen[rs]) == dfGen[dfGen.therm.==1,:cap_size]
+ @test GenX.cap_size.(gen[rs]) == dfGen[dfGen.therm .== 1, :cap_size]
rs = setdiff(inputs["HAS_FUEL"], inputs["THERM_COMMIT"])
@test GenX.heat_rate_mmbtu_per_mwh.(gen[rs]) == dfGen[rs, :heat_rate_mmbtu_per_mwh]
rs = setdiff(inputs["THERM_COMMIT"], inputs["THERM_COMMIT_PWFU"])
@@ -211,23 +231,23 @@ function test_resource_specific_attributes(gen, dfGen, inputs)
@test GenX.min_charge_cap_mw.(gen[rs]) == dfGen[rs, :min_charge_cap_mw]
@test GenX.existing_charge_cap_mw.(gen[rs]) == dfGen[rs, :existing_charge_cap_mw]
@test GenX.inv_cost_charge_per_mwyr.(gen[rs]) == dfGen[rs, :inv_cost_charge_per_mwyr]
- @test GenX.fixed_om_cost_charge_per_mwyr.(gen[rs]) == dfGen[rs, :fixed_om_cost_charge_per_mwyr]
+ @test GenX.fixed_om_cost_charge_per_mwyr.(gen[rs]) ==
+ dfGen[rs, :fixed_om_cost_charge_per_mwyr]
rs = union(inputs["HYDRO_RES_KNOWN_CAP"], inputs["STOR_HYDRO_LONG_DURATION"])
- @test GenX.hydro_energy_to_power_ratio.(gen[rs]) == dfGen[rs, :hydro_energy_to_power_ratio]
+ @test GenX.hydro_energy_to_power_ratio.(gen[rs]) ==
+ dfGen[rs, :hydro_energy_to_power_ratio]
end
function test_load_resources_data()
- setup = Dict(
- "ParameterScale" => 0,
+ setup = Dict("ParameterScale" => 0,
"OperationalReserves" => 1,
"UCommit" => 2,
- "MultiStage" => 1,
- )
+ "MultiStage" => 1)
# Merge the setup with the default settings
settings = GenX.default_settings()
merge!(settings, setup)
-
+
test_path = joinpath("load_resources", "test_gen_non_colocated")
# load dfGen and inputs_true to compare against
@@ -269,25 +289,22 @@ function test_load_resources_data()
end
function test_load_VRE_STOR_data()
-
- setup = Dict(
- "ParameterScale" => 0,
+ setup = Dict("ParameterScale" => 0,
"OperationalReserves" => 1,
"UCommit" => 2,
- "MultiStage" => 0,
- )
+ "MultiStage" => 0)
# Merge the setup with the default settings
settings = GenX.default_settings()
merge!(settings, setup)
-
- test_path = joinpath("load_resources","test_gen_vre_stor")
+
+ test_path = joinpath("load_resources", "test_gen_vre_stor")
input_true_filenames = InputsTrue("generators_data.csv", "inputs_after_loadgen.jld2")
dfGen, inputs_true = prepare_inputs_true(test_path, input_true_filenames, settings)
dfVRE_STOR = GenX.load_dataframe(joinpath(test_path, "Vre_and_stor_data.csv"))
dfVRE_STOR = GenX.rename!(dfVRE_STOR, lowercase.(names(dfVRE_STOR)))
- scale_factor = settings["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.
+ scale_factor = settings["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.0
GenX.scale_vre_stor_data!(dfVRE_STOR, scale_factor)
resources_path = joinpath(test_path, settings["ResourcesFolder"])
@@ -304,27 +321,36 @@ function test_load_VRE_STOR_data()
rs = inputs["VRE_STOR"]
@test GenX.solar(gen) == dfVRE_STOR[dfVRE_STOR.solar .== 1, :r_id]
@test GenX.wind(gen) == dfVRE_STOR[dfVRE_STOR.wind .== 1, :r_id]
- @test GenX.storage_dc_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .>= 1, :r_id]
- @test GenX.storage_sym_dc_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 1, :r_id]
- @test GenX.storage_asym_dc_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 2, :r_id]
+ @test GenX.storage_dc_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .>= 1, :r_id]
+ @test GenX.storage_sym_dc_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 1, :r_id]
+ @test GenX.storage_asym_dc_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 2, :r_id]
@test GenX.storage_dc_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .>= 1, :r_id]
- @test GenX.storage_sym_dc_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 1, :r_id]
- @test GenX.storage_asym_dc_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 2, :r_id]
-
- @test GenX.storage_ac_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .>= 1, :r_id]
- @test GenX.storage_sym_ac_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 1, :r_id]
- @test GenX.storage_asym_ac_discharge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 2, :r_id]
+ @test GenX.storage_sym_dc_charge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 1, :r_id]
+ @test GenX.storage_asym_dc_charge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 2, :r_id]
+
+ @test GenX.storage_ac_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .>= 1, :r_id]
+ @test GenX.storage_sym_ac_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 1, :r_id]
+ @test GenX.storage_asym_ac_discharge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 2, :r_id]
@test GenX.storage_ac_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .>= 1, :r_id]
- @test GenX.storage_sym_ac_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 1, :r_id]
- @test GenX.storage_asym_ac_charge(gen) == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 2, :r_id]
+ @test GenX.storage_sym_ac_charge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 1, :r_id]
+ @test GenX.storage_asym_ac_charge(gen) ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 2, :r_id]
@test GenX.technology.(gen[rs]) == dfVRE_STOR.technology
- @test GenX.is_LDS_VRE_STOR(gen) == dfVRE_STOR[dfVRE_STOR.lds_vre_stor .!= 0, :r_id]
-
- for attr in (:existing_cap_solar_mw,
+ @test GenX.is_LDS_VRE_STOR(gen) == dfVRE_STOR[dfVRE_STOR.lds_vre_stor .!= 0, :r_id]
+ for attr in (:existing_cap_solar_mw,
:existing_cap_wind_mw,
:existing_cap_inverter_mw,
:existing_cap_charge_dc_mw,
@@ -335,140 +361,201 @@ function test_load_VRE_STOR_data()
test_ids_with_nonneg(attr, gen[rs], dfVRE_STOR)
end
- for attr in (:max_cap_solar_mw,
- :max_cap_wind_mw,
- :max_cap_inverter_mw,
- :max_cap_charge_dc_mw,
- :max_cap_charge_ac_mw,
- :max_cap_discharge_dc_mw,
- :max_cap_discharge_ac_mw)
+ for attr in (:max_cap_solar_mw,
+ :max_cap_wind_mw,
+ :max_cap_inverter_mw,
+ :max_cap_charge_dc_mw,
+ :max_cap_charge_ac_mw,
+ :max_cap_discharge_dc_mw,
+ :max_cap_discharge_ac_mw)
test_macro_interface(attr, gen[rs], dfVRE_STOR)
test_ids_with_nonneg(attr, gen[rs], dfVRE_STOR)
test_ids_with(attr, gen[rs], dfVRE_STOR)
end
- for attr in (:min_cap_solar_mw,
- :min_cap_wind_mw,
- :min_cap_inverter_mw,
- :min_cap_charge_dc_mw,
- :min_cap_charge_ac_mw,
- :min_cap_discharge_dc_mw,
- :min_cap_discharge_ac_mw,
- :inverter_ratio_solar,
- :inverter_ratio_wind,)
+ for attr in (:min_cap_solar_mw,
+ :min_cap_wind_mw,
+ :min_cap_inverter_mw,
+ :min_cap_charge_dc_mw,
+ :min_cap_charge_ac_mw,
+ :min_cap_discharge_dc_mw,
+ :min_cap_discharge_ac_mw,
+ :inverter_ratio_solar,
+ :inverter_ratio_wind)
test_macro_interface(attr, gen[rs], dfVRE_STOR)
test_ids_with_positive(attr, gen[rs], dfVRE_STOR)
end
for attr in (:etainverter,
- :inv_cost_inverter_per_mwyr,
- :inv_cost_solar_per_mwyr,
- :inv_cost_wind_per_mwyr,
- :inv_cost_discharge_dc_per_mwyr,
- :inv_cost_charge_dc_per_mwyr,
- :inv_cost_discharge_ac_per_mwyr,
- :inv_cost_charge_ac_per_mwyr,
- :fixed_om_inverter_cost_per_mwyr,
- :fixed_om_solar_cost_per_mwyr,
- :fixed_om_wind_cost_per_mwyr,
- :fixed_om_cost_discharge_dc_per_mwyr,
- :fixed_om_cost_charge_dc_per_mwyr,
- :fixed_om_cost_discharge_ac_per_mwyr,
- :fixed_om_cost_charge_ac_per_mwyr,
- :var_om_cost_per_mwh_solar,
- :var_om_cost_per_mwh_wind,
- :var_om_cost_per_mwh_charge_dc,
- :var_om_cost_per_mwh_discharge_dc,
- :var_om_cost_per_mwh_charge_ac,
- :var_om_cost_per_mwh_discharge_ac,
- :eff_up_ac,
- :eff_down_ac,
- :eff_up_dc,
- :eff_down_dc,
- :power_to_energy_ac,
- :power_to_energy_dc)
+ :inv_cost_inverter_per_mwyr,
+ :inv_cost_solar_per_mwyr,
+ :inv_cost_wind_per_mwyr,
+ :inv_cost_discharge_dc_per_mwyr,
+ :inv_cost_charge_dc_per_mwyr,
+ :inv_cost_discharge_ac_per_mwyr,
+ :inv_cost_charge_ac_per_mwyr,
+ :fixed_om_inverter_cost_per_mwyr,
+ :fixed_om_solar_cost_per_mwyr,
+ :fixed_om_wind_cost_per_mwyr,
+ :fixed_om_cost_discharge_dc_per_mwyr,
+ :fixed_om_cost_charge_dc_per_mwyr,
+ :fixed_om_cost_discharge_ac_per_mwyr,
+ :fixed_om_cost_charge_ac_per_mwyr,
+ :var_om_cost_per_mwh_solar,
+ :var_om_cost_per_mwh_wind,
+ :var_om_cost_per_mwh_charge_dc,
+ :var_om_cost_per_mwh_discharge_dc,
+ :var_om_cost_per_mwh_charge_ac,
+ :var_om_cost_per_mwh_discharge_ac,
+ :eff_up_ac,
+ :eff_down_ac,
+ :eff_up_dc,
+ :eff_down_dc,
+ :power_to_energy_ac,
+ :power_to_energy_dc)
test_macro_interface(attr, gen[rs], dfVRE_STOR)
end
# policies
- @test GenX.esr_vrestor.(gen[rs], tag=1) == dfVRE_STOR.esr_vrestor_1
- @test GenX.esr_vrestor.(gen[rs], tag=2) == dfVRE_STOR.esr_vrestor_2
- @test GenX.min_cap_stor.(gen[rs], tag=1) == dfVRE_STOR.mincaptagstor_1
- @test GenX.min_cap_stor.(gen[rs], tag=2) == dfVRE_STOR.mincaptagstor_2
- @test GenX.derating_factor.(gen[rs], tag=1) == dfVRE_STOR.capresvrestor_1
- @test GenX.derating_factor.(gen[rs], tag=2) == dfVRE_STOR.capresvrestor_2
- @test GenX.max_cap_stor.(gen[rs], tag=1) == dfVRE_STOR.maxcaptagstor_1
- @test GenX.max_cap_stor.(gen[rs], tag=2) == dfVRE_STOR.maxcaptagstor_2
- @test GenX.min_cap_solar.(gen[rs], tag=1) == dfVRE_STOR.mincaptagsolar_1
- @test GenX.max_cap_solar.(gen[rs], tag=1) == dfVRE_STOR.maxcaptagsolar_1
- @test GenX.min_cap_wind.(gen[rs], tag=1) == dfVRE_STOR.mincaptagwind_1
- @test GenX.max_cap_wind.(gen[rs], tag=1) == dfVRE_STOR.maxcaptagwind_1
-
- @test GenX.ids_with_policy(gen, GenX.min_cap_solar, tag=1) == dfVRE_STOR[dfVRE_STOR.mincaptagsolar_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.min_cap_wind, tag=1) == dfVRE_STOR[dfVRE_STOR.mincaptagwind_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.min_cap_stor, tag=1) == dfVRE_STOR[dfVRE_STOR.mincaptagstor_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.max_cap_solar, tag=1) == dfVRE_STOR[dfVRE_STOR.maxcaptagsolar_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.max_cap_wind, tag=1) == dfVRE_STOR[dfVRE_STOR.maxcaptagwind_1 .== 1, :r_id]
- @test GenX.ids_with_policy(gen, GenX.max_cap_stor, tag=1) == dfVRE_STOR[dfVRE_STOR.maxcaptagstor_1 .== 1, :r_id]
+ @test GenX.esr_vrestor.(gen[rs], tag = 1) == dfVRE_STOR.esr_vrestor_1
+ @test GenX.esr_vrestor.(gen[rs], tag = 2) == dfVRE_STOR.esr_vrestor_2
+ @test GenX.min_cap_stor.(gen[rs], tag = 1) == dfVRE_STOR.mincaptagstor_1
+ @test GenX.min_cap_stor.(gen[rs], tag = 2) == dfVRE_STOR.mincaptagstor_2
+ @test GenX.derating_factor.(gen[rs], tag = 1) == dfVRE_STOR.capresvrestor_1
+ @test GenX.derating_factor.(gen[rs], tag = 2) == dfVRE_STOR.capresvrestor_2
+ @test GenX.max_cap_stor.(gen[rs], tag = 1) == dfVRE_STOR.maxcaptagstor_1
+ @test GenX.max_cap_stor.(gen[rs], tag = 2) == dfVRE_STOR.maxcaptagstor_2
+ @test GenX.min_cap_solar.(gen[rs], tag = 1) == dfVRE_STOR.mincaptagsolar_1
+ @test GenX.max_cap_solar.(gen[rs], tag = 1) == dfVRE_STOR.maxcaptagsolar_1
+ @test GenX.min_cap_wind.(gen[rs], tag = 1) == dfVRE_STOR.mincaptagwind_1
+ @test GenX.max_cap_wind.(gen[rs], tag = 1) == dfVRE_STOR.maxcaptagwind_1
+
+ @test GenX.ids_with_policy(gen, GenX.min_cap_solar, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.mincaptagsolar_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.min_cap_wind, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.mincaptagwind_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.min_cap_stor, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.mincaptagstor_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.max_cap_solar, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.maxcaptagsolar_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.max_cap_wind, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.maxcaptagwind_1 .== 1, :r_id]
+ @test GenX.ids_with_policy(gen, GenX.max_cap_stor, tag = 1) ==
+ dfVRE_STOR[dfVRE_STOR.maxcaptagstor_1 .== 1, :r_id]
# inputs keys
- @test inputs["VRE_STOR"] == dfGen[dfGen.vre_stor.==1,:r_id]
- @test inputs["VS_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar.!=0),:r_id]
- @test inputs["VS_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind.!=0),:r_id]
- @test inputs["VS_DC"] == union(dfVRE_STOR[dfVRE_STOR.stor_dc_discharge.>=1,:r_id], dfVRE_STOR[dfVRE_STOR.stor_dc_charge.>=1,:r_id], dfVRE_STOR[dfVRE_STOR.solar.!=0,:r_id])
-
- @test inputs["VS_STOR"] == union(dfVRE_STOR[dfVRE_STOR.stor_dc_charge.>=1,:r_id], dfVRE_STOR[dfVRE_STOR.stor_ac_charge.>=1,:r_id],
- dfVRE_STOR[dfVRE_STOR.stor_dc_discharge.>=1,:r_id], dfVRE_STOR[dfVRE_STOR.stor_ac_discharge.>=1,:r_id])
- STOR = inputs["VS_STOR"]
- @test inputs["VS_STOR_DC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge.>=1),:r_id]
- @test inputs["VS_SYM_DC_DISCHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge.==1,:r_id]
- @test inputs["VS_ASYM_DC_DISCHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_discharge.==2,:r_id]
- @test inputs["VS_STOR_DC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_charge.>=1),:r_id]
- @test inputs["VS_SYM_DC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_charge.==1,:r_id]
- @test inputs["VS_ASYM_DC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_charge.==2,:r_id]
- @test inputs["VS_STOR_AC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge.>=1),:r_id]
- @test inputs["VS_SYM_AC_DISCHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge.==1,:r_id]
- @test inputs["VS_ASYM_AC_DISCHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_discharge.==2,:r_id]
- @test inputs["VS_STOR_AC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_charge.>=1),:r_id]
- @test inputs["VS_SYM_AC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_charge.==1,:r_id]
- @test inputs["VS_ASYM_AC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_charge.==2,:r_id]
- @test inputs["VS_LDS"] == dfVRE_STOR[(dfVRE_STOR.lds_vre_stor.!=0),:r_id]
- @test inputs["VS_nonLDS"] == setdiff(STOR, inputs["VS_LDS"])
- @test inputs["VS_ASYM"] == union(inputs["VS_ASYM_DC_CHARGE"], inputs["VS_ASYM_DC_DISCHARGE"], inputs["VS_ASYM_AC_DISCHARGE"], inputs["VS_ASYM_AC_CHARGE"])
- @test inputs["VS_SYM_DC"] == intersect(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"])
- @test inputs["VS_SYM_AC"] == intersect(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])
+ @test inputs["VRE_STOR"] == dfGen[dfGen.vre_stor .== 1, :r_id]
+ @test inputs["VS_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar .!= 0), :r_id]
+ @test inputs["VS_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind .!= 0), :r_id]
+ @test inputs["VS_DC"] == union(dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.stor_dc_charge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.solar .!= 0, :r_id])
+
+ @test inputs["VS_STOR"] == union(dfVRE_STOR[dfVRE_STOR.stor_dc_charge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.stor_ac_charge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .>= 1, :r_id],
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .>= 1, :r_id])
+ STOR = inputs["VS_STOR"]
+ @test inputs["VS_STOR_DC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge .>= 1), :r_id]
+ @test inputs["VS_SYM_DC_DISCHARGE"] ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 1, :r_id]
+ @test inputs["VS_ASYM_DC_DISCHARGE"] ==
+ dfVRE_STOR[dfVRE_STOR.stor_dc_discharge .== 2, :r_id]
+ @test inputs["VS_STOR_DC_CHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_charge .>= 1), :r_id]
+ @test inputs["VS_SYM_DC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 1, :r_id]
+ @test inputs["VS_ASYM_DC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_dc_charge .== 2, :r_id]
+ @test inputs["VS_STOR_AC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge .>= 1), :r_id]
+ @test inputs["VS_SYM_AC_DISCHARGE"] ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 1, :r_id]
+ @test inputs["VS_ASYM_AC_DISCHARGE"] ==
+ dfVRE_STOR[dfVRE_STOR.stor_ac_discharge .== 2, :r_id]
+ @test inputs["VS_STOR_AC_CHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_charge .>= 1), :r_id]
+ @test inputs["VS_SYM_AC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 1, :r_id]
+ @test inputs["VS_ASYM_AC_CHARGE"] == dfVRE_STOR[dfVRE_STOR.stor_ac_charge .== 2, :r_id]
+ @test inputs["VS_LDS"] == dfVRE_STOR[(dfVRE_STOR.lds_vre_stor .!= 0), :r_id]
+ @test inputs["VS_nonLDS"] == setdiff(STOR, inputs["VS_LDS"])
+ @test inputs["VS_ASYM"] == union(inputs["VS_ASYM_DC_CHARGE"],
+ inputs["VS_ASYM_DC_DISCHARGE"],
+ inputs["VS_ASYM_AC_DISCHARGE"],
+ inputs["VS_ASYM_AC_CHARGE"])
+ @test inputs["VS_SYM_DC"] ==
+ intersect(inputs["VS_SYM_DC_CHARGE"], inputs["VS_SYM_DC_DISCHARGE"])
+ @test inputs["VS_SYM_AC"] ==
+ intersect(inputs["VS_SYM_AC_CHARGE"], inputs["VS_SYM_AC_DISCHARGE"])
buildable = dfGen[dfGen.new_build .== 1, :r_id]
retirable = dfGen[dfGen.can_retire .== 1, :r_id]
- @test inputs["NEW_CAP_SOLAR"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.solar.!=0,:r_id], dfVRE_STOR[dfVRE_STOR.max_cap_solar_mw.!=0,:r_id])
- @test inputs["RET_CAP_SOLAR"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.solar.!=0,:r_id], dfVRE_STOR[dfVRE_STOR.existing_cap_solar_mw.>=0,:r_id])
- @test inputs["NEW_CAP_WIND"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.wind.!=0,:r_id], dfVRE_STOR[dfVRE_STOR.max_cap_wind_mw.!=0,:r_id])
- @test inputs["RET_CAP_WIND"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.wind.!=0,:r_id], dfVRE_STOR[dfVRE_STOR.existing_cap_wind_mw.>=0,:r_id])
- @test inputs["NEW_CAP_DC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_inverter_mw.!=0,:r_id], inputs["VS_DC"])
- @test inputs["RET_CAP_DC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_inverter_mw.>=0,:r_id], inputs["VS_DC"])
- @test inputs["NEW_CAP_STOR"] == intersect(buildable, dfGen[dfGen.max_cap_mwh.!=0,:r_id], inputs["VS_STOR"])
- @test inputs["RET_CAP_STOR"] == intersect(retirable, dfGen[dfGen.existing_cap_mwh.>=0,:r_id], inputs["VS_STOR"])
- @test inputs["NEW_CAP_CHARGE_DC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_charge_dc_mw.!=0,:r_id], inputs["VS_ASYM_DC_CHARGE"])
- @test inputs["RET_CAP_CHARGE_DC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_charge_dc_mw.>=0,:r_id], inputs["VS_ASYM_DC_CHARGE"])
- @test inputs["NEW_CAP_DISCHARGE_DC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_discharge_dc_mw.!=0,:r_id], inputs["VS_ASYM_DC_DISCHARGE"])
- @test inputs["RET_CAP_DISCHARGE_DC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_discharge_dc_mw.>=0,:r_id], inputs["VS_ASYM_DC_DISCHARGE"])
- @test inputs["NEW_CAP_CHARGE_AC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_charge_ac_mw.!=0,:r_id], inputs["VS_ASYM_AC_CHARGE"])
- @test inputs["RET_CAP_CHARGE_AC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_charge_ac_mw.>=0,:r_id], inputs["VS_ASYM_AC_CHARGE"])
- @test inputs["NEW_CAP_DISCHARGE_AC"] == intersect(buildable, dfVRE_STOR[dfVRE_STOR.max_cap_discharge_ac_mw.!=0,:r_id], inputs["VS_ASYM_AC_DISCHARGE"])
- @test inputs["RET_CAP_DISCHARGE_AC"] == intersect(retirable, dfVRE_STOR[dfVRE_STOR.existing_cap_discharge_ac_mw.>=0,:r_id], inputs["VS_ASYM_AC_DISCHARGE"])
- @test inputs["RESOURCE_NAMES_VRE_STOR"] == collect(skipmissing(dfVRE_STOR[!,:resource][1:size(inputs["VRE_STOR"])[1]]))
- @test inputs["RESOURCE_NAMES_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar.!=0), :resource]
- @test inputs["RESOURCE_NAMES_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind.!=0), :resource]
- @test inputs["RESOURCE_NAMES_DC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge.!=0), :resource]
- @test inputs["RESOURCE_NAMES_AC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge.!=0), :resource]
- @test inputs["RESOURCE_NAMES_DC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_charge.!=0), :resource]
- @test inputs["RESOURCE_NAMES_AC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_charge.!=0), :resource]
- @test inputs["ZONES_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar.!=0), :zone]
- @test inputs["ZONES_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind.!=0), :zone]
- @test inputs["ZONES_DC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge.!=0), :zone]
- @test inputs["ZONES_AC_DISCHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge.!=0), :zone]
- @test inputs["ZONES_DC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_charge.!=0), :zone]
- @test inputs["ZONES_AC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_charge.!=0), :zone]
+ @test inputs["NEW_CAP_SOLAR"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.solar .!= 0, :r_id],
+ dfVRE_STOR[dfVRE_STOR.max_cap_solar_mw .!= 0, :r_id])
+ @test inputs["RET_CAP_SOLAR"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.solar .!= 0, :r_id],
+ dfVRE_STOR[dfVRE_STOR.existing_cap_solar_mw .>= 0, :r_id])
+ @test inputs["NEW_CAP_WIND"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.wind .!= 0, :r_id],
+ dfVRE_STOR[dfVRE_STOR.max_cap_wind_mw .!= 0, :r_id])
+ @test inputs["RET_CAP_WIND"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.wind .!= 0, :r_id],
+ dfVRE_STOR[dfVRE_STOR.existing_cap_wind_mw .>= 0, :r_id])
+ @test inputs["NEW_CAP_DC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_inverter_mw .!= 0, :r_id],
+ inputs["VS_DC"])
+ @test inputs["RET_CAP_DC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_inverter_mw .>= 0, :r_id],
+ inputs["VS_DC"])
+ @test inputs["NEW_CAP_STOR"] ==
+ intersect(buildable, dfGen[dfGen.max_cap_mwh .!= 0, :r_id], inputs["VS_STOR"])
+ @test inputs["RET_CAP_STOR"] == intersect(retirable,
+ dfGen[dfGen.existing_cap_mwh .>= 0, :r_id],
+ inputs["VS_STOR"])
+ @test inputs["NEW_CAP_CHARGE_DC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_charge_dc_mw .!= 0, :r_id],
+ inputs["VS_ASYM_DC_CHARGE"])
+ @test inputs["RET_CAP_CHARGE_DC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_charge_dc_mw .>= 0, :r_id],
+ inputs["VS_ASYM_DC_CHARGE"])
+ @test inputs["NEW_CAP_DISCHARGE_DC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_discharge_dc_mw .!= 0, :r_id],
+ inputs["VS_ASYM_DC_DISCHARGE"])
+ @test inputs["RET_CAP_DISCHARGE_DC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_discharge_dc_mw .>= 0, :r_id],
+ inputs["VS_ASYM_DC_DISCHARGE"])
+ @test inputs["NEW_CAP_CHARGE_AC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_charge_ac_mw .!= 0, :r_id],
+ inputs["VS_ASYM_AC_CHARGE"])
+ @test inputs["RET_CAP_CHARGE_AC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_charge_ac_mw .>= 0, :r_id],
+ inputs["VS_ASYM_AC_CHARGE"])
+ @test inputs["NEW_CAP_DISCHARGE_AC"] == intersect(buildable,
+ dfVRE_STOR[dfVRE_STOR.max_cap_discharge_ac_mw .!= 0, :r_id],
+ inputs["VS_ASYM_AC_DISCHARGE"])
+ @test inputs["RET_CAP_DISCHARGE_AC"] == intersect(retirable,
+ dfVRE_STOR[dfVRE_STOR.existing_cap_discharge_ac_mw .>= 0, :r_id],
+ inputs["VS_ASYM_AC_DISCHARGE"])
+ @test inputs["RESOURCE_NAMES_VRE_STOR"] ==
+ collect(skipmissing(dfVRE_STOR[!, :resource][1:size(inputs["VRE_STOR"])[1]]))
+ @test inputs["RESOURCE_NAMES_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_DC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_AC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_DC_CHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_charge .!= 0), :resource]
+ @test inputs["RESOURCE_NAMES_AC_CHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_charge .!= 0), :resource]
+ @test inputs["ZONES_SOLAR"] == dfVRE_STOR[(dfVRE_STOR.solar .!= 0), :zone]
+ @test inputs["ZONES_WIND"] == dfVRE_STOR[(dfVRE_STOR.wind .!= 0), :zone]
+ @test inputs["ZONES_DC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_dc_discharge .!= 0), :zone]
+ @test inputs["ZONES_AC_DISCHARGE"] ==
+ dfVRE_STOR[(dfVRE_STOR.stor_ac_discharge .!= 0), :zone]
+ @test inputs["ZONES_DC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_dc_charge .!= 0), :zone]
+ @test inputs["ZONES_AC_CHARGE"] == dfVRE_STOR[(dfVRE_STOR.stor_ac_charge .!= 0), :zone]
end
with_logger(ConsoleLogger(stderr, Logging.Warn)) do
@@ -476,4 +563,4 @@ with_logger(ConsoleLogger(stderr, Logging.Warn)) do
test_load_VRE_STOR_data()
end
-end # module TestLoadResourceData
\ No newline at end of file
+end # module TestLoadResourceData
diff --git a/test/test_multifuels.jl b/test/test_multifuels.jl
index 050f908509..ff1a0efdac 100644
--- a/test/test_multifuels.jl
+++ b/test/test_multifuels.jl
@@ -8,8 +8,7 @@ obj_true = 5494.7919354
test_path = "multi_fuels"
# Define test inputs
-genx_setup = Dict(
- "Trans_Loss_Segments" => 1,
+genx_setup = Dict("Trans_Loss_Segments" => 1,
"EnergyShareRequirement" => 1,
"CapacityReserveMargin" => 1,
"StorageLosses" => 1,
@@ -17,8 +16,7 @@ genx_setup = Dict(
"MaxCapReq" => 1,
"ParameterScale" => 1,
"WriteShadowPrices" => 1,
- "UCommit" => 2,
-)
+ "UCommit" => 2)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
@@ -29,11 +27,11 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
optimal_tol = round_from_tol!(optimal_tol, optimal_tol)
write_testlog(test_path, obj_test, optimal_tol, test_result)
-end # module TestMultiFuels
\ No newline at end of file
+end # module TestMultiFuels
diff --git a/test/test_multistage.jl b/test/test_multistage.jl
index c7f8d83631..4215bca5fc 100644
--- a/test/test_multistage.jl
+++ b/test/test_multistage.jl
@@ -5,38 +5,31 @@ using Test
include(joinpath(@__DIR__, "utilities.jl"))
obj_true = [79734.80032, 41630.03494, 27855.20631]
-test_path = joinpath(@__DIR__, "multi_stage");
+test_path = joinpath(@__DIR__, "multi_stage")
# Define test inputs
-multistage_setup = Dict(
- "NumStages" => 3,
+multistage_setup = Dict("NumStages" => 3,
"StageLengths" => [10, 10, 10],
"WACC" => 0.045,
"ConvergenceTolerance" => 0.01,
- "Myopic" => 0,
-)
+ "Myopic" => 0)
-genx_setup = Dict(
- "Trans_Loss_Segments" => 1,
+genx_setup = Dict("Trans_Loss_Segments" => 1,
"OperationalReserves" => 1,
"CO2Cap" => 2,
"StorageLosses" => 1,
"ParameterScale" => 1,
"UCommit" => 2,
"MultiStage" => 1,
- "MultiStageSettingsDict" => multistage_setup,
-)
+ "MultiStageSettingsDict" => multistage_setup)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
run_genx_case_testing(test_path, genx_setup)
end
-obj_test = objective_value.(EP[i] for i = 1:multistage_setup["NumStages"])
-optimal_tol_rel =
- get_attribute.(
- (EP[i] for i = 1:multistage_setup["NumStages"]),
- "ipm_optimality_tolerance",
- )
+obj_test = objective_value.(EP[i] for i in 1:multistage_setup["NumStages"])
+optimal_tol_rel = get_attribute.((EP[i] for i in 1:multistage_setup["NumStages"]),
+ "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel .* obj_test # Convert to absolute tolerance
# Test the objective value
@@ -47,17 +40,18 @@ obj_test = round_from_tol!.(obj_test, optimal_tol)
optimal_tol = round_from_tol!.(optimal_tol, optimal_tol)
write_testlog(test_path, obj_test, optimal_tol, test_result)
-function test_new_build(EP::Dict,inputs::Dict)
+function test_new_build(EP::Dict, inputs::Dict)
### Test that the resource with New_Build = 0 did not expand capacity
- a = true;
+ a = true
for t in keys(EP)
- if t==1
- a = value(EP[t][:eTotalCap][1]) <= GenX.existing_cap_mw(inputs[1]["RESOURCES"][1])[1]
+ if t == 1
+ a = value(EP[t][:eTotalCap][1]) <=
+ GenX.existing_cap_mw(inputs[1]["RESOURCES"][1])[1]
else
- a = value(EP[t][:eTotalCap][1]) <= value(EP[t-1][:eTotalCap][1])
+ a = value(EP[t][:eTotalCap][1]) <= value(EP[t - 1][:eTotalCap][1])
end
- if a==false
+ if a == false
break
end
end
@@ -65,17 +59,18 @@ function test_new_build(EP::Dict,inputs::Dict)
return a
end
-function test_can_retire(EP::Dict,inputs::Dict)
+function test_can_retire(EP::Dict, inputs::Dict)
### Test that the resource with Can_Retire = 0 did not retire capacity
- a = true;
-
+ a = true
+
for t in keys(EP)
- if t==1
- a = value(EP[t][:eTotalCap][1]) >= GenX.existing_cap_mw(inputs[1]["RESOURCES"][1])[1]
+ if t == 1
+ a = value(EP[t][:eTotalCap][1]) >=
+ GenX.existing_cap_mw(inputs[1]["RESOURCES"][1])[1]
else
- a = value(EP[t][:eTotalCap][1]) >= value(EP[t-1][:eTotalCap][1])
+ a = value(EP[t][:eTotalCap][1]) >= value(EP[t - 1][:eTotalCap][1])
end
- if a==false
+ if a == false
break
end
end
@@ -83,49 +78,59 @@ function test_can_retire(EP::Dict,inputs::Dict)
return a
end
-test_path_new_build = joinpath(test_path, "new_build");
+test_path_new_build = joinpath(test_path, "new_build")
EP, inputs, _ = redirect_stdout(devnull) do
- run_genx_case_testing(test_path_new_build, genx_setup);
+ run_genx_case_testing(test_path_new_build, genx_setup)
end
-new_build_test_result = @test test_new_build(EP,inputs)
-write_testlog(test_path,"Testing that the resource with New_Build = 0 did not expand capacity",new_build_test_result)
+new_build_test_result = @test test_new_build(EP, inputs)
+write_testlog(test_path,
+ "Testing that the resource with New_Build = 0 did not expand capacity",
+ new_build_test_result)
-test_path_can_retire = joinpath(test_path, "can_retire");
+test_path_can_retire = joinpath(test_path, "can_retire")
EP, inputs, _ = redirect_stdout(devnull) do
- run_genx_case_testing(test_path_can_retire, genx_setup);
+ run_genx_case_testing(test_path_can_retire, genx_setup)
end
-can_retire_test_result = @test test_can_retire(EP,inputs)
-write_testlog(test_path,"Testing that the resource with Can_Retire = 0 did not expand capacity",can_retire_test_result)
-
+can_retire_test_result = @test test_can_retire(EP, inputs)
+write_testlog(test_path,
+ "Testing that the resource with Can_Retire = 0 did not expand capacity",
+ can_retire_test_result)
function test_update_cumulative_min_ret!()
# Merge the genx_setup with the default settings
settings = GenX.default_settings()
- for ParameterScale ∈ [0,1]
+ for ParameterScale in [0, 1]
genx_setup["ParameterScale"] = ParameterScale
merge!(settings, genx_setup)
inputs_dict = Dict()
true_min_retirements = Dict()
-
+
scale_factor = settings["ParameterScale"] == 1 ? GenX.ModelScalingFactor : 1.0
redirect_stdout(devnull) do
warnerror_logger = ConsoleLogger(stderr, Logging.Warn)
with_logger(warnerror_logger) do
for t in 1:3
- inpath_sub = joinpath(test_path, "cum_min_ret", string("inputs_p",t))
-
- true_min_retirements[t] = CSV.read(joinpath(inpath_sub, "resources", "Resource_multistage_data.csv"), DataFrame)
- rename!(true_min_retirements[t], lowercase.(names(true_min_retirements[t])))
+ inpath_sub = joinpath(test_path, "cum_min_ret", string("inputs_p", t))
+
+ true_min_retirements[t] = CSV.read(joinpath(inpath_sub,
+ "resources",
+ "Resource_multistage_data.csv"),
+ DataFrame)
+ rename!(true_min_retirements[t],
+ lowercase.(names(true_min_retirements[t])))
GenX.scale_multistage_data!(true_min_retirements[t], scale_factor)
inputs_dict[t] = Dict()
inputs_dict[t]["Z"] = 1
GenX.load_demand_data!(settings, inpath_sub, inputs_dict[t])
- GenX.load_resources_data!(inputs_dict[t], settings, inpath_sub, joinpath(inpath_sub, settings["ResourcesFolder"]))
- compute_cumulative_min_retirements!(inputs_dict,t)
+ GenX.load_resources_data!(inputs_dict[t],
+ settings,
+ inpath_sub,
+ joinpath(inpath_sub, settings["ResourcesFolder"]))
+ compute_cumulative_min_retirements!(inputs_dict, t)
end
end
end
@@ -133,27 +138,47 @@ function test_update_cumulative_min_ret!()
for t in 1:3
# Test that the cumulative min retirements are updated correctly
gen = inputs_dict[t]["RESOURCES"]
- @test GenX.min_retired_cap_mw.(gen) == true_min_retirements[t].min_retired_cap_mw
- @test GenX.min_retired_energy_cap_mw.(gen) == true_min_retirements[t].min_retired_energy_cap_mw
- @test GenX.min_retired_charge_cap_mw.(gen) == true_min_retirements[t].min_retired_charge_cap_mw
- @test GenX.min_retired_cap_inverter_mw.(gen) == true_min_retirements[t].min_retired_cap_inverter_mw
- @test GenX.min_retired_cap_solar_mw.(gen) == true_min_retirements[t].min_retired_cap_solar_mw
- @test GenX.min_retired_cap_wind_mw.(gen) == true_min_retirements[t].min_retired_cap_wind_mw
- @test GenX.min_retired_cap_discharge_dc_mw.(gen) == true_min_retirements[t].min_retired_cap_discharge_dc_mw
- @test GenX.min_retired_cap_charge_dc_mw.(gen) == true_min_retirements[t].min_retired_cap_charge_dc_mw
- @test GenX.min_retired_cap_discharge_ac_mw.(gen) == true_min_retirements[t].min_retired_cap_discharge_ac_mw
- @test GenX.min_retired_cap_charge_ac_mw.(gen) == true_min_retirements[t].min_retired_cap_charge_ac_mw
-
- @test GenX.cum_min_retired_cap_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_mw for i in 1:t)
- @test GenX.cum_min_retired_energy_cap_mw.(gen) == sum(true_min_retirements[i].min_retired_energy_cap_mw for i in 1:t)
- @test GenX.cum_min_retired_charge_cap_mw.(gen) == sum(true_min_retirements[i].min_retired_charge_cap_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_inverter_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_inverter_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_solar_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_solar_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_wind_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_wind_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_discharge_dc_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_discharge_dc_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_charge_dc_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_charge_dc_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_discharge_ac_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_discharge_ac_mw for i in 1:t)
- @test GenX.cum_min_retired_cap_charge_ac_mw.(gen) == sum(true_min_retirements[i].min_retired_cap_charge_ac_mw for i in 1:t)
+ @test GenX.min_retired_cap_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_mw
+ @test GenX.min_retired_energy_cap_mw.(gen) ==
+ true_min_retirements[t].min_retired_energy_cap_mw
+ @test GenX.min_retired_charge_cap_mw.(gen) ==
+ true_min_retirements[t].min_retired_charge_cap_mw
+ @test GenX.min_retired_cap_inverter_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_inverter_mw
+ @test GenX.min_retired_cap_solar_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_solar_mw
+ @test GenX.min_retired_cap_wind_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_wind_mw
+ @test GenX.min_retired_cap_discharge_dc_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_discharge_dc_mw
+ @test GenX.min_retired_cap_charge_dc_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_charge_dc_mw
+ @test GenX.min_retired_cap_discharge_ac_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_discharge_ac_mw
+ @test GenX.min_retired_cap_charge_ac_mw.(gen) ==
+ true_min_retirements[t].min_retired_cap_charge_ac_mw
+
+ @test GenX.cum_min_retired_cap_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_mw for i in 1:t)
+ @test GenX.cum_min_retired_energy_cap_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_energy_cap_mw for i in 1:t)
+ @test GenX.cum_min_retired_charge_cap_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_charge_cap_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_inverter_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_inverter_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_solar_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_solar_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_wind_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_wind_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_discharge_dc_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_discharge_dc_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_charge_dc_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_charge_dc_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_discharge_ac_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_discharge_ac_mw for i in 1:t)
+ @test GenX.cum_min_retired_cap_charge_ac_mw.(gen) ==
+ sum(true_min_retirements[i].min_retired_cap_charge_ac_mw for i in 1:t)
end
end
end
diff --git a/test/test_piecewisefuel.jl b/test/test_piecewisefuel.jl
index a9630ce320..db52aaf0da 100644
--- a/test/test_piecewisefuel.jl
+++ b/test/test_piecewisefuel.jl
@@ -7,11 +7,9 @@ obj_true = 2341.82308
test_path = "piecewise_fuel"
# Define test inputs
-genx_setup = Dict(
- "UCommit" => 2,
+genx_setup = Dict("UCommit" => 2,
"CO2Cap" => 1,
- "ParameterScale" => 1,
-)
+ "ParameterScale" => 1)
# Run the case and get the objective value and tolerance
EP, _, _ = redirect_stdout(devnull) do
@@ -22,7 +20,7 @@ optimal_tol_rel = get_attribute(EP, "dual_feasibility_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_retrofit.jl b/test/test_retrofit.jl
index 20ce1c2ea0..54ae82ad5a 100644
--- a/test/test_retrofit.jl
+++ b/test/test_retrofit.jl
@@ -8,8 +8,7 @@ obj_true = 3179.6244
test_path = "retrofit"
# Define test inputs
-genx_setup = Dict(
- "CO2Cap" => 2,
+genx_setup = Dict("CO2Cap" => 2,
"StorageLosses" => 1,
"MinCapReq" => 1,
"MaxCapReq" => 1,
@@ -17,8 +16,7 @@ genx_setup = Dict(
"UCommit" => 2,
"EnergyShareRequirement" => 1,
"CapacityReserveMargin" => 1,
- "MultiStage" => 0,
-)
+ "MultiStage" => 0)
# Run the case and get the objective value and tolerance
EP, inputs, _ = redirect_stdout(devnull) do
@@ -29,7 +27,7 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_threezones.jl b/test/test_threezones.jl
index c533da8770..5d608e0f96 100644
--- a/test/test_threezones.jl
+++ b/test/test_threezones.jl
@@ -8,15 +8,13 @@ obj_true = 6960.20855
test_path = "three_zones"
# Define test inputs
-genx_setup = Dict(
- "NetworkExpansion" => 1,
+genx_setup = Dict("NetworkExpansion" => 1,
"Trans_Loss_Segments" => 1,
"CO2Cap" => 2,
"StorageLosses" => 1,
"MinCapReq" => 1,
"ParameterScale" => 1,
- "UCommit" => 2,
-)
+ "UCommit" => 2)
# Run the case and get the objective value and tolerance
EP, inputs, _ = redirect_stdout(devnull) do
@@ -27,7 +25,7 @@ optimal_tol_rel = get_attribute(EP, "ipm_optimality_tolerance")
optimal_tol = optimal_tol_rel * obj_test # Convert to absolute tolerance
# Test the objective value
-test_result = @test obj_test ≈ obj_true atol = optimal_tol
+test_result = @test obj_test≈obj_true atol=optimal_tol
# Round objective value and tolerance. Write to test log.
obj_test = round_from_tol!(obj_test, optimal_tol)
diff --git a/test/test_time_domain_reduction.jl b/test/test_time_domain_reduction.jl
index 90dedfc17f..7a70df7425 100644
--- a/test/test_time_domain_reduction.jl
+++ b/test/test_time_domain_reduction.jl
@@ -1,6 +1,5 @@
module TestTDR
-
import GenX
import Test
import JLD2, Clustering
@@ -17,7 +16,7 @@ TDR_Results_test = joinpath(test_folder, "TDR_results_test")
# Folder with true clustering results for LTS and non-LTS versions
TDR_Results_true = if VERSION == v"1.6.7"
joinpath(test_folder, "TDR_results_true_LTS")
-else
+else
joinpath(test_folder, "TDR_results_true")
end
@@ -27,23 +26,21 @@ if isdir(TDR_Results_test)
end
# Inputs for cluster_inputs function
-genx_setup = Dict(
- "TimeDomainReduction" => 1,
+genx_setup = Dict("TimeDomainReduction" => 1,
"TimeDomainReductionFolder" => "TDR_results_test",
"UCommit" => 2,
"CapacityReserveMargin" => 1,
"MinCapReq" => 1,
"MaxCapReq" => 1,
"EnergyShareRequirement" => 1,
- "CO2Cap" => 2,
-)
+ "CO2Cap" => 2)
settings = GenX.default_settings()
merge!(settings, genx_setup)
clustering_test = with_logger(ConsoleLogger(stderr, Logging.Warn)) do
GenX.cluster_inputs(test_folder, settings_path, settings, random = false)["ClusterObject"]
-end
+end
# Load true clustering
clustering_true = JLD2.load(joinpath(TDR_Results_true, "clusters_true.jld2"))["ClusterObject"]
diff --git a/test/utilities.jl b/test/utilities.jl
index 43417f5462..300be0f613 100644
--- a/test/utilities.jl
+++ b/test/utilities.jl
@@ -4,8 +4,7 @@ using Dates
using CSV, DataFrames
using Logging, LoggingExtras
-
-const TestResult = Union{Test.Result,String}
+const TestResult = Union{Test.Result, String}
# Exception to throw if a csv file is not found
struct CSVFileNotFound <: Exception
@@ -13,11 +12,9 @@ struct CSVFileNotFound <: Exception
end
Base.showerror(io::IO, e::CSVFileNotFound) = print(io, e.filefullpath, " not found")
-function run_genx_case_testing(
- test_path::AbstractString,
+function run_genx_case_testing(test_path::AbstractString,
test_setup::Dict,
- optimizer::Any = HiGHS.Optimizer,
-)
+ optimizer::Any = HiGHS.Optimizer)
# Merge the genx_setup with the default settings
settings = GenX.default_settings()
merge!(settings, test_setup)
@@ -36,11 +33,9 @@ function run_genx_case_testing(
return EP, inputs, OPTIMIZER
end
-function run_genx_case_conflict_testing(
- test_path::AbstractString,
+function run_genx_case_conflict_testing(test_path::AbstractString,
test_setup::Dict,
- optimizer::Any = HiGHS.Optimizer,
-)
+ optimizer::Any = HiGHS.Optimizer)
# Merge the genx_setup with the default settings
settings = GenX.default_settings()
@@ -59,11 +54,9 @@ function run_genx_case_conflict_testing(
return output
end
-function run_genx_case_simple_testing(
- test_path::AbstractString,
+function run_genx_case_simple_testing(test_path::AbstractString,
genx_setup::Dict,
- optimizer::Any,
-)
+ optimizer::Any)
# Run the case
OPTIMIZER = configure_solver(test_path, optimizer)
inputs = load_inputs(genx_setup, test_path)
@@ -72,29 +65,25 @@ function run_genx_case_simple_testing(
return EP, inputs, OPTIMIZER
end
-function run_genx_case_multistage_testing(
- test_path::AbstractString,
+function run_genx_case_multistage_testing(test_path::AbstractString,
genx_setup::Dict,
- optimizer::Any,
-)
+ optimizer::Any)
# Run the case
OPTIMIZER = configure_solver(test_path, optimizer)
model_dict = Dict()
inputs_dict = Dict()
- for t = 1:genx_setup["MultiStageSettingsDict"]["NumStages"]
+ for t in 1:genx_setup["MultiStageSettingsDict"]["NumStages"]
# Step 0) Set Model Year
genx_setup["MultiStageSettingsDict"]["CurStage"] = t
# Step 1) Load Inputs
inpath_sub = joinpath(test_path, string("inputs_p", t))
inputs_dict[t] = load_inputs(genx_setup, inpath_sub)
- inputs_dict[t] = configure_multi_stage_inputs(
- inputs_dict[t],
+ inputs_dict[t] = configure_multi_stage_inputs(inputs_dict[t],
genx_setup["MultiStageSettingsDict"],
- genx_setup["NetworkExpansion"],
- )
+ genx_setup["NetworkExpansion"])
compute_cumulative_min_retirements!(inputs_dict, t)
@@ -105,16 +94,13 @@ function run_genx_case_multistage_testing(
return model_dict, inputs_dict, OPTIMIZER
end
-
-function write_testlog(
- test_path::AbstractString,
+function write_testlog(test_path::AbstractString,
message::AbstractString,
- test_result::TestResult,
-)
+ test_result::TestResult)
# Save the results to a log file
# Format: datetime, message, test result
- Log_path = joinpath(@__DIR__,"Logs")
+ Log_path = joinpath(@__DIR__, "Logs")
if !isdir(Log_path)
mkdir(Log_path)
end
@@ -132,24 +118,20 @@ function write_testlog(
end
end
-function write_testlog(
- test_path::AbstractString,
+function write_testlog(test_path::AbstractString,
obj_test::Real,
optimal_tol::Real,
- test_result::TestResult,
-)
+ test_result::TestResult)
# Save the results to a log file
# Format: datetime, objective value ± tolerance, test result
message = "$obj_test ± $optimal_tol"
write_testlog(test_path, message, test_result)
end
-function write_testlog(
- test_path::AbstractString,
+function write_testlog(test_path::AbstractString,
obj_test::Vector{<:Real},
optimal_tol::Vector{<:Real},
- test_result::TestResult,
-)
+ test_result::TestResult)
# Save the results to a log file
# Format: datetime, [objective value ± tolerance], test result
@assert length(obj_test) == length(optimal_tol)
@@ -227,13 +209,15 @@ Compare two columns of a DataFrame. Return true if they are identical or approxi
function isapprox_col(col1, col2)
if isequal(col1, col2) || (eltype(col1) <: Float64 && isapprox(col1, col2))
return true
- elseif eltype(col1) <: AbstractString
+ elseif eltype(col1) <: AbstractString
isapprox_col = true
for i in eachindex(col1)
- if !isapprox_col
+ if !isapprox_col
break
- elseif !isnothing(tryparse(Float64, col1[i])) && !isnothing(tryparse(Float64, col2[i]))
- isapprox_col = isapprox_col && isapprox(parse(Float64, col1[i]), parse(Float64, col2[i]))
+ elseif !isnothing(tryparse(Float64, col1[i])) &&
+ !isnothing(tryparse(Float64, col2[i]))
+ isapprox_col = isapprox_col &&
+ isapprox(parse(Float64, col1[i]), parse(Float64, col2[i]))
else
isapprox_col = isapprox_col && isequal(col1[i], col2[i])
end
@@ -243,7 +227,6 @@ function isapprox_col(col1, col2)
return false
end
-
macro warn_error_logger(block)
quote
result = nothing
@@ -256,4 +239,4 @@ macro warn_error_logger(block)
end
result
end
-end
\ No newline at end of file
+end