Add unittests to operations/__init__ validations
Also fix errors/typos in operations/__init__ that came up while writing the unit tests.
gerritdm committed Aug 14, 2019
1 parent 56dfb73 commit 22db762
Showing 2 changed files with 180 additions and 7 deletions.
14 changes: 8 additions & 6 deletions gridpath/project/operations/__init__.py
@@ -340,11 +340,11 @@ def get_inputs_from_database(subscenarios, subproblem, stage, conn):
c2 = conn.cursor()
heat_rates = c2.execute(
"""
- SELECT project, operational_type, fuel, heat_rate_curves_scenario_id,
+ SELECT project, fuel, heat_rate_curves_scenario_id,
load_point_mw, average_heat_rate_mmbtu_per_mwh
FROM inputs_project_portfolios
INNER JOIN
- (SELECT project, operational_type, fuel, heat_rate_curves_scenario_id
+ (SELECT project, fuel, heat_rate_curves_scenario_id
FROM inputs_project_operational_chars
WHERE project_operational_chars_scenario_id = {}) AS op_char
USING(project)
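
Not part of this diff, but for orientation: the validation functions further down consume pandas DataFrames built from query results like the one above. A minimal, hypothetical sketch of that conversion (cursor_to_df is an illustrative helper, not a GridPath function), assuming a standard DB-API cursor such as sqlite3's:

import pandas as pd

def cursor_to_df(cursor):
    # Column names come from the description of the executed SELECT
    columns = [col[0] for col in cursor.description]
    return pd.DataFrame(cursor.fetchall(), columns=columns)

# e.g. hr_df = cursor_to_df(heat_rates)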
@@ -470,6 +470,7 @@ def validate_inputs(subscenarios, subproblem, stage, conn):
)

# Check that specified hr scenarios actually have inputs in the hr table
+ # and check that specified heat rate curves inputs are valid:
validation_errors = validate_heat_rate_curves(hr_df)
for error in validation_errors:
validation_results.append(
@@ -488,19 +489,19 @@ def validate_inputs(subscenarios, subproblem, stage, conn):

def validate_availability(av_df):
"""
- Check 0 < availability <= 1
+ Check 0 <= availability <= 1
:param av_df:
:return:
"""
results = []

- invalids = ((av_df["availability"] <= 0) |
+ invalids = ((av_df["availability"] < 0) |
(av_df["availability"] > 1))
if invalids.any():
bad_projects = av_df["project"][invalids].values
print_bad_projects = ", ".join(bad_projects)
results.append(
"Project(s) '{}': expected 0 < availability <= 1"
"Project(s) '{}': expected 0 <= availability <= 1"
.format(print_bad_projects)
)

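
The relaxed bound means an availability of exactly 0 or 1 is now accepted, and only values strictly outside [0, 1] are reported. A small, hypothetical usage sketch (the DataFrame below is made up for illustration):

import pandas as pd
# from gridpath.project.operations import validate_availability

av_df = pd.DataFrame(
    columns=["project", "horizon", "availability"],
    data=[["gas_ct", 201801, 0],         # accepted now that the check is 0 <= availability
          ["coal_plant", 201801, 1.2]]   # still flagged: availability > 1
)
# validate_availability(av_df)
# expected: ["Project(s) 'coal_plant': expected 0 <= availability <= 1"]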
@@ -511,14 +512,15 @@ def validate_heat_rates_sign(hr_df, columns):
"""
Check heat rate columns > 0
:param hr_df:
+ :param columns:
:return:
"""
results = []

for column in columns:
invalids = (hr_df[column] <= 0)
if invalids.any():
- bad_projects = hr_df["project"][invalids].values
+ bad_projects = pd.unique(hr_df["project"][invalids].values)
print_bad_projects = ", ".join(bad_projects)
results.append(
"Project(s) '{}': Expected '{}' > 0"
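The pd.unique change above matters because hr_df has one row per load point, so a project with several offending load points would otherwise be repeated in the error message. A quick standalone illustration (not code from the repository):

import pandas as pd

projects = pd.Series(["gas_ct", "gas_ct", "coal_plant"])
", ".join(projects.values)             # 'gas_ct, gas_ct, coal_plant'
", ".join(pd.unique(projects.values))  # 'gas_ct, coal_plant' (order of appearance preserved)
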
173 changes: 172 additions & 1 deletion tests/project/operations/test_init.py
@@ -10,13 +10,16 @@
import sys
import unittest
import numpy as np
+ import pandas as pd

from tests.common_functions import create_abstract_model, \
add_components_and_load_data
from tests.project.operations.common_functions import \
get_project_operational_timepoints
from gridpath.project.operations.__init__ import \
- calculate_heat_rate_slope_intercept
+ calculate_heat_rate_slope_intercept, validate_availability, \
+ validate_heat_rates_sign, validate_fuel_vs_heat_rates, \
+ validate_heat_rate_curves


TEST_DATA_DIRECTORY = \
@@ -495,6 +498,174 @@ def test_calculate_heat_rate_slope_intercept(self):
self.assertDictEqual(expected_slopes, actual_slopes)
self.assertDictEqual(expected_intercepts, actual_intercepts)

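The heat-rate curve checks exercised by the new tests below follow from how (load point, average heat rate) pairs translate into piecewise-linear fuel-burn segments. A worked illustration (my own arithmetic, mirroring the 'gas_ct4' case further below, not code taken from the module):

load = [10, 20, 30]     # MW
avg_hr = [11, 10, 9]    # MMBtu/MWh
fuel = [p * h for p, h in zip(load, avg_hr)]    # [110, 200, 270] MMBtu
slopes = [(fuel[i + 1] - fuel[i]) / (load[i + 1] - load[i])
          for i in range(len(load) - 1)]        # [9.0, 7.0] -- marginal heat rates
intercepts = [fuel[i] - slopes[i] * load[i]
              for i in range(len(load) - 1)]    # [20.0, 60.0]
# The marginal heat rate falls from 9 to 7, so fuel burn is not convex in load;
# validate_heat_rate_curves flags exactly this pattern for 'gas_ct4'.
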
def test_availability_validations(self):
test_cases = {
# Make sure correct inputs don't throw an error
1: {"av_df": pd.DataFrame(
columns=["project", "horizon", "availability"],
data=[["gas_ct", 201801, 1],
["gas_ct", 201802, 0.9],
["coal_plant", 201801, 0]
]),
"error": []
},
# Negative availabilities are flagged
2: {"av_df": pd.DataFrame(
columns=["project", "horizon", "availability"],
data=[["gas_ct", 201801, -1],
["gas_ct", 201802, 0.9],
["coal_plant", 201801, 0]
]),
"error": ["Project(s) 'gas_ct': expected 0 <= availability <= 1"]
},
# Availabilities > 1 are flagged
3: {"av_df": pd.DataFrame(
columns=["project", "horizon", "availability"],
data=[["gas_ct", 201801, 1],
["gas_ct", 201802, 0.9],
["coal_plant", 201801, -0.5]
]),
"error": ["Project(s) 'coal_plant': expected 0 <= availability <= 1"]
},
# Make sure multiple bad projects are flagged together in one error message
4: {"av_df": pd.DataFrame(
columns=["project", "horizon", "availability"],
data=[["gas_ct", 201801, 1.5],
["gas_ct", 201802, 0.9],
["coal_plant", 201801, -0.5]
]),
"error": ["Project(s) 'gas_ct, coal_plant': expected 0 <= availability <= 1"]
},
}

for test_case in test_cases.keys():
expected_list = test_cases[test_case]["error"]
actual_list = validate_availability(
av_df=test_cases[test_case]["av_df"],
)
self.assertListEqual(expected_list, actual_list)

def test_heat_rate_validations(self):
test_cases = {
# Make sure correct inputs don't throw an error
1: {"hr_df": pd.DataFrame(
columns=["project", "fuel", "heat_rate_curves_scenario_id",
"load_point_mw",
"average_heat_rate_mmbtu_per_mwh"],
data=[["gas_ct", "gas", 1, 10, 10.5],
["gas_ct", "gas", 1, 20, 9],
["coal_plant", "coal", 1, 100, 10]
]),
"sub_hr_df": pd.DataFrame(
columns=["project", "load_point_mw",
"average_heat_rate_mmbtu_per_mwh"],
data=[["gas_ct", 10, 10.5],
["gas_ct", 20, 9],
["coal_plant", 100, 10]
]),
"columns": ["load_point_mw", "average_heat_rate_mmbtu_per_mwh"],
"hr_sign_error": [],
"fuel_vs_hr_error": [],
"hr_curves_error": []
},
# Sign errors are flagged; errors are grouped by column, so errors in
# different columns produce separate error messages
2: {"hr_df": pd.DataFrame(
columns=["project", "fuel", "heat_rate_curves_scenario_id",
"load_point_mw",
"average_heat_rate_mmbtu_per_mwh"],
data=[["gas_ct", "gas", 1, 10, -10.5],
["gas_ct", "gas", 1, -20, 9],
["coal_plant", "coal", 1, -100, 10]
]),
"sub_hr_df": pd.DataFrame(
columns=["project", "load_point_mw",
"average_heat_rate_mmbtu_per_mwh"],
data=[["gas_ct", 10, -10.5],
["gas_ct", -20, 9],
["coal_plant", -100, 10]
]),
"columns": ["load_point_mw", "average_heat_rate_mmbtu_per_mwh"],
"hr_sign_error": ["Project(s) 'gas_ct, coal_plant': Expected 'load_point_mw' > 0",
"Project(s) 'gas_ct': Expected 'average_heat_rate_mmbtu_per_mwh' > 0"],
"fuel_vs_hr_error": [],
"hr_curves_error": []
},
# Check fuel vs heat rate curve errors
3: {"hr_df": pd.DataFrame(
columns=["project", "fuel", "heat_rate_curves_scenario_id",
"load_point_mw",
"average_heat_rate_mmbtu_per_mwh"],
data=[["gas_ct", "gas", None, None, None],
["coal_plant", None, 1, 100, 10]
]),
"sub_hr_df": pd.DataFrame(
columns=["project", "load_point_mw",
"average_heat_rate_mmbtu_per_mwh"],
data=[["coal_plant", 100, 10]
]),
"columns": ["load_point_mw", "average_heat_rate_mmbtu_per_mwh"],
"hr_sign_error": [],
"fuel_vs_hr_error": ["Project(s) 'gas_ct': Missing heat_rate_curves_scenario_id",
"Project(s) 'coal_plant': No fuel specified so no heat rate expected"],
"hr_curves_error": []
},
# Check heat rate curve validations
4: {"hr_df": pd.DataFrame(
columns=["project", "fuel", "heat_rate_curves_scenario_id",
"load_point_mw",
"average_heat_rate_mmbtu_per_mwh"],
data=[["gas_ct1", "gas", 1, None, None],
["gas_ct2", "gas", 1, 10, 11],
["gas_ct2", "gas", 1, 10, 12],
["gas_ct3", "gas", 1, 10, 11],
["gas_ct3", "gas", 1, 20, 5],
["gas_ct4", "gas", 1, 10, 11],
["gas_ct4", "gas", 1, 20, 10],
["gas_ct4", "gas", 1, 30, 9]
]),
"sub_hr_df": pd.DataFrame(
columns=["project", "load_point_mw",
"average_heat_rate_mmbtu_per_mwh"],
data=[["gas_ct2", 10, 11],
["gas_ct2", 10, 12],
["gas_ct3", 10, 11],
["gas_ct3", 20, 5],
["gas_ct4", 10, 11],
["gas_ct4", 20, 10],
["gas_ct4", 30, 9]
]),
"columns": ["load_point_mw", "average_heat_rate_mmbtu_per_mwh"],
"hr_sign_error": [],
"fuel_vs_hr_error": [],
"hr_curves_error": ["Project(s) 'gas_ct1': Expected at least one load point",
"Project(s) 'gas_ct2': load points can not be identical",
"Project(s) 'gas_ct3': Total fuel burn should increase with increasing load",
"Project(s) 'gas_ct4': Fuel burn should be convex, i.e. marginal heat rate should increase with increading load"]
},

}

for test_case in test_cases.keys():
expected_list = test_cases[test_case]["hr_sign_error"]
actual_list = validate_heat_rates_sign(
hr_df=test_cases[test_case]["sub_hr_df"],
columns=test_cases[test_case]["columns"]
)
self.assertListEqual(expected_list, actual_list)

expected_list = test_cases[test_case]["fuel_vs_hr_error"]
actual_list = validate_fuel_vs_heat_rates(
hr_df=test_cases[test_case]["hr_df"]
)
self.assertListEqual(expected_list, actual_list)

expected_list = test_cases[test_case]["hr_curves_error"]
actual_list = validate_heat_rate_curves(
hr_df=test_cases[test_case]["hr_df"]
)
self.assertListEqual(expected_list, actual_list)


if __name__ == "__main__":
unittest.main()
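
These tests can be run from the repository root with the standard unittest runner, e.g.:

python -m unittest tests.project.operations.test_init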
