From 74644c6f117749c6418e6a687720ad5e585f7289 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 20 Nov 2024 10:28:43 +0100 Subject: [PATCH 01/24] signal processing workflow --- docs/design/Signal_processing_workflow.md | 100 ++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 docs/design/Signal_processing_workflow.md diff --git a/docs/design/Signal_processing_workflow.md b/docs/design/Signal_processing_workflow.md new file mode 100644 index 0000000..b1e6f7a --- /dev/null +++ b/docs/design/Signal_processing_workflow.md @@ -0,0 +1,100 @@ +=========================== +

SIGNAL PROCESSING WORKFLOW

(c) 2024 RTE
Developed by Grupo AIA

===========================

# Model Validation

## Get the curves to process

Read the file containing the **calculated curves**. The curves contained
in this file come from the result of a dynamic simulation performed
with Dynawo when the user has provided a dynamic model of their
network. If the user has provided two sets of curves as input to the tool,
this file contains the producer's curves.

Read the file containing the **reference curves**.

## First resampling: Ensure a constant time step signal.

This section consists of three steps:

- Convert the EMT signals to RMS, if necessary. Currently this step only
applies to the **reference curves**. (The user can provide a set of curves as
input to the tool instead of a dynamic model; in that case, should this step
also be applied to the **calculated curves**?)

- Resample the curves to a common sampling frequency.

- Pass the curves through a low-pass filter, using a common cutoff frequency
and sampling frequency.

A minimal illustrative sketch of these resampling and filtering steps is given
further below.

## Second resampling: Ensure the same time grid for both signals.

The tool shortens the **calculated and reference curves** to ensure that both
sets of curves have exactly the same time range.

## Third: Calculate signal windows.

The pre, during and post windows are calculated (only pre and post if the event
is not temporary), taking into account the exclusion ranges for each window, as
well as the maximum length defined in the standards.

From each set of curves, a subset is generated for each window obtained in the
previous step.

## Validation tests

To run the validation tests, all the curve sets obtained are used: pre,
during (if it exists), post, and the complete curve of each set
(calculated and reference).

## Report

Only the complete curves from the **calculated curve** set and the
**reference curve** set are used to create the final report.

## Roadmap

- Provide a time-shift option when comparing two curve sets, to synchronize the
time at which the event is triggered in the **reference curve** set with the
**calculated curve** set.


# Performance Verifications

## Get the curves to process

Read the file containing the **calculated curves**. The curves contained
in this file come from the result of a dynamic simulation performed
with Dynawo when the user has provided a dynamic model of their
network. If the user has provided a set of curves as input to the tool,
this file contains the producer's curves.

Read the file containing the **complementary curves**. This second set of curves
is only shown in the final report together with the calculated curves;
it is not validated under any circumstances.

## Validation tests

To run the validation tests, only the **calculated curves** set is used.
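
As an illustration of the resampling and filtering steps described above (and of the
constant-time-step and common-time-grid items in the roadmap below), here is a minimal
sketch only, assuming pandas, NumPy and SciPy. The function names, the sampling
frequency and the cutoff frequency are placeholders chosen for the example; they do not
reflect the tool's actual API or configuration.

```python
# Minimal illustrative sketch, not the tool's code.
# Assumptions: curves are pandas DataFrames with a "time" column in seconds;
# the sampling frequency (fs) and cutoff values are arbitrary placeholders.
import numpy as np
import pandas as pd
from scipy import signal


def resample_to_constant_step(curves: pd.DataFrame, fs: float) -> pd.DataFrame:
    """Interpolate every curve onto a constant time step of 1/fs seconds."""
    t_old = curves["time"].to_numpy()
    t_new = np.arange(t_old[0], t_old[-1], 1.0 / fs)
    resampled = {"time": t_new}
    for col in curves.columns:
        if col != "time":
            resampled[col] = np.interp(t_new, t_old, curves[col].to_numpy())
    return pd.DataFrame(resampled)


def low_pass_filter(curves: pd.DataFrame, fs: float, cutoff: float) -> pd.DataFrame:
    """Apply a zero-phase Butterworth low-pass filter to every curve."""
    b, a = signal.butter(2, cutoff, btype="low", fs=fs)
    filtered = curves.copy()
    for col in curves.columns:
        if col != "time":
            filtered[col] = signal.filtfilt(b, a, curves[col].to_numpy())
    return filtered


def trim_to_common_time_grid(
    calculated: pd.DataFrame, reference: pd.DataFrame
) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Shorten both curve sets so that they cover exactly the same time range."""
    t_start = max(calculated["time"].iloc[0], reference["time"].iloc[0])
    t_end = min(calculated["time"].iloc[-1], reference["time"].iloc[-1])
    calculated = calculated[
        (calculated["time"] >= t_start) & (calculated["time"] <= t_end)
    ].reset_index(drop=True)
    reference = reference[
        (reference["time"] >= t_start) & (reference["time"] <= t_end)
    ].reset_index(drop=True)
    return calculated, reference


# Example usage with placeholder values (1 kHz resampling, 15 Hz cutoff):
# calculated = low_pass_filter(resample_to_constant_step(calculated_curves, 1000.0), 1000.0, 15.0)
# reference = low_pass_filter(resample_to_constant_step(reference_curves, 1000.0), 1000.0, 15.0)
# calculated, reference = trim_to_common_time_grid(calculated, reference)
```

The order shown (resample, then filter, then align the time grids) mirrors the two
resampling stages described in this document; the actual cutoff and sampling values
used by the tool come from its configuration, not from this sketch.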
+ +## Report + +The curves from the **calculated curves** set and the +**complementary curves** set are used to create the final report. + +## Roadmap + +- Ensure that the time step signal is constant across all sets of curves, +if **complementary curves** exist. + +- Ensure same time grid for both signals, if **complementary curves** exist. + +- Provide a time shift option when there are two curve sets to synchronize the +time at which the **complementary curves** set event is triggered with the +**calculated curve** set. From 8b1a1a9b1b824cb1f15da87f0c577650e243755e Mon Sep 17 00:00:00 2001 From: marcosmc Date: Tue, 17 Dec 2024 12:05:16 +0100 Subject: [PATCH 02/24] Using parameters to share curves --- docs/tutorial/general_usage.md | 1 + src/dgcv/core/simulator.py | 5 +++-- src/dgcv/core/validator.py | 1 + src/dgcv/curves/manager.py | 29 ++++++++++++++----------- src/dgcv/curves/reader.py | 4 ++-- src/dgcv/dynawo/dynawo.py | 21 +++--------------- src/dgcv/dynawo/simulator.py | 10 +++++---- src/dgcv/model/benchmark.py | 4 ++++ src/dgcv/model/operating_condition.py | 31 ++++++++++++++++----------- src/dgcv/validation/model.py | 9 +++++--- src/dgcv/validation/performance.py | 9 +++++--- 11 files changed, 68 insertions(+), 56 deletions(-) diff --git a/docs/tutorial/general_usage.md b/docs/tutorial/general_usage.md index 112ff5f..55c4c8b 100755 --- a/docs/tutorial/general_usage.md +++ b/docs/tutorial/general_usage.md @@ -648,6 +648,7 @@ The *operating condition* directory is structured in: (dgcv_venv) user@dynawo:~/work/MyTests/Results$ tree PCS_RTE-I10/Islanding/DeltaP10DeltaQ4 -L 1 PCS_RTE-I10/Islanding/DeltaP10DeltaQ4 ├── curves_calculated.csv +├── curves_reference.csv ├── Omega.dyd ├── Omega.par ├── outputs diff --git a/src/dgcv/core/simulator.py b/src/dgcv/core/simulator.py index 607cfec..05bce70 100644 --- a/src/dgcv/core/simulator.py +++ b/src/dgcv/core/simulator.py @@ -15,6 +15,7 @@ from dgcv.core.validator import Disconnection_Model from dgcv.electrical.generator_variables import generator_variables from dgcv.model.producer import Producer +import pandas as pd def get_cfg_oc_name(pcs_bm_name: str, oc_name: str) -> str: @@ -124,7 +125,7 @@ def obtain_reference_curve( working_oc_dir: Path, pcs_bm_name: str, curves: Path, - ) -> float: + ) -> tuple[float, pd.DataFrame]: """Virtual method""" pass @@ -135,7 +136,7 @@ def obtain_simulated_curve( bm_name: str, oc_name: str, reference_event_start_time: float, - ) -> tuple[str, dict, int, bool, bool]: + ) -> tuple[str, dict, int, bool, bool, pd.DataFrame]: """Virtual method""" pass diff --git a/src/dgcv/core/validator.py b/src/dgcv/core/validator.py index 0baf7cc..abfc323 100644 --- a/src/dgcv/core/validator.py +++ b/src/dgcv/core/validator.py @@ -106,6 +106,7 @@ def validate( sim_output_path: str, event_params: dict, fs: float, + curves: dict, ) -> dict: """Virtual method""" pass diff --git a/src/dgcv/curves/manager.py b/src/dgcv/curves/manager.py index e893864..17dcd98 100644 --- a/src/dgcv/curves/manager.py +++ b/src/dgcv/curves/manager.py @@ -64,22 +64,18 @@ def __obtain_files_curve( if success: importer = CurvesImporter(working_oc_dir, get_cfg_oc_name(pcs_bm_name, oc_name)) ( - df_imported_curve, + df_imported_curves, curves_dict, sim_t_event_end, fs, ) = importer.get_curves_dataframe(self._producer.get_zone()) - if df_imported_curve.empty: + if df_imported_curves.empty: success = False has_imported_curves = False - df_imported_curve = df_imported_curve.set_index("time") - if is_reference: - df_imported_curve.to_csv(working_oc_dir / 
"curves_reference.csv", sep=";") - else: - df_imported_curve.to_csv(working_oc_dir / "curves_calculated.csv", sep=";") - self._generators = self.__get_generators(df_imported_curve) - self._gens = _get_generators_ini(self._generators, df_imported_curve) + if not is_reference: + self._generators = self.__get_generators(df_imported_curves) + self._gens = _get_generators_ini(self._generators, df_imported_curves) if importer.config.has_option("Curves-Metadata", "is_field_measurements"): self._is_field_measurements = bool( @@ -116,6 +112,7 @@ def __obtain_files_curve( fault_duration = 0 fs = 0 self._generators_imax = {} + df_imported_curves = pd.DataFrame() config_section = get_cfg_oc_name(pcs_bm_name, oc_name) + ".Event" connect_event_to = config.get_value(config_section, "connect_event_to") @@ -139,6 +136,7 @@ def __obtain_files_curve( fs, success, has_imported_curves, + df_imported_curves, ) def obtain_reference_curve( @@ -147,7 +145,7 @@ def obtain_reference_curve( pcs_bm_name: str, oc_name: str, curves: Path, - ) -> float: + ) -> tuple[float, pd.DataFrame]: """Read the reference curves. Parameters @@ -165,16 +163,19 @@ def obtain_reference_curve( ------- float Instant of time when the event is triggered + DataFrame + Curves imported from the file """ ( event_params, fs, success, has_imported_curves, + curves, ) = self.__obtain_files_curve( working_oc_dir, pcs_bm_name, oc_name, curves, is_reference=True ) - return event_params["start_time"] + return event_params["start_time"], curves def obtain_simulated_curve( self, @@ -183,7 +184,7 @@ def obtain_simulated_curve( bm_name: str, oc_name: str, reference_event_start_time: float, - ) -> tuple[str, dict, float, bool, bool]: + ) -> tuple[str, dict, float, bool, bool, pd.DataFrame]: """Read the input curves to get the simulated curves. Parameters @@ -213,12 +214,15 @@ def obtain_simulated_curve( True if simulation is success bool True if simulation calculated curves + DataFrame + Simulation calculated curves """ ( event_params, fs, success, has_imported_curves, + curves, ) = self.__obtain_files_curve( working_oc_dir, pcs_bm_name, oc_name, self.get_producer().get_producer_curves() ) @@ -229,6 +233,7 @@ def obtain_simulated_curve( fs, success, has_imported_curves, + curves, ) def get_disconnection_model(self) -> Disconnection_Model: diff --git a/src/dgcv/curves/reader.py b/src/dgcv/curves/reader.py index 08c5a6c..d8ab23a 100644 --- a/src/dgcv/curves/reader.py +++ b/src/dgcv/curves/reader.py @@ -189,12 +189,12 @@ def load(self, remove_file=True): if remove_file: file.unlink() - def read(self, data: pd.Dataframe): + def read(self, data: pd.DataFrame): """Read and import the data from the file. Parameters ---------- - data: Dataframe + data: DataFrame Dataframe with all the curves data """ self._time_values = data[self._time_name] diff --git a/src/dgcv/dynawo/dynawo.py b/src/dgcv/dynawo/dynawo.py index 15b73db..4daf618 100644 --- a/src/dgcv/dynawo/dynawo.py +++ b/src/dgcv/dynawo/dynawo.py @@ -309,9 +309,7 @@ def _get_modulus(complex_list: list) -> list: return np.abs(complex_list).tolist() -def _create_curves( - variable_translations: dict, input_file: Path, output_file: Path, f_nom: float -) -> pd.DataFrame: +def _create_curves(variable_translations: dict, input_file: Path) -> pd.DataFrame: """From the curve file generated by the Dynawo dynamic simulator, a new file is created where the values of the different curves are expressed in the units specified in the file and/or different curves are added to obtain the required curves. 
@@ -322,10 +320,6 @@ def _create_curves( Dictionary with correspondences between tool variables and Dynawo variables input_file: Path Curve file created by Dynawo - output_file: Path - Curve file for tool analysis - f_nom: float - Nominal frequency Returns ------- @@ -401,11 +395,7 @@ def _create_curves( else: curves_dict[i] = list(df_curves[i]) - curves_final = pd.DataFrame(curves_dict) - curves_final = curves_final.set_index("time") - curves_final.to_csv(output_file, sep=";") - - return curves_final + return pd.DataFrame(curves_dict) def get_dynawo_version( @@ -488,7 +478,6 @@ def run_base_dynawo( launcher_dwo: Path, jobs_filename: str, variable_translations: dict, - f_nom: float, inputs_path: Path, output_path: Path, save_file: bool = True, @@ -503,8 +492,6 @@ def run_base_dynawo( Name of the JOBS file variable_translations: dict Dictionary with correspondences between tool variables and Dynawo variables - f_nom: float - Nominal frequency inputs_path: Path Directory with Dynawo inputs output_path: Path @@ -541,8 +528,6 @@ def run_base_dynawo( curves_calculated = _create_curves( variable_translations, dynawo_output_dir / "curves/curves.csv", - inputs_path / "curves_calculated.csv", - f_nom, ) return success, log, has_error, curves_calculated @@ -585,7 +570,7 @@ def check_voltage_dip( if not is_simulation_success: return -1 - time_values = list(curves.index.values) + time_values = list(curves["time"]) voltage_values = list(curves[bus_pdr_voltage]) if fault_duration > time_values[-1]: diff --git a/src/dgcv/dynawo/simulator.py b/src/dgcv/dynawo/simulator.py index 715d17e..6b929fc 100644 --- a/src/dgcv/dynawo/simulator.py +++ b/src/dgcv/dynawo/simulator.py @@ -517,7 +517,6 @@ def __execute_dynawo( self._launcher_dwo, "TSOModel", self._curves_dict, - self._f_nom, working_oc_dir, jobs_output_dir, ) @@ -625,7 +624,7 @@ def __get_hiz_fault( fault_start, fault_duration, last_fault_xpu, - fault_rpu=last_fault_rpu, + last_fault_rpu, ) def __get_bolted_fault( @@ -681,7 +680,6 @@ def __run_time_cct( self._launcher_dwo, "TSOModel", self._curves_dict, - self._f_nom, working_oc_dir_attempt, jobs_output_dir, save_file=False, @@ -809,7 +807,7 @@ def obtain_simulated_curve( bm_name: str, oc_name: str, reference_event_start_time: float, - ) -> tuple[str, dict, int, bool, bool]: + ) -> tuple[str, dict, int, bool, bool, pd.DataFrame]: """Runs Dynawo to get the simulated curves. 
Parameters @@ -837,6 +835,8 @@ def obtain_simulated_curve( True if simulation is success bool True if simulation calculated curves + DataFrame + Simulation calculated curves """ # Prepare environment to validate it, @@ -896,6 +896,7 @@ def obtain_simulated_curve( success = False has_dynawo_curves = False event_params = dict() + curves_calculated = pd.DataFrame() self._logger.close_handlers() @@ -905,6 +906,7 @@ def obtain_simulated_curve( 0, success, has_dynawo_curves, + curves_calculated, ) def get_disconnection_model(self) -> Disconnection_Model: diff --git a/src/dgcv/model/benchmark.py b/src/dgcv/model/benchmark.py index 1d58518..48e3693 100644 --- a/src/dgcv/model/benchmark.py +++ b/src/dgcv/model/benchmark.py @@ -473,6 +473,7 @@ def __validate( fs: float, success: bool, has_simulated_curves: bool, + curves: dict, ): op_cond_success, results = op_cond.validate( pcs_benchmark_name, @@ -482,6 +483,7 @@ def __validate( fs, success, has_simulated_curves, + curves, ) # Statuses for the Summary Report @@ -534,6 +536,7 @@ def validate( success, has_simulated_curves, has_curves, + curves, ) = op_cond.has_required_curves(pcs_benchmark_name, self._name) if has_curves == 0: op_cond_success, results, compliance = self.__validate( @@ -545,6 +548,7 @@ def validate( fs, success, has_simulated_curves, + curves, ) # If there is a correct simulation, the report must be created success |= op_cond_success diff --git a/src/dgcv/model/operating_condition.py b/src/dgcv/model/operating_condition.py index b48e18d..afc1ea0 100644 --- a/src/dgcv/model/operating_condition.py +++ b/src/dgcv/model/operating_condition.py @@ -79,9 +79,10 @@ def __obtain_curve( working_oc_dir = self._working_dir / self._pcs_name / bm_name / self._name manage_files.create_dir(working_oc_dir) + curves = dict() reference_event_start_time = None if self.__has_reference_curves(): - reference_event_start_time = self._manager.obtain_reference_curve( + reference_event_start_time, curves["reference"] = self._manager.obtain_reference_curve( working_oc_dir, pcs_bm_name, self._name, self.__get_reference_curves() ) @@ -91,6 +92,7 @@ def __obtain_curve( fs, success, has_simulated_curves, + curves["calculated"], ) = self._simulator.obtain_simulated_curve( working_oc_dir, pcs_bm_name, @@ -106,6 +108,7 @@ def __obtain_curve( fs, success, has_simulated_curves, + curves, ) def __validate( @@ -115,6 +118,7 @@ def __validate( jobs_output_dir: Path, event_params: dict, fs: float, + curves: dict, ) -> dict: if self._validator.is_defined_cct(): @@ -137,6 +141,7 @@ def __validate( jobs_output_dir, event_params, fs, + curves, ) # Operational point without defining its validations @@ -158,6 +163,7 @@ def validate( fs: float, success: bool, has_simulated_curves: bool, + curves: dict, ) -> tuple[bool, dict]: """Validate the Benchmark. @@ -177,6 +183,8 @@ def validate( True if simulation is success has_simulated_curves: bool True if simulation calculated curves + curves: dict + Calculated and reference curves Returns ------- @@ -193,6 +201,7 @@ def validate( jobs_output_dir, event_params, fs, + curves, ) else: results = {"compliance": False, "curves": None} @@ -204,7 +213,7 @@ def has_required_curves( self, pcs_bm_name: str, bm_name: str, - ) -> tuple[Path, Path, dict, float, bool, bool, int]: + ) -> tuple[Path, Path, dict, float, bool, bool, int, dict]: """Check if all curves are present. 
Parameters @@ -233,6 +242,8 @@ def has_required_curves( 1 producer's curves are missing 2 reference curves are missing 3 all curves are missing + dict + Calculated and reference curves """ dgcv_logging.get_logger("Operating Condition").info( "RUNNING BENCHMARK: " + pcs_bm_name + ", OPER. COND.: " + self._name @@ -245,6 +256,7 @@ def has_required_curves( fs, success, has_simulated_curves, + curves, ) = self.__obtain_curve( pcs_bm_name, bm_name, @@ -257,18 +269,15 @@ def has_required_curves( # handled differently. sim_curves = True if not self._producer.is_dynawo_model(): - if not (working_oc_dir / "curves_calculated.csv").is_file(): + if curves["calculated"].empty: dgcv_logging.get_logger("Operating Condition").warning( "Test without producer curves file" ) sim_curves = False else: - csv_calculated_curves = manage_files.read_curves( - working_oc_dir / "curves_calculated.csv" - ) missed_curves = [] for key in measurement_names: - if key not in csv_calculated_curves: + if key not in curves["calculated"]: missed_curves.append(key) sim_curves = False if not sim_curves: @@ -278,18 +287,15 @@ def has_required_curves( ref_curves = True if self.__has_reference_curves(): - if not (working_oc_dir / "curves_reference.csv").is_file(): + if curves["reference"].empty: dgcv_logging.get_logger("Operating Condition").warning( "Test without reference curves file" ) ref_curves = False else: - csv_reference_curves = manage_files.read_curves( - working_oc_dir / "curves_reference.csv" - ) missed_curves = [] for key in measurement_names: - if key not in csv_reference_curves: + if key not in curves["reference"]: missed_curves.append(key) ref_curves = False if not ref_curves: @@ -315,6 +321,7 @@ def has_required_curves( success, has_simulated_curves, has_curves, + curves, ) def get_name(self) -> str: diff --git a/src/dgcv/validation/model.py b/src/dgcv/validation/model.py index c40d176..69de1b5 100644 --- a/src/dgcv/validation/model.py +++ b/src/dgcv/validation/model.py @@ -1131,6 +1131,7 @@ def validate( sim_output_path: str, event_params: dict, fs: float, + curves: dict, ) -> dict: """Model Validation. @@ -1157,9 +1158,11 @@ def validate( # if reference_curves is None: # reference_curves = calculated_curves - csv_calculated_curves = manage_files.read_curves(working_path / "curves_calculated.csv") - if (working_path / "curves_reference.csv").is_file(): - csv_reference_curves = manage_files.read_curves(working_path / "curves_reference.csv") + csv_calculated_curves = curves["calculated"] + csv_calculated_curves.to_csv(working_path / "curves_calculated.csv", sep=";") + if not curves["reference"].empty: + csv_reference_curves = curves["reference"] + csv_reference_curves.to_csv(working_path / "curves_reference.csv", sep=";") else: csv_reference_curves = None diff --git a/src/dgcv/validation/performance.py b/src/dgcv/validation/performance.py index 34b10d8..9fd455b 100644 --- a/src/dgcv/validation/performance.py +++ b/src/dgcv/validation/performance.py @@ -522,6 +522,7 @@ def validate( sim_output_path: str, event_params: dict, fs: float, + curves: dict, ) -> dict: """Electric Performance Verification. 
@@ -543,9 +544,11 @@ def validate( dict Compliance results """ - calculated_curves = manage_files.read_curves(working_path / "curves_calculated.csv") - if (working_path / "curves_reference.csv").is_file(): - reference_curves = manage_files.read_curves(working_path / "curves_reference.csv") + calculated_curves = curves["calculated"] + calculated_curves.to_csv(working_path / "curves_calculated.csv", sep=";") + if not curves["reference"].empty: + reference_curves = curves["reference"] + reference_curves.to_csv(working_path / "curves_reference.csv", sep=";") else: reference_curves = None From a72b166417011125618f04fd8df5864f101f039e Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 10:41:47 +0100 Subject: [PATCH 03/24] Remove unused import --- src/dgcv/validation/performance.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/dgcv/validation/performance.py b/src/dgcv/validation/performance.py index 9fd455b..ef61456 100644 --- a/src/dgcv/validation/performance.py +++ b/src/dgcv/validation/performance.py @@ -19,7 +19,6 @@ MODEL_VALIDATION_PPM, ) from dgcv.core.validator import Stability, Validator -from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging from dgcv.validation import common, compliance_list From ee5f19b6693109e456734a2f8637f2916b4f3121 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 10:46:38 +0100 Subject: [PATCH 04/24] remove unused import --- src/dgcv/validation/model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/dgcv/validation/model.py b/src/dgcv/validation/model.py index 69de1b5..c5913ae 100644 --- a/src/dgcv/validation/model.py +++ b/src/dgcv/validation/model.py @@ -17,7 +17,6 @@ from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters from dgcv.core.validator import Validator -from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging from dgcv.sigpro import signal_windows, sigpro from dgcv.validation import common, compliance_list, sanity_checks, threshold_variables From ca674b9133c1a4170bc2790d60af65b8cf45626a Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 11:09:09 +0100 Subject: [PATCH 05/24] Fix reference curves --- src/dgcv/validation/performance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dgcv/validation/performance.py b/src/dgcv/validation/performance.py index ef61456..21e4ee0 100644 --- a/src/dgcv/validation/performance.py +++ b/src/dgcv/validation/performance.py @@ -545,7 +545,7 @@ def validate( """ calculated_curves = curves["calculated"] calculated_curves.to_csv(working_path / "curves_calculated.csv", sep=";") - if not curves["reference"].empty: + if "reference" in curves and not curves["reference"].empty: reference_curves = curves["reference"] reference_curves.to_csv(working_path / "curves_reference.csv", sep=";") else: From 357d98c4b05df2b6d6948626febb77443d501003 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 12:17:06 +0100 Subject: [PATCH 06/24] Test Windows & Mac OS --- .github/workflows/dev-ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/dev-ci.yml b/.github/workflows/dev-ci.yml index dea1442..3ae8486 100644 --- a/.github/workflows/dev-ci.yml +++ b/.github/workflows/dev-ci.yml @@ -6,11 +6,13 @@ on: jobs: build: - runs-on: ubuntu-latest + name: Build OS ${{ matrix.os}} + runs-on: ${{ matrix.os}} strategy: fail-fast: false matrix: python-version: ["3.9", "3.10"] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: 
actions/checkout@v4 From 5136af61ac848808256f33ce987a7e83adc67807 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 12:40:30 +0100 Subject: [PATCH 07/24] Fix windows CI --- .github/workflows/dev-ci.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/dev-ci.yml b/.github/workflows/dev-ci.yml index 3ae8486..72aea62 100644 --- a/.github/workflows/dev-ci.yml +++ b/.github/workflows/dev-ci.yml @@ -6,7 +6,7 @@ on: jobs: build: - name: Build OS ${{ matrix.os}} + name: Build OS ${{ matrix.os}} with Python ${{ matrix.python-version }} runs-on: ${{ matrix.os}} strategy: fail-fast: false @@ -31,8 +31,13 @@ jobs: run: python -m build --wheel - name: Install wheel + if: matrix.os != 'windows-latest' run: pip install dist/*.whl + - name: Install wheel + if: matrix.os == 'windows-latest' + run: python -m pip install dist/*.whl + # Pending to be added in the near future: # * flake8 src --count --exit-zero --max-complexity=10 - name: Linting with flake8 From 6b0aaa66951f016d8f0cbb904179f486e3b46033 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 12:43:59 +0100 Subject: [PATCH 08/24] use shell --- .github/workflows/dev-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/dev-ci.yml b/.github/workflows/dev-ci.yml index 72aea62..7b1bd0b 100644 --- a/.github/workflows/dev-ci.yml +++ b/.github/workflows/dev-ci.yml @@ -36,6 +36,7 @@ jobs: - name: Install wheel if: matrix.os == 'windows-latest' + shell: bash run: python -m pip install dist/*.whl # Pending to be added in the near future: From e72557dde37dd32e0565a00c7f3c36c8da56b1ed Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 14:32:35 +0100 Subject: [PATCH 09/24] reduce complexity --- src/dgcv/model/benchmark.py | 46 ++-- src/dgcv/validation/model.py | 31 ++- src/dgcv/validation/performance.py | 336 ++++++++++++++++++----------- 3 files changed, 278 insertions(+), 135 deletions(-) diff --git a/src/dgcv/model/benchmark.py b/src/dgcv/model/benchmark.py index 48e3693..9b8cdd1 100644 --- a/src/dgcv/model/benchmark.py +++ b/src/dgcv/model/benchmark.py @@ -284,20 +284,22 @@ def __initialize_validation_by_benchmark(self) -> list: return validations def __init_figures_description(self, validations: list) -> None: - fig_P = config.get_list("ReportCurves", "fig_P") - fig_Q = config.get_list("ReportCurves", "fig_Q") - fig_Ire = config.get_list("ReportCurves", "fig_Ire") - fig_Iim = config.get_list("ReportCurves", "fig_Iim") - fig_Ustator = config.get_list("ReportCurves", "fig_Ustator") - fig_V = config.get_list("ReportCurves", "fig_V") - fig_W = config.get_list("ReportCurves", "fig_W") - fig_Theta = config.get_list("ReportCurves", "fig_Theta") - fig_WRef = config.get_list("ReportCurves", "fig_WRef") - fig_I = config.get_list("ReportCurves", "fig_I") - fig_Tap = config.get_list("ReportCurves", "fig_Tap") - pcs_benchmark_name = self._pcs_name + CASE_SEPARATOR + self._name self._figures_description = [] + self.__init_figures_v(validations, pcs_benchmark_name) + self.__init_figures_p(validations, pcs_benchmark_name) + self.__init_figures_q(validations, pcs_benchmark_name) + self.__init_figures_ire(validations, pcs_benchmark_name) + self.__init_figures_iim(validations, pcs_benchmark_name) + self.__init_figures_w(validations, pcs_benchmark_name) + self.__init_figures_wref(validations, pcs_benchmark_name) + self.__init_figures_i(validations, pcs_benchmark_name) + self.__init_figures_ustator(validations, pcs_benchmark_name) + self.__init_figures_theta(validations, 
pcs_benchmark_name) + self.__init_figures_tap(validations, pcs_benchmark_name) + + def __init_figures_v(self, validations: list, pcs_benchmark_name: str) -> None: + fig_V = config.get_list("ReportCurves", "fig_V") if pcs_benchmark_name in fig_V: tests = [] if ( @@ -321,6 +323,8 @@ def __init_figures_description(self, validations: list) -> None: ] ) + def __init_figures_p(self, validations: list, pcs_benchmark_name: str) -> None: + fig_P = config.get_list("ReportCurves", "fig_P") if pcs_benchmark_name in fig_P: tests = [] if "time_5P" in validations: @@ -334,22 +338,30 @@ def __init_figures_description(self, validations: list) -> None: self._figures_description.append(["fig_P", "BusPDR_BUS_ActivePower", tests, "P(pu)"]) + def __init_figures_q(self, validations: list, pcs_benchmark_name: str) -> None: + fig_Q = config.get_list("ReportCurves", "fig_Q") if pcs_benchmark_name in fig_Q: tests = [] self._figures_description.append(["fig_Q", "BusPDR_BUS_ReactivePower", tests, "Q(pu)"]) + def __init_figures_ire(self, validations: list, pcs_benchmark_name: str) -> None: + fig_Ire = config.get_list("ReportCurves", "fig_Ire") if pcs_benchmark_name in fig_Ire: tests = [] self._figures_description.append( ["fig_Ire", "BusPDR_BUS_ActiveCurrent", tests, "Ire(pu)"] ) + def __init_figures_iim(self, validations: list, pcs_benchmark_name: str) -> None: + fig_Iim = config.get_list("ReportCurves", "fig_Iim") if pcs_benchmark_name in fig_Iim: tests = [] self._figures_description.append( ["fig_Iim", "BusPDR_BUS_ReactiveCurrent", tests, "Iim(pu)"] ) + def __init_figures_w(self, validations: list, pcs_benchmark_name: str) -> None: + fig_W = config.get_list("ReportCurves", "fig_W") if pcs_benchmark_name in fig_W: tests = [] self._figures_description.append( @@ -366,6 +378,8 @@ def __init_figures_description(self, validations: list) -> None: ] ) + def __init_figures_wref(self, validations: list, pcs_benchmark_name: str) -> None: + fig_WRef = config.get_list("ReportCurves", "fig_WRef") if pcs_benchmark_name in fig_WRef: tests = [] if "freq_1" in validations: @@ -388,6 +402,8 @@ def __init_figures_description(self, validations: list) -> None: ] ) + def __init_figures_i(self, validations: list, pcs_benchmark_name: str) -> None: + fig_I = config.get_list("ReportCurves", "fig_I") if pcs_benchmark_name in fig_I: tests = [] self._figures_description.append( @@ -408,6 +424,8 @@ def __init_figures_description(self, validations: list) -> None: ] ) + def __init_figures_ustator(self, validations: list, pcs_benchmark_name: str) -> None: + fig_Ustator = config.get_list("ReportCurves", "fig_Ustator") if pcs_benchmark_name in fig_Ustator: tests = [] if "AVR_5" in validations: @@ -431,6 +449,8 @@ def __init_figures_description(self, validations: list) -> None: ] ) + def __init_figures_theta(self, validations: list, pcs_benchmark_name: str) -> None: + fig_Theta = config.get_list("ReportCurves", "fig_Theta") if pcs_benchmark_name in fig_Theta: tests = [] self._figures_description.append( @@ -447,6 +467,8 @@ def __init_figures_description(self, validations: list) -> None: ] ) + def __init_figures_tap(self, validations: list, pcs_benchmark_name: str) -> None: + fig_Tap = config.get_list("ReportCurves", "fig_Tap") if pcs_benchmark_name in fig_Tap: tests = [] self._figures_description.append( diff --git a/src/dgcv/validation/model.py b/src/dgcv/validation/model.py index c5913ae..2820f1b 100644 --- a/src/dgcv/validation/model.py +++ b/src/dgcv/validation/model.py @@ -908,12 +908,11 @@ def __calculate( return results - def __check( + def 
__create_results( self, compliance_values: dict, - modified_setpoint: str, ) -> dict: - check_results = { + return { "compliance": True, "times_check": True, "sim_t_event_start": compliance_values["t_event_start"], @@ -921,6 +920,11 @@ def __check( "curves_error": compliance_values, } + def __check_times( + self, + check_results: dict, + compliance_values: dict, + ): if compliance_list.contains_key(["reaction_time"], self._validations): check_results["calc_reaction_target"] = compliance_values["calc_reaction_target"] check_results["calc_reaction_time"] = compliance_values["calc_reaction_time"] @@ -1001,6 +1005,11 @@ def __check( else: check_results["overshoot_error"] = "-" + def __check_ramp( + self, + check_results: dict, + compliance_values: dict, + ): if compliance_list.contains_key(["ramp_time_lag"], self._validations): check_results["ramp_time_lag"] = compliance_values["ramp_time_lag"] * 100 thr_ramp_time_lag = config.get_float("GridCode", "thr_ramp_time_lag", 0.10) @@ -1019,6 +1028,11 @@ def __check( check_results["times_check"] &= check_results["ramp_error_check"] check_results["compliance"] &= check_results["ramp_error_check"] + def __check_mae( + self, + check_results: dict, + compliance_values: dict, + ): thr_final_ss_mae = config.get_float("GridCode", "thr_final_ss_mae", 0.01) if compliance_list.contains_key(["mean_absolute_error_voltage"], self._validations): check_results["mae_voltage_1P"] = compliance_values["mae_voltage_1P"] @@ -1065,6 +1079,17 @@ def __check( ) check_results["compliance"] &= check_results["mae_reactive_current_1P_check"] + def __check( + self, + compliance_values: dict, + modified_setpoint: str, + ) -> dict: + check_results = self.__create_results(compliance_values) + + self.__check_times(check_results, compliance_values) + self.__check_ramp(check_results, compliance_values) + self.__check_mae(check_results, compliance_values) + _save_measurement_errors(compliance_values, "voltage", check_results) _save_measurement_errors(compliance_values, "active_power", check_results) diff --git a/src/dgcv/validation/performance.py b/src/dgcv/validation/performance.py index 21e4ee0..fa20864 100644 --- a/src/dgcv/validation/performance.py +++ b/src/dgcv/validation/performance.py @@ -154,38 +154,13 @@ def __init__( self._producer = parameters.get_producer() self._stable_time = stable_time - def __calculate( + def __calculate_simple_times( self, + compliance_values: dict, curves: pd.DataFrame, t_event_start: float, - ) -> dict: - compliance_values = {} - - if compliance_list.contains_key(["static_diff"], self._validations): - max_static_diff = 0 - filter_col = [col for col in curves if col.endswith("_GEN_MagnitudeControlledByAVRPu")] - for curve_name in filter_col: - generator_id = curve_name.replace("_GEN_MagnitudeControlledByAVRPu", "") - magnitude_controlled_by_avr = generator_id + "_GEN_" + "MagnitudeControlledByAVRPu" - avr_setpoint = generator_id + "_GEN_" + "AVRSetpointPu" - - static_diff = common.get_static_diff( - list(curves[magnitude_controlled_by_avr]), - list(curves[avr_setpoint]), - ) - if max_static_diff < static_diff: - max_static_diff = static_diff - compliance_values["static_diff"] = max_static_diff - + ): bus_pdr_voltage = "BusPDR" + "_BUS_" + "Voltage" - compliance_values["is_invalid_test"] = common.is_invalid_test( - list(curves["time"]), - list(curves[bus_pdr_voltage]), - list(curves["BusPDR_BUS_ActivePower"]), - list(curves["BusPDR_BUS_ReactivePower"]), - t_event_start, - ) - if compliance_list.contains_key(["time_5U"], self._validations): 
compliance_values["time_5u"] = common.get_txu_relative( 0.05, @@ -202,6 +177,21 @@ def __calculate( t_event_start, ) + if compliance_list.contains_key(["time_10Pfloor_clear"], self._validations): + compliance_values["time_10pfloor"] = common.get_txpfloor( + 0.1, + list(curves["time"]), + list(curves["BusPDR_BUS_ActivePower"]), + t_event_start, + ) + + def __calculate_composed_times( + self, + compliance_values: dict, + curves: pd.DataFrame, + t_event_start: float, + ) -> dict: + bus_pdr_voltage = "BusPDR" + "_BUS_" + "Voltage" if compliance_list.contains_key( ["time_5P", "time_5P_85U", "time_5P_clear"], self._validations ): @@ -244,38 +234,21 @@ def __calculate( t_event_start, ) - if compliance_list.contains_key(["time_10Pfloor_clear"], self._validations): - compliance_values["time_10pfloor"] = common.get_txpfloor( - 0.1, - list(curves["time"]), - list(curves["BusPDR_BUS_ActivePower"]), - t_event_start, - ) - - if compliance_list.contains_key(["imax_reac"], self._validations): - imax_reac = -1 - imax_reac_check = True - filter_col = [col for col in curves if col.endswith("_GEN_InjectedCurrent")] - for curve_name in filter_col: - generator_id = curve_name.replace("_GEN_InjectedCurrent", "") - injected_current = generator_id + "_GEN_" + "InjectedCurrent" - injected_active_current = generator_id + "_GEN_" + "InjectedActiveCurrent" - - imax_gen_reac, imax_gen_reac_check = common.check_generator_imax( - self._generators_imax[generator_id], - list(curves["time"]), - list(curves[injected_current]), - list(curves[injected_active_current]), - ) - if not imax_gen_reac_check: - if imax_reac_check: - imax_reac = imax_gen_reac - imax_reac_check = imax_gen_reac_check - elif imax_gen_reac < imax_reac: - imax_reac = imax_gen_reac - compliance_values["imax_reac"] = imax_reac - compliance_values["imax_reac_check"] = imax_reac_check + def __calculate_times( + self, + compliance_values: dict, + curves: pd.DataFrame, + t_event_start: float, + ): + self.__calculate_simple_times(compliance_values, curves, t_event_start) + self.__calculate_composed_times(compliance_values, curves, t_event_start) + def __calculate_avr( + self, + compliance_values: dict, + curves: pd.DataFrame, + t_event_start: float, + ): if compliance_list.contains_key(["AVR_5"], self._validations): AVR_5_crv = list() AVR_5_check = True @@ -299,6 +272,11 @@ def __calculate( compliance_values["AVR_5"] = AVR_5 compliance_values["AVR_5_crvs"] = AVR_5_crv + def __calculate_frequency( + self, + compliance_values: dict, + curves: pd.DataFrame, + ): if compliance_list.contains_key(["freq_1"], self._validations): check_freq1 = True time_freq1 = -1 @@ -314,18 +292,81 @@ def __calculate( time_freq1 = gen_time_freq1 compliance_values["check_freq1"] = check_freq1 compliance_values["time_freq1"] = time_freq1 + + def __calculate_others( + self, + compliance_values: dict, + curves: pd.DataFrame, + t_event_start: float, + ): + bus_pdr_voltage = "BusPDR" + "_BUS_" + "Voltage" + compliance_values["is_invalid_test"] = common.is_invalid_test( + list(curves["time"]), + list(curves[bus_pdr_voltage]), + list(curves["BusPDR_BUS_ActivePower"]), + list(curves["BusPDR_BUS_ReactivePower"]), + t_event_start, + ) + + if compliance_list.contains_key(["static_diff"], self._validations): + max_static_diff = 0 + filter_col = [col for col in curves if col.endswith("_GEN_MagnitudeControlledByAVRPu")] + for curve_name in filter_col: + generator_id = curve_name.replace("_GEN_MagnitudeControlledByAVRPu", "") + magnitude_controlled_by_avr = generator_id + "_GEN_" + 
"MagnitudeControlledByAVRPu" + avr_setpoint = generator_id + "_GEN_" + "AVRSetpointPu" + + static_diff = common.get_static_diff( + list(curves[magnitude_controlled_by_avr]), + list(curves[avr_setpoint]), + ) + if max_static_diff < static_diff: + max_static_diff = static_diff + compliance_values["static_diff"] = max_static_diff + + if compliance_list.contains_key(["imax_reac"], self._validations): + imax_reac = -1 + imax_reac_check = True + filter_col = [col for col in curves if col.endswith("_GEN_InjectedCurrent")] + for curve_name in filter_col: + generator_id = curve_name.replace("_GEN_InjectedCurrent", "") + injected_current = generator_id + "_GEN_" + "InjectedCurrent" + injected_active_current = generator_id + "_GEN_" + "InjectedActiveCurrent" + + imax_gen_reac, imax_gen_reac_check = common.check_generator_imax( + self._generators_imax[generator_id], + list(curves["time"]), + list(curves[injected_current]), + list(curves[injected_active_current]), + ) + if not imax_gen_reac_check: + if imax_reac_check: + imax_reac = imax_gen_reac + imax_reac_check = imax_gen_reac_check + elif imax_gen_reac < imax_reac: + imax_reac = imax_gen_reac + compliance_values["imax_reac"] = imax_reac + compliance_values["imax_reac_check"] = imax_reac_check + + def __calculate( + self, + curves: pd.DataFrame, + t_event_start: float, + ) -> dict: + compliance_values = {} + + self.__calculate_times(compliance_values, curves, t_event_start) + self.__calculate_avr(compliance_values, curves, t_event_start) + self.__calculate_frequency(compliance_values, curves) + self.__calculate_others(compliance_values, curves, t_event_start) + return compliance_values - def __check( + def __create_results( self, - simulation_path: Path, - has_dynamic_model: bool, - is_stable: Stability, t_event_start: float, - t_event_end: float, - is_ppm: bool, compliance_values: dict, - ): + ) -> dict: results = { "sim_t_event_start": t_event_start, "compliance": True, @@ -334,15 +375,15 @@ def __check( if self._time_cct is not None: results["time_cct"] = self._time_cct - if compliance_list.contains_key(["static_diff"], self._validations): - _check_compliance( - results, - compliance_values["static_diff"], - "static_diff", - 0.2, - 100, - ) + return results + def __check_simple_times( + self, + results: dict, + t_event_start: float, + t_event_end: float, + compliance_values: dict, + ): if compliance_list.contains_key(["time_5U"], self._validations): _check_compliance( results, @@ -375,25 +416,6 @@ def __check( 5.0, ) - if compliance_list.contains_key(["time_5P_85U"], self._validations): - results["time_85U"] = compliance_values["time_85u"] - _check_compliance( - results, - compliance_values["time_5p"] - compliance_values["time_85u"], - "time_5P_85U", - 10.0, - ) - - if compliance_list.contains_key(["time_10P_85U"], self._validations): - results["time_85U"] = compliance_values["time_85u"] - results["time_10P"] = compliance_values["time_10p"] - _check_compliance( - results, - compliance_values["time_10p"] - compliance_values["time_85u"], - "time_10P_85U", - 5.0, - ) - if compliance_list.contains_key(["time_5P_clear"], self._validations): results["t_event_start"] = t_event_end _check_compliance( @@ -412,20 +434,31 @@ def __check( 5.0, ) - if compliance_list.contains_key(["stabilized"], self._validations): - if not is_ppm: - stabilized = ( - is_stable.p - and is_stable.q - and is_stable.v - and is_stable.theta - and is_stable.pi - ) - else: - stabilized = is_stable.p and is_stable.q and is_stable.v + def __check_composed_times( + self, + results: 
dict, + t_event_start: float, + t_event_end: float, + compliance_values: dict, + ): + if compliance_list.contains_key(["time_5P_85U"], self._validations): + results["time_85U"] = compliance_values["time_85u"] + _check_compliance( + results, + compliance_values["time_5p"] - compliance_values["time_85u"], + "time_5P_85U", + 10.0, + ) - results["stabilized"] = stabilized - results["compliance"] &= stabilized + if compliance_list.contains_key(["time_10P_85U"], self._validations): + results["time_85U"] = compliance_values["time_85u"] + results["time_10P"] = compliance_values["time_10p"] + _check_compliance( + results, + compliance_values["time_10p"] - compliance_values["time_85u"], + "time_10P_85U", + 5.0, + ) if compliance_list.contains_key(["time_10Pfloor_85U"], self._validations): results["time_85U"] = compliance_values["time_85u"] @@ -447,11 +480,32 @@ def __check( 2.0, ) - if compliance_list.contains_key(["imax_reac"], self._validations): - results["imax_reac"] = compliance_values["imax_reac"] - results["imax_reac_check"] = compliance_values["imax_reac_check"] - results["compliance"] &= results["imax_reac_check"] + if compliance_list.contains_key(["time_85U_10P"], self._validations): + results["time_85U"] = compliance_values["time_85u"] + results["time_10P"] = compliance_values["time_10p"] + _check_compliance( + results, + compliance_values["time_10p"] - compliance_values["time_85u"], + "time_85U_10P", + 5.0, + ) + def __check_times( + self, + results: dict, + t_event_start: float, + t_event_end: float, + compliance_values: dict, + ): + self.__check_simple_times(results, t_event_start, t_event_end, compliance_values) + self.__check_composed_times(results, t_event_start, t_event_end, compliance_values) + + def __check_diconnections( + self, + results: dict, + simulation_path: Path, + has_dynamic_model: bool, + ): if ( compliance_list.contains_key(["no_disconnection_gen"], self._validations) and has_dynamic_model @@ -492,26 +546,68 @@ def __check( results["no_disconnection_load"] = True results["compliance"] &= results["no_disconnection_load"] + def __check_others( + self, + results: dict, + is_stable: Stability, + is_ppm: bool, + compliance_values: dict, + ): + if compliance_list.contains_key(["static_diff"], self._validations): + _check_compliance( + results, + compliance_values["static_diff"], + "static_diff", + 0.2, + 100, + ) + + if compliance_list.contains_key(["stabilized"], self._validations): + if not is_ppm: + stabilized = ( + is_stable.p + and is_stable.q + and is_stable.v + and is_stable.theta + and is_stable.pi + ) + else: + stabilized = is_stable.p and is_stable.q and is_stable.v + + results["stabilized"] = stabilized + results["compliance"] &= stabilized + + if compliance_list.contains_key(["imax_reac"], self._validations): + results["imax_reac"] = compliance_values["imax_reac"] + results["imax_reac_check"] = compliance_values["imax_reac_check"] + results["compliance"] &= results["imax_reac_check"] + if compliance_list.contains_key(["AVR_5"], self._validations): results["AVR_5_check"] = compliance_values["AVR_5_check"] results["AVR_5"] = compliance_values["AVR_5"] results["AVR_5_crvs"] = compliance_values["AVR_5_crvs"] - if compliance_list.contains_key(["time_85U_10P"], self._validations): - results["time_85U"] = compliance_values["time_85u"] - results["time_10P"] = compliance_values["time_10p"] - _check_compliance( - results, - compliance_values["time_10p"] - compliance_values["time_85u"], - "time_85U_10P", - 5.0, - ) - if compliance_list.contains_key(["freq_1"], 
self._validations): results["freq1"] = compliance_values["time_freq1"] results["freq1_check"] = compliance_values["check_freq1"] results["compliance"] &= results["freq1_check"] + def __check( + self, + simulation_path: Path, + has_dynamic_model: bool, + is_stable: Stability, + t_event_start: float, + t_event_end: float, + is_ppm: bool, + compliance_values: dict, + ): + results = self.__create_results(t_event_start, compliance_values) + + self.__check_times(results, t_event_start, t_event_end, compliance_values) + self.__check_diconnections(results, simulation_path, has_dynamic_model) + self.__check_others(results, is_stable, is_ppm, compliance_values) + return results def validate( From 8f71856e1fb852e7c509a595e6480b97e5ecd7f6 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 14:38:29 +0100 Subject: [PATCH 10/24] reduce complexity --- src/dgcv/report/tables/compliance.py | 101 +++++++++++++++------------ src/dgcv/report/tables/results.py | 84 +++++++++++----------- 2 files changed, 101 insertions(+), 84 deletions(-) diff --git a/src/dgcv/report/tables/compliance.py b/src/dgcv/report/tables/compliance.py index dd6f044..cdbff2e 100644 --- a/src/dgcv/report/tables/compliance.py +++ b/src/dgcv/report/tables/compliance.py @@ -10,6 +10,61 @@ from dgcv.report import printable +def _add_simple_times(results: dict, compliance_map: list): + if "time_5U" in results: + time_5U = printable.format_value(results, "time_5U", apply_formatter=True) + check = printable.format_value(results, "time_5U_check") + compliance_map.append(["$T_{5U} < 10 s$", time_5U, check]) + if "time_10U" in results: + time_10U = printable.format_value(results, "time_10U", apply_formatter=True) + check = printable.format_value(results, "time_10U_check") + compliance_map.append(["$T_{10U} < 5 s$", time_10U, check]) + if "time_5P" in results and not ("time_5P_85U" in results or "time_5P_clear" in results): + time_5P = printable.format_value(results, "time_5P", apply_formatter=True) + check = printable.format_value(results, "time_5P_check") + compliance_map.append(["$T_{5P} < 10 s$", time_5P, check]) + if "time_5P_clear" in results: + time_5P_clear = printable.format_value(results, "time_5P_clear", apply_formatter=True) + check = printable.format_value(results, "time_5P_clear_check") + compliance_map.append(["$T_{5P} - T_{clear} < 10 s$", time_5P_clear, check]) + if "time_10P" in results and not ("time_10P_85U" in results or "time_10P_clear" in results): + time_10P = printable.format_value(results, "time_10P", apply_formatter=True) + check = printable.format_value(results, "time_10P_check") + compliance_map.append(["$T_{10P} < 5 s$", time_10P, check]) + if "time_10P_clear" in results: + time_10P_clear = printable.format_value(results, "time_10P_clear", apply_formatter=True) + check = printable.format_value(results, "time_10P_clear_check") + compliance_map.append(["$T_{10P} - T_{clear} < 5 s$", time_10P_clear, check]) + if "time_10Pfloor_clear" in results: + time_10Pfloor_clear = printable.format_value( + results, "time_10Pfloor_clear", apply_formatter=True + ) + check = printable.format_value(results, "time_10Pfloor_clear_check") + compliance_map.append(["$T_{10P_{floor}} - T_{clear} < 2 s$", time_10Pfloor_clear, check]) + + +def _add_composed_times(results: dict, compliance_map: list): + if "time_5P_85U" in results: + time_5P_85U = printable.format_value(results, "time_5P_85U", apply_formatter=True) + check = printable.format_value(results, "time_5P_85U_check") + compliance_map.append(["$T_{5P} - T_{85U} < 10 s$", 
time_5P_85U, check]) + if "time_10P_85U" in results: + time_10P_85U = printable.format_value(results, "time_10P_85U", apply_formatter=True) + check = printable.format_value(results, "time_10P_85U_check") + compliance_map.append(["$T_{10P} - T_{85U} < 5 s$", time_10P_85U, check]) + if "time_10Pfloor_85U" in results: + time_10Pfloor_85U = printable.format_value( + results, "time_10Pfloor_85U", apply_formatter=True + ) + check = printable.format_value(results, "time_10Pfloor_85U_check") + compliance_map.append(["$T_{10P_{floor}} - T_{85U} < 2 s$", time_10Pfloor_85U, check]) + + +def _add_times(results: dict, compliance_map: list): + _add_simple_times(results, compliance_map) + _add_composed_times(results, compliance_map) + + def create_map(results: dict) -> list: """Creates a list to populate the compliance table in the LaTeX reports @@ -62,50 +117,6 @@ def create_map(results: dict) -> list: check, ] ) - if "time_5U" in results: - time_5U = printable.format_value(results, "time_5U", apply_formatter=True) - check = printable.format_value(results, "time_5U_check") - compliance_map.append(["$T_{5U} < 10 s$", time_5U, check]) - if "time_10U" in results: - time_10U = printable.format_value(results, "time_10U", apply_formatter=True) - check = printable.format_value(results, "time_10U_check") - compliance_map.append(["$T_{10U} < 5 s$", time_10U, check]) - if "time_5P" in results and not ("time_5P_85U" in results or "time_5P_clear" in results): - time_5P = printable.format_value(results, "time_5P", apply_formatter=True) - check = printable.format_value(results, "time_5P_check") - compliance_map.append(["$T_{5P} < 10 s$", time_5P, check]) - if "time_5P_85U" in results: - time_5P_85U = printable.format_value(results, "time_5P_85U", apply_formatter=True) - check = printable.format_value(results, "time_5P_85U_check") - compliance_map.append(["$T_{5P} - T_{85U} < 10 s$", time_5P_85U, check]) - if "time_5P_clear" in results: - time_5P_clear = printable.format_value(results, "time_5P_clear", apply_formatter=True) - check = printable.format_value(results, "time_5P_clear_check") - compliance_map.append(["$T_{5P} - T_{clear} < 10 s$", time_5P_clear, check]) - if "time_10P" in results and not ("time_10P_85U" in results or "time_10P_clear" in results): - time_10P = printable.format_value(results, "time_10P", apply_formatter=True) - check = printable.format_value(results, "time_10P_check") - compliance_map.append(["$T_{10P} < 5 s$", time_10P, check]) - if "time_10P_85U" in results: - time_10P_85U = printable.format_value(results, "time_10P_85U", apply_formatter=True) - check = printable.format_value(results, "time_10P_85U_check") - compliance_map.append(["$T_{10P} - T_{85U} < 5 s$", time_10P_85U, check]) - if "time_10P_clear" in results: - time_10P_clear = printable.format_value(results, "time_10P_clear", apply_formatter=True) - check = printable.format_value(results, "time_10P_clear_check") - compliance_map.append(["$T_{10P} - T_{clear} < 5 s$", time_10P_clear, check]) - if "time_10Pfloor_85U" in results: - time_10Pfloor_85U = printable.format_value( - results, "time_10Pfloor_85U", apply_formatter=True - ) - check = printable.format_value(results, "time_10Pfloor_85U_check") - compliance_map.append(["$T_{10P_{floor}} - T_{85U} < 2 s$", time_10Pfloor_85U, check]) - if "time_10Pfloor_clear" in results: - time_10Pfloor_clear = printable.format_value( - results, "time_10Pfloor_clear", apply_formatter=True - ) - check = printable.format_value(results, "time_10Pfloor_clear_check") - 
compliance_map.append(["$T_{10P_{floor}} - T_{clear} < 2 s$", time_10Pfloor_clear, check]) if "static_diff" in results: static_diff = printable.format_value(results, "static_diff", apply_formatter=True) check = printable.format_value(results, "static_diff_check") @@ -123,4 +134,6 @@ def create_map(results: dict) -> list: ] ) + _add_times(results, compliance_map) + return compliance_map diff --git a/src/dgcv/report/tables/results.py b/src/dgcv/report/tables/results.py index 08bd3e9..4c3923a 100644 --- a/src/dgcv/report/tables/results.py +++ b/src/dgcv/report/tables/results.py @@ -10,46 +10,7 @@ from dgcv.report import printable -def create_map(results: dict) -> list: - """Creates a list to populate the results table in the LaTex reports - - Parameters - ---------- - results: dict - Results of the validations applied in the pcs - - Returns - ------- - list - Results table - """ - results_map = [] - - if "time_10U" in results: - time_10U = printable.format_value(results, "time_10U", apply_formatter=True) - results_map.append(["$T_{10U}$", time_10U + " s"]) - if "time_5U" in results: - time_5U = printable.format_value(results, "time_5U", apply_formatter=True) - results_map.append(["$T_{5U}$", time_5U + " s"]) - if "time_10P" in results: - time_10P = printable.format_value(results, "time_10P", apply_formatter=True) - results_map.append(["$T_{10P}$", time_10P + " s"]) - if "time_10Pfloor" in results: - time_10Pfloor = printable.format_value(results, "time_10Pfloor", apply_formatter=True) - results_map.append(["$T_{10P_{floor}}$", time_10Pfloor + " s"]) - if "time_5P" in results: - time_5P = printable.format_value(results, "time_5P", apply_formatter=True) - results_map.append(["$T_{5P}$", time_5P + " s"]) - if "time_85U" in results: - time_85U = printable.format_value(results, "time_85U", apply_formatter=True) - results_map.append(["$T_{85U}$", time_85U + " s"]) - if "time_cct" in results: - time_cct = printable.format_value(results, "time_cct", apply_formatter=True) - results_map.append(["$T_{cct}$", time_cct + " s"]) - if "static_diff" in results: - static_diff = printable.format_value(results, "static_diff", apply_formatter=True) - results_map.append(["$\epsilon$", static_diff]) - +def _iterate_variables(results: dict, results_map: list): variables = [ ["voltage", "V"], ["active_power", "P"], @@ -143,4 +104,47 @@ def create_map(results: dict) -> list: [col, before_mxe, before_me, before_mae, after_mxe, after_me, after_mae] ) + +def create_map(results: dict) -> list: + """Creates a list to populate the results table in the LaTex reports + + Parameters + ---------- + results: dict + Results of the validations applied in the pcs + + Returns + ------- + list + Results table + """ + results_map = [] + + if "time_10U" in results: + time_10U = printable.format_value(results, "time_10U", apply_formatter=True) + results_map.append(["$T_{10U}$", time_10U + " s"]) + if "time_5U" in results: + time_5U = printable.format_value(results, "time_5U", apply_formatter=True) + results_map.append(["$T_{5U}$", time_5U + " s"]) + if "time_10P" in results: + time_10P = printable.format_value(results, "time_10P", apply_formatter=True) + results_map.append(["$T_{10P}$", time_10P + " s"]) + if "time_10Pfloor" in results: + time_10Pfloor = printable.format_value(results, "time_10Pfloor", apply_formatter=True) + results_map.append(["$T_{10P_{floor}}$", time_10Pfloor + " s"]) + if "time_5P" in results: + time_5P = printable.format_value(results, "time_5P", apply_formatter=True) + results_map.append(["$T_{5P}$", time_5P + " 
s"]) + if "time_85U" in results: + time_85U = printable.format_value(results, "time_85U", apply_formatter=True) + results_map.append(["$T_{85U}$", time_85U + " s"]) + if "time_cct" in results: + time_cct = printable.format_value(results, "time_cct", apply_formatter=True) + results_map.append(["$T_{cct}$", time_cct + " s"]) + if "static_diff" in results: + static_diff = printable.format_value(results, "static_diff", apply_formatter=True) + results_map.append(["$\epsilon$", static_diff]) + + _iterate_variables(results, results_map) + return results_map From acce0ec016192c3df673a6c92c992e3c608086b9 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Wed, 18 Dec 2024 15:02:39 +0100 Subject: [PATCH 11/24] reduce complexity --- src/dgcv/report/html.py | 19 +++- src/dgcv/report/report.py | 226 ++++++++++++++++++++++---------------- 2 files changed, 148 insertions(+), 97 deletions(-) diff --git a/src/dgcv/report/html.py b/src/dgcv/report/html.py index 2e47105..fddf22e 100644 --- a/src/dgcv/report/html.py +++ b/src/dgcv/report/html.py @@ -64,7 +64,7 @@ def _get_curve_names( return curve_names -def _additional_traces(fig, additional_curves, time, curve, results): +def _additional_active_traces(fig, additional_curves, time, curve, results): last_val = curve.iloc[-1] if "10P" in additional_curves: if abs(last_val) <= 1: @@ -115,6 +115,8 @@ def _additional_traces(fig, additional_curves, time, curve, results): fig.add_hline(y=mean_val_min, line_color="#c44e52", line_dash="dash") + +def _additional_traces(fig, additional_curves, time, curve, results): if "85U" in additional_curves: val_85 = results["time_85U"] + results["sim_t_event_start"] fig.add_vline(x=val_85, opacity=0.8, line_color="#000000", line_dash="dash") @@ -354,6 +356,13 @@ def _plotly_figures( ): _exclusion_windows(fig, results) + _additional_active_traces( + fig, + additional_curves, + calculated_curves["time"], + calculated_curves[curve_name], + results, + ) _additional_traces( fig, additional_curves, @@ -437,11 +446,15 @@ def plotly_figures( def plotly_all_curves( plotted_curves: list, - calculated_curves: pd.DataFrame, - reference_curves: pd.DataFrame, results: dict, ) -> list: + calculated_curves = results["curves"] + if "reference_curves" in results: + reference_curves = results["reference_curves"] + else: + reference_curves = None + figures = list() for curve_name in calculated_curves: if curve_name in plotted_curves or "time" == curve_name.lower(): diff --git a/src/dgcv/report/report.py b/src/dgcv/report/report.py index 0fa4725..585fc96 100644 --- a/src/dgcv/report/report.py +++ b/src/dgcv/report/report.py @@ -38,6 +38,66 @@ from dgcv.templates.reports.create_figures import create_figures +def _create_reports( + report_results: dict, + parameters: Parameters, + output_path: Path, + working_path: Path, +) -> list: + reports = [] + for pcs_results in report_results.values(): + pcs = pcs_results["pcs"] + subreports = _create_full_tex( + pcs_results, + working_path, + output_path, + pcs.get_figures_description(), + pcs_results["report_name"], + parameters.get_producer(), + ) + if subreports > 0: + reports.append(f"\\input{{{pcs_results['report_name'].replace('.tex', '')}}}") + + return reports + + +def _create_figures( + report_results: dict, + parameters: Parameters, + path_latex_files: Path, + working_path: Path, +): + for pcs_results in report_results.values(): + latex_template_path = ( + path_latex_files + / parameters.get_producer().get_sim_type_str() + / pcs_results["pcs"].get_name() + ) + + latex_user_path = config.get_config_dir() / 
latex_template_path + dgcv_logging.get_logger("PDFLatex").debug( + f"PCS: {pcs_results['pcs'].get_name()} User LaTeX path:{latex_user_path}" + ) + latex_tool_path = Path(__file__).resolve().parent.parent / latex_template_path + dgcv_logging.get_logger("PDFLatex").debug( + f"PCS: {pcs_results['pcs'].get_name()} Tool LaTeX path:{latex_tool_path}" + ) + if latex_user_path.exists(): + copy_latex_files(latex_user_path, working_path) + if latex_tool_path.exists(): + copy_latex_files(latex_tool_path, working_path) + + if not (latex_tool_path.exists() or latex_user_path.exists()): + dgcv_logging.get_logger("PDFLatex").error("Latex Template do not exist") + return + + create_figures( + working_path, + pcs_results["pcs"].get_name(), + pcs_results["sim_type"], + ) + + def _pcs_replace( working_path: Path, pcs_results: dict, report_name: str, producer: Producer ) -> int: @@ -127,6 +187,63 @@ def _get_template(path, template_file): return template +def _generate_figures( + working_path: Path, + figures_description: dict, + figure_key: str, + oc_results: dict, + operating_condition: str, + xmin: float, + xmax: float, +) -> tuple[list, list]: + plotted_curves = list() + figures = list() + + curves = oc_results["curves"] + if "reference_curves" in oc_results: + reference_curves = oc_results["reference_curves"] + else: + reference_curves = None + + for figure_description in figures_description[figure_key]: + plot_curves = figure.get_curves2plot(figure_description[1], curves) + if len(plot_curves) == 0: + continue + + plot_reference_curves = None + if reference_curves is not None: + plot_reference_curves = figure.get_curves2plot( + figure_description[1], reference_curves, is_reference=True + ) + figure.create_plot( + list(curves["time"]), + figure_description[1], + plot_curves, + list(reference_curves["time"]) if reference_curves is not None else None, + plot_reference_curves, + {"min": xmin, "max": xmax}, + working_path / (figure_description[0] + "_" + operating_condition + ".pdf"), + figure_description[2], + oc_results, + figure_description[3], + ) + + try: + html_curves, html_figure = html.plotly_figures( + figure_description, curves, reference_curves, oc_results + ) + plotted_curves.extend(html_curves) + if html_figure: + figures.append(html_figure) + except Exception as e: + dgcv_logging.get_logger("HTMLReport").error( + "A non fatal error occurred while generating the plotly figures" + ) + dgcv_logging.get_logger("HTMLReport").error(f"{e}") + + return plotted_curves, figures + + def _create_full_tex( pcs_results: dict, working_path: Path, @@ -164,8 +281,7 @@ def _create_full_tex( ) continue - curves = oc_results["curves"] - if curves is None: + if oc_results["curves"] is None: continue unit_characteristics = { @@ -174,11 +290,6 @@ def _create_full_tex( "Udim": oc_results["udim"] / producer.u_nom, } - if "reference_curves" in oc_results: - reference_curves = oc_results["reference_curves"] - else: - reference_curves = None - xmin, xmax = figure.get_common_time_range( operating_condition, unit_characteristics, @@ -190,49 +301,18 @@ def _create_full_tex( if config.get_boolean("Debug", "show_figs_tend", False): xmax = None - plotted_curves = list() - figures = list() - for figure_description in figures_description[figure_key]: - plot_curves = figure.get_curves2plot(figure_description[1], curves) - if len(plot_curves) == 0: - continue - - plot_reference_curves = None - if reference_curves is not None: - plot_reference_curves = figure.get_curves2plot( - figure_description[1], reference_curves, 
is_reference=True - ) - figure.create_plot( - list(curves["time"]), - figure_description[1], - plot_curves, - list(reference_curves["time"]) if reference_curves is not None else None, - plot_reference_curves, - {"min": xmin, "max": xmax}, - working_path / (figure_description[0] + "_" + operating_condition + ".pdf"), - figure_description[2], - oc_results, - figure_description[3], - ) - - try: - html_curves, html_figure = html.plotly_figures( - figure_description, curves, reference_curves, oc_results - ) - plotted_curves.extend(html_curves) - if html_figure: - figures.append(html_figure) - except Exception as e: - dgcv_logging.get_logger("HTMLReport").error( - "A non fatal error occurred while generating the plotly figures" - ) - dgcv_logging.get_logger("HTMLReport").error(f"{e}") - + plotted_curves, figures = _generate_figures( + working_path, + figures_description, + figure_key, + oc_results, + operating_condition, + xmin, + xmax, + ) try: if config.get_boolean("Debug", "plot_all_curves_in_html", False): - figures.extend( - html.plotly_all_curves(plotted_curves, curves, reference_curves, oc_results) - ) + figures.extend(html.plotly_all_curves(plotted_curves, oc_results)) html.create_html(figures, operating_condition, output_path) except Exception as e: dgcv_logging.get_logger("HTMLReport").error( @@ -301,59 +381,17 @@ def create_pdf( if not working_path.exists(): working_path.mkdir() - for pcs_results in report_results.values(): - latex_template_path = ( - path_latex_files - / parameters.get_producer().get_sim_type_str() - / pcs_results["pcs"].get_name() - ) - - latex_user_path = config.get_config_dir() / latex_template_path - dgcv_logging.get_logger("PDFLatex").debug( - f"PCS: {pcs_results['pcs'].get_name()} User LaTeX path:{latex_user_path}" - ) - latex_tool_path = Path(__file__).resolve().parent.parent / latex_template_path - dgcv_logging.get_logger("PDFLatex").debug( - f"PCS: {pcs_results['pcs'].get_name()} Tool LaTeX path:{latex_tool_path}" - ) - if latex_user_path.exists(): - copy_latex_files(latex_user_path, working_path) - if latex_tool_path.exists(): - copy_latex_files(latex_tool_path, working_path) - - if not (latex_tool_path.exists() or latex_user_path.exists()): - dgcv_logging.get_logger("PDFLatex").error("Latex Template do not exist") - return - - create_figures( - working_path, - pcs_results["pcs"].get_name(), - pcs_results["sim_type"], - ) + _create_figures(report_results, parameters, path_latex_files, working_path) latex_root_path = Path(__file__).resolve().parent.parent / path_latex_files - dgcv_logging.get_logger("PDFLatex").debug( - f"PCS: {pcs_results['pcs'].get_name()} Root LaTeX path:{latex_root_path}" - ) - if latex_tool_path.exists(): + dgcv_logging.get_logger("PDFLatex").debug(f"Root LaTeX path:{latex_root_path}") + if latex_root_path.exists(): shutil.copy(latex_root_path / REPORT_NAME, working_path) else: dgcv_logging.get_logger("PDFLatex").error("Latex Template do not exist") return - reports = [] - for pcs_results in report_results.values(): - pcs = pcs_results["pcs"] - subreports = _create_full_tex( - pcs_results, - working_path, - output_path, - pcs.get_figures_description(), - pcs_results["report_name"], - parameters.get_producer(), - ) - if subreports > 0: - reports.append(f"\\input{{{pcs_results['report_name'].replace('.tex', '')}}}") + reports = _create_reports(report_results, parameters, output_path, working_path) summary_description = "" now = time.time() From a2759239f6b016895d97ffc88c02674114826ca8 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 
Dec 2024 08:52:04 +0100 Subject: [PATCH 12/24] reduce complexity --- src/dgcv/report/figure.py | 161 +++++++++++++++++++++++++------------- 1 file changed, 108 insertions(+), 53 deletions(-) diff --git a/src/dgcv/report/figure.py b/src/dgcv/report/figure.py index 1b18ad4..8adc23e 100644 --- a/src/dgcv/report/figure.py +++ b/src/dgcv/report/figure.py @@ -222,6 +222,19 @@ def _plot_additional_curves( ymin: float, ymax: float, ) -> tuple[float, float]: + + _plot_additional_time_curves(additional_curves, results, last_val) + ymin, ymax = _plot_additional_frequency_curves(additional_curves, ymin, ymax) + _plot_additional_avr_curves(time, additional_curves, results) + + return ymin, ymax + + +def _plot_additional_time_curves( + additional_curves: list, + results: dict, + last_val: float, +) -> None: # Plot first the additional info if "10P" in additional_curves: # Get the tube @@ -277,6 +290,12 @@ def _plot_additional_curves( val_85 = results["time_85U"] + results["sim_t_event_start"] plt.axvline(x=val_85, color="0.8", linestyle="--") + +def _plot_additional_frequency_curves( + additional_curves: list, + ymin: float, + ymax: float, +) -> tuple[float, float]: if "freq_1" in additional_curves: plt.axhline(y=(50 + 1) / 50, color="#c44e52", linestyle="--") if ymax < (50 + 1.5) / 50: @@ -301,6 +320,14 @@ def _plot_additional_curves( if ymin > (50 - 0.3) / 50: ymin = (50 - 0.3) / 50 + return ymin, ymax + + +def _plot_additional_avr_curves( + time: list, + additional_curves: list, + results: dict, +) -> tuple[float, float]: if "AVR5" in additional_curves: # Get the tube percent = 0.05 @@ -323,8 +350,6 @@ def _plot_additional_curves( plt.plot(time, line_max, color="#c44e52", linestyle="--") plt.plot(time, line_min, color="#c44e52", linestyle="--") - return ymin, ymax - def _plot_response_characteristics( curve_name: str, @@ -451,6 +476,81 @@ def _plot_mxe( ) +def _save_plot( + time: list, + curves: list, + time_reference: list, + curves_reference: list, + time_range: dict, + output_file: Path, + unit: str, + ymin: float, + ymax: float, +) -> None: + plt.clf() + plt.figure() + + # Plot later the reference curves + if time_reference is not None and curves_reference is not None: + for curve_reference in curves_reference: + plt.plot(time_reference, curve_reference["curve"], color="#dd8452", linestyle="-") + + # Plot finally the calculated curves + for curve in curves: + plt.plot(time, curve["curve"], color=curve["color"], linestyle=curve["style"]) + + plt.gca().yaxis.set_major_formatter(FormatStrFormatter("%.5g")) + plt.subplots_adjust(left=0.2) + if time_range["min"] is not None: + try: + plt.xlim(time_range["min"], time_range["max"]) + except UserWarning as uw: + dgcv_logging.get_logger("PDFLatex").warning("X-axis warning: " + uw) + if ymin is not None: + try: + plt.ylim(ymin, ymax) + except UserWarning as uw: + dgcv_logging.get_logger("PDFLatex").warning("Y-axis warning: " + uw) + + plt.xlabel("t(s)", fontsize=16) + plt.ylabel(unit, fontsize=16) + plt.savefig(output_file) + plt.close() + + +def _get_time_range( + operating_condition: str, + unit_characteristics: dict, + figures_description: dict, + results: dict, + time: list, +) -> dict: + + curves = results["curves"] + xmin = 99999 + xmax = -99999 + figure_key = operating_condition.rsplit(".", 1)[0] + for figure_description in figures_description[figure_key]: + plot_curves = get_curves2plot(figure_description[1], curves) + if len(plot_curves) == 0: + continue + + xrange_min, xrange_max = _get_xrange( + operating_condition, + unit_characteristics, + 
time, + plot_curves, + results["sim_t_event_start"], + ) + if xrange_min is None: + continue + else: + if xrange_min < xmin: + xmin = xrange_min + if xrange_max > xmax: + xmax = xrange_max + + def get_common_time_range( operating_condition: str, unit_characteristics: dict, @@ -480,28 +580,9 @@ def get_common_time_range( """ curves = results["curves"] time = list(curves["time"]) - xmin = 99999 - xmax = -99999 - figure_key = operating_condition.rsplit(".", 1)[0] - for figure_description in figures_description[figure_key]: - plot_curves = get_curves2plot(figure_description[1], curves) - if len(plot_curves) == 0: - continue - - xrange_min, xrange_max = _get_xrange( - operating_condition, - unit_characteristics, - time, - plot_curves, - results["sim_t_event_start"], - ) - if xrange_min is None: - continue - else: - if xrange_min < xmin: - xmin = xrange_min - if xrange_max > xmax: - xmax = xrange_max + xmin, xmax = _get_time_range( + operating_condition, unit_characteristics, figures_description, results, time + ) if xmin == 99999 and xmax == -99999: dgcv_logging.get_logger("PDFLatex").warning( @@ -572,8 +653,6 @@ def create_plot( # Cut curves ymin, ymax = _get_yrange(curves + curves_reference if curves_reference is not None else curves) - plt.clf() - plt.figure() last_val = curves[0]["curve"][-1] ymin, ymax = _plot_additional_curves(time, additional_curves, results, last_val, ymin, ymax) @@ -587,33 +666,9 @@ def create_plot( _plot_response_characteristics(curve_name, results) _plot_exclusion_windows(results) _plot_mxe(curve_name, results) - - # Plot later the reference curves - if time_reference is not None and curves_reference is not None: - for curve_reference in curves_reference: - plt.plot(time_reference, curve_reference["curve"], color="#dd8452", linestyle="-") - - # Plot finally the calculated curves - for curve in curves: - plt.plot(time, curve["curve"], color=curve["color"], linestyle=curve["style"]) - - plt.gca().yaxis.set_major_formatter(FormatStrFormatter("%.5g")) - plt.subplots_adjust(left=0.2) - if time_range["min"] is not None: - try: - plt.xlim(time_range["min"], time_range["max"]) - except UserWarning as uw: - dgcv_logging.get_logger("PDFLatex").warning("X-axis warning: " + uw) - if ymin is not None: - try: - plt.ylim(ymin, ymax) - except UserWarning as uw: - dgcv_logging.get_logger("PDFLatex").warning("Y-axis warning: " + uw) - - plt.xlabel("t(s)", fontsize=16) - plt.ylabel(unit, fontsize=16) - plt.savefig(output_file) - plt.close() + _save_plot( + time, curves, time_reference, curves_reference, time_range, output_file, unit, ymin, ymax + ) def get_curves2plot( From 27ab07d81bbdecc222e269ca6050acbcda25716e Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 08:55:35 +0100 Subject: [PATCH 13/24] reduce complexity --- src/dgcv/model/producer.py | 111 +++++++++++++++++++++---------------- 1 file changed, 62 insertions(+), 49 deletions(-) diff --git a/src/dgcv/model/producer.py b/src/dgcv/model/producer.py index fc235bf..ce19bb7 100644 --- a/src/dgcv/model/producer.py +++ b/src/dgcv/model/producer.py @@ -120,56 +120,9 @@ def __set_model_validation_type(self): ppm_models = 0 bess_models = 0 if self.is_dynawo_model(): - self._zone = 1 - ( - generators_z1, - _, - _, - _, - _, - _, - ) = model_parameters.get_producer_values( - self.get_producer_dyd(), - self.get_producer_par(), - self._s_nref, - ) - self._zone = 3 - ( - generators_z3, - _, - _, - _, - _, - _, - ) = model_parameters.get_producer_values( - self.get_producer_dyd(), - self.get_producer_par(), - self._s_nref, - ) 
- sm_models, ppm_models, bess_models = sanity_checks.check_generators( - generators_z1 + generators_z3 - ) - self._zone = 0 + sm_models, ppm_models, bess_models = self.__set_dynawo_model_validation_type() else: - default_section = "DEFAULT" - self._zone = 1 - producer_config = self.read_producer_ini() - generator_type_z1 = producer_config.get(default_section, "generator_type") - self._zone = 3 - producer_config = self.read_producer_ini() - generator_type_z3 = producer_config.get(default_section, "generator_type") - if "SM" == generator_type_z1: - sm_models += 1 - elif "PPM" == generator_type_z1: - ppm_models += 1 - elif "BESS" == generator_type_z1: - bess_models += 1 - if "SM" == generator_type_z3: - sm_models += 1 - elif "PPM" == generator_type_z3: - ppm_models += 1 - elif "BESS" == generator_type_z3: - bess_models += 1 + sm_models, ppm_models, bess_models = self.__set_curves_model_validation_type() if sm_models > 0: raise ValueError("Synchronous machine models are not allowed for model validation") @@ -180,6 +133,66 @@ def __set_model_validation_type(self): else: raise ValueError("Model validation does not support the modeled generator type") + def __set_dynawo_model_validation_type(self): + self._zone = 1 + ( + generators_z1, + _, + _, + _, + _, + _, + ) = model_parameters.get_producer_values( + self.get_producer_dyd(), + self.get_producer_par(), + self._s_nref, + ) + self._zone = 3 + ( + generators_z3, + _, + _, + _, + _, + _, + ) = model_parameters.get_producer_values( + self.get_producer_dyd(), + self.get_producer_par(), + self._s_nref, + ) + sm_models, ppm_models, bess_models = sanity_checks.check_generators( + generators_z1 + generators_z3 + ) + self._zone = 0 + + return sm_models, ppm_models, bess_models + + def __set_curves_model_validation_type(self): + default_section = "DEFAULT" + self._zone = 1 + producer_config = self.read_producer_ini() + generator_type_z1 = producer_config.get(default_section, "generator_type") + self._zone = 3 + producer_config = self.read_producer_ini() + generator_type_z3 = producer_config.get(default_section, "generator_type") + sm_models = 0 + ppm_models = 0 + bess_models = 0 + if "SM" == generator_type_z1: + sm_models += 1 + elif "PPM" == generator_type_z1: + ppm_models += 1 + elif "BESS" == generator_type_z1: + bess_models += 1 + if "SM" == generator_type_z3: + sm_models += 1 + elif "PPM" == generator_type_z3: + ppm_models += 1 + elif "BESS" == generator_type_z3: + bess_models += 1 + + return sm_models, ppm_models, bess_models + def read_producer_ini(self): pattern_ini = re.compile(r".*.Producer.[iI][nN][iI]") producer_ini = self.__get_file_by_pattern(pattern_ini) From b2998f7295ccb396d26aea7d5fda78aa7e48278d Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 09:17:48 +0100 Subject: [PATCH 14/24] reduce complexity --- src/dgcv/files/manage_files.py | 33 +++++++------ src/dgcv/files/producer_curves.py | 41 ++++++++++------ src/dgcv/model/operating_condition.py | 67 ++++++++++++--------------- 3 files changed, 75 insertions(+), 66 deletions(-) diff --git a/src/dgcv/files/manage_files.py b/src/dgcv/files/manage_files.py index abd3078..2008aba 100644 --- a/src/dgcv/files/manage_files.py +++ b/src/dgcv/files/manage_files.py @@ -23,6 +23,21 @@ ProducerFiles = namedtuple("ProducerFiles", ["producer_dyd", "producer_par"]) +def _copy_files( + path: Path, + target_path: Path, +): + pattern = re.compile(r".*") + exclude_pattern1 = re.compile(r".*__init__.py") + exclude_pattern2 = re.compile(r".*__pycache__*") + for file in path.iterdir(): + 
matching = pattern.match(str(file)) + matching1 = exclude_pattern1.match(str(file)) + matching2 = exclude_pattern2.match(str(file)) + if matching and not matching1 and not matching2: + shutil.copy(file, target_path / (file.stem + file.suffix.lower())) + + def create_config_file(config_file: Path, target_file: Path) -> None: """Create a commented config file in target from the input config file. @@ -202,23 +217,13 @@ def copy_base_case_files( target_path: Path Target path """ + + _copy_files(model_files.model_path, target_path) + _copy_files(model_files.omega_path, target_path) + pattern = re.compile(r".*") exclude_pattern1 = re.compile(r".*__init__.py") exclude_pattern2 = re.compile(r".*__pycache__*") - for file in model_files.model_path.iterdir(): - matching = pattern.match(str(file)) - matching1 = exclude_pattern1.match(str(file)) - matching2 = exclude_pattern2.match(str(file)) - if matching and not matching1 and not matching2: - shutil.copy(file, target_path / (file.stem + file.suffix.lower())) - - for file in model_files.omega_path.iterdir(): - matching = pattern.match(str(file)) - matching1 = exclude_pattern1.match(str(file)) - matching2 = exclude_pattern2.match(str(file)) - if matching and not matching1 and not matching2: - shutil.copy(file, target_path / (file.stem + file.suffix.lower())) - exclude_pattern3 = re.compile(r".*.[iI][nN][iI]$") exclude_pattern4 = re.compile(r".*.[cC][rR][vV]$") for file in model_files.pcs_path.iterdir(): diff --git a/src/dgcv/files/producer_curves.py b/src/dgcv/files/producer_curves.py index b1cbb15..b5c82c9 100644 --- a/src/dgcv/files/producer_curves.py +++ b/src/dgcv/files/producer_curves.py @@ -261,12 +261,12 @@ def _get_performance_templates( return producer_curves_txt, curves_names_txt -def _get_model_templates( +def _get_xmfrs_models( model_path: Path, - template: str, -): + zone: str, +) -> list: producer_dyd_tree = etree.parse( - model_path / "Zone3" / "Producer.dyd", etree.XMLParser(remove_blank_text=True) + model_path / zone / "Producer.dyd", etree.XMLParser(remove_blank_text=True) ) producer_dyd_root = producer_dyd_tree.getroot() xfmrs = [] @@ -274,26 +274,37 @@ def _get_model_templates( if "StepUp_Xfmr" in xfmr.get("id"): xfmrs.append(xfmr) - z3_gen_ppms = [] - if template == "model_PPM": - for model in dynawo_translator.get_power_park_models(): - z3_gen_ppms.extend(find_bbmodel_by_type(producer_dyd_root, model)) - elif template == "model_BESS": - for model in dynawo_translator.get_storage_models(): - z3_gen_ppms.extend(find_bbmodel_by_type(producer_dyd_root, model)) + return xfmrs + +def _get_generator_models( + model_path: Path, + template: str, + zone: str, +) -> list: producer_dyd_tree = etree.parse( - model_path / "Zone1" / "Producer.dyd", etree.XMLParser(remove_blank_text=True) + model_path / zone / "Producer.dyd", etree.XMLParser(remove_blank_text=True) ) producer_dyd_root = producer_dyd_tree.getroot() - z1_gen_ppms = [] + gen_ppms = [] if template == "model_PPM": for model in dynawo_translator.get_power_park_models(): - z1_gen_ppms.extend(find_bbmodel_by_type(producer_dyd_root, model)) + gen_ppms.extend(find_bbmodel_by_type(producer_dyd_root, model)) elif template == "model_BESS": for model in dynawo_translator.get_storage_models(): - z1_gen_ppms.extend(find_bbmodel_by_type(producer_dyd_root, model)) + gen_ppms.extend(find_bbmodel_by_type(producer_dyd_root, model)) + + return gen_ppms + + +def _get_model_templates( + model_path: Path, + template: str, +): + xfmrs = _get_xmfrs_models(model_path, "Zone3") + z3_gen_ppms = 
_get_generator_models(model_path, template, "Zone3") + z1_gen_ppms = _get_generator_models(model_path, template, "Zone1") producer_curves_txt = _get_model_file_template() curves_names_txt = _get_model_curves_template(xfmrs, z1_gen_ppms, z3_gen_ppms) diff --git a/src/dgcv/model/operating_condition.py b/src/dgcv/model/operating_condition.py index afc1ea0..43b044b 100644 --- a/src/dgcv/model/operating_condition.py +++ b/src/dgcv/model/operating_condition.py @@ -17,6 +17,7 @@ from dgcv.curves.manager import CurvesManager from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging +import pandas as pd class OperatingCondition: @@ -154,6 +155,29 @@ def __validate( return results + def _check_curves( + self, curves: pd.DataFrame, curves_name: str, review_curves_set: bool + ) -> bool: + measurement_names = self._validator.get_measurement_names() + has_curves = True + if review_curves_set: + if curves.empty: + dgcv_logging.get_logger("Operating Condition").warning( + f"Test without {curves_name} curves file" + ) + has_curves = False + else: + missed_curves = [] + for key in measurement_names: + if key not in curves: + missed_curves.append(key) + has_curves = False + if not has_curves: + dgcv_logging.get_logger("Operating Condition").warning( + f"Test without {curves_name} curve for keys {missed_curves}" + ) + return has_curves + def validate( self, pcs_bm_name: str, @@ -262,46 +286,15 @@ def has_required_curves( bm_name, ) - measurement_names = self._validator.get_measurement_names() - # If the tool has the model, it is assumed that the simulated curves are always available, # if they are not available it is due to a failure in the simulation, this event is # handled differently. - sim_curves = True - if not self._producer.is_dynawo_model(): - if curves["calculated"].empty: - dgcv_logging.get_logger("Operating Condition").warning( - "Test without producer curves file" - ) - sim_curves = False - else: - missed_curves = [] - for key in measurement_names: - if key not in curves["calculated"]: - missed_curves.append(key) - sim_curves = False - if not sim_curves: - dgcv_logging.get_logger("Operating Condition").warning( - f"Test without producer curve for keys {missed_curves}" - ) - - ref_curves = True - if self.__has_reference_curves(): - if curves["reference"].empty: - dgcv_logging.get_logger("Operating Condition").warning( - "Test without reference curves file" - ) - ref_curves = False - else: - missed_curves = [] - for key in measurement_names: - if key not in curves["reference"]: - missed_curves.append(key) - ref_curves = False - if not ref_curves: - dgcv_logging.get_logger("Operating Condition").warning( - f"Test without reference curve for keys {missed_curves}" - ) + sim_curves = self._check_curves( + curves["calculated"], "producer", not self._producer.is_dynawo_model() + ) + ref_curves = self._check_curves( + curves["reference"], "reference", self.__has_reference_curves() + ) if sim_curves and ref_curves: has_curves = 0 From 878dc94876665ae0400721d55b4f5f75ce114f52 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 10:35:15 +0100 Subject: [PATCH 15/24] reduce complexity --- src/dgcv/curves/manager.py | 64 ++++++++++++------- src/dgcv/dynawo/dynawo.py | 90 ++++++++++++++++----------- src/dgcv/dynawo/file_variables.py | 62 +++++++++++------- src/dgcv/model/operating_condition.py | 2 + tests/dgcv/test_tool.py | 2 - 5 files changed, 135 insertions(+), 85 deletions(-) diff --git a/src/dgcv/curves/manager.py b/src/dgcv/curves/manager.py index 17dcd98..f7e08a6 100644 
--- a/src/dgcv/curves/manager.py +++ b/src/dgcv/curves/manager.py @@ -30,6 +30,12 @@ def _get_generators_ini(generators: list, curves: pd.DataFrame) -> list: return gens +def _get_config_value(config, section, option, default=0.0): + if config.has_option(section, option): + return float(config.get(section, option)) + return default + + class CurvesManager(Simulator): def __init__( self, @@ -48,25 +54,21 @@ def __get_generators(self, curves: pd.DataFrame) -> list: self.get_producer().set_generators(generators) return generators - def __obtain_files_curve( + def __get_curves_dataframe( self, working_oc_dir: Path, pcs_bm_name: str, oc_name: str, - curves: Path, + success: bool, is_reference: bool = False, - ): - # Copy base case and producers file - success = manage_files.copy_base_curves_files( - curves, working_oc_dir, get_cfg_oc_name(pcs_bm_name, oc_name) - ) + ) -> tuple[bool, float, float, float, pd.DataFrame]: has_imported_curves = True if success: importer = CurvesImporter(working_oc_dir, get_cfg_oc_name(pcs_bm_name, oc_name)) ( df_imported_curves, - curves_dict, - sim_t_event_end, + _, + _, fs, ) = importer.get_curves_dataframe(self._producer.get_zone()) if df_imported_curves.empty: @@ -77,26 +79,20 @@ def __obtain_files_curve( self._generators = self.__get_generators(df_imported_curves) self._gens = _get_generators_ini(self._generators, df_imported_curves) + sim_t_event_start = _get_config_value( + importer.config, "Curves-Metadata", "sim_t_event_start" + ) + fault_duration = _get_config_value( + importer.config, "Curves-Metadata", "fault_duration" + ) + if fs == 0: + fs = _get_config_value(importer.config, "Curves-Metadata", "frequency_sampling") + if importer.config.has_option("Curves-Metadata", "is_field_measurements"): self._is_field_measurements = bool( importer.config.get("Curves-Metadata", "is_field_measurements") ) - if importer.config.has_option("Curves-Metadata", "sim_t_event_start"): - sim_t_event_start = float( - importer.config.get("Curves-Metadata", "sim_t_event_start") - ) - else: - sim_t_event_start = 0 - - if importer.config.has_option("Curves-Metadata", "fault_duration"): - fault_duration = float(importer.config.get("Curves-Metadata", "fault_duration")) - else: - fault_duration = 0 - - if importer.config.has_option("Curves-Metadata", "frequency_sampling") and fs == 0: - fs = float(importer.config.get("Curves-Metadata", "frequency_sampling")) - generators_imax = {} for key in importer.config["Curves-Metadata"].keys(): if key.endswith("_GEN_MaxInjectedCurrentPu"): @@ -114,6 +110,26 @@ def __obtain_files_curve( self._generators_imax = {} df_imported_curves = pd.DataFrame() + return has_imported_curves, sim_t_event_start, fault_duration, fs, df_imported_curves + + def __obtain_files_curve( + self, + working_oc_dir: Path, + pcs_bm_name: str, + oc_name: str, + curves: Path, + is_reference: bool = False, + ): + # Copy base case and producers file + success = manage_files.copy_base_curves_files( + curves, working_oc_dir, get_cfg_oc_name(pcs_bm_name, oc_name) + ) + has_imported_curves, sim_t_event_start, fault_duration, fs, df_imported_curves = ( + self.__get_curves_dataframe( + working_oc_dir, pcs_bm_name, oc_name, success, is_reference + ) + ) + config_section = get_cfg_oc_name(pcs_bm_name, oc_name) + ".Event" connect_event_to = config.get_value(config_section, "connect_event_to") if config.has_key(config_section, "setpoint_step_value"): diff --git a/src/dgcv/dynawo/dynawo.py b/src/dgcv/dynawo/dynawo.py index 4daf618..8e7f35e 100644 --- a/src/dgcv/dynawo/dynawo.py +++ 
b/src/dgcv/dynawo/dynawo.py @@ -309,25 +309,9 @@ def _get_modulus(complex_list: list) -> list: return np.abs(complex_list).tolist() -def _create_curves(variable_translations: dict, input_file: Path) -> pd.DataFrame: - """From the curve file generated by the Dynawo dynamic simulator, a new file is created - where the values of the different curves are expressed in the units specified in the file - and/or different curves are added to obtain the required curves. - - Parameters - ---------- - variable_translations: dict - Dictionary with correspondences between tool variables and Dynawo variables - input_file: Path - Curve file created by Dynawo - - Returns - ------- - DataFrame - A DataFrame with the transformed curves - """ - # Get curves file - df_curves_imported = pd.read_csv(input_file, sep=";") +def _translate_curves( + variable_translations: dict, df_curves_imported: pd.DataFrame +) -> pd.DataFrame: column_size = len(df_curves_imported["time"]) cols = list(df_curves_imported.columns) for i in cols: @@ -364,7 +348,30 @@ def _create_curves(variable_translations: dict, input_file: Path) -> pd.DataFram _get_injected_current_curve(column_size, curves_translation) _get_network_frequency_curve(column_size, curves_translation) - df_curves = pd.DataFrame(curves_translation) + return pd.DataFrame(curves_translation) + + +def _create_curves(variable_translations: dict, input_file: Path) -> pd.DataFrame: + """From the curve file generated by the Dynawo dynamic simulator, a new file is created + where the values of the different curves are expressed in the units specified in the file + and/or different curves are added to obtain the required curves. + + Parameters + ---------- + variable_translations: dict + Dictionary with correspondences between tool variables and Dynawo variables + input_file: Path + Curve file created by Dynawo + + Returns + ------- + DataFrame + A DataFrame with the transformed curves + """ + # Get curves file + df_curves_imported = pd.read_csv(input_file, sep=";") + df_curves = _translate_curves(variable_translations, df_curves_imported) + column_size = len(df_curves_imported["time"]) # Calculate PDR Voltage, Power and Current del df_curves["BusPDR_BUS_Voltage"] @@ -398,6 +405,31 @@ def _create_curves(variable_translations: dict, input_file: Path) -> pd.DataFram return pd.DataFrame(curves_dict) +def _trim_curves( + time_values: list, + voltage_values: list, + fault_start: float, + fault_duration: float, +) -> tuple[list, list, list, list]: + pre_idx = 0 + start_idx = 0 + end_idx = -1 + for i in range(len(time_values)): + if time_values[i] - fault_start < -0.0001: + pre_idx = i + elif fault_start - time_values[i] > 0.0001: + start_idx = i + elif time_values[i] - (fault_start + fault_duration) < -0.0001: + end_idx = i + + pre_time_values = time_values[:pre_idx] + pre_voltage_values = voltage_values[:pre_idx] + post_time_values = time_values[start_idx:end_idx] + post_voltage_values = voltage_values[start_idx:end_idx] + + return pre_time_values, post_time_values, pre_voltage_values, post_voltage_values + + def get_dynawo_version( launcher_dwo: Path, ) -> str: @@ -577,21 +609,9 @@ def check_voltage_dip( fault_duration = time_values[-1] - fault_start # trim curves to the fault zone - pre_idx = 0 - start_idx = 0 - end_idx = -1 - for i in range(len(time_values)): - if time_values[i] - fault_start < -0.0001: - pre_idx = i - elif fault_start - time_values[i] > 0.0001: - start_idx = i - elif time_values[i] - (fault_start + fault_duration) < -0.0001: - end_idx = i - - pre_time_values = 
time_values[:pre_idx] - pre_voltage_values = voltage_values[:pre_idx] - post_time_values = time_values[start_idx:end_idx] - post_voltage_values = voltage_values[start_idx:end_idx] + pre_time_values, post_time_values, pre_voltage_values, post_voltage_values = _trim_curves( + time_values, voltage_values, fault_start, fault_duration + ) # Get the stable value before the failure, if it has been initialized correctly it will be # a flat curve, and the method returns a ValueError, in this case the first point of the diff --git a/src/dgcv/dynawo/file_variables.py b/src/dgcv/dynawo/file_variables.py index 0608e3f..bbb159f 100644 --- a/src/dgcv/dynawo/file_variables.py +++ b/src/dgcv/dynawo/file_variables.py @@ -17,6 +17,41 @@ def __init__( def __obtain_value(self, value_definition: str) -> str: return self._simulator.obtain_value(value_definition) + def __obtain_section_value(self, section: str, key: str, generator_type: str) -> str: + key_type = f"{key}_{generator_type}" + if config.has_key(section, key): + return self.__obtain_value(config.get_value(self._bm_section, key)) + elif config.has_key(section, key_type): + return self.__obtain_value(config.get_value(self._bm_section, key_type)) + + return None + + def __get_value(self, key: str) -> str: + generator_type = generator_variables.get_generator_type( + self._simulator.get_producer().u_nom + ) + + value = self.__obtain_section_value(self._bm_section, key, generator_type) + if value: + return value + + value = self.__obtain_section_value(self._oc_section, key, generator_type) + if value: + return value + + value = self.__obtain_section_value(self._model_section, key, generator_type) + if value: + return value + + value = self.__obtain_section_value(self._event_section, key, generator_type) + if value: + return value + + if config.has_key("Dynawo", key): + value = self.__obtain_value(config.get_value("Dynawo", key)) + + return None + def complete_parameters( self, variables_dict: dict, @@ -31,33 +66,12 @@ def complete_parameters( event_params: dict Event parameters """ - generator_type = generator_variables.get_generator_type( - self._simulator.get_producer().u_nom - ) - for key in variables_dict: - key_type = f"{key}_{generator_type}" if key in self._tool_variables: continue - elif config.has_key(self._bm_section, key): - value = self.__obtain_value(str(config.get_value(self._bm_section, key))) - elif config.has_key(self._bm_section, key_type): - value = self.__obtain_value(str(config.get_value(self._bm_section, key_type))) - elif config.has_key(self._oc_section, key): - value = self.__obtain_value(str(config.get_value(self._oc_section, key))) - elif config.has_key(self._oc_section, key_type): - value = self.__obtain_value(str(config.get_value(self._oc_section, key_type))) - elif config.has_key(self._model_section, key): - value = self.__obtain_value(str(config.get_value(self._model_section, key))) - elif config.has_key(self._model_section, key_type): - value = self.__obtain_value(str(config.get_value(self._model_section, key_type))) - elif config.has_key(self._event_section, key): - value = self.__obtain_value(str(config.get_value(self._event_section, key))) - elif config.has_key(self._event_section, key_type): - value = self.__obtain_value(str(config.get_value(self._event_section, key_type))) - elif config.has_key("Dynawo", key): - value = self.__obtain_value(str(config.get_value("Dynawo", key))) - else: + + value = self.__get_value(key) + if not value: continue if key.startswith("delta_t_"): diff --git 
a/src/dgcv/model/operating_condition.py b/src/dgcv/model/operating_condition.py index 43b044b..539adac 100644 --- a/src/dgcv/model/operating_condition.py +++ b/src/dgcv/model/operating_condition.py @@ -86,6 +86,8 @@ def __obtain_curve( reference_event_start_time, curves["reference"] = self._manager.obtain_reference_curve( working_oc_dir, pcs_bm_name, self._name, self.__get_reference_curves() ) + else: + curves["reference"] = pd.DataFrame() ( jobs_output_dir, diff --git a/tests/dgcv/test_tool.py b/tests/dgcv/test_tool.py index 98f2382..c8a2ac9 100644 --- a/tests/dgcv/test_tool.py +++ b/tests/dgcv/test_tool.py @@ -50,8 +50,6 @@ def _execute_tool(producer_model, producer_curves, reference_curves): md = ModelValidation(ep) compliance = md.validate(True) - except Exception as e: - compliance = str(e) finally: shutil.rmtree(output_dir) return compliance From 956801cd01fe54087e471228ab30ceabded72667 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 10:48:13 +0100 Subject: [PATCH 16/24] refactor --- src/dgcv/dynawo/dynawo.py | 69 +++++++++++++++++++++++++++------------ 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/src/dgcv/dynawo/dynawo.py b/src/dgcv/dynawo/dynawo.py index 8e7f35e..8c3d271 100644 --- a/src/dgcv/dynawo/dynawo.py +++ b/src/dgcv/dynawo/dynawo.py @@ -309,15 +309,41 @@ def _get_modulus(complex_list: list) -> list: return np.abs(complex_list).tolist() +def _apply_sign_convention( + variable_translations: dict, + df_curves_imported: pd.DataFrame, + curves_translation: dict, + column: str, +): + for translated_column in variable_translations[column]: + # Apply the tool sign convention to Dynawo curves + sign = variable_translations[translated_column] + curves_translation[translated_column] = np.multiply( + df_curves_imported[column], sign + ).tolist() + + +def _translate_complex_columns( + variable_translations: dict, + df_curves_imported: pd.DataFrame, + curves_translation: dict, + column: str, + column_size: int, +): + for translated_column in variable_translations[column]: + curves_translation[translated_column[:-2]] = _prepare_complex_column( + column[:-2], + column_size, + df_curves_imported, + translated_column[:-2], + variable_translations, + ) + + def _translate_curves( variable_translations: dict, df_curves_imported: pd.DataFrame ) -> pd.DataFrame: column_size = len(df_curves_imported["time"]) - cols = list(df_curves_imported.columns) - for i in cols: - if i[:7] == "Unnamed": - del df_curves_imported[i] - # Some variables of the tool are modeled in a single parameter of the dynamic model, # to avoid conflicts the Dynawo output does not contain duplicate curves, so the tool # must manage duplicate curves to always have the expected number of curves. 
@@ -327,23 +353,19 @@ def _translate_curves( if column in df_curves_imported.columns: if column.endswith("_im"): continue + + if column.endswith("_re"): + _translate_complex_columns( + variable_translations, + df_curves_imported, + curves_translation, + column, + column_size, + ) else: - if column.endswith("_re"): - for translated_column in variable_translations[column]: - curves_translation[translated_column[:-2]] = _prepare_complex_column( - column[:-2], - column_size, - df_curves_imported, - translated_column[:-2], - variable_translations, - ) - else: - for translated_column in variable_translations[column]: - # Apply the tool sign convention to Dynawo curves - sign = variable_translations[translated_column] - curves_translation[translated_column] = np.multiply( - df_curves_imported[column], sign - ).tolist() + _apply_sign_convention( + variable_translations, df_curves_imported, curves_translation, column + ) _get_injected_current_curve(column_size, curves_translation) _get_network_frequency_curve(column_size, curves_translation) @@ -370,6 +392,11 @@ def _create_curves(variable_translations: dict, input_file: Path) -> pd.DataFram """ # Get curves file df_curves_imported = pd.read_csv(input_file, sep=";") + cols = list(df_curves_imported.columns) + for i in cols: + if i[:7] == "Unnamed": + del df_curves_imported[i] + df_curves = _translate_curves(variable_translations, df_curves_imported) column_size = len(df_curves_imported["time"]) From effe92ff85f0446bd11e4beaedc01dee380e0acc Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 10:59:52 +0100 Subject: [PATCH 17/24] reduce complexity, activate CI test --- .github/workflows/dev-ci.yml | 4 +- src/dgcv/launchers.py | 274 ++++++++++++++++++++--------------- 2 files changed, 155 insertions(+), 123 deletions(-) diff --git a/.github/workflows/dev-ci.yml b/.github/workflows/dev-ci.yml index 7b1bd0b..8cf7dff 100644 --- a/.github/workflows/dev-ci.yml +++ b/.github/workflows/dev-ci.yml @@ -39,14 +39,12 @@ jobs: shell: bash run: python -m pip install dist/*.whl - # Pending to be added in the near future: - # * flake8 src --count --exit-zero --max-complexity=10 - name: Linting with flake8 run: | # stop the build if there are Python syntax errors or undefined names flake8 src --count --show-source --statistics # exit-zero treats all errors as warnings. 
The GitHub editor is 99 chars wide - flake8 src --count --exit-zero --statistics + flake8 src --count --exit-zero --statistics --max-complexity=10 # - name: Run tests # run: pytest diff --git a/src/dgcv/launchers.py b/src/dgcv/launchers.py index 21b4109..487ab5f 100644 --- a/src/dgcv/launchers.py +++ b/src/dgcv/launchers.py @@ -320,10 +320,7 @@ def _subcomands_parser() -> argparse.ArgumentParser: return parser -def dgcv() -> None: - p = _subcomands_parser() - args = p.parse_args() - +def _get_dwo_launcher_name(p: argparse.ArgumentParser, args: argparse.Namespace) -> str: dwo_launcher_name = None if "dwo_launcher" in args: dwo_launcher_name = args.dwo_launcher @@ -341,146 +338,183 @@ def dgcv() -> None: ) p.print_help() - if args.command is None: - p.error("Please provide an additional command.") - p.print_help() + return dwo_launcher_name - if args.command != "anonymize": - dwo_launcher = Path(shutil.which(dwo_launcher_name)).resolve() - _check_launchers(dwo_launcher) - initialization.init(dwo_launcher, args.debug) - if args.command == "validate": - user_pcs = args.pcs - if ( - args.producer_model is None and args.producer_curves is None - ) or args.reference_curves is None: - producer_model = None - producer_curves = None - reference_curves = None - output_dir = None - else: - if args.producer_model is None: - producer_model = None - producer_curves = Path(args.producer_curves) - output_dir = ( - producer_curves.parent / "Results" - if args.output_dir is None - else Path(args.output_dir) - ) - elif args.producer_curves is None: - producer_model = Path(args.producer_model) - producer_curves = None - output_dir = ( - producer_model.parent / "Results" - if args.output_dir is None - else Path(args.output_dir) - ) - reference_curves = Path(args.reference_curves) - - if (not producer_model and not producer_curves) or not reference_curves: - p.error("Missing arguments.\nTry 'dgcv validate -h' for more information.") - p.print_help() - return - - r = _model_validation( - dwo_launcher, - output_dir, - producer_model, - producer_curves, - reference_curves, - user_pcs, - args.only_dtr, +def _get_dwo_launcher(args: argparse.Namespace, dwo_launcher_name: str) -> Path: + dwo_launcher = Path(shutil.which(dwo_launcher_name)).resolve() + _check_launchers(dwo_launcher) + initialization.init(dwo_launcher, args.debug) + + +def _execute_anonymize( + p: argparse.ArgumentParser, args: argparse.Namespace, dwo_launcher: Path +) -> None: + if args.producer_curves is None and args.results_path is None: + p.error( + "Missing arguments.\nFor the anonymize command, the producer_curves or the " + "results_path argument is required." ) - if r != 0: - p.error( - "It is not possible to find the producer model or the producer curves " - "to validate. You MUST provide both." 
- ) - p.print_help() + p.print_help() - elif args.command == "generate": + if args.producer_curves is not None: + producer_curves = Path(args.producer_curves) + else: + producer_curves = None + + if args.results_path is not None: + results_path = Path(args.results_path) + else: + results_path = None + + if args.output_dir is None: + output_dir = Path(producer_curves.parent / "Anonymize_Results") + else: output_dir = Path(args.output_dir) - topology = args.topology - validation = args.validation - r = _generate_input(dwo_launcher, output_dir, topology, validation) - if r != 0: - p.print_help() + anonymizer.anonymize(output_dir, args.noisestd, args.frequency, results_path, producer_curves) - elif args.command == "compile": - if args.dynawo_model is None: - dynawo_model = None - else: - dynawo_model = args.dynawo_model - r = _compile_dynawo_models(dwo_launcher, dynawo_model, args.force) - if r != 0: - p.print_help() +def _execute_compile( + p: argparse.ArgumentParser, args: argparse.Namespace, dwo_launcher: Path +) -> None: + if args.dynawo_model is None: + dynawo_model = None + else: + dynawo_model = args.dynawo_model - elif args.command == "performance": - user_pcs = args.pcs + r = _compile_dynawo_models(dwo_launcher, dynawo_model, args.force) + if r != 0: + p.print_help() + + +def _execute_generate( + p: argparse.ArgumentParser, args: argparse.Namespace, dwo_launcher: Path +) -> None: + output_dir = Path(args.output_dir) + topology = args.topology + validation = args.validation + + r = _generate_input(dwo_launcher, output_dir, topology, validation) + if r != 0: + p.print_help() + + +def _execute_performance( + p: argparse.ArgumentParser, args: argparse.Namespace, dwo_launcher: Path +) -> None: + user_pcs = args.pcs + if args.producer_model is None: + producer_model = None + else: + producer_model = Path(args.producer_model) + output_dir = ( + producer_model.parent / "Results" if args.output_dir is None else Path(args.output_dir) + ) + if args.producer_curves is None: + producer_curves = None + else: + producer_curves = Path(args.producer_curves) + output_dir = ( + producer_curves.parent / "Results" + if args.output_dir is None + else Path(args.output_dir) + ) + + if not producer_model and not producer_curves: + p.error("Missing arguments.\nTry 'dgcv performance -h' for more information.") + p.print_help() + return + + r = _performance_verification( + dwo_launcher, + output_dir, + producer_model, + producer_curves, + user_pcs, + args.only_dtr, + ) + if r != 0: + p.error("It is not possible to find the producer model or the producer curves to validate") + p.print_help() + + +def _execute_validate( + p: argparse.ArgumentParser, args: argparse.Namespace, dwo_launcher: Path +) -> None: + user_pcs = args.pcs + if ( + args.producer_model is None and args.producer_curves is None + ) or args.reference_curves is None: + producer_model = None + producer_curves = None + reference_curves = None + output_dir = None + else: if args.producer_model is None: producer_model = None - else: - producer_model = Path(args.producer_model) + producer_curves = Path(args.producer_curves) output_dir = ( - producer_model.parent / "Results" + producer_curves.parent / "Results" if args.output_dir is None else Path(args.output_dir) ) - if args.producer_curves is None: + elif args.producer_curves is None: + producer_model = Path(args.producer_model) producer_curves = None - else: - producer_curves = Path(args.producer_curves) output_dir = ( - producer_curves.parent / "Results" + producer_model.parent / "Results" if 
args.output_dir is None else Path(args.output_dir) ) + reference_curves = Path(args.reference_curves) - if not producer_model and not producer_curves: - p.error("Missing arguments.\nTry 'dgcv performance -h' for more information.") - p.print_help() - return - - r = _performance_verification( - dwo_launcher, - output_dir, - producer_model, - producer_curves, - user_pcs, - args.only_dtr, + if (not producer_model and not producer_curves) or not reference_curves: + p.error("Missing arguments.\nTry 'dgcv validate -h' for more information.") + p.print_help() + return + + r = _model_validation( + dwo_launcher, + output_dir, + producer_model, + producer_curves, + reference_curves, + user_pcs, + args.only_dtr, + ) + if r != 0: + p.error( + "It is not possible to find the producer model or the producer curves " + "to validate. You MUST provide both." ) - if r != 0: - p.error( - "It is not possible to find the producer model or the producer curves to validate" - ) - p.print_help() + p.print_help() - elif args.command == "anonymize": - if args.producer_curves is None and args.results_path is None: - p.error( - "Missing arguments.\nFor the anonymize command, the producer_curves or the " - "results_path argument is required." - ) - p.print_help() - if args.producer_curves is not None: - producer_curves = Path(args.producer_curves) - else: - producer_curves = None +def dgcv() -> None: + p = _subcomands_parser() + args = p.parse_args() - if args.results_path is not None: - results_path = Path(args.results_path) - else: - results_path = None + dwo_launcher_name = _get_dwo_launcher_name(p, args) - if args.output_dir is None: - output_dir = Path(producer_curves.parent / "Anonymize_Results") - else: - output_dir = Path(args.output_dir) + if args.command is None: + p.error("Please provide an additional command.") + p.print_help() - anonymizer.anonymize( - output_dir, args.noisestd, args.frequency, results_path, producer_curves - ) + if args.command != "anonymize": + dwo_launcher = _get_dwo_launcher(args, dwo_launcher_name) + + if args.command == "validate": + _execute_validate(p, args, dwo_launcher) + + elif args.command == "generate": + _execute_generate(p, args, dwo_launcher) + + elif args.command == "compile": + _execute_compile(p, args, dwo_launcher) + + elif args.command == "performance": + _execute_performance(p, args, dwo_launcher) + + elif args.command == "anonymize": + _execute_anonymize(p, args, dwo_launcher) From a490fd94fa7517638dda62716c7ff2b3b2086325 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 11:33:22 +0100 Subject: [PATCH 18/24] Filter alterntives --- src/dgcv/sigpro/sigpro.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/dgcv/sigpro/sigpro.py b/src/dgcv/sigpro/sigpro.py index 8906b22..62ee727 100644 --- a/src/dgcv/sigpro/sigpro.py +++ b/src/dgcv/sigpro/sigpro.py @@ -2,7 +2,6 @@ import pandas as pd from scipy import signal from scipy.interpolate import PchipInterpolator -from scipy.signal import bessel, filtfilt from dgcv.configuration.cfg import config @@ -41,11 +40,14 @@ def resample(x, t, fs=1000): return t_r, np.interp(t_r, t, x) -def lowpass_filter(x, cutoff=15, fs=1000): +def lowpass_filter(x, cutoff=15, fs=1000, filter="bessel"): # The filter type should have minimal ringing, so bessel is preferred - # b, a = butter(5, cutoff, fs=fs, btype="low", analog=False) - b, a = bessel(2, cutoff, fs=fs, btype="low", analog=False) - return filtfilt(b, a, x) + # b, a = signal.butter(5, cutoff, fs=fs, btype="low", analog=False) + if filter 
== "bessel": + b, a = signal.bessel(2, cutoff, fs=fs, btype="low", analog=False) + if filter == "butter": + b, a = signal.butter(5, cutoff, fs=fs, btype="low", analog=False) + return signal.filtfilt(b, a, x) def ensure_rms_signals(curves, fs): @@ -93,7 +95,7 @@ def resampling_signal(curves, fs=1000): return pd.DataFrame.from_dict(resampled_curve_dict, orient="columns") -def lowpass_signal(curves, cutoff=15, fs=1000): +def lowpass_signal(curves, cutoff=15, fs=1000, filter="bessel"): lowpass_curve_dict = {} for col in curves.columns: if "time" in col: @@ -107,7 +109,7 @@ def lowpass_signal(curves, cutoff=15, fs=1000): ): c_filt = c else: - c_filt = lowpass_filter(c, cutoff, fs) + c_filt = lowpass_filter(c, cutoff, fs, filter) # For avoiding overflows in PChipInterpolator c_filt[abs(c_filt) < ZERO_THRESHOLD] = 0.0 From f9b1330f6431a2a373da71e93dd28790e32a829a Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 12:38:52 +0100 Subject: [PATCH 19/24] refactor curves managment --- src/dgcv/configuration/defaultConfig.ini | 4 +- src/dgcv/core/execution_parameters.py | 10 ++-- src/dgcv/core/initialization.py | 10 ++-- .../core/{simulator.py => producer_curves.py} | 5 +- .../{model_validation.py => validation.py} | 30 +++++----- src/dgcv/curves/{manager.py => curves.py} | 6 +- src/dgcv/dynawo/{simulator.py => curves.py} | 10 ++-- src/dgcv/dynawo/dyd.py | 10 ++-- src/dgcv/dynawo/file_variables.py | 14 +++-- src/dgcv/dynawo/jobs.py | 8 +-- src/dgcv/dynawo/par.py | 6 +- src/dgcv/dynawo/table.py | 6 +- src/dgcv/files/dynawo_curves_file.py | 9 +-- src/dgcv/launchers.py | 6 +- src/dgcv/logging/logger.py | 4 +- src/dgcv/model/benchmark.py | 20 +++---- src/dgcv/model/operating_condition.py | 47 ++++++++------- src/dgcv/model/producer.py | 50 ++++++++-------- src/dgcv/validation/model.py | 2 +- src/dgcv/validation/performance.py | 2 +- tests/dgcv/test_tool.py | 60 +++++++++++-------- 21 files changed, 169 insertions(+), 150 deletions(-) rename src/dgcv/core/{simulator.py => producer_curves.py} (99%) rename src/dgcv/core/{model_validation.py => validation.py} (87%) rename src/dgcv/curves/{manager.py => curves.py} (98%) rename src/dgcv/dynawo/{simulator.py => curves.py} (99%) diff --git a/src/dgcv/configuration/defaultConfig.ini b/src/dgcv/configuration/defaultConfig.ini index bbfc19e..084be0d 100644 --- a/src/dgcv/configuration/defaultConfig.ini +++ b/src/dgcv/configuration/defaultConfig.ini @@ -442,8 +442,8 @@ MqG = 1 [CurvesVariables] SM = RotorSpeedPu,InternalAngle,AVRSetpointPu,MagnitudeControlledByAVRPu,NetworkFrequencyPu PPM = InjectedActiveCurrent,InjectedReactiveCurrent,AVRSetpointPu,MagnitudeControlledByAVRPu,NetworkFrequencyPu -ModelValidationZ3 = InjectedActiveCurrent,InjectedReactiveCurrent,AVRSetpointPu,MagnitudeControlledByAVRPu,NetworkFrequencyPu -ModelValidationZ1 = ActivePowerSetpointPu,ReactivePowerSetpointPu,AVRSetpointPu,InjectedActiveCurrent,InjectedReactiveCurrent,MagnitudeControlledByAVRPu +ValidationZ3 = InjectedActiveCurrent,InjectedReactiveCurrent,AVRSetpointPu,MagnitudeControlledByAVRPu,NetworkFrequencyPu +ValidationZ1 = ActivePowerSetpointPu,ReactivePowerSetpointPu,AVRSetpointPu,InjectedActiveCurrent,InjectedReactiveCurrent,MagnitudeControlledByAVRPu [Debug] # Modify the time range to include t0 in the showed range diff --git a/src/dgcv/core/execution_parameters.py b/src/dgcv/core/execution_parameters.py index 21e4137..a32048f 100644 --- a/src/dgcv/core/execution_parameters.py +++ b/src/dgcv/core/execution_parameters.py @@ -28,7 +28,7 @@ class Parameters: Dynawo launcher 
producer_model: Path Producer Model directory - producer_curves: Path + producer_curves_path: Path Producer curves directory selected_pcs: str Individual PCS to validate @@ -46,8 +46,8 @@ def __init__( self, launcher_dwo: Path, producer_model: Path, - producer_curves: Path, - reference_curves: Path, + producer_curves_path: Path, + reference_curves_path: Path, selected_pcs: str, output_dir: Path, only_dtr: bool, @@ -60,7 +60,9 @@ def __init__( self._only_dtr = only_dtr # Read producer inputs - self._producer = Producer(producer_model, producer_curves, reference_curves, sim_type) + self._producer = Producer( + producer_model, producer_curves_path, reference_curves_path, sim_type + ) tmp_path = config.get_value("Global", "temporal_path") username = getpass.getuser() diff --git a/src/dgcv/core/initialization.py b/src/dgcv/core/initialization.py index 59c8685..5343078 100644 --- a/src/dgcv/core/initialization.py +++ b/src/dgcv/core/initialization.py @@ -4,7 +4,7 @@ from pathlib import Path from dgcv.configuration.cfg import config -from dgcv.core.model_validation import ModelValidation +from dgcv.core.validation import Validation from dgcv.dynawo.prepare_tool import precompile from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging @@ -190,24 +190,24 @@ def init(launcher_dwo: Path, debug: bool) -> None: manage_files.create_dir(config.get_config_dir()) manage_files.create_config_file( - ModelValidation.get_project_path() / "configuration" / "config.ini", + Validation.get_project_path() / "configuration" / "config.ini", config.get_config_dir() / "config.ini_BASIC", ) manage_files.create_config_file( - ModelValidation.get_project_path() / "configuration" / "defaultConfig.ini", + Validation.get_project_path() / "configuration" / "defaultConfig.ini", config.get_config_dir() / "config.ini_ADVANCED", ) if not _is_valid_config_file(config.get_config_dir() / "config.ini"): if not (config.get_config_dir() / "config.ini").is_file(): manage_files.create_config_file( - ModelValidation.get_project_path() / "configuration" / "config.ini", + Validation.get_project_path() / "configuration" / "config.ini", config.get_config_dir() / "config.ini", ) else: _check_config_file( - ModelValidation.get_project_path() / "configuration" / "defaultConfig.ini", + Validation.get_project_path() / "configuration" / "defaultConfig.ini", config.get_config_dir() / "config.ini", ) diff --git a/src/dgcv/core/simulator.py b/src/dgcv/core/producer_curves.py similarity index 99% rename from src/dgcv/core/simulator.py rename to src/dgcv/core/producer_curves.py index 05bce70..290a17e 100644 --- a/src/dgcv/core/simulator.py +++ b/src/dgcv/core/producer_curves.py @@ -9,13 +9,14 @@ # from pathlib import Path +import pandas as pd + from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters from dgcv.core.global_variables import CASE_SEPARATOR from dgcv.core.validator import Disconnection_Model from dgcv.electrical.generator_variables import generator_variables from dgcv.model.producer import Producer -import pandas as pd def get_cfg_oc_name(pcs_bm_name: str, oc_name: str) -> str: @@ -24,7 +25,7 @@ def get_cfg_oc_name(pcs_bm_name: str, oc_name: str) -> str: return pcs_bm_name + CASE_SEPARATOR + oc_name -class Simulator: +class ProducerCurves: def __init__( self, parameters: Parameters, diff --git a/src/dgcv/core/model_validation.py b/src/dgcv/core/validation.py similarity index 87% rename from src/dgcv/core/model_validation.py rename to src/dgcv/core/validation.py index 
48bcd13..4e2ffd9 100644 --- a/src/dgcv/core/model_validation.py +++ b/src/dgcv/core/validation.py @@ -35,17 +35,17 @@ def _open_document(file: Path): if os.name == "nt": - dgcv_logging.get_logger("ModelValidation").info(f"Opening the report: {file}") + dgcv_logging.get_logger("Validation").info(f"Opening the report: {file}") subprocess.run(["start", file], shell=True) else: if shutil.which("open") and os.environ.get("DISPLAY"): - dgcv_logging.get_logger("ModelValidation").info(f"Opening the report: {file}") + dgcv_logging.get_logger("Validation").info(f"Opening the report: {file}") subprocess.run(["open", file], check=True) else: - dgcv_logging.get_logger("ModelValidation").info(f"Report saved in: {file}") + dgcv_logging.get_logger("Validation").info(f"Report saved in: {file}") -class ModelValidation: +class Validation: """Validation of producer inputs. There are two types of validations, electrical performance and model validation. Additionally, the electrical performance differs between the synchronous generator-type @@ -75,7 +75,7 @@ def __init__( validation_pcs.add(parameters.get_selected_pcs()) if parameters.get_sim_type() == ELECTRIC_PERFORMANCE_SM: - dgcv_logging.get_logger("ModelValidation").info( + dgcv_logging.get_logger("Validation").info( "Electric Performance Verification for Synchronous Machines" ) self.__get_validation_pcs( @@ -83,7 +83,7 @@ def __init__( ) elif parameters.get_sim_type() == ELECTRIC_PERFORMANCE_PPM: - dgcv_logging.get_logger("ModelValidation").info( + dgcv_logging.get_logger("Validation").info( "Electric Performance Verification for Power Park Modules" ) self.__get_validation_pcs( @@ -91,7 +91,7 @@ def __init__( ) elif parameters.get_sim_type() == ELECTRIC_PERFORMANCE_BESS: - dgcv_logging.get_logger("ModelValidation").info( + dgcv_logging.get_logger("Validation").info( "Electric Performance Verification for Storage" ) self.__get_validation_pcs( @@ -99,13 +99,13 @@ def __init__( ) elif parameters.get_sim_type() == MODEL_VALIDATION_PPM: - dgcv_logging.get_logger("ModelValidation").info( + dgcv_logging.get_logger("Validation").info( "DGCV Model Validation for Power Park Modules" ) self.__get_validation_pcs(validation_pcs, "model_ppm_validation_pcs", "model/PPM") elif parameters.get_sim_type() == MODEL_VALIDATION_BESS: - dgcv_logging.get_logger("ModelValidation").info("DGCV Model Validation for Storage") + dgcv_logging.get_logger("Validation").info("DGCV Model Validation for Storage") self.__get_validation_pcs(validation_pcs, "model_bess_validation_pcs", "model/BESS") self._validation_pcs = validation_pcs @@ -151,7 +151,7 @@ def __initialize_working_environment(self) -> None: def __create_report(self, summary_list: list, report_results: dict) -> None: """Create the full report.""" sorted_summary_list = sorted(summary_list, key=attrgetter("id", "zone")) - dgcv_logging.get_logger("ModelValidation").debug(f"Sorted summary {sorted_summary_list}") + dgcv_logging.get_logger("Validation").debug(f"Sorted summary {sorted_summary_list}") try: report.create_pdf( sorted_summary_list, @@ -161,9 +161,9 @@ def __create_report(self, summary_list: list, report_results: dict) -> None: ) except (LatexReportException, FileNotFoundError, IOError, ValueError) as e: if dgcv_logging.getEffectiveLevel() == logging.DEBUG: - dgcv_logging.get_logger("ModelValidation").exception(f"Aborted execution. {e}") + dgcv_logging.get_logger("Validation").exception(f"Aborted execution. {e}") else: - dgcv_logging.get_logger("ModelValidation").error(f"Aborted execution. 
{e}") + dgcv_logging.get_logger("Validation").error(f"Aborted execution. {e}") exit(1) for pcs_results in report_results.values(): @@ -211,7 +211,7 @@ def validate(self, is_test_validation: bool = False) -> list: for pcs in self._pcs_list: try: if not pcs.is_valid(): - dgcv_logging.get_logger("ModelValidation").error( + dgcv_logging.get_logger("Validation").error( f"{pcs.get_name()} is not a valid PCS" ) continue @@ -226,9 +226,9 @@ def validate(self, is_test_validation: bool = False) -> list: report_results[pcs.get_name()] = pcs_results except (LatexReportException, FileNotFoundError, IOError, ValueError) as e: if dgcv_logging.getEffectiveLevel() == logging.DEBUG: - dgcv_logging.get_logger("ModelValidation").exception(f"Aborted execution. {e}") + dgcv_logging.get_logger("Validation").exception(f"Aborted execution. {e}") else: - dgcv_logging.get_logger("ModelValidation").error(f"Aborted execution. {e}") + dgcv_logging.get_logger("Validation").error(f"Aborted execution. {e}") exit(1) # Create the pcs report diff --git a/src/dgcv/curves/manager.py b/src/dgcv/curves/curves.py similarity index 98% rename from src/dgcv/curves/manager.py rename to src/dgcv/curves/curves.py index f7e08a6..c88f6bb 100644 --- a/src/dgcv/curves/manager.py +++ b/src/dgcv/curves/curves.py @@ -13,7 +13,7 @@ from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters -from dgcv.core.simulator import Simulator, get_cfg_oc_name +from dgcv.core.producer_curves import ProducerCurves, get_cfg_oc_name from dgcv.core.validator import Disconnection_Model from dgcv.curves.importer import CurvesImporter from dgcv.files import manage_files @@ -36,7 +36,7 @@ def _get_config_value(config, section, option, default=0.0): return default -class CurvesManager(Simulator): +class ImportedCurves(ProducerCurves): def __init__( self, parameters: Parameters, @@ -240,7 +240,7 @@ def obtain_simulated_curve( has_imported_curves, curves, ) = self.__obtain_files_curve( - working_oc_dir, pcs_bm_name, oc_name, self.get_producer().get_producer_curves() + working_oc_dir, pcs_bm_name, oc_name, self.get_producer().get_producer_curves_path() ) return ( diff --git a/src/dgcv/dynawo/simulator.py b/src/dgcv/dynawo/curves.py similarity index 99% rename from src/dgcv/dynawo/simulator.py rename to src/dgcv/dynawo/curves.py index 6b929fc..0051b77 100644 --- a/src/dgcv/dynawo/simulator.py +++ b/src/dgcv/dynawo/curves.py @@ -16,7 +16,7 @@ from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters -from dgcv.core.simulator import Simulator, get_cfg_oc_name +from dgcv.core.producer_curves import ProducerCurves, get_cfg_oc_name from dgcv.core.validator import Disconnection_Model from dgcv.dynawo import dynawo from dgcv.dynawo.dyd import DydFile @@ -41,7 +41,7 @@ from dgcv.validation import common, sanity_checks -class DynawoSimulator(Simulator): +class DynawoCurves(ProducerCurves): def __init__( self, parameters: Parameters, @@ -71,11 +71,11 @@ def __init__( sanity_checks.check_simulation_duration(self.get_simulation_duration()) logging.setLoggerClass(SimulationLogger) - self._logger = logging.getLogger("Simulator") + self._logger = logging.getLogger("ProducerCurves") def __log(self, message: str): self._logger.info(message) - dgcv_logging.get_logger("Simulator").debug(message) + dgcv_logging.get_logger("ProducerCurves").debug(message) def __prepare_oc_validation( self, @@ -234,7 +234,7 @@ def __complete_model( oc_name, ) if reference_event_start_time and event_params["start_time"] != 
reference_event_start_time: - dgcv_logging.get_logger("Dynawo Simulator").warning( + dgcv_logging.get_logger("Dynawo ProducerCurves").warning( f"The simulation will use the 'sim_t_event_start' value present in the Reference " f"Curves ({reference_event_start_time}), instead of the value configured " f"({event_params['start_time']})." diff --git a/src/dgcv/dynawo/dyd.py b/src/dgcv/dynawo/dyd.py index bff7d30..dc9a7e3 100644 --- a/src/dgcv/dynawo/dyd.py +++ b/src/dgcv/dynawo/dyd.py @@ -9,21 +9,21 @@ # from pathlib import Path +from dgcv.dynawo.curves import ProducerCurves from dgcv.dynawo.file_variables import FileVariables -from dgcv.dynawo.simulator import Simulator from dgcv.dynawo.translator import dynawo_translator from dgcv.files import replace_placeholders class DydFile(FileVariables): - def __init__(self, simulator: Simulator, bm_section: str, oc_section: str): + def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: str): tool_variables = [ "generator_id", "connection_event", ] super().__init__( tool_variables, - simulator, + producer_curves, bm_section, oc_section, ) @@ -42,9 +42,9 @@ def complete_file(self, working_oc_dir: Path, event_params: dict) -> None: if event_params["connect_to"]: connect_event_to = dynawo_translator.get_dynawo_variable( - self._simulator.get_producer().generators[0].lib, event_params["connect_to"] + self._producer_curves.get_producer().generators[0].lib, event_params["connect_to"] ) - variables_dict["generator_id"] = self._simulator.get_producer().generators[0].id + variables_dict["generator_id"] = self._producer_curves.get_producer().generators[0].id variables_dict["connection_event"] = connect_event_to self.complete_parameters(variables_dict, event_params) diff --git a/src/dgcv/dynawo/file_variables.py b/src/dgcv/dynawo/file_variables.py index bbb159f..9ac677c 100644 --- a/src/dgcv/dynawo/file_variables.py +++ b/src/dgcv/dynawo/file_variables.py @@ -1,13 +1,17 @@ from dgcv.configuration.cfg import config -from dgcv.core.simulator import Simulator +from dgcv.core.producer_curves import ProducerCurves from dgcv.electrical.generator_variables import generator_variables class FileVariables: def __init__( - self, tool_variables: list, simulator: Simulator, bm_section: str, oc_section: str + self, + tool_variables: list, + producer_curves: ProducerCurves, + bm_section: str, + oc_section: str, ): - self._simulator = simulator + self._producer_curves = producer_curves self._bm_section = bm_section self._oc_section = oc_section self._model_section = f"{bm_section}.{oc_section}.Model" @@ -15,7 +19,7 @@ def __init__( self._tool_variables = tool_variables def __obtain_value(self, value_definition: str) -> str: - return self._simulator.obtain_value(value_definition) + return self._producer_curves.obtain_value(value_definition) def __obtain_section_value(self, section: str, key: str, generator_type: str) -> str: key_type = f"{key}_{generator_type}" @@ -28,7 +32,7 @@ def __obtain_section_value(self, section: str, key: str, generator_type: str) -> def __get_value(self, key: str) -> str: generator_type = generator_variables.get_generator_type( - self._simulator.get_producer().u_nom + self._producer_curves.get_producer().u_nom ) value = self.__obtain_section_value(self._bm_section, key, generator_type) diff --git a/src/dgcv/dynawo/jobs.py b/src/dgcv/dynawo/jobs.py index 6a514e5..c88c19e 100644 --- a/src/dgcv/dynawo/jobs.py +++ b/src/dgcv/dynawo/jobs.py @@ -10,13 +10,13 @@ from pathlib import Path from dgcv.configuration.cfg import config +from 
dgcv.dynawo.curves import ProducerCurves from dgcv.dynawo.file_variables import FileVariables -from dgcv.dynawo.simulator import Simulator from dgcv.files import replace_placeholders class JobsFile(FileVariables): - def __init__(self, simulator: Simulator, bm_section: str, oc_section: str): + def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: str): tool_variables = [ "solver_lib", "solver_id", @@ -25,7 +25,7 @@ def __init__(self, simulator: Simulator, bm_section: str, oc_section: str): ] super().__init__( tool_variables, - simulator, + producer_curves, bm_section, oc_section, ) @@ -46,7 +46,7 @@ def complete_file(self, working_oc_dir: Path, event_params: dict) -> None: variables_dict["solver_id"] = "IDA" variables_dict["dgcv_ddb_path"] = config.get_config_dir() / "ddb" - variables_dict["producer_dyd"] = self._simulator.get_producer().get_producer_dyd().name + variables_dict["producer_dyd"] = self._producer_curves.get_producer().get_producer_dyd().name self.complete_parameters(variables_dict, event_params) diff --git a/src/dgcv/dynawo/par.py b/src/dgcv/dynawo/par.py index 72a7f9a..bbf4eb5 100644 --- a/src/dgcv/dynawo/par.py +++ b/src/dgcv/dynawo/par.py @@ -9,14 +9,14 @@ # from pathlib import Path +from dgcv.dynawo.curves import ProducerCurves from dgcv.dynawo.file_variables import FileVariables -from dgcv.dynawo.simulator import Simulator from dgcv.files import replace_placeholders from dgcv.model.parameters import Gen_init class ParFile(FileVariables): - def __init__(self, simulator: Simulator, bm_section: str, oc_section: str): + def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: str): tool_variables = [ "line_XPu", "line_RPu", @@ -32,7 +32,7 @@ def __init__(self, simulator: Simulator, bm_section: str, oc_section: str): ] super().__init__( tool_variables, - simulator, + producer_curves, bm_section, oc_section, ) diff --git a/src/dgcv/dynawo/table.py b/src/dgcv/dynawo/table.py index 1579145..71d515d 100644 --- a/src/dgcv/dynawo/table.py +++ b/src/dgcv/dynawo/table.py @@ -9,14 +9,14 @@ # from pathlib import Path +from dgcv.dynawo.curves import ProducerCurves from dgcv.dynawo.file_variables import FileVariables -from dgcv.dynawo.simulator import Simulator from dgcv.files import replace_placeholders from dgcv.model.parameters import Gen_init class TableFile(FileVariables): - def __init__(self, simulator: Simulator, bm_section: str, oc_section: str): + def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: str): tool_variables = [ "start_event", "end_event", @@ -26,7 +26,7 @@ def __init__(self, simulator: Simulator, bm_section: str, oc_section: str): ] super().__init__( tool_variables, - simulator, + producer_curves, bm_section, oc_section, ) diff --git a/src/dgcv/files/dynawo_curves_file.py b/src/dgcv/files/dynawo_curves_file.py index b2385b8..f6d60c4 100644 --- a/src/dgcv/files/dynawo_curves_file.py +++ b/src/dgcv/files/dynawo_curves_file.py @@ -12,10 +12,7 @@ from lxml import etree from dgcv.configuration.cfg import config -from dgcv.core.global_variables import ( - ELECTRIC_PERFORMANCE_SM, - MODEL_VALIDATION_PPM, -) +from dgcv.core.global_variables import ELECTRIC_PERFORMANCE_SM, MODEL_VALIDATION_PPM from dgcv.dynawo.translator import dynawo_translator @@ -180,9 +177,9 @@ def _add_model_validation_curves( curves_dict: dict, ) -> None: if zone == 3: - generator_variables = config.get_list("CurvesVariables", "ModelValidationZ3") + generator_variables = config.get_list("CurvesVariables", "ValidationZ3") elif 
zone == 1: - generator_variables = config.get_list("CurvesVariables", "ModelValidationZ1") + generator_variables = config.get_list("CurvesVariables", "ValidationZ1") else: generator_variables = [] _add_generators_curves(curves_root, generators, generator_variables, control_mode, curves_dict) diff --git a/src/dgcv/launchers.py b/src/dgcv/launchers.py index 487ab5f..bc7dca3 100644 --- a/src/dgcv/launchers.py +++ b/src/dgcv/launchers.py @@ -16,7 +16,7 @@ from dgcv.core import initialization from dgcv.core.execution_parameters import Parameters from dgcv.core.input_template import create_input_template -from dgcv.core.model_validation import ModelValidation +from dgcv.core.validation import Validation from dgcv.curves import anonymizer from dgcv.dynawo import prepare_tool from dgcv.logging.logging import dgcv_logging @@ -54,7 +54,7 @@ def _performance_verification( ) if ep.is_valid(): - md = ModelValidation( + md = Validation( ep, ) md.validate() @@ -87,7 +87,7 @@ def _model_validation( if not ep.is_complete(): return -1 - md = ModelValidation( + md = Validation( ep, ) md.validate() diff --git a/src/dgcv/logging/logger.py b/src/dgcv/logging/logger.py index c9c5541..3e72153 100644 --- a/src/dgcv/logging/logger.py +++ b/src/dgcv/logging/logger.py @@ -10,9 +10,11 @@ import logging from pathlib import Path -from dgcv.logging.custom_formatter import CustomFormatter + import colorama +from dgcv.logging.custom_formatter import CustomFormatter + colorama.init() diff --git a/src/dgcv/model/benchmark.py b/src/dgcv/model/benchmark.py index 9b8cdd1..9317034 100644 --- a/src/dgcv/model/benchmark.py +++ b/src/dgcv/model/benchmark.py @@ -13,9 +13,9 @@ from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters from dgcv.core.global_variables import CASE_SEPARATOR, MODEL_VALIDATION_PPM -from dgcv.core.simulator import Simulator -from dgcv.curves.manager import CurvesManager -from dgcv.dynawo.simulator import DynawoSimulator +from dgcv.core.producer_curves import ProducerCurves +from dgcv.curves.curves import ImportedCurves +from dgcv.dynawo.curves import DynawoCurves from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging from dgcv.model.compliance import Compliance @@ -72,13 +72,13 @@ def __init__( stable_time = config.get_float("GridCode", "stable_time", 100.0) ( op_names, - simulator, + producer_curves, reference_manager, validator, ) = self.__prepare_benchmark_validation(parameters, stable_time) self._op_cond_list = [ OperatingCondition( - simulator, + producer_curves, reference_manager, validator, parameters, @@ -90,7 +90,7 @@ def __init__( def __prepare_benchmark_validation( self, parameters: Parameters, stable_time: float - ) -> tuple[list, Simulator, CurvesManager]: + ) -> tuple[list, ProducerCurves, ImportedCurves]: # Read Benchmark configurations and prepare current Benchmark work path. 
# Creates a specific folder by pcs if not (self._working_dir / self._pcs_name).is_dir(): @@ -115,7 +115,7 @@ def __prepare_benchmark_validation( config.get_config_dir() / self._templates_path / sim_type_path / self._pcs_name ) - simulator = DynawoSimulator( + producer_curves = DynawoCurves( parameters, self._pcs_name, model_path, @@ -125,9 +125,9 @@ def __prepare_benchmark_validation( stable_time, ) elif producer.is_user_curves(): - simulator = CurvesManager(parameters) + producer_curves = ImportedCurves(parameters) - reference_manager = CurvesManager(parameters) + reference_manager = ImportedCurves(parameters) ops = config.get_list("PCS-OperatingConditions", pcs_benchmark_name) validations = self.__initialize_validation_by_benchmark() if producer.get_sim_type() >= MODEL_VALIDATION_PPM: @@ -143,7 +143,7 @@ def __prepare_benchmark_validation( ) # If it is not a pcs with multiple operating conditions, returns itself - return ops, simulator, reference_manager, validator + return ops, producer_curves, reference_manager, validator def __initialize_validation_by_benchmark(self) -> list: # Prepare the validation list by pcs.benchmark diff --git a/src/dgcv/model/operating_condition.py b/src/dgcv/model/operating_condition.py index 539adac..f05d499 100644 --- a/src/dgcv/model/operating_condition.py +++ b/src/dgcv/model/operating_condition.py @@ -10,14 +10,15 @@ import logging from pathlib import Path +import pandas as pd + from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters -from dgcv.core.simulator import Simulator, get_cfg_oc_name +from dgcv.core.producer_curves import ProducerCurves, get_cfg_oc_name from dgcv.core.validator import Validator -from dgcv.curves.manager import CurvesManager +from dgcv.curves.curves import ImportedCurves from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging -import pandas as pd class OperatingCondition: @@ -45,15 +46,15 @@ class OperatingCondition: def __init__( self, - simulator: Simulator, - manager: CurvesManager, + producer_curves: ProducerCurves, + reference_curves: ImportedCurves, validator: Validator, parameters: Parameters, pcs_name: str, oc_name: str, ): - self._simulator = simulator - self._manager = manager + self._producer_curves = producer_curves + self._reference_curves = reference_curves self._validator = validator self._working_dir = parameters.get_working_dir() self._producer = parameters.get_producer() @@ -64,12 +65,12 @@ def __init__( self._thr_ss_tol = config.get_float("GridCode", "thr_ss_tol", 0.002) def __has_reference_curves(self) -> bool: - return self._producer.has_reference_curves() + return self._producer.has_reference_curves_path() - def __get_reference_curves(self) -> Path: - if not hasattr(self, "_reference_curves"): - self._reference_curves = self._producer.get_reference_curves() - return self._reference_curves + def __get_reference_curves_path(self) -> Path: + if not hasattr(self, "_reference_curves_path"): + self._reference_curves_path = self._producer.get_reference_curves_path() + return self._reference_curves_path def __obtain_curve( self, @@ -83,8 +84,10 @@ def __obtain_curve( curves = dict() reference_event_start_time = None if self.__has_reference_curves(): - reference_event_start_time, curves["reference"] = self._manager.obtain_reference_curve( - working_oc_dir, pcs_bm_name, self._name, self.__get_reference_curves() + reference_event_start_time, curves["reference"] = ( + self._reference_curves.obtain_reference_curve( + working_oc_dir, pcs_bm_name, self._name, 
self.__get_reference_curves_path() + ) ) else: curves["reference"] = pd.DataFrame() @@ -96,7 +99,7 @@ def __obtain_curve( success, has_simulated_curves, curves["calculated"], - ) = self._simulator.obtain_simulated_curve( + ) = self._producer_curves.obtain_simulated_curve( working_oc_dir, pcs_bm_name, bm_name, @@ -126,16 +129,16 @@ def __validate( if self._validator.is_defined_cct(): self._validator.set_time_cct( - self._simulator.get_time_cct( + self._producer_curves.get_time_cct( working_oc_dir, jobs_output_dir, event_params["duration_time"], ) ) - self._validator.set_generators_imax(self._simulator.get_generators_imax()) - self._validator.set_disconnection_model(self._simulator.get_disconnection_model()) + self._validator.set_generators_imax(self._producer_curves.get_generators_imax()) + self._validator.set_disconnection_model(self._producer_curves.get_disconnection_model()) self._validator.set_setpoint_variation( - self._simulator.get_setpoint_variation(get_cfg_oc_name(pcs_bm_name, self._name)) + self._producer_curves.get_setpoint_variation(get_cfg_oc_name(pcs_bm_name, self._name)) ) results = self._validator.validate( @@ -200,7 +203,7 @@ def validate( working_path: Path Working path. jobs_output_dir: Path - Simulator output path. + ProducerCurves output path. event_params: dict Event parameters fs: float @@ -232,7 +235,7 @@ def validate( else: results = {"compliance": False, "curves": None} - results["udim"] = self._simulator.get_generator_u_dim() + results["udim"] = self._producer_curves.get_generator_u_dim() return success, results def has_required_curves( @@ -254,7 +257,7 @@ def has_required_curves( Path Working path. Path - Simulator output path. + ProducerCurves output path. dict Event parameters float diff --git a/src/dgcv/model/producer.py b/src/dgcv/model/producer.py index ce19bb7..5e8efb8 100644 --- a/src/dgcv/model/producer.py +++ b/src/dgcv/model/producer.py @@ -44,9 +44,9 @@ class Producer: Args ---- - producer_model: Path + producer_model_path: Path Directory to the Dynamic Model, if it is given - producer_curves: Path + producer_curves_path: Path Directory to the User Curves, if it is given verification_type: int 0 if it is an electrical performance verification @@ -55,20 +55,20 @@ class Producer: def __init__( self, - producer_model: Path, - producer_curves: Path, - reference_curves: Path, + producer_model_path: Path, + producer_curves_path: Path, + reference_curves_path: Path, verification_type: int, ): self._s_nref = config.get_float("GridCode", "s_nref", 100.0) - self._producer_model = producer_model - self._producer_curves = producer_curves - self._reference_curves = reference_curves + self._producer_model_path = producer_model_path + self._producer_curves_path = producer_curves_path + self._reference_curves_path = reference_curves_path self._zone = 0 - self._is_dynawo_model = self._producer_model is not None - self._is_user_curves = self._producer_curves is not None - self._has_reference_curves = self._reference_curves is not None + self._is_dynawo_model = self._producer_model_path is not None + self._is_user_curves = self._producer_curves_path is not None + self._has_reference_curves_path = self._reference_curves_path is not None if verification_type == ELECTRIC_PERFORMANCE: self.__set_electric_performance_type() @@ -251,10 +251,10 @@ def __init_model(self) -> None: sanity_checks.check_generators(self.generators) def __get_file_by_pattern(self, pattern) -> Path: - if self._producer_model is not None: - producer_path = self._producer_model - elif 
self._producer_curves is not None: - producer_path = self._producer_curves + if self._producer_model_path is not None: + producer_path = self._producer_model_path + elif self._producer_curves_path is not None: + producer_path = self._producer_curves_path else: dgcv_logging.get_logger("Producer").error("No producer model has been defined") return None @@ -308,9 +308,9 @@ def get_producer_path(self) -> Path: Producer directory """ if self.is_dynawo_model(): - return self._producer_model + return self._producer_model_path else: - return self._producer_curves + return self._producer_curves_path def get_reference_path(self) -> Path: """Get the Reference directory. @@ -320,7 +320,7 @@ def get_reference_path(self) -> Path: Path Reference directory """ - return self._reference_curves + return self._reference_curves_path def set_zone(self, zone: int) -> None: """Set the zone to test. @@ -338,7 +338,7 @@ def set_zone(self, zone: int) -> None: sanity_checks.check_well_formed_xml(self.get_producer_dyd()) sanity_checks.check_well_formed_xml(self.get_producer_par()) sanity_checks.check_curves_files( - self._producer_model, self._reference_curves, self.get_sim_type_str() + self._producer_model_path, self._reference_curves_path, self.get_sim_type_str() ) self.__init_model() @@ -372,7 +372,7 @@ def is_user_curves(self) -> bool: """ return self._is_user_curves - def has_reference_curves(self) -> bool: + def has_reference_curves_path(self) -> bool: """Check if there are reference curves directory. Returns @@ -380,7 +380,7 @@ def has_reference_curves(self) -> bool: bool True if has a reference curves directory, False otherwise """ - return self._has_reference_curves + return self._has_reference_curves_path def get_sim_type_str(self) -> str: """Gets a string according to the type of validation executed. @@ -447,7 +447,7 @@ def get_producer_par(self): pattern_par = re.compile(r".*.[pP][aA][rR]") return self.__get_file_by_pattern(pattern_par) - def get_producer_curves(self) -> Path: + def get_producer_curves_path(self) -> Path: """Gets the Producer Curves Directory. Returns @@ -455,9 +455,9 @@ def get_producer_curves(self) -> Path: Path Path to the Producer Curves Directory """ - return self._producer_curves.resolve() + return self._producer_curves_path.resolve() - def get_reference_curves(self) -> Path: + def get_reference_curves_path(self) -> Path: """Gets the Reference Curves Directory. Returns @@ -465,7 +465,7 @@ def get_reference_curves(self) -> Path: Path Path to the Reference Curves Directory """ - return self._reference_curves.resolve() + return self._reference_curves_path.resolve() def set_generators(self, generators: list) -> None: """Gets the Producer model generators. diff --git a/src/dgcv/validation/model.py b/src/dgcv/validation/model.py index 2820f1b..2641209 100644 --- a/src/dgcv/validation/model.py +++ b/src/dgcv/validation/model.py @@ -1166,7 +1166,7 @@ def validate( working_path: Path Working path. sim_output_path: str - Simulator output path (Not used in this validator). + ProducerCurves output path (Not used in this validator). event_params: dict Event parameters fs: float diff --git a/src/dgcv/validation/performance.py b/src/dgcv/validation/performance.py index fa20864..92658e6 100644 --- a/src/dgcv/validation/performance.py +++ b/src/dgcv/validation/performance.py @@ -628,7 +628,7 @@ def validate( working_path: Path Working path. sim_output_path: str - Simulator output path. + ProducerCurves output path. 
event_params: dict Event parameters fs: float diff --git a/tests/dgcv/test_tool.py b/tests/dgcv/test_tool.py index c8a2ac9..7e90189 100644 --- a/tests/dgcv/test_tool.py +++ b/tests/dgcv/test_tool.py @@ -1,53 +1,55 @@ import shutil from pathlib import Path +import pytest + from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters -from dgcv.core.model_validation import ModelValidation +from dgcv.core.validation import Validation from dgcv.model.compliance import Compliance -def _execute_tool(producer_model, producer_curves, reference_curves): +def _execute_tool(producer_model_path, producer_curves_path, reference_curves_path): testpath = Path(__file__).resolve().parent output_dir = Path(__file__).resolve().parent / "tmp" output_dir.mkdir(exist_ok=True) assert output_dir.exists() - if producer_model: - assert (testpath / producer_model).exists() - if producer_curves: - assert (testpath / producer_curves).exists() - if reference_curves: - assert (testpath / reference_curves).exists() + if producer_model_path: + assert (testpath / producer_model_path).exists() + if producer_curves_path: + assert (testpath / producer_curves_path).exists() + if reference_curves_path: + assert (testpath / reference_curves_path).exists() try: config._default_config.set("Dynawo", "simulation_limit", "120") only_dtr = True - if producer_model: - if "SM" in producer_model: + if producer_model_path: + if "SM" in producer_model_path: sim_type = 0 - elif "PPM" in producer_model: + elif "PPM" in producer_model_path: sim_type = 0 else: sim_type = 1 else: - if "SM" in producer_curves: + if "SM" in producer_curves_path: sim_type = 0 - elif "PPM" in producer_curves: + elif "PPM" in producer_curves_path: sim_type = 0 else: sim_type = 1 ep = Parameters( Path(shutil.which("dynawo.sh")).resolve() if shutil.which("dynawo.sh") else None, - testpath / producer_model if producer_model else None, - testpath / producer_curves if producer_curves else None, - testpath / reference_curves if reference_curves else None, + testpath / producer_model_path if producer_model_path else None, + testpath / producer_curves_path if producer_curves_path else None, + testpath / reference_curves_path if reference_curves_path else None, None, output_dir, only_dtr, sim_type, ) - md = ModelValidation(ep) + md = Validation(ep) compliance = md.validate(True) finally: @@ -55,7 +57,8 @@ def _execute_tool(producer_model, producer_curves, reference_curves): return compliance -def dynawo_test_perf_sm_model(): +@pytest.mark.skipif(not shutil.which("dynawo.sh"), reason="Dynawo not installed") +def test_perf_sm_model(): compliance = _execute_tool("../../examples/SM/Dynawo/SingleAux", None, None) assert [ Compliance.NonCompliant, @@ -85,7 +88,8 @@ def test_perf_sm_curves(): ] == compliance -def dynawo_test_perf_sm_complete(): +@pytest.mark.skipif(not shutil.which("dynawo.sh"), reason="Dynawo not installed") +def test_perf_sm_complete(): compliance = _execute_tool( "../../examples/SM/Dynawo/SingleAuxI", "../../examples/SM/ProducerCurves/", None ) @@ -102,7 +106,8 @@ def dynawo_test_perf_sm_complete(): ] == compliance -def dynawo_test_perf_ppm_model(): +@pytest.mark.skipif(not shutil.which("dynawo.sh"), reason="Dynawo not installed") +def test_perf_ppm_model(): compliance = _execute_tool("../../examples/PPM/Dynawo/SingleAux/WECC", None, None) assert [ Compliance.NonCompliant, @@ -128,7 +133,8 @@ def test_perf_ppm_curves(): ] == compliance -def dynawo_test_perf_ppm_complete(): +@pytest.mark.skipif(not shutil.which("dynawo.sh"), 
reason="Dynawo not installed") +def test_perf_ppm_complete(): compliance = _execute_tool( "../../examples/PPM/Dynawo/SingleAux/IEC2020", "../../examples/PPM/ProducerCurves/", @@ -145,7 +151,8 @@ def dynawo_test_perf_ppm_complete(): ] == compliance -def dynawo_test_model_validation_wecc_model(): +@pytest.mark.skipif(not shutil.which("dynawo.sh"), reason="Dynawo not installed") +def test_model_validation_wecc_model(): compliance = _execute_tool( "../../examples/Model/Wind/WECC/Dynawo", None, @@ -179,7 +186,8 @@ def dynawo_test_model_validation_wecc_model(): ] == compliance -def dynawo_test_model_validation_iec2015_model(): +@pytest.mark.skipif(not shutil.which("dynawo.sh"), reason="Dynawo not installed") +def test_model_validation_iec2015_model(): compliance = _execute_tool( "../../examples/Model/Wind/IEC2015/Dynawo", None, @@ -213,7 +221,8 @@ def dynawo_test_model_validation_iec2015_model(): ] == compliance -def dynawo_test_model_validation_iec2020_model(): +@pytest.mark.skipif(not shutil.which("dynawo.sh"), reason="Dynawo not installed") +def test_model_validation_iec2020_model(): compliance = _execute_tool( "../../examples/Model/Wind/IEC2020/Dynawo", None, @@ -349,7 +358,8 @@ def test_model_validation_iec2020_curves(): ] == compliance -def dynawo_test_model_validation_partial_reference(): +@pytest.mark.skipif(not shutil.which("dynawo.sh"), reason="Dynawo not installed") +def test_model_validation_partial_reference(): compliance = _execute_tool( "../../examples/Model/Wind/WECC/Dynawo", None, From 15ef03f3c295cb1d408069d499db2d90c69d1544 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 13:05:19 +0100 Subject: [PATCH 20/24] fix rename bugs --- src/dgcv/configuration/defaultConfig.ini | 4 ++-- src/dgcv/core/execution_parameters.py | 2 +- src/dgcv/dynawo/file_variables.py | 4 ++-- src/dgcv/dynawo/jobs.py | 4 +++- src/dgcv/files/dynawo_curves_file.py | 4 ++-- src/dgcv/launchers.py | 2 ++ src/dgcv/report/report.py | 2 +- tests/dgcv/test_tool.py | 2 ++ 8 files changed, 15 insertions(+), 9 deletions(-) diff --git a/src/dgcv/configuration/defaultConfig.ini b/src/dgcv/configuration/defaultConfig.ini index 084be0d..bbfc19e 100644 --- a/src/dgcv/configuration/defaultConfig.ini +++ b/src/dgcv/configuration/defaultConfig.ini @@ -442,8 +442,8 @@ MqG = 1 [CurvesVariables] SM = RotorSpeedPu,InternalAngle,AVRSetpointPu,MagnitudeControlledByAVRPu,NetworkFrequencyPu PPM = InjectedActiveCurrent,InjectedReactiveCurrent,AVRSetpointPu,MagnitudeControlledByAVRPu,NetworkFrequencyPu -ValidationZ3 = InjectedActiveCurrent,InjectedReactiveCurrent,AVRSetpointPu,MagnitudeControlledByAVRPu,NetworkFrequencyPu -ValidationZ1 = ActivePowerSetpointPu,ReactivePowerSetpointPu,AVRSetpointPu,InjectedActiveCurrent,InjectedReactiveCurrent,MagnitudeControlledByAVRPu +ModelValidationZ3 = InjectedActiveCurrent,InjectedReactiveCurrent,AVRSetpointPu,MagnitudeControlledByAVRPu,NetworkFrequencyPu +ModelValidationZ1 = ActivePowerSetpointPu,ReactivePowerSetpointPu,AVRSetpointPu,InjectedActiveCurrent,InjectedReactiveCurrent,MagnitudeControlledByAVRPu [Debug] # Modify the time range to include t0 in the showed range diff --git a/src/dgcv/core/execution_parameters.py b/src/dgcv/core/execution_parameters.py index a32048f..673ad4e 100644 --- a/src/dgcv/core/execution_parameters.py +++ b/src/dgcv/core/execution_parameters.py @@ -172,4 +172,4 @@ def is_complete(self): bool True if it is a complete execution, False otherwise """ - return self.is_valid() and self._producer.has_reference_curves() + return self.is_valid() and 
self._producer.has_reference_curves_path() diff --git a/src/dgcv/dynawo/file_variables.py b/src/dgcv/dynawo/file_variables.py index 9ac677c..917a93c 100644 --- a/src/dgcv/dynawo/file_variables.py +++ b/src/dgcv/dynawo/file_variables.py @@ -24,9 +24,9 @@ def __obtain_value(self, value_definition: str) -> str: def __obtain_section_value(self, section: str, key: str, generator_type: str) -> str: key_type = f"{key}_{generator_type}" if config.has_key(section, key): - return self.__obtain_value(config.get_value(self._bm_section, key)) + return self.__obtain_value(config.get_value(section, key)) elif config.has_key(section, key_type): - return self.__obtain_value(config.get_value(self._bm_section, key_type)) + return self.__obtain_value(config.get_value(section, key_type)) return None diff --git a/src/dgcv/dynawo/jobs.py b/src/dgcv/dynawo/jobs.py index c88c19e..dec1b6e 100644 --- a/src/dgcv/dynawo/jobs.py +++ b/src/dgcv/dynawo/jobs.py @@ -46,7 +46,9 @@ def complete_file(self, working_oc_dir: Path, event_params: dict) -> None: variables_dict["solver_id"] = "IDA" variables_dict["dgcv_ddb_path"] = config.get_config_dir() / "ddb" - variables_dict["producer_dyd"] = self._producer_curves.get_producer().get_producer_dyd().name + variables_dict["producer_dyd"] = ( + self._producer_curves.get_producer().get_producer_dyd().name + ) self.complete_parameters(variables_dict, event_params) diff --git a/src/dgcv/files/dynawo_curves_file.py b/src/dgcv/files/dynawo_curves_file.py index f6d60c4..62d34a1 100644 --- a/src/dgcv/files/dynawo_curves_file.py +++ b/src/dgcv/files/dynawo_curves_file.py @@ -177,9 +177,9 @@ def _add_model_validation_curves( curves_dict: dict, ) -> None: if zone == 3: - generator_variables = config.get_list("CurvesVariables", "ValidationZ3") + generator_variables = config.get_list("CurvesVariables", "ModelValidationZ3") elif zone == 1: - generator_variables = config.get_list("CurvesVariables", "ValidationZ1") + generator_variables = config.get_list("CurvesVariables", "ModelValidationZ1") else: generator_variables = [] _add_generators_curves(curves_root, generators, generator_variables, control_mode, curves_dict) diff --git a/src/dgcv/launchers.py b/src/dgcv/launchers.py index bc7dca3..7a67ab4 100644 --- a/src/dgcv/launchers.py +++ b/src/dgcv/launchers.py @@ -346,6 +346,8 @@ def _get_dwo_launcher(args: argparse.Namespace, dwo_launcher_name: str) -> Path: _check_launchers(dwo_launcher) initialization.init(dwo_launcher, args.debug) + return dwo_launcher + def _execute_anonymize( p: argparse.ArgumentParser, args: argparse.Namespace, dwo_launcher: Path diff --git a/src/dgcv/report/report.py b/src/dgcv/report/report.py index 585fc96..b26f3f2 100644 --- a/src/dgcv/report/report.py +++ b/src/dgcv/report/report.py @@ -410,7 +410,7 @@ def create_pdf( summary_description += f"Model: {model_template} \\\\" reference_template = None - if producer.has_reference_curves(): + if producer.has_reference_curves_path(): reference_template = str(producer.get_reference_path()).replace("\\", "\\\\") summary_description += f"Reference: {reference_template} \\\\" diff --git a/tests/dgcv/test_tool.py b/tests/dgcv/test_tool.py index 7e90189..584ac14 100644 --- a/tests/dgcv/test_tool.py +++ b/tests/dgcv/test_tool.py @@ -52,6 +52,8 @@ def _execute_tool(producer_model_path, producer_curves_path, reference_curves_pa md = Validation(ep) compliance = md.validate(True) + except Exception as e: + compliance = str(e) finally: shutil.rmtree(output_dir) return compliance From a0bd0833c88f30b6759bd23361e11a0e706bf6ab Mon Sep 17 
00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 13:27:40 +0100 Subject: [PATCH 21/24] fix bugs --- src/dgcv/dynawo/file_variables.py | 2 +- src/dgcv/report/figure.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/dgcv/dynawo/file_variables.py b/src/dgcv/dynawo/file_variables.py index 917a93c..c58bd03 100644 --- a/src/dgcv/dynawo/file_variables.py +++ b/src/dgcv/dynawo/file_variables.py @@ -52,7 +52,7 @@ def __get_value(self, key: str) -> str: return value if config.has_key("Dynawo", key): - value = self.__obtain_value(config.get_value("Dynawo", key)) + return self.__obtain_value(config.get_value("Dynawo", key)) return None diff --git a/src/dgcv/report/figure.py b/src/dgcv/report/figure.py index 8adc23e..b8c8c12 100644 --- a/src/dgcv/report/figure.py +++ b/src/dgcv/report/figure.py @@ -524,7 +524,7 @@ def _get_time_range( figures_description: dict, results: dict, time: list, -) -> dict: +) -> tuple[float, float]: curves = results["curves"] xmin = 99999 @@ -550,6 +550,8 @@ def _get_time_range( if xrange_max > xmax: xmax = xrange_max + return xmin, xmax + def get_common_time_range( operating_condition: str, From 66ce7f939dd7a1f5043d3db467a21f965c26e7e7 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 13:41:53 +0100 Subject: [PATCH 22/24] ignore test if dynawo is installed --- tests/dgcv/validation/test_sanity_checks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/dgcv/validation/test_sanity_checks.py b/tests/dgcv/validation/test_sanity_checks.py index 37986bb..08eabad 100644 --- a/tests/dgcv/validation/test_sanity_checks.py +++ b/tests/dgcv/validation/test_sanity_checks.py @@ -8,6 +8,7 @@ # demiguelm@aia.es # +import shutil from pathlib import Path import pytest @@ -142,6 +143,7 @@ def test_internal_lines(): ) +@pytest.mark.skipif(shutil.which("dynawo.sh"), reason="Dynawo installed") def test_launchers(): with pytest.raises(OSError) as pytest_wrapped_e: sanity_checks.check_launchers("dynawo.sh") From 588af6c2229934418d23edb6d5dee0c5c799fc98 Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 15:26:18 +0100 Subject: [PATCH 23/24] new CurvesManager class --- src/dgcv/curves/curves.py | 2 +- src/dgcv/curves/manager.py | 27 +++++++++ .../producer_curves.py => curves/producer.py} | 0 src/dgcv/curves/producer_factory.py | 43 +++++++++++++ src/dgcv/dynawo/curves.py | 4 +- src/dgcv/dynawo/dyd.py | 10 ++-- src/dgcv/dynawo/file_variables.py | 10 ++-- src/dgcv/dynawo/jobs.py | 10 ++-- src/dgcv/dynawo/par.py | 6 +- src/dgcv/dynawo/table.py | 6 +- src/dgcv/model/benchmark.py | 60 +++++++------------ src/dgcv/model/operating_condition.py | 47 +++++++++------ src/dgcv/validation/model.py | 2 +- src/dgcv/validation/performance.py | 2 +- 14 files changed, 144 insertions(+), 85 deletions(-) create mode 100644 src/dgcv/curves/manager.py rename src/dgcv/{core/producer_curves.py => curves/producer.py} (100%) create mode 100644 src/dgcv/curves/producer_factory.py diff --git a/src/dgcv/curves/curves.py b/src/dgcv/curves/curves.py index c88f6bb..e99f357 100644 --- a/src/dgcv/curves/curves.py +++ b/src/dgcv/curves/curves.py @@ -13,9 +13,9 @@ from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters -from dgcv.core.producer_curves import ProducerCurves, get_cfg_oc_name from dgcv.core.validator import Disconnection_Model from dgcv.curves.importer import CurvesImporter +from dgcv.curves.producer import ProducerCurves, get_cfg_oc_name from dgcv.files import manage_files from dgcv.model.parameters import Gen_init, 
Gen_params diff --git a/src/dgcv/curves/manager.py b/src/dgcv/curves/manager.py new file mode 100644 index 0000000..3e92392 --- /dev/null +++ b/src/dgcv/curves/manager.py @@ -0,0 +1,27 @@ +from pathlib import Path + +from dgcv.core.execution_parameters import Parameters +from dgcv.curves.curves import ImportedCurves +from dgcv.curves.producer_factory import get_producer_curves + + +class CurvesManager: + def __init__( + self, + parameters: Parameters, + pcs_benchmark_name: str, + stable_time: float, + lib_path: Path, + templates_path: Path, + pcs_name: str, + ): + self._producer_curves = get_producer_curves( + parameters, pcs_benchmark_name, stable_time, lib_path, templates_path, pcs_name + ) + self._reference_curves = ImportedCurves(parameters) + + def get_producer_curves(self): + return self._producer_curves + + def get_reference_curves(self): + return self._reference_curves diff --git a/src/dgcv/core/producer_curves.py b/src/dgcv/curves/producer.py similarity index 100% rename from src/dgcv/core/producer_curves.py rename to src/dgcv/curves/producer.py diff --git a/src/dgcv/curves/producer_factory.py b/src/dgcv/curves/producer_factory.py new file mode 100644 index 0000000..5d21aa2 --- /dev/null +++ b/src/dgcv/curves/producer_factory.py @@ -0,0 +1,43 @@ +from pathlib import Path + +from dgcv.configuration.cfg import config +from dgcv.core.execution_parameters import Parameters +from dgcv.curves.curves import ImportedCurves +from dgcv.dynawo.curves import DynawoCurves + + +def get_producer_curves( + parameters: Parameters, + pcs_benchmark_name: str, + stable_time: float, + lib_path: Path, + templates_path: Path, + pcs_name: str, +): + producer = parameters.get_producer() + if producer.is_dynawo_model(): + job_name = config.get_value(pcs_benchmark_name, "job_name") + rte_model = config.get_value(pcs_benchmark_name, "TSO_model") + omega_model = config.get_value(pcs_benchmark_name, "Omega_model") + + file_path = Path(__file__).resolve().parent.parent + sim_type_path = producer.get_sim_type_str() + model_path = file_path / lib_path / "TSO_model" / rte_model + omega_path = file_path / lib_path / "Omega" / omega_model + pcs_path = file_path / templates_path / sim_type_path / pcs_name + if not pcs_path.exists(): + pcs_path = config.get_config_dir() / templates_path / sim_type_path / pcs_name + + return DynawoCurves( + parameters, + pcs_name, + model_path, + omega_path, + pcs_path, + job_name, + stable_time, + ) + elif producer.is_user_curves(): + return ImportedCurves(parameters) + + raise ValueError("Unsupported producer curves") diff --git a/src/dgcv/dynawo/curves.py b/src/dgcv/dynawo/curves.py index 0051b77..bf7bddc 100644 --- a/src/dgcv/dynawo/curves.py +++ b/src/dgcv/dynawo/curves.py @@ -16,8 +16,8 @@ from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters -from dgcv.core.producer_curves import ProducerCurves, get_cfg_oc_name from dgcv.core.validator import Disconnection_Model +from dgcv.curves.producer import ProducerCurves, get_cfg_oc_name from dgcv.dynawo import dynawo from dgcv.dynawo.dyd import DydFile from dgcv.dynawo.jobs import JobsFile @@ -234,7 +234,7 @@ def __complete_model( oc_name, ) if reference_event_start_time and event_params["start_time"] != reference_event_start_time: - dgcv_logging.get_logger("Dynawo ProducerCurves").warning( + dgcv_logging.get_logger("ProducerCurves").warning( f"The simulation will use the 'sim_t_event_start' value present in the Reference " f"Curves ({reference_event_start_time}), instead of the value configured " 
f"({event_params['start_time']})." diff --git a/src/dgcv/dynawo/dyd.py b/src/dgcv/dynawo/dyd.py index dc9a7e3..0677969 100644 --- a/src/dgcv/dynawo/dyd.py +++ b/src/dgcv/dynawo/dyd.py @@ -9,21 +9,21 @@ # from pathlib import Path -from dgcv.dynawo.curves import ProducerCurves +from dgcv.curves.producer import ProducerCurves from dgcv.dynawo.file_variables import FileVariables from dgcv.dynawo.translator import dynawo_translator from dgcv.files import replace_placeholders class DydFile(FileVariables): - def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: str): + def __init__(self, dynawo_curves: ProducerCurves, bm_section: str, oc_section: str): tool_variables = [ "generator_id", "connection_event", ] super().__init__( tool_variables, - producer_curves, + dynawo_curves, bm_section, oc_section, ) @@ -42,9 +42,9 @@ def complete_file(self, working_oc_dir: Path, event_params: dict) -> None: if event_params["connect_to"]: connect_event_to = dynawo_translator.get_dynawo_variable( - self._producer_curves.get_producer().generators[0].lib, event_params["connect_to"] + self._dynawo_curves.get_producer().generators[0].lib, event_params["connect_to"] ) - variables_dict["generator_id"] = self._producer_curves.get_producer().generators[0].id + variables_dict["generator_id"] = self._dynawo_curves.get_producer().generators[0].id variables_dict["connection_event"] = connect_event_to self.complete_parameters(variables_dict, event_params) diff --git a/src/dgcv/dynawo/file_variables.py b/src/dgcv/dynawo/file_variables.py index c58bd03..c63cc7c 100644 --- a/src/dgcv/dynawo/file_variables.py +++ b/src/dgcv/dynawo/file_variables.py @@ -1,5 +1,5 @@ from dgcv.configuration.cfg import config -from dgcv.core.producer_curves import ProducerCurves +from dgcv.curves.producer import ProducerCurves from dgcv.electrical.generator_variables import generator_variables @@ -7,11 +7,11 @@ class FileVariables: def __init__( self, tool_variables: list, - producer_curves: ProducerCurves, + dynawo_curves: ProducerCurves, bm_section: str, oc_section: str, ): - self._producer_curves = producer_curves + self._dynawo_curves = dynawo_curves self._bm_section = bm_section self._oc_section = oc_section self._model_section = f"{bm_section}.{oc_section}.Model" @@ -19,7 +19,7 @@ def __init__( self._tool_variables = tool_variables def __obtain_value(self, value_definition: str) -> str: - return self._producer_curves.obtain_value(value_definition) + return self._dynawo_curves.obtain_value(value_definition) def __obtain_section_value(self, section: str, key: str, generator_type: str) -> str: key_type = f"{key}_{generator_type}" @@ -32,7 +32,7 @@ def __obtain_section_value(self, section: str, key: str, generator_type: str) -> def __get_value(self, key: str) -> str: generator_type = generator_variables.get_generator_type( - self._producer_curves.get_producer().u_nom + self._dynawo_curves.get_producer().u_nom ) value = self.__obtain_section_value(self._bm_section, key, generator_type) diff --git a/src/dgcv/dynawo/jobs.py b/src/dgcv/dynawo/jobs.py index dec1b6e..45adce7 100644 --- a/src/dgcv/dynawo/jobs.py +++ b/src/dgcv/dynawo/jobs.py @@ -10,13 +10,13 @@ from pathlib import Path from dgcv.configuration.cfg import config -from dgcv.dynawo.curves import ProducerCurves +from dgcv.curves.producer import ProducerCurves from dgcv.dynawo.file_variables import FileVariables from dgcv.files import replace_placeholders class JobsFile(FileVariables): - def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: 
str): + def __init__(self, dynawo_curves: ProducerCurves, bm_section: str, oc_section: str): tool_variables = [ "solver_lib", "solver_id", @@ -25,7 +25,7 @@ def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: ] super().__init__( tool_variables, - producer_curves, + dynawo_curves, bm_section, oc_section, ) @@ -46,9 +46,7 @@ def complete_file(self, working_oc_dir: Path, event_params: dict) -> None: variables_dict["solver_id"] = "IDA" variables_dict["dgcv_ddb_path"] = config.get_config_dir() / "ddb" - variables_dict["producer_dyd"] = ( - self._producer_curves.get_producer().get_producer_dyd().name - ) + variables_dict["producer_dyd"] = self._dynawo_curves.get_producer().get_producer_dyd().name self.complete_parameters(variables_dict, event_params) diff --git a/src/dgcv/dynawo/par.py b/src/dgcv/dynawo/par.py index bbf4eb5..6efa9dd 100644 --- a/src/dgcv/dynawo/par.py +++ b/src/dgcv/dynawo/par.py @@ -9,14 +9,14 @@ # from pathlib import Path -from dgcv.dynawo.curves import ProducerCurves +from dgcv.curves.producer import ProducerCurves from dgcv.dynawo.file_variables import FileVariables from dgcv.files import replace_placeholders from dgcv.model.parameters import Gen_init class ParFile(FileVariables): - def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: str): + def __init__(self, dynawo_curves: ProducerCurves, bm_section: str, oc_section: str): tool_variables = [ "line_XPu", "line_RPu", @@ -32,7 +32,7 @@ def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: ] super().__init__( tool_variables, - producer_curves, + dynawo_curves, bm_section, oc_section, ) diff --git a/src/dgcv/dynawo/table.py b/src/dgcv/dynawo/table.py index 71d515d..61c3a22 100644 --- a/src/dgcv/dynawo/table.py +++ b/src/dgcv/dynawo/table.py @@ -9,14 +9,14 @@ # from pathlib import Path -from dgcv.dynawo.curves import ProducerCurves +from dgcv.curves.producer import ProducerCurves from dgcv.dynawo.file_variables import FileVariables from dgcv.files import replace_placeholders from dgcv.model.parameters import Gen_init class TableFile(FileVariables): - def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: str): + def __init__(self, dynawo_curves: ProducerCurves, bm_section: str, oc_section: str): tool_variables = [ "start_event", "end_event", @@ -26,7 +26,7 @@ def __init__(self, producer_curves: ProducerCurves, bm_section: str, oc_section: ] super().__init__( tool_variables, - producer_curves, + dynawo_curves, bm_section, oc_section, ) diff --git a/src/dgcv/model/benchmark.py b/src/dgcv/model/benchmark.py index 9317034..e2d70c2 100644 --- a/src/dgcv/model/benchmark.py +++ b/src/dgcv/model/benchmark.py @@ -13,9 +13,8 @@ from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters from dgcv.core.global_variables import CASE_SEPARATOR, MODEL_VALIDATION_PPM -from dgcv.core.producer_curves import ProducerCurves -from dgcv.curves.curves import ImportedCurves -from dgcv.dynawo.curves import DynawoCurves +from dgcv.core.validator import Validator +from dgcv.curves.manager import CurvesManager from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging from dgcv.model.compliance import Compliance @@ -72,14 +71,12 @@ def __init__( stable_time = config.get_float("GridCode", "stable_time", 100.0) ( op_names, - producer_curves, - reference_manager, + curves_manager, validator, ) = self.__prepare_benchmark_validation(parameters, stable_time) self._op_cond_list = [ 
OperatingCondition( - producer_curves, - reference_manager, + curves_manager, validator, parameters, pcs_name, @@ -90,7 +87,7 @@ def __init__( def __prepare_benchmark_validation( self, parameters: Parameters, stable_time: float - ) -> tuple[list, ProducerCurves, ImportedCurves]: + ) -> tuple[list, CurvesManager, Validator]: # Read Benchmark configurations and prepare current Benchmark work path. # Creates a specific folder by pcs if not (self._working_dir / self._pcs_name).is_dir(): @@ -99,51 +96,34 @@ def __prepare_benchmark_validation( manage_files.create_dir(self._working_dir / self._pcs_name / self._name) pcs_benchmark_name = self._pcs_name + CASE_SEPARATOR + self._name - producer = parameters.get_producer() - if producer.is_dynawo_model(): - job_name = config.get_value(pcs_benchmark_name, "job_name") - rte_model = config.get_value(pcs_benchmark_name, "TSO_model") - omega_model = config.get_value(pcs_benchmark_name, "Omega_model") - - file_path = Path(__file__).resolve().parent.parent - sim_type_path = producer.get_sim_type_str() - model_path = file_path / self._lib_path / "TSO_model" / rte_model - omega_path = file_path / self._lib_path / "Omega" / omega_model - pcs_path = file_path / self._templates_path / sim_type_path / self._pcs_name - if not pcs_path.exists(): - pcs_path = ( - config.get_config_dir() / self._templates_path / sim_type_path / self._pcs_name - ) - - producer_curves = DynawoCurves( - parameters, - self._pcs_name, - model_path, - omega_path, - pcs_path, - job_name, - stable_time, - ) - elif producer.is_user_curves(): - producer_curves = ImportedCurves(parameters) + curves_manager = CurvesManager( + parameters, + pcs_benchmark_name, + stable_time, + self._lib_path, + self._templates_path, + self._pcs_name, + ) - reference_manager = ImportedCurves(parameters) ops = config.get_list("PCS-OperatingConditions", pcs_benchmark_name) validations = self.__initialize_validation_by_benchmark() - if producer.get_sim_type() >= MODEL_VALIDATION_PPM: + if parameters.get_producer().get_sim_type() >= MODEL_VALIDATION_PPM: validator = ModelValidator( pcs_benchmark_name, parameters, validations, - reference_manager.is_field_measurements(), + curves_manager.get_reference_curves().is_field_measurements(), ) else: validator = PerformanceValidator( - parameters, stable_time, validations, reference_manager.is_field_measurements() + parameters, + stable_time, + validations, + curves_manager.get_reference_curves().is_field_measurements(), ) # If it is not a pcs with multiple operating conditions, returns itself - return ops, producer_curves, reference_manager, validator + return ops, curves_manager, validator def __initialize_validation_by_benchmark(self) -> list: # Prepare the validation list by pcs.benchmark diff --git a/src/dgcv/model/operating_condition.py b/src/dgcv/model/operating_condition.py index f05d499..925f759 100644 --- a/src/dgcv/model/operating_condition.py +++ b/src/dgcv/model/operating_condition.py @@ -14,13 +14,19 @@ from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters -from dgcv.core.producer_curves import ProducerCurves, get_cfg_oc_name +from dgcv.core.global_variables import CASE_SEPARATOR from dgcv.core.validator import Validator -from dgcv.curves.curves import ImportedCurves +from dgcv.curves.manager import CurvesManager from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging +def get_cfg_oc_name(pcs_bm_name: str, oc_name: str) -> str: + if pcs_bm_name == oc_name: + return oc_name + return pcs_bm_name + 
CASE_SEPARATOR + oc_name + + class OperatingCondition: """Thrid-level representation of the pcs described in the DTR. A Grid Topology can contain several Operating Conditions, in each Operating Condition @@ -46,15 +52,13 @@ class OperatingCondition: def __init__( self, - producer_curves: ProducerCurves, - reference_curves: ImportedCurves, + curves_manager: CurvesManager, validator: Validator, parameters: Parameters, pcs_name: str, oc_name: str, ): - self._producer_curves = producer_curves - self._reference_curves = reference_curves + self._curves_manager = curves_manager self._validator = validator self._working_dir = parameters.get_working_dir() self._producer = parameters.get_producer() @@ -84,10 +88,11 @@ def __obtain_curve( curves = dict() reference_event_start_time = None if self.__has_reference_curves(): - reference_event_start_time, curves["reference"] = ( - self._reference_curves.obtain_reference_curve( - working_oc_dir, pcs_bm_name, self._name, self.__get_reference_curves_path() - ) + ( + reference_event_start_time, + curves["reference"], + ) = self._curves_manager.get_reference_curves().obtain_reference_curve( + working_oc_dir, pcs_bm_name, self._name, self.__get_reference_curves_path() ) else: curves["reference"] = pd.DataFrame() @@ -99,7 +104,7 @@ def __obtain_curve( success, has_simulated_curves, curves["calculated"], - ) = self._producer_curves.obtain_simulated_curve( + ) = self._curves_manager.get_producer_curves().obtain_simulated_curve( working_oc_dir, pcs_bm_name, bm_name, @@ -129,16 +134,22 @@ def __validate( if self._validator.is_defined_cct(): self._validator.set_time_cct( - self._producer_curves.get_time_cct( + self._curves_manager.get_producer_curves().get_time_cct( working_oc_dir, jobs_output_dir, event_params["duration_time"], ) ) - self._validator.set_generators_imax(self._producer_curves.get_generators_imax()) - self._validator.set_disconnection_model(self._producer_curves.get_disconnection_model()) + self._validator.set_generators_imax( + self._curves_manager.get_producer_curves().get_generators_imax() + ) + self._validator.set_disconnection_model( + self._curves_manager.get_producer_curves().get_disconnection_model() + ) self._validator.set_setpoint_variation( - self._producer_curves.get_setpoint_variation(get_cfg_oc_name(pcs_bm_name, self._name)) + self._curves_manager.get_producer_curves().get_setpoint_variation( + get_cfg_oc_name(pcs_bm_name, self._name) + ) ) results = self._validator.validate( @@ -203,7 +214,7 @@ def validate( working_path: Path Working path. jobs_output_dir: Path - ProducerCurves output path. + Simulator output path. event_params: dict Event parameters fs: float @@ -235,7 +246,7 @@ def validate( else: results = {"compliance": False, "curves": None} - results["udim"] = self._producer_curves.get_generator_u_dim() + results["udim"] = self._curves_manager.get_producer_curves().get_generator_u_dim() return success, results def has_required_curves( @@ -257,7 +268,7 @@ def has_required_curves( Path Working path. Path - ProducerCurves output path. + Simulator output path. dict Event parameters float diff --git a/src/dgcv/validation/model.py b/src/dgcv/validation/model.py index 2641209..2820f1b 100644 --- a/src/dgcv/validation/model.py +++ b/src/dgcv/validation/model.py @@ -1166,7 +1166,7 @@ def validate( working_path: Path Working path. sim_output_path: str - ProducerCurves output path (Not used in this validator). + Simulator output path (Not used in this validator). 
event_params: dict Event parameters fs: float diff --git a/src/dgcv/validation/performance.py b/src/dgcv/validation/performance.py index 92658e6..fa20864 100644 --- a/src/dgcv/validation/performance.py +++ b/src/dgcv/validation/performance.py @@ -628,7 +628,7 @@ def validate( working_path: Path Working path. sim_output_path: str - ProducerCurves output path. + Simulator output path. event_params: dict Event parameters fs: float From 0b8aee4f485e4cce40bd3078d9b54046471a850c Mon Sep 17 00:00:00 2001 From: marcosmc Date: Thu, 19 Dec 2024 15:59:49 +0100 Subject: [PATCH 24/24] move curves functions to manager --- src/dgcv/curves/manager.py | 175 ++++++++++++++++++++++ src/dgcv/model/benchmark.py | 43 ++++-- src/dgcv/model/operating_condition.py | 206 +++----------------------- 3 files changed, 219 insertions(+), 205 deletions(-) diff --git a/src/dgcv/curves/manager.py b/src/dgcv/curves/manager.py index 3e92392..481621e 100644 --- a/src/dgcv/curves/manager.py +++ b/src/dgcv/curves/manager.py @@ -1,8 +1,12 @@ from pathlib import Path +import pandas as pd + from dgcv.core.execution_parameters import Parameters from dgcv.curves.curves import ImportedCurves from dgcv.curves.producer_factory import get_producer_curves +from dgcv.files import manage_files +from dgcv.logging.logging import dgcv_logging class CurvesManager: @@ -15,11 +19,182 @@ def __init__( templates_path: Path, pcs_name: str, ): + self._working_dir = parameters.get_working_dir() + self._producer = parameters.get_producer() + self._pcs_name = pcs_name + self._producer_curves = get_producer_curves( parameters, pcs_benchmark_name, stable_time, lib_path, templates_path, pcs_name ) self._reference_curves = ImportedCurves(parameters) + def __has_reference_curves(self) -> bool: + return self._producer.has_reference_curves_path() + + def __get_reference_curves_path(self) -> Path: + if not hasattr(self, "_reference_curves_path"): + self._reference_curves_path = self._producer.get_reference_curves_path() + return self._reference_curves_path + + def __obtain_curve( + self, + pcs_bm_name: str, + bm_name: str, + oc_name: str, + ): + # Create a specific folder by operational point + working_oc_dir = self._working_dir / self._pcs_name / bm_name / oc_name + manage_files.create_dir(working_oc_dir) + + curves = dict() + reference_event_start_time = None + if self.__has_reference_curves(): + ( + reference_event_start_time, + curves["reference"], + ) = self.get_reference_curves().obtain_reference_curve( + working_oc_dir, pcs_bm_name, oc_name, self.__get_reference_curves_path() + ) + else: + curves["reference"] = pd.DataFrame() + + ( + jobs_output_dir, + event_params, + fs, + success, + has_simulated_curves, + curves["calculated"], + ) = self.get_producer_curves().obtain_simulated_curve( + working_oc_dir, + pcs_bm_name, + bm_name, + oc_name, + reference_event_start_time, + ) + + return ( + working_oc_dir, + jobs_output_dir, + event_params, + fs, + success, + has_simulated_curves, + curves, + ) + + def _check_curves( + self, + measurement_names: list, + curves: pd.DataFrame, + curves_name: str, + review_curves_set: bool, + ) -> bool: + has_curves = True + if review_curves_set: + if curves.empty: + dgcv_logging.get_logger("Curves Manager").warning( + f"Test without {curves_name} curves file" + ) + has_curves = False + else: + missed_curves = [] + for key in measurement_names: + if key not in curves: + missed_curves.append(key) + has_curves = False + if not has_curves: + dgcv_logging.get_logger("Curves Manager").warning( + f"Test without {curves_name} curve 
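`_check_curves`, now owned by the manager, reduces to a column-presence test on the imported DataFrame: a curve set is usable only if every required measurement name appears as a column. The same check in isolation, under the assumption that the column names used below are hypothetical rather than the tool's real magnitudes:

```python
import pandas as pd

def missing_measurements(measurement_names: list, curves: pd.DataFrame) -> list:
    # Mirrors the loop in CurvesManager._check_curves: collect every required
    # measurement that is not present as a column of the curves DataFrame.
    if curves.empty:
        return list(measurement_names)
    return [name for name in measurement_names if name not in curves]

df = pd.DataFrame({"time": [0.0, 0.1], "BusPDR_BUS_ActivePower": [0.5, 0.52]})
print(missing_measurements(["BusPDR_BUS_ActivePower", "BusPDR_BUS_Voltage"], df))
# ['BusPDR_BUS_Voltage'] -> this is the list the warning branch would log
```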
for keys {missed_curves}" + ) + return has_curves + + def has_required_curves( + self, + measurement_names: list, + pcs_bm_name: str, + bm_name: str, + oc_name: str, + ) -> tuple[Path, Path, dict, float, bool, bool, int, dict]: + """Check if all curves are present. + + Parameters + ---------- + pcs_bm_name: str + Composite name, pcs + Benchmark name + bm_name: str + Benchmark name + + Returns + ------- + Path + Working path. + Path + Simulator output path. + dict + Event parameters + float + Frequency sampling + bool + True if simulation is success + bool + True if simulation calculated curves + int + 0 all curves are present + 1 producer's curves are missing + 2 reference curves are missing + 3 all curves are missing + dict + Calculated and reference curves + """ + ( + working_oc_dir, + jobs_output_dir, + event_params, + fs, + success, + has_simulated_curves, + curves, + ) = self.__obtain_curve( + pcs_bm_name, + bm_name, + oc_name, + ) + + # If the tool has the model, it is assumed that the simulated curves are always available, + # if they are not available it is due to a failure in the simulation, this event is + # handled differently. + sim_curves = self._check_curves( + measurement_names, + curves["calculated"], + "producer", + not self._producer.is_dynawo_model(), + ) + ref_curves = self._check_curves( + measurement_names, curves["reference"], "reference", self.__has_reference_curves() + ) + + if sim_curves and ref_curves: + has_curves = 0 + elif not sim_curves and ref_curves: + has_curves = 1 + elif sim_curves and not ref_curves: + has_curves = 2 + else: + dgcv_logging.get_logger("Curves Manager").warning("Test without curves") + has_curves = 3 + + return ( + working_oc_dir, + jobs_output_dir, + event_params, + fs, + success, + has_simulated_curves, + has_curves, + curves, + ) + def get_producer_curves(self): return self._producer_curves diff --git a/src/dgcv/model/benchmark.py b/src/dgcv/model/benchmark.py index e2d70c2..116bb4b 100644 --- a/src/dgcv/model/benchmark.py +++ b/src/dgcv/model/benchmark.py @@ -62,6 +62,7 @@ def __init__( self._pcs_zone = pcs_zone self._report_name = report_name self._name = benchmark_name + self._parameters = parameters self._working_dir = parameters.get_working_dir() self._output_dir = parameters.get_output_dir() self._templates_path = Path(config.get_value("Global", "templates_path")) @@ -74,16 +75,9 @@ def __init__( curves_manager, validator, ) = self.__prepare_benchmark_validation(parameters, stable_time) - self._op_cond_list = [ - OperatingCondition( - curves_manager, - validator, - parameters, - pcs_name, - op_name, - ) - for op_name in op_names - ] + self._curves_manager = curves_manager + self._validator = validator + self._op_names = op_names def __prepare_benchmark_validation( self, parameters: Parameters, stable_time: float @@ -467,7 +461,7 @@ def __init_figures_tap(self, validations: list, pcs_benchmark_name: str) -> None def __validate( self, - op_cond: OperatingCondition, + op_name: str, pcs_benchmark_name: str, working_path: Path, jobs_output_dir: Path, @@ -477,7 +471,15 @@ def __validate( has_simulated_curves: bool, curves: dict, ): + op_cond = OperatingCondition( + self._parameters, + self._pcs_name, + op_name, + ) + op_cond_success, results = op_cond.validate( + self._curves_manager, + self._validator, pcs_benchmark_name, working_path, jobs_output_dir, @@ -529,7 +531,10 @@ def validate( # Validate each operational point pcs_benchmark_name = self._pcs_name + CASE_SEPARATOR + self._name - for op_cond in self._op_cond_list: + for op_name 
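`has_required_curves` folds the two availability flags into a single status code that `Benchmark.validate` later compares against 0. The mapping on its own, as a self-contained sketch:

```python
def curves_status(sim_curves: bool, ref_curves: bool) -> int:
    # 0: all curves present        1: producer curves missing
    # 2: reference curves missing  3: all curves missing
    if sim_curves and ref_curves:
        return 0
    if not sim_curves and ref_curves:
        return 1
    if sim_curves and not ref_curves:
        return 2
    return 3

assert curves_status(True, True) == 0
assert curves_status(False, True) == 1
assert curves_status(True, False) == 2
assert curves_status(False, False) == 3
```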
in self._op_names: + dgcv_logging.get_logger("Benchmark").info( + "RUNNING BENCHMARK: " + pcs_benchmark_name + ", OPER. COND.: " + op_name + ) ( working_path, jobs_output_dir, @@ -539,10 +544,16 @@ def validate( has_simulated_curves, has_curves, curves, - ) = op_cond.has_required_curves(pcs_benchmark_name, self._name) + ) = self._curves_manager.has_required_curves( + self._validator.get_measurement_names(), + pcs_benchmark_name, + self._name, + op_name, + ) + if has_curves == 0: op_cond_success, results, compliance = self.__validate( - op_cond, + op_name, pcs_benchmark_name, working_path, jobs_output_dir, @@ -571,12 +582,12 @@ def validate( int(self._pcs_zone), self._pcs_name, self._name, - op_cond.get_name(), + op_name, compliance, self._report_name, ) ) - pcs_results[pcs_benchmark_name + CASE_SEPARATOR + op_cond.get_name()] = results + pcs_results[pcs_benchmark_name + CASE_SEPARATOR + op_name] = results return success diff --git a/src/dgcv/model/operating_condition.py b/src/dgcv/model/operating_condition.py index 925f759..be272c2 100644 --- a/src/dgcv/model/operating_condition.py +++ b/src/dgcv/model/operating_condition.py @@ -10,14 +10,11 @@ import logging from pathlib import Path -import pandas as pd - from dgcv.configuration.cfg import config from dgcv.core.execution_parameters import Parameters from dgcv.core.global_variables import CASE_SEPARATOR from dgcv.core.validator import Validator from dgcv.curves.manager import CurvesManager -from dgcv.files import manage_files from dgcv.logging.logging import dgcv_logging @@ -40,26 +37,14 @@ class OperatingCondition: Name of the current pcs oc_name: str Name of the current OperatingCondition - model_path: Path - Model library directory - omega_path: Path - Omega library directory - pcs_path: Path - PCS configuration directory - job_name: str - Dynawo job name """ def __init__( self, - curves_manager: CurvesManager, - validator: Validator, parameters: Parameters, pcs_name: str, oc_name: str, ): - self._curves_manager = curves_manager - self._validator = validator self._working_dir = parameters.get_working_dir() self._producer = parameters.get_producer() self._pcs_name = pcs_name @@ -68,62 +53,10 @@ def __init__( # Read default values self._thr_ss_tol = config.get_float("GridCode", "thr_ss_tol", 0.002) - def __has_reference_curves(self) -> bool: - return self._producer.has_reference_curves_path() - - def __get_reference_curves_path(self) -> Path: - if not hasattr(self, "_reference_curves_path"): - self._reference_curves_path = self._producer.get_reference_curves_path() - return self._reference_curves_path - - def __obtain_curve( - self, - pcs_bm_name: str, - bm_name: str, - ): - # Create a specific folder by operational point - working_oc_dir = self._working_dir / self._pcs_name / bm_name / self._name - manage_files.create_dir(working_oc_dir) - - curves = dict() - reference_event_start_time = None - if self.__has_reference_curves(): - ( - reference_event_start_time, - curves["reference"], - ) = self._curves_manager.get_reference_curves().obtain_reference_curve( - working_oc_dir, pcs_bm_name, self._name, self.__get_reference_curves_path() - ) - else: - curves["reference"] = pd.DataFrame() - - ( - jobs_output_dir, - event_params, - fs, - success, - has_simulated_curves, - curves["calculated"], - ) = self._curves_manager.get_producer_curves().obtain_simulated_curve( - working_oc_dir, - pcs_bm_name, - bm_name, - self._name, - reference_event_start_time, - ) - - return ( - working_oc_dir, - jobs_output_dir, - event_params, - fs, - success, - 
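With the curves manager and validator now shared at benchmark level, the per-operating-condition loop becomes: check curve availability, build a throwaway `OperatingCondition`, and validate only when everything is present. A reduced sketch of that control flow; the summary bookkeeping is dropped, the missing-curves branch is an assumption here, and "." stands in for `CASE_SEPARATOR`:

```python
# Reduced sketch of Benchmark.validate's per-operating-condition loop; the real
# code also records a compliance summary entry and report data per condition.
def run_operating_conditions(curves_manager, validator, parameters,
                             pcs_name, bm_name, op_names, make_operating_condition):
    pcs_results = {}
    pcs_bm_name = pcs_name + "." + bm_name  # "." assumed for CASE_SEPARATOR
    for op_name in op_names:
        (working_path, jobs_output_dir, event_params, fs,
         success, has_simulated_curves, has_curves, curves) = \
            curves_manager.has_required_curves(
                validator.get_measurement_names(), pcs_bm_name, bm_name, op_name
            )
        if has_curves == 0:  # everything needed is available
            op_cond = make_operating_condition(parameters, pcs_name, op_name)
            success, results = op_cond.validate(
                curves_manager, validator, pcs_bm_name, working_path,
                jobs_output_dir, event_params, fs, success,
                has_simulated_curves, curves,
            )
        else:  # assumed handling when one or both curve sets are missing
            success, results = False, {"compliance": False, "curves": None}
        pcs_results[pcs_bm_name + "." + op_name] = results
    return pcs_results
```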
has_simulated_curves, - curves, - ) - def __validate( self, + curves_manager: CurvesManager, + validator: Validator, pcs_bm_name: str, working_oc_dir: Path, jobs_output_dir: Path, @@ -132,27 +65,25 @@ def __validate( curves: dict, ) -> dict: - if self._validator.is_defined_cct(): - self._validator.set_time_cct( - self._curves_manager.get_producer_curves().get_time_cct( + if validator.is_defined_cct(): + validator.set_time_cct( + curves_manager.get_producer_curves().get_time_cct( working_oc_dir, jobs_output_dir, event_params["duration_time"], ) ) - self._validator.set_generators_imax( - self._curves_manager.get_producer_curves().get_generators_imax() - ) - self._validator.set_disconnection_model( - self._curves_manager.get_producer_curves().get_disconnection_model() + validator.set_generators_imax(curves_manager.get_producer_curves().get_generators_imax()) + validator.set_disconnection_model( + curves_manager.get_producer_curves().get_disconnection_model() ) - self._validator.set_setpoint_variation( - self._curves_manager.get_producer_curves().get_setpoint_variation( + validator.set_setpoint_variation( + curves_manager.get_producer_curves().get_setpoint_variation( get_cfg_oc_name(pcs_bm_name, self._name) ) ) - results = self._validator.validate( + results = validator.validate( self._name, working_oc_dir, jobs_output_dir, @@ -162,7 +93,7 @@ def __validate( ) # Operational point without defining its validations - if not self._validator.has_validations(): + if not validator.has_validations(): results["compliance"] = None if dgcv_logging.getEffectiveLevel() != logging.DEBUG: @@ -171,31 +102,10 @@ def __validate( return results - def _check_curves( - self, curves: pd.DataFrame, curves_name: str, review_curves_set: bool - ) -> bool: - measurement_names = self._validator.get_measurement_names() - has_curves = True - if review_curves_set: - if curves.empty: - dgcv_logging.get_logger("Operating Condition").warning( - f"Test without {curves_name} curves file" - ) - has_curves = False - else: - missed_curves = [] - for key in measurement_names: - if key not in curves: - missed_curves.append(key) - has_curves = False - if not has_curves: - dgcv_logging.get_logger("Operating Condition").warning( - f"Test without {curves_name} curve for keys {missed_curves}" - ) - return has_curves - def validate( self, + curves_manager: CurvesManager, + validator: Validator, pcs_bm_name: str, working_path: Path, jobs_output_dir: Path, @@ -236,6 +146,8 @@ def validate( if has_simulated_curves: # Validate results results = self.__validate( + curves_manager, + validator, pcs_bm_name, working_path, jobs_output_dir, @@ -246,93 +158,9 @@ def validate( else: results = {"compliance": False, "curves": None} - results["udim"] = self._curves_manager.get_producer_curves().get_generator_u_dim() + results["udim"] = curves_manager.get_producer_curves().get_generator_u_dim() return success, results - def has_required_curves( - self, - pcs_bm_name: str, - bm_name: str, - ) -> tuple[Path, Path, dict, float, bool, bool, int, dict]: - """Check if all curves are present. - - Parameters - ---------- - pcs_bm_name: str - Composite name, pcs + Benchmark name - bm_name: str - Benchmark name - - Returns - ------- - Path - Working path. - Path - Simulator output path. 
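Before delegating to `validator.validate`, the operating condition primes the validator with simulation-derived quantities taken from the producer-curves object held by the manager. A sketch of that hand-off, with stubs standing in for the Dynawo-backed classes; every value returned by the stubs is made up for illustration:

```python
# Sketch of the priming phase in OperatingCondition.__validate, with stubs in
# place of the real producer-curves object and validator.
class StubProducerCurves:
    def get_time_cct(self, working_dir, jobs_output_dir, duration_time):
        return 0.150                   # hypothetical critical clearing time (s)
    def get_generators_imax(self):
        return {"Generator1": 1.2}     # hypothetical per-generator Imax
    def get_disconnection_model(self):
        return "NoDisconnection"       # hypothetical
    def get_setpoint_variation(self, cfg_oc_name):
        return 0.0                     # hypothetical

class StubValidator:
    def is_defined_cct(self):
        return True
    def set_time_cct(self, value): print("time_cct:", value)
    def set_generators_imax(self, value): print("generators_imax:", value)
    def set_disconnection_model(self, value): print("disconnection_model:", value)
    def set_setpoint_variation(self, value): print("setpoint_variation:", value)

def prime_validator(validator, producer_curves, working_oc_dir, jobs_output_dir,
                    event_params, cfg_oc_name):
    # Same call sequence as the refactored __validate, minus error handling.
    if validator.is_defined_cct():
        validator.set_time_cct(producer_curves.get_time_cct(
            working_oc_dir, jobs_output_dir, event_params["duration_time"]))
    validator.set_generators_imax(producer_curves.get_generators_imax())
    validator.set_disconnection_model(producer_curves.get_disconnection_model())
    validator.set_setpoint_variation(producer_curves.get_setpoint_variation(cfg_oc_name))

prime_validator(StubValidator(), StubProducerCurves(), "work", "outputs",
                {"duration_time": 0.3}, "PCS_Example.BenchmarkA.OperatingCond1")
```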
- dict - Event parameters - float - Frequency sampling - bool - True if simulation is success - bool - True if simulation calculated curves - int - 0 all curves are present - 1 producer's curves are missing - 2 reference curves are missing - 3 all curves are missing - dict - Calculated and reference curves - """ - dgcv_logging.get_logger("Operating Condition").info( - "RUNNING BENCHMARK: " + pcs_bm_name + ", OPER. COND.: " + self._name - ) - - ( - working_oc_dir, - jobs_output_dir, - event_params, - fs, - success, - has_simulated_curves, - curves, - ) = self.__obtain_curve( - pcs_bm_name, - bm_name, - ) - - # If the tool has the model, it is assumed that the simulated curves are always available, - # if they are not available it is due to a failure in the simulation, this event is - # handled differently. - sim_curves = self._check_curves( - curves["calculated"], "producer", not self._producer.is_dynawo_model() - ) - ref_curves = self._check_curves( - curves["reference"], "reference", self.__has_reference_curves() - ) - - if sim_curves and ref_curves: - has_curves = 0 - elif not sim_curves and ref_curves: - has_curves = 1 - elif sim_curves and not ref_curves: - has_curves = 2 - else: - dgcv_logging.get_logger("Operating Condition").warning("Test without curves") - has_curves = 3 - - return ( - working_oc_dir, - jobs_output_dir, - event_params, - fs, - success, - has_simulated_curves, - has_curves, - curves, - ) - def get_name(self) -> str: """Get the OperatingCondition name.
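After patch 24 the `OperatingCondition` no longer fetches or checks curves itself; it only consumes the dict assembled by `CurvesManager.__obtain_curve`, which always carries both keys and uses an empty DataFrame when no reference set was configured. A small sketch of that convention, with hypothetical column names:

```python
import pandas as pd

def build_curves_dict(calculated: pd.DataFrame, reference: pd.DataFrame = None) -> dict:
    # Same shape as the dict produced by CurvesManager.__obtain_curve: consumers
    # always index curves["reference"] and test .empty instead of handling None.
    return {
        "calculated": calculated,
        "reference": reference if reference is not None else pd.DataFrame(),
    }

calculated = pd.DataFrame({"time": [0.0, 0.1], "BusPDR_BUS_Voltage": [1.0, 0.98]})
curves = build_curves_dict(calculated)
print(curves["reference"].empty)  # True -> treated as "no reference curves supplied"
```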