diff --git a/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr.txt b/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr.txt
index 8ac2b16..23ed76c 100755
--- a/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr.txt
+++ b/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr.txt
@@ -11,6 +11,7 @@ VALUE_OFFSET = 0.0
 DATA_TYPE_PACKED = i2
 STANDARD_NAME = precipitation_amount
 LONG_NAME = Daily Accumulated Precipitation
+KIWIS_FILTER_PLUGIN_CLASSES = {'ObservationsKiwisFilter': {'1303': 100.0}}
 
 [VAR_TIME]

diff --git a/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr6.txt b/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr6.txt
index 4d26822..7838abf 100755
--- a/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr6.txt
+++ b/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr6.txt
@@ -11,6 +11,8 @@ VALUE_OFFSET = 0.0
 DATA_TYPE_PACKED = i2
 STANDARD_NAME = precipitation_amount
 LONG_NAME = 6 Hourly Accumulated Precipitation
+KIWIS_FILTER_PLUGIN_CLASSES = {'DowgradedObservationsKiwisFilter': {'1295': 1.0}, 'ObservationsKiwisFilter': {'1303': 100.0}}
+
 [VAR_TIME]

diff --git a/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr6x4.txt b/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr6x4.txt
index c4ae563..2f5590d 100755
--- a/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr6x4.txt
+++ b/src/lisfloodutilities/gridding/configuration/1arcmin/config_pr6x4.txt
@@ -11,6 +11,7 @@ VALUE_OFFSET = 0.0
 DATA_TYPE_PACKED = i2
 STANDARD_NAME = precipitation_amount
 LONG_NAME = 6 Hourly Accumulated Precipitation per Day
+KIWIS_FILTER_PLUGIN_CLASSES = {'DowgradedObservationsKiwisFilter': {'1295': 1.0}, 'ObservationsKiwisFilter': {'1303': 100.0}}
 
 [VAR_TIME]

diff --git a/src/lisfloodutilities/gridding/configuration/1arcmin/default.txt b/src/lisfloodutilities/gridding/configuration/1arcmin/default.txt
index 9bdcaca..c87efe1 100755
--- a/src/lisfloodutilities/gridding/configuration/1arcmin/default.txt
+++ b/src/lisfloodutilities/gridding/configuration/1arcmin/default.txt
@@ -23,6 +23,7 @@ VALUE_OFFSET = 0.0
 DATA_TYPE_PACKED = i2
 STANDARD_NAME = DUMMY_STANDARD_NAME
 LONG_NAME = DUMMY LONG NAME
+KIWIS_FILTER_PLUGIN_CLASSES = {'KiwisFilter': {}}
 
 [DIMENSION]

diff --git a/src/lisfloodutilities/gridding/configuration/5x5km/config_pr.txt b/src/lisfloodutilities/gridding/configuration/5x5km/config_pr.txt
index 7c16022..169d84c 100755
--- a/src/lisfloodutilities/gridding/configuration/5x5km/config_pr.txt
+++ b/src/lisfloodutilities/gridding/configuration/5x5km/config_pr.txt
@@ -11,6 +11,7 @@ VALUE_OFFSET = 0.0
 DATA_TYPE_PACKED = i2
 STANDARD_NAME = precipitation_amount
 LONG_NAME = Daily Accumulated Precipitation
+KIWIS_FILTER_PLUGIN_CLASSES = {'ObservationsKiwisFilter': {'1303': 100.0}}
 
 [VAR_TIME]

diff --git a/src/lisfloodutilities/gridding/configuration/5x5km/config_pr6.txt b/src/lisfloodutilities/gridding/configuration/5x5km/config_pr6.txt
index 4d26822..356bf1f 100755
--- a/src/lisfloodutilities/gridding/configuration/5x5km/config_pr6.txt
+++ b/src/lisfloodutilities/gridding/configuration/5x5km/config_pr6.txt
@@ -11,6 +11,7 @@ VALUE_OFFSET = 0.0
 DATA_TYPE_PACKED = i2
 STANDARD_NAME = precipitation_amount
 LONG_NAME = 6 Hourly Accumulated Precipitation
+KIWIS_FILTER_PLUGIN_CLASSES = {'DowgradedObservationsKiwisFilter': {'1295': 1.0}, 'ObservationsKiwisFilter': {'1303': 100.0}}
 
 [VAR_TIME]
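The new KIWIS_FILTER_PLUGIN_CLASSES field holds a Python dict literal mapping filter class names (defined in lisfloodutilities.gridding.lib.filters, added below) to their argument dicts. A minimal sketch of how such a field could be parsed and turned into filter instances, assuming the raw string value is read from the config and evaluated with ast.literal_eval; load_filter_plugins and raw_value are hypothetical names, and this mirrors the commented-out __get_filter_classes draft further down rather than a confirmed API:

    import ast
    import importlib

    def load_filter_plugins(raw_value: str,
                            module_name: str = 'lisfloodutilities.gridding.lib.filters') -> list:
        # raw_value is the string from the config file, e.g.
        # "{'ObservationsKiwisFilter': {'1303': 100.0}}"
        plugins = ast.literal_eval(raw_value)
        module = importlib.import_module(module_name)
        # Instantiate each plugin class with its filter_args dict
        return [getattr(module, name)(args) for name, args in plugins.items()]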
diff --git a/src/lisfloodutilities/gridding/configuration/5x5km/config_pr6x4.txt b/src/lisfloodutilities/gridding/configuration/5x5km/config_pr6x4.txt
index c4ae563..2f5590d 100755
--- a/src/lisfloodutilities/gridding/configuration/5x5km/config_pr6x4.txt
+++ b/src/lisfloodutilities/gridding/configuration/5x5km/config_pr6x4.txt
@@ -11,6 +11,7 @@ VALUE_OFFSET = 0.0
 DATA_TYPE_PACKED = i2
 STANDARD_NAME = precipitation_amount
 LONG_NAME = 6 Hourly Accumulated Precipitation per Day
+KIWIS_FILTER_PLUGIN_CLASSES = {'DowgradedObservationsKiwisFilter': {'1295': 1.0}, 'ObservationsKiwisFilter': {'1303': 100.0}}
 
 [VAR_TIME]

diff --git a/src/lisfloodutilities/gridding/configuration/5x5km/default.txt b/src/lisfloodutilities/gridding/configuration/5x5km/default.txt
index 8750abb..94e7889 100755
--- a/src/lisfloodutilities/gridding/configuration/5x5km/default.txt
+++ b/src/lisfloodutilities/gridding/configuration/5x5km/default.txt
@@ -24,6 +24,7 @@ VALUE_OFFSET = 0.0
 DATA_TYPE_PACKED = i2
 STANDARD_NAME = DUMMY_STANDARD_NAME
 LONG_NAME = DUMMY LONG NAME
+KIWIS_FILTER_PLUGIN_CLASSES = {'KiwisFilter': {}}
 
 [DIMENSION]

diff --git a/src/lisfloodutilities/gridding/generate_grids.py b/src/lisfloodutilities/gridding/generate_grids.py
index febe337..728e090 100755
--- a/src/lisfloodutilities/gridding/generate_grids.py
+++ b/src/lisfloodutilities/gridding/generate_grids.py
@@ -21,7 +21,7 @@
 from pathlib import Path
 from argparse import ArgumentParser, ArgumentTypeError
 from datetime import datetime, timedelta
-from lisfloodutilities.gridding.lib.utils import Printable, Dem, Config, FileUtils, GriddingUtils
+from lisfloodutilities.gridding.lib.utils import Printable, Dem, Config, FileUtils, GriddingUtils  # , KiwisLoader
 from lisfloodutilities.gridding.lib.writers import NetCDFWriter, GDALWriter
 
@@ -80,6 +80,8 @@ def run(config_filename: str, infolder: str, output_file: str, processing_dates_
     output_writer_tiff = GDALWriter(conf, overwrite_output, quiet_mode)
     output_writer_netcdf = NetCDFWriter(conf, overwrite_output, quiet_mode)
     output_writer_netcdf.open(Path(outfile))
+#    file_loader = KiwisLoader(conf, overwrite_output, Path(infolder), quiet_mode)
+#    for filename in file_loader:
     for filename in sorted(Path(infolder).rglob(inwildcard)):
         file_timestamp = file_utils.get_timestamp_from_filename(filename) + timedelta(days=netcdf_offset_file_date)
         if not file_utils.processable_file(file_timestamp, dates_to_process, conf.start_date, conf.end_date):
@@ -144,6 +146,9 @@
     parser.add_argument("-c", "--conf", dest="config_type", required=True,
                         help="Set the grid configuration type to use.",
                         metavar="{5x5km, 1arcmin,...}")
+    parser.add_argument("-p", "--pathconf", dest="config_base_path", required=False, type=FileUtils.folder_type,
+                        help="Overrides the base path where the configurations are stored.",
+                        metavar="/path/to/config")
     parser.add_argument("-v", "--var", dest="variable_code", required=True,
                         help="Set the variable to be processed.",
                         metavar="{pr,pd,tn,tx,ws,rg,...}")
@@ -172,6 +177,8 @@
     quiet_mode = args.quiet
 
     configuration_base_folder = os.path.join(program_path, '../src/lisfloodutilities/gridding/configuration')
+    if args.config_base_path is not None and len(args.config_base_path) > 0:
+        configuration_base_folder = args.config_base_path
 
     file_utils = FileUtils(args.variable_code, quiet_mode)
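The commented-out file_loader lines sketch a planned swap from the rglob loop to a KiwisLoader iterable. For that swap to be a drop-in change, the loader only has to satisfy the same contract as sorted(Path(infolder).rglob(inwildcard)): an iterable of file paths in timestamp order, so the body of the processing loop stays untouched. A hypothetical stand-in showing that contract (FilenameLoader is an invented name, not part of the codebase):

    from pathlib import Path

    class FilenameLoader:
        """Stand-in with the iteration contract KiwisLoader is expected to honour."""
        def __init__(self, infolder: Path, inwildcard: str):
            self.infolder = infolder
            self.inwildcard = inwildcard

        def __iter__(self):
            # Same ordering as the rglob loop being replaced above
            return iter(sorted(self.infolder.rglob(self.inwildcard)))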
diff --git a/src/lisfloodutilities/gridding/lib/filters.py b/src/lisfloodutilities/gridding/lib/filters.py
new file mode 100644
index 0000000..d24d1e9
--- /dev/null
+++ b/src/lisfloodutilities/gridding/lib/filters.py
@@ -0,0 +1,40 @@
+from pathlib import Path
+import pandas as pd
+
+
+class KiwisFilter():
+
+    def __init__(self, filter_args: dict):
+        self.args = filter_args
+
+    def filter(self, kiwis_files: list) -> list:
+        filtered_data_frames = []
+        for file_path in kiwis_files:
+            df_kiwis = pd.read_csv(file_path, sep="\t")
+            df_kiwis = self._apply_filter(df_kiwis)
+            filtered_data_frames.append(df_kiwis)
+        return filtered_data_frames
+
+    # Single leading underscore so subclasses can override without name mangling
+    def _apply_filter(self, df: pd.DataFrame) -> pd.DataFrame:
+        # TODO: add the code to filter kiwis, leaving only the rows to be used for point file creation
+        return df
+
+# KIWIS_FILTER_PLUGIN_CLASSES = {'DowgradedObservationsKiwisFilter': {'1295': 1.0}, 'ObservationsKiwisFilter': {'1303': 100.0}}
+
+class DowgradedObservationsKiwisFilter(KiwisFilter):
+
+    def __init__(self, filter_args: dict):
+        super().__init__(filter_args)
+
+    def _apply_filter(self, df: pd.DataFrame) -> pd.DataFrame:
+        df = super()._apply_filter(df)
+        return df
+
+class ObservationsKiwisFilter(KiwisFilter):
+
+    def __init__(self, filter_args: dict):
+        super().__init__(filter_args)
+
+    def _apply_filter(self, df: pd.DataFrame) -> pd.DataFrame:
+        df = super()._apply_filter(df)
+        return df
+
\ No newline at end of file
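A hedged usage sketch of the new plugin classes: instantiate one with the args dict taken from the configuration and hand it a list of kiwis file paths; filter() returns one DataFrame per file. The file name below is invented for illustration, and since _apply_filter is still a pass-through the frames come back unchanged:

    from pathlib import Path
    from lisfloodutilities.gridding.lib.filters import ObservationsKiwisFilter

    plugin = ObservationsKiwisFilter({'1303': 100.0})  # args dict as in config_pr.txt
    for df in plugin.filter([Path('pr202312250600_all.kiwis')]):  # hypothetical file
        print(len(df), 'rows kept for point file creation')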
diff --git a/src/lisfloodutilities/gridding/lib/utils.py b/src/lisfloodutilities/gridding/lib/utils.py
index ae6dc9b..183e7fa 100755
--- a/src/lisfloodutilities/gridding/lib/utils.py
+++ b/src/lisfloodutilities/gridding/lib/utils.py
@@ -23,10 +23,14 @@
 import pandas as pd
 from decimal import *
 from datetime import datetime, timedelta
+from collections import OrderedDict
 from scipy.spatial import cKDTree
 from pyg2p import Loggable
 from pyg2p.main.readers.netcdf import NetCDFReader
 from pyg2p.main.interpolation.scipy_interpolation_lib import ScipyInterpolation
+from numpy import delete
+import importlib
+
 
 __DECIMAL_CASES = 20
 __DECIMAL_FORMAT = '{:.20f}'
@@ -428,3 +432,106 @@ def generate_grid(self, filename: Path) -> np.ndarray:
         self.check_grid_nan(filename, result)
         grid_data = self.prepare_grid(result, grid_x.shape)
         return grid_data
+
+
+# class KiwisLoader(Printable):
+#     DATE_PATTERN_CONDENSED = '%Y%m%d%H%M%S'
+#     DATE_PATTERN_SEPARATED = '%Y-%m-%d %H:%M:%S'
+#     CSV_DELIMITER = '\t'
+#     FILES_WILDCARD = '??????????00_all.kiwis'
+#
+#     def __init__(self, conf: Config, overwrite_file: bool, infolder: Path, quiet_mode: bool = False):
+#         super().__init__(quiet_mode)
+#         self.conf = conf
+#         self.overwrite_file = overwrite_file
+#         self.var_code = self.conf.var_code
+#         self.var_size = len(self.var_code)
+#         self.inwildcard = self.var_code + self.FILES_WILDCARD
+#         self.infolder = infolder
+#         # Frequency between timesteps in hours
+#         self.time_frequency = int(self.conf.get_config_field('VAR_TIME', 'FREQUENCY'))
+#         self.is_daily_var = (self.time_frequency == 1)
+#         # Number of files to be read/processed simultaneously. For non-daily vars the time frequency is in hours
+#         self.read_files_step = 1 if self.is_daily_var else int(24 / self.time_frequency)
+#         self.files_list = OrderedDict()
+#         self.file_groups = []
+#         self.file_group_read_idx = 0
+#         self.filter_classes = self.__get_filter_classes()
+#
+#     def __iter__(self):
+#         self.__load_kiwis_paths()
+#         self.file_groups = self.__get_file_groups()
+#         self.file_group_read_idx = 0
+#         return self
+#
+#     def __next__(self):
+#         if self.is_daily_var:
+#             for filter_class in self.filter_classes:
+#                 df_kiwis_array = filter_class.filter(self.file_groups[self.file_group_read_idx])
+#             self.file_group_read_idx += 1
+#         raise StopIteration
+#
+#     def __get_filter_classes(self) -> list:
+#         '''
+#         TODO: Implement the class.
+#         '''
+#         plugins_array = []
+#         # Load the class dynamically
+#         plugins = self.conf.get_config_field('PROPERTIES', 'KIWIS_FILTER_PLUGIN_CLASSES')
+#         module_name = 'lisfloodutilities.gridding.lib.filters'
+#         try:
+#             for plugin in plugins:
+#                 class_name = plugin
+#                 class_args = plugins[plugin]
+#                 module = importlib.import_module(module_name)
+#                 class_instance = getattr(module, class_name)(class_args)
+#                 plugins_array.append(class_instance)
+#         except ImportError:
+#             print(f"Error: Could not import module '{module_name}'")
+#         except AttributeError:
+#             print(f"Error: Could not find class '{class_name}' in module '{module_name}'")
+#         return plugins_array
+#
+#     def __filter_kiwis(self, filename_kiwis: Path) -> pd.DataFrame:
+#         return None
+#
+#     def __get_points_filename(self, kiwis_timestamp_str: str, filename_kiwis: Path) -> Path:
+#         '''
+#         Returns the points file path.
+#         In overwrite mode, returns the first existing points file found;
+#         otherwise (or if none exists) generates a new file path.
+#         '''
+#         if self.overwrite_file:
+#             for points_path in sorted(filename_kiwis.parent.rglob(f'{self.var_code}{kiwis_timestamp_str}_??????????????.txt')):
+#                 if points_path.is_file():
+#                     return points_path
+#         pointfile_timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+#         return Path(filename_kiwis.parent, f'{self.var_code}{kiwis_timestamp_str}_{pointfile_timestamp}.txt')
+#
+#     def __load_kiwis_paths(self):
+#         netcdf_offset_file_date = int(self.conf.get_config_field('VAR_TIME', 'OFFSET_FILE_DATE'))
+#         for filename_kiwis in sorted(self.infolder.rglob(self.inwildcard)):
+#             kiwis_timestamp = self.__get_timestamp_from_filename(filename_kiwis)
+#             file_timestamp = kiwis_timestamp + timedelta(days=netcdf_offset_file_date)
+#             if self.__processable_file(file_timestamp, self.conf.start_date, self.conf.end_date):
+#                 kiwis_timestamp_str = kiwis_timestamp.strftime(self.DATE_PATTERN_CONDENSED)
+#                 filename_points = self.__get_points_filename(kiwis_timestamp_str, filename_kiwis)
+#                 self.files_list[kiwis_timestamp_str] = (filename_kiwis, filename_points)
+#                 # print_msg(f'Processing file: {filename}')
+#
+#     def __get_file_groups(self) -> list:
+#         '''
+#         TODO
+#         '''
+#         file_groups = []
+#         if self.is_daily_var:
+#             for kiwis_timestamp_str in self.files_list:
+#                 file_groups.append([kiwis_timestamp_str])
+#         else:  # divide the files into groups of 4 and raise an exception if a group is not complete
+#             for kiwis_timestamp_str in self.files_list:
+#                 file_groups.append([])
+#         return file_groups
+#
+#     def __get_timestamp_from_filename(self, filename: Path) -> datetime:
+#         file_timestamp = filename.name[self.var_size:12 + self.var_size] + '00'
+#         return datetime.strptime(file_timestamp, FileUtils.DATE_PATTERN_CONDENSED)
+#
+#     def __processable_file(self, file_timestamp: datetime, start_date: datetime = None, end_date: datetime = None) -> bool:
+#         return (start_date is not None and start_date <= file_timestamp and
+#                 end_date is not None and file_timestamp <= end_date)
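The __get_file_groups TODO in the draft above amounts to: one group per timestamp for daily variables, otherwise groups of read_files_step files (4 for 6-hourly data). A minimal sketch of that grouping, under the stated assumption that an incomplete group should be an error; group_timestamps is a hypothetical helper, not code from this diff:

    def group_timestamps(timestamps: list, read_files_step: int) -> list:
        # e.g. read_files_step == 4 groups four 6-hourly kiwis files per day
        if len(timestamps) % read_files_step != 0:
            raise ValueError(f'incomplete group: {len(timestamps)} files, step {read_files_step}')
        return [timestamps[i:i + read_files_step]
                for i in range(0, len(timestamps), read_files_step)]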
diff --git a/src/lisfloodutilities/gridding/lib/writers.py b/src/lisfloodutilities/gridding/lib/writers.py
index 51b3980..0830f27 100644
--- a/src/lisfloodutilities/gridding/lib/writers.py
+++ b/src/lisfloodutilities/gridding/lib/writers.py
@@ -14,6 +14,7 @@
 import time as timex
 import warnings
 import numpy as np
+import copy
 from argparse import ArgumentTypeError
 from pathlib import Path
 from datetime import datetime, timedelta
@@ -123,7 +124,7 @@ def write_timestep(self, grid: np.ndarray, timestep: int = -1):
             raise Exception("netCDF Dataset was not initialized. If file already exists, use --force flag to append.")
         timestep_idx = int(timestep / self.time_frequency)
         self.nf.variables[self.netcdf_var_time][timestep_idx] = timestep
-        values = self.setNaN(grid)
+        values = self.setNaN(copy.deepcopy(grid))
         values[values < self.conf.value_min_packed] = np.nan
         values[values > self.conf.value_max_packed] = np.nan
         values[values != self.conf.VALUE_NAN] *= self.conf.scale_factor

diff --git a/src/lisfloodutilities/gridding/tools/leave1out_analysis.py b/src/lisfloodutilities/gridding/tools/leave1out_analysis.py
index 098ccc5..86abc16 100644
--- a/src/lisfloodutilities/gridding/tools/leave1out_analysis.py
+++ b/src/lisfloodutilities/gridding/tools/leave1out_analysis.py
@@ -29,7 +29,7 @@
 from scipy.spatial import cKDTree
 from scipy.stats import pearsonr
 from sklearn.model_selection import RepeatedStratifiedKFold
-from sklearn.metrics import mean_absolute_error
+from sklearn.metrics import mean_absolute_error, mean_squared_error
 from sklearn.feature_selection import r_regression
 
@@ -46,6 +46,29 @@ def mean_bias_error(y_true: np.array, y_pred: np.array) -> float:
     return mbe
 
 
+def critical_success_index(df: pd.DataFrame, limit_value: float) -> float:
+    '''
+    Critical Success Index is used only for Precipitation.
+
+    Parameters:
+    df (pandas dataframe): Dataframe containing both observed and predicted values
+    limit_value (float): Limit in millimeters to be used in the formula
+
+    csi = wc / (wc + wi + di)
+    wc = total number of points where y_true and y_pred > limit_value
+    wi = total number of points where y_true > limit_value and y_pred < limit_value
+    di = total number of points where y_true < limit_value and y_pred > limit_value
+
+    Returns:
+    csi (float): Critical Success Index
+    '''
+    wc = len(df[(df['true_value'] > limit_value) & (df['interpolated_value'] > limit_value)])
+    wi = len(df[(df['true_value'] > limit_value) & (df['interpolated_value'] < limit_value)])
+    di = len(df[(df['true_value'] < limit_value) & (df['interpolated_value'] > limit_value)])
+    csi = wc / (wc + wi + di)
+    return csi
+
+
 def get_netcdf_meteo_variable_name(nc_file_obj):
     # Only one variable must be present in netcdf files
     num_dims = 3 if 'time' in nc_file_obj.variables else 2
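A worked example of critical_success_index with the column names the function expects; the numbers are made up. With limit_value = 1.0, rows 2 and 5 are hits (wc = 2), row 3 is a miss (wi = 1) and row 4 a false alarm (di = 1), so csi = 2 / (2 + 1 + 1) = 0.5:

    import pandas as pd

    df = pd.DataFrame({
        'true_value':         [0.0, 2.0, 3.0, 0.5, 4.0],
        'interpolated_value': [0.2, 1.5, 0.4, 2.0, 6.0],
    })
    # critical_success_index(df, 1.0) -> 0.5

Note the strict inequalities: points exactly at limit_value fall into none of wc, wi or di.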
@@ -63,14 +86,16 @@ def get_pixel_area(pixel_area_file: Path) -> np.ndarray:
 
 def write_results(outfile: Path, test_code: str, mae: float, mbe: float, pearson_r: float, values_sum: float,
-                  count_pixels_1st_interval: float, count_pixels_2nd_interval: float):
+                  count_pixels_1st_interval: float, count_pixels_2nd_interval: float, mse: float, csi: float):
     if outfile.is_file():
         df = pd.read_csv(outfile, delimiter='\t')
     new_data = [{
         'test': test_code,
         'mae': mae,
         'mbe': mbe,
+        'mse': mse,
         'pearson_r': pearson_r,
+        'csi': csi,
         'values_sum': values_sum,
         'pixels_with_values_0

-# np.ndarray:
-#     compressed_data = data.astype(float)
-#     decompressed_data = compressed_data * scale_factor + offset
-#     decompressed_data = np.round(decompressed_data, 1)
-#     decompressed_data = np.where(decompressed_data == no_data, np.nan, decompressed_data)
-#     return decompressed_data
-
-
 def unpack_data(data: np.ndarray, scale_factor: float = 1.0, offset: float = 0.0, no_data: float = -9999.0) -> np.ndarray:
     print(f'scale_factor: {scale_factor} offset: {offset} no_data: {no_data}')
     compressed_data = data.astype(float)
@@ -129,7 +148,7 @@ def get_unpacking_metadata(file_interpolated_values: Path) -> tuple[float, float, float]:
     return scale_factor, offset, no_data
 
 
-def get_interpolated_values_dataframe(file_interpolated_values: Path, is_compressed_data: bool) -> pd.DataFrame:
+def get_interpolated_values_dataframe(file_interpolated_values: Path) -> pd.DataFrame:
     scale_factor, offset, no_data = get_unpacking_metadata(file_interpolated_values)
     dataarray = rxr.open_rasterio(file_interpolated_values)
     band = dataarray[0]
@@ -156,14 +175,14 @@ def get_interpolated_values(df_interpolated_values: pd.DataFrame, df_true_values
     return interpolated_values[idx]
 
 
-def run(file_true_values: str, file_interpolated_values: str, file_pixel_area: str, outfile: str, is_compressed_data: bool, limit_value: float):
+def run(file_true_values: str, file_interpolated_values: str, file_pixel_area: str, outfile: str, run_csi: bool, limit_value: float):
     file_interpolated_values_path = Path(file_interpolated_values)
     outfile_path = Path(outfile)
     file_true_values_path = Path(file_true_values)
     # Load the CSV file into a pandas dataframe
     df_true_values = pd.read_csv(file_true_values_path, delimiter='\t', header=None, names=['x', 'y', 'true_value'])
-    df_interpolated_values = get_interpolated_values_dataframe(file_interpolated_values_path, is_compressed_data)
+    df_interpolated_values = get_interpolated_values_dataframe(file_interpolated_values_path)
     predicted_values_mm = np.ascontiguousarray(df_interpolated_values['value'].values)  # includes NaN
     df_interpolated_values = df_interpolated_values.dropna()
     interpolated_values = get_interpolated_values(df_interpolated_values, df_true_values)
@@ -171,6 +190,10 @@
     # write intermediate results to a file
     df_true_values.to_csv(file_true_values_path.with_suffix('.tab'), sep="\t", index=False)
 
+    csi = -9999.0
+    if run_csi:
+        csi = critical_success_index(df_true_values, 1.0)
+
     # process only values greater than 1.0 mm
     # limit_value = 1.0
     df_true_values = df_true_values[(df_true_values['true_value'] > limit_value) & (df_true_values['interpolated_value'] > limit_value)]
@@ -181,6 +204,7 @@
     test_code = file_interpolated_values_path.parent.name
     mae = -9999.0
     mbe = -9999.0
+    mse = -9999.0
    pearson_correlation_coeficient = -9999.0
     values_sum = -9999.0
     count_pixels_1st_interval = -9999.0
@@ -189,6 +213,7 @@
     if len(true_values) > 0 and len(predicted_values) > 0:
         mae = mean_absolute_error(true_values, predicted_values)
         mbe = mean_bias_error(true_values, predicted_values)
+        mse = mean_squared_error(true_values, predicted_values)
         # The correlations needs at least 2 elements in each array
         if len(true_values) > 1 and len(predicted_values) > 1:
             pearson_correlation_coeficient, p_value = pearsonr(true_values, predicted_values)
@@ -207,7 +232,7 @@
     # write the results to the output file
     write_results(outfile_path, test_code, mae, mbe, pearson_correlation_coeficient, values_sum,
-                  count_pixels_1st_interval, count_pixels_2nd_interval)
+                  count_pixels_1st_interval, count_pixels_2nd_interval, mse, csi)
 
 
 def main(argv):
@@ -242,7 +267,7 @@
     parser = ArgumentParser(epilog=program_license, description=program_version_string+program_longdesc)
 
     # set defaults
-    parser.set_defaults(is_compressed_data=False, limit_value=1.0)
+    parser.set_defaults(run_csi=False, limit_value=1.0)
 
     parser.add_argument("-t", "--test", dest="file_true_values", required=True, type=FileUtils.file_type,
                         help="Set file path for true values form the test dataset in txt format.",
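For a quick sanity check of the metrics wired in above: mean_absolute_error and mean_squared_error come straight from sklearn, while mean_bias_error is the helper defined earlier in this file (commonly mean(y_pred - y_true), which this example assumes). The values are made up:

    import numpy as np
    from sklearn.metrics import mean_absolute_error, mean_squared_error

    true_values = np.array([1.2, 3.4, 0.0, 5.1])
    predicted_values = np.array([1.0, 3.9, 0.3, 4.6])
    print(mean_absolute_error(true_values, predicted_values))  # 0.375
    print(mean_squared_error(true_values, predicted_values))   # 0.1575
    # mean_bias_error(true_values, predicted_values) would be 0.025 under that convention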
@@ -256,10 +281,10 @@
     parser.add_argument("-o", "--out", dest="outfile", required=True, type=FileUtils.file_or_folder,
                         help="Set output file containing the statistics.",
                         metavar="output_file")
-    parser.add_argument("-c", "--compressed", dest="is_compressed_data", action="store_true",
-                        help="Indicates if data in the tiff file is compressed and decompresses it before using. [default: %(default)s]")
     parser.add_argument("-l", "--limit", dest="limit_value", required=False, type=float,
                         help="process only values greater than limit", metavar="limit_value")
+    parser.add_argument("-c", "--csi", dest="run_csi", action="store_true",
+                        help="Critical Success Index is used only for Precipitation. [default: %(default)s]")
 
     # process options
     args = parser.parse_args(argv)
@@ -267,14 +292,14 @@
     print(f"True values: {args.file_true_values}")
     print(f"Interpolated values: {args.file_interpolated_values}")
     print(f"Output File: {args.outfile}")
-    print(f"Compressed Data: {args.is_compressed_data}")
+    print(f"Run CSI: {args.run_csi}")
     print(f"Limit Value: {args.limit_value}")
     file_pixel_area = ""
     if args.file_pixel_area is not None:
         file_pixel_area = args.file_pixel_area
         print(f"Pixel Area File: {file_pixel_area}")
 
-    run(args.file_true_values, args.file_interpolated_values, file_pixel_area, args.outfile, args.is_compressed_data, args.limit_value)
+    run(args.file_true_values, args.file_interpolated_values, file_pixel_area, args.outfile, args.run_csi, args.limit_value)
     print("Finished.")
 except Exception as e:
     indent = len(program_name) * " "

diff --git a/src/lisfloodutilities/gridding/tools/leave1out_analysis_aggregation.py b/src/lisfloodutilities/gridding/tools/leave1out_analysis_aggregation.py
index f49a996..252b930 100644
--- a/src/lisfloodutilities/gridding/tools/leave1out_analysis_aggregation.py
+++ b/src/lisfloodutilities/gridding/tools/leave1out_analysis_aggregation.py
@@ -33,7 +33,9 @@
 df_dic = {
     'mae': None,
     'mbe': None,
+    'mse': None,
     'pearson_r': None,
+    'csi': None,
     'values_sum': None,
     'pixels_with_values_0