diff --git a/docs/calidhayte/calibrate.html b/docs/calidhayte/calibrate.html
index f5e411f..1db2d2e 100644
--- a/docs/calidhayte/calibrate.html
+++ b/docs/calidhayte/calibrate.html
@@ -95,6 +95,9 @@

API Documentation

  • models
  • + folds
  • pymc_bayesian
@@ -200,9 +203,6 @@

    API Documentation

  • gaussian_process
  • - pls
  • isotonic
@@ -280,1211 +280,2520 @@

    13from copy import deepcopy as dc 14import logging 15import sys - 16from typing import Any, Literal, Union + 16from typing import Any, List, Literal, Union 17import warnings 18 19# import bambi as bmb 20import numpy as np 21import pandas as pd - 22import sklearn as skl - 23from sklearn import cross_decomposition as cd - 24from sklearn import ensemble as en - 25from sklearn import gaussian_process as gp - 26from sklearn import isotonic as iso - 27from sklearn import linear_model as lm - 28from sklearn import neural_network as nn - 29from sklearn import svm - 30from sklearn import tree - 31import sklearn.preprocessing as pre - 32from sklearn.model_selection import StratifiedKFold - 33from sklearn.compose import ColumnTransformer - 34from sklearn.pipeline import Pipeline - 35import xgboost as xgb - 36 - 37_logger = logging.getLogger("pymc") - 38_logger.setLevel(logging.ERROR) + 22import scipy + 23from scipy.stats import uniform + 24import sklearn as skl + 25from sklearn import cross_decomposition as cd + 26from sklearn import ensemble as en + 27from sklearn import gaussian_process as gp + 28from sklearn import isotonic as iso + 29from sklearn import linear_model as lm + 30from sklearn import neural_network as nn + 31from sklearn import svm + 32from sklearn import tree + 33from sklearn.gaussian_process import kernels as kern + 34import sklearn.preprocessing as pre + 35from sklearn.model_selection import StratifiedKFold, RandomizedSearchCV + 36from sklearn.compose import ColumnTransformer + 37from sklearn.pipeline import Pipeline + 38import xgboost as xgb 39 - 40 - 41def cont_strat_folds( - 42 df: pd.DataFrame, - 43 target_var: str, - 44 splits: int = 5, - 45 strat_groups: int = 5, - 46 seed: int = 62 - 47 ) -> pd.DataFrame: - 48 """ - 49 Creates stratified k-folds on continuous variable - 50 ---------- - 51 df : pd.DataFrame - 52 Target data to stratify on. - 53 target_var : str - 54 Target feature name. - 55 splits : int, default=5 - 56 Number of folds to make. - 57 strat_groups : int, default=10 - 58 Number of groups to split data in to for stratification. - 59 seed : int, default=62 - 60 Random state to use. - 61 - 62 Returns - 63 ------- - 64 pd.DataFrame - 65 `y_df` with added 'Fold' column, specifying which test data fold - 66 variable corresponds to. - 67 - 68 Examples - 69 -------- - 70 >>> df = pd.read_csv('data.csv') - 71 >>> df - 72 | | x | a | b | - 73 | | | | | - 74 | 0 |2.3|1.8|7.2| - 75 | 1 |3.2|9.6|4.5| - 76 |....|...|...|...| - 77 |1000|2.3|4.5|2.2| - 78 >>> df_with_folds = const_strat_folds( - 79 df=df, - 80 target='a', - 81 splits=3, - 82 strat_groups=3. - 83 seed=78 - 84 ) - 85 >>> df_with_folds - 86 | | x | a | b |Fold| - 87 | | | | | | - 88 | 0 |2.3|1.8|7.2| 2 | - 89 | 1 |3.2|9.6|4.5| 1 | - 90 |....|...|...|...|....| - 91 |1000|2.3|4.5|2.2| 0 | - 92 - 93 All folds should have a roughly equal distribution of values for 'a' - 94 - 95 """ - 96 _df = df.copy() - 97 _df['Fold'] = -1 - 98 skf = StratifiedKFold( - 99 n_splits=splits, - 100 random_state=seed, - 101 shuffle=True - 102 ) - 103 _df['Group'] = pd.cut( - 104 _df.loc[:, target_var], - 105 strat_groups, - 106 labels=False - 107 ) - 108 group_label = _df.loc[:, 'Group'] - 109 - 110 for fold_number, (_, v) in enumerate(skf.split(group_label, group_label)): - 111 _df.loc[v, 'Fold'] = fold_number - 112 return _df.drop('Group', axis=1) - 113 - 114 - 115class Calibrate: - 116 """ - 117 Calibrate x against y using a range of different methods provided by - 118 scikit-learn[^skl], xgboost[^xgb] and PyMC (via Bambi)[^pmc]. 
- 119 - 120 [^skl]: https://scikit-learn.org/stable/modules/classes.html - 121 [^xgb]: https://xgboost.readthedocs.io/en/stable/python/python_api.html - 122 [^pmc]: https://bambinos.github.io/bambi/api/ - 123 - 124 Examples - 125 -------- - 126 >>> from calidhayte.calibrate import Calibrate - 127 >>> import pandas as pd - 128 >>> - 129 >>> x = pd.read_csv('independent.csv') - 130 >>> x - 131 | | a | b | - 132 | 0 |2.3|3.2| - 133 | 1 |3.4|3.1| - 134 |...|...|...| - 135 |100|3.7|2.1| - 136 >>> - 137 >>> y = pd.read_csv('dependent.csv') - 138 >>> y - 139 | | a | - 140 | 0 |7.8| - 141 | 1 |9.9| - 142 |...|...| - 143 |100|9.5| - 144 >>> - 145 >>> calibration = Calibrate( - 146 x_data=x, - 147 y_data=y, - 148 target='a', - 149 folds=5, - 150 strat_groups=5, - 151 scaler = [ - 152 'Standard Scale', - 153 'MinMax Scale' - 154 ], - 155 seed=62 - 156 ) - 157 >>> calibration.linreg() - 158 >>> calibration.lars() - 159 >>> calibration.omp() - 160 >>> calibration.ransac() - 161 >>> calibration.random_forest() - 162 >>> - 163 >>> models = calibration.return_models() - 164 >>> list(models.keys()) - 165 [ - 166 'Linear Regression', - 167 'Least Angle Regression', - 168 'Orthogonal Matching Pursuit', - 169 'RANSAC', - 170 'Random Forest' - 171 ] - 172 >>> list(models['Linear Regression'].keys()) - 173 ['Standard Scale', 'MinMax Scale'] - 174 >>> list(models['Linear Regression']['Standard Scale'].keys()) - 175 ['a', 'a + b'] - 176 >>> list(models['Linear Regression']['Standard Scale']['a'].keys()) - 177 [0, 1, 2, 3, 4] - 178 >>> type(models['Linear Regression']['Standard Scale']['a'][0]) - 179 <class sklearn.pipeline.Pipeline> - 180 >>> pipeline = models['Linear Regression']['Standard Scale']['a'][0] - 181 >>> x_new = pd.read_csv('independent_new.csv') - 182 >>> x_new - 183 | | a | b | - 184 | 0 |3.5|2.7| - 185 | 1 |4.0|1.1| - 186 |...|...|...| - 187 |100|2.3|2.1| - 188 >>> pipeline.transform(x_new) - 189 | | a | - 190 | 0 |9.7| - 191 | 1 |9.1| - 192 |...|...| - 193 |100|6.7| - 194 - 195 """ - 196 - 197 def __init__( - 198 self, - 199 x_data: pd.DataFrame, - 200 y_data: pd.DataFrame, - 201 target: str, - 202 folds: int = 5, - 203 strat_groups: int = 10, - 204 scaler: Union[ - 205 Iterable[ - 206 Literal[ - 207 'None', - 208 'Standard Scale', - 209 'MinMax Scale', - 210 'Yeo-Johnson Transform' - 211 'Box-Cox Transform', - 212 'Quantile Transform (Uniform)', - 213 'Quantile Transform (Gaussian)' - 214 ] - 215 ], - 216 Literal[ - 217 'All', - 218 'None', - 219 'Standard Scale', - 220 'MinMax Scale', - 221 'Yeo-Johnson Transform' - 222 'Box-Cox Transform', - 223 'Quantile Transform (Uniform)', - 224 'Quantile Transform (Gaussian)', - 225 ] - 226 ] = 'None', - 227 seed: int = 62 - 228 ): - 229 """Initialises class - 230 - 231 Used to compare one set of measurements against another. - 232 It can perform both univariate and multivariate regression, though - 233 some techniques can only do one or the other. Multivariate regression - 234 can only be performed when secondary variables are provided. - 235 - 236 Parameters - 237 ---------- - 238 x_data : pd.DataFrame - 239 Data to be calibrated. - 240 y_data : pd.DataFrame - 241 'True' data to calibrate against. - 242 target : str - 243 Column name of the primary feature to use in calibration, must be - 244 the name of a column in both `x_data` and `y_data`. - 245 folds : int, default=5 - 246 Number of folds to split the data into, using stratified k-fold. 
- 247 strat_groups : int, default=10 - 248 Number of groups to stratify against, the data will be split into - 249 n equally sized bins where n is the value of `strat_groups`. - 250 scaler : iterable of {<br>\ - 251 'None',<br>\ - 252 'Standard Scale',<br>\ - 253 'MinMax Scale',<br>\ - 254 'Yeo-Johnson Transform',<br>\ - 255 'Box-Cox Transform',<br>\ - 256 'Quantile Transform (Uniform)',<br>\ - 257 'Quantile Transform (Gaussian)',<br>\ - 258 } or {<br>\ - 259 'All',<br>\ - 260 'None',<br>\ - 261 'Standard Scale',<br>\ - 262 'MinMax Scale',<br>\ - 263 'Yeo-Johnson Transform',<br>\ - 264 'Box-Cox Transform',<br>\ - 265 'Quantile Transform (Uniform)',<br>\ - 266 'Quantile Transform (Gaussian)',<br>\ - 267 }, default='None' - 268 The scaling/transform method (or list of methods) to apply to the - 269 data - 270 seed : int, default=62 - 271 Random state to use when shuffling and splitting the data into n - 272 folds. Ensures repeatability. - 273 - 274 Raises - 275 ------ - 276 ValueError - 277 Raised if the target variables (e.g. 'NO2') is not a column name in - 278 both dataframes. - 279 Raised if `scaler` is not str, tuple or list - 280 """ - 281 if target not in x_data.columns or target not in y_data.columns: - 282 raise ValueError( - 283 f"{target} does not exist in both columns." - 284 ) - 285 join_index = x_data.join( - 286 y_data, - 287 how='inner', - 288 lsuffix='x', - 289 rsuffix='y' - 290 ).dropna().index - 291 """ - 292 The common indices between `x_data` and `y_data`, excluding missing - 293 values - 294 """ - 295 self.x_data: pd.DataFrame = x_data.loc[join_index, :] - 296 """ - 297 The data to be calibrated. - 298 """ - 299 self.target: str = target - 300 """ - 301 The name of the column in both `x_data` and `y_data` that - 302 will be used as the x and y variables in the calibration. 
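Before any models are built, the constructor narrows both dataframes down to the rows they share and that contain no missing values, via the inner join shown above. A minimal standalone sketch of that step with two invented frames:

```python
import pandas as pd

# Invented frames for illustration; both contain an 'a' column, partly
# overlapping indices, and y has one missing value
x = pd.DataFrame({"a": [2.3, 3.4, 3.7], "b": [3.2, 3.1, 2.1]}, index=[0, 1, 2])
y = pd.DataFrame({"a": [7.8, None, 9.5]}, index=[1, 2, 3])

# Same logic as Calibrate.__init__: the inner join keeps only common indices,
# dropna() then removes rows where either frame is missing a value
join_index = x.join(y, how="inner", lsuffix="x", rsuffix="y").dropna().index

x_data = x.loc[join_index, :]
y_data = y.loc[join_index, :]
print(list(join_index))  # [2] -- only index 2 is present and complete in both
```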
- 303 """ - 304 self.scaler_list: dict[str, Any] = { - 305 'None': None, - 306 'Standard Scale': pre.StandardScaler(), - 307 'MinMax Scale': pre.MinMaxScaler(), - 308 'Yeo-Johnson Transform': pre.PowerTransformer( - 309 method='yeo-johnson' - 310 ), - 311 'Box-Cox Transform': pre.PowerTransformer(method='box-cox'), - 312 'Quantile Transform (Uniform)': pre.QuantileTransformer( - 313 output_distribution='uniform' - 314 ), - 315 'Quantile Transform (Gaussian)': pre.QuantileTransformer( - 316 output_distribution='normal' - 317 ) - 318 } - 319 """ - 320 Keys for scaling algorithms available in the pipelines - 321 """ - 322 self.scaler: list[str] = list() - 323 """ - 324 The scaling algorithm(s) to preprocess the data with - 325 """ - 326 if isinstance(scaler, str): - 327 if scaler == "All": - 328 if not bool(self.x_data.ge(0).all(axis=None)): - 329 warnings.warn( - 330 f'Box-Cox is not compatible with provided measurements' - 331 ) - 332 self.scaler_list.pop('Box-Cox Transform') - 333 self.scaler.extend(self.scaler_list.keys()) - 334 elif scaler in self.scaler_list.keys(): - 335 self.scaler.append(scaler) - 336 else: - 337 self.scaler.append('None') - 338 warnings.warn(f'Scaling algorithm {scaler} not recognised') - 339 elif isinstance(scaler, (tuple, list)): - 340 for sc in scaler: - 341 if sc == 'Box-Cox Transform' and not bool( - 342 self.x_data.ge(0).all(axis=None) - 343 ): - 344 warnings.warn( - 345 f'Box-Cox is not compatible with provided measurements' - 346 ) - 347 continue - 348 if sc in self.scaler_list.keys(): - 349 self.scaler.append(sc) - 350 else: - 351 warnings.warn(f'Scaling algorithm {sc} not recognised') - 352 else: - 353 raise ValueError('scaler parameter should be string, list or tuple') - 354 if not self.scaler: - 355 warnings.warn( - 356 f'No valid scaling algorithms provided, defaulting to None' - 357 ) - 358 self.scaler.append('None') - 359 - 360 self.y_data = cont_strat_folds( - 361 y_data.loc[join_index, :], - 362 target, - 363 folds, - 364 strat_groups, - 365 seed - 366 ) - 367 """ - 368 The data that `x_data` will be calibrated against. A '*Fold*' - 369 column is added using the `const_strat_folds` function which splits - 370 the data into k stratified folds (where k is the value of - 371 `folds`). It splits the continuous measurements into n bins (where n - 372 is the value of `strat_groups`) and distributes each bin equally - 373 across all folds. This significantly reduces the chances of one fold - 374 containing a skewed distribution relative to the whole dataset. - 375 """ - 376 self.models: dict[str, # Technique name - 377 dict[str, # Scaling technique - 378 dict[str, # Variable combo - 379 dict[int, # Fold - 380 Pipeline]]]] = dict() - 381 """ - 382 The calibrated models. They are stored in a nested structure as - 383 follows: - 384 1. Primary Key, name of the technique (e.g Lasso Regression). - 385 2. Scaling technique (e.g Yeo-Johnson Transform). - 386 3. Combination of variables used or `target` if calibration is - 387 univariate (e.g "`target` + a + b). - 388 4. Fold, which fold was used excluded from the calibration. If data - 389 if 5-fold cross validated, a key of 4 indicates the data was trained on - 390 folds 0-3. 
- 391 - 392 ```mermaid - 393 stateDiagram-v2 - 394 models --> Technique - 395 state Technique { - 396 [*] --> Scaling - 397 [*]: The calibration technique used - 398 [*]: (e.g "Lasso Regression") - 399 state Scaling { - 400 [*] --> Variables - 401 [*]: The scaling technique used - 402 [*]: (e.g "Yeo-Johnson Transform") - 403 state Variables { - 404 [*] : The combination of variables used - 405 [*] : (e.g "x + a + b") - 406 [*] --> Fold - 407 state Fold { - 408 [*] : Which fold was excluded from training data - 409 [*] : (e.g 4 indicates folds 0-3 were used to train) - 410 } - 411 } - 412 } - 413 } - 414 ``` - 415 - 416 """ - 417 - 418 def _sklearn_regression_meta( - 419 self, - 420 reg: Union[skl.base.RegressorMixin, Literal['t', 'gaussian']], - 421 name: str, - 422 min_coeffs: int = 1, - 423 max_coeffs: int = (sys.maxsize * 2) + 1, - 424 **kwargs - 425 ): - 426 """ - 427 Metaclass, formats data and uses sklearn classifier to - 428 fit x to y - 429 - 430 Parameters - 431 ---------- - 432 reg : sklearn.base.RegressorMixin or str - 433 Classifier to use, or distribution family to use for bayesian. - 434 name : str - 435 Name of classification technique to save pipeline to. - 436 min_coeffs : int, default=1 - 437 Minimum number of coefficients for technique. - 438 max_coeffs : int, default=(sys.maxsize * 2) + 1 - 439 Maximum number of coefficients for technique. - 440 - 441 Raises - 442 ------ - 443 NotImplementedError - 444 PyMC currently doesn't work, TODO - 445 """ - 446 x_secondary_cols = self.x_data.drop(self.target, axis=1).columns - 447 # All columns in x_data that aren't the target variable - 448 products = [[np.nan, col] for col in x_secondary_cols] - 449 secondary_vals = pd.MultiIndex.from_product(products) - 450 # Get all possible combinations of secondary variables in a pandas - 451 # MultiIndex - 452 if self.models.get(name) is None: - 453 self.models[name] = dict() - 454 # If the classification technique hasn't been used yet, - 455 # add its key to the models dictionary - 456 for scaler in self.scaler: - 457 if self.models[name].get(scaler) is None: - 458 self.models[name][scaler] = dict() - 459 # If the scaling technique hasn't been used with the classification - 460 # technique yet, add its key to the nested dictionary - 461 for sec_vals in secondary_vals: - 462 # Loop over all combinations of secondary values - 463 vals = [self.target] + [v for v in sec_vals if v == v] - 464 vals_str = ' + '.join(vals) - 465 if len(vals) < min_coeffs or len(vals) > max_coeffs: - 466 # Skip if number of coeffs doesn't lie within acceptable range - 467 # for technique. For example, isotonic regression - 468 # only works with one variable - 469 continue - 470 self.models[name][scaler][vals_str] = dict() - 471 for fold in self.y_data.loc[:, 'Fold'].unique(): - 472 y_data = self.y_data[ - 473 self.y_data.loc[:, 'Fold'] != fold - 474 ] - 475 if reg in ['t', 'gaussian']: - 476 # If using PyMC bayesian model, - 477 # format data and build model using bambi - 478 # then store result in pipeline - 479 # Currently doesn't work as PyMC models - 480 # can't be pickled, so don't function with deepcopy. 
Needs - 481 # looking into - 482 raise NotImplementedError( - 483 "PyMC functions currently don't work with deepcopy" - 484 ) - 485 # sc = scalers[scaler] - 486 # if sc is not None: - 487 # x_data = sc.fit_transform( - 488 # self.x_data.loc[y_data.index, :] - 489 # ) - 490 # else: - 491 # x_data = self.x_data.loc[y_data.index, :] - 492 # x_data['y'] = y_data.loc[:, self.target] - 493 # model = bmb.Model( - 494 # f"y ~ {vals_str}", - 495 # x_data, - 496 # family=reg - 497 # ) - 498 # _ = model.fit( - 499 # progressbar=False, - 500 # **kwargs - 501 # ) - 502 # pipeline = Pipeline([ - 503 # ("Scaler", scaler), - 504 # ("Regression", model) - 505 # ]) - 506 else: - 507 # If using scikit-learn API compatible classifier, - 508 # Build pipeline and fit to - 509 pipeline = Pipeline([ - 510 ("Selector", ColumnTransformer([ - 511 ("selector", "passthrough", vals) - 512 ], remainder="drop") - 513 ), - 514 ("Scaler", self.scaler_list[scaler]), - 515 ("Regression", reg) - 516 ]) - 517 pipeline.fit( - 518 self.x_data.loc[y_data.index, :], - 519 y_data.loc[:, self.target] - 520 ) - 521 self.models[name][scaler][vals_str][fold] = dc(pipeline) - 522 - 523 def pymc_bayesian( - 524 self, - 525 family: Literal[ - 526 "Gaussian", - 527 "Student T", - 528 ] = "Gaussian", - 529 name: str = " PyMC Bayesian", - 530 **kwargs - 531 ): - 532 """ - 533 Performs bayesian linear regression (either uni or multivariate) - 534 fitting x on y. - 535 - 536 Performs bayesian linear regression, both univariate and multivariate, - 537 on X against y. More details can be found at: - 538 https://pymc.io/projects/examples/en/latest/generalized_linear_models/ - 539 GLM-robust.html - 540 - 541 Parameters - 542 ---------- - 543 family : {'Gaussian', 'Student T'}, default='Gaussian' - 544 Statistical distribution to fit measurements to. Options are: - 545 - Gaussian - 546 - Student T - 547 """ - 548 # Define model families - 549 model_families = { - 550 "Gaussian": "gaussian", - 551 "Student T": "t" - 552 } - 553 self._sklearn_regression_meta( - 554 model_families[family], - 555 f'{name} ({model_families})', - 556 **kwargs - 557 ) - 558 - 559 def linreg(self, name: str = "Linear Regression", **kwargs): - 560 """ - 561 Fit x on y via linear regression - 562 - 563 Parameters - 564 ---------- - 565 name : str, default="Linear Regression" - 566 Name of classification technique. 
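Every wrapper method below (linreg, ridge, lasso, ...) ultimately hands its estimator to `_sklearn_regression_meta` above, which builds one scikit-learn Pipeline per scaler, variable combination and fold: a passthrough ColumnTransformer that keeps only the chosen columns, the scaler, and the regressor, fitted on all folds except the held-out one. A rough standalone sketch of a single such pipeline on synthetic data (the column names and fold labels here are invented for illustration):

```python
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(62)
x_data = pd.DataFrame({"NO2": rng.normal(20, 5, 100), "RH": rng.normal(50, 10, 100)})
y_data = pd.DataFrame({
    "NO2": x_data["NO2"] * 1.3 + 2 + rng.normal(0, 1, 100),
    "Fold": rng.integers(0, 5, 100),   # stand-in for the stratified fold labels
})

vals = ["NO2", "RH"]                   # one variable combination ("NO2 + RH")
fold = 4                               # fold to hold out
train = y_data[y_data["Fold"] != fold]

pipeline = Pipeline([
    ("Selector", ColumnTransformer(
        [("selector", "passthrough", vals)], remainder="drop")),
    ("Scaler", StandardScaler()),
    ("Regression", LinearRegression()),
])
pipeline.fit(x_data.loc[train.index, :], train["NO2"])
print(pipeline.predict(x_data.head()))
```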
- 567 """ - 568 self._sklearn_regression_meta( - 569 lm.LinearRegression(**kwargs), - 570 name - 571 ) - 572 - 573 def ridge(self, name: str = "Ridge Regression", **kwargs): - 574 """ - 575 Fit x on y via ridge regression - 576 - 577 Parameters - 578 ---------- - 579 name : str, default="Ridge Regression" - 580 Name of classification technique - 581 """ - 582 self._sklearn_regression_meta( - 583 lm.Ridge(**kwargs), - 584 name - 585 ) - 586 - 587 def ridge_cv( - 588 self, - 589 name: str = "Ridge Regression (Cross Validated)", - 590 **kwargs - 591 ): - 592 """ - 593 Fit x on y via cross-validated ridge regression - 594 - 595 Parameters - 596 ---------- - 597 name : str, default="Ridge Regression (Cross Validated)" - 598 Name of classification technique - 599 """ - 600 self._sklearn_regression_meta( - 601 lm.RidgeCV(**kwargs), - 602 name - 603 ) - 604 - 605 def lasso(self, name: str = "Lasso Regression", **kwargs): + 40_logger = logging.getLogger("pymc") + 41_logger.setLevel(logging.ERROR) + 42 + 43 + 44def cont_strat_folds( + 45 df: pd.DataFrame, + 46 target_var: str, + 47 splits: int = 5, + 48 strat_groups: int = 5, + 49 seed: int = 62 + 50 ) -> pd.DataFrame: + 51 """ + 52 Creates stratified k-folds on continuous variable + 53 ---------- + 54 df : pd.DataFrame + 55 Target data to stratify on. + 56 target_var : str + 57 Target feature name. + 58 splits : int, default=5 + 59 Number of folds to make. + 60 strat_groups : int, default=10 + 61 Number of groups to split data in to for stratification. + 62 seed : int, default=62 + 63 Random state to use. + 64 + 65 Returns + 66 ------- + 67 pd.DataFrame + 68 `y_df` with added 'Fold' column, specifying which test data fold + 69 variable corresponds to. + 70 + 71 Examples + 72 -------- + 73 >>> df = pd.read_csv('data.csv') + 74 >>> df + 75 | | x | a | b | + 76 | | | | | + 77 | 0 |2.3|1.8|7.2| + 78 | 1 |3.2|9.6|4.5| + 79 |....|...|...|...| + 80 |1000|2.3|4.5|2.2| + 81 >>> df_with_folds = const_strat_folds( + 82 df=df, + 83 target='a', + 84 splits=3, + 85 strat_groups=3. + 86 seed=78 + 87 ) + 88 >>> df_with_folds + 89 | | x | a | b |Fold| + 90 | | | | | | + 91 | 0 |2.3|1.8|7.2| 2 | + 92 | 1 |3.2|9.6|4.5| 1 | + 93 |....|...|...|...|....| + 94 |1000|2.3|4.5|2.2| 0 | + 95 + 96 All folds should have a roughly equal distribution of values for 'a' + 97 + 98 """ + 99 _df = df.copy() + 100 _df['Fold'] = -1 + 101 skf = StratifiedKFold( + 102 n_splits=splits, + 103 random_state=seed, + 104 shuffle=True + 105 ) + 106 _df['Group'] = pd.cut( + 107 _df.loc[:, target_var], + 108 strat_groups, + 109 labels=False + 110 ) + 111 group_label = _df.loc[:, 'Group'] + 112 + 113 for fold_number, (_, v) in enumerate(skf.split(group_label, group_label)): + 114 _df.loc[v, 'Fold'] = fold_number + 115 return _df.drop('Group', axis=1) + 116 + 117 + 118class Calibrate: + 119 """ + 120 Calibrate x against y using a range of different methods provided by + 121 scikit-learn[^skl], xgboost[^xgb] and PyMC (via Bambi)[^pmc]. 
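As a quick check of the fold assignment performed by `cont_strat_folds` above, the sketch below (assuming the calidhayte package is importable) builds a synthetic frame and confirms that every fold ends up with a similar number of rows and a similar spread of the target:

```python
import numpy as np
import pandas as pd
from calidhayte.calibrate import cont_strat_folds

rng = np.random.default_rng(62)
df = pd.DataFrame({"x": rng.normal(0, 1, 1000), "a": rng.gamma(2.0, 2.0, 1000)})

folded = cont_strat_folds(df, target_var="a", splits=5, strat_groups=5, seed=62)

# Each fold should hold roughly 200 rows with a comparable mean of 'a'
print(folded.groupby("Fold")["a"].agg(["count", "mean", "std"]))
```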
+ 122 + 123 [^skl]: https://scikit-learn.org/stable/modules/classes.html + 124 [^xgb]: https://xgboost.readthedocs.io/en/stable/python/python_api.html + 125 [^pmc]: https://bambinos.github.io/bambi/api/ + 126 + 127 Examples + 128 -------- + 129 >>> from calidhayte.calibrate import Calibrate + 130 >>> import pandas as pd + 131 >>> + 132 >>> x = pd.read_csv('independent.csv') + 133 >>> x + 134 | | a | b | + 135 | 0 |2.3|3.2| + 136 | 1 |3.4|3.1| + 137 |...|...|...| + 138 |100|3.7|2.1| + 139 >>> + 140 >>> y = pd.read_csv('dependent.csv') + 141 >>> y + 142 | | a | + 143 | 0 |7.8| + 144 | 1 |9.9| + 145 |...|...| + 146 |100|9.5| + 147 >>> + 148 >>> calibration = Calibrate( + 149 x_data=x, + 150 y_data=y, + 151 target='a', + 152 folds=5, + 153 strat_groups=5, + 154 scaler = [ + 155 'Standard Scale', + 156 'MinMax Scale' + 157 ], + 158 seed=62 + 159 ) + 160 >>> calibration.linreg() + 161 >>> calibration.lars() + 162 >>> calibration.omp() + 163 >>> calibration.ransac() + 164 >>> calibration.random_forest() + 165 >>> + 166 >>> models = calibration.return_models() + 167 >>> list(models.keys()) + 168 [ + 169 'Linear Regression', + 170 'Least Angle Regression', + 171 'Orthogonal Matching Pursuit', + 172 'RANSAC', + 173 'Random Forest' + 174 ] + 175 >>> list(models['Linear Regression'].keys()) + 176 ['Standard Scale', 'MinMax Scale'] + 177 >>> list(models['Linear Regression']['Standard Scale'].keys()) + 178 ['a', 'a + b'] + 179 >>> list(models['Linear Regression']['Standard Scale']['a'].keys()) + 180 [0, 1, 2, 3, 4] + 181 >>> type(models['Linear Regression']['Standard Scale']['a'][0]) + 182 <class sklearn.pipeline.Pipeline> + 183 >>> pipeline = models['Linear Regression']['Standard Scale']['a'][0] + 184 >>> x_new = pd.read_csv('independent_new.csv') + 185 >>> x_new + 186 | | a | b | + 187 | 0 |3.5|2.7| + 188 | 1 |4.0|1.1| + 189 |...|...|...| + 190 |100|2.3|2.1| + 191 >>> pipeline.transform(x_new) + 192 | | a | + 193 | 0 |9.7| + 194 | 1 |9.1| + 195 |...|...| + 196 |100|6.7| + 197 + 198 """ + 199 + 200 def __init__( + 201 self, + 202 x_data: pd.DataFrame, + 203 y_data: pd.DataFrame, + 204 target: str, + 205 folds: int = 5, + 206 strat_groups: int = 10, + 207 scaler: Union[ + 208 Iterable[ + 209 Literal[ + 210 'None', + 211 'Standard Scale', + 212 'MinMax Scale', + 213 'Yeo-Johnson Transform', + 214 'Box-Cox Transform', + 215 'Quantile Transform (Uniform)', + 216 'Quantile Transform (Gaussian)' + 217 ] + 218 ], + 219 Literal[ + 220 'All', + 221 'None', + 222 'Standard Scale', + 223 'MinMax Scale', + 224 'Yeo-Johnson Transform', + 225 'Box-Cox Transform', + 226 'Quantile Transform (Uniform)', + 227 'Quantile Transform (Gaussian)', + 228 ] + 229 ] = 'None', + 230 seed: int = 62 + 231 ): + 232 """Initialises class + 233 + 234 Used to compare one set of measurements against another. + 235 It can perform both univariate and multivariate regression, though + 236 some techniques can only do one or the other. Multivariate regression + 237 can only be performed when secondary variables are provided. + 238 + 239 Parameters + 240 ---------- + 241 x_data : pd.DataFrame + 242 Data to be calibrated. + 243 y_data : pd.DataFrame + 244 'True' data to calibrate against. + 245 target : str + 246 Column name of the primary feature to use in calibration, must be + 247 the name of a column in both `x_data` and `y_data`. + 248 folds : int, default=5 + 249 Number of folds to split the data into, using stratified k-fold. 
+ 250 strat_groups : int, default=10 + 251 Number of groups to stratify against, the data will be split into + 252 n equally sized bins where n is the value of `strat_groups`. + 253 scaler : iterable of {<br>\ + 254 'None',<br>\ + 255 'Standard Scale',<br>\ + 256 'MinMax Scale',<br>\ + 257 'Yeo-Johnson Transform',<br>\ + 258 'Box-Cox Transform',<br>\ + 259 'Quantile Transform (Uniform)',<br>\ + 260 'Quantile Transform (Gaussian)',<br>\ + 261 } or {<br>\ + 262 'All',<br>\ + 263 'None',<br>\ + 264 'Standard Scale',<br>\ + 265 'MinMax Scale',<br>\ + 266 'Yeo-Johnson Transform',<br>\ + 267 'Box-Cox Transform',<br>\ + 268 'Quantile Transform (Uniform)',<br>\ + 269 'Quantile Transform (Gaussian)',<br>\ + 270 }, default='None' + 271 The scaling/transform method (or list of methods) to apply to the + 272 data + 273 seed : int, default=62 + 274 Random state to use when shuffling and splitting the data into n + 275 folds. Ensures repeatability. + 276 + 277 Raises + 278 ------ + 279 ValueError + 280 Raised if the target variables (e.g. 'NO2') is not a column name in + 281 both dataframes. + 282 Raised if `scaler` is not str, tuple or list + 283 """ + 284 if target not in x_data.columns or target not in y_data.columns: + 285 raise ValueError( + 286 f"{target} does not exist in both columns." + 287 ) + 288 join_index = x_data.join( + 289 y_data, + 290 how='inner', + 291 lsuffix='x', + 292 rsuffix='y' + 293 ).dropna().index + 294 """ + 295 The common indices between `x_data` and `y_data`, excluding missing + 296 values + 297 """ + 298 self.x_data: pd.DataFrame = x_data.loc[join_index, :] + 299 """ + 300 The data to be calibrated. + 301 """ + 302 self.target: str = target + 303 """ + 304 The name of the column in both `x_data` and `y_data` that + 305 will be used as the x and y variables in the calibration. 
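Each scaler keyword accepted here maps onto a scikit-learn preprocessor (the mapping is the `scaler_list` attribute defined just below). The short sketch that follows runs the same set of transforms on a small positive-valued frame, and notes why 'Box-Cox Transform' is dropped when measurements are not all non-negative: scikit-learn's Box-Cox mode raises a ValueError on zero or negative values.

```python
import numpy as np
import pandas as pd
from sklearn import preprocessing as pre

# Strictly positive toy data so every transform, including Box-Cox, is valid
x = pd.DataFrame({"a": np.linspace(0.1, 10, 1000), "b": np.linspace(5, 1, 1000)})

scalers = {
    "Standard Scale": pre.StandardScaler(),
    "MinMax Scale": pre.MinMaxScaler(),
    "Yeo-Johnson Transform": pre.PowerTransformer(method="yeo-johnson"),
    "Box-Cox Transform": pre.PowerTransformer(method="box-cox"),
    "Quantile Transform (Uniform)": pre.QuantileTransformer(output_distribution="uniform"),
    "Quantile Transform (Gaussian)": pre.QuantileTransformer(output_distribution="normal"),
}

for label, scaler in scalers.items():
    # fit_transform returns an ndarray; print the first transformed row
    print(label, scaler.fit_transform(x)[0])
```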
+ 306 """ + 307 self.scaler_list: dict[str, Any] = { + 308 'None': None, + 309 'Standard Scale': pre.StandardScaler(), + 310 'MinMax Scale': pre.MinMaxScaler(), + 311 'Yeo-Johnson Transform': pre.PowerTransformer( + 312 method='yeo-johnson' + 313 ), + 314 'Box-Cox Transform': pre.PowerTransformer(method='box-cox'), + 315 'Quantile Transform (Uniform)': pre.QuantileTransformer( + 316 output_distribution='uniform' + 317 ), + 318 'Quantile Transform (Gaussian)': pre.QuantileTransformer( + 319 output_distribution='normal' + 320 ) + 321 } + 322 """ + 323 Keys for scaling algorithms available in the pipelines + 324 """ + 325 self.scaler: list[str] = list() + 326 """ + 327 The scaling algorithm(s) to preprocess the data with + 328 """ + 329 if isinstance(scaler, str): + 330 if scaler == "All": + 331 if not bool(self.x_data.ge(0).all(axis=None)): + 332 warnings.warn( + 333 'Box-Cox is not compatible with provided measurements' + 334 ) + 335 self.scaler_list.pop('Box-Cox Transform') + 336 self.scaler.extend(self.scaler_list.keys()) + 337 elif scaler in self.scaler_list.keys(): + 338 self.scaler.append(scaler) + 339 else: + 340 self.scaler.append('None') + 341 warnings.warn(f'Scaling algorithm {scaler} not recognised') + 342 elif isinstance(scaler, (tuple, list)): + 343 for sc in scaler: + 344 if sc == 'Box-Cox Transform' and not bool( + 345 self.x_data.ge(0).all(axis=None) + 346 ): + 347 warnings.warn( + 348 'Box-Cox is not compatible with provided measurements' + 349 ) + 350 continue + 351 if sc in self.scaler_list.keys(): + 352 self.scaler.append(sc) + 353 else: + 354 warnings.warn(f'Scaling algorithm {sc} not recognised') + 355 else: + 356 raise ValueError( + 357 'scaler parameter should be string, list or tuple' + 358 ) + 359 if not self.scaler: + 360 warnings.warn( + 361 'No valid scaling algorithms provided, defaulting to None' + 362 ) + 363 self.scaler.append('None') + 364 + 365 self.y_data = cont_strat_folds( + 366 y_data.loc[join_index, :], + 367 target, + 368 folds, + 369 strat_groups, + 370 seed + 371 ) + 372 """ + 373 The data that `x_data` will be calibrated against. A '*Fold*' + 374 column is added using the `const_strat_folds` function which splits + 375 the data into k stratified folds (where k is the value of + 376 `folds`). It splits the continuous measurements into n bins (where n + 377 is the value of `strat_groups`) and distributes each bin equally + 378 across all folds. This significantly reduces the chances of one fold + 379 containing a skewed distribution relative to the whole dataset. + 380 """ + 381 self.models: dict[str, # Technique name + 382 dict[str, # Scaling technique + 383 dict[str, # Variable combo + 384 dict[int, # Fold + 385 Pipeline]]]] = dict() + 386 """ + 387 The calibrated models. They are stored in a nested structure as + 388 follows: + 389 1. Primary Key, name of the technique (e.g Lasso Regression). + 390 2. Scaling technique (e.g Yeo-Johnson Transform). + 391 3. Combination of variables used or `target` if calibration is + 392 univariate (e.g "`target` + a + b). + 393 4. Fold, which fold was used excluded from the calibration. If data + 394 if 5-fold cross validated, a key of 4 indicates the data was trained on + 395 folds 0-3. 
+ 396 + 397 ```mermaid + 398 stateDiagram-v2 + 399 models --> Technique + 400 state Technique { + 401 [*] --> Scaling + 402 [*]: The calibration technique used + 403 [*]: (e.g "Lasso Regression") + 404 state Scaling { + 405 [*] --> Variables + 406 [*]: The scaling technique used + 407 [*]: (e.g "Yeo-Johnson Transform") + 408 state Variables { + 409 [*] : The combination of variables used + 410 [*] : (e.g "x + a + b") + 411 [*] --> Fold + 412 state Fold { + 413 [*] : Which fold was excluded from training data + 414 [*] : (e.g 4 indicates folds 0-3 were used to train) + 415 } + 416 } + 417 } + 418 } + 419 ``` + 420 + 421 """ + 422 self.folds: int = folds + 423 """ + 424 The number of folds used in k-fold cross validation + 425 """ + 426 + 427 def _sklearn_regression_meta( + 428 self, + 429 reg: Union[ + 430 skl.base.RegressorMixin, + 431 RandomizedSearchCV, + 432 Literal['t', 'gaussian'] + 433 ], + 434 name: str, + 435 min_coeffs: int = 1, + 436 max_coeffs: int = (sys.maxsize * 2) + 1, + 437 random_search: bool = False + 438 ): + 439 """ + 440 Metaclass, formats data and uses sklearn classifier to + 441 fit x to y + 442 + 443 Parameters + 444 ---------- + 445 reg : sklearn.base.RegressorMixin or str + 446 Classifier to use, or distribution family to use for bayesian. + 447 name : str + 448 Name of classification technique to save pipeline to. + 449 min_coeffs : int, default=1 + 450 Minimum number of coefficients for technique. + 451 max_coeffs : int, default=(sys.maxsize * 2) + 1 + 452 Maximum number of coefficients for technique. + 453 random_search : bool + 454 Whether RandomizedSearch is used or not + 455 + 456 Raises + 457 ------ + 458 NotImplementedError + 459 PyMC currently doesn't work, TODO + 460 """ + 461 x_secondary_cols = self.x_data.drop(self.target, axis=1).columns + 462 # All columns in x_data that aren't the target variable + 463 products = [[np.nan, col] for col in x_secondary_cols] + 464 secondary_vals = pd.MultiIndex.from_product(products) + 465 # Get all possible combinations of secondary variables in a pandas + 466 # MultiIndex + 467 if self.models.get(name) is None: + 468 self.models[name] = dict() + 469 # If the classification technique hasn't been used yet, + 470 # add its key to the models dictionary + 471 for scaler in self.scaler: + 472 if self.models[name].get(scaler) is None: + 473 self.models[name][scaler] = dict() + 474 # If the scaling technique hasn't been used with the + 475 # classification + 476 # technique yet, add its key to the nested dictionary + 477 for sec_vals in secondary_vals: + 478 # Loop over all combinations of secondary values + 479 vals = [self.target] + [v for v in sec_vals if v == v] + 480 vals_str = ' + '.join(vals) + 481 if len(vals) < min_coeffs or len(vals) > max_coeffs: + 482 # Skip if number of coeffs doesn't lie within acceptable + 483 # range + 484 # for technique. 
For example, isotonic regression + 485 # only works with one variable + 486 continue + 487 self.models[name][scaler][vals_str] = dict() + 488 if random_search: + 489 pipeline = Pipeline([ + 490 ("Selector", ColumnTransformer([ + 491 ("selector", "passthrough", vals) + 492 ], remainder="drop") + 493 ), + 494 ("Scaler", self.scaler_list[scaler]), + 495 ("Regression", reg) + 496 ]) + 497 pipeline.fit( + 498 self.x_data, + 499 self.y_data.loc[:, self.target] + 500 ) + 501 self.models[name][scaler][vals_str][0] = dc(pipeline) + 502 continue + 503 + 504 for fold in self.y_data.loc[:, 'Fold'].unique(): + 505 y_data = self.y_data[ + 506 self.y_data.loc[:, 'Fold'] != fold + 507 ] + 508 if reg in ['t', 'gaussian']: + 509 # If using PyMC bayesian model, + 510 # format data and build model using bambi + 511 # then store result in pipeline + 512 # Currently doesn't work as PyMC models + 513 # can't be pickled, so don't function with deepcopy. + 514 # Needs looking into + 515 raise NotImplementedError( + 516 "PyMC functions currently don't work with deepcopy" + 517 ) + 518 # sc = scalers[scaler] + 519 # if sc is not None: + 520 # x_data = sc.fit_transform( + 521 # self.x_data.loc[y_data.index, :] + 522 # ) + 523 # else: + 524 # x_data = self.x_data.loc[y_data.index, :] + 525 # x_data['y'] = y_data.loc[:, self.target] + 526 # model = bmb.Model( + 527 # f"y ~ {vals_str}", + 528 # x_data, + 529 # family=reg + 530 # ) + 531 # _ = model.fit( + 532 # progressbar=False, + 533 # **kwargs + 534 # ) + 535 # pipeline = Pipeline([ + 536 # ("Scaler", scaler), + 537 # ("Regression", model) + 538 # ]) + 539 else: + 540 # If using scikit-learn API compatible classifier, + 541 # Build pipeline and fit to + 542 pipeline = Pipeline([ + 543 ("Selector", ColumnTransformer([ + 544 ("selector", "passthrough", vals) + 545 ], remainder="drop") + 546 ), + 547 ("Scaler", self.scaler_list[scaler]), + 548 ("Regression", reg) + 549 ]) + 550 pipeline.fit( + 551 self.x_data.loc[y_data.index, :], + 552 y_data.loc[:, self.target] + 553 ) + 554 self.models[name][scaler][vals_str][fold] = dc(pipeline) + 555 + 556 def pymc_bayesian( + 557 self, + 558 family: Literal[ + 559 "Gaussian", + 560 "Student T", + 561 ] = "Gaussian", + 562 name: str = " PyMC Bayesian", + 563 **kwargs + 564 ): + 565 """ + 566 Performs bayesian linear regression (either uni or multivariate) + 567 fitting x on y. + 568 + 569 Performs bayesian linear regression, both univariate and multivariate, + 570 on X against y. More details can be found at: + 571 https://pymc.io/projects/examples/en/latest/generalized_linear_models/ + 572 GLM-robust.html + 573 + 574 Parameters + 575 ---------- + 576 family : {'Gaussian', 'Student T'}, default='Gaussian' + 577 Statistical distribution to fit measurements to. 
Options are: + 578 - Gaussian + 579 - Student T + 580 """ + 581 # Define model families + 582 model_families: dict[str, Literal['t', 'gaussian']] = { + 583 "Gaussian": 'gaussian', + 584 "Student T": 't' + 585 } + 586 self._sklearn_regression_meta( + 587 model_families[family], + 588 f'{name} ({model_families})', + 589 **kwargs + 590 ) + 591 + 592 def linreg( + 593 self, + 594 name: str = "Linear Regression", + 595 random_search: bool = False, + 596 parameters: dict[ + 597 str, + 598 Union[ + 599 scipy.stats.rv_continuous, + 600 List[Union[int, str, float]] + 601 ] + 602 ] = { + 603 }, + 604 **kwargs + 605 ): 606 """ - 607 Fit x on y via lasso regression + 607 Fit x on y via linear regression 608 609 Parameters 610 ---------- - 611 name : str, default="Lasso Regression" - 612 Name of classification technique - 613 """ - 614 self._sklearn_regression_meta( - 615 lm.Lasso(**kwargs), - 616 name - 617 ) - 618 - 619 def lasso_cv( - 620 self, - 621 name: str = "Lasso Regression (Cross Validated)", - 622 **kwargs - 623 ): - 624 """ - 625 Fit x on y via cross-validated lasso regression - 626 - 627 Parameters - 628 ---------- - 629 name : str, default="Lasso Regression (Cross Validated)" - 630 Name of classification technique - 631 """ + 611 name : str, default="Linear Regression" + 612 Name of classification technique. + 613 random_search : bool, default=False + 614 Whether to perform RandomizedSearch to optimise parameters + 615 parameters : dict[ + 616 str, + 617 Union[ + 618 scipy.stats.rv_continuous, + 619 List[Union[int, str, float]] + 620 ] + 621 ], default=Preset distributions + 622 The parameters used in RandomizedSearchCV + 623 """ + 624 if random_search: + 625 classifier = RandomizedSearchCV( + 626 lm.LinearRegression(**kwargs), + 627 parameters, + 628 cv=self.folds + 629 ) + 630 else: + 631 classifier = lm.LinearRegression(**kwargs) 632 self._sklearn_regression_meta( - 633 lm.LassoCV(**kwargs), - 634 name - 635 ) - 636 - 637 def multi_task_lasso( - 638 self, - 639 name: str = "Multi-task Lasso Regression", - 640 **kwargs - 641 ): - 642 """ - 643 Fit x on y via multitask lasso regression - 644 - 645 Parameters - 646 ---------- - 647 name : str, default="Multi-task Lasso Regression" - 648 Name of classification technique - 649 """ - 650 self._sklearn_regression_meta( - 651 lm.MultiTaskLasso(**kwargs), - 652 name - 653 ) - 654 - 655 def multi_task_lasso_cv( - 656 self, - 657 name: str = "Multi-task Lasso Regression (Cross Validated)", - 658 **kwargs - 659 ): - 660 """ - 661 Fit x on y via cross validated multitask lasso regression - 662 - 663 Parameters - 664 ---------- - 665 name : str, default="Multi-task Lasso Regression (Cross Validated)" - 666 Name of classification technique - 667 """ - 668 self._sklearn_regression_meta( - 669 lm.MultiTaskLassoCV(**kwargs), - 670 name - 671 ) - 672 - 673 def elastic_net(self, name: str = "Elastic Net Regression", **kwargs): - 674 """ - 675 Fit x on y via elastic net regression - 676 - 677 Parameters - 678 ---------- - 679 name : str, default="Elastic Net Regression" - 680 Name of classification technique - 681 """ - 682 self._sklearn_regression_meta( - 683 lm.ElasticNet(**kwargs), - 684 name - 685 ) - 686 - 687 def elastic_net_cv( - 688 self, - 689 name: str = "Elastic Net Regression (Cross Validated)", - 690 **kwargs - 691 ): - 692 """ - 693 Fit x on y via cross validated elastic net regression + 633 classifier, + 634 f'{name}{" (Random Search)" if random_search else ""}', + 635 random_search=random_search + 636 ) + 637 + 638 def ridge( + 639 self, + 
640 name: str = "Ridge Regression", + 641 random_search: bool = False, + 642 parameters: dict[ + 643 str, + 644 Union[ + 645 scipy.stats.rv_continuous, + 646 List[Union[int, str, float]] + 647 ] + 648 ] = { + 649 'alpha': uniform(loc=0, scale=2), + 650 'tol': uniform(loc=0, scale=1), + 651 'solver': [ + 652 'svd', + 653 'cholesky', + 654 'lsqr', + 655 'sparse_cg', + 656 'sag', + 657 'saga', + 658 'lbfgs' + 659 ] + 660 }, + 661 **kwargs + 662 ): + 663 """ + 664 Fit x on y via ridge regression + 665 + 666 Parameters + 667 ---------- + 668 name : str, default="Ridge Regression" + 669 Name of classification technique. + 670 random_search : bool, default=False + 671 Whether to perform RandomizedSearch to optimise parameters + 672 parameters : dict[ + 673 str, + 674 Union[ + 675 scipy.stats.rv_continuous, + 676 List[Union[int, str, float]] + 677 ] + 678 ], default=Preset distributions + 679 The parameters used in RandomizedSearchCV + 680 """ + 681 if random_search: + 682 classifier = RandomizedSearchCV( + 683 lm.Ridge(**kwargs), + 684 parameters, + 685 cv=self.folds + 686 ) + 687 else: + 688 classifier = lm.Ridge(**kwargs) + 689 self._sklearn_regression_meta( + 690 classifier, + 691 f'{name}{" (Random Search)" if random_search else ""}', + 692 random_search=random_search + 693 ) 694 - 695 Parameters - 696 ---------- - 697 name : str, default="Elastic Net Regression (Cross Validated)" - 698 Name of classification technique - 699 """ - 700 self._sklearn_regression_meta( - 701 lm.ElasticNetCV(**kwargs), - 702 name - 703 ) + 695 def ridge_cv( + 696 self, + 697 name: str = "Ridge Regression (Cross Validated)", + 698 random_search: bool = False, + 699 **kwargs + 700 ): + 701 """ + 702 Fit x on y via cross-validated ridge regression. + 703 Already cross validated so random search not required 704 - 705 def multi_task_elastic_net( - 706 self, - 707 name: str = "Multi-Task Elastic Net Regression", - 708 **kwargs - 709 ): - 710 """ - 711 Fit x on y via multi-task elastic net regression - 712 - 713 Parameters - 714 ---------- - 715 name : str, default="Multi-task Elastic Net Regression" - 716 Name of classification technique - 717 """ - 718 self._sklearn_regression_meta( - 719 lm.MultiTaskElasticNet(**kwargs), - 720 name - 721 ) - 722 - 723 def multi_task_elastic_net_cv( - 724 self, - 725 name: str = "Multi-Task Elastic Net Regression (Cross Validated)", - 726 **kwargs - 727 ): - 728 """ - 729 Fit x on y via cross validated multi-task elastic net regression - 730 - 731 Parameters - 732 ---------- - 733 name : str, default="Multi-Task Elastic Net Regression\ - 734 (Cross Validated)" - 735 Name of classification technique - 736 """ - 737 self._sklearn_regression_meta( - 738 lm.MultiTaskElasticNetCV(**kwargs), - 739 name - 740 ) - 741 - 742 def lars(self, name: str = "Least Angle Regression", **kwargs): - 743 """ - 744 Fit x on y via least angle regression - 745 - 746 Parameters - 747 ---------- - 748 name : str, default="Least Angle Regression" - 749 Name of classification technique. 
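When `random_search=True`, these methods wrap the base estimator in `RandomizedSearchCV`, drawing hyperparameters from the scipy distributions or lists supplied via `parameters` and cross-validating with `cv=self.folds`. A condensed sketch of that wrapping for ridge regression on synthetic data, using the preset distributions shown above (minus the 'lbfgs' solver, which scikit-learn only allows together with `positive=True`):

```python
import numpy as np
from scipy.stats import uniform
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV

rng = np.random.default_rng(62)
X = rng.normal(size=(200, 2))
y = X @ np.array([1.3, -0.4]) + rng.normal(scale=0.1, size=200)

parameters = {
    "alpha": uniform(loc=0, scale=2),   # continuous distributions are sampled by the search
    "tol": uniform(loc=0, scale=1),
    "solver": ["svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga"],
}

search = RandomizedSearchCV(Ridge(), parameters, cv=5, random_state=62)
search.fit(X, y)
print(search.best_params_)
```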
- 750 """ - 751 self._sklearn_regression_meta( - 752 lm.Lars(**kwargs), - 753 name - 754 ) - 755 - 756 def lars_lasso( - 757 self, - 758 name: str = "Least Angle Regression (Lasso)", - 759 **kwargs - 760 ): - 761 """ - 762 Fit x on y via lasso least angle regression - 763 - 764 Parameters - 765 ---------- - 766 name : str, default="Least Angle Regression (Lasso)" - 767 Name of classification technique - 768 """ - 769 self._sklearn_regression_meta( - 770 lm.LassoLars(**kwargs), - 771 name - 772 ) - 773 - 774 def omp(self, name: str = "Orthogonal Matching Pursuit", **kwargs): + 705 Parameters + 706 ---------- + 707 name : str, default="Ridge Regression (Cross Validated)" + 708 Name of classification technique + 709 random_search : bool, default=False + 710 Not used + 711 + 712 """ + 713 _ = random_search + 714 self._sklearn_regression_meta( + 715 lm.RidgeCV(**kwargs, cv=self.folds), + 716 name, + 717 random_search=True + 718 ) + 719 + 720 def lasso( + 721 self, + 722 name: str = "Lasso Regression", + 723 random_search: bool = False, + 724 parameters: dict[ + 725 str, + 726 Union[ + 727 scipy.stats.rv_continuous, + 728 List[Union[int, str, float]] + 729 ] + 730 ] = { + 731 'alpha': uniform(loc=0, scale=2), + 732 'tol': uniform(loc=0, scale=1), + 733 'selection': ['cyclic', 'random'] + 734 }, + 735 **kwargs + 736 ): + 737 """ + 738 Fit x on y via lasso regression + 739 + 740 Parameters + 741 ---------- + 742 name : str, default="Lasso Regression" + 743 Name of classification technique. + 744 random_search : bool, default=False + 745 Whether to perform RandomizedSearch to optimise parameters + 746 parameters : dict[ + 747 str, + 748 Union[ + 749 scipy.stats.rv_continuous, + 750 List[Union[int, str, float]] + 751 ] + 752 ], default=Preset distributions + 753 The parameters used in RandomizedSearchCV + 754 """ + 755 if random_search: + 756 classifier = RandomizedSearchCV( + 757 lm.Lasso(**kwargs), + 758 parameters, + 759 cv=self.folds + 760 ) + 761 else: + 762 classifier = lm.Lasso(**kwargs) + 763 self._sklearn_regression_meta( + 764 classifier, + 765 f'{name}{" (Random Search)" if random_search else ""}', + 766 random_search=random_search + 767 ) + 768 + 769 def lasso_cv( + 770 self, + 771 name: str = "Lasso Regression (Cross Validated)", + 772 random_search: bool = False, + 773 **kwargs + 774 ): 775 """ - 776 Fit x on y via orthogonal matching pursuit regression - 777 - 778 Parameters - 779 ---------- - 780 name : str, default="Orthogonal Matching Pursuit" - 781 Name of classification technique - 782 """ - 783 self._sklearn_regression_meta( - 784 lm.OrthogonalMatchingPursuit(**kwargs), - 785 name, - 786 min_coeffs=2 - 787 ) - 788 - 789 def bayesian_ridge( - 790 self, - 791 name: str = "Bayesian Ridge Regression", - 792 **kwargs - 793 ): - 794 """ - 795 Fit x on y via bayesian ridge regression - 796 - 797 Parameters - 798 ---------- - 799 name : str, default="Bayesian Ridge Regression" - 800 Name of classification technique. - 801 """ - 802 self._sklearn_regression_meta( - 803 lm.BayesianRidge(**kwargs), - 804 name - 805 ) - 806 - 807 def bayesian_ard( - 808 self, - 809 name: str = "Bayesian Automatic Relevance Detection", - 810 **kwargs - 811 ): - 812 """ - 813 Fit x on y via bayesian automatic relevance detection - 814 - 815 Parameters - 816 ---------- - 817 name : str, default="Bayesian Automatic Relevance Detection" - 818 Name of classification technique. 
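The `*_cv` variants skip the random search because the underlying estimators already tune their own regularisation by internal cross-validation; the wrapper simply forwards `cv=self.folds`. A short sketch of what that looks like for `LassoCV` on synthetic data:

```python
import numpy as np
from sklearn.linear_model import LassoCV

rng = np.random.default_rng(62)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.5, 0.0, -2.0]) + rng.normal(scale=0.1, size=200)

# cv=5 mirrors the cv=self.folds argument passed by lasso_cv()
model = LassoCV(cv=5).fit(X, y)
print(model.alpha_)   # regularisation strength chosen by the internal cross-validation
print(model.coef_)
```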
- 819 """ - 820 self._sklearn_regression_meta( - 821 lm.ARDRegression(**kwargs), - 822 name - 823 ) - 824 - 825 def tweedie(self, name: str = "Tweedie Regression", **kwargs): - 826 """ - 827 Fit x on y via tweedie regression - 828 - 829 Parameters - 830 ---------- - 831 name : str, default="Tweedie Regression" - 832 Name of classification technique. - 833 """ - 834 self._sklearn_regression_meta( - 835 lm.TweedieRegressor(**kwargs), - 836 name - 837 ) - 838 - 839 def stochastic_gradient_descent( - 840 self, - 841 name: str = "Stochastic Gradient Descent", - 842 **kwargs - 843 ): - 844 """ - 845 Fit x on y via stochastic gradient descent regression - 846 - 847 Parameters - 848 ---------- - 849 name : str, default="Stochastic Gradient Descent" - 850 Name of classification technique. - 851 """ - 852 self._sklearn_regression_meta( - 853 lm.SGDRegressor(**kwargs), - 854 name - 855 ) - 856 - 857 def passive_aggressive( - 858 self, - 859 name: str = "Passive Agressive Regression", - 860 **kwargs - 861 ): - 862 """ - 863 Fit x on y via passive aggressive regression - 864 - 865 Parameters - 866 ---------- - 867 name : str, default="Passive Agressive Regression" - 868 Name of classification technique. - 869 """ - 870 self._sklearn_regression_meta( - 871 lm.PassiveAggressiveRegressor(**kwargs), - 872 name - 873 ) - 874 - 875 def ransac(self, name: str = "RANSAC", **kwargs): - 876 """ - 877 Fit x on y via RANSAC regression - 878 - 879 Parameters - 880 ---------- - 881 name : str, default="RANSAC" - 882 Name of classification technique. - 883 """ - 884 self._sklearn_regression_meta( - 885 lm.RANSACRegressor(**kwargs), - 886 name - 887 ) + 776 Fit x on y via cross-validated lasso regression. + 777 Already cross validated so random search not required + 778 + 779 Parameters + 780 ---------- + 781 name : str, default="Lasso Regression (Cross Validated)" + 782 Name of classification technique + 783 random_search : bool, default=False + 784 Not used + 785 + 786 """ + 787 _ = random_search + 788 self._sklearn_regression_meta( + 789 lm.LassoCV(**kwargs, cv=self.folds), + 790 name, + 791 random_search=True + 792 ) + 793 + 794 def multi_task_lasso( + 795 self, + 796 name: str = "Multi-task Lasso Regression", + 797 random_search: bool = False, + 798 parameters: dict[ + 799 str, + 800 Union[ + 801 scipy.stats.rv_continuous, + 802 List[Union[int, str, float]] + 803 ] + 804 ] = { + 805 'alpha': uniform(loc=0, scale=2), + 806 'tol': uniform(loc=0, scale=1), + 807 'selection': ['cyclic', 'random'] + 808 }, + 809 **kwargs + 810 ): + 811 """ + 812 Fit x on y via multitask lasso regression + 813 + 814 Parameters + 815 ---------- + 816 name : str, default="Multi-task Lasso Regression" + 817 Name of classification technique. 
+ 818 random_search : bool, default=False + 819 Whether to perform RandomizedSearch to optimise parameters + 820 parameters : dict[ + 821 str, + 822 Union[ + 823 scipy.stats.rv_continuous, + 824 List[Union[int, str, float]] + 825 ] + 826 ], default=Preset distributions + 827 The parameters used in RandomizedSearchCV + 828 """ + 829 if random_search: + 830 classifier = RandomizedSearchCV( + 831 lm.MultiTaskLasso(**kwargs), + 832 parameters, + 833 cv=self.folds + 834 ) + 835 else: + 836 classifier = lm.MultiTaskLasso(**kwargs) + 837 self._sklearn_regression_meta( + 838 classifier, + 839 f'{name}{" (Random Search)" if random_search else ""}', + 840 random_search=random_search + 841 ) + 842 + 843 def multi_task_lasso_cv( + 844 self, + 845 name: str = "Multi-task Lasso Regression (Cross Validated)", + 846 random_search: bool = False, + 847 **kwargs + 848 ): + 849 """ + 850 Fit x on y via cross-validated multitask lasso regression. + 851 Already cross validated so random search not required + 852 + 853 Parameters + 854 ---------- + 855 name : str, default="Multi-task Lasso Regression (Cross Validated)" + 856 Name of classification technique + 857 random_search : bool, default=False + 858 Not used + 859 + 860 """ + 861 _ = random_search + 862 self._sklearn_regression_meta( + 863 lm.MultiTaskLassoCV(**kwargs, cv=self.folds), + 864 name, + 865 random_search=True + 866 ) + 867 + 868 def elastic_net( + 869 self, + 870 name: str = "Elastic Net Regression", + 871 random_search: bool = False, + 872 parameters: dict[ + 873 str, + 874 Union[ + 875 scipy.stats.rv_continuous, + 876 List[Union[int, str, float]] + 877 ] + 878 ] = { + 879 'alpha': uniform(loc=0, scale=2), + 880 'l1_ratio': uniform(loc=0, scale=1), + 881 'tol': uniform(loc=0, scale=1), + 882 'selection': ['cyclic', 'random'] + 883 }, + 884 **kwargs + 885 ): + 886 """ + 887 Fit x on y via elastic net regression 888 - 889 def theil_sen(self, name: str = "Theil-Sen Regression", **kwargs): - 890 """ - 891 Fit x on y via theil-sen regression - 892 - 893 Parameters - 894 ---------- - 895 name : str, default="Theil-Sen Regression" - 896 Name of classification technique. - 897 -Sen Regression - 898 """ - 899 self._sklearn_regression_meta( - 900 lm.TheilSenRegressor(**kwargs), - 901 name - 902 ) - 903 - 904 def huber(self, name: str = "Huber Regression", **kwargs): - 905 """ - 906 Fit x on y via huber regression - 907 - 908 Parameters - 909 ---------- - 910 name : str, default="Huber Regression" - 911 Name of classification technique. - 912 """ - 913 self._sklearn_regression_meta( - 914 lm.HuberRegressor(**kwargs), - 915 name - 916 ) + 889 Parameters + 890 ---------- + 891 name : str, default="Elastic Net Regression" + 892 Name of classification technique. 
+ 893 random_search : bool, default=False + 894 Whether to perform RandomizedSearch to optimise parameters + 895 parameters : dict[ + 896 str, + 897 Union[ + 898 scipy.stats.rv_continuous, + 899 List[Union[int, str, float]] + 900 ] + 901 ], default=Preset distributions + 902 The parameters used in RandomizedSearchCV + 903 """ + 904 if random_search: + 905 classifier = RandomizedSearchCV( + 906 lm.ElasticNet(**kwargs), + 907 parameters, + 908 cv=self.folds + 909 ) + 910 else: + 911 classifier = lm.ElasticNet(**kwargs) + 912 self._sklearn_regression_meta( + 913 classifier, + 914 f'{name}{" (Random Search)" if random_search else ""}', + 915 random_search=random_search + 916 ) 917 - 918 def quantile(self, name: str = "Quantile Regression", **kwargs): - 919 """ - 920 Fit x on y via quantile regression - 921 - 922 Parameters - 923 ---------- - 924 name : str, default="Quantile Regression" - 925 Name of classification technique. - 926 """ - 927 self._sklearn_regression_meta( - 928 lm.QuantileRegressor(**kwargs), - 929 name - 930 ) - 931 - 932 def decision_tree(self, name: str = "Decision Tree", **kwargs): - 933 """ - 934 Fit x on y using a decision tree - 935 - 936 Parameters - 937 ---------- - 938 name : str, default="Decision Tree" - 939 Name of classification technique. - 940 """ - 941 self._sklearn_regression_meta( - 942 tree.DecisionTreeRegressor(**kwargs), - 943 name - 944 ) - 945 - 946 def extra_tree(self, name: str = "Extra Tree", **kwargs): - 947 """ - 948 Fit x on y using an extra tree - 949 - 950 Parameters - 951 ---------- - 952 name : str, default="Extra Tree" - 953 Name of classification technique. - 954 """ - 955 self._sklearn_regression_meta( - 956 tree.ExtraTreeRegressor(**kwargs), - 957 name - 958 ) - 959 - 960 def random_forest(self, name: str = "Random Forest", **kwargs): - 961 """ - 962 Fit x on y using a random forest - 963 - 964 Parameters - 965 ---------- - 966 name : str, default="Random Forest" - 967 Name of classification technique. - 968 """ - 969 self._sklearn_regression_meta( - 970 en.RandomForestRegressor(**kwargs), - 971 name - 972 ) - 973 - 974 def extra_trees_ensemble( - 975 self, - 976 name: str = "Extra Trees Ensemble", - 977 **kwargs - 978 ): - 979 """ - 980 Fit x on y using an ensemble of extra trees - 981 - 982 Parameters - 983 ---------- - 984 name : str, default="Extra Trees Ensemble" - 985 Name of classification technique. - 986 """ - 987 self._sklearn_regression_meta( - 988 en.ExtraTreesRegressor(**kwargs), - 989 name - 990 ) + 918 def elastic_net_cv( + 919 self, + 920 name: str = "Elastic Net Regression (Cross Validated)", + 921 random_search: bool = False, + 922 **kwargs + 923 ): + 924 """ + 925 Fit x on y via cross-validated elastic regression. 
+ 926 Already cross validated so random search not required + 927 + 928 Parameters + 929 ---------- + 930 name : str, default="Lasso Regression (Cross Validated)" + 931 Name of classification technique + 932 random_search : bool, default=False + 933 Not used + 934 """ + 935 _ = random_search + 936 self._sklearn_regression_meta( + 937 lm.ElasticNetCV(**kwargs, cv=self.folds), + 938 name, + 939 random_search=True + 940 ) + 941 + 942 def multi_task_elastic_net( + 943 self, + 944 name: str = "Multi-task Elastic Net Regression", + 945 random_search: bool = False, + 946 parameters: dict[ + 947 str, + 948 Union[ + 949 scipy.stats.rv_continuous, + 950 List[Union[int, str, float]] + 951 ] + 952 ] = { + 953 'alpha': uniform(loc=0, scale=2), + 954 'l1_ratio': uniform(loc=0, scale=1), + 955 'tol': uniform(loc=0, scale=1), + 956 'selection': ['cyclic', 'random'] + 957 }, + 958 **kwargs + 959 ): + 960 """ + 961 Fit x on y via elastic net regression + 962 + 963 Parameters + 964 ---------- + 965 name : str, default="Multi-task Elastic Net Regression" + 966 Name of classification technique. + 967 random_search : bool, default=False + 968 Whether to perform RandomizedSearch to optimise parameters + 969 parameters : dict[ + 970 str, + 971 Union[ + 972 scipy.stats.rv_continuous, + 973 List[Union[int, str, float]] + 974 ] + 975 ], default=Preset distributions + 976 The parameters used in RandomizedSearchCV + 977 """ + 978 if random_search: + 979 classifier = RandomizedSearchCV( + 980 lm.MultiTaskElasticNet(**kwargs), + 981 parameters, + 982 cv=self.folds + 983 ) + 984 else: + 985 classifier = lm.MultiTaskElasticNet(**kwargs) + 986 self._sklearn_regression_meta( + 987 classifier, + 988 f'{name}{" (Random Search)" if random_search else ""}', + 989 random_search=random_search + 990 ) 991 - 992 def gradient_boost_regressor( + 992 def multi_task_elastic_net_cv( 993 self, - 994 name: str = "Gradient Boosting Regression", - 995 **kwargs - 996 ): - 997 """ - 998 Fit x on y using gradient boosting regression - 999 -1000 Parameters -1001 ---------- -1002 name : str, default="Gradient Boosting Regression" -1003 Name of classification technique. -1004 """ -1005 self._sklearn_regression_meta( -1006 en.GradientBoostingRegressor(**kwargs), -1007 name -1008 ) + 994 name: str = "Multi-Task Elastic Net Regression (Cross Validated)", + 995 random_search: bool = False, + 996 **kwargs + 997 ): + 998 """ + 999 Fit x on y via cross-validated multi-task elastic net regression. +1000 Already cross validated so random search not required +1001 +1002 Parameters +1003 ---------- +1004 name : str, default="Multi-Task Elastic Net Regression \ +1005 (Cross Validated)" +1006 Name of classification technique +1007 random_search : bool, default=False +1008 Not used 1009 -1010 def hist_gradient_boost_regressor( -1011 self, -1012 name: str = "Histogram-Based Gradient Boosting Regression", -1013 **kwargs -1014 ): -1015 """ -1016 Fit x on y using histogram-based gradient boosting regression +1010 """ +1011 _ = random_search +1012 self._sklearn_regression_meta( +1013 lm.MultiTaskElasticNetCV(**kwargs, cv=self.folds), +1014 name, +1015 random_search=True +1016 ) 1017 -1018 Parameters -1019 ---------- -1020 name : str, default="Histogram-Based Gradient Boosting Regression" -1021 Name of classification technique. 
-1022 -Based -1023 Gradient Boosting Regression -1024 """ -1025 self._sklearn_regression_meta( -1026 en.HistGradientBoostingRegressor(**kwargs), -1027 name -1028 ) -1029 -1030 def mlp_regressor( -1031 self, -1032 name: str = "Multi-Layer Perceptron Regression", -1033 **kwargs -1034 ): -1035 """ -1036 Fit x on y using multi-layer perceptrons -1037 -1038 Parameters -1039 ---------- -1040 name : str, default="Multi-Layer Perceptron Regression" -1041 Name of classification technique. -1042 -Layer Perceptron -1043 Regression -1044 """ -1045 self._sklearn_regression_meta( -1046 nn.MLPRegressor(**kwargs), -1047 name -1048 ) -1049 -1050 def svr(self, name: str = "Support Vector Regression", **kwargs): -1051 """ -1052 Fit x on y using support vector regression -1053 -1054 Parameters -1055 ---------- -1056 name : str, default="Support Vector Regression" -1057 Name of classification technique. -1058 """ +1018 def lars( +1019 self, +1020 name: str = "Least Angle Regression", +1021 random_search: bool = False, +1022 parameters: dict[ +1023 str, +1024 Union[ +1025 scipy.stats.rv_continuous, +1026 List[Union[int, str, float]] +1027 ] +1028 ] = { +1029 'n_nonzero_coefs': list(range(1, 11)) +1030 }, +1031 **kwargs +1032 ): +1033 """ +1034 Fit x on y via least angle regression +1035 +1036 Parameters +1037 ---------- +1038 name : str, default="Least Angle Regression" +1039 Name of classification technique. +1040 random_search : bool, default=False +1041 Whether to perform RandomizedSearch to optimise parameters +1042 parameters : dict[ +1043 str, +1044 Union[ +1045 scipy.stats.rv_continuous, +1046 List[Union[int, str, float]] +1047 ] +1048 ], default=Preset distributions +1049 The parameters used in RandomizedSearchCV +1050 """ +1051 if random_search: +1052 classifier = RandomizedSearchCV( +1053 lm.Lars(**kwargs), +1054 parameters, +1055 cv=self.folds +1056 ) +1057 else: +1058 classifier = lm.Lars(**kwargs) 1059 self._sklearn_regression_meta( -1060 svm.SVR(**kwargs), -1061 name -1062 ) -1063 -1064 def linear_svr( -1065 self, -1066 name: str = "Linear Support Vector Regression", -1067 **kwargs -1068 ): -1069 """ -1070 Fit x on y using linear support vector regression -1071 -1072 Parameters -1073 ---------- -1074 name : str, default="Linear Support Vector Regression" -1075 Name of classification technique. -1076 """ -1077 self._sklearn_regression_meta( -1078 svm.LinearSVR(**kwargs), -1079 name -1080 ) -1081 -1082 def nu_svr(self, name: str = "Nu-Support Vector Regression", **kwargs): -1083 """ -1084 Fit x on y using nu-support vector regression -1085 -1086 Parameters -1087 ---------- -1088 name : str, default="Nu-Support Vector Regression" -1089 Name of classification technique. -1090 -Support Vector -1091 Regression -1092 """ -1093 self._sklearn_regression_meta( -1094 svm.LinearSVR(**kwargs), -1095 name -1096 ) -1097 -1098 def gaussian_process( -1099 self, -1100 name: str = "Gaussian Process Regression", -1101 **kwargs -1102 ): -1103 """ -1104 Fit x on y using gaussian process regression -1105 -1106 Parameters -1107 ---------- -1108 name : str, default="Gaussian Process Regression" -1109 Name of classification technique. -1110 """ -1111 self._sklearn_regression_meta( -1112 gp.GaussianProcessRegressor(**kwargs), -1113 name -1114 ) -1115 -1116 def pls(self, name: str = "PLS Regression", **kwargs): -1117 """ -1118 Fit x on y using pls regression -1119 -1120 Parameters -1121 ---------- -1122 name : str, default="PLS Regression" -1123 Name of classification technique. 
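This diff also adds `from sklearn.gaussian_process import kernels as kern` to the imports, which suggests the `gaussian_process` method gains configurable kernels in parts of the file not shown in this hunk. Purely as an illustration of that scikit-learn API, and not of calidhayte's actual defaults, a sketch with an assumed kernel:

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor, kernels as kern

rng = np.random.default_rng(62)
X = rng.uniform(0, 10, size=(60, 1))
y = np.sin(X).ravel() + rng.normal(scale=0.1, size=60)

# Example kernel only; the kernel(s) calidhayte actually uses are not visible here
kernel = kern.ConstantKernel() * kern.RBF(length_scale=1.0) + kern.WhiteKernel()
gpr = GaussianProcessRegressor(kernel=kernel, random_state=62)
gpr.fit(X, y)
print(gpr.predict(X[:3], return_std=True))
```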
-1124 """ -1125 self._sklearn_regression_meta( -1126 cd.PLSRegression(n_components=1, **kwargs), -1127 name -1128 ) +1060 classifier, +1061 f'{name}{" (Random Search)" if random_search else ""}', +1062 random_search=random_search +1063 ) +1064 +1065 def lars_lasso( +1066 self, +1067 name: str = "Least Angle Lasso Regression", +1068 random_search: bool = False, +1069 parameters: dict[ +1070 str, +1071 Union[ +1072 scipy.stats.rv_continuous, +1073 List[Union[int, str, float]] +1074 ] +1075 ] = { +1076 'alpha': uniform(loc=0, scale=2) +1077 }, +1078 **kwargs +1079 ): +1080 """ +1081 Fit x on y via least angle lasso regression +1082 +1083 Parameters +1084 ---------- +1085 name : str, default="Least Angle Lasso Regression" +1086 Name of classification technique. +1087 random_search : bool, default=False +1088 Whether to perform RandomizedSearch to optimise parameters +1089 parameters : dict[ +1090 str, +1091 Union[ +1092 scipy.stats.rv_continuous, +1093 List[Union[int, str, float]] +1094 ] +1095 ], default=Preset distributions +1096 The parameters used in RandomizedSearchCV +1097 """ +1098 if random_search: +1099 classifier = RandomizedSearchCV( +1100 lm.LassoLars(**kwargs), +1101 parameters, +1102 cv=self.folds +1103 ) +1104 else: +1105 classifier = lm.LassoLars(**kwargs) +1106 self._sklearn_regression_meta( +1107 classifier, +1108 f'{name}{" (Random Search)" if random_search else ""}', +1109 random_search=random_search +1110 ) +1111 +1112 def omp( +1113 self, +1114 name: str = "Orthogonal Matching Pursuit", +1115 random_search: bool = False, +1116 parameters: dict[ +1117 str, +1118 Union[ +1119 scipy.stats.rv_continuous, +1120 List[Union[int, str, float]] +1121 ] +1122 ] = { +1123 'n_nonzero_coefs': list(range(1, 11)) +1124 }, +1125 **kwargs +1126 ): +1127 """ +1128 Fit x on y via orthogonal matching pursuit regression 1129 -1130 def isotonic(self, name: str = "Isotonic Regression", **kwargs): -1131 """ -1132 Fit x on y using isotonic regression -1133 -1134 Parameters -1135 ---------- -1136 name : str, default="Isotonic Regression" -1137 Name of classification technique. -1138 """ -1139 self._sklearn_regression_meta( -1140 iso.IsotonicRegression(**kwargs), -1141 name, -1142 max_coeffs=1 -1143 ) -1144 -1145 def xgboost(self, name: str = "XGBoost Regression", **kwargs): -1146 """ -1147 Fit x on y using xgboost regression -1148 -1149 Parameters -1150 ---------- -1151 name : str, default="XGBoost Regression" -1152 Name of classification technique. -1153 """ -1154 self._sklearn_regression_meta( -1155 xgb.XGBRegressor(**kwargs), -1156 name -1157 ) -1158 -1159 def xgboost_rf( -1160 self, -1161 name: str = "XGBoost Random Forest Regression", -1162 **kwargs -1163 ): -1164 """ -1165 Fit x on y using xgboosted random forest regression -1166 -1167 Parameters -1168 ---------- -1169 name : str, default="XGBoost Random Forest Regression" -1170 Name of classification technique. -1171 """ -1172 self._sklearn_regression_meta( -1173 xgb.XGBRFRegressor(**kwargs), -1174 name -1175 ) -1176 -1177 def return_measurements(self) -> dict[str, pd.DataFrame]: -1178 """ -1179 Returns the measurements used, with missing values and -1180 non-overlapping measurements excluded +1130 Parameters +1131 ---------- +1132 name : str, default="Orthogonal Matching Pursuit" +1133 Name of classification technique. 
+1134 random_search : bool, default=False +1135 Whether to perform RandomizedSearch to optimise parameters +1136 parameters : dict[ +1137 str, +1138 Union[ +1139 scipy.stats.rv_continuous, +1140 List[Union[int, str, float]] +1141 ] +1142 ], default=Preset distributions +1143 The parameters used in RandomizedSearchCV +1144 """ +1145 if random_search: +1146 classifier = RandomizedSearchCV( +1147 lm.OrthogonalMatchingPursuit(**kwargs), +1148 parameters, +1149 cv=self.folds +1150 ) +1151 else: +1152 classifier = lm.OrthogonalMatchingPursuit(**kwargs) +1153 self._sklearn_regression_meta( +1154 classifier, +1155 f'{name}{" (Random Search)" if random_search else ""}', +1156 random_search=random_search, +1157 min_coeffs=2 +1158 ) +1159 +1160 def bayesian_ridge( +1161 self, +1162 name: str = "Bayesian Ridge Regression", +1163 random_search: bool = False, +1164 parameters: dict[ +1165 str, +1166 Union[ +1167 scipy.stats.rv_continuous, +1168 List[Union[int, str, float]] +1169 ] +1170 ] = { +1171 'tol': uniform(loc=0, scale=1), +1172 'alpha_1': uniform(loc=0, scale=1), +1173 'alpha_2': uniform(loc=0, scale=1), +1174 'lambda_1': uniform(loc=0, scale=1), +1175 'lambda_2': uniform(loc=0, scale=1) +1176 }, +1177 **kwargs +1178 ): +1179 """ +1180 Fit x on y via bayesian ridge regression 1181 -1182 Returns -1183 ------- -1184 dict[str, pd.DataFrame] -1185 Dictionary with 2 keys: -1186 -1187 |Key|Value| -1188 |---|---| -1189 |x|`x_data`| -1190 |y|`y_data`| -1191 -1192 """ -1193 return { -1194 'x': self.x_data, -1195 'y': self.y_data -1196 } -1197 -1198 def return_models(self) -> dict[str, # Technique -1199 dict[str, # Scaling method -1200 dict[str, # Variables used -1201 dict[int, # Fold -1202 Pipeline]]]]: -1203 """ -1204 Returns the models stored in the object -1205 -1206 Returns -1207 ------- -1208 dict[str, str, str, int, Pipeline] -1209 The calibrated models. They are stored in a nested structure as -1210 follows: -1211 1. Primary Key, name of the technique (e.g Lasso Regression). -1212 2. Scaling technique (e.g Yeo-Johnson Transform). -1213 3. Combination of variables used or `target` if calibration is -1214 univariate (e.g "`target` + a + b). -1215 4. Fold, which fold was used excluded from the calibration. If data -1216 folds 0-3. -1217 if 5-fold cross validated, a key of 4 indicates the data was -1218 trained on -1219 """ -1220 return self.models +1182 Parameters +1183 ---------- +1184 name : str, default="Bayesian Ridge Regression" +1185 Name of classification technique. 
+1186 random_search : bool, default=False +1187 Whether to perform RandomizedSearch to optimise parameters +1188 parameters : dict[ +1189 str, +1190 Union[ +1191 scipy.stats.rv_continuous, +1192 List[Union[int, str, float]] +1193 ] +1194 ], default=Preset distributions +1195 The parameters used in RandomizedSearchCV +1196 """ +1197 if random_search: +1198 classifier = RandomizedSearchCV( +1199 lm.BayesianRidge(**kwargs), +1200 parameters, +1201 cv=self.folds +1202 ) +1203 else: +1204 classifier = lm.BayesianRidge(**kwargs) +1205 self._sklearn_regression_meta( +1206 classifier, +1207 f'{name}{" (Random Search)" if random_search else ""}', +1208 random_search=random_search +1209 ) +1210 +1211 def bayesian_ard( +1212 self, +1213 name: str = "Bayesian Automatic Relevance Detection", +1214 random_search: bool = False, +1215 parameters: dict[ +1216 str, +1217 Union[ +1218 scipy.stats.rv_continuous, +1219 List[Union[int, str, float]] +1220 ] +1221 ] = { +1222 'tol': uniform(loc=0, scale=1), +1223 'alpha_1': uniform(loc=0, scale=1), +1224 'alpha_2': uniform(loc=0, scale=1), +1225 'lambda_1': uniform(loc=0, scale=1), +1226 'lambda_2': uniform(loc=0, scale=1) +1227 }, +1228 **kwargs +1229 ): +1230 """ +1231 Fit x on y via bayesian automatic relevance detection +1232 +1233 Parameters +1234 ---------- +1235 name : str, default="Bayesian Automatic Relevance Detection" +1236 Name of classification technique. +1237 random_search : bool, default=False +1238 Whether to perform RandomizedSearch to optimise parameters +1239 parameters : dict[ +1240 str, +1241 Union[ +1242 scipy.stats.rv_continuous, +1243 List[Union[int, str, float]] +1244 ] +1245 ], default=Preset distributions +1246 The parameters used in RandomizedSearchCV +1247 """ +1248 if random_search: +1249 classifier = RandomizedSearchCV( +1250 lm.ARDRegression(**kwargs), +1251 parameters, +1252 cv=self.folds +1253 ) +1254 else: +1255 classifier = lm.ARDRegression(**kwargs) +1256 self._sklearn_regression_meta( +1257 classifier, +1258 f'{name}{" (Random Search)" if random_search else ""}', +1259 random_search=random_search +1260 ) +1261 +1262 def tweedie( +1263 self, +1264 name: str = "Tweedie Regression", +1265 random_search: bool = False, +1266 parameters: dict[ +1267 str, +1268 Union[ +1269 scipy.stats.rv_continuous, +1270 List[Union[int, str, float]] +1271 ] +1272 ] = { +1273 'power': [0, 1, 1.5, 2, 2.5, 3], +1274 'alpha': uniform(loc=0, scale=2), +1275 'solver': ['lbfgs', 'newton-cholesky'], +1276 'tol': uniform(loc=0, scale=1), +1277 }, +1278 **kwargs +1279 ): +1280 """ +1281 Fit x on y via tweedie regression +1282 +1283 Parameters +1284 ---------- +1285 name : str, default="Tweedie Regression" +1286 Name of classification technique. 
+1287 random_search : bool, default=False +1288 Whether to perform RandomizedSearch to optimise parameters +1289 parameters : dict[ +1290 str, +1291 Union[ +1292 scipy.stats.rv_continuous, +1293 List[Union[int, str, float]] +1294 ] +1295 ], default=Preset distributions +1296 The parameters used in RandomizedSearchCV +1297 """ +1298 if random_search: +1299 classifier = RandomizedSearchCV( +1300 lm.TweedieRegressor(**kwargs), +1301 parameters, +1302 cv=self.folds +1303 ) +1304 else: +1305 classifier = lm.TweedieRegressor(**kwargs) +1306 self._sklearn_regression_meta( +1307 classifier, +1308 f'{name}{" (Random Search)" if random_search else ""}', +1309 random_search=random_search +1310 ) +1311 +1312 def stochastic_gradient_descent( +1313 self, +1314 name: str = "Stochastic Gradient Descent", +1315 random_search: bool = False, +1316 parameters: dict[ +1317 str, +1318 Union[ +1319 scipy.stats.rv_continuous, +1320 List[Union[int, str, float]] +1321 ] +1322 ] = { +1323 'tol': uniform(loc=0, scale=1), +1324 'loss': [ +1325 'squared_error', +1326 'huber', +1327 'epsilon_insensitive', +1328 'squared_epsilon_insensitive' +1329 ], +1330 'penalty': [ +1331 'l2', +1332 'l1', +1333 'elasticnet', +1334 None +1335 ], +1336 'alpha': uniform(loc=0, scale=0.001), +1337 'l1_ratio': uniform(loc=0, scale=1), +1338 'epsilon': uniform(loc=0, scale=1), +1339 'learning_rate': [ +1340 'constant', +1341 'optimal', +1342 'invscaling', +1343 'adaptive' +1344 ], +1345 'eta0': uniform(loc=0, scale=0.1), +1346 'power_t': uniform(loc=0, scale=1) +1347 +1348 }, +1349 **kwargs +1350 ): +1351 """ +1352 Fit x on y via stochastic gradient descent +1353 +1354 Parameters +1355 ---------- +1356 name : str, default="Stochastic Gradient Descent" +1357 Name of classification technique. +1358 random_search : bool, default=False +1359 Whether to perform RandomizedSearch to optimise parameters +1360 parameters : dict[ +1361 str, +1362 Union[ +1363 scipy.stats.rv_continuous, +1364 List[Union[int, str, float]] +1365 ] +1366 ], default=Preset distributions +1367 The parameters used in RandomizedSearchCV +1368 """ +1369 if random_search: +1370 classifier = RandomizedSearchCV( +1371 lm.SGDRegressor(**kwargs), +1372 parameters, +1373 cv=self.folds +1374 ) +1375 else: +1376 classifier = lm.SGDRegressor(**kwargs) +1377 self._sklearn_regression_meta( +1378 classifier, +1379 f'{name}{" (Random Search)" if random_search else ""}', +1380 random_search=random_search +1381 ) +1382 +1383 def passive_aggressive( +1384 self, +1385 name: str = "Passive Aggressive Regression", +1386 random_search: bool = False, +1387 parameters: dict[ +1388 str, +1389 Union[ +1390 scipy.stats.rv_continuous, +1391 List[Union[int, str, float]] +1392 ] +1393 ] = { +1394 'C': uniform(loc=0, scale=2), +1395 'tol': uniform(loc=0, scale=1), +1396 'loss': [ +1397 'epsilon_insensitive', +1398 'squared_epsilon_insensitive' +1399 ], +1400 'epsilon': uniform(loc=0, scale=1) +1401 }, +1402 **kwargs +1403 ): +1404 """ +1405 Fit x on y via stochastic gradient descent regression +1406 +1407 Parameters +1408 ---------- +1409 name : str, default="Passive Aggressive Regression" +1410 Name of classification technique. 
+1411 random_search : bool, default=False +1412 Whether to perform RandomizedSearch to optimise parameters +1413 parameters : dict[\ +1414 str,\ +1415 Union[\ +1416 scipy.stats.rv_continuous,\ +1417 List[Union[int, str, float]]\ +1418 ]\ +1419 ], default=Preset distributions +1420 The parameters used in RandomizedSearchCV +1421 """ +1422 if random_search: +1423 classifier = RandomizedSearchCV( +1424 lm.PassiveAggressiveRegressor(**kwargs), +1425 parameters, +1426 cv=self.folds +1427 ) +1428 else: +1429 classifier = lm.PassiveAggressiveRegressor(**kwargs) +1430 self._sklearn_regression_meta( +1431 classifier, +1432 f'{name}{" (Random Search)" if random_search else ""}', +1433 random_search=random_search +1434 ) +1435 +1436 def ransac( +1437 self, +1438 name: str = "RANSAC", +1439 random_search: bool = False, +1440 parameters: dict[ +1441 str, +1442 Union[ +1443 scipy.stats.rv_continuous, +1444 List[Union[int, str, float]] +1445 ] +1446 ] = { +1447 'estimator': [ +1448 lm.LinearRegression() +1449 # TODO: ADD +1450 ] +1451 }, +1452 **kwargs +1453 ): +1454 """ +1455 Fit x on y via ransac +1456 +1457 Parameters +1458 ---------- +1459 name : str, default="RANSAC" +1460 Name of classification technique. +1461 random_search : bool, default=False +1462 Whether to perform RandomizedSearch to optimise parameters +1463 parameters : dict[\ +1464 str,\ +1465 Union[\ +1466 scipy.stats.rv_continuous,\ +1467 List[Union[int, str, float]]\ +1468 ]\ +1469 ], default=Preset distributions +1470 The parameters used in RandomizedSearchCV +1471 """ +1472 if random_search: +1473 classifier = RandomizedSearchCV( +1474 lm.RANSACRegressor(**kwargs), +1475 parameters, +1476 cv=self.folds +1477 ) +1478 else: +1479 classifier = lm.RANSACRegressor(**kwargs) +1480 self._sklearn_regression_meta( +1481 classifier, +1482 f'{name}{" (Random Search)" if random_search else ""}', +1483 random_search=random_search +1484 ) +1485 +1486 def theil_sen( +1487 self, +1488 name: str = "Theil-Sen Regression", +1489 random_search: bool = False, +1490 parameters: dict[ +1491 str, +1492 Union[ +1493 scipy.stats.rv_continuous, +1494 List[Union[int, str, float]] +1495 ] +1496 ] = { +1497 'tol': uniform(loc=0, scale=1) +1498 }, +1499 **kwargs +1500 ): +1501 """ +1502 Fit x on y via theil-sen regression +1503 +1504 Parameters +1505 ---------- +1506 name : str, default="Theil-Sen Regression" +1507 Name of classification technique. 
+1508 random_search : bool, default=False +1509 Whether to perform RandomizedSearch to optimise parameters +1510 parameters : dict[\ +1511 str,\ +1512 Union[\ +1513 scipy.stats.rv_continuous,\ +1514 List[Union[int, str, float]]\ +1515 ]\ +1516 ], default=Preset distributions +1517 The parameters used in RandomizedSearchCV +1518 """ +1519 if random_search: +1520 classifier = RandomizedSearchCV( +1521 lm.TheilSenRegressor(**kwargs), +1522 parameters, +1523 cv=self.folds +1524 ) +1525 else: +1526 classifier = lm.TheilSenRegressor(**kwargs) +1527 self._sklearn_regression_meta( +1528 classifier, +1529 f'{name}{" (Random Search)" if random_search else ""}', +1530 random_search=random_search +1531 ) +1532 +1533 def huber( +1534 self, +1535 name: str = "Huber Regression", +1536 random_search: bool = False, +1537 parameters: dict[ +1538 str, +1539 Union[ +1540 scipy.stats.rv_continuous, +1541 List[Union[int, str, float]] +1542 ] +1543 ] = { +1544 'epsilon': uniform(loc=1, scale=4), +1545 'alpha': uniform(loc=0, scale=0.01), +1546 'tol': uniform(loc=0, scale=1) +1547 }, +1548 **kwargs +1549 ): +1550 """ +1551 Fit x on y via huber regression +1552 +1553 Parameters +1554 ---------- +1555 name : str, default="Huber Regression" +1556 Name of classification technique. +1557 random_search : bool, default=False +1558 Whether to perform RandomizedSearch to optimise parameters +1559 parameters : dict[\ +1560 str,\ +1561 Union[\ +1562 scipy.stats.rv_continuous,\ +1563 List[Union[int, str, float]]\ +1564 ]\ +1565 ], default=Preset distributions +1566 The parameters used in RandomizedSearchCV +1567 """ +1568 if random_search: +1569 classifier = RandomizedSearchCV( +1570 lm.HuberRegressor(**kwargs), +1571 parameters, +1572 cv=self.folds +1573 ) +1574 else: +1575 classifier = lm.HuberRegressor(**kwargs) +1576 self._sklearn_regression_meta( +1577 classifier, +1578 f'{name}{" (Random Search)" if random_search else ""}', +1579 random_search=random_search +1580 ) +1581 +1582 def quantile( +1583 self, +1584 name: str = "Quantile Regression", +1585 random_search: bool = False, +1586 parameters: dict[ +1587 str, +1588 Union[ +1589 scipy.stats.rv_continuous, +1590 List[Union[int, str, float]] +1591 ] +1592 ] = { +1593 'quantile': uniform(loc=0, scale=2), +1594 'alpha': uniform(loc=0, scale=2), +1595 'tol': uniform(loc=0, scale=1), +1596 'solver': [ +1597 'highs-ds', +1598 'highs-ipm', +1599 'highs', +1600 'revised simplex', +1601 ] +1602 }, +1603 **kwargs +1604 ): +1605 """ +1606 Fit x on y via quantile regression +1607 +1608 Parameters +1609 'interior-point', +1610 ---------- +1611 name : str, default="Quantile Regression" +1612 Name of classification technique. 
+1613 random_search : bool, default=False +1614 Whether to perform RandomizedSearch to optimise parameters +1615 parameters : dict[\ +1616 str,\ +1617 Union[\ +1618 scipy.stats.rv_continuous,\ +1619 List[Union[int, str, float]]\ +1620 ]\ +1621 ], default=Preset distributions +1622 The parameters used in RandomizedSearchCV +1623 """ +1624 if random_search: +1625 classifier = RandomizedSearchCV( +1626 lm.QuantileRegressor(**kwargs), +1627 parameters, +1628 cv=self.folds +1629 ) +1630 else: +1631 classifier = lm.QuantileRegressor(**kwargs) +1632 self._sklearn_regression_meta( +1633 classifier, +1634 f'{name}{" (Random Search)" if random_search else ""}', +1635 random_search=random_search +1636 ) +1637 +1638 def decision_tree( +1639 self, +1640 name: str = "Decision Tree", +1641 random_search: bool = False, +1642 parameters: dict[ +1643 str, +1644 Union[ +1645 scipy.stats.rv_continuous, +1646 List[Union[int, str, float]] +1647 ] +1648 ] = { +1649 'criterion': [ +1650 'squared_error', +1651 'friedman_mse', +1652 'absolute_error', +1653 'poisson' +1654 ], +1655 'splitter': [ +1656 'best', +1657 'random' +1658 ], +1659 'max_features': [ +1660 None, +1661 'sqrt', +1662 'log2' +1663 ], +1664 'ccp_alpha': uniform(loc=0, scale=2), +1665 }, +1666 **kwargs +1667 ): +1668 """ +1669 Fit x on y via decision tree +1670 +1671 Parameters +1672 ---------- +1673 name : str, default="Decision Tree" +1674 Name of classification technique. +1675 random_search : bool, default=False +1676 Whether to perform RandomizedSearch to optimise parameters +1677 parameters : dict[\ +1678 str,\ +1679 Union[\ +1680 scipy.stats.rv_continuous,\ +1681 List[Union[int, str, float]]\ +1682 ]\ +1683 ], default=Preset distributions +1684 The parameters used in RandomizedSearchCV +1685 """ +1686 if random_search: +1687 classifier = RandomizedSearchCV( +1688 tree.DecisionTreeRegressor(**kwargs), +1689 parameters, +1690 cv=self.folds +1691 ) +1692 else: +1693 classifier = tree.DecisionTreeRegressor(**kwargs) +1694 self._sklearn_regression_meta( +1695 classifier, +1696 f'{name}{" (Random Search)" if random_search else ""}', +1697 random_search=random_search +1698 ) +1699 +1700 def extra_tree( +1701 self, +1702 name: str = "Extra Tree", +1703 random_search: bool = False, +1704 parameters: dict[ +1705 str, +1706 Union[ +1707 scipy.stats.rv_continuous, +1708 List[Union[int, str, float]] +1709 ] +1710 ] = { +1711 'criterion': [ +1712 'squared_error', +1713 'friedman_mse', +1714 'absolute_error', +1715 'poisson' +1716 ], +1717 'splitter': [ +1718 'best', +1719 'random' +1720 ], +1721 'max_features': [ +1722 None, +1723 'sqrt', +1724 'log2' +1725 ], +1726 'ccp_alpha': uniform(loc=0, scale=2), +1727 }, +1728 **kwargs +1729 ): +1730 """ +1731 Fit x on y via extra tree +1732 +1733 Parameters +1734 ---------- +1735 name : str, default="Extra Tree" +1736 Name of classification technique. 
+1737 random_search : bool, default=False +1738 Whether to perform RandomizedSearch to optimise parameters +1739 parameters : dict[\ +1740 str,\ +1741 Union[\ +1742 scipy.stats.rv_continuous,\ +1743 List[Union[int, str, float]]\ +1744 ]\ +1745 ], default=Preset distributions +1746 The parameters used in RandomizedSearchCV +1747 """ +1748 if random_search: +1749 classifier = RandomizedSearchCV( +1750 tree.ExtraTreeRegressor(**kwargs), +1751 parameters, +1752 cv=self.folds +1753 ) +1754 else: +1755 classifier = tree.ExtraTreeRegressor(**kwargs) +1756 self._sklearn_regression_meta( +1757 classifier, +1758 f'{name}{" (Random Search)" if random_search else ""}', +1759 random_search=random_search +1760 ) +1761 +1762 def random_forest( +1763 self, +1764 name: str = "Random Forest", +1765 random_search: bool = False, +1766 parameters: dict[ +1767 str, +1768 Union[ +1769 scipy.stats.rv_continuous, +1770 List[Union[int, str, float]] +1771 ] +1772 ] = { +1773 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], +1774 'bootstrap': [True, False], +1775 'max_samples': uniform(loc=0.01, scale=0.99), +1776 'criterion': [ +1777 'squared_error', +1778 'friedman_mse', +1779 'absolute_error', +1780 'poisson' +1781 ], +1782 'max_features': [ +1783 None, +1784 'sqrt', +1785 'log2' +1786 ], +1787 'ccp_alpha': uniform(loc=0, scale=2), +1788 }, +1789 **kwargs +1790 ): +1791 """ +1792 Fit x on y via random forest +1793 +1794 Parameters +1795 ---------- +1796 name : str, default="Random Forest" +1797 Name of classification technique. +1798 random_search : bool, default=False +1799 Whether to perform RandomizedSearch to optimise parameters +1800 parameters : dict[\ +1801 str,\ +1802 Union[\ +1803 scipy.stats.rv_continuous,\ +1804 List[Union[int, str, float]]\ +1805 ]\ +1806 ], default=Preset distributions +1807 The parameters used in RandomizedSearchCV +1808 """ +1809 if random_search: +1810 classifier = RandomizedSearchCV( +1811 en.RandomForestRegressor(**kwargs), +1812 parameters, +1813 cv=self.folds +1814 ) +1815 else: +1816 classifier = en.RandomForestRegressor(**kwargs) +1817 self._sklearn_regression_meta( +1818 classifier, +1819 f'{name}{" (Random Search)" if random_search else ""}', +1820 random_search=random_search +1821 ) +1822 +1823 def extra_trees_ensemble( +1824 self, +1825 name: str = "Extra Trees Ensemble", +1826 random_search: bool = False, +1827 parameters: dict[ +1828 str, +1829 Union[ +1830 scipy.stats.rv_continuous, +1831 List[Union[int, str, float]] +1832 ] +1833 ] = { +1834 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], +1835 'bootstrap': [True, False], +1836 'max_samples': uniform(loc=0.01, scale=0.99), +1837 'criterion': [ +1838 'squared_error', +1839 'friedman_mse', +1840 'absolute_error', +1841 'poisson' +1842 ], +1843 'max_features': [ +1844 None, +1845 'sqrt', +1846 'log2' +1847 ], +1848 'ccp_alpha': uniform(loc=0, scale=2), +1849 }, +1850 **kwargs +1851 ): +1852 """ +1853 Fit x on y via extra trees ensemble +1854 +1855 Parameters +1856 ---------- +1857 name : str, default="Extra Trees Ensemble" +1858 Name of classification technique. 
+1859 random_search : bool, default=False +1860 Whether to perform RandomizedSearch to optimise parameters +1861 parameters : dict[\ +1862 str,\ +1863 Union[\ +1864 scipy.stats.rv_continuous,\ +1865 List[Union[int, str, float]]\ +1866 ]\ +1867 ], default=Preset distributions +1868 The parameters used in RandomizedSearchCV +1869 """ +1870 if random_search: +1871 classifier = RandomizedSearchCV( +1872 en.ExtraTreesRegressor(**kwargs), +1873 parameters, +1874 cv=self.folds +1875 ) +1876 else: +1877 classifier = en.ExtraTreesRegressor(**kwargs) +1878 self._sklearn_regression_meta( +1879 classifier, +1880 f'{name}{" (Random Search)" if random_search else ""}', +1881 random_search=random_search +1882 ) +1883 +1884 def gradient_boost_regressor( +1885 self, +1886 name: str = "Gradient Boosting Regression", +1887 random_search: bool = False, +1888 parameters: dict[ +1889 str, +1890 Union[ +1891 scipy.stats.rv_continuous, +1892 List[Union[int, str, float]] +1893 ] +1894 ] = { +1895 'loss': [ +1896 'squared_error', +1897 'absolute_error', +1898 'huber', +1899 'quantile' +1900 ], +1901 'learning_rate': uniform(loc=0, scale=2), +1902 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], +1903 'subsample': uniform(loc=0.01, scale=0.99), +1904 'criterion': [ +1905 'friedman_mse', +1906 'squared_error' +1907 ], +1908 'max_features': [ +1909 None, +1910 'sqrt', +1911 'log2' +1912 ], +1913 'init': [ +1914 None, +1915 'zero', +1916 lm.LinearRegression, +1917 lm.TheilSenRegressor +1918 ], +1919 'ccp_alpha': uniform(loc=0, scale=2) +1920 }, +1921 **kwargs +1922 ): +1923 """ +1924 Fit x on y via gradient boosting regression +1925 +1926 Parameters +1927 ---------- +1928 name : str, default="Gradient Boosting Regression" +1929 Name of classification technique. +1930 random_search : bool, default=False +1931 Whether to perform RandomizedSearch to optimise parameters +1932 parameters : dict[\ +1933 str,\ +1934 Union[\ +1935 scipy.stats.rv_continuous,\ +1936 List[Union[int, str, float]]\ +1937 ]\ +1938 ], default=Preset distributions +1939 The parameters used in RandomizedSearchCV +1940 """ +1941 if random_search: +1942 classifier = RandomizedSearchCV( +1943 en.GradientBoostingRegressor(**kwargs), +1944 parameters, +1945 cv=self.folds +1946 ) +1947 else: +1948 classifier = en.GradientBoostingRegressor(**kwargs) +1949 self._sklearn_regression_meta( +1950 classifier, +1951 f'{name}{" (Random Search)" if random_search else ""}', +1952 random_search=random_search +1953 ) +1954 +1955 def hist_gradient_boost_regressor( +1956 self, +1957 name: str = "Histogram-Based Gradient Boosting Regression", +1958 random_search: bool = False, +1959 parameters: dict[ +1960 str, +1961 Union[ +1962 scipy.stats.rv_continuous, +1963 List[Union[int, str, float]] +1964 ] +1965 ] = { +1966 'loss': [ +1967 'squared_error', +1968 'absolute_error', +1969 'gamma', +1970 'poisson', +1971 'quantile' +1972 ], +1973 'quantile': uniform(loc=0, scale=1), +1974 'learning_rate': uniform(loc=0, scale=2), +1975 'max_iter': [5, 10, 25, 50, 100, 200, 250, 500], +1976 'l2_regularization': uniform(loc=0, scale=2), +1977 'max_bins': [1, 3, 7, 15, 31, 63, 127, 255] +1978 }, +1979 **kwargs +1980 ): +1981 """ +1982 Fit x on y via histogram-based gradient boosting regression +1983 +1984 Parameters +1985 ---------- +1986 name : str, default="Histogram-Based Gradient Boosting Regression" +1987 Name of classification technique. 
+1988 random_search : bool, default=False +1989 Whether to perform RandomizedSearch to optimise parameters +1990 parameters : dict[\ +1991 str,\ +1992 Union[\ +1993 scipy.stats.rv_continuous,\ +1994 List[Union[int, str, float]]\ +1995 ]\ +1996 ], default=Preset distributions +1997 The parameters used in RandomizedSearchCV +1998 """ +1999 if random_search: +2000 classifier = RandomizedSearchCV( +2001 en.HistGradientBoostingRegressor(**kwargs), +2002 parameters, +2003 cv=self.folds +2004 ) +2005 else: +2006 classifier = en.HistGradientBoostingRegressor(**kwargs) +2007 self._sklearn_regression_meta( +2008 classifier, +2009 f'{name}{" (Random Search)" if random_search else ""}', +2010 random_search=random_search +2011 ) +2012 +2013 def mlp_regressor( +2014 self, +2015 name: str = "Multi-Layer Perceptron Regression", +2016 random_search: bool = False, +2017 parameters: dict[ +2018 str, +2019 Union[ +2020 scipy.stats.rv_continuous, +2021 List[Union[int, str, float]] +2022 ] +2023 ] = { +2024 'hidden_layer_sizes': [ +2025 (100, ), +2026 (100, 200), +2027 (10, ), +2028 (200, 400), +2029 (100, 200, 300) +2030 ], +2031 'activation': [ +2032 'identity', +2033 'logistic', +2034 'tanh', +2035 'relu' +2036 ], +2037 'solver': [ +2038 'lbfgs', +2039 'sgd', +2040 'adam' +2041 ], +2042 'alpha': uniform(loc=0, scale=0.1), +2043 'batch_size': [ +2044 'auto', +2045 20, +2046 200, +2047 500, +2048 1000, +2049 5000, +2050 10000 +2051 ], +2052 'learning_rate': [ +2053 'constant', +2054 'invscaling', +2055 'adaptive' +2056 ], +2057 'learning_rate_init': uniform(loc=0, scale=0.1), +2058 'power_t': uniform(loc=0.1, scale=0.9), +2059 'max_iter': [5, 10, 25, 50, 100, 200, 250, 500], +2060 'shuffle': [True, False], +2061 'momentum': uniform(loc=0.1, scale=0.9), +2062 'beta_1': uniform(loc=0.1, scale=0.9), +2063 'beta_2': uniform(loc=0.1, scale=0.9), +2064 'epsilon': uniform(loc=1E8, scale=1E6), +2065 +2066 }, +2067 **kwargs +2068 ): +2069 """ +2070 Fit x on y via multi-layer perceptron regression +2071 +2072 Parameters +2073 ---------- +2074 name : str, default="Multi-Layer Perceptron Regression" +2075 Name of classification technique. 
+2076 random_search : bool, default=False +2077 Whether to perform RandomizedSearch to optimise parameters +2078 parameters : dict[\ +2079 str,\ +2080 Union[\ +2081 scipy.stats.rv_continuous,\ +2082 List[Union[int, str, float]]\ +2083 ]\ +2084 ], default=Preset distributions +2085 The parameters used in RandomizedSearchCV +2086 """ +2087 if random_search: +2088 classifier = RandomizedSearchCV( +2089 nn.MLPRegressor(**kwargs), +2090 parameters, +2091 cv=self.folds +2092 ) +2093 else: +2094 classifier = nn.MLPRegressor(**kwargs) +2095 self._sklearn_regression_meta( +2096 classifier, +2097 f'{name}{" (Random Search)" if random_search else ""}', +2098 random_search=random_search +2099 ) +2100 +2101 def svr( +2102 self, +2103 name: str = "Support Vector Regression", +2104 random_search: bool = False, +2105 parameters: dict[ +2106 str, +2107 Union[ +2108 scipy.stats.rv_continuous, +2109 List[Union[int, str, float]] +2110 ] +2111 ] = { +2112 'kernel': [ +2113 'linear', +2114 'poly', +2115 'rbf', +2116 'sigmoid', +2117 ], +2118 'degree': [2, 3, 4], +2119 'gamma': ['scale', 'auto'], +2120 'coef0': uniform(loc=0, scale=1), +2121 'C': uniform(loc=0.1, scale=1.9), +2122 'epsilon': uniform(loc=1E8, scale=1), +2123 'shrinking': [True, False] +2124 }, +2125 **kwargs +2126 ): +2127 """ +2128 Fit x on y via support vector regression +2129 +2130 Parameters +2131 ---------- +2132 name : str, default="Support Vector Regression" +2133 Name of classification technique. +2134 random_search : bool, default=False +2135 Whether to perform RandomizedSearch to optimise parameters +2136 parameters : dict[\ +2137 str,\ +2138 Union[\ +2139 scipy.stats.rv_continuous,\ +2140 List[Union[int, str, float]]\ +2141 ]\ +2142 ], default=Preset distributions +2143 The parameters used in RandomizedSearchCV +2144 """ +2145 if random_search: +2146 classifier = RandomizedSearchCV( +2147 svm.SVR(**kwargs), +2148 parameters, +2149 cv=self.folds +2150 ) +2151 else: +2152 classifier = svm.SVR(**kwargs) +2153 self._sklearn_regression_meta( +2154 classifier, +2155 f'{name}{" (Random Search)" if random_search else ""}', +2156 random_search=random_search +2157 ) +2158 +2159 def linear_svr( +2160 self, +2161 name: str = "Linear Support Vector Regression", +2162 random_search: bool = False, +2163 parameters: dict[ +2164 str, +2165 Union[ +2166 scipy.stats.rv_continuous, +2167 List[Union[int, str, float]] +2168 ] +2169 ] = { +2170 'C': uniform(loc=0.1, scale=1.9), +2171 'epsilon': uniform(loc=1E8, scale=1), +2172 'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive'] +2173 }, +2174 **kwargs +2175 ): +2176 """ +2177 Fit x on y via linear support vector regression +2178 +2179 Parameters +2180 ---------- +2181 name : str, default="Linear Support Vector Regression" +2182 Name of classification technique. 
+2183 random_search : bool, default=False +2184 Whether to perform RandomizedSearch to optimise parameters +2185 parameters : dict[\ +2186 str,\ +2187 Union[\ +2188 scipy.stats.rv_continuous,\ +2189 List[Union[int, str, float]]\ +2190 ]\ +2191 ], default=Preset distributions +2192 The parameters used in RandomizedSearchCV +2193 """ +2194 if random_search: +2195 classifier = RandomizedSearchCV( +2196 svm.LinearSVR(**kwargs), +2197 parameters, +2198 cv=self.folds +2199 ) +2200 else: +2201 classifier = svm.LinearSVR(**kwargs) +2202 self._sklearn_regression_meta( +2203 classifier, +2204 f'{name}{" (Random Search)" if random_search else ""}', +2205 random_search=random_search +2206 ) +2207 +2208 def nu_svr( +2209 self, +2210 name: str = "Nu-Support Vector Regression", +2211 random_search: bool = False, +2212 parameters: dict[ +2213 str, +2214 Union[ +2215 scipy.stats.rv_continuous, +2216 List[Union[int, str, float]] +2217 ] +2218 ] = { +2219 'kernel': [ +2220 'linear', +2221 'poly', +2222 'rbf', +2223 'sigmoid', +2224 ], +2225 'degree': [2, 3, 4], +2226 'gamma': ['scale', 'auto'], +2227 'coef0': uniform(loc=0, scale=1), +2228 'shrinking': [True, False], +2229 'nu': uniform(loc=0, scale=1), +2230 }, +2231 **kwargs +2232 ): +2233 """ +2234 Fit x on y via nu-support vector regression +2235 +2236 Parameters +2237 ---------- +2238 name : str, default="Nu-Support Vector Regression" +2239 Name of classification technique. +2240 random_search : bool, default=False +2241 Whether to perform RandomizedSearch to optimise parameters +2242 parameters : dict[\ +2243 str,\ +2244 Union[\ +2245 scipy.stats.rv_continuous,\ +2246 List[Union[int, str, float]]\ +2247 ]\ +2248 ], default=Preset distributions +2249 The parameters used in RandomizedSearchCV +2250 """ +2251 if random_search: +2252 classifier = RandomizedSearchCV( +2253 svm.NuSVR(**kwargs), +2254 parameters, +2255 cv=self.folds +2256 ) +2257 else: +2258 classifier = svm.NuSVR(**kwargs) +2259 self._sklearn_regression_meta( +2260 classifier, +2261 f'{name}{" (Random Search)" if random_search else ""}', +2262 random_search=random_search +2263 ) +2264 +2265 def gaussian_process( +2266 self, +2267 name: str = "Gaussian Process Regression", +2268 random_search: bool = False, +2269 parameters: dict[ +2270 str, +2271 Union[ +2272 scipy.stats.rv_continuous, +2273 List[Union[int, str, float]] +2274 ] +2275 ] = { +2276 'kernel': [ +2277 None, +2278 kern.RBF, +2279 kern.Matern, +2280 kern.DotProduct, +2281 kern.WhiteKernel, +2282 kern.CompoundKernel, +2283 kern.ExpSineSquared +2284 ], +2285 'alpha': uniform(loc=0, scale=1E8), +2286 'normalize_y': [True, False] +2287 }, +2288 **kwargs +2289 ): +2290 """ +2291 Fit x on y via gaussian process regression +2292 +2293 Parameters +2294 ---------- +2295 name : str, default="Gaussian Process Regression" +2296 Name of classification technique. 
+2297 random_search : bool, default=False +2298 Whether to perform RandomizedSearch to optimise parameters +2299 parameters : dict[\ +2300 str,\ +2301 Union[\ +2302 scipy.stats.rv_continuous,\ +2303 List[Union[int, str, float]]\ +2304 ]\ +2305 ], default=Preset distributions +2306 The parameters used in RandomizedSearchCV +2307 """ +2308 if random_search: +2309 classifier = RandomizedSearchCV( +2310 gp.GaussianProcessRegressor(**kwargs), +2311 parameters, +2312 cv=self.folds +2313 ) +2314 else: +2315 classifier = gp.GaussianProcessRegressor(**kwargs) +2316 self._sklearn_regression_meta( +2317 classifier, +2318 f'{name}{" (Random Search)" if random_search else ""}', +2319 random_search=random_search +2320 ) +2321 +2322 def isotonic( +2323 self, +2324 name: str = "Isotonic Regression", +2325 random_search: bool = False, +2326 parameters: dict[ +2327 str, +2328 Union[ +2329 scipy.stats.rv_continuous, +2330 List[Union[int, str, float]] +2331 ] +2332 ] = { +2333 'increasing': [True, False] +2334 }, +2335 **kwargs +2336 ): +2337 """ +2338 Fit x on y via isotonic regression +2339 +2340 Parameters +2341 ---------- +2342 name : str, default="Isotonic Regression" +2343 Name of classification technique. +2344 random_search : bool, default=False +2345 Whether to perform RandomizedSearch to optimise parameters +2346 parameters : dict[\ +2347 str,\ +2348 Union[\ +2349 scipy.stats.rv_continuous,\ +2350 List[Union[int, str, float]]\ +2351 ]\ +2352 ], default=Preset distributions +2353 The parameters used in RandomizedSearchCV +2354 """ +2355 if random_search: +2356 classifier = RandomizedSearchCV( +2357 iso.IsotonicRegression(**kwargs), +2358 parameters, +2359 cv=self.folds +2360 ) +2361 else: +2362 classifier = iso.IsotonicRegression(**kwargs) +2363 self._sklearn_regression_meta( +2364 classifier, +2365 f'{name}{" (Random Search)" if random_search else ""}', +2366 random_search=random_search, +2367 max_coeffs=1 +2368 ) +2369 +2370 def xgboost( +2371 self, +2372 name: str = "XGBoost Regression", +2373 random_search: bool = False, +2374 parameters: dict[ +2375 str, +2376 Union[ +2377 scipy.stats.rv_continuous, +2378 List[Union[int, str, float]] +2379 ] +2380 ] = { +2381 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], +2382 'max_bins': [1, 3, 7, 15, 31, 63, 127, 255], +2383 'grow_policy': [ +2384 'depthwise', +2385 'lossguide' +2386 ], +2387 'learning_rate': uniform(loc=0, scale=2), +2388 'tree_method': ['exact', 'approx', 'hist'], +2389 'gamma': uniform(loc=0, scale=1), +2390 'subsample': uniform(loc=0, scale=1), +2391 'reg_alpha': uniform(loc=0, scale=1), +2392 'reg_lambda': uniform(loc=0, scale=1) +2393 }, +2394 **kwargs +2395 ): +2396 """ +2397 Fit x on y via xgboost regression +2398 +2399 Parameters +2400 ---------- +2401 name : str, default="XGBoost Regression" +2402 Name of classification technique. 
+2403 random_search : bool, default=False +2404 Whether to perform RandomizedSearch to optimise parameters +2405 parameters : dict[\ +2406 str,\ +2407 Union[\ +2408 scipy.stats.rv_continuous,\ +2409 List[Union[int, str, float]]\ +2410 ]\ +2411 ], default=Preset distributions +2412 The parameters used in RandomizedSearchCV +2413 """ +2414 if random_search: +2415 classifier = RandomizedSearchCV( +2416 xgb.XGBRegressor(**kwargs), +2417 parameters, +2418 cv=self.folds +2419 ) +2420 else: +2421 classifier = xgb.XGBRegressor(**kwargs) +2422 self._sklearn_regression_meta( +2423 classifier, +2424 f'{name}{" (Random Search)" if random_search else ""}', +2425 random_search=random_search +2426 ) +2427 +2428 def xgboost_rf( +2429 self, +2430 name: str = "XGBoost Random Forest Regression", +2431 random_search: bool = False, +2432 parameters: dict[ +2433 str, +2434 Union[ +2435 scipy.stats.rv_continuous, +2436 List[Union[int, str, float]] +2437 ] +2438 ] = { +2439 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], +2440 'max_bin': [1, 3, 7, 15, 31, 63, 127, 255], +2441 'grow_policy': [ +2442 'depthwise', +2443 'lossguide' +2444 ], +2445 'learning_rate': uniform(loc=0, scale=2), +2446 'tree_method': ['exact', 'approx', 'hist'], +2447 'gamma': uniform(loc=0, scale=1), +2448 'subsample': uniform(loc=0, scale=1), +2449 'reg_alpha': uniform(loc=0, scale=1), +2450 'reg_lambda': uniform(loc=0, scale=1) +2451 }, +2452 **kwargs +2453 ): +2454 """ +2455 Fit x on y via xgboosted random forest regression +2456 +2457 Parameters +2458 ---------- +2459 name : str, default="XGBoost Random Forest Regression" +2460 Name of classification technique. +2461 random_search : bool, default=False +2462 Whether to perform RandomizedSearch to optimise parameters +2463 parameters : dict[\ +2464 str,\ +2465 Union[\ +2466 scipy.stats.rv_continuous,\ +2467 List[Union[int, str, float]]\ +2468 ]\ +2469 ], default=Preset distributions +2470 The parameters used in RandomizedSearchCV +2471 """ +2472 if random_search: +2473 classifier = RandomizedSearchCV( +2474 xgb.XGBRFRegressor(**kwargs), +2475 parameters, +2476 cv=self.folds +2477 ) +2478 else: +2479 classifier = xgb.XGBRFRegressor(**kwargs) +2480 self._sklearn_regression_meta( +2481 classifier, +2482 f'{name}{" (Random Search)" if random_search else ""}', +2483 random_search=random_search +2484 ) +2485 +2486 def return_measurements(self) -> dict[str, pd.DataFrame]: +2487 """ +2488 Returns the measurements used, with missing values and +2489 non-overlapping measurements excluded +2490 +2491 Returns +2492 ------- +2493 dict[str, pd.DataFrame] +2494 Dictionary with 2 keys: +2495 +2496 |Key|Value| +2497 |---|---| +2498 |x|`x_data`| +2499 |y|`y_data`| +2500 +2501 """ +2502 return { +2503 'x': self.x_data, +2504 'y': self.y_data +2505 } +2506 +2507 def return_models(self) -> dict[str, # Technique +2508 dict[str, # Scaling method +2509 dict[str, # Variables used +2510 dict[int, # Fold +2511 Pipeline]]]]: +2512 """ +2513 Returns the models stored in the object +2514 +2515 Returns +2516 ------- +2517 dict[str, str, str, int, Pipeline] +2518 The calibrated models. They are stored in a nested structure as +2519 follows: +2520 1. Primary Key, name of the technique (e.g Lasso Regression). +2521 2. Scaling technique (e.g Yeo-Johnson Transform). +2522 3. Combination of variables used or `target` if calibration is +2523 univariate (e.g "`target` + a + b). +2524 4. Fold, which fold was used excluded from the calibration. If data +2525 folds 0-3. 
+2526 if 5-fold cross validated, a key of 4 indicates the data was +2527 trained on +2528 """ +2529 return self.models @@ -1500,78 +2809,78 @@
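All of the fitting methods above share one structure: with `random_search=True` the estimator is wrapped in `RandomizedSearchCV` using the preset parameter distributions and `cv=self.folds`, otherwise the bare estimator is handed to `_sklearn_regression_meta`. A minimal stand-alone sketch of that search pattern, using `Ridge` and synthetic data that do not appear in the module:

```python
import numpy as np
from scipy.stats import uniform
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV

# Synthetic data purely for illustration
rng = np.random.default_rng(62)
X = rng.normal(size=(100, 2))
y = X @ np.array([2.0, -1.0]) + rng.normal(scale=0.1, size=100)

# Continuous hyperparameters are given as scipy distributions,
# discrete ones as plain lists, mirroring the preset dictionaries above.
parameters = {
    'alpha': uniform(loc=0, scale=2),
    'solver': ['auto', 'svd', 'cholesky'],
}

search = RandomizedSearchCV(Ridge(), parameters, n_iter=10, cv=5, random_state=62)
search.fit(X, y)
print(search.best_params_)
```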

    -
     42def cont_strat_folds(
    - 43        df: pd.DataFrame,
    - 44        target_var: str,
    - 45        splits: int = 5,
    - 46        strat_groups: int = 5,
    - 47        seed: int = 62
    - 48        ) -> pd.DataFrame:
    - 49    """
    - 50    Creates stratified k-folds on continuous variable
    - 51    ----------
    - 52    df : pd.DataFrame
    - 53        Target data to stratify on.
    - 54    target_var : str
    - 55        Target feature name.
    - 56    splits : int, default=5
    - 57        Number of folds to make.
    - 58    strat_groups : int, default=10
    - 59        Number of groups to split data in to for stratification.
    - 60    seed : int, default=62
    - 61        Random state to use.
    - 62
    - 63    Returns
    - 64    -------
    - 65    pd.DataFrame
    - 66        `y_df` with added 'Fold' column, specifying which test data fold
    - 67        variable corresponds to.
    - 68
    - 69    Examples
    - 70    --------
    - 71    >>> df = pd.read_csv('data.csv')
    - 72    >>> df
    - 73    |    | x | a | b |
    - 74    |    |   |   |   |
    - 75    |  0 |2.3|1.8|7.2|
    - 76    |  1 |3.2|9.6|4.5|
    - 77    |....|...|...|...|
    - 78    |1000|2.3|4.5|2.2|
    - 79    >>> df_with_folds = const_strat_folds(
    - 80            df=df,
    - 81            target='a',
    - 82            splits=3,
    - 83            strat_groups=3.
    - 84            seed=78
    - 85        )
    - 86    >>> df_with_folds
    - 87    |    | x | a | b |Fold|
    - 88    |    |   |   |   |    |
    - 89    |  0 |2.3|1.8|7.2| 2  |
    - 90    |  1 |3.2|9.6|4.5| 1  |
    - 91    |....|...|...|...|....|
    - 92    |1000|2.3|4.5|2.2| 0  |
    - 93
    - 94    All folds should have a roughly equal distribution of values for 'a'
    - 95
    - 96    """
    - 97    _df = df.copy()
    - 98    _df['Fold'] = -1
    - 99    skf = StratifiedKFold(
    -100            n_splits=splits,
    -101            random_state=seed,
    -102            shuffle=True
    -103            )
    -104    _df['Group'] = pd.cut(
    -105            _df.loc[:, target_var],
    -106            strat_groups,
    -107            labels=False
    -108            )
    -109    group_label = _df.loc[:, 'Group']
    -110
    -111    for fold_number, (_, v) in enumerate(skf.split(group_label, group_label)):
    -112        _df.loc[v, 'Fold'] = fold_number
    -113    return _df.drop('Group', axis=1)
    +            
     45def cont_strat_folds(
    + 46        df: pd.DataFrame,
    + 47        target_var: str,
    + 48        splits: int = 5,
    + 49        strat_groups: int = 5,
    + 50        seed: int = 62
    + 51        ) -> pd.DataFrame:
    + 52    """
     + 53    Creates stratified k-folds on a continuous variable
    + 54    ----------
    + 55    df : pd.DataFrame
    + 56        Target data to stratify on.
    + 57    target_var : str
    + 58        Target feature name.
    + 59    splits : int, default=5
    + 60        Number of folds to make.
     + 61    strat_groups : int, default=5
     + 62        Number of groups to split the data into for stratification.
    + 63    seed : int, default=62
    + 64        Random state to use.
    + 65
    + 66    Returns
    + 67    -------
    + 68    pd.DataFrame
     + 69        `df` with an added 'Fold' column, specifying which test-data fold
     + 70        each measurement corresponds to.
    + 71
    + 72    Examples
    + 73    --------
    + 74    >>> df = pd.read_csv('data.csv')
    + 75    >>> df
    + 76    |    | x | a | b |
    + 77    |    |   |   |   |
    + 78    |  0 |2.3|1.8|7.2|
    + 79    |  1 |3.2|9.6|4.5|
    + 80    |....|...|...|...|
    + 81    |1000|2.3|4.5|2.2|
     + 82    >>> df_with_folds = cont_strat_folds(
     + 83            df=df,
     + 84            target_var='a',
     + 85            splits=3,
     + 86            strat_groups=3,
    + 87            seed=78
    + 88        )
    + 89    >>> df_with_folds
    + 90    |    | x | a | b |Fold|
    + 91    |    |   |   |   |    |
    + 92    |  0 |2.3|1.8|7.2| 2  |
    + 93    |  1 |3.2|9.6|4.5| 1  |
    + 94    |....|...|...|...|....|
    + 95    |1000|2.3|4.5|2.2| 0  |
    + 96
    + 97    All folds should have a roughly equal distribution of values for 'a'
    + 98
    + 99    """
    +100    _df = df.copy()
    +101    _df['Fold'] = -1
    +102    skf = StratifiedKFold(
    +103            n_splits=splits,
    +104            random_state=seed,
    +105            shuffle=True
    +106            )
    +107    _df['Group'] = pd.cut(
    +108            _df.loc[:, target_var],
    +109            strat_groups,
    +110            labels=False
    +111            )
    +112    group_label = _df.loc[:, 'Group']
    +113
    +114    for fold_number, (_, v) in enumerate(skf.split(group_label, group_label)):
    +115        _df.loc[v, 'Fold'] = fold_number
    +116    return _df.drop('Group', axis=1)
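Because the folds are built by binning the continuous target and spreading each bin across the folds, a quick sanity check is to compare per-fold summary statistics of the target. A short illustration on synthetic data (the column names here are made up):

```python
import numpy as np
import pandas as pd

# Synthetic data for illustration only
rng = np.random.default_rng(62)
df = pd.DataFrame({
    'x': rng.normal(size=1000),
    'a': rng.gamma(shape=2.0, scale=3.0, size=1000),
    'b': rng.normal(size=1000),
})

df_with_folds = cont_strat_folds(df, target_var='a', splits=5, strat_groups=5, seed=62)

# Each fold should show a similar mean and spread for 'a'
print(df_with_folds.groupby('Fold')['a'].describe())
```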
     
    @@ -1639,1112 +2948,2418 @@
    Examples
    -
     116class Calibrate:
    - 117    """
    - 118    Calibrate x against y using a range of different methods provided by
    - 119    scikit-learn[^skl], xgboost[^xgb] and PyMC (via Bambi)[^pmc].
    - 120
    - 121    [^skl]: https://scikit-learn.org/stable/modules/classes.html
    - 122    [^xgb]: https://xgboost.readthedocs.io/en/stable/python/python_api.html
    - 123    [^pmc]: https://bambinos.github.io/bambi/api/
    - 124
    - 125    Examples
    - 126    --------
    - 127    >>> from calidhayte.calibrate import Calibrate
    - 128    >>> import pandas as pd
    - 129    >>>
    - 130    >>> x = pd.read_csv('independent.csv')
    - 131    >>> x
    - 132    |   | a | b |
    - 133    | 0 |2.3|3.2|
    - 134    | 1 |3.4|3.1|
    - 135    |...|...|...|
    - 136    |100|3.7|2.1|
    - 137    >>>
    - 138    >>> y = pd.read_csv('dependent.csv')
    - 139    >>> y
    - 140    |   | a |
    - 141    | 0 |7.8|
    - 142    | 1 |9.9|
    - 143    |...|...|
    - 144    |100|9.5|
    - 145    >>>
    - 146    >>> calibration = Calibrate(
    - 147        x_data=x,
    - 148        y_data=y,
    - 149        target='a',
    - 150        folds=5,
    - 151        strat_groups=5,
    - 152        scaler = [
    - 153            'Standard Scale',
    - 154            'MinMax Scale'
    - 155            ],
    - 156        seed=62
    - 157    )
    - 158    >>> calibration.linreg()
    - 159    >>> calibration.lars()
    - 160    >>> calibration.omp()
    - 161    >>> calibration.ransac()
    - 162    >>> calibration.random_forest()
    - 163    >>>
    - 164    >>> models = calibration.return_models()
    - 165    >>> list(models.keys())
    - 166    [
    - 167        'Linear Regression',
    - 168        'Least Angle Regression',
    - 169        'Orthogonal Matching Pursuit',
    - 170        'RANSAC',
    - 171        'Random Forest'
    - 172    ]
    - 173    >>> list(models['Linear Regression'].keys())
    - 174    ['Standard Scale', 'MinMax Scale']
    - 175    >>> list(models['Linear Regression']['Standard Scale'].keys())
    - 176    ['a', 'a + b']
    - 177    >>> list(models['Linear Regression']['Standard Scale']['a'].keys())
    - 178    [0, 1, 2, 3, 4]
    - 179    >>> type(models['Linear Regression']['Standard Scale']['a'][0])
    - 180    <class sklearn.pipeline.Pipeline>
    - 181    >>> pipeline = models['Linear Regression']['Standard Scale']['a'][0]
    - 182    >>> x_new = pd.read_csv('independent_new.csv')
    - 183    >>> x_new
    - 184    |   | a | b |
    - 185    | 0 |3.5|2.7|
    - 186    | 1 |4.0|1.1|
    - 187    |...|...|...|
    - 188    |100|2.3|2.1|
     - 189    >>> pipeline.predict(x_new)
    - 190    |   | a |
    - 191    | 0 |9.7|
    - 192    | 1 |9.1|
    - 193    |...|...|
    - 194    |100|6.7|
    - 195
    - 196    """
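The stored pipelines can also be combined. For example (purely illustrative, not an API of the class), the five fold models for one technique/scaler/variable combination can be averaged, assuming each pipeline ends in a regressor and therefore exposes `predict`:

```python
import numpy as np
import pandas as pd

# Continuing the example above: average the predictions of the per-fold pipelines.
models = calibration.return_models()
x_new = pd.read_csv('independent_new.csv')
fold_pipelines = models['Linear Regression']['Standard Scale']['a']
predictions = np.column_stack(
    [pipe.predict(x_new) for pipe in fold_pipelines.values()]
)
mean_prediction = predictions.mean(axis=1)
```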
    - 197
    - 198    def __init__(
    - 199            self,
    - 200            x_data: pd.DataFrame,
    - 201            y_data: pd.DataFrame,
    - 202            target: str,
    - 203            folds: int = 5,
    - 204            strat_groups: int = 10,
    - 205            scaler: Union[
    - 206                Iterable[
    - 207                    Literal[
    - 208                        'None',
    - 209                        'Standard Scale',
    - 210                        'MinMax Scale',
     - 211                        'Yeo-Johnson Transform',
    - 212                        'Box-Cox Transform',
    - 213                        'Quantile Transform (Uniform)',
    - 214                        'Quantile Transform (Gaussian)'
    - 215                        ]
    - 216                    ],
    - 217                Literal[
    - 218                    'All',
    - 219                    'None',
    - 220                    'Standard Scale',
    - 221                    'MinMax Scale',
     - 222                    'Yeo-Johnson Transform',
    - 223                    'Box-Cox Transform',
    - 224                    'Quantile Transform (Uniform)',
    - 225                    'Quantile Transform (Gaussian)',
    - 226                    ]
    - 227                ] = 'None',
    - 228            seed: int = 62
    - 229                 ):
    - 230        """Initialises class
    - 231
    - 232        Used to compare one set of measurements against another.
    - 233        It can perform both univariate and multivariate regression, though
    - 234        some techniques can only do one or the other. Multivariate regression
    - 235        can only be performed when secondary variables are provided.
    - 236
    - 237        Parameters
    - 238        ----------
    - 239        x_data : pd.DataFrame
    - 240            Data to be calibrated.
    - 241        y_data : pd.DataFrame
    - 242            'True' data to calibrate against.
    - 243        target : str
    - 244            Column name of the primary feature to use in calibration, must be
    - 245            the name of a column in both `x_data` and `y_data`.
    - 246        folds : int, default=5
    - 247            Number of folds to split the data into, using stratified k-fold.
    - 248        strat_groups : int, default=10
    - 249            Number of groups to stratify against, the data will be split into
    - 250            n equally sized bins where n is the value of `strat_groups`.
    - 251        scaler : iterable of {<br>\
    - 252            'None',<br>\
    - 253            'Standard Scale',<br>\
    - 254            'MinMax Scale',<br>\
    - 255            'Yeo-Johnson Transform',<br>\
    - 256            'Box-Cox Transform',<br>\
    - 257            'Quantile Transform (Uniform)',<br>\
    - 258            'Quantile Transform (Gaussian)',<br>\
    - 259            } or {<br>\
    - 260            'All',<br>\
    - 261            'None',<br>\
    - 262            'Standard Scale',<br>\
    - 263            'MinMax Scale',<br>\
    - 264            'Yeo-Johnson Transform',<br>\
    - 265            'Box-Cox Transform',<br>\
    - 266            'Quantile Transform (Uniform)',<br>\
    - 267            'Quantile Transform (Gaussian)',<br>\
    - 268            }, default='None'
    - 269            The scaling/transform method (or list of methods) to apply to the
    - 270            data
    - 271        seed : int, default=62
    - 272            Random state to use when shuffling and splitting the data into n
    - 273            folds. Ensures repeatability.
    - 274
    - 275        Raises
    - 276        ------
    - 277        ValueError
     - 278            Raised if the target variable (e.g. 'NO2') is not a column name in
     - 279            both dataframes.
     - 280            Raised if `scaler` is not a str, tuple or list.
    - 281        """
    - 282        if target not in x_data.columns or target not in y_data.columns:
    - 283            raise ValueError(
     - 284                    f"{target} does not exist in both dataframes."
    - 285                             )
    - 286        join_index = x_data.join(
    - 287                y_data,
    - 288                how='inner',
    - 289                lsuffix='x',
    - 290                rsuffix='y'
    - 291                ).dropna().index
    - 292        """
    - 293        The common indices between `x_data` and `y_data`, excluding missing
    - 294        values
    - 295        """
    - 296        self.x_data: pd.DataFrame = x_data.loc[join_index, :]
    - 297        """
    - 298        The data to be calibrated.
    - 299        """
    - 300        self.target: str = target
    - 301        """
    - 302        The name of the column in both `x_data` and `y_data` that
    - 303        will be used as the x and y variables in the calibration.
    - 304        """
    - 305        self.scaler_list: dict[str, Any] = {
    - 306                'None': None,
    - 307                'Standard Scale': pre.StandardScaler(),
    - 308                'MinMax Scale': pre.MinMaxScaler(),
    - 309                'Yeo-Johnson Transform': pre.PowerTransformer(
    - 310                    method='yeo-johnson'
    - 311                    ),
    - 312                'Box-Cox Transform': pre.PowerTransformer(method='box-cox'),
    - 313                'Quantile Transform (Uniform)': pre.QuantileTransformer(
    - 314                    output_distribution='uniform'
    - 315                    ),
    - 316                'Quantile Transform (Gaussian)': pre.QuantileTransformer(
    - 317                    output_distribution='normal'
    - 318                    )
    - 319                }
    - 320        """
    - 321        Keys for scaling algorithms available in the pipelines
    - 322        """
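Each entry above maps a display name onto the scikit-learn preprocessor that heads the fitted pipelines. As a rough sketch of how one entry pairs with a regressor (the actual pipeline assembly happens elsewhere in the class and may differ in detail):

```python
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PowerTransformer

# Sketch only: 'Yeo-Johnson Transform' resolves to a PowerTransformer,
# which is chained in front of whichever regressor is being calibrated.
pipe = Pipeline([
    ('scaler', PowerTransformer(method='yeo-johnson')),
    ('regressor', LinearRegression()),
])
```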
    - 323        self.scaler: list[str] = list()
    - 324        """
    - 325        The scaling algorithm(s) to preprocess the data with
    - 326        """
    - 327        if isinstance(scaler, str):
    - 328            if scaler == "All":
    - 329                if not bool(self.x_data.ge(0).all(axis=None)):
    - 330                    warnings.warn(
    - 331                        f'Box-Cox is not compatible with provided measurements'
    - 332                    )
    - 333                    self.scaler_list.pop('Box-Cox Transform')
    - 334                self.scaler.extend(self.scaler_list.keys())
    - 335            elif scaler in self.scaler_list.keys():
    - 336                self.scaler.append(scaler)
    - 337            else:
    - 338                self.scaler.append('None')
    - 339                warnings.warn(f'Scaling algorithm {scaler} not recognised')
    - 340        elif isinstance(scaler, (tuple, list)):
    - 341            for sc in scaler:
    - 342                if sc == 'Box-Cox Transform' and not bool(
    - 343                    self.x_data.ge(0).all(axis=None)
    - 344                ):
    - 345                    warnings.warn(
    - 346                        f'Box-Cox is not compatible with provided measurements'
    - 347                    )
    - 348                    continue
    - 349                if sc in self.scaler_list.keys():
    - 350                    self.scaler.append(sc)
    - 351                else:
    - 352                    warnings.warn(f'Scaling algorithm {sc} not recognised')
    - 353        else:
    - 354            raise ValueError('scaler parameter should be string, list or tuple')
    - 355        if not self.scaler:
    - 356            warnings.warn(
    - 357                f'No valid scaling algorithms provided, defaulting to None'
    - 358            )
    - 359            self.scaler.append('None')
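The net effect of these checks: 'All' expands to every available scaler, 'Box-Cox Transform' is dropped with a warning when the measurements contain values below zero, unrecognised names are skipped, and an empty selection falls back to 'None'. A hypothetical illustration on synthetic data, assuming the constructor keeps the signature shown here:

```python
import numpy as np
import pandas as pd

# Synthetic measurements for illustration; the column names are made up.
rng = np.random.default_rng(62)
x = pd.DataFrame({'NO2': rng.normal(loc=20, scale=10, size=200),
                  'RH': rng.uniform(0, 100, size=200)})
y = pd.DataFrame({'NO2': rng.normal(loc=25, scale=10, size=200)})
x.iloc[0, 0] = -1.0  # force a negative value so 'Box-Cox Transform' is dropped

cal = Calibrate(x_data=x, y_data=y, target='NO2', scaler='All')
print(cal.scaler)  # every scaler except 'Box-Cox Transform'
```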
    - 360
    - 361        self.y_data = cont_strat_folds(
    - 362                y_data.loc[join_index, :],
    - 363                target,
    - 364                folds,
    - 365                strat_groups,
    - 366                seed
    - 367                )
    - 368        """
    - 369        The data that `x_data` will be calibrated against. A '*Fold*'
    - 370        column is added using the `const_strat_folds` function which splits
     - 371        column is added using the `cont_strat_folds` function which splits
    - 372        `folds`). It splits the continuous measurements into n bins (where n
    - 373        is the value of `strat_groups`) and distributes each bin equally
    - 374        across all folds. This significantly reduces the chances of one fold
    - 375        containing a skewed distribution relative to the whole dataset.
    - 376        """
    - 377        self.models: dict[str,  # Technique name
    - 378                          dict[str,  # Scaling technique
    - 379                               dict[str,  # Variable combo
    - 380                                    dict[int,  # Fold
    - 381                                         Pipeline]]]] = dict()
    - 382        """
    - 383        The calibrated models. They are stored in a nested structure as
    - 384        follows:
    - 385        1. Primary Key, name of the technique (e.g Lasso Regression).
    - 386        2. Scaling technique (e.g Yeo-Johnson Transform).
    - 387        3. Combination of variables used or `target` if calibration is
    - 388        univariate (e.g "`target` + a + b).
    - 389        4. Fold, which fold was used excluded from the calibration. If data
    - 390        if 5-fold cross validated, a key of 4 indicates the data was trained on
    - 391        folds 0-3.
    - 392
    - 393        ```mermaid
    - 394            stateDiagram-v2
    - 395              models --> Technique
    - 396              state Technique {
    - 397                [*] --> Scaling
    - 398                [*]: The calibration technique used
    - 399                [*]: (e.g "Lasso Regression")
    - 400                state Scaling {
    - 401                  [*] --> Variables
    - 402                  [*]: The scaling technique used
    - 403                  [*]: (e.g "Yeo-Johnson Transform")
    - 404                  state Variables {
    - 405                    [*] : The combination of variables used
    - 406                    [*] : (e.g "x + a + b")
    - 407                    [*] --> Fold
    - 408                    state Fold {
    - 409                     [*] : Which fold was excluded from training data
    - 410                     [*] : (e.g 4 indicates folds 0-3 were used to train)
    - 411                    }
    - 412                  }
    - 413                }
    - 414              }
    - 415        ```
    - 416
    - 417        """
    - 418
    - 419    def _sklearn_regression_meta(
    - 420            self,
    - 421            reg: Union[skl.base.RegressorMixin, Literal['t', 'gaussian']],
    - 422            name: str,
    - 423            min_coeffs: int = 1,
    - 424            max_coeffs: int = (sys.maxsize * 2) + 1,
    - 425            **kwargs
    - 426            ):
    - 427        """
    - 428        Metaclass, formats data and uses sklearn classifier to
    - 429        fit x to y
    - 430
    - 431        Parameters
    - 432        ----------
    - 433        reg : sklearn.base.RegressorMixin or str
    - 434            Classifier to use, or distribution family to use for bayesian.
    - 435        name : str
    - 436            Name of classification technique to save pipeline to.
    - 437        min_coeffs : int, default=1
    - 438            Minimum number of coefficients for technique.
    - 439        max_coeffs : int, default=(sys.maxsize * 2) + 1
    - 440            Maximum number of coefficients for technique.
    - 441
    - 442        Raises
    - 443        ------
    - 444        NotImplementedError
    - 445            PyMC currently doesn't work, TODO
    - 446        """
    - 447        x_secondary_cols = self.x_data.drop(self.target, axis=1).columns
    - 448        # All columns in x_data that aren't the target variable
    - 449        products = [[np.nan, col] for col in x_secondary_cols]
    - 450        secondary_vals = pd.MultiIndex.from_product(products)
    - 451        # Get all possible combinations of secondary variables in a pandas
    - 452        # MultiIndex
    - 453        if self.models.get(name) is None:
    - 454            self.models[name] = dict()
    - 455            # If the classification technique hasn't been used yet,
    - 456            # add its key to the models dictionary
    - 457        for scaler in self.scaler:
    - 458            if self.models[name].get(scaler) is None:
    - 459                self.models[name][scaler] = dict()
    - 460                # If the scaling technique hasn't been used with the classification
    - 461                # technique yet, add its key to the nested dictionary
    - 462            for sec_vals in secondary_vals:
    - 463                # Loop over all combinations of secondary values
    - 464                vals = [self.target] + [v for v in sec_vals if v == v]
    - 465                vals_str = ' + '.join(vals)
    - 466                if len(vals) < min_coeffs or len(vals) > max_coeffs:
    - 467                    # Skip if number of coeffs doesn't lie within acceptable range
    - 468                    # for technique. For example, isotonic regression
    - 469                    # only works with one variable
    - 470                    continue
    - 471                self.models[name][scaler][vals_str] = dict()
    - 472                for fold in self.y_data.loc[:, 'Fold'].unique():
    - 473                    y_data = self.y_data[
    - 474                            self.y_data.loc[:, 'Fold'] != fold
    - 475                            ]
    - 476                    if reg in ['t', 'gaussian']:
    - 477                        # If using PyMC bayesian model,
    - 478                        # format data and build model using bambi
    - 479                        # then store result in pipeline
    - 480                        # Currently doesn't work as PyMC models
    - 481                        # can't be pickled, so don't function with deepcopy. Needs
    - 482                        # looking into
    - 483                        raise NotImplementedError(
    - 484                            "PyMC functions currently don't work with deepcopy"
    - 485                        )
    - 486    #                    sc = scalers[scaler]
    - 487    #                    if sc is not None:
    - 488    #                        x_data = sc.fit_transform(
    - 489    #                                self.x_data.loc[y_data.index, :]
    - 490    #                                )
    - 491    #                    else:
    - 492    #                        x_data = self.x_data.loc[y_data.index, :]
    - 493    #                    x_data['y'] = y_data.loc[:, self.target]
    - 494    #                    model = bmb.Model(
    - 495    #                            f"y ~ {vals_str}",
    - 496    #                            x_data,
    - 497    #                            family=reg
    - 498    #                            )
    - 499    #                    _ = model.fit(
    - 500    #                        progressbar=False,
    - 501    #                        **kwargs
    - 502    #                        )
    - 503    #                    pipeline = Pipeline([
    - 504    #                        ("Scaler", scaler),
    - 505    #                        ("Regression", model)
    - 506    #                        ])
    - 507                    else:
    - 508                        # If using scikit-learn API compatible classifier,
    - 509                        # Build pipeline and fit to
    - 510                        pipeline = Pipeline([
    - 511                            ("Selector", ColumnTransformer([
    - 512                                    ("selector", "passthrough", vals)
    - 513                                ], remainder="drop")
    - 514                             ),
    - 515                            ("Scaler", self.scaler_list[scaler]),
    - 516                            ("Regression", reg)
    - 517                            ])
    - 518                        pipeline.fit(
    - 519                                self.x_data.loc[y_data.index, :],
    - 520                                y_data.loc[:, self.target]
    - 521                                )
    - 522                    self.models[name][scaler][vals_str][fold] = dc(pipeline)
    - 523
    - 524    def pymc_bayesian(
    - 525            self,
    - 526            family: Literal[
    - 527                "Gaussian",
    - 528                "Student T",
    - 529            ] = "Gaussian",
    - 530            name: str = " PyMC Bayesian",
    - 531            **kwargs
    - 532            ):
    - 533        """
    - 534        Performs bayesian linear regression (either uni or multivariate)
    - 535        fitting x on y.
    - 536
    - 537        Performs bayesian linear regression, both univariate and multivariate,
    - 538        on X against y. More details can be found at:
    - 539        https://pymc.io/projects/examples/en/latest/generalized_linear_models/
    - 540        GLM-robust.html
    - 541
    - 542        Parameters
    - 543        ----------
    - 544        family : {'Gaussian', 'Student T'}, default='Gaussian'
    - 545            Statistical distribution to fit measurements to. Options are:
    - 546                - Gaussian
    - 547                - Student T
    - 548        """
    - 549        # Define model families
    - 550        model_families = {
    - 551            "Gaussian": "gaussian",
    - 552            "Student T": "t"
    - 553        }
    - 554        self._sklearn_regression_meta(
    - 555                model_families[family],
    - 556                f'{name} ({model_families})',
    - 557                **kwargs
    - 558        )
    - 559
    - 560    def linreg(self, name: str = "Linear Regression", **kwargs):
    - 561        """
    - 562        Fit x on y via linear regression
    - 563
    - 564        Parameters
    - 565        ----------
    - 566        name : str, default="Linear Regression"
    - 567            Name of classification technique.
    - 568        """
    - 569        self._sklearn_regression_meta(
    - 570                lm.LinearRegression(**kwargs),
    - 571                name
    - 572                )
    - 573
    - 574    def ridge(self, name: str = "Ridge Regression", **kwargs):
    - 575        """
    - 576        Fit x on y via ridge regression
    - 577
    - 578        Parameters
    - 579        ----------
    - 580        name : str, default="Ridge Regression"
    - 581            Name of classification technique
    - 582        """
    - 583        self._sklearn_regression_meta(
    - 584                lm.Ridge(**kwargs),
    - 585                name
    - 586                )
    - 587
    - 588    def ridge_cv(
    - 589            self,
    - 590            name: str = "Ridge Regression (Cross Validated)",
    - 591            **kwargs
    - 592            ):
    - 593        """
    - 594        Fit x on y via cross-validated ridge regression
    - 595
    - 596        Parameters
    - 597        ----------
    - 598        name : str, default="Ridge Regression (Cross Validated)"
    - 599            Name of classification technique
    - 600        """
    - 601        self._sklearn_regression_meta(
    - 602                lm.RidgeCV(**kwargs),
    - 603                name
    - 604                )
    - 605
    - 606    def lasso(self, name: str = "Lasso Regression", **kwargs):
    +            
     119class Calibrate:
    + 120    """
    + 121    Calibrate x against y using a range of different methods provided by
    + 122    scikit-learn[^skl], xgboost[^xgb] and PyMC (via Bambi)[^pmc].
    + 123
    + 124    [^skl]: https://scikit-learn.org/stable/modules/classes.html
    + 125    [^xgb]: https://xgboost.readthedocs.io/en/stable/python/python_api.html
    + 126    [^pmc]: https://bambinos.github.io/bambi/api/
    + 127
    + 128    Examples
    + 129    --------
    + 130    >>> from calidhayte.calibrate import Calibrate
    + 131    >>> import pandas as pd
    + 132    >>>
    + 133    >>> x = pd.read_csv('independent.csv')
    + 134    >>> x
    + 135    |   | a | b |
    + 136    | 0 |2.3|3.2|
    + 137    | 1 |3.4|3.1|
    + 138    |...|...|...|
    + 139    |100|3.7|2.1|
    + 140    >>>
    + 141    >>> y = pd.read_csv('dependent.csv')
    + 142    >>> y
    + 143    |   | a |
    + 144    | 0 |7.8|
    + 145    | 1 |9.9|
    + 146    |...|...|
    + 147    |100|9.5|
    + 148    >>>
    + 149    >>> calibration = Calibrate(
    + 150        x_data=x,
    + 151        y_data=y,
    + 152        target='a',
    + 153        folds=5,
    + 154        strat_groups=5,
    + 155        scaler = [
    + 156            'Standard Scale',
    + 157            'MinMax Scale'
    + 158            ],
    + 159        seed=62
    + 160    )
    + 161    >>> calibration.linreg()
    + 162    >>> calibration.lars()
    + 163    >>> calibration.omp()
    + 164    >>> calibration.ransac()
    + 165    >>> calibration.random_forest()
    + 166    >>>
    + 167    >>> models = calibration.return_models()
    + 168    >>> list(models.keys())
    + 169    [
    + 170        'Linear Regression',
    + 171        'Least Angle Regression',
    + 172        'Orthogonal Matching Pursuit',
    + 173        'RANSAC',
    + 174        'Random Forest'
    + 175    ]
    + 176    >>> list(models['Linear Regression'].keys())
    + 177    ['Standard Scale', 'MinMax Scale']
    + 178    >>> list(models['Linear Regression']['Standard Scale'].keys())
    + 179    ['a', 'a + b']
    + 180    >>> list(models['Linear Regression']['Standard Scale']['a'].keys())
    + 181    [0, 1, 2, 3, 4]
    + 182    >>> type(models['Linear Regression']['Standard Scale']['a'][0])
    + 183    <class sklearn.pipeline.Pipeline>
    + 184    >>> pipeline = models['Linear Regression']['Standard Scale']['a'][0]
    + 185    >>> x_new = pd.read_csv('independent_new.csv')
    + 186    >>> x_new
    + 187    |   | a | b |
    + 188    | 0 |3.5|2.7|
    + 189    | 1 |4.0|1.1|
    + 190    |...|...|...|
    + 191    |100|2.3|2.1|
    + 192    >>> pipeline.predict(x_new)
    + 193    |   | a |
    + 194    | 0 |9.7|
    + 195    | 1 |9.1|
    + 196    |...|...|
    + 197    |100|6.7|
    + 198
    + 199    """
    + 200
    + 201    def __init__(
    + 202            self,
    + 203            x_data: pd.DataFrame,
    + 204            y_data: pd.DataFrame,
    + 205            target: str,
    + 206            folds: int = 5,
    + 207            strat_groups: int = 10,
    + 208            scaler: Union[
    + 209                Iterable[
    + 210                    Literal[
    + 211                        'None',
    + 212                        'Standard Scale',
    + 213                        'MinMax Scale',
    + 214                        'Yeo-Johnson Transform',
    + 215                        'Box-Cox Transform',
    + 216                        'Quantile Transform (Uniform)',
    + 217                        'Quantile Transform (Gaussian)'
    + 218                        ]
    + 219                    ],
    + 220                Literal[
    + 221                    'All',
    + 222                    'None',
    + 223                    'Standard Scale',
    + 224                    'MinMax Scale',
    + 225                    'Yeo-Johnson Transform',
    + 226                    'Box-Cox Transform',
    + 227                    'Quantile Transform (Uniform)',
    + 228                    'Quantile Transform (Gaussian)',
    + 229                    ]
    + 230                ] = 'None',
    + 231            seed: int = 62
    + 232                 ):
    + 233        """Initialises class
    + 234
    + 235        Used to compare one set of measurements against another.
    + 236        It can perform both univariate and multivariate regression, though
    + 237        some techniques can only do one or the other. Multivariate regression
    + 238        can only be performed when secondary variables are provided.
    + 239
    + 240        Parameters
    + 241        ----------
    + 242        x_data : pd.DataFrame
    + 243            Data to be calibrated.
    + 244        y_data : pd.DataFrame
    + 245            'True' data to calibrate against.
    + 246        target : str
    + 247            Column name of the primary feature to use in calibration, must be
    + 248            the name of a column in both `x_data` and `y_data`.
    + 249        folds : int, default=5
    + 250            Number of folds to split the data into, using stratified k-fold.
    + 251        strat_groups : int, default=10
    + 252            Number of groups to stratify against; the data will be split into
    + 253            n equal-width bins, where n is the value of `strat_groups`.
    + 254        scaler : iterable of {<br>\
    + 255            'None',<br>\
    + 256            'Standard Scale',<br>\
    + 257            'MinMax Scale',<br>\
    + 258            'Yeo-Johnson Transform',<br>\
    + 259            'Box-Cox Transform',<br>\
    + 260            'Quantile Transform (Uniform)',<br>\
    + 261            'Quantile Transform (Gaussian)',<br>\
    + 262            } or {<br>\
    + 263            'All',<br>\
    + 264            'None',<br>\
    + 265            'Standard Scale',<br>\
    + 266            'MinMax Scale',<br>\
    + 267            'Yeo-Johnson Transform',<br>\
    + 268            'Box-Cox Transform',<br>\
    + 269            'Quantile Transform (Uniform)',<br>\
    + 270            'Quantile Transform (Gaussian)',<br>\
    + 271            }, default='None'
    + 272            The scaling/transform method (or list of methods) to apply to the
    + 273            data
    + 274        seed : int, default=62
    + 275            Random state to use when shuffling and splitting the data into n
    + 276            folds. Ensures repeatability.
    + 277
    + 278        Raises
    + 279        ------
    + 280        ValueError
    + 281            Raised if the target variable (e.g. 'NO2') is not a column name in
    + 282            both dataframes.
    + 283            Raised if `scaler` is not str, tuple or list
    + 284        """
    + 285        if target not in x_data.columns or target not in y_data.columns:
    + 286            raise ValueError(
    + 287                    f"{target} does not exist in both columns."
    + 288                             )
    + 289        join_index = x_data.join(
    + 290                y_data,
    + 291                how='inner',
    + 292                lsuffix='x',
    + 293                rsuffix='y'
    + 294                ).dropna().index
    + 295        """
    + 296        The common indices between `x_data` and `y_data`, excluding missing
    + 297        values
    + 298        """
    + 299        self.x_data: pd.DataFrame = x_data.loc[join_index, :]
    + 300        """
    + 301        The data to be calibrated.
    + 302        """
    + 303        self.target: str = target
    + 304        """
    + 305        The name of the column in both `x_data` and `y_data` that
    + 306        will be used as the x and y variables in the calibration.
    + 307        """
    + 308        self.scaler_list: dict[str, Any] = {
    + 309                'None': None,
    + 310                'Standard Scale': pre.StandardScaler(),
    + 311                'MinMax Scale': pre.MinMaxScaler(),
    + 312                'Yeo-Johnson Transform': pre.PowerTransformer(
    + 313                    method='yeo-johnson'
    + 314                    ),
    + 315                'Box-Cox Transform': pre.PowerTransformer(method='box-cox'),
    + 316                'Quantile Transform (Uniform)': pre.QuantileTransformer(
    + 317                    output_distribution='uniform'
    + 318                    ),
    + 319                'Quantile Transform (Gaussian)': pre.QuantileTransformer(
    + 320                    output_distribution='normal'
    + 321                    )
    + 322                }
    + 323        """
    + 324        Keys for scaling algorithms available in the pipelines
    + 325        """
    + 326        self.scaler: list[str] = list()
    + 327        """
    + 328        The scaling algorithm(s) to preprocess the data with
    + 329        """
    + 330        if isinstance(scaler, str):
    + 331            if scaler == "All":
    + 332                if not bool(self.x_data.ge(0).all(axis=None)):
    + 333                    warnings.warn(
    + 334                        'Box-Cox is not compatible with provided measurements'
    + 335                    )
    + 336                    self.scaler_list.pop('Box-Cox Transform')
    + 337                self.scaler.extend(self.scaler_list.keys())
    + 338            elif scaler in self.scaler_list.keys():
    + 339                self.scaler.append(scaler)
    + 340            else:
    + 341                self.scaler.append('None')
    + 342                warnings.warn(f'Scaling algorithm {scaler} not recognised')
    + 343        elif isinstance(scaler, (tuple, list)):
    + 344            for sc in scaler:
    + 345                if sc == 'Box-Cox Transform' and not bool(
    + 346                    self.x_data.ge(0).all(axis=None)
    + 347                ):
    + 348                    warnings.warn(
    + 349                        'Box-Cox is not compatible with provided measurements'
    + 350                    )
    + 351                    continue
    + 352                if sc in self.scaler_list.keys():
    + 353                    self.scaler.append(sc)
    + 354                else:
    + 355                    warnings.warn(f'Scaling algorithm {sc} not recognised')
    + 356        else:
    + 357            raise ValueError(
    + 358                'scaler parameter should be string, list or tuple'
    + 359            )
    + 360        if not self.scaler:
    + 361            warnings.warn(
    + 362                'No valid scaling algorithms provided, defaulting to None'
    + 363            )
    + 364            self.scaler.append('None')
    + 365
    + 366        self.y_data = cont_strat_folds(
    + 367                y_data.loc[join_index, :],
    + 368                target,
    + 369                folds,
    + 370                strat_groups,
    + 371                seed
    + 372                )
    + 373        """
    + 374        The data that `x_data` will be calibrated against. A '*Fold*'
    + 375        column is added using the `cont_strat_folds` function, which splits
    + 376        the data into k stratified folds (where k is the value of
    + 377        `folds`). It splits the continuous measurements into n bins (where n
    + 378        is the value of `strat_groups`) and distributes each bin equally
    + 379        across all folds. This significantly reduces the chances of one fold
    + 380        containing a skewed distribution relative to the whole dataset.
    + 381        """
    + 382        self.models: dict[str,  # Technique name
    + 383                          dict[str,  # Scaling technique
    + 384                               dict[str,  # Variable combo
    + 385                                    dict[int,  # Fold
    + 386                                         Pipeline]]]] = dict()
    + 387        """
    + 388        The calibrated models. They are stored in a nested structure as
    + 389        follows:
    + 390        1. Primary Key, name of the technique (e.g Lasso Regression).
    + 391        2. Scaling technique (e.g Yeo-Johnson Transform).
    + 392        3. Combination of variables used, or `target` if the calibration is
    + 393        univariate (e.g "`target` + a + b").
    + 394        4. Fold, which fold was excluded from the calibration. If the data
    + 395        is 5-fold cross validated, a key of 4 indicates the model was trained on
    + 396        folds 0-3.
    + 397
    + 398        ```mermaid
    + 399            stateDiagram-v2
    + 400              models --> Technique
    + 401              state Technique {
    + 402                [*] --> Scaling
    + 403                [*]: The calibration technique used
    + 404                [*]: (e.g "Lasso Regression")
    + 405                state Scaling {
    + 406                  [*] --> Variables
    + 407                  [*]: The scaling technique used
    + 408                  [*]: (e.g "Yeo-Johnson Transform")
    + 409                  state Variables {
    + 410                    [*] : The combination of variables used
    + 411                    [*] : (e.g "x + a + b")
    + 412                    [*] --> Fold
    + 413                    state Fold {
    + 414                     [*] : Which fold was excluded from training data
    + 415                     [*] : (e.g 4 indicates folds 0-3 were used to train)
    + 416                    }
    + 417                  }
    + 418                }
    + 419              }
    + 420        ```
    + 421
    + 422        """
    + 423        self.folds: int = folds
    + 424        """
    + 425        The number of folds used in k-fold cross validation
    + 426        """
    + 427
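To make the nested `models` structure above concrete, here is a minimal usage sketch. It assumes a fitted `Calibrate` instance named `calibration` (as in the class docstring example) and a new dataframe `x_new` with the same columns as `x_data`; both names are illustrative.

```python
# Illustrative sketch: 'calibration' is a fitted Calibrate instance and
# 'x_new' is a DataFrame with the same columns as x_data.
models = calibration.return_models()

# technique -> scaling technique -> variable combination -> fold -> Pipeline
pipeline = models['Linear Regression']['Standard Scale']['a'][0]

# Each pipeline ends in a regressor, so predict() returns calibrated values
y_pred = pipeline.predict(x_new)
```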
    + 428    def _sklearn_regression_meta(
    + 429        self,
    + 430        reg: Union[
    + 431            skl.base.RegressorMixin,
    + 432            RandomizedSearchCV,
    + 433            Literal['t', 'gaussian']
    + 434        ],
    + 435        name: str,
    + 436        min_coeffs: int = 1,
    + 437        max_coeffs: int = (sys.maxsize * 2) + 1,
    + 438        random_search: bool = False
    + 439            ):
    + 440        """
    + 441        Meta method that formats the data and uses an sklearn-compatible regressor to
    + 442        fit x to y
    + 443
    + 444        Parameters
    + 445        ----------
    + 446        reg : sklearn.base.RegressorMixin or str
    + 447            Regressor to use, or distribution family to use for bayesian.
    + 448        name : str
    + 449            Name of classification technique to save pipeline to.
    + 450        min_coeffs : int, default=1
    + 451            Minimum number of coefficients for technique.
    + 452        max_coeffs : int, default=(sys.maxsize * 2) + 1
    + 453            Maximum number of coefficients for technique.
    + 454        random_search : bool
    + 455            Whether RandomizedSearch is used or not
    + 456
    + 457        Raises
    + 458        ------
    + 459        NotImplementedError
    + 460            PyMC currently doesn't work, TODO
    + 461        """
    + 462        x_secondary_cols = self.x_data.drop(self.target, axis=1).columns
    + 463        # All columns in x_data that aren't the target variable
    + 464        products = [[np.nan, col] for col in x_secondary_cols]
    + 465        secondary_vals = pd.MultiIndex.from_product(products)
    + 466        # Get all possible combinations of secondary variables in a pandas
    + 467        # MultiIndex
    + 468        if self.models.get(name) is None:
    + 469            self.models[name] = dict()
    + 470            # If the classification technique hasn't been used yet,
    + 471            # add its key to the models dictionary
    + 472        for scaler in self.scaler:
    + 473            if self.models[name].get(scaler) is None:
    + 474                self.models[name][scaler] = dict()
    + 475                # If the scaling technique hasn't been used with the
    + 476                # classification technique yet,
    + 477                # add its key to the nested dictionary
    + 478            for sec_vals in secondary_vals:
    + 479                # Loop over all combinations of secondary values
    + 480                vals = [self.target] + [v for v in sec_vals if v == v]
    + 481                vals_str = ' + '.join(vals)
    + 482                if len(vals) < min_coeffs or len(vals) > max_coeffs:
    + 483                    # Skip if the number of coeffs doesn't lie within the
    + 484                    # acceptable range for this technique.
    + 485                    # For example, isotonic regression
    + 486                    # only works with one variable
    + 487                    continue
    + 488                self.models[name][scaler][vals_str] = dict()
    + 489                if random_search:
    + 490                    pipeline = Pipeline([
    + 491                        ("Selector", ColumnTransformer([
    + 492                                ("selector", "passthrough", vals)
    + 493                            ], remainder="drop")
    + 494                         ),
    + 495                        ("Scaler", self.scaler_list[scaler]),
    + 496                        ("Regression", reg)
    + 497                        ])
    + 498                    pipeline.fit(
    + 499                        self.x_data,
    + 500                        self.y_data.loc[:, self.target]
    + 501                            )
    + 502                    self.models[name][scaler][vals_str][0] = dc(pipeline)
    + 503                    continue
    + 504
    + 505                for fold in self.y_data.loc[:, 'Fold'].unique():
    + 506                    y_data = self.y_data[
    + 507                            self.y_data.loc[:, 'Fold'] != fold
    + 508                            ]
    + 509                    if reg in ['t', 'gaussian']:
    + 510                        # If using PyMC bayesian model,
    + 511                        # format data and build model using bambi
    + 512                        # then store result in pipeline
    + 513                        # Currently doesn't work as PyMC models
    + 514                        # can't be pickled, so they don't work with deepcopy.
    + 515                        # Needs looking into
    + 516                        raise NotImplementedError(
    + 517                            "PyMC functions currently don't work with deepcopy"
    + 518                        )
    + 519    #                    sc = scalers[scaler]
    + 520    #                    if sc is not None:
    + 521    #                        x_data = sc.fit_transform(
    + 522    #                                self.x_data.loc[y_data.index, :]
    + 523    #                                )
    + 524    #                    else:
    + 525    #                        x_data = self.x_data.loc[y_data.index, :]
    + 526    #                    x_data['y'] = y_data.loc[:, self.target]
    + 527    #                    model = bmb.Model(
    + 528    #                            f"y ~ {vals_str}",
    + 529    #                            x_data,
    + 530    #                            family=reg
    + 531    #                            )
    + 532    #                    _ = model.fit(
    + 533    #                        progressbar=False,
    + 534    #                        **kwargs
    + 535    #                        )
    + 536    #                    pipeline = Pipeline([
    + 537    #                        ("Scaler", scaler),
    + 538    #                        ("Regression", model)
    + 539    #                        ])
    + 540                    else:
    + 541                        # If using a scikit-learn API compatible regressor,
    + 542                        # build the pipeline and fit it to the data
    + 543                        pipeline = Pipeline([
    + 544                            ("Selector", ColumnTransformer([
    + 545                                    ("selector", "passthrough", vals)
    + 546                                ], remainder="drop")
    + 547                             ),
    + 548                            ("Scaler", self.scaler_list[scaler]),
    + 549                            ("Regression", reg)
    + 550                            ])
    + 551                        pipeline.fit(
    + 552                                self.x_data.loc[y_data.index, :],
    + 553                                y_data.loc[:, self.target]
    + 554                                )
    + 555                    self.models[name][scaler][vals_str][fold] = dc(pipeline)
    + 556
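The NaN-product trick used above to enumerate variable combinations can be hard to read in isolation; the short sketch below reproduces just that step. The secondary column names ('a', 'b') and the 'target' label are illustrative.

```python
import numpy as np
import pandas as pd

# Hypothetical secondary columns; the target is always included
x_secondary_cols = ['a', 'b']
products = [[np.nan, col] for col in x_secondary_cols]
secondary_vals = pd.MultiIndex.from_product(products)

for sec_vals in secondary_vals:
    # NaN != NaN, so 'v == v' drops the placeholder entries
    vals = ['target'] + [v for v in sec_vals if v == v]
    print(' + '.join(vals))

# Prints every combination of the target with the secondary columns,
# e.g. 'target', 'target + a', 'target + b', 'target + a + b'
```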
    + 557    def pymc_bayesian(
    + 558            self,
    + 559            family: Literal[
    + 560                "Gaussian",
    + 561                "Student T",
    + 562            ] = "Gaussian",
    + 563            name: str = " PyMC Bayesian",
    + 564            **kwargs
    + 565            ):
    + 566        """
    + 567        Performs bayesian linear regression (either uni or multivariate)
    + 568        fitting x on y.
    + 569
    + 570        Performs bayesian linear regression, both univariate and multivariate,
    + 571        on X against y. More details can be found at:
    + 572        https://pymc.io/projects/examples/en/latest/generalized_linear_models/
    + 573        GLM-robust.html
    + 574
    + 575        Parameters
    + 576        ----------
    + 577        family : {'Gaussian', 'Student T'}, default='Gaussian'
    + 578            Statistical distribution to fit measurements to. Options are:
    + 579                - Gaussian
    + 580                - Student T
    + 581        """
    + 582        # Define model families
    + 583        model_families: dict[str, Literal['t', 'gaussian']] = {
    + 584            "Gaussian": 'gaussian',
    + 585            "Student T": 't'
    + 586        }
    + 587        self._sklearn_regression_meta(
    + 588                model_families[family],
    + 589                f'{name} ({family})',
    + 590                **kwargs
    + 591        )
    + 592
    + 593    def linreg(
    + 594        self,
    + 595        name: str = "Linear Regression",
    + 596        random_search: bool = False,
    + 597        parameters: dict[
    + 598            str,
    + 599            Union[
    + 600                scipy.stats.rv_continuous,
    + 601                List[Union[int, str, float]]
    + 602            ]
    + 603        ] = {
    + 604        },
    + 605        **kwargs
    + 606            ):
      607        """
    - 608        Fit x on y via lasso regression
    + 608        Fit x on y via linear regression
      609
      610        Parameters
      611        ----------
    - 612        name : str, default="Lasso Regression"
    - 613            Name of classification technique
    - 614        """
    - 615        self._sklearn_regression_meta(
    - 616                lm.Lasso(**kwargs),
    - 617                name
    - 618                )
    - 619
    - 620    def lasso_cv(
    - 621            self,
    - 622            name: str = "Lasso Regression (Cross Validated)",
    - 623            **kwargs
    - 624            ):
    - 625        """
    - 626        Fit x on y via cross-validated lasso regression
    - 627
    - 628        Parameters
    - 629        ----------
    - 630        name : str, default="Lasso Regression (Cross Validated)"
    - 631            Name of classification technique
    - 632        """
    + 612        name : str, default="Linear Regression"
    + 613            Name of classification technique.
    + 614        random_search : bool, default=False
    + 615            Whether to perform RandomizedSearch to optimise parameters
    + 616        parameters : dict[
    + 617                str,
    + 618                Union[
    + 619                    scipy.stats.rv_continuous,
    + 620                    List[Union[int, str, float]]
    + 621                ]
    + 622            ], default={} (no parameters to optimise)
    + 623            The parameters used in RandomizedSearchCV
    + 624        """
    + 625        if random_search:
    + 626            classifier = RandomizedSearchCV(
    + 627                lm.LinearRegression(**kwargs),
    + 628                parameters,
    + 629                cv=self.folds
    + 630            )
    + 631        else:
    + 632            classifier = lm.LinearRegression(**kwargs)
      633        self._sklearn_regression_meta(
    - 634                lm.LassoCV(**kwargs),
    - 635                name
    - 636                )
    - 637
    - 638    def multi_task_lasso(
    - 639            self,
    - 640            name: str = "Multi-task Lasso Regression",
    - 641            **kwargs
    - 642            ):
    - 643        """
    - 644        Fit x on y via multitask lasso regression
    - 645
    - 646        Parameters
    - 647        ----------
    - 648        name : str, default="Multi-task Lasso Regression"
    - 649            Name of classification technique
    - 650        """
    - 651        self._sklearn_regression_meta(
    - 652                lm.MultiTaskLasso(**kwargs),
    - 653                name
    - 654                )
    - 655
    - 656    def multi_task_lasso_cv(
    - 657            self,
    - 658            name: str = "Multi-task Lasso Regression (Cross Validated)",
    - 659            **kwargs
    - 660            ):
    - 661        """
    - 662        Fit x on y via cross validated multitask lasso regression
    - 663
    - 664        Parameters
    - 665        ----------
    - 666        name : str, default="Multi-task Lasso Regression (Cross Validated)"
    - 667            Name of classification technique
    - 668        """
    - 669        self._sklearn_regression_meta(
    - 670                lm.MultiTaskLassoCV(**kwargs),
    - 671                name
    - 672                )
    - 673
    - 674    def elastic_net(self, name: str = "Elastic Net Regression", **kwargs):
    - 675        """
    - 676        Fit x on y via elastic net regression
    - 677
    - 678        Parameters
    - 679        ----------
    - 680        name : str, default="Elastic Net Regression"
    - 681            Name of classification technique
    - 682        """
    - 683        self._sklearn_regression_meta(
    - 684                lm.ElasticNet(**kwargs),
    - 685                name
    - 686                )
    - 687
    - 688    def elastic_net_cv(
    - 689            self,
    - 690            name: str = "Elastic Net Regression (Cross Validated)",
    - 691            **kwargs
    - 692            ):
    - 693        """
    - 694        Fit x on y via cross validated elastic net regression
    + 634            classifier,
    + 635            f'{name}{" (Random Search)" if random_search else ""}',
    + 636            random_search=random_search
    + 637        )
    + 638
    + 639    def ridge(
    + 640        self,
    + 641        name: str = "Ridge Regression",
    + 642        random_search: bool = False,
    + 643        parameters: dict[
    + 644            str,
    + 645            Union[
    + 646                scipy.stats.rv_continuous,
    + 647                List[Union[int, str, float]]
    + 648            ]
    + 649        ] = {
    + 650            'alpha': uniform(loc=0, scale=2),
    + 651            'tol': uniform(loc=0, scale=1),
    + 652            'solver': [
    + 653                'svd',
    + 654                'cholesky',
    + 655                'lsqr',
    + 656                'sparse_cg',
    + 657                'sag',
    + 658                'saga',
    + 659                'lbfgs'
    + 660            ]
    + 661        },
    + 662        **kwargs
    + 663            ):
    + 664        """
    + 665        Fit x on y via ridge regression
    + 666
    + 667        Parameters
    + 668        ----------
    + 669        name : str, default="Ridge Regression"
    + 670            Name of classification technique.
    + 671        random_search : bool, default=False
    + 672            Whether to perform RandomizedSearch to optimise parameters
    + 673        parameters : dict[
    + 674                str,
    + 675                Union[
    + 676                    scipy.stats.rv_continuous,
    + 677                    List[Union[int, str, float]]
    + 678                ]
    + 679            ], default=Preset distributions
    + 680            The parameters used in RandomizedSearchCV
    + 681        """
    + 682        if random_search:
    + 683            classifier = RandomizedSearchCV(
    + 684                lm.Ridge(**kwargs),
    + 685                parameters,
    + 686                cv=self.folds
    + 687            )
    + 688        else:
    + 689            classifier = lm.Ridge(**kwargs)
    + 690        self._sklearn_regression_meta(
    + 691            classifier,
    + 692            f'{name}{" (Random Search)" if random_search else ""}',
    + 693            random_search=random_search
    + 694        )
      695
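As a usage sketch of the `random_search` option introduced above: the `calibration` instance and the scaler/variable keys follow the class docstring example, and the parameter ranges are illustrative rather than recommended values.

```python
from scipy.stats import uniform

# 'calibration' is assumed to be the Calibrate instance from the class example
calibration.ridge(
    random_search=True,
    parameters={
        'alpha': uniform(loc=0, scale=10),      # sample alpha from [0, 10)
        'solver': ['svd', 'cholesky', 'lsqr'],  # candidate solvers
    },
)

models = calibration.return_models()
# Randomised-search variants are stored under a suffixed key, with a single
# pipeline at fold key 0
pipes = models['Ridge Regression (Random Search)']['Standard Scale']['a']
best_params = pipes[0].named_steps['Regression'].best_params_
```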
    - 696        Parameters
    - 697        ----------
    - 698        name : str, default="Elastic Net Regression (Cross Validated)"
    - 699            Name of classification technique
    - 700        """
    - 701        self._sklearn_regression_meta(
    - 702                lm.ElasticNetCV(**kwargs),
    - 703                name
    - 704                )
    + 696    def ridge_cv(
    + 697            self,
    + 698            name: str = "Ridge Regression (Cross Validated)",
    + 699            random_search: bool = False,
    + 700            **kwargs
    + 701            ):
    + 702        """
    + 703        Fit x on y via cross-validated ridge regression.
    + 704        Already cross-validated, so random search is not required
      705
    - 706    def multi_task_elastic_net(
    - 707            self,
    - 708            name: str = "Multi-Task Elastic Net Regression",
    - 709            **kwargs
    - 710            ):
    - 711        """
    - 712        Fit x on y via multi-task elastic net regression
    - 713
    - 714        Parameters
    - 715        ----------
    - 716        name : str, default="Multi-task Elastic Net Regression"
    - 717            Name of classification technique
    - 718        """
    - 719        self._sklearn_regression_meta(
    - 720                lm.MultiTaskElasticNet(**kwargs),
    - 721                name
    - 722                )
    - 723
    - 724    def multi_task_elastic_net_cv(
    - 725            self,
    - 726            name: str = "Multi-Task Elastic Net Regression (Cross Validated)",
    - 727            **kwargs
    - 728            ):
    - 729        """
    - 730        Fit x on y via cross validated multi-task elastic net regression
    - 731
    - 732        Parameters
    - 733        ----------
    - 734        name : str, default="Multi-Task Elastic Net Regression\
    - 735        (Cross Validated)"
    - 736            Name of classification technique
    - 737        """
    - 738        self._sklearn_regression_meta(
    - 739                lm.MultiTaskElasticNetCV(**kwargs),
    - 740                name
    - 741                )
    - 742
    - 743    def lars(self, name: str = "Least Angle Regression", **kwargs):
    - 744        """
    - 745        Fit x on y via least angle regression
    - 746
    - 747        Parameters
    - 748        ----------
    - 749        name : str, default="Least Angle Regression"
    - 750            Name of classification technique.
    - 751        """
    - 752        self._sklearn_regression_meta(
    - 753                lm.Lars(**kwargs),
    - 754                name
    - 755                )
    - 756
    - 757    def lars_lasso(
    - 758            self,
    - 759            name: str = "Least Angle Regression (Lasso)",
    - 760            **kwargs
    - 761            ):
    - 762        """
    - 763        Fit x on y via lasso least angle regression
    - 764
    - 765        Parameters
    - 766        ----------
    - 767        name : str, default="Least Angle Regression (Lasso)"
    - 768            Name of classification technique
    - 769        """
    - 770        self._sklearn_regression_meta(
    - 771                lm.LassoLars(**kwargs),
    - 772                name
    - 773                )
    - 774
    - 775    def omp(self, name: str = "Orthogonal Matching Pursuit", **kwargs):
    + 706        Parameters
    + 707        ----------
    + 708        name : str, default="Ridge Regression (Cross Validated)"
    + 709            Name of classification technique
    + 710        random_search : bool, default=False
    + 711            Not used
    + 712
    + 713        """
    + 714        _ = random_search
    + 715        self._sklearn_regression_meta(
    + 716            lm.RidgeCV(**kwargs, cv=self.folds),
    + 717            name,
    + 718            random_search=True
    + 719        )
    + 720
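Because the cross-validated estimator above is passed through the meta method with `random_search=True`, it is fitted once per scaler/variable combination and stored under fold key 0. A brief sketch of retrieving the fitted model, with names again taken from the class docstring example:

```python
# 'calibration' is assumed to be the Calibrate instance from the class example
calibration.ridge_cv()

models = calibration.return_models()
# Only fold key 0 exists for the cross-validated variants
pipe = models['Ridge Regression (Cross Validated)']['Standard Scale']['a'][0]

# RidgeCV exposes the regularisation strength it selected
print(pipe.named_steps['Regression'].alpha_)
```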
    + 721    def lasso(
    + 722        self,
    + 723        name: str = "Lasso Regression",
    + 724        random_search: bool = False,
    + 725        parameters: dict[
    + 726            str,
    + 727            Union[
    + 728                scipy.stats.rv_continuous,
    + 729                List[Union[int, str, float]]
    + 730            ]
    + 731        ] = {
    + 732            'alpha': uniform(loc=0, scale=2),
    + 733            'tol': uniform(loc=0, scale=1),
    + 734            'selection': ['cyclic', 'random']
    + 735        },
    + 736        **kwargs
    + 737            ):
    + 738        """
    + 739        Fit x on y via lasso regression
    + 740
    + 741        Parameters
    + 742        ----------
    + 743        name : str, default="Lasso Regression"
    + 744            Name of classification technique.
    + 745        random_search : bool, default=False
    + 746            Whether to perform RandomizedSearch to optimise parameters
    + 747        parameters : dict[
    + 748                str,
    + 749                Union[
    + 750                    scipy.stats.rv_continuous,
    + 751                    List[Union[int, str, float]]
    + 752                ]
    + 753            ], default=Preset distributions
    + 754            The parameters used in RandomizedSearchCV
    + 755        """
    + 756        if random_search:
    + 757            classifier = RandomizedSearchCV(
    + 758                lm.Lasso(**kwargs),
    + 759                parameters,
    + 760                cv=self.folds
    + 761            )
    + 762        else:
    + 763            classifier = lm.Lasso(**kwargs)
    + 764        self._sklearn_regression_meta(
    + 765            classifier,
    + 766            f'{name}{" (Random Search)" if random_search else ""}',
    + 767            random_search=random_search
    + 768        )
    + 769
    + 770    def lasso_cv(
    + 771            self,
    + 772            name: str = "Lasso Regression (Cross Validated)",
    + 773            random_search: bool = False,
    + 774            **kwargs
    + 775            ):
      776        """
    - 777        Fit x on y via orthogonal matching pursuit regression
    - 778
    - 779        Parameters
    - 780        ----------
    - 781        name : str, default="Orthogonal Matching Pursuit"
    - 782            Name of classification technique
    - 783        """
    - 784        self._sklearn_regression_meta(
    - 785                lm.OrthogonalMatchingPursuit(**kwargs),
    - 786                name,
    - 787                min_coeffs=2
    - 788                )
    - 789
    - 790    def bayesian_ridge(
    - 791                self,
    - 792                name: str = "Bayesian Ridge Regression",
    - 793                **kwargs
    - 794            ):
    - 795        """
    - 796        Fit x on y via bayesian ridge regression
    - 797
    - 798        Parameters
    - 799        ----------
    - 800        name : str, default="Bayesian Ridge Regression"
    - 801            Name of classification technique.
    - 802        """
    - 803        self._sklearn_regression_meta(
    - 804                lm.BayesianRidge(**kwargs),
    - 805                name
    - 806                )
    - 807
    - 808    def bayesian_ard(
    - 809            self,
    - 810            name: str = "Bayesian Automatic Relevance Detection",
    - 811            **kwargs
    - 812            ):
    - 813        """
    - 814        Fit x on y via bayesian automatic relevance detection
    - 815
    - 816        Parameters
    - 817        ----------
    - 818        name : str, default="Bayesian Automatic Relevance Detection"
    - 819            Name of classification technique.
    - 820        """
    - 821        self._sklearn_regression_meta(
    - 822                lm.ARDRegression(**kwargs),
    - 823                name
    - 824                )
    - 825
    - 826    def tweedie(self, name: str = "Tweedie Regression", **kwargs):
    - 827        """
    - 828        Fit x on y via tweedie regression
    - 829
    - 830        Parameters
    - 831        ----------
    - 832        name : str, default="Tweedie Regression"
    - 833            Name of classification technique.
    - 834        """
    - 835        self._sklearn_regression_meta(
    - 836                lm.TweedieRegressor(**kwargs),
    - 837                name
    - 838                )
    - 839
    - 840    def stochastic_gradient_descent(
    - 841            self,
    - 842            name: str = "Stochastic Gradient Descent",
    - 843            **kwargs
    - 844            ):
    - 845        """
    - 846        Fit x on y via stochastic gradient descent regression
    - 847
    - 848        Parameters
    - 849        ----------
    - 850        name : str, default="Stochastic Gradient Descent"
    - 851            Name of classification technique.
    - 852        """
    - 853        self._sklearn_regression_meta(
    - 854                lm.SGDRegressor(**kwargs),
    - 855                name
    - 856                )
    - 857
    - 858    def passive_aggressive(
    - 859            self,
    - 860            name: str = "Passive Agressive Regression",
    - 861            **kwargs
    - 862            ):
    - 863        """
    - 864        Fit x on y via passive aggressive regression
    - 865
    - 866        Parameters
    - 867        ----------
    - 868        name : str, default="Passive Agressive Regression"
    - 869            Name of classification technique.
    - 870        """
    - 871        self._sklearn_regression_meta(
    - 872                lm.PassiveAggressiveRegressor(**kwargs),
    - 873                name
    - 874                )
    - 875
    - 876    def ransac(self, name: str = "RANSAC", **kwargs):
    - 877        """
    - 878        Fit x on y via RANSAC regression
    - 879
    - 880        Parameters
    - 881        ----------
    - 882        name : str, default="RANSAC"
    - 883            Name of classification technique.
    - 884        """
    - 885        self._sklearn_regression_meta(
    - 886                lm.RANSACRegressor(**kwargs),
    - 887                name
    - 888                )
    + 777        Fit x on y via cross-validated lasso regression.
    + 778        Already cross-validated, so random search is not required
    + 779
    + 780        Parameters
    + 781        ----------
    + 782        name : str, default="Lasso Regression (Cross Validated)"
    + 783            Name of classification technique
    + 784        random_search : bool, default=False
    + 785            Not used
    + 786
    + 787        """
    + 788        _ = random_search
    + 789        self._sklearn_regression_meta(
    + 790            lm.LassoCV(**kwargs, cv=self.folds),
    + 791            name,
    + 792            random_search=True
    + 793        )
    + 794
    + 795    def multi_task_lasso(
    + 796        self,
    + 797        name: str = "Multi-task Lasso Regression",
    + 798        random_search: bool = False,
    + 799        parameters: dict[
    + 800            str,
    + 801            Union[
    + 802                scipy.stats.rv_continuous,
    + 803                List[Union[int, str, float]]
    + 804            ]
    + 805        ] = {
    + 806            'alpha': uniform(loc=0, scale=2),
    + 807            'tol': uniform(loc=0, scale=1),
    + 808            'selection': ['cyclic', 'random']
    + 809        },
    + 810        **kwargs
    + 811            ):
    + 812        """
    + 813        Fit x on y via multitask lasso regression
    + 814
    + 815        Parameters
    + 816        ----------
    + 817        name : str, default="Multi-task Lasso Regression"
    + 818            Name of classification technique.
    + 819        random_search : bool, default=False
    + 820            Whether to perform RandomizedSearch to optimise parameters
    + 821        parameters : dict[
    + 822                str,
    + 823                Union[
    + 824                    scipy.stats.rv_continuous,
    + 825                    List[Union[int, str, float]]
    + 826                ]
    + 827            ], default=Preset distributions
    + 828            The parameters used in RandomizedSearchCV
    + 829        """
    + 830        if random_search:
    + 831            classifier = RandomizedSearchCV(
    + 832                lm.MultiTaskLasso(**kwargs),
    + 833                parameters,
    + 834                cv=self.folds
    + 835            )
    + 836        else:
    + 837            classifier = lm.MultiTaskLasso(**kwargs)
    + 838        self._sklearn_regression_meta(
    + 839            classifier,
    + 840            f'{name}{" (Random Search)" if random_search else ""}',
    + 841            random_search=random_search
    + 842        )
    + 843
    + 844    def multi_task_lasso_cv(
    + 845            self,
    + 846            name: str = "Multi-task Lasso Regression (Cross Validated)",
    + 847            random_search: bool = False,
    + 848            **kwargs
    + 849            ):
    + 850        """
    + 851        Fit x on y via cross-validated multitask lasso regression.
    + 852        Already cross-validated, so random search is not required
    + 853
    + 854        Parameters
    + 855        ----------
    + 856        name : str, default="Multi-task Lasso Regression (Cross Validated)"
    + 857            Name of classification technique
    + 858        random_search : bool, default=False
    + 859            Not used
    + 860
    + 861        """
    + 862        _ = random_search
    + 863        self._sklearn_regression_meta(
    + 864            lm.MultiTaskLassoCV(**kwargs, cv=self.folds),
    + 865            name,
    + 866            random_search=True
    + 867        )
    + 868
    + 869    def elastic_net(
    + 870        self,
    + 871        name: str = "Elastic Net Regression",
    + 872        random_search: bool = False,
    + 873        parameters: dict[
    + 874            str,
    + 875            Union[
    + 876                scipy.stats.rv_continuous,
    + 877                List[Union[int, str, float]]
    + 878            ]
    + 879        ] = {
    + 880            'alpha': uniform(loc=0, scale=2),
    + 881            'l1_ratio': uniform(loc=0, scale=1),
    + 882            'tol': uniform(loc=0, scale=1),
    + 883            'selection': ['cyclic', 'random']
    + 884        },
    + 885        **kwargs
    + 886            ):
    + 887        """
    + 888        Fit x on y via elastic net regression
      889
    - 890    def theil_sen(self, name: str = "Theil-Sen Regression", **kwargs):
    - 891        """
    - 892        Fit x on y via theil-sen regression
    - 893
    - 894        Parameters
    - 895        ----------
    - 896        name : str, default="Theil-Sen Regression"
    - 897            Name of classification technique.
    - 898        -Sen Regression
    - 899        """
    - 900        self._sklearn_regression_meta(
    - 901                lm.TheilSenRegressor(**kwargs),
    - 902                name
    - 903                )
    - 904
    - 905    def huber(self, name: str = "Huber Regression", **kwargs):
    - 906        """
    - 907        Fit x on y via huber regression
    - 908
    - 909        Parameters
    - 910        ----------
    - 911        name : str, default="Huber Regression"
    - 912            Name of classification technique.
    - 913        """
    - 914        self._sklearn_regression_meta(
    - 915                lm.HuberRegressor(**kwargs),
    - 916                name
    - 917                )
    + 890        Parameters
    + 891        ----------
    + 892        name : str, default="Elastic Net Regression"
    + 893            Name of classification technique.
    + 894        random_search : bool, default=False
    + 895            Whether to perform RandomizedSearch to optimise parameters
    + 896        parameters : dict[
    + 897                str,
    + 898                Union[
    + 899                    scipy.stats.rv_continuous,
    + 900                    List[Union[int, str, float]]
    + 901                ]
    + 902            ], default=Preset distributions
    + 903            The parameters used in RandomizedSearchCV
    + 904        """
    + 905        if random_search:
    + 906            classifier = RandomizedSearchCV(
    + 907                lm.ElasticNet(**kwargs),
    + 908                parameters,
    + 909                cv=self.folds
    + 910            )
    + 911        else:
    + 912            classifier = lm.ElasticNet(**kwargs)
    + 913        self._sklearn_regression_meta(
    + 914            classifier,
    + 915            f'{name}{" (Random Search)" if random_search else ""}',
    + 916            random_search=random_search
    + 917        )
      918
    - 919    def quantile(self, name: str = "Quantile Regression", **kwargs):
    - 920        """
    - 921        Fit x on y via quantile regression
    - 922
    - 923        Parameters
    - 924        ----------
    - 925        name : str, default="Quantile Regression"
    - 926            Name of classification technique.
    - 927        """
    - 928        self._sklearn_regression_meta(
    - 929                lm.QuantileRegressor(**kwargs),
    - 930                name
    - 931                )
    - 932
    - 933    def decision_tree(self, name: str = "Decision Tree", **kwargs):
    - 934        """
    - 935        Fit x on y using a decision tree
    - 936
    - 937        Parameters
    - 938        ----------
    - 939        name : str, default="Decision Tree"
    - 940            Name of classification technique.
    - 941        """
    - 942        self._sklearn_regression_meta(
    - 943                tree.DecisionTreeRegressor(**kwargs),
    - 944                name
    - 945                )
    - 946
    - 947    def extra_tree(self, name: str = "Extra Tree", **kwargs):
    - 948        """
    - 949        Fit x on y using an extra tree
    - 950
    - 951        Parameters
    - 952        ----------
    - 953        name : str, default="Extra Tree"
    - 954            Name of classification technique.
    - 955        """
    - 956        self._sklearn_regression_meta(
    - 957                tree.ExtraTreeRegressor(**kwargs),
    - 958                name
    - 959                )
    - 960
    - 961    def random_forest(self, name: str = "Random Forest", **kwargs):
    - 962        """
    - 963        Fit x on y using a random forest
    - 964
    - 965        Parameters
    - 966        ----------
    - 967        name : str, default="Random Forest"
    - 968            Name of classification technique.
    - 969        """
    - 970        self._sklearn_regression_meta(
    - 971                en.RandomForestRegressor(**kwargs),
    - 972                name
    - 973                )
    - 974
    - 975    def extra_trees_ensemble(
    - 976            self,
    - 977            name: str = "Extra Trees Ensemble",
    - 978            **kwargs
    - 979            ):
    - 980        """
    - 981        Fit x on y using an ensemble of extra trees
    - 982
    - 983        Parameters
    - 984        ----------
    - 985        name : str, default="Extra Trees Ensemble"
    - 986            Name of classification technique.
    - 987        """
    - 988        self._sklearn_regression_meta(
    - 989                en.ExtraTreesRegressor(**kwargs),
    - 990                name
    - 991                )
    + 919    def elastic_net_cv(
    + 920            self,
    + 921            name: str = "Elastic Net Regression (Cross Validated)",
    + 922            random_search: bool = False,
    + 923            **kwargs
    + 924            ):
    + 925        """
     + 926        Fit x on y via cross-validated elastic net regression.
     + 927        Already cross-validated, so a random search is not required.
    + 928
    + 929        Parameters
    + 930        ----------
     + 931        name : str, default="Elastic Net Regression (Cross Validated)"
    + 932            Name of classification technique
    + 933        random_search : bool, default=False
    + 934            Not used
    + 935        """
    + 936        _ = random_search
    + 937        self._sklearn_regression_meta(
    + 938            lm.ElasticNetCV(**kwargs, cv=self.folds),
    + 939            name,
    + 940            random_search=True
    + 941        )
    + 942
    + 943    def multi_task_elastic_net(
    + 944        self,
    + 945        name: str = "Multi-task Elastic Net Regression",
    + 946        random_search: bool = False,
    + 947        parameters: dict[
    + 948            str,
    + 949            Union[
    + 950                scipy.stats.rv_continuous,
    + 951                List[Union[int, str, float]]
    + 952            ]
    + 953        ] = {
    + 954            'alpha': uniform(loc=0, scale=2),
    + 955            'l1_ratio': uniform(loc=0, scale=1),
    + 956            'tol': uniform(loc=0, scale=1),
    + 957            'selection': ['cyclic', 'random']
    + 958        },
    + 959        **kwargs
    + 960            ):
    + 961        """
     + 962        Fit x on y via multi-task elastic net regression
    + 963
    + 964        Parameters
    + 965        ----------
    + 966        name : str, default="Multi-task Elastic Net Regression"
    + 967            Name of classification technique.
    + 968        random_search : bool, default=False
    + 969            Whether to perform RandomizedSearch to optimise parameters
    + 970        parameters : dict[
    + 971                str,
    + 972                Union[
    + 973                    scipy.stats.rv_continuous,
    + 974                    List[Union[int, str, float]]
    + 975                ]
    + 976            ], default=Preset distributions
    + 977            The parameters used in RandomizedSearchCV
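     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator, showing
     +            that the multi-task variant expects a two-dimensional target (one
     +            column per task). The synthetic data is an illustrative assumption:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.linear_model import MultiTaskElasticNet
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(40, 3))
     +            >>> W = np.array([[1.0, 0.0], [0.0, 2.0], [0.5, -1.0]])
     +            >>> Y = X @ W  # shape (40, 2): two target columns
     +            >>> model = MultiTaskElasticNet(alpha=0.1, l1_ratio=0.5).fit(X, Y)
     +            >>> model.coef_.shape  # (n_tasks, n_features)
     +            (2, 3)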
    + 978        """
    + 979        if random_search:
    + 980            classifier = RandomizedSearchCV(
    + 981                lm.MultiTaskElasticNet(**kwargs),
    + 982                parameters,
    + 983                cv=self.folds
    + 984            )
    + 985        else:
    + 986            classifier = lm.MultiTaskElasticNet(**kwargs)
    + 987        self._sklearn_regression_meta(
    + 988            classifier,
    + 989            f'{name}{" (Random Search)" if random_search else ""}',
    + 990            random_search=random_search
    + 991        )
      992
    - 993    def gradient_boost_regressor(
    + 993    def multi_task_elastic_net_cv(
      994            self,
    - 995            name: str = "Gradient Boosting Regression",
    - 996            **kwargs
    - 997            ):
    - 998        """
    - 999        Fit x on y using gradient boosting regression
    -1000
    -1001        Parameters
    -1002        ----------
    -1003        name : str, default="Gradient Boosting Regression"
    -1004            Name of classification technique.
    -1005        """
    -1006        self._sklearn_regression_meta(
    -1007                en.GradientBoostingRegressor(**kwargs),
    -1008                name
    -1009                )
    + 995            name: str = "Multi-Task Elastic Net Regression (Cross Validated)",
    + 996            random_search: bool = False,
    + 997            **kwargs
    + 998            ):
    + 999        """
    +1000        Fit x on y via cross-validated multi-task elastic net regression.
     +1001        Already cross-validated, so a random search is not required.
    +1002
    +1003        Parameters
    +1004        ----------
    +1005        name : str, default="Multi-Task Elastic Net Regression \
    +1006        (Cross Validated)"
    +1007            Name of classification technique
    +1008        random_search : bool, default=False
    +1009            Not used
     1010
    -1011    def hist_gradient_boost_regressor(
    -1012            self,
    -1013            name: str = "Histogram-Based Gradient Boosting Regression",
    -1014            **kwargs
    -1015            ):
    -1016        """
    -1017        Fit x on y using histogram-based gradient boosting regression
    +1011        """
    +1012        _ = random_search
    +1013        self._sklearn_regression_meta(
    +1014            lm.MultiTaskElasticNetCV(**kwargs, cv=self.folds),
    +1015            name,
    +1016            random_search=True
    +1017        )
     1018
    -1019        Parameters
    -1020        ----------
    -1021        name : str, default="Histogram-Based Gradient Boosting Regression"
    -1022            Name of classification technique.
    -1023        -Based
    -1024            Gradient Boosting Regression
    -1025        """
    -1026        self._sklearn_regression_meta(
    -1027                en.HistGradientBoostingRegressor(**kwargs),
    -1028                name
    -1029                )
    -1030
    -1031    def mlp_regressor(
    -1032            self,
    -1033            name: str = "Multi-Layer Perceptron Regression",
    -1034            **kwargs
    -1035            ):
    -1036        """
    -1037        Fit x on y using multi-layer perceptrons
    -1038
    -1039        Parameters
    -1040        ----------
    -1041        name : str, default="Multi-Layer Perceptron Regression"
    -1042            Name of classification technique.
    -1043        -Layer Perceptron
    -1044            Regression
    -1045        """
    -1046        self._sklearn_regression_meta(
    -1047                nn.MLPRegressor(**kwargs),
    -1048                name
    -1049                )
    -1050
    -1051    def svr(self, name: str = "Support Vector Regression", **kwargs):
    -1052        """
    -1053        Fit x on y using support vector regression
    -1054
    -1055        Parameters
    -1056        ----------
    -1057        name : str, default="Support Vector Regression"
    -1058            Name of classification technique.
    -1059        """
    +1019    def lars(
    +1020        self,
    +1021        name: str = "Least Angle Regression",
    +1022        random_search: bool = False,
    +1023        parameters: dict[
    +1024            str,
    +1025            Union[
    +1026                scipy.stats.rv_continuous,
    +1027                List[Union[int, str, float]]
    +1028            ]
    +1029        ] = {
    +1030            'n_nonzero_coefs': list(range(1, 11))
    +1031        },
    +1032        **kwargs
    +1033            ):
    +1034        """
    +1035        Fit x on y via least angle regression
    +1036
    +1037        Parameters
    +1038        ----------
    +1039        name : str, default="Least Angle Regression"
    +1040            Name of classification technique.
    +1041        random_search : bool, default=False
    +1042            Whether to perform RandomizedSearch to optimise parameters
    +1043        parameters : dict[
    +1044                str,
    +1045                Union[
    +1046                    scipy.stats.rv_continuous,
    +1047                    List[Union[int, str, float]]
    +1048                ]
    +1049            ], default=Preset distributions
    +1050            The parameters used in RandomizedSearchCV
    +1051        """
    +1052        if random_search:
    +1053            classifier = RandomizedSearchCV(
    +1054                lm.Lars(**kwargs),
    +1055                parameters,
    +1056                cv=self.folds
    +1057            )
    +1058        else:
    +1059            classifier = lm.Lars(**kwargs)
     1060        self._sklearn_regression_meta(
    -1061                svm.SVR(**kwargs),
    -1062                name
    -1063                )
    -1064
    -1065    def linear_svr(
    -1066            self,
    -1067            name: str = "Linear Support Vector Regression",
    -1068            **kwargs
    -1069            ):
    -1070        """
    -1071        Fit x on y using linear support vector regression
    -1072
    -1073        Parameters
    -1074        ----------
    -1075        name : str, default="Linear Support Vector Regression"
    -1076            Name of classification technique.
    -1077        """
    -1078        self._sklearn_regression_meta(
    -1079                svm.LinearSVR(**kwargs),
    -1080                name
    -1081                )
    -1082
    -1083    def nu_svr(self, name: str = "Nu-Support Vector Regression", **kwargs):
    -1084        """
    -1085        Fit x on y using nu-support vector regression
    -1086
    -1087        Parameters
    -1088        ----------
    -1089        name : str, default="Nu-Support Vector Regression"
    -1090            Name of classification technique.
    -1091        -Support Vector
    -1092            Regression
    -1093        """
    -1094        self._sklearn_regression_meta(
    -1095                svm.LinearSVR(**kwargs),
    -1096                name
    -1097                )
    -1098
    -1099    def gaussian_process(
    -1100            self,
    -1101            name: str = "Gaussian Process Regression",
    -1102            **kwargs
    -1103            ):
    -1104        """
    -1105        Fit x on y using gaussian process regression
    -1106
    -1107        Parameters
    -1108        ----------
    -1109        name : str, default="Gaussian Process Regression"
    -1110            Name of classification technique.
    -1111        """
    -1112        self._sklearn_regression_meta(
    -1113                gp.GaussianProcessRegressor(**kwargs),
    -1114                name
    -1115                )
    -1116
    -1117    def pls(self, name: str = "PLS Regression", **kwargs):
    -1118        """
    -1119        Fit x on y using pls regression
    -1120
    -1121        Parameters
    -1122        ----------
    -1123        name : str, default="PLS Regression"
    -1124            Name of classification technique.
    -1125        """
    -1126        self._sklearn_regression_meta(
    -1127                cd.PLSRegression(n_components=1, **kwargs),
    -1128                name
    -1129                )
    +1061            classifier,
    +1062            f'{name}{" (Random Search)" if random_search else ""}',
    +1063            random_search=random_search
    +1064        )
    +1065
    +1066    def lars_lasso(
    +1067        self,
    +1068        name: str = "Least Angle Lasso Regression",
    +1069        random_search: bool = False,
    +1070        parameters: dict[
    +1071            str,
    +1072            Union[
    +1073                scipy.stats.rv_continuous,
    +1074                List[Union[int, str, float]]
    +1075            ]
    +1076        ] = {
    +1077            'alpha': uniform(loc=0, scale=2)
    +1078        },
    +1079        **kwargs
    +1080            ):
    +1081        """
    +1082        Fit x on y via least angle lasso regression
    +1083
    +1084        Parameters
    +1085        ----------
    +1086        name : str, default="Least Angle Lasso Regression"
    +1087            Name of classification technique.
    +1088        random_search : bool, default=False
    +1089            Whether to perform RandomizedSearch to optimise parameters
    +1090        parameters : dict[
    +1091                str,
    +1092                Union[
    +1093                    scipy.stats.rv_continuous,
    +1094                    List[Union[int, str, float]]
    +1095                ]
    +1096            ], default=Preset distributions
    +1097            The parameters used in RandomizedSearchCV
    +1098        """
    +1099        if random_search:
    +1100            classifier = RandomizedSearchCV(
    +1101                lm.LassoLars(**kwargs),
    +1102                parameters,
    +1103                cv=self.folds
    +1104            )
    +1105        else:
    +1106            classifier = lm.LassoLars(**kwargs)
    +1107        self._sklearn_regression_meta(
    +1108            classifier,
    +1109            f'{name}{" (Random Search)" if random_search else ""}',
    +1110            random_search=random_search
    +1111        )
    +1112
    +1113    def omp(
    +1114        self,
    +1115        name: str = "Orthogonal Matching Pursuit",
    +1116        random_search: bool = False,
    +1117        parameters: dict[
    +1118            str,
    +1119            Union[
    +1120                scipy.stats.rv_continuous,
    +1121                List[Union[int, str, float]]
    +1122            ]
    +1123        ] = {
    +1124            'n_nonzero_coefs': list(range(1, 11))
    +1125        },
    +1126        **kwargs
    +1127            ):
    +1128        """
    +1129        Fit x on y via orthogonal matching pursuit regression
     1130
    -1131    def isotonic(self, name: str = "Isotonic Regression", **kwargs):
    -1132        """
    -1133        Fit x on y using isotonic regression
    -1134
    -1135        Parameters
    -1136        ----------
    -1137        name : str, default="Isotonic Regression"
    -1138            Name of classification technique.
    -1139        """
    -1140        self._sklearn_regression_meta(
    -1141                iso.IsotonicRegression(**kwargs),
    -1142                name,
    -1143                max_coeffs=1
    -1144                )
    -1145
    -1146    def xgboost(self, name: str = "XGBoost Regression", **kwargs):
    -1147        """
    -1148        Fit x on y using xgboost regression
    -1149
    -1150        Parameters
    -1151        ----------
    -1152        name : str, default="XGBoost Regression"
    -1153            Name of classification technique.
    -1154        """
    -1155        self._sklearn_regression_meta(
    -1156                xgb.XGBRegressor(**kwargs),
    -1157                name
    -1158                )
    -1159
    -1160    def xgboost_rf(
    -1161            self,
    -1162            name: str = "XGBoost Random Forest Regression",
    -1163            **kwargs
    -1164            ):
    -1165        """
    -1166        Fit x on y using xgboosted random forest regression
    -1167
    -1168        Parameters
    -1169        ----------
    -1170        name : str, default="XGBoost Random Forest Regression"
    -1171            Name of classification technique.
    -1172        """
    -1173        self._sklearn_regression_meta(
    -1174                xgb.XGBRFRegressor(**kwargs),
    -1175                name
    -1176                )
    -1177
    -1178    def return_measurements(self) -> dict[str, pd.DataFrame]:
    -1179        """
    -1180        Returns the measurements used, with missing values and
    -1181        non-overlapping measurements excluded
    +1131        Parameters
    +1132        ----------
    +1133        name : str, default="Orthogonal Matching Pursuit"
    +1134            Name of classification technique.
    +1135        random_search : bool, default=False
    +1136            Whether to perform RandomizedSearch to optimise parameters
    +1137        parameters : dict[
    +1138                str,
    +1139                Union[
    +1140                    scipy.stats.rv_continuous,
    +1141                    List[Union[int, str, float]]
    +1142                ]
    +1143            ], default=Preset distributions
    +1144            The parameters used in RandomizedSearchCV
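     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator. Orthogonal
     +            matching pursuit picks at most `n_nonzero_coefs` predictors, which is
     +            why this method is restricted to multivariate combinations
     +            (`min_coeffs=2`). The synthetic data is an illustrative assumption:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.linear_model import OrthogonalMatchingPursuit
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(60, 5))
     +            >>> y = 3.0 * X[:, 0] - 2.0 * X[:, 2] + rng.normal(scale=0.1, size=60)
     +            >>> omp = OrthogonalMatchingPursuit(n_nonzero_coefs=2).fit(X, y)
     +            >>> int(np.count_nonzero(omp.coef_))
     +            2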
    +1145        """
    +1146        if random_search:
    +1147            classifier = RandomizedSearchCV(
    +1148                lm.OrthogonalMatchingPursuit(**kwargs),
    +1149                parameters,
    +1150                cv=self.folds
    +1151            )
    +1152        else:
    +1153            classifier = lm.OrthogonalMatchingPursuit(**kwargs)
    +1154        self._sklearn_regression_meta(
    +1155            classifier,
    +1156            f'{name}{" (Random Search)" if random_search else ""}',
    +1157            random_search=random_search,
    +1158            min_coeffs=2
    +1159        )
    +1160
    +1161    def bayesian_ridge(
    +1162        self,
    +1163        name: str = "Bayesian Ridge Regression",
    +1164        random_search: bool = False,
    +1165        parameters: dict[
    +1166            str,
    +1167            Union[
    +1168                scipy.stats.rv_continuous,
    +1169                List[Union[int, str, float]]
    +1170            ]
    +1171        ] = {
    +1172            'tol': uniform(loc=0, scale=1),
    +1173            'alpha_1': uniform(loc=0, scale=1),
    +1174            'alpha_2': uniform(loc=0, scale=1),
    +1175            'lambda_1': uniform(loc=0, scale=1),
    +1176            'lambda_2': uniform(loc=0, scale=1)
    +1177        },
    +1178        **kwargs
    +1179            ):
    +1180        """
    +1181        Fit x on y via bayesian ridge regression
     1182
    -1183        Returns
    -1184        -------
    -1185        dict[str, pd.DataFrame]
    -1186            Dictionary with 2 keys:
    -1187
    -1188            |Key|Value|
    -1189            |---|---|
    -1190            |x|`x_data`|
    -1191            |y|`y_data`|
    -1192
    -1193        """
    -1194        return {
    -1195                'x': self.x_data,
    -1196                'y': self.y_data
    -1197                }
    -1198
    -1199    def return_models(self) -> dict[str,  # Technique
    -1200                                    dict[str,  # Scaling method
    -1201                                         dict[str,  # Variables used
    -1202                                              dict[int,  # Fold
    -1203                                                   Pipeline]]]]:
    -1204        """
    -1205        Returns the models stored in the object
    -1206
    -1207        Returns
    -1208        -------
    -1209        dict[str, str, str, int, Pipeline]
    -1210            The calibrated models. They are stored in a nested structure as
    -1211            follows:
    -1212            1. Primary Key, name of the technique (e.g Lasso Regression).
    -1213            2. Scaling technique (e.g Yeo-Johnson Transform).
    -1214            3. Combination of variables used or `target` if calibration is
    -1215            univariate (e.g "`target` + a + b).
    -1216            4. Fold, which fold was used excluded from the calibration. If data
    -1217            folds 0-3.
    -1218            if 5-fold cross validated, a key of 4 indicates the data was
    -1219            trained on
    -1220        """
    -1221        return self.models
    +1183        Parameters
    +1184        ----------
    +1185        name : str, default="Bayesian Ridge Regression"
    +1186            Name of classification technique.
    +1187        random_search : bool, default=False
    +1188            Whether to perform RandomizedSearch to optimise parameters
    +1189        parameters : dict[
    +1190                str,
    +1191                Union[
    +1192                    scipy.stats.rv_continuous,
    +1193                    List[Union[int, str, float]]
    +1194                ]
    +1195            ], default=Preset distributions
    +1196            The parameters used in RandomizedSearchCV
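     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator; a fitted
     +            Bayesian ridge model can also return a per-point predictive standard
     +            deviation. The synthetic data is an illustrative assumption:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.linear_model import BayesianRidge
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(50, 2))
     +            >>> y = X @ np.array([1.5, -0.5]) + rng.normal(scale=0.1, size=50)
     +            >>> model = BayesianRidge().fit(X, y)
     +            >>> mean, std = model.predict(X[:5], return_std=True)
     +            >>> mean.shape, std.shape
     +            ((5,), (5,))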
    +1197        """
    +1198        if random_search:
    +1199            classifier = RandomizedSearchCV(
    +1200                lm.BayesianRidge(**kwargs),
    +1201                parameters,
    +1202                cv=self.folds
    +1203            )
    +1204        else:
    +1205            classifier = lm.BayesianRidge(**kwargs)
    +1206        self._sklearn_regression_meta(
    +1207            classifier,
    +1208            f'{name}{" (Random Search)" if random_search else ""}',
    +1209            random_search=random_search
    +1210        )
    +1211
    +1212    def bayesian_ard(
    +1213        self,
    +1214        name: str = "Bayesian Automatic Relevance Detection",
    +1215        random_search: bool = False,
    +1216        parameters: dict[
    +1217            str,
    +1218            Union[
    +1219                scipy.stats.rv_continuous,
    +1220                List[Union[int, str, float]]
    +1221            ]
    +1222        ] = {
    +1223            'tol': uniform(loc=0, scale=1),
    +1224            'alpha_1': uniform(loc=0, scale=1),
    +1225            'alpha_2': uniform(loc=0, scale=1),
    +1226            'lambda_1': uniform(loc=0, scale=1),
    +1227            'lambda_2': uniform(loc=0, scale=1)
    +1228        },
    +1229        **kwargs
    +1230            ):
    +1231        """
    +1232        Fit x on y via bayesian automatic relevance detection
    +1233
    +1234        Parameters
    +1235        ----------
    +1236        name : str, default="Bayesian Automatic Relevance Detection"
    +1237            Name of classification technique.
    +1238        random_search : bool, default=False
    +1239            Whether to perform RandomizedSearch to optimise parameters
    +1240        parameters : dict[
    +1241                str,
    +1242                Union[
    +1243                    scipy.stats.rv_continuous,
    +1244                    List[Union[int, str, float]]
    +1245                ]
    +1246            ], default=Preset distributions
    +1247            The parameters used in RandomizedSearchCV
    +1248        """
    +1249        if random_search:
    +1250            classifier = RandomizedSearchCV(
    +1251                lm.ARDRegression(**kwargs),
    +1252                parameters,
    +1253                cv=self.folds
    +1254            )
    +1255        else:
    +1256            classifier = lm.ARDRegression(**kwargs)
    +1257        self._sklearn_regression_meta(
    +1258            classifier,
    +1259            f'{name}{" (Random Search)" if random_search else ""}',
    +1260            random_search=random_search
    +1261        )
    +1262
    +1263    def tweedie(
    +1264        self,
    +1265        name: str = "Tweedie Regression",
    +1266        random_search: bool = False,
    +1267        parameters: dict[
    +1268            str,
    +1269            Union[
    +1270                scipy.stats.rv_continuous,
    +1271                List[Union[int, str, float]]
    +1272            ]
    +1273        ] = {
    +1274            'power': [0, 1, 1.5, 2, 2.5, 3],
    +1275            'alpha': uniform(loc=0, scale=2),
    +1276            'solver': ['lbfgs', 'newton-cholesky'],
    +1277            'tol': uniform(loc=0, scale=1),
    +1278        },
    +1279        **kwargs
    +1280            ):
    +1281        """
    +1282        Fit x on y via tweedie regression
    +1283
    +1284        Parameters
    +1285        ----------
    +1286        name : str, default="Tweedie Regression"
    +1287            Name of classification technique.
    +1288        random_search : bool, default=False
    +1289            Whether to perform RandomizedSearch to optimise parameters
    +1290        parameters : dict[
    +1291                str,
    +1292                Union[
    +1293                    scipy.stats.rv_continuous,
    +1294                    List[Union[int, str, float]]
    +1295                ]
    +1296            ], default=Preset distributions
    +1297            The parameters used in RandomizedSearchCV
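     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator; `power`
     +            selects the assumed error distribution (0 normal, 1 Poisson, 2 gamma,
     +            3 inverse Gaussian), and powers of 2 and above need a strictly
     +            positive target. The synthetic positive target is an illustrative
     +            assumption:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.linear_model import TweedieRegressor
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(60, 2))
     +            >>> y = np.exp(0.3 * X[:, 0] - 0.2 * X[:, 1]) + rng.uniform(0.01, 0.1, size=60)
     +            >>> model = TweedieRegressor(power=2, link='log').fit(X, y)
     +            >>> model.predict(X[:3]).shape
     +            (3,)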
    +1298        """
    +1299        if random_search:
    +1300            classifier = RandomizedSearchCV(
    +1301                lm.TweedieRegressor(**kwargs),
    +1302                parameters,
    +1303                cv=self.folds
    +1304            )
    +1305        else:
    +1306            classifier = lm.TweedieRegressor(**kwargs)
    +1307        self._sklearn_regression_meta(
    +1308            classifier,
    +1309            f'{name}{" (Random Search)" if random_search else ""}',
    +1310            random_search=random_search
    +1311        )
    +1312
    +1313    def stochastic_gradient_descent(
    +1314        self,
    +1315        name: str = "Stochastic Gradient Descent",
    +1316        random_search: bool = False,
    +1317        parameters: dict[
    +1318            str,
    +1319            Union[
    +1320                scipy.stats.rv_continuous,
    +1321                List[Union[int, str, float]]
    +1322            ]
    +1323        ] = {
    +1324            'tol': uniform(loc=0, scale=1),
    +1325            'loss': [
    +1326                'squared_error',
    +1327                'huber',
    +1328                'epsilon_insensitive',
    +1329                'squared_epsilon_insensitive'
    +1330            ],
    +1331            'penalty': [
    +1332                'l2',
    +1333                'l1',
    +1334                'elasticnet',
    +1335                None
    +1336            ],
    +1337            'alpha': uniform(loc=0, scale=0.001),
    +1338            'l1_ratio': uniform(loc=0, scale=1),
    +1339            'epsilon': uniform(loc=0, scale=1),
    +1340            'learning_rate': [
    +1341                'constant',
    +1342                'optimal',
    +1343                'invscaling',
    +1344                'adaptive'
    +1345            ],
    +1346            'eta0': uniform(loc=0, scale=0.1),
    +1347            'power_t': uniform(loc=0, scale=1)
    +1348
    +1349        },
    +1350        **kwargs
    +1351            ):
    +1352        """
    +1353        Fit x on y via stochastic gradient descent
    +1354
    +1355        Parameters
    +1356        ----------
    +1357        name : str, default="Stochastic Gradient Descent"
    +1358            Name of classification technique.
    +1359        random_search : bool, default=False
    +1360            Whether to perform RandomizedSearch to optimise parameters
    +1361        parameters : dict[
    +1362                str,
    +1363                Union[
    +1364                    scipy.stats.rv_continuous,
    +1365                    List[Union[int, str, float]]
    +1366                ]
    +1367            ], default=Preset distributions
    +1368            The parameters used in RandomizedSearchCV
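     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator. SGD is
     +            sensitive to feature scale, so the sketch pairs it with a standard
     +            scaler in a small pipeline; the synthetic data and the scaler choice
     +            are illustrative assumptions:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.linear_model import SGDRegressor
     +            >>> from sklearn.pipeline import Pipeline
     +            >>> from sklearn.preprocessing import StandardScaler
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(loc=100, scale=25, size=(80, 2))
     +            >>> y = 0.05 * X[:, 0] - 0.02 * X[:, 1] + rng.normal(scale=0.1, size=80)
     +            >>> pipe = Pipeline([
     +            ...     ('scale', StandardScaler()),
     +            ...     ('sgd', SGDRegressor(loss='huber', penalty='l2', random_state=0))
     +            ... ])
     +            >>> _ = pipe.fit(X, y)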
    +1369        """
    +1370        if random_search:
    +1371            classifier = RandomizedSearchCV(
    +1372                lm.SGDRegressor(**kwargs),
    +1373                parameters,
    +1374                cv=self.folds
    +1375            )
    +1376        else:
    +1377            classifier = lm.SGDRegressor(**kwargs)
    +1378        self._sklearn_regression_meta(
    +1379            classifier,
    +1380            f'{name}{" (Random Search)" if random_search else ""}',
    +1381            random_search=random_search
    +1382        )
    +1383
    +1384    def passive_aggressive(
    +1385        self,
    +1386        name: str = "Passive Aggressive Regression",
    +1387        random_search: bool = False,
    +1388        parameters: dict[
    +1389            str,
    +1390            Union[
    +1391                scipy.stats.rv_continuous,
    +1392                List[Union[int, str, float]]
    +1393            ]
    +1394        ] = {
    +1395            'C': uniform(loc=0, scale=2),
    +1396            'tol': uniform(loc=0, scale=1),
    +1397            'loss': [
    +1398                'epsilon_insensitive',
    +1399                'squared_epsilon_insensitive'
    +1400            ],
    +1401            'epsilon': uniform(loc=0, scale=1)
    +1402        },
    +1403        **kwargs
    +1404            ):
    +1405        """
     +1406        Fit x on y via passive aggressive regression
    +1407
    +1408        Parameters
    +1409        ----------
    +1410        name : str, default="Passive Aggressive Regression"
    +1411            Name of classification technique.
    +1412        random_search : bool, default=False
    +1413            Whether to perform RandomizedSearch to optimise parameters
    +1414        parameters : dict[\
    +1415                str,\
    +1416                Union[\
    +1417                    scipy.stats.rv_continuous,\
    +1418                    List[Union[int, str, float]]\
    +1419                ]\
    +1420            ], default=Preset distributions
    +1421            The parameters used in RandomizedSearchCV
    +1422        """
    +1423        if random_search:
    +1424            classifier = RandomizedSearchCV(
    +1425                lm.PassiveAggressiveRegressor(**kwargs),
    +1426                parameters,
    +1427                cv=self.folds
    +1428            )
    +1429        else:
    +1430            classifier = lm.PassiveAggressiveRegressor(**kwargs)
    +1431        self._sklearn_regression_meta(
    +1432            classifier,
    +1433            f'{name}{" (Random Search)" if random_search else ""}',
    +1434            random_search=random_search
    +1435        )
    +1436
    +1437    def ransac(
    +1438        self,
    +1439        name: str = "RANSAC",
    +1440        random_search: bool = False,
    +1441        parameters: dict[
    +1442            str,
    +1443            Union[
    +1444                scipy.stats.rv_continuous,
    +1445                List[Union[int, str, float]]
    +1446            ]
    +1447        ] = {
    +1448            'estimator': [
    +1449                lm.LinearRegression()
    +1450                # TODO: ADD
    +1451            ]
    +1452        },
    +1453        **kwargs
    +1454            ):
    +1455        """
     +1456        Fit x on y via RANSAC
    +1457
    +1458        Parameters
    +1459        ----------
    +1460        name : str, default="RANSAC"
    +1461            Name of classification technique.
    +1462        random_search : bool, default=False
    +1463            Whether to perform RandomizedSearch to optimise parameters
    +1464        parameters : dict[\
    +1465                str,\
    +1466                Union[\
    +1467                    scipy.stats.rv_continuous,\
    +1468                    List[Union[int, str, float]]\
    +1469                ]\
    +1470            ], default=Preset distributions
    +1471            The parameters used in RandomizedSearchCV
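     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator. RANSAC
     +            refits the base `estimator` on random subsets and keeps the consensus
     +            model, so gross outliers are excluded from the final fit. The
     +            corrupted synthetic data is an illustrative assumption:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.linear_model import LinearRegression, RANSACRegressor
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(100, 1))
     +            >>> y = 3.0 * X[:, 0] + rng.normal(scale=0.1, size=100)
     +            >>> y[:5] += 50  # corrupt a handful of points
     +            >>> ransac = RANSACRegressor(estimator=LinearRegression(), random_state=0)
     +            >>> _ = ransac.fit(X, y)
     +            >>> outlier_points = ~ransac.inlier_mask_  # True where a point was rejected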
    +1472        """
    +1473        if random_search:
    +1474            classifier = RandomizedSearchCV(
    +1475                lm.RANSACRegressor(**kwargs),
    +1476                parameters,
    +1477                cv=self.folds
    +1478            )
    +1479        else:
    +1480            classifier = lm.RANSACRegressor(**kwargs)
    +1481        self._sklearn_regression_meta(
    +1482            classifier,
    +1483            f'{name}{" (Random Search)" if random_search else ""}',
    +1484            random_search=random_search
    +1485        )
    +1486
    +1487    def theil_sen(
    +1488        self,
    +1489        name: str = "Theil-Sen Regression",
    +1490        random_search: bool = False,
    +1491        parameters: dict[
    +1492            str,
    +1493            Union[
    +1494                scipy.stats.rv_continuous,
    +1495                List[Union[int, str, float]]
    +1496            ]
    +1497        ] = {
    +1498            'tol': uniform(loc=0, scale=1)
    +1499        },
    +1500        **kwargs
    +1501            ):
    +1502        """
    +1503        Fit x on y via theil-sen regression
    +1504
    +1505        Parameters
    +1506        ----------
    +1507        name : str, default="Theil-Sen Regression"
    +1508            Name of classification technique.
    +1509        random_search : bool, default=False
    +1510            Whether to perform RandomizedSearch to optimise parameters
    +1511        parameters : dict[\
    +1512                str,\
    +1513                Union[\
    +1514                    scipy.stats.rv_continuous,\
    +1515                    List[Union[int, str, float]]\
    +1516                ]\
    +1517            ], default=Preset distributions
    +1518            The parameters used in RandomizedSearchCV
    +1519        """
    +1520        if random_search:
    +1521            classifier = RandomizedSearchCV(
    +1522                lm.TheilSenRegressor(**kwargs),
    +1523                parameters,
    +1524                cv=self.folds
    +1525            )
    +1526        else:
    +1527            classifier = lm.TheilSenRegressor(**kwargs)
    +1528        self._sklearn_regression_meta(
    +1529            classifier,
    +1530            f'{name}{" (Random Search)" if random_search else ""}',
    +1531            random_search=random_search
    +1532        )
    +1533
    +1534    def huber(
    +1535        self,
    +1536        name: str = "Huber Regression",
    +1537        random_search: bool = False,
    +1538        parameters: dict[
    +1539            str,
    +1540            Union[
    +1541                scipy.stats.rv_continuous,
    +1542                List[Union[int, str, float]]
    +1543            ]
    +1544        ] = {
    +1545            'epsilon': uniform(loc=1, scale=4),
    +1546            'alpha': uniform(loc=0, scale=0.01),
    +1547            'tol': uniform(loc=0, scale=1)
    +1548        },
    +1549        **kwargs
    +1550            ):
    +1551        """
    +1552        Fit x on y via huber regression
    +1553
    +1554        Parameters
    +1555        ----------
    +1556        name : str, default="Huber Regression"
    +1557            Name of classification technique.
    +1558        random_search : bool, default=False
    +1559            Whether to perform RandomizedSearch to optimise parameters
    +1560        parameters : dict[\
    +1561                str,\
    +1562                Union[\
    +1563                    scipy.stats.rv_continuous,\
    +1564                    List[Union[int, str, float]]\
    +1565                ]\
    +1566            ], default=Preset distributions
    +1567            The parameters used in RandomizedSearchCV
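     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator; `epsilon`
     +            (>= 1) sets how far a residual can stray before a point is treated as
     +            an outlier, so smaller values give a more robust fit. Synthetic data,
     +            illustrative only:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.linear_model import HuberRegressor
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(80, 1))
     +            >>> y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=80)
     +            >>> y[:3] -= 30  # a few gross outliers
     +            >>> huber = HuberRegressor(epsilon=1.35).fit(X, y)
     +            >>> flagged = huber.outliers_  # boolean mask of points treated as outliers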
    +1568        """
    +1569        if random_search:
    +1570            classifier = RandomizedSearchCV(
    +1571                lm.HuberRegressor(**kwargs),
    +1572                parameters,
    +1573                cv=self.folds
    +1574            )
    +1575        else:
    +1576            classifier = lm.HuberRegressor(**kwargs)
    +1577        self._sklearn_regression_meta(
    +1578            classifier,
    +1579            f'{name}{" (Random Search)" if random_search else ""}',
    +1580            random_search=random_search
    +1581        )
    +1582
    +1583    def quantile(
    +1584        self,
    +1585        name: str = "Quantile Regression",
    +1586        random_search: bool = False,
    +1587        parameters: dict[
    +1588            str,
    +1589            Union[
    +1590                scipy.stats.rv_continuous,
    +1591                List[Union[int, str, float]]
    +1592            ]
    +1593        ] = {
     +1594            'quantile': uniform(loc=0, scale=1),  # quantile must lie in (0, 1)
    +1595            'alpha': uniform(loc=0, scale=2),
    +1596            'tol': uniform(loc=0, scale=1),
    +1597            'solver': [
    +1598                'highs-ds',
    +1599                'highs-ipm',
    +1600                'highs',
    +1601                'revised simplex',
    +1602            ]
    +1603        },
    +1604        **kwargs
    +1605            ):
    +1606        """
    +1607        Fit x on y via quantile regression
    +1608
     +1609        Parameters
     +1610        ----------
    +1612        name : str, default="Quantile Regression"
    +1613            Name of classification technique.
    +1614        random_search : bool, default=False
    +1615            Whether to perform RandomizedSearch to optimise parameters
    +1616        parameters : dict[\
    +1617                str,\
    +1618                Union[\
    +1619                    scipy.stats.rv_continuous,\
    +1620                    List[Union[int, str, float]]\
    +1621                ]\
    +1622            ], default=Preset distributions
    +1623            The parameters used in RandomizedSearchCV
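     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator; fitting at
     +            several quantiles gives a crude prediction interval, and `quantile`
     +            must lie strictly between 0 and 1. Synthetic data, illustrative only:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.linear_model import QuantileRegressor
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(100, 1))
     +            >>> y = 1.5 * X[:, 0] + rng.normal(scale=0.5, size=100)
     +            >>> fits = {
     +            ...     q: QuantileRegressor(quantile=q, alpha=0).fit(X, y)
     +            ...     for q in (0.05, 0.5, 0.95)
     +            ... }
     +            >>> lower = fits[0.05].predict(X[:3])
     +            >>> upper = fits[0.95].predict(X[:3])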
    +1624        """
    +1625        if random_search:
    +1626            classifier = RandomizedSearchCV(
    +1627                lm.QuantileRegressor(**kwargs),
    +1628                parameters,
    +1629                cv=self.folds
    +1630            )
    +1631        else:
    +1632            classifier = lm.QuantileRegressor(**kwargs)
    +1633        self._sklearn_regression_meta(
    +1634            classifier,
    +1635            f'{name}{" (Random Search)" if random_search else ""}',
    +1636            random_search=random_search
    +1637        )
    +1638
    +1639    def decision_tree(
    +1640        self,
    +1641        name: str = "Decision Tree",
    +1642        random_search: bool = False,
    +1643        parameters: dict[
    +1644            str,
    +1645            Union[
    +1646                scipy.stats.rv_continuous,
    +1647                List[Union[int, str, float]]
    +1648            ]
    +1649        ] = {
    +1650            'criterion': [
    +1651                'squared_error',
    +1652                'friedman_mse',
    +1653                'absolute_error',
    +1654                'poisson'
    +1655            ],
    +1656            'splitter': [
    +1657                'best',
    +1658                'random'
    +1659            ],
    +1660            'max_features': [
    +1661                None,
    +1662                'sqrt',
    +1663                'log2'
    +1664            ],
    +1665            'ccp_alpha': uniform(loc=0, scale=2),
    +1666        },
    +1667        **kwargs
    +1668            ):
    +1669        """
    +1670        Fit x on y via decision tree
    +1671
    +1672        Parameters
    +1673        ----------
    +1674        name : str, default="Decision Tree"
    +1675            Name of classification technique.
    +1676        random_search : bool, default=False
    +1677            Whether to perform RandomizedSearch to optimise parameters
    +1678        parameters : dict[\
    +1679                str,\
    +1680                Union[\
    +1681                    scipy.stats.rv_continuous,\
    +1682                    List[Union[int, str, float]]\
    +1683                ]\
    +1684            ], default=Preset distributions
    +1685            The parameters used in RandomizedSearchCV
    +1686        """
    +1687        if random_search:
    +1688            classifier = RandomizedSearchCV(
    +1689                tree.DecisionTreeRegressor(**kwargs),
    +1690                parameters,
    +1691                cv=self.folds
    +1692            )
    +1693        else:
    +1694            classifier = tree.DecisionTreeRegressor(**kwargs)
    +1695        self._sklearn_regression_meta(
    +1696            classifier,
    +1697            f'{name}{" (Random Search)" if random_search else ""}',
    +1698            random_search=random_search
    +1699        )
    +1700
    +1701    def extra_tree(
    +1702        self,
    +1703        name: str = "Extra Tree",
    +1704        random_search: bool = False,
    +1705        parameters: dict[
    +1706            str,
    +1707            Union[
    +1708                scipy.stats.rv_continuous,
    +1709                List[Union[int, str, float]]
    +1710            ]
    +1711        ] = {
    +1712            'criterion': [
    +1713                'squared_error',
    +1714                'friedman_mse',
    +1715                'absolute_error',
    +1716                'poisson'
    +1717            ],
    +1718            'splitter': [
    +1719                'best',
    +1720                'random'
    +1721            ],
    +1722            'max_features': [
    +1723                None,
    +1724                'sqrt',
    +1725                'log2'
    +1726            ],
    +1727            'ccp_alpha': uniform(loc=0, scale=2),
    +1728        },
    +1729        **kwargs
    +1730            ):
    +1731        """
    +1732        Fit x on y via extra tree
    +1733
    +1734        Parameters
    +1735        ----------
    +1736        name : str, default="Extra Tree"
    +1737            Name of classification technique.
    +1738        random_search : bool, default=False
    +1739            Whether to perform RandomizedSearch to optimise parameters
    +1740        parameters : dict[\
    +1741                str,\
    +1742                Union[\
    +1743                    scipy.stats.rv_continuous,\
    +1744                    List[Union[int, str, float]]\
    +1745                ]\
    +1746            ], default=Preset distributions
    +1747            The parameters used in RandomizedSearchCV
    +1748        """
    +1749        if random_search:
    +1750            classifier = RandomizedSearchCV(
    +1751                tree.ExtraTreeRegressor(**kwargs),
    +1752                parameters,
    +1753                cv=self.folds
    +1754            )
    +1755        else:
    +1756            classifier = tree.ExtraTreeRegressor(**kwargs)
    +1757        self._sklearn_regression_meta(
    +1758            classifier,
    +1759            f'{name}{" (Random Search)" if random_search else ""}',
    +1760            random_search=random_search
    +1761        )
    +1762
    +1763    def random_forest(
    +1764        self,
    +1765        name: str = "Random Forest",
    +1766        random_search: bool = False,
    +1767        parameters: dict[
    +1768            str,
    +1769            Union[
    +1770                scipy.stats.rv_continuous,
    +1771                List[Union[int, str, float]]
    +1772            ]
    +1773        ] = {
    +1774            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +1775            'bootstrap': [True, False],
    +1776            'max_samples': uniform(loc=0.01, scale=0.99),
    +1777            'criterion': [
    +1778                'squared_error',
    +1779                'friedman_mse',
    +1780                'absolute_error',
    +1781                'poisson'
    +1782            ],
    +1783            'max_features': [
    +1784                None,
    +1785                'sqrt',
    +1786                'log2'
    +1787            ],
    +1788            'ccp_alpha': uniform(loc=0, scale=2),
    +1789        },
    +1790        **kwargs
    +1791            ):
    +1792        """
    +1793        Fit x on y via random forest
    +1794
    +1795        Parameters
    +1796        ----------
    +1797        name : str, default="Random Forest"
    +1798            Name of classification technique.
    +1799        random_search : bool, default=False
    +1800            Whether to perform RandomizedSearch to optimise parameters
    +1801        parameters : dict[\
    +1802                str,\
    +1803                Union[\
    +1804                    scipy.stats.rv_continuous,\
    +1805                    List[Union[int, str, float]]\
    +1806                ]\
    +1807            ], default=Preset distributions
    +1808            The parameters used in RandomizedSearchCV
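     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator; note that
     +            `max_samples` only takes effect when `bootstrap=True`. The synthetic
     +            data and the small `n_estimators` are illustrative assumptions:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.ensemble import RandomForestRegressor
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(60, 3))
     +            >>> y = X[:, 0] ** 2 + X[:, 1] + rng.normal(scale=0.1, size=60)
     +            >>> forest = RandomForestRegressor(
     +            ...     n_estimators=25, bootstrap=True, max_samples=0.8, random_state=0
     +            ... ).fit(X, y)
     +            >>> forest.predict(X[:2]).shape
     +            (2,)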
    +1809        """
    +1810        if random_search:
    +1811            classifier = RandomizedSearchCV(
    +1812                en.RandomForestRegressor(**kwargs),
    +1813                parameters,
    +1814                cv=self.folds
    +1815            )
    +1816        else:
    +1817            classifier = en.RandomForestRegressor(**kwargs)
    +1818        self._sklearn_regression_meta(
    +1819            classifier,
    +1820            f'{name}{" (Random Search)" if random_search else ""}',
    +1821            random_search=random_search
    +1822        )
    +1823
    +1824    def extra_trees_ensemble(
    +1825        self,
    +1826        name: str = "Extra Trees Ensemble",
    +1827        random_search: bool = False,
    +1828        parameters: dict[
    +1829            str,
    +1830            Union[
    +1831                scipy.stats.rv_continuous,
    +1832                List[Union[int, str, float]]
    +1833            ]
    +1834        ] = {
    +1835            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +1836            'bootstrap': [True, False],
    +1837            'max_samples': uniform(loc=0.01, scale=0.99),
    +1838            'criterion': [
    +1839                'squared_error',
    +1840                'friedman_mse',
    +1841                'absolute_error',
    +1842                'poisson'
    +1843            ],
    +1844            'max_features': [
    +1845                None,
    +1846                'sqrt',
    +1847                'log2'
    +1848            ],
    +1849            'ccp_alpha': uniform(loc=0, scale=2),
    +1850        },
    +1851        **kwargs
    +1852            ):
    +1853        """
    +1854        Fit x on y via extra trees ensemble
    +1855
    +1856        Parameters
    +1857        ----------
    +1858        name : str, default="Extra Trees Ensemble"
    +1859            Name of classification technique.
    +1860        random_search : bool, default=False
    +1861            Whether to perform RandomizedSearch to optimise parameters
    +1862        parameters : dict[\
    +1863                str,\
    +1864                Union[\
    +1865                    scipy.stats.rv_continuous,\
    +1866                    List[Union[int, str, float]]\
    +1867                ]\
    +1868            ], default=Preset distributions
    +1869            The parameters used in RandomizedSearchCV
    +1870        """
    +1871        if random_search:
    +1872            classifier = RandomizedSearchCV(
    +1873                en.ExtraTreesRegressor(**kwargs),
    +1874                parameters,
    +1875                cv=self.folds
    +1876            )
    +1877        else:
    +1878            classifier = en.ExtraTreesRegressor(**kwargs)
    +1879        self._sklearn_regression_meta(
    +1880            classifier,
    +1881            f'{name}{" (Random Search)" if random_search else ""}',
    +1882            random_search=random_search
    +1883        )
    +1884
    +1885    def gradient_boost_regressor(
    +1886        self,
    +1887        name: str = "Gradient Boosting Regression",
    +1888        random_search: bool = False,
    +1889        parameters: dict[
    +1890            str,
    +1891            Union[
    +1892                scipy.stats.rv_continuous,
    +1893                List[Union[int, str, float]]
    +1894            ]
    +1895        ] = {
    +1896            'loss': [
    +1897                'squared_error',
    +1898                'absolute_error',
    +1899                'huber',
    +1900                'quantile'
    +1901            ],
    +1902            'learning_rate': uniform(loc=0, scale=2),
    +1903            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +1904            'subsample': uniform(loc=0.01, scale=0.99),
    +1905            'criterion': [
    +1906                'friedman_mse',
    +1907                'squared_error'
    +1908            ],
    +1909            'max_features': [
    +1910                None,
    +1911                'sqrt',
    +1912                'log2'
    +1913            ],
    +1914            'init': [
    +1915                None,
    +1916                'zero',
     +1917                lm.LinearRegression(),  # init expects estimator instances
     +1918                lm.TheilSenRegressor()
    +1919            ],
    +1920            'ccp_alpha': uniform(loc=0, scale=2)
    +1921        },
    +1922        **kwargs
    +1923            ):
    +1924        """
    +1925        Fit x on y via gradient boosting regression
    +1926
    +1927        Parameters
    +1928        ----------
    +1929        name : str, default="Gradient Boosting Regression"
    +1930            Name of classification technique.
    +1931        random_search : bool, default=False
    +1932            Whether to perform RandomizedSearch to optimise parameters
    +1933        parameters : dict[\
    +1934                str,\
    +1935                Union[\
    +1936                    scipy.stats.rv_continuous,\
    +1937                    List[Union[int, str, float]]\
    +1938                ]\
    +1939            ], default=Preset distributions
    +1940            The parameters used in RandomizedSearchCV
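     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator; `init`
     +            takes an estimator instance (or `'zero'`/`None`) used for the initial
     +            prediction before boosting. Synthetic data, illustrative only:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.ensemble import GradientBoostingRegressor
     +            >>> from sklearn.linear_model import LinearRegression
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(80, 3))
     +            >>> y = X[:, 0] ** 2 + X[:, 1] + rng.normal(scale=0.1, size=80)
     +            >>> gbr = GradientBoostingRegressor(
     +            ...     n_estimators=50, init=LinearRegression(), random_state=0
     +            ... ).fit(X, y)
     +            >>> gbr.predict(X[:2]).shape
     +            (2,)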
    +1941        """
    +1942        if random_search:
    +1943            classifier = RandomizedSearchCV(
    +1944                en.GradientBoostingRegressor(**kwargs),
    +1945                parameters,
    +1946                cv=self.folds
    +1947            )
    +1948        else:
    +1949            classifier = en.GradientBoostingRegressor(**kwargs)
    +1950        self._sklearn_regression_meta(
    +1951            classifier,
    +1952            f'{name}{" (Random Search)" if random_search else ""}',
    +1953            random_search=random_search
    +1954        )
    +1955
    +1956    def hist_gradient_boost_regressor(
    +1957        self,
    +1958        name: str = "Histogram-Based Gradient Boosting Regression",
    +1959        random_search: bool = False,
    +1960        parameters: dict[
    +1961            str,
    +1962            Union[
    +1963                scipy.stats.rv_continuous,
    +1964                List[Union[int, str, float]]
    +1965            ]
    +1966        ] = {
    +1967            'loss': [
    +1968                'squared_error',
    +1969                'absolute_error',
    +1970                'gamma',
    +1971                'poisson',
    +1972                'quantile'
    +1973            ],
    +1974            'quantile': uniform(loc=0, scale=1),
    +1975            'learning_rate': uniform(loc=0, scale=2),
    +1976            'max_iter': [5, 10, 25, 50, 100, 200, 250,  500],
    +1977            'l2_regularization': uniform(loc=0, scale=2),
     +1978            'max_bins': [2, 3, 7, 15, 31, 63, 127, 255]  # max_bins must be >= 2
    +1979        },
    +1980        **kwargs
    +1981            ):
    +1982        """
    +1983        Fit x on y via histogram-based gradient boosting regression
    +1984
    +1985        Parameters
    +1986        ----------
    +1987        name : str, default="Histogram-Based Gradient Boosting Regression"
    +1988            Name of classification technique.
    +1989        random_search : bool, default=False
    +1990            Whether to perform RandomizedSearch to optimise parameters
    +1991        parameters : dict[\
    +1992                str,\
    +1993                Union[\
    +1994                    scipy.stats.rv_continuous,\
    +1995                    List[Union[int, str, float]]\
    +1996                ]\
    +1997            ], default=Preset distributions
    +1998            The parameters used in RandomizedSearchCV
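     +
     +            Examples
     +            --------
     +            A minimal, self-contained sketch of the wrapped estimator; the
     +            histogram-based variant handles missing values natively, so NaNs do
     +            not need to be imputed first. Synthetic data, illustrative only:
     +
     +            >>> import numpy as np
     +            >>> from sklearn.ensemble import HistGradientBoostingRegressor
     +            >>> rng = np.random.default_rng(0)
     +            >>> X = rng.normal(size=(100, 2))
     +            >>> y = 2.0 * X[:, 0] - X[:, 1] + rng.normal(scale=0.1, size=100)
     +            >>> X[::10, 0] = np.nan  # missing values are handled natively
     +            >>> hgb = HistGradientBoostingRegressor(max_iter=50, max_bins=63).fit(X, y)
     +            >>> hgb.predict(X[:2]).shape
     +            (2,)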
    +1999        """
    +2000        if random_search:
    +2001            classifier = RandomizedSearchCV(
    +2002                en.HistGradientBoostingRegressor(**kwargs),
    +2003                parameters,
    +2004                cv=self.folds
    +2005            )
    +2006        else:
    +2007            classifier = en.HistGradientBoostingRegressor(**kwargs)
    +2008        self._sklearn_regression_meta(
    +2009            classifier,
    +2010            f'{name}{" (Random Search)" if random_search else ""}',
    +2011            random_search=random_search
    +2012        )
    +2013
    +2014    def mlp_regressor(
    +2015        self,
    +2016        name: str = "Multi-Layer Perceptron Regression",
    +2017        random_search: bool = False,
    +2018        parameters: dict[
    +2019            str,
    +2020            Union[
    +2021                scipy.stats.rv_continuous,
    +2022                List[Union[int, str, float]]
    +2023            ]
    +2024        ] = {
    +2025            'hidden_layer_sizes': [
    +2026                (100, ),
    +2027                (100, 200),
    +2028                (10, ),
    +2029                (200, 400),
    +2030                (100, 200, 300)
    +2031            ],
    +2032            'activation': [
    +2033                'identity',
    +2034                'logistic',
    +2035                'tanh',
    +2036                'relu'
    +2037            ],
    +2038            'solver': [
    +2039                'lbfgs',
    +2040                'sgd',
    +2041                'adam'
    +2042            ],
    +2043            'alpha': uniform(loc=0, scale=0.1),
    +2044            'batch_size': [
    +2045                'auto',
    +2046                20,
    +2047                200,
    +2048                500,
    +2049                1000,
    +2050                5000,
    +2051                10000
    +2052            ],
    +2053            'learning_rate': [
    +2054                'constant',
    +2055                'invscaling',
    +2056                'adaptive'
    +2057            ],
    +2058            'learning_rate_init': uniform(loc=0, scale=0.1),
    +2059            'power_t': uniform(loc=0.1, scale=0.9),
    +2060            'max_iter': [5, 10, 25, 50, 100, 200, 250,  500],
    +2061            'shuffle': [True, False],
    +2062            'momentum': uniform(loc=0.1, scale=0.9),
    +2063            'beta_1': uniform(loc=0.1, scale=0.9),
    +2064            'beta_2': uniform(loc=0.1, scale=0.9),
    +2065            'epsilon': uniform(loc=1E-8, scale=1E-6),
    +2066
    +2067        },
    +2068        **kwargs
    +2069            ):
    +2070        """
    +2071        Fit x on y via multi-layer perceptron regression
    +2072
    +2073        Parameters
    +2074        ----------
    +2075        name : str, default="Multi-Layer Perceptron Regression"
    +2076            Name of regression technique.
    +2077        random_search : bool, default=False
    +2078            Whether to perform RandomizedSearch to optimise parameters
    +2079        parameters : dict[\
    +2080                str,\
    +2081                Union[\
    +2082                    scipy.stats.rv_continuous,\
    +2083                    List[Union[int, str, float]]\
    +2084                ]\
    +2085            ], default=Preset distributions
    +2086            The parameters used in RandomizedSearchCV
    +2087        """
    +2088        if random_search:
    +2089            classifier = RandomizedSearchCV(
    +2090                nn.MLPRegressor(**kwargs),
    +2091                parameters,
    +2092                cv=self.folds
    +2093            )
    +2094        else:
    +2095            classifier = nn.MLPRegressor(**kwargs)
    +2096        self._sklearn_regression_meta(
    +2097            classifier,
    +2098            f'{name}{" (Random Search)" if random_search else ""}',
    +2099            random_search=random_search
    +2100        )
    +2101
    +2102    def svr(
    +2103        self,
    +2104        name: str = "Support Vector Regression",
    +2105        random_search: bool = False,
    +2106        parameters: dict[
    +2107            str,
    +2108            Union[
    +2109                scipy.stats.rv_continuous,
    +2110                List[Union[int, str, float]]
    +2111            ]
    +2112        ] = {
    +2113            'kernel': [
    +2114                'linear',
    +2115                'poly',
    +2116                'rbf',
    +2117                'sigmoid',
    +2118            ],
    +2119            'degree': [2, 3, 4],
    +2120            'gamma': ['scale', 'auto'],
    +2121            'coef0': uniform(loc=0, scale=1),
    +2122            'C': uniform(loc=0.1, scale=1.9),
    +2123            'epsilon': uniform(loc=1E-8, scale=1),
    +2124            'shrinking': [True, False]
    +2125        },
    +2126        **kwargs
    +2127            ):
    +2128        """
    +2129        Fit x on y via support vector regression
    +2130
    +2131        Parameters
    +2132        ----------
    +2133        name : str, default="Support Vector Regression"
    +2134            Name of regression technique.
    +2135        random_search : bool, default=False
    +2136            Whether to perform RandomizedSearch to optimise parameters
    +2137        parameters : dict[\
    +2138                str,\
    +2139                Union[\
    +2140                    scipy.stats.rv_continuous,\
    +2141                    List[Union[int, str, float]]\
    +2142                ]\
    +2143            ], default=Preset distributions
    +2144            The parameters used in RandomizedSearchCV
    +2145        """
    +2146        if random_search:
    +2147            classifier = RandomizedSearchCV(
    +2148                svm.SVR(**kwargs),
    +2149                parameters,
    +2150                cv=self.folds
    +2151            )
    +2152        else:
    +2153            classifier = svm.SVR(**kwargs)
    +2154        self._sklearn_regression_meta(
    +2155            classifier,
    +2156            f'{name}{" (Random Search)" if random_search else ""}',
    +2157            random_search=random_search
    +2158        )
    +2159
    +2160    def linear_svr(
    +2161        self,
    +2162        name: str = "Linear Support Vector Regression",
    +2163        random_search: bool = False,
    +2164        parameters: dict[
    +2165            str,
    +2166            Union[
    +2167                scipy.stats.rv_continuous,
    +2168                List[Union[int, str, float]]
    +2169            ]
    +2170        ] = {
    +2171            'C': uniform(loc=0.1, scale=1.9),
    +2172            'epsilon': uniform(loc=1E-8, scale=1),
    +2173            'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive']
    +2174        },
    +2175        **kwargs
    +2176            ):
    +2177        """
    +2178        Fit x on y via linear support vector regression
    +2179
    +2180        Parameters
    +2181        ----------
    +2182        name : str, default="Linear Support Vector Regression"
    +2183            Name of regression technique.
    +2184        random_search : bool, default=False
    +2185            Whether to perform RandomizedSearch to optimise parameters
    +2186        parameters : dict[\
    +2187                str,\
    +2188                Union[\
    +2189                    scipy.stats.rv_continuous,\
    +2190                    List[Union[int, str, float]]\
    +2191                ]\
    +2192            ], default=Preset distributions
    +2193            The parameters used in RandomizedSearchCV
    +2194        """
    +2195        if random_search:
    +2196            classifier = RandomizedSearchCV(
    +2197                svm.LinearSVR(**kwargs),
    +2198                parameters,
    +2199                cv=self.folds
    +2200            )
    +2201        else:
    +2202            classifier = svm.LinearSVR(**kwargs)
    +2203        self._sklearn_regression_meta(
    +2204            classifier,
    +2205            f'{name}{" (Random Search)" if random_search else ""}',
    +2206            random_search=random_search
    +2207        )
    +2208
    +2209    def nu_svr(
    +2210        self,
    +2211        name: str = "Nu-Support Vector Regression",
    +2212        random_search: bool = False,
    +2213        parameters: dict[
    +2214            str,
    +2215            Union[
    +2216                scipy.stats.rv_continuous,
    +2217                List[Union[int, str, float]]
    +2218            ]
    +2219        ] = {
    +2220            'kernel': [
    +2221                'linear',
    +2222                'poly',
    +2223                'rbf',
    +2224                'sigmoid',
    +2225            ],
    +2226            'degree': [2, 3, 4],
    +2227            'gamma': ['scale', 'auto'],
    +2228            'coef0': uniform(loc=0, scale=1),
    +2229            'shrinking': [True, False],
    +2230            'nu': uniform(loc=0, scale=1),
    +2231        },
    +2232        **kwargs
    +2233            ):
    +2234        """
    +2235        Fit x on y via nu-support vector regression
    +2236
    +2237        Parameters
    +2238        ----------
    +2239        name : str, default="Nu-Support Vector Regression"
    +2240            Name of regression technique.
    +2241        random_search : bool, default=False
    +2242            Whether to perform RandomizedSearch to optimise parameters
    +2243        parameters : dict[\
    +2244                str,\
    +2245                Union[\
    +2246                    scipy.stats.rv_continuous,\
    +2247                    List[Union[int, str, float]]\
    +2248                ]\
    +2249            ], default=Preset distributions
    +2250            The parameters used in RandomizedSearchCV
    +2251        """
    +2252        if random_search:
    +2253            classifier = RandomizedSearchCV(
    +2254                svm.NuSVR(**kwargs),
    +2255                parameters,
    +2256                cv=self.folds
    +2257            )
    +2258        else:
    +2259            classifier = svm.NuSVR(**kwargs)
    +2260        self._sklearn_regression_meta(
    +2261            classifier,
    +2262            f'{name}{" (Random Search)" if random_search else ""}',
    +2263            random_search=random_search
    +2264        )
    +2265
    +2266    def gaussian_process(
    +2267        self,
    +2268        name: str = "Gaussian Process Regression",
    +2269        random_search: bool = False,
    +2270        parameters: dict[
    +2271            str,
    +2272            Union[
    +2273                scipy.stats.rv_continuous,
    +2274                List[Union[int, str, float]]
    +2275            ]
    +2276        ] = {
    +2277            'kernel': [
    +2278                None,
    +2279                kern.RBF,
    +2280                kern.Matern,
    +2281                kern.DotProduct,
    +2282                kern.WhiteKernel,
    +2283                kern.CompoundKernel,
    +2284                kern.ExpSineSquared
    +2285            ],
    +2286            'alpha': uniform(loc=0, scale=1E-8),
    +2287            'normalize_y': [True, False]
    +2288        },
    +2289        **kwargs
    +2290            ):
    +2291        """
    +2292        Fit x on y via gaussian process regression
    +2293
    +2294        Parameters
    +2295        ----------
    +2296        name : str, default="Gaussian Process Regression"
    +2297            Name of regression technique.
    +2298        random_search : bool, default=False
    +2299            Whether to perform RandomizedSearch to optimise parameters
    +2300        parameters : dict[\
    +2301                str,\
    +2302                Union[\
    +2303                    scipy.stats.rv_continuous,\
    +2304                    List[Union[int, str, float]]\
    +2305                ]\
    +2306            ], default=Preset distributions
    +2307            The parameters used in RandomizedSearchCV
    +2308        """
    +2309        if random_search:
    +2310            classifier = RandomizedSearchCV(
    +2311                gp.GaussianProcessRegressor(**kwargs),
    +2312                parameters,
    +2313                cv=self.folds
    +2314            )
    +2315        else:
    +2316            classifier = gp.GaussianProcessRegressor(**kwargs)
    +2317        self._sklearn_regression_meta(
    +2318            classifier,
    +2319            f'{name}{" (Random Search)" if random_search else ""}',
    +2320            random_search=random_search
    +2321        )
    +2322
    +2323    def isotonic(
    +2324        self,
    +2325        name: str = "Isotonic Regression",
    +2326        random_search: bool = False,
    +2327        parameters: dict[
    +2328            str,
    +2329            Union[
    +2330                scipy.stats.rv_continuous,
    +2331                List[Union[int, str, float]]
    +2332            ]
    +2333        ] = {
    +2334            'increasing': [True, False]
    +2335        },
    +2336        **kwargs
    +2337            ):
    +2338        """
    +2339        Fit x on y via isotonic regression
    +2340
    +2341        Parameters
    +2342        ----------
    +2343        name : str, default="Isotonic Regression"
    +2344            Name of regression technique.
    +2345        random_search : bool, default=False
    +2346            Whether to perform RandomizedSearch to optimise parameters
    +2347        parameters : dict[\
    +2348                str,\
    +2349                Union[\
    +2350                    scipy.stats.rv_continuous,\
    +2351                    List[Union[int, str, float]]\
    +2352                ]\
    +2353            ], default=Preset distributions
    +2354            The parameters used in RandomizedSearchCV
    +2355        """
    +2356        if random_search:
    +2357            classifier = RandomizedSearchCV(
    +2358                iso.IsotonicRegression(**kwargs),
    +2359                parameters,
    +2360                cv=self.folds
    +2361            )
    +2362        else:
    +2363            classifier = iso.IsotonicRegression(**kwargs)
    +2364        self._sklearn_regression_meta(
    +2365            classifier,
    +2366            f'{name}{" (Random Search)" if random_search else ""}',
    +2367            random_search=random_search,
    +2368            max_coeffs=1
    +2369        )
    +2370
    +2371    def xgboost(
    +2372        self,
    +2373        name: str = "XGBoost Regression",
    +2374        random_search: bool = False,
    +2375        parameters: dict[
    +2376            str,
    +2377            Union[
    +2378                scipy.stats.rv_continuous,
    +2379                List[Union[int, str, float]]
    +2380            ]
    +2381        ] = {
    +2382            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +2383            'max_bin': [3, 7, 15, 31, 63, 127, 255],
    +2384            'grow_policy': [
    +2385                'depthwise',
    +2386                'lossguide'
    +2387            ],
    +2388            'learning_rate': uniform(loc=0, scale=2),
    +2389            'tree_method': ['exact', 'approx', 'hist'],
    +2390            'gamma': uniform(loc=0, scale=1),
    +2391            'subsample': uniform(loc=0, scale=1),
    +2392            'reg_alpha': uniform(loc=0, scale=1),
    +2393            'reg_lambda': uniform(loc=0, scale=1)
    +2394        },
    +2395        **kwargs
    +2396            ):
    +2397        """
    +2398        Fit x on y via xgboost regression
    +2399
    +2400        Parameters
    +2401        ----------
    +2402        name : str, default="XGBoost Regression"
    +2403            Name of regression technique.
    +2404        random_search : bool, default=False
    +2405            Whether to perform RandomizedSearch to optimise parameters
    +2406        parameters : dict[\
    +2407                str,\
    +2408                Union[\
    +2409                    scipy.stats.rv_continuous,\
    +2410                    List[Union[int, str, float]]\
    +2411                ]\
    +2412            ], default=Preset distributions
    +2413            The parameters used in RandomizedSearchCV
    +2414        """
    +2415        if random_search:
    +2416            classifier = RandomizedSearchCV(
    +2417                xgb.XGBRegressor(**kwargs),
    +2418                parameters,
    +2419                cv=self.folds
    +2420            )
    +2421        else:
    +2422            classifier = xgb.XGBRegressor(**kwargs)
    +2423        self._sklearn_regression_meta(
    +2424            classifier,
    +2425            f'{name}{" (Random Search)" if random_search else ""}',
    +2426            random_search=random_search
    +2427        )
    +2428
    +2429    def xgboost_rf(
    +2430        self,
    +2431        name: str = "XGBoost Random Forest Regression",
    +2432        random_search: bool = False,
    +2433        parameters: dict[
    +2434            str,
    +2435            Union[
    +2436                scipy.stats.rv_continuous,
    +2437                List[Union[int, str, float]]
    +2438            ]
    +2439        ] = {
    +2440            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +2441            'max_bin': [3, 7, 15, 31, 63, 127, 255],
    +2442            'grow_policy': [
    +2443                'depthwise',
    +2444                'lossguide'
    +2445            ],
    +2446            'learning_rate': uniform(loc=0, scale=2),
    +2447            'tree_method': ['exact', 'approx', 'hist'],
    +2448            'gamma': uniform(loc=0, scale=1),
    +2449            'subsample': uniform(loc=0, scale=1),
    +2450            'reg_alpha': uniform(loc=0, scale=1),
    +2451            'reg_lambda': uniform(loc=0, scale=1)
    +2452        },
    +2453        **kwargs
    +2454            ):
    +2455        """
    +2456        Fit x on y via xgboosted random forest regression
    +2457
    +2458        Parameters
    +2459        ----------
    +2460        name : str, default="XGBoost Random Forest Regression"
    +2461            Name of regression technique.
    +2462        random_search : bool, default=False
    +2463            Whether to perform RandomizedSearch to optimise parameters
    +2464        parameters : dict[\
    +2465                str,\
    +2466                Union[\
    +2467                    scipy.stats.rv_continuous,\
    +2468                    List[Union[int, str, float]]\
    +2469                ]\
    +2470            ], default=Preset distributions
    +2471            The parameters used in RandomizedSearchCV
    +2472        """
    +2473        if random_search:
    +2474            classifier = RandomizedSearchCV(
    +2475                xgb.XGBRFRegressor(**kwargs),
    +2476                parameters,
    +2477                cv=self.folds
    +2478            )
    +2479        else:
    +2480            classifier = xgb.XGBRFRegressor(**kwargs)
    +2481        self._sklearn_regression_meta(
    +2482            classifier,
    +2483            f'{name}{" (Random Search)" if random_search else ""}',
    +2484            random_search=random_search
    +2485        )
    +2486
    +2487    def return_measurements(self) -> dict[str, pd.DataFrame]:
    +2488        """
    +2489        Returns the measurements used, with missing values and
    +2490        non-overlapping measurements excluded
    +2491
    +2492        Returns
    +2493        -------
    +2494        dict[str, pd.DataFrame]
    +2495            Dictionary with 2 keys:
    +2496
    +2497            |Key|Value|
    +2498            |---|---|
    +2499            |x|`x_data`|
    +2500            |y|`y_data`|
    +2501
    +2502        """
    +2503        return {
    +2504                'x': self.x_data,
    +2505                'y': self.y_data
    +2506                }
    +2507
    +2508    def return_models(self) -> dict[str,  # Technique
    +2509                                    dict[str,  # Scaling method
    +2510                                         dict[str,  # Variables used
    +2511                                              dict[int,  # Fold
    +2512                                                   Pipeline]]]]:
    +2513        """
    +2514        Returns the models stored in the object
    +2515
    +2516        Returns
    +2517        -------
    +2518        dict[str, dict[str, dict[str, dict[int, Pipeline]]]]
    +2519            The calibrated models. They are stored in a nested structure as
    +2520            follows:
    +2521            1. Primary Key, name of the technique (e.g. Lasso Regression).
    +2522            2. Scaling technique (e.g. Yeo-Johnson Transform).
    +2523            3. Combination of variables used, or `target` if the
    +2524            calibration is univariate (e.g. "`target` + a + b").
    +2525            4. Fold, which fold was excluded from the calibration. If the
    +2526            data is 5-fold cross validated, a key of 4 indicates the
    +2527            model was trained on folds 0-3.
    +2528
    +2529        """
    +2530        return self.models
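
Each of the regression methods above follows the same pattern: when `random_search` is True the estimator is wrapped in `RandomizedSearchCV` with `cv=self.folds`, and the fitted pipelines end up in the nested dictionary documented for `return_models`. A minimal sketch of traversing that dictionary, assuming a `Calibrate` instance named `cal` on which one or more techniques have already been fitted (the instance name and the prior fitting calls are illustrative assumptions):

```python
# Assumes `cal` is a Calibrate instance with at least one technique fitted.
models = cal.return_models()

# Keys follow the documented nesting:
# technique -> scaling method -> variable combination -> fold -> Pipeline
for technique, scalings in models.items():
    for scaling, variable_combos in scalings.items():
        for variables, fold_pipelines in variable_combos.items():
            for fold, pipeline in fold_pipelines.items():
                print(technique, scaling, variables, fold, type(pipeline).__name__)
```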
     
    @@ -2848,232 +5463,238 @@
    Examples
    - Calibrate( x_data: pandas.core.frame.DataFrame, y_data: pandas.core.frame.DataFrame, target: str, folds: int = 5, strat_groups: int = 10, scaler: Union[collections.abc.Iterable[Literal['None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson TransformBox-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)']], Literal['All', 'None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson TransformBox-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)']] = 'None', seed: int = 62) + Calibrate( x_data: pandas.core.frame.DataFrame, y_data: pandas.core.frame.DataFrame, target: str, folds: int = 5, strat_groups: int = 10, scaler: Union[collections.abc.Iterable[Literal['None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson Transform', 'Box-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)']], Literal['All', 'None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson Transform', 'Box-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)']] = 'None', seed: int = 62)
    -
    198    def __init__(
    -199            self,
    -200            x_data: pd.DataFrame,
    -201            y_data: pd.DataFrame,
    -202            target: str,
    -203            folds: int = 5,
    -204            strat_groups: int = 10,
    -205            scaler: Union[
    -206                Iterable[
    -207                    Literal[
    -208                        'None',
    -209                        'Standard Scale',
    -210                        'MinMax Scale',
    -211                        'Yeo-Johnson Transform'
    -212                        'Box-Cox Transform',
    -213                        'Quantile Transform (Uniform)',
    -214                        'Quantile Transform (Gaussian)'
    -215                        ]
    -216                    ],
    -217                Literal[
    -218                    'All',
    -219                    'None',
    -220                    'Standard Scale',
    -221                    'MinMax Scale',
    -222                    'Yeo-Johnson Transform'
    -223                    'Box-Cox Transform',
    -224                    'Quantile Transform (Uniform)',
    -225                    'Quantile Transform (Gaussian)',
    -226                    ]
    -227                ] = 'None',
    -228            seed: int = 62
    -229                 ):
    -230        """Initialises class
    -231
    -232        Used to compare one set of measurements against another.
    -233        It can perform both univariate and multivariate regression, though
    -234        some techniques can only do one or the other. Multivariate regression
    -235        can only be performed when secondary variables are provided.
    -236
    -237        Parameters
    -238        ----------
    -239        x_data : pd.DataFrame
    -240            Data to be calibrated.
    -241        y_data : pd.DataFrame
    -242            'True' data to calibrate against.
    -243        target : str
    -244            Column name of the primary feature to use in calibration, must be
    -245            the name of a column in both `x_data` and `y_data`.
    -246        folds : int, default=5
    -247            Number of folds to split the data into, using stratified k-fold.
    -248        strat_groups : int, default=10
    -249            Number of groups to stratify against, the data will be split into
    -250            n equally sized bins where n is the value of `strat_groups`.
    -251        scaler : iterable of {<br>\
    -252            'None',<br>\
    -253            'Standard Scale',<br>\
    -254            'MinMax Scale',<br>\
    -255            'Yeo-Johnson Transform',<br>\
    -256            'Box-Cox Transform',<br>\
    -257            'Quantile Transform (Uniform)',<br>\
    -258            'Quantile Transform (Gaussian)',<br>\
    -259            } or {<br>\
    -260            'All',<br>\
    -261            'None',<br>\
    -262            'Standard Scale',<br>\
    -263            'MinMax Scale',<br>\
    -264            'Yeo-Johnson Transform',<br>\
    -265            'Box-Cox Transform',<br>\
    -266            'Quantile Transform (Uniform)',<br>\
    -267            'Quantile Transform (Gaussian)',<br>\
    -268            }, default='None'
    -269            The scaling/transform method (or list of methods) to apply to the
    -270            data
    -271        seed : int, default=62
    -272            Random state to use when shuffling and splitting the data into n
    -273            folds. Ensures repeatability.
    -274
    -275        Raises
    -276        ------
    -277        ValueError
    -278            Raised if the target variables (e.g. 'NO2') is not a column name in
    -279            both dataframes.
    -280            Raised if `scaler` is not str, tuple or list
    -281        """
    -282        if target not in x_data.columns or target not in y_data.columns:
    -283            raise ValueError(
    -284                    f"{target} does not exist in both columns."
    -285                             )
    -286        join_index = x_data.join(
    -287                y_data,
    -288                how='inner',
    -289                lsuffix='x',
    -290                rsuffix='y'
    -291                ).dropna().index
    -292        """
    -293        The common indices between `x_data` and `y_data`, excluding missing
    -294        values
    -295        """
    -296        self.x_data: pd.DataFrame = x_data.loc[join_index, :]
    -297        """
    -298        The data to be calibrated.
    -299        """
    -300        self.target: str = target
    -301        """
    -302        The name of the column in both `x_data` and `y_data` that
    -303        will be used as the x and y variables in the calibration.
    -304        """
    -305        self.scaler_list: dict[str, Any] = {
    -306                'None': None,
    -307                'Standard Scale': pre.StandardScaler(),
    -308                'MinMax Scale': pre.MinMaxScaler(),
    -309                'Yeo-Johnson Transform': pre.PowerTransformer(
    -310                    method='yeo-johnson'
    -311                    ),
    -312                'Box-Cox Transform': pre.PowerTransformer(method='box-cox'),
    -313                'Quantile Transform (Uniform)': pre.QuantileTransformer(
    -314                    output_distribution='uniform'
    -315                    ),
    -316                'Quantile Transform (Gaussian)': pre.QuantileTransformer(
    -317                    output_distribution='normal'
    -318                    )
    -319                }
    -320        """
    -321        Keys for scaling algorithms available in the pipelines
    -322        """
    -323        self.scaler: list[str] = list()
    -324        """
    -325        The scaling algorithm(s) to preprocess the data with
    -326        """
    -327        if isinstance(scaler, str):
    -328            if scaler == "All":
    -329                if not bool(self.x_data.ge(0).all(axis=None)):
    -330                    warnings.warn(
    -331                        f'Box-Cox is not compatible with provided measurements'
    -332                    )
    -333                    self.scaler_list.pop('Box-Cox Transform')
    -334                self.scaler.extend(self.scaler_list.keys())
    -335            elif scaler in self.scaler_list.keys():
    -336                self.scaler.append(scaler)
    -337            else:
    -338                self.scaler.append('None')
    -339                warnings.warn(f'Scaling algorithm {scaler} not recognised')
    -340        elif isinstance(scaler, (tuple, list)):
    -341            for sc in scaler:
    -342                if sc == 'Box-Cox Transform' and not bool(
    -343                    self.x_data.ge(0).all(axis=None)
    -344                ):
    -345                    warnings.warn(
    -346                        f'Box-Cox is not compatible with provided measurements'
    -347                    )
    -348                    continue
    -349                if sc in self.scaler_list.keys():
    -350                    self.scaler.append(sc)
    -351                else:
    -352                    warnings.warn(f'Scaling algorithm {sc} not recognised')
    -353        else:
    -354            raise ValueError('scaler parameter should be string, list or tuple')
    -355        if not self.scaler:
    -356            warnings.warn(
    -357                f'No valid scaling algorithms provided, defaulting to None'
    -358            )
    -359            self.scaler.append('None')
    -360
    -361        self.y_data = cont_strat_folds(
    -362                y_data.loc[join_index, :],
    -363                target,
    -364                folds,
    -365                strat_groups,
    -366                seed
    -367                )
    -368        """
    -369        The data that `x_data` will be calibrated against. A '*Fold*'
    -370        column is added using the `const_strat_folds` function which splits
    -371        the data into k stratified folds (where k is the value of
    -372        `folds`). It splits the continuous measurements into n bins (where n
    -373        is the value of `strat_groups`) and distributes each bin equally
    -374        across all folds. This significantly reduces the chances of one fold
    -375        containing a skewed distribution relative to the whole dataset.
    -376        """
    -377        self.models: dict[str,  # Technique name
    -378                          dict[str,  # Scaling technique
    -379                               dict[str,  # Variable combo
    -380                                    dict[int,  # Fold
    -381                                         Pipeline]]]] = dict()
    -382        """
    -383        The calibrated models. They are stored in a nested structure as
    -384        follows:
    -385        1. Primary Key, name of the technique (e.g Lasso Regression).
    -386        2. Scaling technique (e.g Yeo-Johnson Transform).
    -387        3. Combination of variables used or `target` if calibration is
    -388        univariate (e.g "`target` + a + b).
    -389        4. Fold, which fold was used excluded from the calibration. If data
    -390        if 5-fold cross validated, a key of 4 indicates the data was trained on
    -391        folds 0-3.
    -392
    -393        ```mermaid
    -394            stateDiagram-v2
    -395              models --> Technique
    -396              state Technique {
    -397                [*] --> Scaling
    -398                [*]: The calibration technique used
    -399                [*]: (e.g "Lasso Regression")
    -400                state Scaling {
    -401                  [*] --> Variables
    -402                  [*]: The scaling technique used
    -403                  [*]: (e.g "Yeo-Johnson Transform")
    -404                  state Variables {
    -405                    [*] : The combination of variables used
    -406                    [*] : (e.g "x + a + b")
    -407                    [*] --> Fold
    -408                    state Fold {
    -409                     [*] : Which fold was excluded from training data
    -410                     [*] : (e.g 4 indicates folds 0-3 were used to train)
    -411                    }
    -412                  }
    -413                }
    -414              }
    -415        ```
    -416
    -417        """
    +            
    201    def __init__(
    +202            self,
    +203            x_data: pd.DataFrame,
    +204            y_data: pd.DataFrame,
    +205            target: str,
    +206            folds: int = 5,
    +207            strat_groups: int = 10,
    +208            scaler: Union[
    +209                Iterable[
    +210                    Literal[
    +211                        'None',
    +212                        'Standard Scale',
    +213                        'MinMax Scale',
    +214                        'Yeo-Johnson Transform',
    +215                        'Box-Cox Transform',
    +216                        'Quantile Transform (Uniform)',
    +217                        'Quantile Transform (Gaussian)'
    +218                        ]
    +219                    ],
    +220                Literal[
    +221                    'All',
    +222                    'None',
    +223                    'Standard Scale',
    +224                    'MinMax Scale',
    +225                    'Yeo-Johnson Transform',
    +226                    'Box-Cox Transform',
    +227                    'Quantile Transform (Uniform)',
    +228                    'Quantile Transform (Gaussian)',
    +229                    ]
    +230                ] = 'None',
    +231            seed: int = 62
    +232                 ):
    +233        """Initialises class
    +234
    +235        Used to compare one set of measurements against another.
    +236        It can perform both univariate and multivariate regression, though
    +237        some techniques can only do one or the other. Multivariate regression
    +238        can only be performed when secondary variables are provided.
    +239
    +240        Parameters
    +241        ----------
    +242        x_data : pd.DataFrame
    +243            Data to be calibrated.
    +244        y_data : pd.DataFrame
    +245            'True' data to calibrate against.
    +246        target : str
    +247            Column name of the primary feature to use in calibration, must be
    +248            the name of a column in both `x_data` and `y_data`.
    +249        folds : int, default=5
    +250            Number of folds to split the data into, using stratified k-fold.
    +251        strat_groups : int, default=10
    +252            Number of groups to stratify against, the data will be split into
    +253            n equally sized bins where n is the value of `strat_groups`.
    +254        scaler : iterable of {<br>\
    +255            'None',<br>\
    +256            'Standard Scale',<br>\
    +257            'MinMax Scale',<br>\
    +258            'Yeo-Johnson Transform',<br>\
    +259            'Box-Cox Transform',<br>\
    +260            'Quantile Transform (Uniform)',<br>\
    +261            'Quantile Transform (Gaussian)',<br>\
    +262            } or {<br>\
    +263            'All',<br>\
    +264            'None',<br>\
    +265            'Standard Scale',<br>\
    +266            'MinMax Scale',<br>\
    +267            'Yeo-Johnson Transform',<br>\
    +268            'Box-Cox Transform',<br>\
    +269            'Quantile Transform (Uniform)',<br>\
    +270            'Quantile Transform (Gaussian)',<br>\
    +271            }, default='None'
    +272            The scaling/transform method (or list of methods) to apply to the
    +273            data
    +274        seed : int, default=62
    +275            Random state to use when shuffling and splitting the data into n
    +276            folds. Ensures repeatability.
    +277
    +278        Raises
    +279        ------
    +280        ValueError
    +281            Raised if the target variable (e.g. 'NO2') is not a column name in
    +282            both dataframes.
    +283            Raised if `scaler` is not str, tuple or list
    +284        """
    +285        if target not in x_data.columns or target not in y_data.columns:
    +286            raise ValueError(
    +287                    f"{target} does not exist in both dataframes."
    +288                             )
    +289        join_index = x_data.join(
    +290                y_data,
    +291                how='inner',
    +292                lsuffix='x',
    +293                rsuffix='y'
    +294                ).dropna().index
    +295        """
    +296        The common indices between `x_data` and `y_data`, excluding missing
    +297        values
    +298        """
    +299        self.x_data: pd.DataFrame = x_data.loc[join_index, :]
    +300        """
    +301        The data to be calibrated.
    +302        """
    +303        self.target: str = target
    +304        """
    +305        The name of the column in both `x_data` and `y_data` that
    +306        will be used as the x and y variables in the calibration.
    +307        """
    +308        self.scaler_list: dict[str, Any] = {
    +309                'None': None,
    +310                'Standard Scale': pre.StandardScaler(),
    +311                'MinMax Scale': pre.MinMaxScaler(),
    +312                'Yeo-Johnson Transform': pre.PowerTransformer(
    +313                    method='yeo-johnson'
    +314                    ),
    +315                'Box-Cox Transform': pre.PowerTransformer(method='box-cox'),
    +316                'Quantile Transform (Uniform)': pre.QuantileTransformer(
    +317                    output_distribution='uniform'
    +318                    ),
    +319                'Quantile Transform (Gaussian)': pre.QuantileTransformer(
    +320                    output_distribution='normal'
    +321                    )
    +322                }
    +323        """
    +324        Keys for scaling algorithms available in the pipelines
    +325        """
    +326        self.scaler: list[str] = list()
    +327        """
    +328        The scaling algorithm(s) to preprocess the data with
    +329        """
    +330        if isinstance(scaler, str):
    +331            if scaler == "All":
    +332                if not bool(self.x_data.ge(0).all(axis=None)):
    +333                    warnings.warn(
    +334                        'Box-Cox is not compatible with provided measurements'
    +335                    )
    +336                    self.scaler_list.pop('Box-Cox Transform')
    +337                self.scaler.extend(self.scaler_list.keys())
    +338            elif scaler in self.scaler_list.keys():
    +339                self.scaler.append(scaler)
    +340            else:
    +341                self.scaler.append('None')
    +342                warnings.warn(f'Scaling algorithm {scaler} not recognised')
    +343        elif isinstance(scaler, (tuple, list)):
    +344            for sc in scaler:
    +345                if sc == 'Box-Cox Transform' and not bool(
    +346                    self.x_data.ge(0).all(axis=None)
    +347                ):
    +348                    warnings.warn(
    +349                        'Box-Cox is not compatible with provided measurements'
    +350                    )
    +351                    continue
    +352                if sc in self.scaler_list.keys():
    +353                    self.scaler.append(sc)
    +354                else:
    +355                    warnings.warn(f'Scaling algorithm {sc} not recognised')
    +356        else:
    +357            raise ValueError(
    +358                'scaler parameter should be string, list or tuple'
    +359            )
    +360        if not self.scaler:
    +361            warnings.warn(
    +362                'No valid scaling algorithms provided, defaulting to None'
    +363            )
    +364            self.scaler.append('None')
    +365
    +366        self.y_data = cont_strat_folds(
    +367                y_data.loc[join_index, :],
    +368                target,
    +369                folds,
    +370                strat_groups,
    +371                seed
    +372                )
    +373        """
    +374        The data that `x_data` will be calibrated against. A '*Fold*'
    +375        column is added using the `cont_strat_folds` function which splits
    +376        the data into k stratified folds (where k is the value of
    +377        `folds`). It splits the continuous measurements into n bins (where n
    +378        is the value of `strat_groups`) and distributes each bin equally
    +379        across all folds. This significantly reduces the chances of one fold
    +380        containing a skewed distribution relative to the whole dataset.
    +381        """
    +382        self.models: dict[str,  # Technique name
    +383                          dict[str,  # Scaling technique
    +384                               dict[str,  # Variable combo
    +385                                    dict[int,  # Fold
    +386                                         Pipeline]]]] = dict()
    +387        """
    +388        The calibrated models. They are stored in a nested structure as
    +389        follows:
    +390        1. Primary Key, name of the technique (e.g. Lasso Regression).
    +391        2. Scaling technique (e.g. Yeo-Johnson Transform).
    +392        3. Combination of variables used, or `target` if the calibration
    +393        is univariate (e.g. "`target` + a + b").
    +394        4. Fold, which fold was excluded from the calibration. If the data
    +395        is 5-fold cross validated, a key of 4 indicates the model was
    +396        trained on folds 0-3.
    +397
    +398        ```mermaid
    +399            stateDiagram-v2
    +400              models --> Technique
    +401              state Technique {
    +402                [*] --> Scaling
    +403                [*]: The calibration technique used
    +404                [*]: (e.g "Lasso Regression")
    +405                state Scaling {
    +406                  [*] --> Variables
    +407                  [*]: The scaling technique used
    +408                  [*]: (e.g "Yeo-Johnson Transform")
    +409                  state Variables {
    +410                    [*] : The combination of variables used
    +411                    [*] : (e.g "x + a + b")
    +412                    [*] --> Fold
    +413                    state Fold {
    +414                     [*] : Which fold was excluded from training data
    +415                     [*] : (e.g 4 indicates folds 0-3 were used to train)
    +416                    }
    +417                  }
    +418                }
    +419              }
    +420        ```
    +421
    +422        """
    +423        self.folds: int = folds
    +424        """
    +425        The number of folds used in k-fold cross validation
    +426        """
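
A minimal construction sketch, assuming the class is importable as `calidhayte.calibrate.Calibrate` and that the file names and the `'NO2'` target column are placeholders:

```python
import pandas as pd

from calidhayte.calibrate import Calibrate  # import path assumed

# Hypothetical co-located measurements; the target must be a column in both
x = pd.read_csv('x.csv', index_col=0)
y = pd.read_csv('y.csv', index_col=0)

cal = Calibrate(
    x_data=x,
    y_data=y,
    target='NO2',
    folds=5,
    strat_groups=10,
    # A tuple or list applies several scalers; unrecognised names trigger a warning
    scaler=['Standard Scale', 'Yeo-Johnson Transform'],
    seed=62
)
```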
     
    @@ -3182,7 +5803,7 @@
    Raises

    The data that x_data will be calibrated against. A 'Fold' column is added using the cont_strat_folds function which splits the data into k stratified folds (where k is the value of folds). It splits the continuous measurements into n bins (where n is the value of strat_groups) and distributes each bin equally across all folds. This significantly reduces the chances of one fold containing a skewed distribution relative to the whole dataset.
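
Continuing with the assumed `cal` instance from the construction sketch above, the fold assignment can be inspected through the 'Fold' column carried by the y data (`'NO2'` is the same placeholder target):

```python
# return_measurements() exposes the filtered x and y dataframes;
# the y dataframe carries the 'Fold' column added at construction time.
y_folds = cal.return_measurements()['y']

# Each fold should show a similar spread of target values.
print(y_folds.groupby('Fold')['NO2'].describe())
```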

    @@ -3236,6 +5857,19 @@
    Raises
    + folds: int
    +
    + The number of folds used in k-fold cross validation
    @@ -3248,41 +5882,41 @@
    Raises
    -
    524    def pymc_bayesian(
    -525            self,
    -526            family: Literal[
    -527                "Gaussian",
    -528                "Student T",
    -529            ] = "Gaussian",
    -530            name: str = " PyMC Bayesian",
    -531            **kwargs
    -532            ):
    -533        """
    -534        Performs bayesian linear regression (either uni or multivariate)
    -535        fitting x on y.
    -536
    -537        Performs bayesian linear regression, both univariate and multivariate,
    -538        on X against y. More details can be found at:
    -539        https://pymc.io/projects/examples/en/latest/generalized_linear_models/
    -540        GLM-robust.html
    -541
    -542        Parameters
    -543        ----------
    -544        family : {'Gaussian', 'Student T'}, default='Gaussian'
    -545            Statistical distribution to fit measurements to. Options are:
    -546                - Gaussian
    -547                - Student T
    -548        """
    -549        # Define model families
    -550        model_families = {
    -551            "Gaussian": "gaussian",
    -552            "Student T": "t"
    -553        }
    -554        self._sklearn_regression_meta(
    -555                model_families[family],
    -556                f'{name} ({model_families})',
    -557                **kwargs
    -558        )
    +            
    557    def pymc_bayesian(
    +558            self,
    +559            family: Literal[
    +560                "Gaussian",
    +561                "Student T",
    +562            ] = "Gaussian",
    +563            name: str = "PyMC Bayesian",
    +564            **kwargs
    +565            ):
    +566        """
    +567        Performs Bayesian linear regression (either univariate or
    +568        multivariate), fitting x on y.
    +569
    +570        Performs Bayesian linear regression, both univariate and multivariate,
    +571        of x against y. More details can be found at:
    +572        https://pymc.io/projects/examples/en/latest/generalized_linear_models/
    +573        GLM-robust.html
    +574
    +575        Parameters
    +576        ----------
    +577        family : {'Gaussian', 'Student T'}, default='Gaussian'
    +578            Statistical distribution to fit measurements to. Options are:
    +579                - Gaussian
    +580                - Student T
    +581        """
    +582        # Define model families
    +583        model_families: dict[str, Literal['t', 'gaussian']] = {
    +584            "Gaussian": 'gaussian',
    +585            "Student T": 't'
    +586        }
    +587        self._sklearn_regression_meta(
    +588                model_families[family],
    +589                f'{name} ({model_families[family]})',
    +590                **kwargs
    +591        )
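
For example, with the assumed `cal` instance from earlier, a robust fit using the Student T likelihood:

```python
# 'family' selects the distribution the measurements are fitted to
cal.pymc_bayesian(family="Student T")
```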
     
    @@ -3311,25 +5945,57 @@
    Parameters
    def - linreg(self, name: str = 'Linear Regression', **kwargs): + linreg( self, name: str = 'Linear Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {}, **kwargs):
    -
    560    def linreg(self, name: str = "Linear Regression", **kwargs):
    -561        """
    -562        Fit x on y via linear regression
    -563
    -564        Parameters
    -565        ----------
    -566        name : str, default="Linear Regression"
    -567            Name of classification technique.
    -568        """
    -569        self._sklearn_regression_meta(
    -570                lm.LinearRegression(**kwargs),
    -571                name
    -572                )
    +            
    593    def linreg(
    +594        self,
    +595        name: str = "Linear Regression",
    +596        random_search: bool = False,
    +597        parameters: dict[
    +598            str,
    +599            Union[
    +600                scipy.stats.rv_continuous,
    +601                List[Union[int, str, float]]
    +602            ]
    +603        ] = {
    +604        },
    +605        **kwargs
    +606            ):
    +607        """
    +608        Fit x on y via linear regression
    +609
    +610        Parameters
    +611        ----------
    +612        name : str, default="Linear Regression"
    +613            Name of regression technique.
    +614        random_search : bool, default=False
    +615            Whether to perform RandomizedSearch to optimise parameters
    +616        parameters : dict[
    +617                str,
    +618                Union[
    +619                    scipy.stats.rv_continuous,
    +620                    List[Union[int, str, float]]
    +621                ]
    +622            ], default=Preset distributions
    +623            The parameters used in RandomizedSearchCV
    +624        """
    +625        if random_search:
    +626            classifier = RandomizedSearchCV(
    +627                lm.LinearRegression(**kwargs),
    +628                parameters,
    +629                cv=self.folds
    +630            )
    +631        else:
    +632            classifier = lm.LinearRegression(**kwargs)
    +633        self._sklearn_regression_meta(
    +634            classifier,
    +635            f'{name}{" (Random Search)" if random_search else ""}',
    +636            random_search=random_search
    +637        )
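
A short usage sketch with the assumed `cal` instance; `linreg` ships an empty parameter dictionary, so a grid has to be supplied for the random search to have anything to sample (the `fit_intercept` grid below is purely illustrative):

```python
# Ordinary least squares on every scaler/variable/fold combination
cal.linreg()

# Wrapped in RandomizedSearchCV; the grid here is an illustrative assumption
cal.linreg(
    random_search=True,
    parameters={'fit_intercept': [True, False]}
)
```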
     
    @@ -3340,6 +6006,16 @@
    Parameters
    • name (str, default="Linear Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]], default=Preset distributions): The parameters used in RandomizedSearchCV
    @@ -3350,25 +6026,68 @@
    Parameters
    def - ridge(self, name: str = 'Ridge Regression', **kwargs): + ridge( self, name: str = 'Ridge Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': uniform(loc=0, scale=2), 'tol': uniform(loc=0, scale=1), 'solver': ['svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga', 'lbfgs']}, **kwargs):
    -
    574    def ridge(self, name: str = "Ridge Regression", **kwargs):
    -575        """
    -576        Fit x on y via ridge regression
    -577
    -578        Parameters
    -579        ----------
    -580        name : str, default="Ridge Regression"
    -581            Name of classification technique
    -582        """
    -583        self._sklearn_regression_meta(
    -584                lm.Ridge(**kwargs),
    -585                name
    -586                )
    +            
    639    def ridge(
    +640        self,
    +641        name: str = "Ridge Regression",
    +642        random_search: bool = False,
    +643        parameters: dict[
    +644            str,
    +645            Union[
    +646                scipy.stats.rv_continuous,
    +647                List[Union[int, str, float]]
    +648            ]
    +649        ] = {
    +650            'alpha': uniform(loc=0, scale=2),
    +651            'tol': uniform(loc=0, scale=1),
    +652            'solver': [
    +653                'svd',
    +654                'cholesky',
    +655                'lsqr',
    +656                'sparse_cg',
    +657                'sag',
    +658                'saga',
    +659                'lbfgs'
    +660            ]
    +661        },
    +662        **kwargs
    +663            ):
    +664        """
    +665        Fit x on y via ridge regression
    +666
    +667        Parameters
    +668        ----------
    +669        name : str, default="Ridge Regression"
    +670            Name of regression technique.
    +671        random_search : bool, default=False
    +672            Whether to perform RandomizedSearch to optimise parameters
    +673        parameters : dict[
    +674                str,
    +675                Union[
    +676                    scipy.stats.rv_continuous,
    +677                    List[Union[int, str, float]]
    +678                ]
    +679            ], default=Preset distributions
    +680            The parameters used in RandomizedSearchCV
    +681        """
    +682        if random_search:
    +683            classifier = RandomizedSearchCV(
    +684                lm.Ridge(**kwargs),
    +685                parameters,
    +686                cv=self.folds
    +687            )
    +688        else:
    +689            classifier = lm.Ridge(**kwargs)
    +690        self._sklearn_regression_meta(
    +691            classifier,
    +692            f'{name}{" (Random Search)" if random_search else ""}',
    +693            random_search=random_search
    +694        )
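
With the assumed `cal` instance, the preset distributions can be used as-is or overridden with any `scipy.stats` distribution or list of candidate values (the values below are illustrative):

```python
from scipy.stats import uniform

# Preset alpha/tol distributions and solver list
cal.ridge(random_search=True)

# Overriding part of the search space
cal.ridge(
    random_search=True,
    parameters={
        'alpha': uniform(loc=0, scale=10),
        'solver': ['svd', 'saga']
    }
)
```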
     
    @@ -3378,7 +6097,17 @@
    Parameters
    • name (str, default="Ridge Regression"): -Name of classification technique
    • +Name of regression technique. +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]], default=Preset distributions): The parameters used in RandomizedSearchCV
    @@ -3389,39 +6118,49 @@
    Parameters
    def - ridge_cv(self, name: str = 'Ridge Regression (Cross Validated)', **kwargs): + ridge_cv( self, name: str = 'Ridge Regression (Cross Validated)', random_search: bool = False, **kwargs):
    -
    588    def ridge_cv(
    -589            self,
    -590            name: str = "Ridge Regression (Cross Validated)",
    -591            **kwargs
    -592            ):
    -593        """
    -594        Fit x on y via cross-validated ridge regression
    -595
    -596        Parameters
    -597        ----------
    -598        name : str, default="Ridge Regression (Cross Validated)"
    -599            Name of classification technique
    -600        """
    -601        self._sklearn_regression_meta(
    -602                lm.RidgeCV(**kwargs),
    -603                name
    -604                )
    +            
    696    def ridge_cv(
    +697            self,
    +698            name: str = "Ridge Regression (Cross Validated)",
    +699            random_search: bool = False,
    +700            **kwargs
    +701            ):
    +702        """
    +703        Fit x on y via cross-validated ridge regression.
    +704        Already cross-validated, so random search is not required.
    +705
    +706        Parameters
    +707        ----------
    +708        name : str, default="Ridge Regression (Cross Validated)"
    +709            Name of regression technique
    +710        random_search : bool, default=False
    +711            Not used
    +712
    +713        """
    +714        _ = random_search
    +715        self._sklearn_regression_meta(
    +716            lm.RidgeCV(**kwargs, cv=self.folds),
    +717            name,
    +718            random_search=True
    +719        )
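
Because `RidgeCV` selects its regularisation strength through its own cross validation, keyword arguments are simply forwarded to the estimator; for example, with the assumed `cal` instance and illustrative alphas:

```python
# alphas is passed through **kwargs to sklearn.linear_model.RidgeCV
cal.ridge_cv(alphas=(0.1, 1.0, 10.0))
```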
     
    -

    Fit x on y via cross-validated ridge regression

    +

    Fit x on y via cross-validated ridge regression. +Already cross-validated, so random search is not required

    Parameters
    • name (str, default="Ridge Regression (Cross Validated)"): Name of classification technique
    • +
    • random_search (bool, default=False): +Not used
    @@ -3432,25 +6171,60 @@
    Parameters
    def - lasso(self, name: str = 'Lasso Regression', **kwargs): + lasso( self, name: str = 'Lasso Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': uniform(loc=0, scale=2), 'tol': uniform(loc=0, scale=1), 'selection': ['cyclic', 'random']}, **kwargs):
    -
    606    def lasso(self, name: str = "Lasso Regression", **kwargs):
    -607        """
    -608        Fit x on y via lasso regression
    -609
    -610        Parameters
    -611        ----------
    -612        name : str, default="Lasso Regression"
    -613            Name of classification technique
    -614        """
    -615        self._sklearn_regression_meta(
    -616                lm.Lasso(**kwargs),
    -617                name
    -618                )
    +            
    721    def lasso(
    +722        self,
    +723        name: str = "Lasso Regression",
    +724        random_search: bool = False,
    +725        parameters: dict[
    +726            str,
    +727            Union[
    +728                scipy.stats.rv_continuous,
    +729                List[Union[int, str, float]]
    +730            ]
    +731        ] = {
    +732            'alpha': uniform(loc=0, scale=2),
    +733            'tol': uniform(loc=0, scale=1),
    +734            'selection': ['cyclic', 'random']
    +735        },
    +736        **kwargs
    +737            ):
    +738        """
    +739        Fit x on y via lasso regression
    +740
    +741        Parameters
    +742        ----------
    +743        name : str, default="Lasso Regression"
    +744            Name of regression technique.
    +745        random_search : bool, default=False
    +746            Whether to perform RandomizedSearch to optimise parameters
    +747        parameters : dict[
    +748                str,
    +749                Union[
    +750                    scipy.stats.rv_continuous,
    +751                    List[Union[int, str, float]]
    +752                ]
    +753            ], default=Preset distributions
    +754            The parameters used in RandomizedSearchCV
    +755        """
    +756        if random_search:
    +757            classifier = RandomizedSearchCV(
    +758                lm.Lasso(**kwargs),
    +759                parameters,
    +760                cv=self.folds
    +761            )
    +762        else:
    +763            classifier = lm.Lasso(**kwargs)
    +764        self._sklearn_regression_meta(
    +765            classifier,
    +766            f'{name}{" (Random Search)" if random_search else ""}',
    +767            random_search=random_search
    +768        )
     
    @@ -3460,7 +6234,17 @@
    Parameters
    • name (str, default="Lasso Regression"): -Name of classification technique
    • +Name of classification technique. +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
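As a rough illustration of what lasso() does when random_search=True, the sketch below reproduces the same pattern with plain scikit-learn: wrap the estimator in RandomizedSearchCV with the preset distributions. The synthetic data, n_iter and seed are assumptions for the example only:

    from scipy.stats import uniform
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Lasso
    from sklearn.model_selection import RandomizedSearchCV

    x, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=0)

    # Mirrors the default parameter distributions shown above
    param_distributions = {
        'alpha': uniform(loc=0, scale=2),
        'tol': uniform(loc=0, scale=1),
        'selection': ['cyclic', 'random'],
    }
    search = RandomizedSearchCV(
        Lasso(), param_distributions, n_iter=20, cv=5, random_state=0
    ).fit(x, y)
    print(search.best_params_)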
    @@ -3471,39 +6255,49 @@
    Parameters
    def - lasso_cv(self, name: str = 'Lasso Regression (Cross Validated)', **kwargs): + lasso_cv( self, name: str = 'Lasso Regression (Cross Validated)', random_search: bool = False, **kwargs):
    -
    620    def lasso_cv(
    -621            self,
    -622            name: str = "Lasso Regression (Cross Validated)",
    -623            **kwargs
    -624            ):
    -625        """
    -626        Fit x on y via cross-validated lasso regression
    -627
    -628        Parameters
    -629        ----------
    -630        name : str, default="Lasso Regression (Cross Validated)"
    -631            Name of classification technique
    -632        """
    -633        self._sklearn_regression_meta(
    -634                lm.LassoCV(**kwargs),
    -635                name
    -636                )
    +            
    770    def lasso_cv(
    +771            self,
    +772            name: str = "Lasso Regression (Cross Validated)",
    +773            random_search: bool = False,
    +774            **kwargs
    +775            ):
    +776        """
    +777        Fit x on y via cross-validated lasso regression.
    +778        Already cross-validated, so random search is not required
    +779
    +780        Parameters
    +781        ----------
    +782        name : str, default="Lasso Regression (Cross Validated)"
    +783            Name of classification technique
    +784        random_search : bool, default=False
    +785            Not used
    +786
    +787        """
    +788        _ = random_search
    +789        self._sklearn_regression_meta(
    +790            lm.LassoCV(**kwargs, cv=self.folds),
    +791            name,
    +792            random_search=True
    +793        )
     
    -

    Fit x on y via cross-validated lasso regression

    +

    Fit x on y via cross-validated lasso regression. +Already cross-validated, so random search is not required

    Parameters
    • name (str, default="Lasso Regression (Cross Validated)"): Name of classification technique
    • +
    • random_search (bool, default=False): +Not used
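Unlike the random-search variant above, LassoCV builds its own path of candidate alphas and scores each one on the folds, so no outer search is needed. A minimal sketch with plain scikit-learn (synthetic data assumed):

    from sklearn.datasets import make_regression
    from sklearn.linear_model import LassoCV

    x, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=0)

    model = LassoCV(cv=5, random_state=0).fit(x, y)
    print(model.alpha_)         # alpha selected by cross-validation
    print(model.alphas_.shape)  # the candidate alphas it generated and scored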
    @@ -3514,29 +6308,60 @@
    Parameters
    def - multi_task_lasso(self, name: str = 'Multi-task Lasso Regression', **kwargs): + multi_task_lasso( self, name: str = 'Multi-task Lasso Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe32c10>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe33310>, 'selection': ['cyclic', 'random']}, **kwargs):
    -
    638    def multi_task_lasso(
    -639            self,
    -640            name: str = "Multi-task Lasso Regression",
    -641            **kwargs
    -642            ):
    -643        """
    -644        Fit x on y via multitask lasso regression
    -645
    -646        Parameters
    -647        ----------
    -648        name : str, default="Multi-task Lasso Regression"
    -649            Name of classification technique
    -650        """
    -651        self._sklearn_regression_meta(
    -652                lm.MultiTaskLasso(**kwargs),
    -653                name
    -654                )
    +            
    795    def multi_task_lasso(
    +796        self,
    +797        name: str = "Multi-task Lasso Regression",
    +798        random_search: bool = False,
    +799        parameters: dict[
    +800            str,
    +801            Union[
    +802                scipy.stats.rv_continuous,
    +803                List[Union[int, str, float]]
    +804            ]
    +805        ] = {
    +806            'alpha': uniform(loc=0, scale=2),
    +807            'tol': uniform(loc=0, scale=1),
    +808            'selection': ['cyclic', 'random']
    +809        },
    +810        **kwargs
    +811            ):
    +812        """
    +813        Fit x on y via multitask lasso regression
    +814
    +815        Parameters
    +816        ----------
    +817        name : str, default="Multi-task Lasso Regression"
    +818            Name of classification technique.
    +819        random_search : bool, default=False
    +820            Whether to perform RandomizedSearch to optimise parameters
    +821        parameters : dict[
    +822                str,
    +823                Union[
    +824                    scipy.stats.rv_continuous,
    +825                    List[Union[int, str, float]]
    +826                ]
    +827            ], default=Preset distributions
    +828            The parameters used in RandomizedSearchCV
    +829        """
    +830        if random_search:
    +831            classifier = RandomizedSearchCV(
    +832                lm.MultiTaskLasso(**kwargs),
    +833                parameters,
    +834                cv=self.folds
    +835            )
    +836        else:
    +837            classifier = lm.MultiTaskLasso(**kwargs)
    +838        self._sklearn_regression_meta(
    +839            classifier,
    +840            f'{name}{" (Random Search)" if random_search else ""}',
    +841            random_search=random_search
    +842        )
     
    @@ -3546,7 +6371,17 @@
    Parameters
    • name (str, default="Multi-task Lasso Regression"): -Name of classification technique
    • +Name of classification technique. +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
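The multi-task variants expect a two-dimensional y (one column per output) and select the same features for every output. A short sketch with plain scikit-learn; the toy data and alpha value are assumptions:

    import numpy as np
    from sklearn.linear_model import MultiTaskLasso

    rng = np.random.default_rng(0)
    x = rng.normal(size=(200, 3))
    # Two related targets built from the same predictors
    y = np.column_stack([
        x @ np.array([1.5, -2.0, 0.5]),
        x @ np.array([0.3, 1.0, -1.2]),
    ]) + rng.normal(scale=0.1, size=(200, 2))

    model = MultiTaskLasso(alpha=0.1).fit(x, y)
    print(model.coef_.shape)  # (n_outputs, n_features) -> (2, 3)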
    @@ -3557,39 +6392,49 @@
    Parameters
    def - multi_task_lasso_cv( self, name: str = 'Multi-task Lasso Regression (Cross Validated)', **kwargs): + multi_task_lasso_cv( self, name: str = 'Multi-task Lasso Regression (Cross Validated)', random_search: bool = False, **kwargs):
    -
    656    def multi_task_lasso_cv(
    -657            self,
    -658            name: str = "Multi-task Lasso Regression (Cross Validated)",
    -659            **kwargs
    -660            ):
    -661        """
    -662        Fit x on y via cross validated multitask lasso regression
    -663
    -664        Parameters
    -665        ----------
    -666        name : str, default="Multi-task Lasso Regression (Cross Validated)"
    -667            Name of classification technique
    -668        """
    -669        self._sklearn_regression_meta(
    -670                lm.MultiTaskLassoCV(**kwargs),
    -671                name
    -672                )
    +            
    844    def multi_task_lasso_cv(
    +845            self,
    +846            name: str = "Multi-task Lasso Regression (Cross Validated)",
    +847            random_search: bool = False,
    +848            **kwargs
    +849            ):
    +850        """
    +851        Fit x on y via cross-validated multitask lasso regression.
    +852        Already cross-validated, so random search is not required
    +853
    +854        Parameters
    +855        ----------
    +856        name : str, default="Multi-task Lasso Regression (Cross Validated)"
    +857            Name of classification technique
    +858        random_search : bool, default=False
    +859            Not used
    +860
    +861        """
    +862        _ = random_search
    +863        self._sklearn_regression_meta(
    +864            lm.MultiTaskLassoCV(**kwargs, cv=self.folds),
    +865            name,
    +866            random_search=True
    +867        )
     
    -

    Fit x on y via cross validated multitask lasso regression

    +

    Fit x on y via cross-validated multitask lasso regression. +Already cross-validated, so random search is not required

    Parameters
    • name (str, default="Multi-task Lasso Regression (Cross Validated)"): Name of classification technique
    • +
    • random_search (bool, default=False): +Not used
    @@ -3600,25 +6445,61 @@
    Parameters
    def - elastic_net(self, name: str = 'Elastic Net Regression', **kwargs): + elastic_net( self, name: str = 'Elastic Net Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe338d0>, 'l1_ratio': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe33fd0>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3c5d0>, 'selection': ['cyclic', 'random']}, **kwargs):
    -
    674    def elastic_net(self, name: str = "Elastic Net Regression", **kwargs):
    -675        """
    -676        Fit x on y via elastic net regression
    -677
    -678        Parameters
    -679        ----------
    -680        name : str, default="Elastic Net Regression"
    -681            Name of classification technique
    -682        """
    -683        self._sklearn_regression_meta(
    -684                lm.ElasticNet(**kwargs),
    -685                name
    -686                )
    +            
    869    def elastic_net(
    +870        self,
    +871        name: str = "Elastic Net Regression",
    +872        random_search: bool = False,
    +873        parameters: dict[
    +874            str,
    +875            Union[
    +876                scipy.stats.rv_continuous,
    +877                List[Union[int, str, float]]
    +878            ]
    +879        ] = {
    +880            'alpha': uniform(loc=0, scale=2),
    +881            'l1_ratio': uniform(loc=0, scale=1),
    +882            'tol': uniform(loc=0, scale=1),
    +883            'selection': ['cyclic', 'random']
    +884        },
    +885        **kwargs
    +886            ):
    +887        """
    +888        Fit x on y via elastic net regression
    +889
    +890        Parameters
    +891        ----------
    +892        name : str, default="Elastic Net Regression"
    +893            Name of classification technique.
    +894        random_search : bool, default=False
    +895            Whether to perform RandomizedSearch to optimise parameters
    +896        parameters : dict[
    +897                str,
    +898                Union[
    +899                    scipy.stats.rv_continuous,
    +900                    List[Union[int, str, float]]
    +901                ]
    +902            ], default=Preset distributions
    +903            The parameters used in RandomizedSearchCV
    +904        """
    +905        if random_search:
    +906            classifier = RandomizedSearchCV(
    +907                lm.ElasticNet(**kwargs),
    +908                parameters,
    +909                cv=self.folds
    +910            )
    +911        else:
    +912            classifier = lm.ElasticNet(**kwargs)
    +913        self._sklearn_regression_meta(
    +914            classifier,
    +915            f'{name}{" (Random Search)" if random_search else ""}',
    +916            random_search=random_search
    +917        )
     
    @@ -3628,7 +6509,17 @@
    Parameters
    • name (str, default="Elastic Net Regression"): -Name of classification technique
    • +Name of classification technique. +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
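Elastic net blends the lasso (L1) and ridge (L2) penalties through l1_ratio, which is why the preset distributions sample it on [0, 1]. A minimal plain-scikit-learn sketch under assumed data and search settings:

    from scipy.stats import uniform
    from sklearn.datasets import make_regression
    from sklearn.linear_model import ElasticNet
    from sklearn.model_selection import RandomizedSearchCV

    x, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=0)

    # l1_ratio=1 is pure lasso, l1_ratio=0 is pure ridge
    search = RandomizedSearchCV(
        ElasticNet(),
        {'alpha': uniform(loc=0, scale=2), 'l1_ratio': uniform(loc=0, scale=1)},
        n_iter=20, cv=5, random_state=0,
    ).fit(x, y)
    print(search.best_params_)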
    @@ -3639,39 +6530,48 @@
    Parameters
    def - elastic_net_cv( self, name: str = 'Elastic Net Regression (Cross Validated)', **kwargs): + elastic_net_cv( self, name: str = 'Elastic Net Regression (Cross Validated)', random_search: bool = False, **kwargs):
    -
    688    def elastic_net_cv(
    -689            self,
    -690            name: str = "Elastic Net Regression (Cross Validated)",
    -691            **kwargs
    -692            ):
    -693        """
    -694        Fit x on y via cross validated elastic net regression
    -695
    -696        Parameters
    -697        ----------
    -698        name : str, default="Elastic Net Regression (Cross Validated)"
    -699            Name of classification technique
    -700        """
    -701        self._sklearn_regression_meta(
    -702                lm.ElasticNetCV(**kwargs),
    -703                name
    -704                )
    +            
    919    def elastic_net_cv(
    +920            self,
    +921            name: str = "Elastic Net Regression (Cross Validated)",
    +922            random_search: bool = False,
    +923            **kwargs
    +924            ):
    +925        """
    +926        Fit x on y via cross-validated elastic net regression.
    +927        Already cross-validated, so random search is not required
    +928
    +929        Parameters
    +930        ----------
    +931        name : str, default="Elastic Net Regression (Cross Validated)"
    +932            Name of classification technique
    +933        random_search : bool, default=False
    +934            Not used
    +935        """
    +936        _ = random_search
    +937        self._sklearn_regression_meta(
    +938            lm.ElasticNetCV(**kwargs, cv=self.folds),
    +939            name,
    +940            random_search=True
    +941        )
     
    -

    Fit x on y via cross validated elastic net regression

    +

    Fit x on y via cross-validated elastic net regression. +Already cross-validated, so random search is not required

    Parameters
      -
    • name (str, default="Elastic Net Regression (Cross Validated)"): +
    • name (str, default="Lasso Regression (Cross Validated)"): Name of classification technique
    • +
    • random_search (bool, default=False): +Not used
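ElasticNetCV cross-validates its own alpha path for each candidate l1_ratio, so the wrapper again skips random search. A brief sketch (plain scikit-learn, assumed data and l1_ratio grid):

    from sklearn.datasets import make_regression
    from sklearn.linear_model import ElasticNetCV

    x, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=0)

    model = ElasticNetCV(l1_ratio=[0.1, 0.5, 0.9], cv=5, random_state=0).fit(x, y)
    print(model.alpha_, model.l1_ratio_)  # values picked by the internal CV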
    @@ -3682,39 +6582,81 @@
    Parameters
    def - multi_task_elastic_net(self, name: str = 'Multi-Task Elastic Net Regression', **kwargs): + multi_task_elastic_net( self, name: str = 'Multi-task Elastic Net Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3cbd0>, 'l1_ratio': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3d310>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3da10>, 'selection': ['cyclic', 'random']}, **kwargs):
    -
    706    def multi_task_elastic_net(
    -707            self,
    -708            name: str = "Multi-Task Elastic Net Regression",
    -709            **kwargs
    -710            ):
    -711        """
    -712        Fit x on y via multi-task elastic net regression
    -713
    -714        Parameters
    -715        ----------
    -716        name : str, default="Multi-task Elastic Net Regression"
    -717            Name of classification technique
    -718        """
    -719        self._sklearn_regression_meta(
    -720                lm.MultiTaskElasticNet(**kwargs),
    -721                name
    -722                )
    +            
    943    def multi_task_elastic_net(
    +944        self,
    +945        name: str = "Multi-task Elastic Net Regression",
    +946        random_search: bool = False,
    +947        parameters: dict[
    +948            str,
    +949            Union[
    +950                scipy.stats.rv_continuous,
    +951                List[Union[int, str, float]]
    +952            ]
    +953        ] = {
    +954            'alpha': uniform(loc=0, scale=2),
    +955            'l1_ratio': uniform(loc=0, scale=1),
    +956            'tol': uniform(loc=0, scale=1),
    +957            'selection': ['cyclic', 'random']
    +958        },
    +959        **kwargs
    +960            ):
    +961        """
    +962        Fit x on y via multi-task elastic net regression
    +963
    +964        Parameters
    +965        ----------
    +966        name : str, default="Multi-task Elastic Net Regression"
    +967            Name of classification technique.
    +968        random_search : bool, default=False
    +969            Whether to perform RandomizedSearch to optimise parameters
    +970        parameters : dict[
    +971                str,
    +972                Union[
    +973                    scipy.stats.rv_continuous,
    +974                    List[Union[int, str, float]]
    +975                ]
    +976            ], default=Preset distributions
    +977            The parameters used in RandomizedSearchCV
    +978        """
    +979        if random_search:
    +980            classifier = RandomizedSearchCV(
    +981                lm.MultiTaskElasticNet(**kwargs),
    +982                parameters,
    +983                cv=self.folds
    +984            )
    +985        else:
    +986            classifier = lm.MultiTaskElasticNet(**kwargs)
    +987        self._sklearn_regression_meta(
    +988            classifier,
    +989            f'{name}{" (Random Search)" if random_search else ""}',
    +990            random_search=random_search
    +991        )
     
    -

    Fit x on y via multi-task elastic net regression

    +

    Fit x on y via multi-task elastic net regression

    Parameters
    • name (str, default="Multi-task Elastic Net Regression"): -Name of classification technique
    • +Name of classification technique. +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
    @@ -3725,40 +6667,50 @@
    Parameters
    def - multi_task_elastic_net_cv( self, name: str = 'Multi-Task Elastic Net Regression (Cross Validated)', **kwargs): + multi_task_elastic_net_cv( self, name: str = 'Multi-Task Elastic Net Regression (Cross Validated)', random_search: bool = False, **kwargs):
    -
    724    def multi_task_elastic_net_cv(
    -725            self,
    -726            name: str = "Multi-Task Elastic Net Regression (Cross Validated)",
    -727            **kwargs
    -728            ):
    -729        """
    -730        Fit x on y via cross validated multi-task elastic net regression
    -731
    -732        Parameters
    -733        ----------
    -734        name : str, default="Multi-Task Elastic Net Regression\
    -735        (Cross Validated)"
    -736            Name of classification technique
    -737        """
    -738        self._sklearn_regression_meta(
    -739                lm.MultiTaskElasticNetCV(**kwargs),
    -740                name
    -741                )
    +            
     993    def multi_task_elastic_net_cv(
    + 994            self,
    + 995            name: str = "Multi-Task Elastic Net Regression (Cross Validated)",
    + 996            random_search: bool = False,
    + 997            **kwargs
    + 998            ):
    + 999        """
    +1000        Fit x on y via cross-validated multi-task elastic net regression.
    +1001        Already cross-validated, so random search is not required
    +1002
    +1003        Parameters
    +1004        ----------
    +1005        name : str, default="Multi-Task Elastic Net Regression \
    +1006        (Cross Validated)"
    +1007            Name of classification technique
    +1008        random_search : bool, default=False
    +1009            Not used
    +1010
    +1011        """
    +1012        _ = random_search
    +1013        self._sklearn_regression_meta(
    +1014            lm.MultiTaskElasticNetCV(**kwargs, cv=self.folds),
    +1015            name,
    +1016            random_search=True
    +1017        )
     
    -

    Fit x on y via cross validated multi-task elastic net regression

    +

    Fit x on y via cross-validated multi-task elastic net regression. +Already cross-validated, so random search is not required

    Parameters
      -
    • name (str, default="Multi-Task Elastic Net Regression (Cross Validated)"): +
    • name (str, default="Multi-Task Elastic Net Regression (Cross Validated)"): Name of classification technique
    • +
    • random_search (bool, default=False): +Not used
    @@ -3769,25 +6721,58 @@
    Parameters
    def - lars(self, name: str = 'Least Angle Regression', **kwargs): + lars( self, name: str = 'Least Angle Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_nonzero_coefs': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}, **kwargs):
    -
    743    def lars(self, name: str = "Least Angle Regression", **kwargs):
    -744        """
    -745        Fit x on y via least angle regression
    -746
    -747        Parameters
    -748        ----------
    -749        name : str, default="Least Angle Regression"
    -750            Name of classification technique.
    -751        """
    -752        self._sklearn_regression_meta(
    -753                lm.Lars(**kwargs),
    -754                name
    -755                )
    +            
    1019    def lars(
    +1020        self,
    +1021        name: str = "Least Angle Regression",
    +1022        random_search: bool = False,
    +1023        parameters: dict[
    +1024            str,
    +1025            Union[
    +1026                scipy.stats.rv_continuous,
    +1027                List[Union[int, str, float]]
    +1028            ]
    +1029        ] = {
    +1030            'n_nonzero_coefs': list(range(1, 11))
    +1031        },
    +1032        **kwargs
    +1033            ):
    +1034        """
    +1035        Fit x on y via least angle regression
    +1036
    +1037        Parameters
    +1038        ----------
    +1039        name : str, default="Least Angle Regression"
    +1040            Name of classification technique.
    +1041        random_search : bool, default=False
    +1042            Whether to perform RandomizedSearch to optimise parameters
    +1043        parameters : dict[
    +1044                str,
    +1045                Union[
    +1046                    scipy.stats.rv_continuous,
    +1047                    List[Union[int, str, float]]
    +1048                ]
    +1049            ], default=Preset distributions
    +1050            The parameters used in RandomizedSearchCV
    +1051        """
    +1052        if random_search:
    +1053            classifier = RandomizedSearchCV(
    +1054                lm.Lars(**kwargs),
    +1055                parameters,
    +1056                cv=self.folds
    +1057            )
    +1058        else:
    +1059            classifier = lm.Lars(**kwargs)
    +1060        self._sklearn_regression_meta(
    +1061            classifier,
    +1062            f'{name}{" (Random Search)" if random_search else ""}',
    +1063            random_search=random_search
    +1064        )
     
    @@ -3798,6 +6783,16 @@
    Parameters
    • name (str, default="Least Angle Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
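In least angle regression the key setting is n_nonzero_coefs, the number of predictors the LARS path is allowed to bring into the model, which is why the preset grid is simply the integers 1-10. A small sketch with plain scikit-learn and assumed data:

    from sklearn.datasets import make_regression
    from sklearn.linear_model import Lars
    from sklearn.model_selection import RandomizedSearchCV

    x, y = make_regression(n_samples=200, n_features=10, noise=5.0, random_state=0)

    search = RandomizedSearchCV(
        Lars(), {'n_nonzero_coefs': list(range(1, 11))},
        n_iter=10, cv=5, random_state=0,
    ).fit(x, y)
    print(search.best_params_)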
    @@ -3808,39 +6803,78 @@
    Parameters
    def - lars_lasso(self, name: str = 'Least Angle Regression (Lasso)', **kwargs): + lars_lasso( self, name: str = 'Least Angle Lasso Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3e710>}, **kwargs):
    -
    757    def lars_lasso(
    -758            self,
    -759            name: str = "Least Angle Regression (Lasso)",
    -760            **kwargs
    -761            ):
    -762        """
    -763        Fit x on y via lasso least angle regression
    -764
    -765        Parameters
    -766        ----------
    -767        name : str, default="Least Angle Regression (Lasso)"
    -768            Name of classification technique
    -769        """
    -770        self._sklearn_regression_meta(
    -771                lm.LassoLars(**kwargs),
    -772                name
    -773                )
    +            
    1066    def lars_lasso(
    +1067        self,
    +1068        name: str = "Least Angle Lasso Regression",
    +1069        random_search: bool = False,
    +1070        parameters: dict[
    +1071            str,
    +1072            Union[
    +1073                scipy.stats.rv_continuous,
    +1074                List[Union[int, str, float]]
    +1075            ]
    +1076        ] = {
    +1077            'alpha': uniform(loc=0, scale=2)
    +1078        },
    +1079        **kwargs
    +1080            ):
    +1081        """
    +1082        Fit x on y via least angle lasso regression
    +1083
    +1084        Parameters
    +1085        ----------
    +1086        name : str, default="Least Angle Lasso Regression"
    +1087            Name of classification technique.
    +1088        random_search : bool, default=False
    +1089            Whether to perform RandomizedSearch to optimise parameters
    +1090        parameters : dict[
    +1091                str,
    +1092                Union[
    +1093                    scipy.stats.rv_continuous,
    +1094                    List[Union[int, str, float]]
    +1095                ]
    +1096            ], default=Preset distributions
    +1097            The parameters used in RandomizedSearchCV
    +1098        """
    +1099        if random_search:
    +1100            classifier = RandomizedSearchCV(
    +1101                lm.LassoLars(**kwargs),
    +1102                parameters,
    +1103                cv=self.folds
    +1104            )
    +1105        else:
    +1106            classifier = lm.LassoLars(**kwargs)
    +1107        self._sklearn_regression_meta(
    +1108            classifier,
    +1109            f'{name}{" (Random Search)" if random_search else ""}',
    +1110            random_search=random_search
    +1111        )
     
    -

    Fit x on y via lasso least angle regression

    +

    Fit x on y via least angle lasso regression

    Parameters
      -
    • name (str, default="Least Angle Regression (Lasso)"): -Name of classification technique
    • +
    • name (str, default="Least Angle Lasso Regression"): +Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
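LassoLars follows the LARS path but applies the lasso shrinkage at each step, so here the searched parameter is alpha rather than a coefficient count. A minimal plain-scikit-learn sketch under assumed data:

    from scipy.stats import uniform
    from sklearn.datasets import make_regression
    from sklearn.linear_model import LassoLars
    from sklearn.model_selection import RandomizedSearchCV

    x, y = make_regression(n_samples=200, n_features=10, noise=5.0, random_state=0)

    search = RandomizedSearchCV(
        LassoLars(), {'alpha': uniform(loc=0, scale=2)},
        n_iter=20, cv=5, random_state=0,
    ).fit(x, y)
    print(search.best_estimator_.coef_)  # sparse coefficient vector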
    @@ -3851,26 +6885,59 @@
    Parameters
    def - omp(self, name: str = 'Orthogonal Matching Pursuit', **kwargs): + omp( self, name: str = 'Orthogonal Matching Pursuit', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_nonzero_coefs': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}, **kwargs):
    -
    775    def omp(self, name: str = "Orthogonal Matching Pursuit", **kwargs):
    -776        """
    -777        Fit x on y via orthogonal matching pursuit regression
    -778
    -779        Parameters
    -780        ----------
    -781        name : str, default="Orthogonal Matching Pursuit"
    -782            Name of classification technique
    -783        """
    -784        self._sklearn_regression_meta(
    -785                lm.OrthogonalMatchingPursuit(**kwargs),
    -786                name,
    -787                min_coeffs=2
    -788                )
    +            
    1113    def omp(
    +1114        self,
    +1115        name: str = "Orthogonal Matching Pursuit",
    +1116        random_search: bool = False,
    +1117        parameters: dict[
    +1118            str,
    +1119            Union[
    +1120                scipy.stats.rv_continuous,
    +1121                List[Union[int, str, float]]
    +1122            ]
    +1123        ] = {
    +1124            'n_nonzero_coefs': list(range(1, 11))
    +1125        },
    +1126        **kwargs
    +1127            ):
    +1128        """
    +1129        Fit x on y via orthogonal matching pursuit regression
    +1130
    +1131        Parameters
    +1132        ----------
    +1133        name : str, default="Orthogonal Matching Pursuit"
    +1134            Name of classification technique.
    +1135        random_search : bool, default=False
    +1136            Whether to perform RandomizedSearch to optimise parameters
    +1137        parameters : dict[
    +1138                str,
    +1139                Union[
    +1140                    scipy.stats.rv_continuous,
    +1141                    List[Union[int, str, float]]
    +1142                ]
    +1143            ], default=Preset distributions
    +1144            The parameters used in RandomizedSearchCV
    +1145        """
    +1146        if random_search:
    +1147            classifier = RandomizedSearchCV(
    +1148                lm.OrthogonalMatchingPursuit(**kwargs),
    +1149                parameters,
    +1150                cv=self.folds
    +1151            )
    +1152        else:
    +1153            classifier = lm.OrthogonalMatchingPursuit(**kwargs)
    +1154        self._sklearn_regression_meta(
    +1155            classifier,
    +1156            f'{name}{" (Random Search)" if random_search else ""}',
    +1157            random_search=random_search,
    +1158            min_coeffs=2
    +1159        )
     
    @@ -3880,7 +6947,17 @@
    Parameters
    • name (str, default="Orthogonal Matching Pursuit"): -Name of classification technique
    • +Name of classification technique. +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
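Orthogonal matching pursuit keeps an exact number of non-zero coefficients, so it only makes sense with at least two predictor columns (presumably why this method passes min_coeffs=2). A short plain-scikit-learn sketch with assumed data:

    from sklearn.datasets import make_regression
    from sklearn.linear_model import OrthogonalMatchingPursuit

    x, y = make_regression(n_samples=200, n_features=5, noise=5.0, random_state=0)

    model = OrthogonalMatchingPursuit(n_nonzero_coefs=2).fit(x, y)
    print((model.coef_ != 0).sum())  # exactly two predictors retained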
    @@ -3891,29 +6968,62 @@
    Parameters
    def - bayesian_ridge(self, name: str = 'Bayesian Ridge Regression', **kwargs): + bayesian_ridge( self, name: str = 'Bayesian Ridge Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3f010>, 'alpha_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3f2d0>, 'alpha_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3f9d0>, 'lambda_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc48110>, 'lambda_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc48810>}, **kwargs):
    -
    790    def bayesian_ridge(
    -791                self,
    -792                name: str = "Bayesian Ridge Regression",
    -793                **kwargs
    -794            ):
    -795        """
    -796        Fit x on y via bayesian ridge regression
    -797
    -798        Parameters
    -799        ----------
    -800        name : str, default="Bayesian Ridge Regression"
    -801            Name of classification technique.
    -802        """
    -803        self._sklearn_regression_meta(
    -804                lm.BayesianRidge(**kwargs),
    -805                name
    -806                )
    +            
    1161    def bayesian_ridge(
    +1162        self,
    +1163        name: str = "Bayesian Ridge Regression",
    +1164        random_search: bool = False,
    +1165        parameters: dict[
    +1166            str,
    +1167            Union[
    +1168                scipy.stats.rv_continuous,
    +1169                List[Union[int, str, float]]
    +1170            ]
    +1171        ] = {
    +1172            'tol': uniform(loc=0, scale=1),
    +1173            'alpha_1': uniform(loc=0, scale=1),
    +1174            'alpha_2': uniform(loc=0, scale=1),
    +1175            'lambda_1': uniform(loc=0, scale=1),
    +1176            'lambda_2': uniform(loc=0, scale=1)
    +1177        },
    +1178        **kwargs
    +1179            ):
    +1180        """
    +1181        Fit x on y via bayesian ridge regression
    +1182
    +1183        Parameters
    +1184        ----------
    +1185        name : str, default="Bayesian Ridge Regression"
    +1186            Name of classification technique.
    +1187        random_search : bool, default=False
    +1188            Whether to perform RandomizedSearch to optimise parameters
    +1189        parameters : dict[
    +1190                str,
    +1191                Union[
    +1192                    scipy.stats.rv_continuous,
    +1193                    List[Union[int, str, float]]
    +1194                ]
    +1195            ], default=Preset distributions
    +1196            The parameters used in RandomizedSearchCV
    +1197        """
    +1198        if random_search:
    +1199            classifier = RandomizedSearchCV(
    +1200                lm.BayesianRidge(**kwargs),
    +1201                parameters,
    +1202                cv=self.folds
    +1203            )
    +1204        else:
    +1205            classifier = lm.BayesianRidge(**kwargs)
    +1206        self._sklearn_regression_meta(
    +1207            classifier,
    +1208            f'{name}{" (Random Search)" if random_search else ""}',
    +1209            random_search=random_search
    +1210        )
     
    @@ -3924,6 +7034,16 @@
    Parameters
    • name (str, default="Bayesian Ridge Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
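In BayesianRidge, alpha_1/alpha_2 and lambda_1/lambda_2 are the shape and rate parameters of the Gamma priors on the noise and weight precisions, and the fitted model returns a predictive distribution rather than a point estimate. A minimal sketch (plain scikit-learn, assumed data, default-sized hyperpriors):

    from sklearn.datasets import make_regression
    from sklearn.linear_model import BayesianRidge

    x, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=0)

    model = BayesianRidge(alpha_1=1e-6, alpha_2=1e-6,
                          lambda_1=1e-6, lambda_2=1e-6).fit(x, y)
    y_mean, y_std = model.predict(x[:5], return_std=True)
    print(y_mean, y_std)  # posterior predictive mean and standard deviation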
    @@ -3934,29 +7054,62 @@
    Parameters
    def - bayesian_ard(self, name: str = 'Bayesian Automatic Relevance Detection', **kwargs): + bayesian_ard( self, name: str = 'Bayesian Automatic Relevance Detection', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc48f10>, 'alpha_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc496d0>, 'alpha_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc49dd0>, 'lambda_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc4a4d0>, 'lambda_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc4abd0>}, **kwargs):
    -
    808    def bayesian_ard(
    -809            self,
    -810            name: str = "Bayesian Automatic Relevance Detection",
    -811            **kwargs
    -812            ):
    -813        """
    -814        Fit x on y via bayesian automatic relevance detection
    -815
    -816        Parameters
    -817        ----------
    -818        name : str, default="Bayesian Automatic Relevance Detection"
    -819            Name of classification technique.
    -820        """
    -821        self._sklearn_regression_meta(
    -822                lm.ARDRegression(**kwargs),
    -823                name
    -824                )
    +            
    1212    def bayesian_ard(
    +1213        self,
    +1214        name: str = "Bayesian Automatic Relevance Detection",
    +1215        random_search: bool = False,
    +1216        parameters: dict[
    +1217            str,
    +1218            Union[
    +1219                scipy.stats.rv_continuous,
    +1220                List[Union[int, str, float]]
    +1221            ]
    +1222        ] = {
    +1223            'tol': uniform(loc=0, scale=1),
    +1224            'alpha_1': uniform(loc=0, scale=1),
    +1225            'alpha_2': uniform(loc=0, scale=1),
    +1226            'lambda_1': uniform(loc=0, scale=1),
    +1227            'lambda_2': uniform(loc=0, scale=1)
    +1228        },
    +1229        **kwargs
    +1230            ):
    +1231        """
    +1232        Fit x on y via bayesian automatic relevance detection
    +1233
    +1234        Parameters
    +1235        ----------
    +1236        name : str, default="Bayesian Automatic Relevance Detection"
    +1237            Name of classification technique.
    +1238        random_search : bool, default=False
    +1239            Whether to perform RandomizedSearch to optimise parameters
    +1240        parameters : dict[
    +1241                str,
    +1242                Union[
    +1243                    scipy.stats.rv_continuous,
    +1244                    List[Union[int, str, float]]
    +1245                ]
    +1246            ], default=Preset distributions
    +1247            The parameters used in RandomizedSearchCV
    +1248        """
    +1249        if random_search:
    +1250            classifier = RandomizedSearchCV(
    +1251                lm.ARDRegression(**kwargs),
    +1252                parameters,
    +1253                cv=self.folds
    +1254            )
    +1255        else:
    +1256            classifier = lm.ARDRegression(**kwargs)
    +1257        self._sklearn_regression_meta(
    +1258            classifier,
    +1259            f'{name}{" (Random Search)" if random_search else ""}',
    +1260            random_search=random_search
    +1261        )
     
    @@ -3967,6 +7120,16 @@
    Parameters
    • name (str, default="Bayesian Automatic Relevance Detection"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
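Automatic relevance determination places a separate precision on each weight, so uninformative predictors are driven towards zero. A small sketch with plain scikit-learn; the data (only 2 of 10 informative columns) is an assumption for the example:

    from sklearn.datasets import make_regression
    from sklearn.linear_model import ARDRegression

    x, y = make_regression(n_samples=200, n_features=10, n_informative=2,
                           noise=1.0, random_state=0)

    model = ARDRegression().fit(x, y)
    print(abs(model.coef_).round(2))  # near-zero weights on the irrelevant columns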
    @@ -3977,25 +7140,61 @@
    Parameters
    def - tweedie(self, name: str = 'Tweedie Regression', **kwargs): + tweedie( self, name: str = 'Tweedie Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'power': [0, 1, 1.5, 2, 2.5, 3], 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc4b2d0>, 'solver': ['lbfgs', 'newton-cholesky'], 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc4bb10>}, **kwargs):
    -
    826    def tweedie(self, name: str = "Tweedie Regression", **kwargs):
    -827        """
    -828        Fit x on y via tweedie regression
    -829
    -830        Parameters
    -831        ----------
    -832        name : str, default="Tweedie Regression"
    -833            Name of classification technique.
    -834        """
    -835        self._sklearn_regression_meta(
    -836                lm.TweedieRegressor(**kwargs),
    -837                name
    -838                )
    +            
    1263    def tweedie(
    +1264        self,
    +1265        name: str = "Tweedie Regression",
    +1266        random_search: bool = False,
    +1267        parameters: dict[
    +1268            str,
    +1269            Union[
    +1270                scipy.stats.rv_continuous,
    +1271                List[Union[int, str, float]]
    +1272            ]
    +1273        ] = {
    +1274            'power': [0, 1, 1.5, 2, 2.5, 3],
    +1275            'alpha': uniform(loc=0, scale=2),
    +1276            'solver': ['lbfgs', 'newton-cholesky'],
    +1277            'tol': uniform(loc=0, scale=1),
    +1278        },
    +1279        **kwargs
    +1280            ):
    +1281        """
    +1282        Fit x on y via tweedie regression
    +1283
    +1284        Parameters
    +1285        ----------
    +1286        name : str, default="Tweedie Regression"
    +1287            Name of classification technique.
    +1288        random_search : bool, default=False
    +1289            Whether to perform RandomizedSearch to optimise parameters
    +1290        parameters : dict[
    +1291                str,
    +1292                Union[
    +1293                    scipy.stats.rv_continuous,
    +1294                    List[Union[int, str, float]]
    +1295                ]
    +1296            ], default=Preset distributions
    +1297            The parameters used in RandomizedSearchCV
    +1298        """
    +1299        if random_search:
    +1300            classifier = RandomizedSearchCV(
    +1301                lm.TweedieRegressor(**kwargs),
    +1302                parameters,
    +1303                cv=self.folds
    +1304            )
    +1305        else:
    +1306            classifier = lm.TweedieRegressor(**kwargs)
    +1307        self._sklearn_regression_meta(
    +1308            classifier,
    +1309            f'{name}{" (Random Search)" if random_search else ""}',
    +1310            random_search=random_search
    +1311        )
     
    @@ -4006,6 +7205,16 @@
    Parameters
    • name (str, default="Tweedie Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
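The power parameter selects the member of the Tweedie family (0 normal, 1 Poisson, between 1 and 2 compound Poisson-gamma, 2 gamma, 3 inverse Gaussian), and for powers above 1 the target must be non-negative. A minimal plain-scikit-learn sketch with assumed gamma-distributed data:

    import numpy as np
    from sklearn.linear_model import TweedieRegressor

    rng = np.random.default_rng(0)
    x = rng.normal(size=(200, 3))
    y = rng.gamma(shape=2.0, scale=np.exp(0.3 * x[:, 0] + 0.1))  # strictly positive target

    model = TweedieRegressor(power=1.5, alpha=0.5).fit(x, y)
    print(model.score(x, y))  # D^2, fraction of Tweedie deviance explained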
    @@ -4016,39 +7225,102 @@
    Parameters
    def - stochastic_gradient_descent(self, name: str = 'Stochastic Gradient Descent', **kwargs): + stochastic_gradient_descent( self, name: str = 'Stochastic Gradient Descent', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc502d0>, 'loss': ['squared_error', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'], 'penalty': ['l2', 'l1', 'elasticnet', None], 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc50ad0>, 'l1_ratio': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc51290>, 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc519d0>, 'learning_rate': ['constant', 'optimal', 'invscaling', 'adaptive'], 'eta0': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc52110>, 'power_t': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc52890>}, **kwargs):
    -
    840    def stochastic_gradient_descent(
    -841            self,
    -842            name: str = "Stochastic Gradient Descent",
    -843            **kwargs
    -844            ):
    -845        """
    -846        Fit x on y via stochastic gradient descent regression
    -847
    -848        Parameters
    -849        ----------
    -850        name : str, default="Stochastic Gradient Descent"
    -851            Name of classification technique.
    -852        """
    -853        self._sklearn_regression_meta(
    -854                lm.SGDRegressor(**kwargs),
    -855                name
    -856                )
    +            
    1313    def stochastic_gradient_descent(
    +1314        self,
    +1315        name: str = "Stochastic Gradient Descent",
    +1316        random_search: bool = False,
    +1317        parameters: dict[
    +1318            str,
    +1319            Union[
    +1320                scipy.stats.rv_continuous,
    +1321                List[Union[int, str, float]]
    +1322            ]
    +1323        ] = {
    +1324            'tol': uniform(loc=0, scale=1),
    +1325            'loss': [
    +1326                'squared_error',
    +1327                'huber',
    +1328                'epsilon_insensitive',
    +1329                'squared_epsilon_insensitive'
    +1330            ],
    +1331            'penalty': [
    +1332                'l2',
    +1333                'l1',
    +1334                'elasticnet',
    +1335                None
    +1336            ],
    +1337            'alpha': uniform(loc=0, scale=0.001),
    +1338            'l1_ratio': uniform(loc=0, scale=1),
    +1339            'epsilon': uniform(loc=0, scale=1),
    +1340            'learning_rate': [
    +1341                'constant',
    +1342                'optimal',
    +1343                'invscaling',
    +1344                'adaptive'
    +1345            ],
    +1346            'eta0': uniform(loc=0, scale=0.1),
    +1347            'power_t': uniform(loc=0, scale=1)
    +1348
    +1349        },
    +1350        **kwargs
    +1351            ):
    +1352        """
    +1353        Fit x on y via stochastic gradient descent regression
    +1354
    +1355        Parameters
    +1356        ----------
    +1357        name : str, default="Stochastic Gradient Descent"
    +1358            Name of classification technique.
    +1359        random_search : bool, default=False
    +1360            Whether to perform RandomizedSearch to optimise parameters
    +1361        parameters : dict[
    +1362                str,
    +1363                Union[
    +1364                    scipy.stats.rv_continuous,
    +1365                    List[Union[int, str, float]]
    +1366                ]
    +1367            ], default=Preset distributions
    +1368            The parameters used in RandomizedSearchCV
    +1369        """
    +1370        if random_search:
    +1371            classifier = RandomizedSearchCV(
    +1372                lm.SGDRegressor(**kwargs),
    +1373                parameters,
    +1374                cv=self.folds
    +1375            )
    +1376        else:
    +1377            classifier = lm.SGDRegressor(**kwargs)
    +1378        self._sklearn_regression_meta(
    +1379            classifier,
    +1380            f'{name}{" (Random Search)" if random_search else ""}',
    +1381            random_search=random_search
    +1382        )
     
    -

    Fit x on y via stochastic gradient descent regression

    +

    Fit x on y via stochastic gradient descent regression

    Parameters
    • name (str, default="Stochastic Gradient Descent"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[): +str, + Union[ + scipy.stats.rv_continuous, + List[Union[int, str, float]] + ] +], default=Preset distributions +The parameters used in RandomizedSearchCV
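SGDRegressor is sensitive to feature scale, so outside this class it is usually run behind a scaler; the sketch below (plain scikit-learn, assumed data and a deliberately small search) also shows how pipeline parameters are addressed inside RandomizedSearchCV. The scaler is an assumption of the example, not something this method adds itself:

    from scipy.stats import uniform
    from sklearn.datasets import make_regression
    from sklearn.linear_model import SGDRegressor
    from sklearn.model_selection import RandomizedSearchCV
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler

    x, y = make_regression(n_samples=500, n_features=3, noise=5.0, random_state=0)

    pipe = make_pipeline(StandardScaler(), SGDRegressor(random_state=0))
    search = RandomizedSearchCV(
        pipe,
        {'sgdregressor__loss': ['squared_error', 'huber'],
         'sgdregressor__alpha': uniform(loc=0, scale=0.001)},
        n_iter=10, cv=5, random_state=0,
    ).fit(x, y)
    print(search.best_params_)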
    @@ -4059,39 +7331,78 @@
    Parameters
    def - passive_aggressive(self, name: str = 'Passive Agressive Regression', **kwargs): + passive_aggressive( self, name: str = 'Passive Aggressive Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'C': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc52fd0>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc537d0>, 'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive'], 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc53f10>}, **kwargs):
    -
    858    def passive_aggressive(
    -859            self,
    -860            name: str = "Passive Agressive Regression",
    -861            **kwargs
    -862            ):
    -863        """
    -864        Fit x on y via passive aggressive regression
    -865
    -866        Parameters
    -867        ----------
    -868        name : str, default="Passive Agressive Regression"
    -869            Name of classification technique.
    -870        """
    -871        self._sklearn_regression_meta(
    -872                lm.PassiveAggressiveRegressor(**kwargs),
    -873                name
    -874                )
    +            
    1384    def passive_aggressive(
    +1385        self,
    +1386        name: str = "Passive Aggressive Regression",
    +1387        random_search: bool = False,
    +1388        parameters: dict[
    +1389            str,
    +1390            Union[
    +1391                scipy.stats.rv_continuous,
    +1392                List[Union[int, str, float]]
    +1393            ]
    +1394        ] = {
    +1395            'C': uniform(loc=0, scale=2),
    +1396            'tol': uniform(loc=0, scale=1),
    +1397            'loss': [
    +1398                'epsilon_insensitive',
    +1399                'squared_epsilon_insensitive'
    +1400            ],
    +1401            'epsilon': uniform(loc=0, scale=1)
    +1402        },
    +1403        **kwargs
    +1404            ):
    +1405        """
    +1406        Fit x on y via passive aggressive regression
    +1407
    +1408        Parameters
    +1409        ----------
    +1410        name : str, default="Passive Aggressive Regression"
    +1411            Name of classification technique.
    +1412        random_search : bool, default=False
    +1413            Whether to perform RandomizedSearch to optimise parameters
    +1414        parameters : dict[\
    +1415                str,\
    +1416                Union[\
    +1417                    scipy.stats.rv_continuous,\
    +1418                    List[Union[int, str, float]]\
    +1419                ]\
    +1420            ], default=Preset distributions
    +1421            The parameters used in RandomizedSearchCV
    +1422        """
    +1423        if random_search:
    +1424            classifier = RandomizedSearchCV(
    +1425                lm.PassiveAggressiveRegressor(**kwargs),
    +1426                parameters,
    +1427                cv=self.folds
    +1428            )
    +1429        else:
    +1430            classifier = lm.PassiveAggressiveRegressor(**kwargs)
    +1431        self._sklearn_regression_meta(
    +1432            classifier,
    +1433            f'{name}{" (Random Search)" if random_search else ""}',
    +1434            random_search=random_search
    +1435        )
     
    -

    Fit x on y via passive aggressive regression

    +

    Fit x on y via passive aggressive regression

    Parameters
      -
    • name (str, default="Passive Agressive Regression"): +
    • name (str, default="Passive Aggressive Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
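Passive-aggressive regression only updates when the residual falls outside an epsilon-wide insensitive band, with C bounding the size of each update. A minimal plain-scikit-learn sketch under assumed data and settings:

    from sklearn.datasets import make_regression
    from sklearn.linear_model import PassiveAggressiveRegressor

    x, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=0)

    # Residuals smaller than epsilon trigger no update at all ("passive")
    model = PassiveAggressiveRegressor(C=1.0, epsilon=0.1, random_state=0).fit(x, y)
    print(model.coef_)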
    @@ -4102,35 +7413,75 @@
    Parameters
    def - ransac(self, name: str = 'RANSAC', **kwargs): + ransac( self, name: str = 'RANSAC', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'estimator': [LinearRegression()]}, **kwargs):
    -
    876    def ransac(self, name: str = "RANSAC", **kwargs):
    -877        """
    -878        Fit x on y via RANSAC regression
    -879
    -880        Parameters
    -881        ----------
    -882        name : str, default="RANSAC"
    -883            Name of classification technique.
    -884        """
    -885        self._sklearn_regression_meta(
    -886                lm.RANSACRegressor(**kwargs),
    -887                name
    -888                )
    +            
    1437    def ransac(
    +1438        self,
    +1439        name: str = "RANSAC",
    +1440        random_search: bool = False,
    +1441        parameters: dict[
    +1442            str,
    +1443            Union[
    +1444                scipy.stats.rv_continuous,
    +1445                List[Union[int, str, float]]
    +1446            ]
    +1447        ] = {
    +1448            'estimator': [
    +1449                lm.LinearRegression()
    +1450                # TODO: ADD
    +1451            ]
    +1452        },
    +1453        **kwargs
    +1454            ):
    +1455        """
    +1456        Fit x on y via RANSAC regression
    +1457
    +1458        Parameters
    +1459        ----------
    +1460        name : str, default="RANSAC"
    +1461            Name of classification technique.
    +1462        random_search : bool, default=False
    +1463            Whether to perform RandomizedSearch to optimise parameters
    +1464        parameters : dict[\
    +1465                str,\
    +1466                Union[\
    +1467                    scipy.stats.rv_continuous,\
    +1468                    List[Union[int, str, float]]\
    +1469                ]\
    +1470            ], default=Preset distributions
    +1471            The parameters used in RandomizedSearchCV
    +1472        """
    +1473        if random_search:
    +1474            classifier = RandomizedSearchCV(
    +1475                lm.RANSACRegressor(**kwargs),
    +1476                parameters,
    +1477                cv=self.folds
    +1478            )
    +1479        else:
    +1480            classifier = lm.RANSACRegressor(**kwargs)
    +1481        self._sklearn_regression_meta(
    +1482            classifier,
    +1483            f'{name}{" (Random Search)" if random_search else ""}',
    +1484            random_search=random_search
    +1485        )
     
    -

    Fit x on y via RANSAC regression

    +

    Fit x on y via RANSAC regression

    Parameters
    • name (str, default="RANSAC"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
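RANSAC repeatedly fits the wrapped estimator on random subsets and keeps the fit with the largest inlier consensus, which makes it robust to gross outliers. A small sketch with plain scikit-learn (the estimator keyword assumes a recent scikit-learn release; the data and outlier fraction are assumptions):

    import numpy as np
    from sklearn.linear_model import LinearRegression, RANSACRegressor

    rng = np.random.default_rng(0)
    x = rng.normal(size=(200, 1))
    y = 3.0 * x[:, 0] + rng.normal(scale=0.1, size=200)
    y[:20] += 30  # a block of gross outliers

    model = RANSACRegressor(estimator=LinearRegression(), random_state=0).fit(x, y)
    print(model.estimator_.coef_)    # close to [3.0] despite the outliers
    print(model.inlier_mask_.sum())  # number of points treated as inliers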
    @@ -4141,26 +7492,58 @@
    Parameters
    def - theil_sen(self, name: str = 'Theil-Sen Regression', **kwargs): + theil_sen( self, name: str = 'Theil-Sen Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc68cd0>}, **kwargs):
    -
    890    def theil_sen(self, name: str = "Theil-Sen Regression", **kwargs):
    -891        """
    -892        Fit x on y via theil-sen regression
    -893
    -894        Parameters
    -895        ----------
    -896        name : str, default="Theil-Sen Regression"
    -897            Name of classification technique.
    -898        -Sen Regression
    -899        """
    -900        self._sklearn_regression_meta(
    -901                lm.TheilSenRegressor(**kwargs),
    -902                name
    -903                )
    +            
    1487    def theil_sen(
    +1488        self,
    +1489        name: str = "Theil-Sen Regression",
    +1490        random_search: bool = False,
    +1491        parameters: dict[
    +1492            str,
    +1493            Union[
    +1494                scipy.stats.rv_continuous,
    +1495                List[Union[int, str, float]]
    +1496            ]
    +1497        ] = {
    +1498            'tol': uniform(loc=0, scale=1)
    +1499        },
    +1500        **kwargs
    +1501            ):
    +1502        """
    +1503        Fit x on y via theil-sen regression
    +1504
    +1505        Parameters
    +1506        ----------
    +1507        name : str, default="Theil-Sen Regression"
    +1508            Name of classification technique.
    +1509        random_search : bool, default=False
    +1510            Whether to perform RandomizedSearch to optimise parameters
    +1511        parameters : dict[\
    +1512                str,\
    +1513                Union[\
    +1514                    scipy.stats.rv_continuous,\
    +1515                    List[Union[int, str, float]]\
    +1516                ]\
    +1517            ], default=Preset distributions
    +1518            The parameters used in RandomizedSearchCV
    +1519        """
    +1520        if random_search:
    +1521            classifier = RandomizedSearchCV(
    +1522                lm.TheilSenRegressor(**kwargs),
    +1523                parameters,
    +1524                cv=self.folds
    +1525            )
    +1526        else:
    +1527            classifier = lm.TheilSenRegressor(**kwargs)
    +1528        self._sklearn_regression_meta(
    +1529            classifier,
    +1530            f'{name}{" (Random Search)" if random_search else ""}',
    +1531            random_search=random_search
    +1532        )
     
    @@ -4171,7 +7554,10 @@
    Parameters
    • name (str, default="Theil-Sen Regression"): Name of classification technique.
    • -
    • -Sen Regression
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
    @@ -4182,25 +7568,60 @@
    Parameters
    def - huber(self, name: str = 'Huber Regression', **kwargs): + huber( self, name: str = 'Huber Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc69010>, 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc69810>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc69f50>}, **kwargs):
    -
    905    def huber(self, name: str = "Huber Regression", **kwargs):
    -906        """
    -907        Fit x on y via huber regression
    -908
    -909        Parameters
    -910        ----------
    -911        name : str, default="Huber Regression"
    -912            Name of classification technique.
    -913        """
    -914        self._sklearn_regression_meta(
    -915                lm.HuberRegressor(**kwargs),
    -916                name
    -917                )
    +            
    1534    def huber(
    +1535        self,
    +1536        name: str = "Huber Regression",
    +1537        random_search: bool = False,
    +1538        parameters: dict[
    +1539            str,
    +1540            Union[
    +1541                scipy.stats.rv_continuous,
    +1542                List[Union[int, str, float]]
    +1543            ]
    +1544        ] = {
    +1545            'epsilon': uniform(loc=1, scale=4),
    +1546            'alpha': uniform(loc=0, scale=0.01),
    +1547            'tol': uniform(loc=0, scale=1)
    +1548        },
    +1549        **kwargs
    +1550            ):
    +1551        """
    +1552        Fit x on y via huber regression
    +1553
    +1554        Parameters
    +1555        ----------
    +1556        name : str, default="Huber Regression"
    +1557            Name of classification technique.
    +1558        random_search : bool, default=False
    +1559            Whether to perform RandomizedSearch to optimise parameters
    +1560        parameters : dict[\
    +1561                str,\
    +1562                Union[\
    +1563                    scipy.stats.rv_continuous,\
    +1564                    List[Union[int, str, float]]\
    +1565                ]\
    +1566            ], default=Preset distributions
    +1567            The parameters used in RandomizedSearchCV
    +1568        """
    +1569        if random_search:
    +1570            classifier = RandomizedSearchCV(
    +1571                lm.HuberRegressor(**kwargs),
    +1572                parameters,
    +1573                cv=self.folds
    +1574            )
    +1575        else:
    +1576            classifier = lm.HuberRegressor(**kwargs)
    +1577        self._sklearn_regression_meta(
    +1578            classifier,
    +1579            f'{name}{" (Random Search)" if random_search else ""}',
    +1580            random_search=random_search
    +1581        )
     
    @@ -4211,6 +7632,10 @@
    Parameters
    • name (str, default="Huber Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
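A brief note on the preset distributions (an illustrative aside, not part of the source): scipy's uniform(loc, scale) draws from the interval [loc, loc + scale], so the Huber presets correspond to epsilon in [1, 5], alpha in [0, 0.01] and tol in [0, 1]. For example:

    from scipy.stats import uniform

    # uniform(loc, scale) samples from [loc, loc + scale].
    epsilon_dist = uniform(loc=1, scale=4)   # epsilon in [1, 5]
    alpha_dist = uniform(loc=0, scale=0.01)  # alpha in [0, 0.01]

    print(epsilon_dist.ppf([0.0, 1.0]))             # -> [1. 5.]
    print(alpha_dist.rvs(size=3, random_state=62))  # three sampled alpha values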
    @@ -4221,36 +7646,82 @@
    Parameters
    def - quantile(self, name: str = 'Quantile Regression', **kwargs): + quantile( self, name: str = 'Quantile Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'quantile': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6a690>, 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6ae90>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6b5d0>, 'solver': ['highs-ds', 'highs-ipm', 'highs', 'revised simplex']}, **kwargs):
    -
    919    def quantile(self, name: str = "Quantile Regression", **kwargs):
    -920        """
    -921        Fit x on y via quantile regression
    -922
    -923        Parameters
    -924        ----------
    -925        name : str, default="Quantile Regression"
    -926            Name of classification technique.
    -927        """
    -928        self._sklearn_regression_meta(
    -929                lm.QuantileRegressor(**kwargs),
    -930                name
    -931                )
    +            
    1583    def quantile(
    +1584        self,
    +1585        name: str = "Quantile Regression",
    +1586        random_search: bool = False,
    +1587        parameters: dict[
    +1588            str,
    +1589            Union[
    +1590                scipy.stats.rv_continuous,
    +1591                List[Union[int, str, float]]
    +1592            ]
    +1593        ] = {
    +1594            'quantile': uniform(loc=0, scale=1),
    +1595            'alpha': uniform(loc=0, scale=2),
    +1596            'tol': uniform(loc=0, scale=1),
    +1597            'solver': [
    +1598                'highs-ds',
    +1599                'highs-ipm',
    +1600                'highs',
    +1601                'revised simplex',
    +1602            ]
    +1603        },
    +1604        **kwargs
    +1605            ):
    +1606        """
    +1607        Fit x on y via quantile regression
    +1608
    +1609
    +1610        Parameters
    +1611        ----------
    +1612        name : str, default="Quantile Regression"
    +1613            Name of classification technique.
    +1614        random_search : bool, default=False
    +1615            Whether to perform RandomizedSearch to optimise parameters
    +1616        parameters : dict[\
    +1617                str,\
    +1618                Union[\
    +1619                    scipy.stats.rv_continuous,\
    +1620                    List[Union[int, str, float]]\
    +1621                ]\
    +1622            ], default=Preset distributions
    +1623            The parameters used in RandomizedSearchCV
    +1624        """
    +1625        if random_search:
    +1626            classifier = RandomizedSearchCV(
    +1627                lm.QuantileRegressor(**kwargs),
    +1628                parameters,
    +1629                cv=self.folds
    +1630            )
    +1631        else:
    +1632            classifier = lm.QuantileRegressor(**kwargs)
    +1633        self._sklearn_regression_meta(
    +1634            classifier,
    +1635            f'{name}{" (Random Search)" if random_search else ""}',
    +1636            random_search=random_search
    +1637        )
     

    Fit x on y via quantile regression

    Parameters
    • name (str, default="Quantile Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
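A hedged sketch of how these presets combine discrete and continuous options (the narrowed quantile range below is illustrative; scikit-learn's QuantileRegressor expects the quantile to lie strictly between 0 and 1):

    from scipy.stats import uniform
    from sklearn import linear_model as lm
    from sklearn.model_selection import RandomizedSearchCV

    # Categorical options are plain lists, continuous ones are frozen scipy
    # distributions; RandomizedSearchCV samples candidates from both.
    param_distributions = {
        'quantile': uniform(loc=0.05, scale=0.9),  # samples from [0.05, 0.95]
        'alpha': uniform(loc=0, scale=2),
        'solver': ['highs-ds', 'highs-ipm', 'highs'],
    }
    search = RandomizedSearchCV(
        lm.QuantileRegressor(),
        param_distributions,
        n_iter=20,
        cv=5,
    )
    # search.fit(x, y) would then evaluate 20 sampled candidates over 5 folds.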

    @@ -4260,35 +7731,87 @@
    Parameters
    def - decision_tree(self, name: str = 'Decision Tree', **kwargs): + decision_tree( self, name: str = 'Decision Tree', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'splitter': ['best', 'random'], 'max_features': [None, 'sqrt', 'log2'], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6bd10>}, **kwargs):
    -
    933    def decision_tree(self, name: str = "Decision Tree", **kwargs):
    -934        """
    -935        Fit x on y using a decision tree
    -936
    -937        Parameters
    -938        ----------
    -939        name : str, default="Decision Tree"
    -940            Name of classification technique.
    -941        """
    -942        self._sklearn_regression_meta(
    -943                tree.DecisionTreeRegressor(**kwargs),
    -944                name
    -945                )
    +            
    1639    def decision_tree(
    +1640        self,
    +1641        name: str = "Decision Tree",
    +1642        random_search: bool = False,
    +1643        parameters: dict[
    +1644            str,
    +1645            Union[
    +1646                scipy.stats.rv_continuous,
    +1647                List[Union[int, str, float]]
    +1648            ]
    +1649        ] = {
    +1650            'criterion': [
    +1651                'squared_error',
    +1652                'friedman_mse',
    +1653                'absolute_error',
    +1654                'poisson'
    +1655            ],
    +1656            'splitter': [
    +1657                'best',
    +1658                'random'
    +1659            ],
    +1660            'max_features': [
    +1661                None,
    +1662                'sqrt',
    +1663                'log2'
    +1664            ],
    +1665            'ccp_alpha': uniform(loc=0, scale=2),
    +1666        },
    +1667        **kwargs
    +1668            ):
    +1669        """
    +1670        Fit x on y via decision tree
    +1671
    +1672        Parameters
    +1673        ----------
    +1674        name : str, default="Decision Tree"
    +1675            Name of classification technique.
    +1676        random_search : bool, default=False
    +1677            Whether to perform RandomizedSearch to optimise parameters
    +1678        parameters : dict[\
    +1679                str,\
    +1680                Union[\
    +1681                    scipy.stats.rv_continuous,\
    +1682                    List[Union[int, str, float]]\
    +1683                ]\
    +1684            ], default=Preset distributions
    +1685            The parameters used in RandomizedSearchCV
    +1686        """
    +1687        if random_search:
    +1688            classifier = RandomizedSearchCV(
    +1689                tree.DecisionTreeRegressor(**kwargs),
    +1690                parameters,
    +1691                cv=self.folds
    +1692            )
    +1693        else:
    +1694            classifier = tree.DecisionTreeRegressor(**kwargs)
    +1695        self._sklearn_regression_meta(
    +1696            classifier,
    +1697            f'{name}{" (Random Search)" if random_search else ""}',
    +1698            random_search=random_search
    +1699        )
     
    -

    Fit x on y using a decision tree

    +

    Fit x on y via decision tree

    Parameters
    • name (str, default="Decision Tree"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
    @@ -4299,35 +7822,87 @@
    Parameters
    def - extra_tree(self, name: str = 'Extra Tree', **kwargs): + extra_tree( self, name: str = 'Extra Tree', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'splitter': ['best', 'random'], 'max_features': [None, 'sqrt', 'log2'], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6c650>}, **kwargs):
    -
    947    def extra_tree(self, name: str = "Extra Tree", **kwargs):
    -948        """
    -949        Fit x on y using an extra tree
    -950
    -951        Parameters
    -952        ----------
    -953        name : str, default="Extra Tree"
    -954            Name of classification technique.
    -955        """
    -956        self._sklearn_regression_meta(
    -957                tree.ExtraTreeRegressor(**kwargs),
    -958                name
    -959                )
    +            
    1701    def extra_tree(
    +1702        self,
    +1703        name: str = "Extra Tree",
    +1704        random_search: bool = False,
    +1705        parameters: dict[
    +1706            str,
    +1707            Union[
    +1708                scipy.stats.rv_continuous,
    +1709                List[Union[int, str, float]]
    +1710            ]
    +1711        ] = {
    +1712            'criterion': [
    +1713                'squared_error',
    +1714                'friedman_mse',
    +1715                'absolute_error',
    +1716                'poisson'
    +1717            ],
    +1718            'splitter': [
    +1719                'best',
    +1720                'random'
    +1721            ],
    +1722            'max_features': [
    +1723                None,
    +1724                'sqrt',
    +1725                'log2'
    +1726            ],
    +1727            'ccp_alpha': uniform(loc=0, scale=2),
    +1728        },
    +1729        **kwargs
    +1730            ):
    +1731        """
    +1732        Fit x on y via extra tree
    +1733
    +1734        Parameters
    +1735        ----------
    +1736        name : str, default="Extra Tree"
    +1737            Name of classification technique.
    +1738        random_search : bool, default=False
    +1739            Whether to perform RandomizedSearch to optimise parameters
    +1740        parameters : dict[\
    +1741                str,\
    +1742                Union[\
    +1743                    scipy.stats.rv_continuous,\
    +1744                    List[Union[int, str, float]]\
    +1745                ]\
    +1746            ], default=Preset distributions
    +1747            The parameters used in RandomizedSearchCV
    +1748        """
    +1749        if random_search:
    +1750            classifier = RandomizedSearchCV(
    +1751                tree.ExtraTreeRegressor(**kwargs),
    +1752                parameters,
    +1753                cv=self.folds
    +1754            )
    +1755        else:
    +1756            classifier = tree.ExtraTreeRegressor(**kwargs)
    +1757        self._sklearn_regression_meta(
    +1758            classifier,
    +1759            f'{name}{" (Random Search)" if random_search else ""}',
    +1760            random_search=random_search
    +1761        )
     
    -

    Fit x on y using an extra tree

    +

    Fit x on y via extra tree

    Parameters
    • name (str, default="Extra Tree"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
    @@ -4338,35 +7913,86 @@
    Parameters
    def - random_forest(self, name: str = 'Random Forest', **kwargs): + random_forest( self, name: str = 'Random Forest', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'bootstrap': [True, False], 'max_samples': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6d510>, 'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'max_features': [None, 'sqrt', 'log2'], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6d7d0>}, **kwargs):
    -
    961    def random_forest(self, name: str = "Random Forest", **kwargs):
    -962        """
    -963        Fit x on y using a random forest
    -964
    -965        Parameters
    -966        ----------
    -967        name : str, default="Random Forest"
    -968            Name of classification technique.
    -969        """
    -970        self._sklearn_regression_meta(
    -971                en.RandomForestRegressor(**kwargs),
    -972                name
    -973                )
    +            
    1763    def random_forest(
    +1764        self,
    +1765        name: str = "Random Forest",
    +1766        random_search: bool = False,
    +1767        parameters: dict[
    +1768            str,
    +1769            Union[
    +1770                scipy.stats.rv_continuous,
    +1771                List[Union[int, str, float]]
    +1772            ]
    +1773        ] = {
    +1774            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +1775            'bootstrap': [True, False],
    +1776            'max_samples': uniform(loc=0.01, scale=0.99),
    +1777            'criterion': [
    +1778                'squared_error',
    +1779                'friedman_mse',
    +1780                'absolute_error',
    +1781                'poisson'
    +1782            ],
    +1783            'max_features': [
    +1784                None,
    +1785                'sqrt',
    +1786                'log2'
    +1787            ],
    +1788            'ccp_alpha': uniform(loc=0, scale=2),
    +1789        },
    +1790        **kwargs
    +1791            ):
    +1792        """
    +1793        Fit x on y via random forest
    +1794
    +1795        Parameters
    +1796        ----------
    +1797        name : str, default="Random Forest"
    +1798            Name of classification technique.
    +1799        random_search : bool, default=False
    +1800            Whether to perform RandomizedSearch to optimise parameters
    +1801        parameters : dict[\
    +1802                str,\
    +1803                Union[\
    +1804                    scipy.stats.rv_continuous,\
    +1805                    List[Union[int, str, float]]\
    +1806                ]\
    +1807            ], default=Preset distributions
    +1808            The parameters used in RandomizedSearchCV
    +1809        """
    +1810        if random_search:
    +1811            classifier = RandomizedSearchCV(
    +1812                en.RandomForestRegressor(**kwargs),
    +1813                parameters,
    +1814                cv=self.folds
    +1815            )
    +1816        else:
    +1817            classifier = en.RandomForestRegressor(**kwargs)
    +1818        self._sklearn_regression_meta(
    +1819            classifier,
    +1820            f'{name}{" (Random Search)" if random_search else ""}',
    +1821            random_search=random_search
    +1822        )
     
    -

    Fit x on y using a random forest

    +

    Fit x on y via random forest

    Parameters
    • name (str, default="Random Forest"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
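To illustrate the shape of a search over these presets, a minimal sketch with synthetic data and a cut-down parameter space (the method itself uses the class's folds; bootstrap is pinned to True here because max_samples only applies when bootstrapping):

    import numpy as np
    from scipy.stats import uniform
    from sklearn import ensemble as en
    from sklearn.model_selection import RandomizedSearchCV

    # Synthetic multi-feature calibration data.
    rng = np.random.default_rng(62)
    x = rng.normal(size=(300, 3))
    y = x @ np.array([1.5, -2.0, 0.5]) + rng.normal(0, 0.1, size=300)

    # Cut-down preset space: discrete choices as lists, continuous ranges as
    # frozen distributions.
    param_distributions = {
        'n_estimators': [5, 10, 25, 50],
        'bootstrap': [True],
        'max_samples': uniform(loc=0.01, scale=0.99),
        'max_features': [None, 'sqrt', 'log2'],
    }
    search = RandomizedSearchCV(
        en.RandomForestRegressor(random_state=62),
        param_distributions,
        n_iter=5,
        cv=3,
    )
    search.fit(x, y)
    print(search.best_params_)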
    @@ -4377,39 +8003,86 @@
    Parameters
    def - extra_trees_ensemble(self, name: str = 'Extra Trees Ensemble', **kwargs): + extra_trees_ensemble( self, name: str = 'Extra Trees Ensemble', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'bootstrap': [True, False], 'max_samples': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6e590>, 'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'max_features': [None, 'sqrt', 'log2'], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6e850>}, **kwargs):
    -
    975    def extra_trees_ensemble(
    -976            self,
    -977            name: str = "Extra Trees Ensemble",
    -978            **kwargs
    -979            ):
    -980        """
    -981        Fit x on y using an ensemble of extra trees
    -982
    -983        Parameters
    -984        ----------
    -985        name : str, default="Extra Trees Ensemble"
    -986            Name of classification technique.
    -987        """
    -988        self._sklearn_regression_meta(
    -989                en.ExtraTreesRegressor(**kwargs),
    -990                name
    -991                )
    +            
    1824    def extra_trees_ensemble(
    +1825        self,
    +1826        name: str = "Extra Trees Ensemble",
    +1827        random_search: bool = False,
    +1828        parameters: dict[
    +1829            str,
    +1830            Union[
    +1831                scipy.stats.rv_continuous,
    +1832                List[Union[int, str, float]]
    +1833            ]
    +1834        ] = {
    +1835            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +1836            'bootstrap': [True, False],
    +1837            'max_samples': uniform(loc=0.01, scale=0.99),
    +1838            'criterion': [
    +1839                'squared_error',
    +1840                'friedman_mse',
    +1841                'absolute_error',
    +1842                'poisson'
    +1843            ],
    +1844            'max_features': [
    +1845                None,
    +1846                'sqrt',
    +1847                'log2'
    +1848            ],
    +1849            'ccp_alpha': uniform(loc=0, scale=2),
    +1850        },
    +1851        **kwargs
    +1852            ):
    +1853        """
    +1854        Fit x on y via extra trees ensemble
    +1855
    +1856        Parameters
    +1857        ----------
    +1858        name : str, default="Extra Trees Ensemble"
    +1859            Name of classification technique.
    +1860        random_search : bool, default=False
    +1861            Whether to perform RandomizedSearch to optimise parameters
    +1862        parameters : dict[\
    +1863                str,\
    +1864                Union[\
    +1865                    scipy.stats.rv_continuous,\
    +1866                    List[Union[int, str, float]]\
    +1867                ]\
    +1868            ], default=Preset distributions
    +1869            The parameters used in RandomizedSearchCV
    +1870        """
    +1871        if random_search:
    +1872            classifier = RandomizedSearchCV(
    +1873                en.ExtraTreesRegressor(**kwargs),
    +1874                parameters,
    +1875                cv=self.folds
    +1876            )
    +1877        else:
    +1878            classifier = en.ExtraTreesRegressor(**kwargs)
    +1879        self._sklearn_regression_meta(
    +1880            classifier,
    +1881            f'{name}{" (Random Search)" if random_search else ""}',
    +1882            random_search=random_search
    +1883        )
     
    -

    Fit x on y using an ensemble of extra trees

    +

    Fit x on y via extra trees ensemble

    Parameters
    • name (str, default="Extra Trees Ensemble"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
    @@ -4420,39 +8093,96 @@
    Parameters
    def - gradient_boost_regressor(self, name: str = 'Gradient Boosting Regression', **kwargs): + gradient_boost_regressor( self, name: str = 'Gradient Boosting Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'loss': ['squared_error', 'absolute_error', 'huber', 'quantile'], 'learning_rate': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6f010>, 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'subsample': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6fe50>, 'criterion': ['friedman_mse', 'squared_error'], 'max_features': [None, 'sqrt', 'log2'], 'init': [None, 'zero', <class 'sklearn.linear_model._base.LinearRegression'>, <class 'sklearn.linear_model._theil_sen.TheilSenRegressor'>], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc74050>}, **kwargs):
    -
     993    def gradient_boost_regressor(
    - 994            self,
    - 995            name: str = "Gradient Boosting Regression",
    - 996            **kwargs
    - 997            ):
    - 998        """
    - 999        Fit x on y using gradient boosting regression
    -1000
    -1001        Parameters
    -1002        ----------
    -1003        name : str, default="Gradient Boosting Regression"
    -1004            Name of classification technique.
    -1005        """
    -1006        self._sklearn_regression_meta(
    -1007                en.GradientBoostingRegressor(**kwargs),
    -1008                name
    -1009                )
    +            
    1885    def gradient_boost_regressor(
    +1886        self,
    +1887        name: str = "Gradient Boosting Regression",
    +1888        random_search: bool = False,
    +1889        parameters: dict[
    +1890            str,
    +1891            Union[
    +1892                scipy.stats.rv_continuous,
    +1893                List[Union[int, str, float]]
    +1894            ]
    +1895        ] = {
    +1896            'loss': [
    +1897                'squared_error',
    +1898                'absolute_error',
    +1899                'huber',
    +1900                'quantile'
    +1901            ],
    +1902            'learning_rate': uniform(loc=0, scale=2),
    +1903            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +1904            'subsample': uniform(loc=0.01, scale=0.99),
    +1905            'criterion': [
    +1906                'friedman_mse',
    +1907                'squared_error'
    +1908            ],
    +1909            'max_features': [
    +1910                None,
    +1911                'sqrt',
    +1912                'log2'
    +1913            ],
    +1914            'init': [
    +1915                None,
    +1916                'zero',
    +1917                lm.LinearRegression,
    +1918                lm.TheilSenRegressor
    +1919            ],
    +1920            'ccp_alpha': uniform(loc=0, scale=2)
    +1921        },
    +1922        **kwargs
    +1923            ):
    +1924        """
    +1925        Fit x on y via gradient boosting regression
    +1926
    +1927        Parameters
    +1928        ----------
    +1929        name : str, default="Gradient Boosting Regression"
    +1930            Name of classification technique.
    +1931        random_search : bool, default=False
    +1932            Whether to perform RandomizedSearch to optimise parameters
    +1933        parameters : dict[\
    +1934                str,\
    +1935                Union[\
    +1936                    scipy.stats.rv_continuous,\
    +1937                    List[Union[int, str, float]]\
    +1938                ]\
    +1939            ], default=Preset distributions
    +1940            The parameters used in RandomizedSearchCV
    +1941        """
    +1942        if random_search:
    +1943            classifier = RandomizedSearchCV(
    +1944                en.GradientBoostingRegressor(**kwargs),
    +1945                parameters,
    +1946                cv=self.folds
    +1947            )
    +1948        else:
    +1949            classifier = en.GradientBoostingRegressor(**kwargs)
    +1950        self._sklearn_regression_meta(
    +1951            classifier,
    +1952            f'{name}{" (Random Search)" if random_search else ""}',
    +1953            random_search=random_search
    +1954        )
     
    -

    Fit x on y using gradient boosting regression

    +

    Fit x on y via gradient boosting regression

    Parameters
    • name (str, default="Gradient Boosting Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
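For the init option specifically, a short hedged sketch outside of random search (GradientBoostingRegressor accepts an estimator instance or the string 'zero' as the initial prediction; the data below is synthetic):

    import numpy as np
    from sklearn import ensemble as en
    from sklearn import linear_model as lm

    rng = np.random.default_rng(62)
    x = rng.normal(size=(300, 2))
    y = x @ np.array([2.0, -1.0]) + rng.normal(0, 0.1, size=300)

    # init seeds the boosting stages with an initial prediction before the
    # residual-fitting trees are added.
    model = en.GradientBoostingRegressor(
        init=lm.LinearRegression(),
        n_estimators=50,
        random_state=62,
    )
    model.fit(x, y)
    print(round(model.score(x, y), 3))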
    @@ -4463,42 +8193,83 @@
    Parameters
    def - hist_gradient_boost_regressor( self, name: str = 'Histogram-Based Gradient Boosting Regression', **kwargs): + hist_gradient_boost_regressor( self, name: str = 'Histogram-Based Gradient Boosting Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'loss': ['squared_error', 'absolute_error', 'gamma', 'poisson', 'quantile'], 'quantile': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc74850>, 'learning_rate': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc75090>, 'max_iter': [5, 10, 25, 50, 100, 200, 250, 500], 'l2_regularization': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc75dd0>, 'max_bins': [1, 3, 7, 15, 31, 63, 127, 255]}, **kwargs):
    -
    1011    def hist_gradient_boost_regressor(
    -1012            self,
    -1013            name: str = "Histogram-Based Gradient Boosting Regression",
    -1014            **kwargs
    -1015            ):
    -1016        """
    -1017        Fit x on y using histogram-based gradient boosting regression
    -1018
    -1019        Parameters
    -1020        ----------
    -1021        name : str, default="Histogram-Based Gradient Boosting Regression"
    -1022            Name of classification technique.
    -1023        -Based
    -1024            Gradient Boosting Regression
    -1025        """
    -1026        self._sklearn_regression_meta(
    -1027                en.HistGradientBoostingRegressor(**kwargs),
    -1028                name
    -1029                )
    +            
    1956    def hist_gradient_boost_regressor(
    +1957        self,
    +1958        name: str = "Histogram-Based Gradient Boosting Regression",
    +1959        random_search: bool = False,
    +1960        parameters: dict[
    +1961            str,
    +1962            Union[
    +1963                scipy.stats.rv_continuous,
    +1964                List[Union[int, str, float]]
    +1965            ]
    +1966        ] = {
    +1967            'loss': [
    +1968                'squared_error',
    +1969                'absolute_error',
    +1970                'gamma',
    +1971                'poisson',
    +1972                'quantile'
    +1973            ],
    +1974            'quantile': uniform(loc=0, scale=1),
    +1975            'learning_rate': uniform(loc=0, scale=2),
    +1976            'max_iter': [5, 10, 25, 50, 100, 200, 250,  500],
    +1977            'l2_regularization': uniform(loc=0, scale=2),
    +1978            'max_bins': [1, 3, 7, 15, 31, 63, 127, 255]
    +1979        },
    +1980        **kwargs
    +1981            ):
    +1982        """
    +1983        Fit x on y via histogram-based gradient boosting regression
    +1984
    +1985        Parameters
    +1986        ----------
    +1987        name : str, default="Histogram-Based Gradient Boosting Regression"
    +1988            Name of classification technique.
    +1989        random_search : bool, default=False
    +1990            Whether to perform RandomizedSearch to optimise parameters
    +1991        parameters : dict[\
    +1992                str,\
    +1993                Union[\
    +1994                    scipy.stats.rv_continuous,\
    +1995                    List[Union[int, str, float]]\
    +1996                ]\
    +1997            ], default=Preset distributions
    +1998            The parameters used in RandomizedSearchCV
    +1999        """
    +2000        if random_search:
    +2001            classifier = RandomizedSearchCV(
    +2002                en.HistGradientBoostingRegressor(**kwargs),
    +2003                parameters,
    +2004                cv=self.folds
    +2005            )
    +2006        else:
    +2007            classifier = en.HistGradientBoostingRegressor(**kwargs)
    +2008        self._sklearn_regression_meta(
    +2009            classifier,
    +2010            f'{name}{" (Random Search)" if random_search else ""}',
    +2011            random_search=random_search
    +2012        )
     
    -

    Fit x on y using histogram-based gradient boosting regression

    +

    Fit x on y via histogram-based gradient boosting regression

    Parameters
    • name (str, default="Histogram-Based Gradient Boosting Regression"): Name of classification technique.
    • -
    • -Based: Gradient Boosting Regression
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
    @@ -4509,42 +8280,113 @@
    Parameters
    def - mlp_regressor(self, name: str = 'Multi-Layer Perceptron Regression', **kwargs): + mlp_regressor( self, name: str = 'Multi-Layer Perceptron Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'hidden_layer_sizes': [(100,), (100, 200), (10,), (200, 400), (100, 200, 300)], 'activation': ['identity', 'logistic', 'tanh', 'relu'], 'solver': ['lbfgs', 'sgd', 'adam'], 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc76590>, 'batch_size': ['auto', 20, 200, 500, 1000, 5000, 10000], 'learning_rate': ['constant', 'invscaling', 'adaptive'], 'learning_rate_init': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc76ed0>, 'power_t': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc770d0>, 'max_iter': [5, 10, 25, 50, 100, 200, 250, 500], 'shuffle': [True, False], 'momentum': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc77e10>, 'beta_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc84050>, 'beta_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc84790>, 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc84ed0>}, **kwargs):
    -
    1031    def mlp_regressor(
    -1032            self,
    -1033            name: str = "Multi-Layer Perceptron Regression",
    -1034            **kwargs
    -1035            ):
    -1036        """
    -1037        Fit x on y using multi-layer perceptrons
    -1038
    -1039        Parameters
    -1040        ----------
    -1041        name : str, default="Multi-Layer Perceptron Regression"
    -1042            Name of classification technique.
    -1043        -Layer Perceptron
    -1044            Regression
    -1045        """
    -1046        self._sklearn_regression_meta(
    -1047                nn.MLPRegressor(**kwargs),
    -1048                name
    -1049                )
    +            
    2014    def mlp_regressor(
    +2015        self,
    +2016        name: str = "Multi-Layer Perceptron Regression",
    +2017        random_search: bool = False,
    +2018        parameters: dict[
    +2019            str,
    +2020            Union[
    +2021                scipy.stats.rv_continuous,
    +2022                List[Union[int, str, float]]
    +2023            ]
    +2024        ] = {
    +2025            'hidden_layer_sizes': [
    +2026                (100, ),
    +2027                (100, 200),
    +2028                (10, ),
    +2029                (200, 400),
    +2030                (100, 200, 300)
    +2031            ],
    +2032            'activation': [
    +2033                'identity',
    +2034                'logistic',
    +2035                'tanh',
    +2036                'relu'
    +2037            ],
    +2038            'solver': [
    +2039                'lbfgs',
    +2040                'sgd',
    +2041                'adam'
    +2042            ],
    +2043            'alpha': uniform(loc=0, scale=0.1),
    +2044            'batch_size': [
    +2045                'auto',
    +2046                20,
    +2047                200,
    +2048                500,
    +2049                1000,
    +2050                5000,
    +2051                10000
    +2052            ],
    +2053            'learning_rate': [
    +2054                'constant',
    +2055                'invscaling',
    +2056                'adaptive'
    +2057            ],
    +2058            'learning_rate_init': uniform(loc=0, scale=0.1),
    +2059            'power_t': uniform(loc=0.1, scale=0.9),
    +2060            'max_iter': [5, 10, 25, 50, 100, 200, 250,  500],
    +2061            'shuffle': [True, False],
    +2062            'momentum': uniform(loc=0.1, scale=0.9),
    +2063            'beta_1': uniform(loc=0.1, scale=0.9),
    +2064            'beta_2': uniform(loc=0.1, scale=0.9),
    +2065            'epsilon': uniform(loc=1E-8, scale=1E-6),
    +2066
    +2067        },
    +2068        **kwargs
    +2069            ):
    +2070        """
    +2071        Fit x on y via multi-layer perceptron regression
    +2072
    +2073        Parameters
    +2074        ----------
    +2075        name : str, default="Multi-Layer Perceptron Regression"
    +2076            Name of classification technique.
    +2077        random_search : bool, default=False
    +2078            Whether to perform RandomizedSearch to optimise parameters
    +2079        parameters : dict[\
    +2080                str,\
    +2081                Union[\
    +2082                    scipy.stats.rv_continuous,\
    +2083                    List[Union[int, str, float]]\
    +2084                ]\
    +2085            ], default=Preset distributions
    +2086            The parameters used in RandomizedSearchCV
    +2087        """
    +2088        if random_search:
    +2089            classifier = RandomizedSearchCV(
    +2090                nn.MLPRegressor(**kwargs),
    +2091                parameters,
    +2092                cv=self.folds
    +2093            )
    +2094        else:
    +2095            classifier = nn.MLPRegressor(**kwargs)
    +2096        self._sklearn_regression_meta(
    +2097            classifier,
    +2098            f'{name}{" (Random Search)" if random_search else ""}',
    +2099            random_search=random_search
    +2100        )
     
    -

    Fit x on y using multi-layer perceptrons

    +

    Fit x on y via multi-layer perceptron regression

    Parameters
    • name (str, default="Multi-Layer Perceptron Regression"): Name of classification technique.
    • -
    • -Layer Perceptron: Regression
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
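The hidden_layer_sizes entries are tuples with one integer per hidden layer; a minimal sketch (the scaling step and pipeline are illustrative additions, since multi-layer perceptrons are sensitive to feature scale):

    import numpy as np
    from sklearn import neural_network as nn
    from sklearn.pipeline import Pipeline
    import sklearn.preprocessing as pre

    rng = np.random.default_rng(62)
    x = rng.normal(size=(400, 3))
    y = x @ np.array([1.0, -0.5, 2.0]) + rng.normal(0, 0.1, size=400)

    # (100, 200) means two hidden layers of 100 and 200 neurons respectively.
    model = Pipeline([
        ('scale', pre.StandardScaler()),
        ('mlp', nn.MLPRegressor(
            hidden_layer_sizes=(100, 200),
            max_iter=500,          # may still warn about convergence
            random_state=62,
        )),
    ])
    model.fit(x, y)
    print(round(model.score(x, y), 3))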
    @@ -4555,35 +8397,83 @@
    Parameters
    def - svr(self, name: str = 'Support Vector Regression', **kwargs): + svr( self, name: str = 'Support Vector Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'degree': [2, 3, 4], 'gamma': ['scale', 'auto'], 'coef0': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc85610>, 'C': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc85ed0>, 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc86610>, 'shrinking': [True, False]}, **kwargs):
    -
    1051    def svr(self, name: str = "Support Vector Regression", **kwargs):
    -1052        """
    -1053        Fit x on y using support vector regression
    -1054
    -1055        Parameters
    -1056        ----------
    -1057        name : str, default="Support Vector Regression"
    -1058            Name of classification technique.
    -1059        """
    -1060        self._sklearn_regression_meta(
    -1061                svm.SVR(**kwargs),
    -1062                name
    -1063                )
    +            
    2102    def svr(
    +2103        self,
    +2104        name: str = "Support Vector Regression",
    +2105        random_search: bool = False,
    +2106        parameters: dict[
    +2107            str,
    +2108            Union[
    +2109                scipy.stats.rv_continuous,
    +2110                List[Union[int, str, float]]
    +2111            ]
    +2112        ] = {
    +2113            'kernel': [
    +2114                'linear',
    +2115                'poly',
    +2116                'rbf',
    +2117                'sigmoid',
    +2118            ],
    +2119            'degree': [2, 3, 4],
    +2120            'gamma': ['scale', 'auto'],
    +2121            'coef0': uniform(loc=0, scale=1),
    +2122            'C': uniform(loc=0.1, scale=1.9),
    +2123            'epsilon': uniform(loc=1E-8, scale=1),
    +2124            'shrinking': [True, False]
    +2125        },
    +2126        **kwargs
    +2127            ):
    +2128        """
    +2129        Fit x on y via support vector regression
    +2130
    +2131        Parameters
    +2132        ----------
    +2133        name : str, default="Support Vector Regression"
    +2134            Name of classification technique.
    +2135        random_search : bool, default=False
    +2136            Whether to perform RandomizedSearch to optimise parameters
    +2137        parameters : dict[\
    +2138                str,\
    +2139                Union[\
    +2140                    scipy.stats.rv_continuous,\
    +2141                    List[Union[int, str, float]]\
    +2142                ]\
    +2143            ], default=Preset distributions
    +2144            The parameters used in RandomizedSearchCV
    +2145        """
    +2146        if random_search:
    +2147            classifier = RandomizedSearchCV(
    +2148                svm.SVR(**kwargs),
    +2149                parameters,
    +2150                cv=self.folds
    +2151            )
    +2152        else:
    +2153            classifier = svm.SVR(**kwargs)
    +2154        self._sklearn_regression_meta(
    +2155            classifier,
    +2156            f'{name}{" (Random Search)" if random_search else ""}',
    +2157            random_search=random_search
    +2158        )
     
    -

    Fit x on y using support vector regression

    +

    Fit x on y via support vector regression

    Parameters
    • name (str, default="Support Vector Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
    @@ -4594,39 +8484,74 @@
    Parameters
    def - linear_svr(self, name: str = 'Linear Support Vector Regression', **kwargs): + linear_svr( self, name: str = 'Linear Support Vector Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'C': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc86d50>, 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc87590>, 'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive']}, **kwargs):
    -
    1065    def linear_svr(
    -1066            self,
    -1067            name: str = "Linear Support Vector Regression",
    -1068            **kwargs
    -1069            ):
    -1070        """
    -1071        Fit x on y using linear support vector regression
    -1072
    -1073        Parameters
    -1074        ----------
    -1075        name : str, default="Linear Support Vector Regression"
    -1076            Name of classification technique.
    -1077        """
    -1078        self._sklearn_regression_meta(
    -1079                svm.LinearSVR(**kwargs),
    -1080                name
    -1081                )
    +            
    2160    def linear_svr(
    +2161        self,
    +2162        name: str = "Linear Support Vector Regression",
    +2163        random_search: bool = False,
    +2164        parameters: dict[
    +2165            str,
    +2166            Union[
    +2167                scipy.stats.rv_continuous,
    +2168                List[Union[int, str, float]]
    +2169            ]
    +2170        ] = {
    +2171            'C': uniform(loc=0.1, scale=1.9),
    +2172            'epsilon': uniform(loc=1E-8, scale=1),
    +2173            'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive']
    +2174        },
    +2175        **kwargs
    +2176            ):
    +2177        """
    +2178        Fit x on y via linear support vector regression
    +2179
    +2180        Parameters
    +2181        ----------
    +2182        name : str, default="Linear Support Vector Regression"
    +2183            Name of classification technique.
    +2184        random_search : bool, default=False
    +2185            Whether to perform RandomizedSearch to optimise parameters
    +2186        parameters : dict[\
    +2187                str,\
    +2188                Union[\
    +2189                    scipy.stats.rv_continuous,\
    +2190                    List[Union[int, str, float]]\
    +2191                ]\
    +2192            ], default=Preset distributions
    +2193            The parameters used in RandomizedSearchCV
    +2194        """
    +2195        if random_search:
    +2196            classifier = RandomizedSearchCV(
    +2197                svm.LinearSVR(**kwargs),
    +2198                parameters,
    +2199                cv=self.folds
    +2200            )
    +2201        else:
    +2202            classifier = svm.LinearSVR(**kwargs)
    +2203        self._sklearn_regression_meta(
    +2204            classifier,
    +2205            f'{name}{" (Random Search)" if random_search else ""}',
    +2206            random_search=random_search
    +2207        )
     
    -

    Fit x on y using linear support vector regression

    +

    Fit x on y via linear support vector regression

    Parameters
    • name (str, default="Linear Support Vector Regression"): Name of classification technique.
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
    @@ -4637,38 +8562,82 @@
    Parameters
    def - nu_svr(self, name: str = 'Nu-Support Vector Regression', **kwargs): + nu_svr( self, name: str = 'Nu-Support Vector Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'degree': [2, 3, 4], 'gamma': ['scale', 'auto'], 'coef0': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc87cd0>, 'shrinking': [True, False], 'nu': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9c610>}, **kwargs):
    -
    1083    def nu_svr(self, name: str = "Nu-Support Vector Regression", **kwargs):
    -1084        """
    -1085        Fit x on y using nu-support vector regression
    -1086
    -1087        Parameters
    -1088        ----------
    -1089        name : str, default="Nu-Support Vector Regression"
    -1090            Name of classification technique.
    -1091        -Support Vector
    -1092            Regression
    -1093        """
    -1094        self._sklearn_regression_meta(
    -1095                svm.LinearSVR(**kwargs),
    -1096                name
    -1097                )
    +            
    2209    def nu_svr(
    +2210        self,
    +2211        name: str = "Nu-Support Vector Regression",
    +2212        random_search: bool = False,
    +2213        parameters: dict[
    +2214            str,
    +2215            Union[
    +2216                scipy.stats.rv_continuous,
    +2217                List[Union[int, str, float]]
    +2218            ]
    +2219        ] = {
    +2220            'kernel': [
    +2221                'linear',
    +2222                'poly',
    +2223                'rbf',
    +2224                'sigmoid',
    +2225            ],
    +2226            'degree': [2, 3, 4],
    +2227            'gamma': ['scale', 'auto'],
    +2228            'coef0': uniform(loc=0, scale=1),
    +2229            'shrinking': [True, False],
    +2230            'nu': uniform(loc=0, scale=1),
    +2231        },
    +2232        **kwargs
    +2233            ):
    +2234        """
    +2235        Fit x on y via nu-support vector regression
    +2236
    +2237        Parameters
    +2238        ----------
    +2239        name : str, default="Nu-Support Vector Regression"
    +2240            Name of classification technique.
    +2241        random_search : bool, default=False
    +2242            Whether to perform RandomizedSearch to optimise parameters
    +2243        parameters : dict[\
    +2244                str,\
    +2245                Union[\
    +2246                    scipy.stats.rv_continuous,\
    +2247                    List[Union[int, str, float]]\
    +2248                ]\
    +2249            ], default=Preset distributions
    +2250            The parameters used in RandomizedSearchCV
    +2251        """
    +2252        if random_search:
    +2253            classifier = RandomizedSearchCV(
    +2254                svm.NuSVR(**kwargs),
    +2255                parameters,
    +2256                cv=self.folds
    +2257            )
    +2258        else:
    +2259            classifier = svm.NuSVR(**kwargs)
    +2260        self._sklearn_regression_meta(
    +2261            classifier,
    +2262            f'{name}{" (Random Search)" if random_search else ""}',
    +2263            random_search=random_search
    +2264        )
     
    -

    Fit x on y using nu-support vector regression

    +

    Fit x on y via nu-support vector regression

    Parameters
    • name (str, default="Nu-Support Vector Regression"): Name of classification technique.
    • -
    • -Support Vector: Regression
    • +
    • random_search (bool, default=False): +Whether to perform RandomizedSearch to optimise parameters
    • +
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions): +The parameters used in RandomizedSearchCV
    @@ -4679,78 +8648,82 @@
    Parameters
    def - gaussian_process(self, name: str = 'Gaussian Process Regression', **kwargs): + gaussian_process( self, name: str = 'Gaussian Process Regression', random_search: bool = False, parameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'kernel': [None, <class 'sklearn.gaussian_process.kernels.RBF'>, <class 'sklearn.gaussian_process.kernels.Matern'>, <class 'sklearn.gaussian_process.kernels.DotProduct'>, <class 'sklearn.gaussian_process.kernels.WhiteKernel'>, <class 'sklearn.gaussian_process.kernels.CompoundKernel'>, <class 'sklearn.gaussian_process.kernels.ExpSineSquared'>], 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9cd90>, 'normalize_y': [True, False]}, **kwargs):
    -
    1099    def gaussian_process(
    -1100            self,
    -1101            name: str = "Gaussian Process Regression",
    -1102            **kwargs
    -1103            ):
    -1104        """
    -1105        Fit x on y using gaussian process regression
    -1106
    -1107        Parameters
    -1108        ----------
    -1109        name : str, default="Gaussian Process Regression"
    -1110            Name of classification technique.
    -1111        """
    -1112        self._sklearn_regression_meta(
    -1113                gp.GaussianProcessRegressor(**kwargs),
    -1114                name
    -1115                )
    +            
    2266    def gaussian_process(
    +2267        self,
    +2268        name: str = "Gaussian Process Regression",
    +2269        random_search: bool = False,
    +2270        parameters: dict[
    +2271            str,
    +2272            Union[
    +2273                scipy.stats.rv_continuous,
    +2274                List[Union[int, str, float]]
    +2275            ]
    +2276        ] = {
    +2277            'kernel': [
    +2278                None,
    +2279                kern.RBF,
    +2280                kern.Matern,
    +2281                kern.DotProduct,
    +2282                kern.WhiteKernel,
    +2283                kern.CompoundKernel,
    +2284                kern.ExpSineSquared
    +2285            ],
    +2286            'alpha': uniform(loc=0, scale=1E8),
    +2287            'normalize_y': [True, False]
    +2288        },
    +2289        **kwargs
    +2290            ):
    +2291        """
    +2292        Fit x on y via gaussian process regression
    +2293
    +2294        Parameters
    +2295        ----------
    +2296        name : str, default="Gaussian Process Regression"
    +2297            Name of classification technique.
    +2298        random_search : bool, default=False
    +2299            Whether to perform RandomizedSearch to optimise parameters
    +2300        parameters : dict[\
    +2301                str,\
    +2302                Union[\
    +2303                    scipy.stats.rv_continuous,\
    +2304                    List[Union[int, str, float]]\
    +2305                ]\
    +2306            ], default=Preset distributions
    +2307            The parameters used in RandomizedSearchCV
    +2308        """
    +2309        if random_search:
    +2310            classifier = RandomizedSearchCV(
    +2311                gp.GaussianProcessRegressor(**kwargs),
    +2312                parameters,
    +2313                cv=self.folds
    +2314            )
    +2315        else:
    +2316            classifier = gp.GaussianProcessRegressor(**kwargs)
    +2317        self._sklearn_regression_meta(
    +2318            classifier,
    +2319            f'{name}{" (Random Search)" if random_search else ""}',
    +2320            random_search=random_search
    +2321        )
     
    -

    Fit x on y using gaussian process regression

    +

    Fit x on y via gaussian process regression

    Parameters
    • name (str, default="Gaussian Process Regression"): Name of classification technique.
- def pls(self, name: str = 'PLS Regression', **kwargs):
    1117    def pls(self, name: str = "PLS Regression", **kwargs):
    -1118        """
    -1119        Fit x on y using pls regression
    -1120
    -1121        Parameters
    -1122        ----------
    -1123        name : str, default="PLS Regression"
    -1124            Name of classification technique.
    -1125        """
    -1126        self._sklearn_regression_meta(
    -1127                cd.PLSRegression(n_components=1, **kwargs),
    -1128                name
    -1129                )
-

Fit x on y using pls regression

Parameters
• name (str, default="PLS Regression"): Name of classification technique.
• random_search (bool, default=False): Whether to perform RandomizedSearch to optimise parameters
• parameters (dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]], default=Preset distributions): The parameters used in RandomizedSearchCV (see the usage sketch below)
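A minimal usage sketch (not taken from the library's docs) of how `random_search` changes the fitted estimator; `cal` is a hypothetical, already-constructed `Calibrate` instance:

```python
# Hypothetical Calibrate instance `cal`; both calls fit one model per fold.
cal.gaussian_process()                    # plain GaussianProcessRegressor
cal.gaussian_process(random_search=True)  # wrapped in RandomizedSearchCV with
                                          # the preset kernel/alpha distributions
```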
    @@ -4761,36 +8734,73 @@
    Parameters
def
- isotonic(self, name: str = 'Isotonic Regression', **kwargs):
+ isotonic(self, name: str = 'Isotonic Regression', random_search: bool = False, parameters: dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]] = {'increasing': [True, False]}, **kwargs):
    -
    1131    def isotonic(self, name: str = "Isotonic Regression", **kwargs):
    -1132        """
    -1133        Fit x on y using isotonic regression
    -1134
    -1135        Parameters
    -1136        ----------
    -1137        name : str, default="Isotonic Regression"
    -1138            Name of classification technique.
    -1139        """
    -1140        self._sklearn_regression_meta(
    -1141                iso.IsotonicRegression(**kwargs),
    -1142                name,
    -1143                max_coeffs=1
    -1144                )
    +            
    2323    def isotonic(
    +2324        self,
    +2325        name: str = "Isotonic Regression",
    +2326        random_search: bool = False,
    +2327        parameters: dict[
    +2328            str,
    +2329            Union[
    +2330                scipy.stats.rv_continuous,
    +2331                List[Union[int, str, float]]
    +2332            ]
    +2333        ] = {
    +2334            'increasing': [True, False]
    +2335        },
    +2336        **kwargs
    +2337            ):
    +2338        """
    +2339        Fit x on y via isotonic regression
    +2340
    +2341        Parameters
    +2342        ----------
    +2343        name : str, default="Isotonic Regression"
    +2344            Name of classification technique.
    +2345        random_search : bool, default=False
    +2346            Whether to perform RandomizedSearch to optimise parameters
    +2347        parameters : dict[\
    +2348                str,\
    +2349                Union[\
    +2350                    scipy.stats.rv_continuous,\
    +2351                    List[Union[int, str, float]]\
    +2352                ]\
    +2353            ], default=Preset distributions
    +2354            The parameters used in RandomizedSearchCV
    +2355        """
    +2356        if random_search:
    +2357            classifier = RandomizedSearchCV(
    +2358                iso.IsotonicRegression(**kwargs),
    +2359                parameters,
    +2360                cv=self.folds
    +2361            )
    +2362        else:
    +2363            classifier = iso.IsotonicRegression(**kwargs)
    +2364        self._sklearn_regression_meta(
    +2365            classifier,
    +2366            f'{name}{" (Random Search)" if random_search else ""}',
    +2367            random_search=random_search,
    +2368            max_coeffs=1
    +2369        )
     
    -

    Fit x on y using isotonic regression

    +

    Fit x on y via isotonic regression

    Parameters
    • name (str, default="Isotonic Regression"): Name of classification technique.
• random_search (bool, default=False): Whether to perform RandomizedSearch to optimise parameters
• parameters (dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]], default=Preset distributions): The parameters used in RandomizedSearchCV (see the sketch below)
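The same pattern applies to isotonic regression, with the caveat that the implementation passes `max_coeffs=1`, so only a single predictor is used; `cal` is again a hypothetical `Calibrate` instance:

```python
# Hypothetical Calibrate instance `cal`; isotonic regression is univariate
# (max_coeffs=1 above), so it calibrates against a single variable.
cal.isotonic()                    # plain IsotonicRegression
cal.isotonic(random_search=True)  # searches over 'increasing': [True, False]
```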
    @@ -4801,35 +8811,83 @@
    Parameters
def
- xgboost(self, name: str = 'XGBoost Regression', **kwargs):
+ xgboost(self, name: str = 'XGBoost Regression', random_search: bool = False, parameters: dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]] = {'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'max_bins': [1, 3, 7, 15, 31, 63, 127, 255], 'grow_policy': ['depthwise', 'lossguide'], 'learning_rate': uniform(loc=0, scale=2), 'tree_method': ['exact', 'approx', 'hist'], 'gamma': uniform(loc=0, scale=1), 'subsample': uniform(loc=0, scale=1), 'reg_alpha': uniform(loc=0, scale=1), 'reg_lambda': uniform(loc=0, scale=1)}, **kwargs):
    -
    1146    def xgboost(self, name: str = "XGBoost Regression", **kwargs):
    -1147        """
    -1148        Fit x on y using xgboost regression
    -1149
    -1150        Parameters
    -1151        ----------
    -1152        name : str, default="XGBoost Regression"
    -1153            Name of classification technique.
    -1154        """
    -1155        self._sklearn_regression_meta(
    -1156                xgb.XGBRegressor(**kwargs),
    -1157                name
    -1158                )
    +            
    2371    def xgboost(
    +2372        self,
    +2373        name: str = "XGBoost Regression",
    +2374        random_search: bool = False,
    +2375        parameters: dict[
    +2376            str,
    +2377            Union[
    +2378                scipy.stats.rv_continuous,
    +2379                List[Union[int, str, float]]
    +2380            ]
    +2381        ] = {
    +2382            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +2383            'max_bins': [1, 3, 7, 15, 31, 63, 127, 255],
    +2384            'grow_policy': [
    +2385                'depthwise',
    +2386                'lossguide'
    +2387            ],
    +2388            'learning_rate': uniform(loc=0, scale=2),
    +2389            'tree_method': ['exact', 'approx', 'hist'],
    +2390            'gamma': uniform(loc=0, scale=1),
    +2391            'subsample': uniform(loc=0, scale=1),
    +2392            'reg_alpha': uniform(loc=0, scale=1),
    +2393            'reg_lambda': uniform(loc=0, scale=1)
    +2394        },
    +2395        **kwargs
    +2396            ):
    +2397        """
    +2398        Fit x on y via xgboost regression
    +2399
    +2400        Parameters
    +2401        ----------
    +2402        name : str, default="XGBoost Regression"
    +2403            Name of classification technique.
    +2404        random_search : bool, default=False
    +2405            Whether to perform RandomizedSearch to optimise parameters
    +2406        parameters : dict[\
    +2407                str,\
    +2408                Union[\
    +2409                    scipy.stats.rv_continuous,\
    +2410                    List[Union[int, str, float]]\
    +2411                ]\
    +2412            ], default=Preset distributions
    +2413            The parameters used in RandomizedSearchCV
    +2414        """
    +2415        if random_search:
    +2416            classifier = RandomizedSearchCV(
    +2417                xgb.XGBRegressor(**kwargs),
    +2418                parameters,
    +2419                cv=self.folds
    +2420            )
    +2421        else:
    +2422            classifier = xgb.XGBRegressor(**kwargs)
    +2423        self._sklearn_regression_meta(
    +2424            classifier,
    +2425            f'{name}{" (Random Search)" if random_search else ""}',
    +2426            random_search=random_search
    +2427        )
     
    -

    Fit x on y using xgboost regression

    +

    Fit x on y via xgboost regression

    Parameters
    • name (str, default="XGBoost Regression"): Name of classification technique.
• random_search (bool, default=False): Whether to perform RandomizedSearch to optimise parameters
• parameters (dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]], default=Preset distributions): The parameters used in RandomizedSearchCV (see the sketch below)
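The preset search space can be replaced by passing a custom `parameters` dict, mixing lists (sampled uniformly by `RandomizedSearchCV`) with frozen scipy distributions. A hedged sketch with purely illustrative values, where `cal` is a hypothetical `Calibrate` instance:

```python
from scipy.stats import uniform

# Illustrative override of the preset search space; values are examples only.
cal.xgboost(
    random_search=True,
    parameters={
        'n_estimators': [50, 100, 200],             # sampled from the list
        'learning_rate': uniform(loc=0, scale=0.5)  # sampled from the distribution
    }
)
```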
    @@ -4840,39 +8898,83 @@
    Parameters
def
- xgboost_rf(self, name: str = 'XGBoost Random Forest Regression', **kwargs):
+ xgboost_rf(self, name: str = 'XGBoost Random Forest Regression', random_search: bool = False, parameters: dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]] = {'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'max_bin': [1, 3, 7, 15, 31, 63, 127, 255], 'grow_policy': ['depthwise', 'lossguide'], 'learning_rate': uniform(loc=0, scale=2), 'tree_method': ['exact', 'approx', 'hist'], 'gamma': uniform(loc=0, scale=1), 'subsample': uniform(loc=0, scale=1), 'reg_alpha': uniform(loc=0, scale=1), 'reg_lambda': uniform(loc=0, scale=1)}, **kwargs):
    -
    1160    def xgboost_rf(
    -1161            self,
    -1162            name: str = "XGBoost Random Forest Regression",
    -1163            **kwargs
    -1164            ):
    -1165        """
    -1166        Fit x on y using xgboosted random forest regression
    -1167
    -1168        Parameters
    -1169        ----------
    -1170        name : str, default="XGBoost Random Forest Regression"
    -1171            Name of classification technique.
    -1172        """
    -1173        self._sklearn_regression_meta(
    -1174                xgb.XGBRFRegressor(**kwargs),
    -1175                name
    -1176                )
    +            
    2429    def xgboost_rf(
    +2430        self,
    +2431        name: str = "XGBoost Random Forest Regression",
    +2432        random_search: bool = False,
    +2433        parameters: dict[
    +2434            str,
    +2435            Union[
    +2436                scipy.stats.rv_continuous,
    +2437                List[Union[int, str, float]]
    +2438            ]
    +2439        ] = {
    +2440            'n_estimators': [5, 10, 25, 50, 100, 200, 250,  500],
    +2441            'max_bin': [1, 3, 7, 15, 31, 63, 127, 255],
    +2442            'grow_policy': [
    +2443                'depthwise',
    +2444                'lossguide'
    +2445            ],
    +2446            'learning_rate': uniform(loc=0, scale=2),
    +2447            'tree_method': ['exact', 'approx', 'hist'],
    +2448            'gamma': uniform(loc=0, scale=1),
    +2449            'subsample': uniform(loc=0, scale=1),
    +2450            'reg_alpha': uniform(loc=0, scale=1),
    +2451            'reg_lambda': uniform(loc=0, scale=1)
    +2452        },
    +2453        **kwargs
    +2454            ):
    +2455        """
    +2456        Fit x on y via xgboosted random forest regression
    +2457
    +2458        Parameters
    +2459        ----------
    +2460        name : str, default="XGBoost Random Forest Regression"
    +2461            Name of classification technique.
    +2462        random_search : bool, default=False
    +2463            Whether to perform RandomizedSearch to optimise parameters
    +2464        parameters : dict[\
    +2465                str,\
    +2466                Union[\
    +2467                    scipy.stats.rv_continuous,\
    +2468                    List[Union[int, str, float]]\
    +2469                ]\
    +2470            ], default=Preset distributions
    +2471            The parameters used in RandomizedSearchCV
    +2472        """
    +2473        if random_search:
    +2474            classifier = RandomizedSearchCV(
    +2475                xgb.XGBRFRegressor(**kwargs),
    +2476                parameters,
    +2477                cv=self.folds
    +2478            )
    +2479        else:
    +2480            classifier = xgb.XGBRFRegressor(**kwargs)
    +2481        self._sklearn_regression_meta(
    +2482            classifier,
    +2483            f'{name}{" (Random Search)" if random_search else ""}',
    +2484            random_search=random_search
    +2485        )
     
    -

    Fit x on y using xgboosted random forest regression

    +

    Fit x on y via xgboosted random forest regression

    Parameters
    • name (str, default="XGBoost Random Forest Regression"): Name of classification technique.
• random_search (bool, default=False): Whether to perform RandomizedSearch to optimise parameters
• parameters (dict[str, Union[scipy.stats.rv_continuous, List[Union[int, str, float]]]], default=Preset distributions): The parameters used in RandomizedSearchCV
    @@ -4889,26 +8991,26 @@
    Parameters
    -
    1178    def return_measurements(self) -> dict[str, pd.DataFrame]:
    -1179        """
    -1180        Returns the measurements used, with missing values and
    -1181        non-overlapping measurements excluded
    -1182
    -1183        Returns
    -1184        -------
    -1185        dict[str, pd.DataFrame]
    -1186            Dictionary with 2 keys:
    -1187
    -1188            |Key|Value|
    -1189            |---|---|
    -1190            |x|`x_data`|
    -1191            |y|`y_data`|
    -1192
    -1193        """
    -1194        return {
    -1195                'x': self.x_data,
    -1196                'y': self.y_data
    -1197                }
    +            
    2487    def return_measurements(self) -> dict[str, pd.DataFrame]:
    +2488        """
    +2489        Returns the measurements used, with missing values and
    +2490        non-overlapping measurements excluded
    +2491
    +2492        Returns
    +2493        -------
    +2494        dict[str, pd.DataFrame]
    +2495            Dictionary with 2 keys:
    +2496
    +2497            |Key|Value|
    +2498            |---|---|
    +2499            |x|`x_data`|
    +2500            |y|`y_data`|
    +2501
    +2502        """
    +2503        return {
    +2504                'x': self.x_data,
    +2505                'y': self.y_data
    +2506                }
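A short sketch of inspecting the filtered measurements returned above; `cal` is a hypothetical fitted `Calibrate` instance:

```python
measurements = cal.return_measurements()
measurements['x'].head()  # independent measurements actually used
measurements['y'].head()  # matching dependent (reference) measurements
```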
     
    @@ -4954,29 +9056,29 @@
    Returns
    -
    1199    def return_models(self) -> dict[str,  # Technique
    -1200                                    dict[str,  # Scaling method
    -1201                                         dict[str,  # Variables used
    -1202                                              dict[int,  # Fold
    -1203                                                   Pipeline]]]]:
    -1204        """
    -1205        Returns the models stored in the object
    -1206
    -1207        Returns
    -1208        -------
    -1209        dict[str, str, str, int, Pipeline]
    -1210            The calibrated models. They are stored in a nested structure as
    -1211            follows:
    -1212            1. Primary Key, name of the technique (e.g Lasso Regression).
    -1213            2. Scaling technique (e.g Yeo-Johnson Transform).
    -1214            3. Combination of variables used or `target` if calibration is
    -1215            univariate (e.g "`target` + a + b).
-1216            4. Fold, which fold was excluded from the calibration. If data
-1217            is 5-fold cross validated, a key of 4 indicates the data was
-1218            trained on folds 0-3.
-1219
    -1220        """
    -1221        return self.models
    +            
    2508    def return_models(self) -> dict[str,  # Technique
    +2509                                    dict[str,  # Scaling method
    +2510                                         dict[str,  # Variables used
    +2511                                              dict[int,  # Fold
    +2512                                                   Pipeline]]]]:
    +2513        """
    +2514        Returns the models stored in the object
    +2515
    +2516        Returns
    +2517        -------
    +2518        dict[str, str, str, int, Pipeline]
    +2519            The calibrated models. They are stored in a nested structure as
    +2520            follows:
    +2521            1. Primary Key, name of the technique (e.g Lasso Regression).
    +2522            2. Scaling technique (e.g Yeo-Johnson Transform).
    +2523            3. Combination of variables used or `target` if calibration is
    +2524            univariate (e.g "`target` + a + b).
+2525            4. Fold, which fold was excluded from the calibration. If data
+2526            is 5-fold cross validated, a key of 4 indicates the data was
+2527            trained on folds 0-3.
+2528
    +2529        """
    +2530        return self.models
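The nested dictionary described in the docstring can be walked as in this sketch; the keys ('Lasso Regression', 'Yeo-Johnson Transform', the variable combination and fold 4) come from the docstring's own examples, while `cal` and `x_test` are hypothetical:

```python
models = cal.return_models()

# Technique -> scaling method -> variable combination -> fold -> Pipeline
pipeline = models['Lasso Regression']['Yeo-Johnson Transform']['target + a + b'][4]
predictions = pipeline.predict(x_test)  # x_test: DataFrame of test features
```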
     
diff --git a/docs/calidhayte/graphs.html b/docs/calidhayte/graphs.html
index 3683410..ac0df79 100644
--- a/docs/calidhayte/graphs.html
+++ b/docs/calidhayte/graphs.html
@@ -113,6 +113,9 @@

    API Documentation

  • lin_reg_plot
• + shap
• save_plots

@@ -131,6 +134,12 @@

    API Documentation

  • ecdf_plot
• + shap_plot
• + get_shap

@@ -157,383 +166,506 @@

    3from typing import Callable, Literal, Optional, Union 4 5from matplotlib import get_backend - 6import matplotlib.pyplot as plt - 7import numpy as np - 8import pandas as pd - 9import shap - 10from sklearn.pipeline import Pipeline - 11 + 6import matplotlib.figure + 7import matplotlib.pyplot as plt + 8import numpy as np + 9import pandas as pd + 10import shap + 11from sklearn.pipeline import Pipeline 12 - 13class Graphs: - 14 """ - 15 Calculates errors between "true" and "predicted" measurements, plots - 16 graphs and returns all results - 17 """ - 18 - 19 def __init__( - 20 self, - 21 x: pd.DataFrame, - 22 x_name: str, - 23 y: pd.DataFrame, - 24 y_name: str, - 25 target: str, - 26 models: dict[str, dict[str, dict[str, dict[int, Pipeline]]]], - 27 style: str = 'bmh', - 28 backend: str = str(get_backend()) - 29 ): - 30 """ - 31 """ - 32 self.x: pd.DataFrame = x - 33 """ - 34 Independent variable(s) that are calibrated against `y`, the independent - 35 variable. Index should match `y`. - 36 """ - 37 self.y: pd.DataFrame = y - 38 """ - 39 Dependent variable used to calibrate the independent variables `x`. - 40 Index should match `x`. - 41 """ - 42 self.x_name: str = x_name - 43 """ - 44 Label for `x` measurements - 45 """ - 46 self.y_name: str = y_name - 47 """ - 48 Label for `y` measurements - 49 """ - 50 self.target = target - 51 """ - 52 Measurand in `y` to calibrate against - 53 """ - 54 self.models: dict[str, - 55 dict[str, # Scaling Method - 56 dict[str, # Variables used - 57 dict[int, # Fold - 58 Pipeline]]]] = models - 59 """ - 60 The precalibrated models. They are stored in a nested structure as - 61 follows: - 62 1. Primary Key, name of the technique (e.g Lasso Regression). - 63 2. Scaling technique (e.g Yeo-Johnson Transform). - 64 3. Combination of variables used or `target` if calibration is - 65 univariate (e.g "`target` + a + b). - 66 4. Fold, which fold was used excluded from the calibration. If data - 67 if 5-fold cross validated, a key of 4 indicates the data was trained on - 68 folds 0-3. - 69 - 70 ```mermaid - 71 stateDiagram-v2 - 72 models --> Technique - 73 state Technique { - 74 [*] --> Scaling - 75 [*]: The calibration technique used - 76 [*]: (e.g "Lasso Regression") - 77 state Scaling { - 78 [*] --> Variables - 79 [*]: The scaling technique used - 80 [*]: (e.g "Yeo-Johnson Transform") - 81 state Variables { - 82 [*] : The combination of variables used - 83 [*] : (e.g "x + a + b") - 84 [*] --> Fold - 85 state Fold { - 86 [*] : Which fold was excluded from training data - 87 [*] : (e.g 4 indicates folds 0-3 were used to train) - 88 } - 89 } - 90 } - 91 } - 92 ``` - 93 - 94 """ - 95 self.plots: dict[str, # Technique - 96 dict[str, # Scaling Method - 97 dict[str, # Variables used - 98 dict[str, # Plot Name - 99 plt.figure.Figure]]]] = dict() -100 """ -101 The plotted data, stored in a similar structure to `models` -102 1. Primary Key, name of the technique (e.g Lasso Regression). -103 2. Scaling technique (e.g Yeo-Johnson Transform). -104 3. Combination of variables used or `target` if calibration is -105 univariate (e.g "`target` + a + b). -106 4. Name of the plot (e.g. 
'Bland-Altman') -107 -108 ```mermaid -109 stateDiagram-v2 -110 models --> Technique -111 state Technique { -112 [*] --> Scaling -113 [*]: The calibration technique used -114 [*]: (e.g "Lasso Regression") -115 state Scaling { -116 [*] --> Variables -117 [*]: The scaling technique used -118 [*]: (e.g "Yeo-Johnson Transform") -119 state Variables { -120 [*] : The combination of variables used -121 [*] : (e.g "x + a + b") -122 [*] --> pn -123 state "Plot Name" as pn { -124 [*] : Name of the plot -125 [*] : (e.g Bland-Altman) -126 } -127 } -128 } -129 } -130 ``` -131 -132 """ -133 self.style: Union[str, Path] = style -134 """ -135 Name of in-built matplotlib style or path to stylesheet -136 """ -137 self.backend = backend -138 """ -139 Matplotlib backend to use -140 """ -141 -142 def plot_meta(self, plot_func: Callable, name: str, **kwargs): -143 """ -144 Iterates over data and creates plots using function specified in -145 `plot_func` -146 -147 Should not be accessed directly, should instead be called by -148 another method -149 -150 Parameters -151 ---------- -152 plot_func : Callable -153 Function that returns matplotlib figure -154 name : str -155 Name to give plot, used as key in `plots` dict -156 **kwargs -157 Additional arguments passed to `plot_func` -158 """ -159 if not self.x.sort_index().index.to_series().eq( -160 self.y.sort_index().index.to_series() -161 ).all(): -162 raise ValueError( -163 'Index of x and y do not match. Output of Calibrate class ' -164 'in calidhayte should have matching indexes' -165 ) -166 for technique, scaling_methods in self.models.items(): -167 if self.plots.get(technique) is None: -168 self.plots[technique] = dict() -169 for scaling_method, var_combos in scaling_methods.items(): -170 if self.plots[technique].get(scaling_method) is None: -171 self.plots[technique][scaling_method] = dict() -172 for vars, folds in var_combos.items(): -173 if self.plots[technique][scaling_method].get(vars) is None: -174 self.plots[technique][scaling_method][vars] = dict() -175 pred = pd.Series() -176 for fold, model in folds.items(): -177 x_data = self.x.loc[ -178 self.y[self.y.loc[:, 'Fold'] == fold].index, -179 : -180 ] -181 pred = pd.concat( -182 [ -183 pred, -184 pd.Series( -185 index=x_data.index, -186 data=model.predict(x_data) -187 ) -188 ] -189 ) -190 x = pred -191 y = self.y.loc[:, self.target].reindex(x.index) -192 fig = plot_func( -193 x=x, -194 y=y, -195 x_name=self.x_name, -196 y_name=self.y_name, -197 **kwargs + 13 + 14class Graphs: + 15 """ + 16 Calculates errors between "true" and "predicted" measurements, plots + 17 graphs and returns all results + 18 """ + 19 + 20 def __init__( + 21 self, + 22 x: pd.DataFrame, + 23 x_name: str, + 24 y: pd.DataFrame, + 25 y_name: str, + 26 target: str, + 27 models: dict[str, dict[str, dict[str, dict[int, Pipeline]]]], + 28 style: str = 'bmh', + 29 backend: str = str(get_backend()) + 30 ): + 31 """ + 32 """ + 33 self.x: pd.DataFrame = x + 34 """ + 35 Independent variable(s) that are calibrated against `y`, + 36 the independent variable. Index should match `y`. + 37 """ + 38 self.y: pd.DataFrame = y + 39 """ + 40 Dependent variable used to calibrate the independent variables `x`. + 41 Index should match `x`. 
+ 42 """ + 43 self.x_name: str = x_name + 44 """ + 45 Label for `x` measurements + 46 """ + 47 self.y_name: str = y_name + 48 """ + 49 Label for `y` measurements + 50 """ + 51 self.target = target + 52 """ + 53 Measurand in `y` to calibrate against + 54 """ + 55 self.models: dict[ + 56 str, dict[ # Scaling Method + 57 str, dict[ # Variables used + 58 str, dict[ # Fold + 59 int, Pipeline]]]] = models + 60 """ + 61 The precalibrated models. They are stored in a nested structure as + 62 follows: + 63 1. Primary Key, name of the technique (e.g Lasso Regression). + 64 2. Scaling technique (e.g Yeo-Johnson Transform). + 65 3. Combination of variables used or `target` if calibration is + 66 univariate (e.g "`target` + a + b). + 67 4. Fold, which fold was used excluded from the calibration. If data + 68 if 5-fold cross validated, a key of 4 indicates the data was trained on + 69 folds 0-3. + 70 + 71 ```mermaid + 72 stateDiagram-v2 + 73 models --> Technique + 74 state Technique { + 75 [*] --> Scaling + 76 [*]: The calibration technique used + 77 [*]: (e.g "Lasso Regression") + 78 state Scaling { + 79 [*] --> Variables + 80 [*]: The scaling technique used + 81 [*]: (e.g "Yeo-Johnson Transform") + 82 state Variables { + 83 [*] : The combination of variables used + 84 [*] : (e.g "x + a + b") + 85 [*] --> Fold + 86 state Fold { + 87 [*] : Which fold was excluded from training data + 88 [*] : (e.g 4 indicates folds 0-3 were used to train) + 89 } + 90 } + 91 } + 92 } + 93 ``` + 94 + 95 """ + 96 self.plots: dict[str, # Technique + 97 dict[str, # Scaling Method + 98 dict[str, # Variables used + 99 dict[str, # Plot Name +100 matplotlib.figure.Figure]]]] = dict() +101 """ +102 The plotted data, stored in a similar structure to `models` +103 1. Primary Key, name of the technique (e.g Lasso Regression). +104 2. Scaling technique (e.g Yeo-Johnson Transform). +105 3. Combination of variables used or `target` if calibration is +106 univariate (e.g "`target` + a + b). +107 4. Name of the plot (e.g. 'Bland-Altman') +108 +109 ```mermaid +110 stateDiagram-v2 +111 models --> Technique +112 state Technique { +113 [*] --> Scaling +114 [*]: The calibration technique used +115 [*]: (e.g "Lasso Regression") +116 state Scaling { +117 [*] --> Variables +118 [*]: The scaling technique used +119 [*]: (e.g "Yeo-Johnson Transform") +120 state Variables { +121 [*] : The combination of variables used +122 [*] : (e.g "x + a + b") +123 [*] --> pn +124 state "Plot Name" as pn { +125 [*] : Name of the plot +126 [*] : (e.g Bland-Altman) +127 } +128 } +129 } +130 } +131 ``` +132 +133 """ +134 self.style: Union[str, Path] = style +135 """ +136 Name of in-built matplotlib style or path to stylesheet +137 """ +138 self.backend = backend +139 """ +140 Matplotlib backend to use +141 """ +142 +143 def plot_meta( +144 self, +145 plot_func: Callable[ +146 ..., +147 matplotlib.figure.Figure +148 ], +149 name: str, +150 **kwargs +151 ): +152 """ +153 Iterates over data and creates plots using function specified in +154 `plot_func` +155 +156 Should not be accessed directly, should instead be called by +157 another method +158 +159 Parameters +160 ---------- +161 plot_func : Callable +162 Function that returns matplotlib figure +163 name : str +164 Name to give plot, used as key in `plots` dict +165 **kwargs +166 Additional arguments passed to `plot_func` +167 """ +168 if not self.x.sort_index().index.to_series().eq( +169 self.y.sort_index().index.to_series() +170 ).all(): +171 raise ValueError( +172 'Index of x and y do not match. 
Output of Calibrate class ' +173 'in calidhayte should have matching indexes' +174 ) +175 for technique, scaling_methods in self.models.items(): +176 if self.plots.get(technique) is None: +177 self.plots[technique] = dict() +178 for scaling_method, var_combos in scaling_methods.items(): +179 if self.plots[technique].get(scaling_method) is None: +180 self.plots[technique][scaling_method] = dict() +181 for vars, folds in var_combos.items(): +182 if self.plots[technique][scaling_method].get(vars) is None: +183 self.plots[technique][scaling_method][vars] = dict() +184 pred = pd.Series() +185 for fold, model in folds.items(): +186 x_data = self.x.loc[ +187 self.y[self.y.loc[:, 'Fold'] == fold].index, +188 : +189 ] +190 pred = pd.concat( +191 [ +192 pred, +193 pd.Series( +194 index=x_data.index, +195 data=model.predict(x_data) +196 ) +197 ] 198 ) -199 self.plots[technique][scaling_method][vars][name] = fig -200 -201 def bland_altman_plot(self, title=None): -202 with plt.rc_context({'backend': self.backend}), \ -203 plt.style.context(self.style): -204 self.plot_meta(bland_altman_plot, 'Bland-Altman', title=title) -205 -206 def ecdf_plot(self, title=None): -207 with plt.rc_context({'backend': self.backend}), \ -208 plt.style.context(self.style): -209 self.plot_meta(ecdf_plot, 'eCDF', title=title) -210 -211 def lin_reg_plot(self, title=None): -212 with plt.rc_context({'backend': self.backend}), \ -213 plt.style.context(self.style): -214 self.plot_meta(lin_reg_plot, 'Linear Regression', title=title) -215 -216 def save_plots( -217 self, -218 path: str, -219 filetype: Union[ -220 Literal['png', 'pgf', 'pdf'], -221 Iterable[Literal['png', 'pgf', 'pdf']] -222 ] = 'png' -223 ): -224 for technique, scaling_methods in self.plots.items(): -225 for scaling_method, var_combos in scaling_methods.items(): -226 for vars, figures in var_combos.items(): -227 for plot_type, fig in figures.items(): -228 plot_path = Path( -229 f'{path}/{technique}/{plot_type}' -230 ) -231 plot_path.mkdir(parents=True, exist_ok=True) -232 if isinstance(filetype, str): -233 fig.savefig( -234 plot_path / -235 f'{scaling_method} {vars}.{filetype}' -236 ) -237 elif isinstance(filetype, Iterable): -238 for ftype in filetype: -239 fig.savefig( -240 plot_path / -241 f'{scaling_method} {vars}.{ftype}' -242 ) -243 plt.close(fig) -244 -245 -246def ecdf(data): -247 x = np.sort(data) -248 y = np.arange(1, len(data) + 1) / len(data) -249 return x, y -250 -251 -252def lin_reg_plot( -253 x: pd.Series, -254 y: pd.Series, -255 x_name: str, -256 y_name: str, -257 title: Optional[str] = None -258 ): -259 """ -260 """ -261 fig = plt.figure(figsize=(4, 4), dpi=200) -262 fig_gs = fig.add_gridspec( -263 2, -264 2, -265 width_ratios=(7, 2), -266 height_ratios=(2, 7), -267 left=0.1, -268 right=0.9, -269 bottom=0.1, -270 top=0.9, -271 wspace=0.0, -272 hspace=0.0, -273 ) -274 -275 scatter_ax = fig.add_subplot(fig_gs[1, 0]) -276 histx_ax = fig.add_subplot(fig_gs[0, 0], sharex=scatter_ax) -277 histx_ax.axis("off") -278 histy_ax = fig.add_subplot(fig_gs[1, 1], sharey=scatter_ax) -279 histy_ax.axis("off") -280 -281 max_value = max((y.max(), x.max())) -282 min_value = min((y.min(), x.min())) -283 scatter_ax.set_xlim(min_value - 3, max_value + 3) -284 scatter_ax.set_ylim(min_value - 3, max_value + 3) -285 scatter_ax.set_xlabel(x_name) -286 scatter_ax.set_ylabel(y_name) -287 scatter_ax.scatter(x, y, color="C0", marker='.', alpha=0.75) -288 -289 binwidth = 7.5 -290 xymax = max(np.max(np.abs(x)), np.max(np.abs(y))) -291 lim = (int(xymax / binwidth) + 1) * binwidth -292 
-293 bins = np.arange(-lim, lim + binwidth, binwidth) -294 histx_ax.hist(x, bins=bins, color="C0") -295 histy_ax.hist(y, bins=bins, orientation="horizontal", color="C0") -296 if isinstance(title, str): -297 fig.suptitle(title) -298 return fig -299 -300 -301def bland_altman_plot( -302 x: pd.DataFrame, -303 y: pd.Series, -304 title: Optional[str] = None, -305 **kwargs -306 ): -307 """ -308 """ -309 fig, ax = plt.subplots(figsize=(4, 4), dpi=200) -310 x_data = np.mean(np.vstack((x, y)).T, axis=1) -311 y_data = np.array(x) - np.array(y) -312 y_mean = np.mean(y_data) -313 y_sd = 1.96 * np.std(y_data) -314 max_diff_from_mean = max( -315 (y_data - y_mean).min(), (y_data - y_mean).max(), key=abs -316 ) -317 text_adjust = (12 * max_diff_from_mean) / 300 -318 ax.set_ylim(y_mean - max_diff_from_mean, y_mean + max_diff_from_mean) -319 ax.set_xlabel("Average of Measured and Reference") -320 ax.set_ylabel("Difference Between Measured and Reference") -321 ax.scatter(x_data, y_data, alpha=0.75) -322 ax.axline((0, y_mean), (1, y_mean), color="xkcd:vermillion") -323 ax.text( -324 max(x_data), -325 y_mean + text_adjust, -326 f"Mean: {y_mean:.2f}", -327 verticalalignment="bottom", -328 horizontalalignment="right", -329 ) -330 ax.axline( -331 (0, y_mean + y_sd), (1, y_mean + y_sd), color="xkcd:fresh green" -332 ) -333 ax.text( -334 max(x_data), -335 y_mean + y_sd + text_adjust, -336 f"1.96$\\sigma$: {y_mean + y_sd:.2f}", -337 verticalalignment="bottom", -338 horizontalalignment="right", -339 ) -340 ax.axline( -341 (0, y_mean - y_sd), (1, y_mean - y_sd), color="xkcd:fresh green" -342 ) -343 ax.text( -344 max(x_data), -345 y_mean - y_sd + text_adjust, -346 f"1.96$\\sigma$: -{y_sd:.2f}", -347 verticalalignment="bottom", -348 horizontalalignment="right", -349 ) -350 if isinstance(title, str): -351 fig.suptitle(title) -352 return fig -353 -354 -355def ecdf_plot( -356 x: pd.DataFrame, -357 y: pd.Series, -358 x_name: str, -359 y_name: str, -360 title: Optional[str] = None -361 ): -362 """ -363 """ -364 fig, ax = plt.subplots(figsize=(4, 4), dpi=200) -365 true_x, true_y = ecdf(y) -366 pred_x, pred_y = ecdf(x) -367 ax.set_ylim(0, 1) -368 ax.set_xlabel("Measurement") -369 ax.set_ylabel("Cumulative Total") -370 ax.plot(true_x, true_y, linestyle="none", marker=".", label=y_name) -371 ax.plot( -372 pred_x, -373 pred_y, -374 linestyle="none", -375 marker=".", -376 alpha=0.8, -377 label=x_name, -378 ) -379 ax.legend() -380 if isinstance(title, str): -381 fig.suptitle(title) -382 return fig +199 x = pred +200 y = self.y.loc[:, self.target].reindex(x.index) +201 fig = plot_func( +202 x=x, +203 y=y, +204 x_name=self.x_name, +205 y_name=self.y_name, +206 **kwargs +207 ) +208 self.plots[technique][scaling_method][vars][name] = fig +209 +210 def bland_altman_plot(self, title=None): +211 with plt.rc_context({'backend': self.backend}), \ +212 plt.style.context(self.style): +213 self.plot_meta(bland_altman_plot, 'Bland-Altman', title=title) +214 +215 def ecdf_plot(self, title=None): +216 with plt.rc_context({'backend': self.backend}), \ +217 plt.style.context(self.style): +218 self.plot_meta(ecdf_plot, 'eCDF', title=title) +219 +220 def lin_reg_plot(self, title=None): +221 with plt.rc_context({'backend': self.backend}), \ +222 plt.style.context(self.style): +223 self.plot_meta(lin_reg_plot, 'Linear Regression', title=title) +224 +225 def shap(self, pipeline_keys: list[str], title=None): +226 x = self.x +227 y = self.y +228 pipeline = self.models[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]] +229 +230 if not 
self.plots.get(pipeline_keys[0]): +231 self.plots[pipeline_keys[0]] = dict() +232 if not self.plots[pipeline_keys[0]].get(pipeline_keys[1]): +233 self.plots[pipeline_keys[0]][pipeline_keys[1]] = dict() +234 if not self.plots[pipeline_keys[0]][pipeline_keys[1]].get(pipeline_keys[2]): +235 self.plots[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]] = dict() +236 with plt.rc_context({'backend': self.backend}), \ +237 plt.style.context(self.style): +238 shap_df = get_shap(x, y, pipeline) +239 self.plots[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]]['Shap'] = shap_plot(shap_df, x) +240 +241 +242 +243 def save_plots( +244 self, +245 path: str, +246 filetype: Union[ +247 Literal['png', 'pgf', 'pdf'], +248 Iterable[Literal['png', 'pgf', 'pdf']] +249 ] = 'png' +250 ): +251 for technique, scaling_methods in self.plots.items(): +252 for scaling_method, var_combos in scaling_methods.items(): +253 for vars, figures in var_combos.items(): +254 for plot_type, fig in figures.items(): +255 plot_path = Path( +256 f'{path}/{technique}/{plot_type}' +257 ) +258 plot_path.mkdir(parents=True, exist_ok=True) +259 if isinstance(filetype, str): +260 fig.savefig( +261 plot_path / +262 f'{scaling_method} {vars}.{filetype}' +263 ) +264 elif isinstance(filetype, Iterable): +265 for ftype in filetype: +266 fig.savefig( +267 plot_path / +268 f'{scaling_method} {vars}.{ftype}' +269 ) +270 plt.close(fig) +271 +272 +273def ecdf(data): +274 x = np.sort(data) +275 y = np.arange(1, len(data) + 1) / len(data) +276 return x, y +277 +278 +279def lin_reg_plot( +280 x: pd.Series, +281 y: pd.Series, +282 x_name: str, +283 y_name: str, +284 title: Optional[str] = None +285 ): +286 """ +287 """ +288 fig = plt.figure(figsize=(4, 4), dpi=200) +289 fig_gs = fig.add_gridspec( +290 2, +291 2, +292 width_ratios=(7, 2), +293 height_ratios=(2, 7), +294 left=0.1, +295 right=0.9, +296 bottom=0.1, +297 top=0.9, +298 wspace=0.0, +299 hspace=0.0, +300 ) +301 +302 scatter_ax = fig.add_subplot(fig_gs[1, 0]) +303 histx_ax = fig.add_subplot(fig_gs[0, 0], sharex=scatter_ax) +304 histx_ax.axis("off") +305 histy_ax = fig.add_subplot(fig_gs[1, 1], sharey=scatter_ax) +306 histy_ax.axis("off") +307 +308 max_value = max((y.max(), x.max())) +309 min_value = min((y.min(), x.min())) +310 scatter_ax.set_xlim(min_value - 3, max_value + 3) +311 scatter_ax.set_ylim(min_value - 3, max_value + 3) +312 scatter_ax.set_xlabel(x_name) +313 scatter_ax.set_ylabel(y_name) +314 scatter_ax.scatter(x, y, color="C0", marker='.', alpha=0.75) +315 +316 binwidth = 7.5 +317 xymax = max(np.max(np.abs(x)), np.max(np.abs(y))) +318 lim = (int(xymax / binwidth) + 1) * binwidth +319 +320 bins = list(np.arange(-lim, lim + binwidth, binwidth)) +321 histx_ax.hist(x, bins=bins, color="C0") +322 histy_ax.hist(y, bins=bins, orientation="horizontal", color="C0") +323 if isinstance(title, str): +324 fig.suptitle(title) +325 return fig +326 +327 +328def bland_altman_plot( +329 x: pd.DataFrame, +330 y: pd.Series, +331 title: Optional[str] = None, +332 **kwargs +333 ): +334 """ +335 """ +336 fig, ax = plt.subplots(figsize=(4, 4), dpi=200) +337 x_data = np.mean(np.vstack((x, y)).T, axis=1) +338 y_data = np.array(x) - np.array(y) +339 y_mean = np.mean(y_data) +340 y_sd = 1.96 * np.std(y_data) +341 max_diff_from_mean = max( +342 (y_data - y_mean).min(), (y_data - y_mean).max(), key=abs +343 ) +344 text_adjust = (12 * max_diff_from_mean) / 300 +345 ax.set_ylim(y_mean - max_diff_from_mean, y_mean + max_diff_from_mean) +346 ax.set_xlabel("Average of Measured and Reference") +347 
ax.set_ylabel("Difference Between Measured and Reference") +348 ax.scatter(x_data, y_data, alpha=0.75) +349 ax.axline((0, y_mean), (1, y_mean), color="xkcd:vermillion") +350 ax.text( +351 max(x_data), +352 y_mean + text_adjust, +353 f"Mean: {y_mean:.2f}", +354 verticalalignment="bottom", +355 horizontalalignment="right", +356 ) +357 ax.axline( +358 (0, y_mean + y_sd), (1, y_mean + y_sd), color="xkcd:fresh green" +359 ) +360 ax.text( +361 max(x_data), +362 y_mean + y_sd + text_adjust, +363 f"1.96$\\sigma$: {y_mean + y_sd:.2f}", +364 verticalalignment="bottom", +365 horizontalalignment="right", +366 ) +367 ax.axline( +368 (0, y_mean - y_sd), (1, y_mean - y_sd), color="xkcd:fresh green" +369 ) +370 ax.text( +371 max(x_data), +372 y_mean - y_sd + text_adjust, +373 f"1.96$\\sigma$: -{y_sd:.2f}", +374 verticalalignment="bottom", +375 horizontalalignment="right", +376 ) +377 if isinstance(title, str): +378 fig.suptitle(title) +379 return fig +380 +381 +382def ecdf_plot( +383 x: pd.DataFrame, +384 y: pd.Series, +385 x_name: str, +386 y_name: str, +387 title: Optional[str] = None +388 ): +389 """ +390 """ +391 fig, ax = plt.subplots(figsize=(4, 4), dpi=200) +392 true_x, true_y = ecdf(y) +393 pred_x, pred_y = ecdf(x) +394 ax.set_ylim(0, 1) +395 ax.set_xlabel("Measurement") +396 ax.set_ylabel("Cumulative Total") +397 ax.plot(true_x, true_y, linestyle="none", marker=".", label=y_name) +398 ax.plot( +399 pred_x, +400 pred_y, +401 linestyle="none", +402 marker=".", +403 alpha=0.8, +404 label=x_name, +405 ) +406 ax.legend() +407 if isinstance(title, str): +408 fig.suptitle(title) +409 return fig +410 +411def shap_plot(shaps: pd.DataFrame, x: pd.DataFrame): +412 """ +413 """ +414 shaps_min = shaps.drop(['Fold'], axis=1).min(axis=None) +415 shaps_max = shaps.drop(['Fold'], axis=1).max(axis=None) +416 shaps_range = shaps_max - shaps_min +417 shaps_lims = ( +418 shaps_min - (shaps_range * 0.1), +419 shaps_max + (shaps_range * 0.1) +420 ) +421 +422 num_of_cols = shaps.drop(['Fold'], axis=1).shape[1] +423 +424 shape_of_scatters = ( +425 int(np.ceil(num_of_cols / 2)), +426 (min(2, int(num_of_cols))) +427 ) +428 +429 fig, ax = plt.subplots( +430 *shape_of_scatters, +431 figsize=( +432 4 * shape_of_scatters[0], +433 4 * shape_of_scatters[1] +434 ), +435 dpi=200 +436 ) +437 +438 for col_ind, col in enumerate(shaps.drop(['Fold'], axis=1).columns): +439 scatter_data = pd.concat( +440 [ +441 x.loc[:, col].rename('Value'), +442 shaps.loc[:, col].rename('Shap'), +443 shaps.loc[:, 'Fold'].rename('Fold') +444 ], +445 axis=1 +446 ) +447 x_min = scatter_data.loc[:, 'Value'].min() +448 x_max = scatter_data.loc[:, 'Value'].max() +449 x_range = x_max - x_min +450 x_lims = (x_min - (x_range * 0.1), x_max + (x_range * 0.1)) +451 +452 row_num = int(np.floor(col_ind / 2)) +453 col_num = col_ind % 2 +454 for i, fold in enumerate(sorted(shaps.loc[:, 'Fold'].unique())): +455 scat_fold = scatter_data[scatter_data.loc[:, 'Fold'] == fold] +456 ax[row_num, col_num].scatter( +457 scat_fold['Value'], +458 scat_fold['Shap'], +459 c=f'C{i}', +460 label=f'Fold {fold}', +461 marker='.' 
+462 ) +463 ax[row_num, col_num].set_title(col) +464 ax[row_num, col_num].set_xlabel('Value') +465 ax[row_num, col_num].set_xlim(x_lims) +466 ax[row_num, col_num].set_ylabel('Shap') +467 ax[row_num, col_num].set_ylim(shaps_lims) +468 +469 ax[0, 0].legend(loc='best') +470 plt.tight_layout() +471 return fig +472 +473def get_shap( +474 x: pd.DataFrame, +475 y: pd.DataFrame, +476 pipeline: dict[int, Pipeline] +477 ): +478 shaps = pd.DataFrame() +479 for fold in pipeline.keys(): +480 if len(pipeline.keys()) > 1: +481 fold_index = y[y.loc[:, 'Fold'] == fold].index +482 x_data = x.loc[fold_index, :] +483 else: +484 x_data = x +485 explainer = shap.KernelExplainer( +486 model=pipeline[fold][-1].predict, +487 data=x_data, +488 link='identity' +489 ) +490 shaps = pd.concat( +491 [ +492 shaps, +493 pd.DataFrame( +494 explainer.shap_values(x_data), +495 index=x_data.index, +496 columns=x_data.columns +497 ) +498 ] +499 ) +500 if len(pipeline.keys()) > 1: +501 shaps.loc[x_data.index, 'Fold'] = y.loc[x_data.index, 'Fold'] +502 else: +503 shaps.loc[:, 'Fold'] = 'Cross-Validated' +504 shaps = shaps.sort_index() +505 return shaps
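To tie the added `shap`, `shap_plot` and `get_shap` pieces together, a hedged end-to-end sketch; `x_df`, `y_df`, `cal` and the first two pipeline keys are hypothetical, while the variable-combination key follows the docstrings' example format:

```python
# Hypothetical inputs: x_df/y_df as prepared by Calibrate, which also
# provides the nested models dict via return_models().
graphs = Graphs(
    x=x_df, x_name='Sensor',
    y=y_df, y_name='Reference',
    target='target',
    models=cal.return_models()
)
graphs.lin_reg_plot()
graphs.shap(['Random Forest Regression', 'Standard Scaling', 'target + a + b'])
graphs.save_plots('plots', filetype='png')
```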

    @@ -549,237 +681,263 @@

    -
     14class Graphs:
    - 15    """
    - 16    Calculates errors between "true" and "predicted" measurements, plots
    - 17    graphs and returns all results
    - 18    """
    - 19
    - 20    def __init__(
    - 21        self,
    - 22        x: pd.DataFrame,
    - 23        x_name: str,
    - 24        y: pd.DataFrame,
    - 25        y_name: str,
    - 26        target: str,
    - 27        models: dict[str, dict[str, dict[str, dict[int, Pipeline]]]],
    - 28        style: str = 'bmh',
    - 29        backend: str = str(get_backend())
    - 30    ):
    - 31        """
    - 32        """
    - 33        self.x: pd.DataFrame = x
    - 34        """
    - 35        Independent variable(s) that are calibrated against `y`, the independent
    - 36        variable. Index should match `y`.
    - 37        """
    - 38        self.y: pd.DataFrame = y
    - 39        """
    - 40        Dependent variable used to calibrate the independent variables `x`.
    - 41        Index should match `x`.
    - 42        """
    - 43        self.x_name: str = x_name
    - 44        """
    - 45        Label for `x` measurements
    - 46        """
    - 47        self.y_name: str = y_name
    - 48        """
    - 49        Label for `y` measurements
    - 50        """
    - 51        self.target = target
    - 52        """
    - 53        Measurand in `y` to calibrate against
    - 54        """
    - 55        self.models: dict[str,
    - 56                         dict[str,  # Scaling Method
    - 57                              dict[str,  # Variables used
    - 58                                   dict[int,  # Fold
    - 59                                        Pipeline]]]] = models
    - 60        """
    - 61        The precalibrated models. They are stored in a nested structure as
    - 62        follows:
    - 63        1. Primary Key, name of the technique (e.g Lasso Regression).
    - 64        2. Scaling technique (e.g Yeo-Johnson Transform).
    - 65        3. Combination of variables used or `target` if calibration is
    - 66        univariate (e.g "`target` + a + b).
    - 67        4. Fold, which fold was used excluded from the calibration. If data
    - 68        if 5-fold cross validated, a key of 4 indicates the data was trained on
    - 69        folds 0-3.
    - 70
    - 71        ```mermaid
    - 72            stateDiagram-v2
    - 73              models --> Technique
    - 74              state Technique {
    - 75                [*] --> Scaling
    - 76                [*]: The calibration technique used
    - 77                [*]: (e.g "Lasso Regression")
    - 78                state Scaling {
    - 79                  [*] --> Variables
    - 80                  [*]: The scaling technique used
    - 81                  [*]: (e.g "Yeo-Johnson Transform")
    - 82                  state Variables {
    - 83                    [*] : The combination of variables used
    - 84                    [*] : (e.g "x + a + b")
    - 85                    [*] --> Fold
    - 86                    state Fold {
    - 87                     [*] : Which fold was excluded from training data
    - 88                     [*] : (e.g 4 indicates folds 0-3 were used to train)
    - 89                    }
    - 90                  }
    - 91                }
    - 92              }
    - 93        ```
    - 94
    - 95        """
    - 96        self.plots: dict[str,  # Technique
    - 97                         dict[str,  # Scaling Method
    - 98                              dict[str,  # Variables used
    - 99                                   dict[str,  # Plot Name
    -100                                        plt.figure.Figure]]]] = dict()
    -101        """
    -102        The plotted data, stored in a similar structure to `models`
    -103        1. Primary Key, name of the technique (e.g Lasso Regression).
    -104        2. Scaling technique (e.g Yeo-Johnson Transform).
    -105        3. Combination of variables used or `target` if calibration is
    -106        univariate (e.g "`target` + a + b).
    -107        4. Name of the plot (e.g. 'Bland-Altman')
    -108
    -109        ```mermaid
    -110            stateDiagram-v2
    -111              models --> Technique
    -112              state Technique {
    -113                [*] --> Scaling
    -114                [*]: The calibration technique used
    -115                [*]: (e.g "Lasso Regression")
    -116                state Scaling {
    -117                  [*] --> Variables
    -118                  [*]: The scaling technique used
    -119                  [*]: (e.g "Yeo-Johnson Transform")
    -120                  state Variables {
    -121                    [*] : The combination of variables used
    -122                    [*] : (e.g "x + a + b")
    -123                    [*] --> pn
    -124                    state "Plot Name" as pn {
    -125                     [*] : Name of the plot
    -126                     [*] : (e.g Bland-Altman)
    -127                    }
    -128                  }
    -129                }
    -130              }
    -131        ```
    -132
    -133        """
    -134        self.style: Union[str, Path] = style
    -135        """
    -136        Name of in-built matplotlib style or path to stylesheet
    -137        """
    -138        self.backend = backend
    -139        """
    -140        Matplotlib backend to use
    -141        """
    -142
    -143    def plot_meta(self, plot_func: Callable, name: str, **kwargs):
    -144        """
    -145        Iterates over data and creates plots using function specified in
    -146        `plot_func`
    -147
    -148        Should not be accessed directly, should instead be called by
    -149        another method
    -150
    -151        Parameters
    -152        ----------
    -153        plot_func : Callable
    -154            Function that returns matplotlib figure
    -155        name : str
    -156            Name to give plot, used as key in `plots` dict
    -157        **kwargs
    -158            Additional arguments passed to `plot_func`  
    -159        """
    -160        if not self.x.sort_index().index.to_series().eq(
    -161            self.y.sort_index().index.to_series()
    -162        ).all():
    -163            raise ValueError(
    -164                'Index of x and y do not match. Output of Calibrate class '
    -165                'in calidhayte should have matching indexes'
    -166            )
    -167        for technique, scaling_methods in self.models.items():
    -168            if self.plots.get(technique) is None:
    -169                self.plots[technique] = dict()
    -170            for scaling_method, var_combos in scaling_methods.items():
    -171                if self.plots[technique].get(scaling_method) is None:
    -172                    self.plots[technique][scaling_method] = dict()
    -173                for vars, folds in var_combos.items():
    -174                    if self.plots[technique][scaling_method].get(vars) is None:
    -175                        self.plots[technique][scaling_method][vars] = dict()
    -176                    pred = pd.Series()
    -177                    for fold, model in folds.items():
    -178                        x_data = self.x.loc[
    -179                                self.y[self.y.loc[:, 'Fold'] == fold].index,
    -180                                :
    -181                                ]
    -182                        pred = pd.concat(
    -183                                [
    -184                                    pred,
    -185                                    pd.Series(
    -186                                        index=x_data.index,
    -187                                        data=model.predict(x_data)
    -188                                        )
    -189                                ]
    -190                            )
    -191                    x = pred
    -192                    y = self.y.loc[:, self.target].reindex(x.index)
    -193                    fig = plot_func(
    -194                            x=x,
    -195                            y=y,
    -196                            x_name=self.x_name,
    -197                            y_name=self.y_name,
    -198                            **kwargs
    +            
     15class Graphs:
    + 16    """
    + 17    Calculates errors between "true" and "predicted" measurements, plots
    + 18    graphs and returns all results
    + 19    """
    + 20
    + 21    def __init__(
    + 22        self,
    + 23        x: pd.DataFrame,
    + 24        x_name: str,
    + 25        y: pd.DataFrame,
    + 26        y_name: str,
    + 27        target: str,
    + 28        models: dict[str, dict[str, dict[str, dict[int, Pipeline]]]],
    + 29        style: str = 'bmh',
    + 30        backend: str = str(get_backend())
    + 31    ):
    + 32        """
    + 33        """
    + 34        self.x: pd.DataFrame = x
    + 35        """
    + 36        Independent variable(s) that are calibrated against `y`,
    + 37        the independent variable. Index should match `y`.
    + 38        """
    + 39        self.y: pd.DataFrame = y
    + 40        """
    + 41        Dependent variable used to calibrate the independent variables `x`.
    + 42        Index should match `x`.
    + 43        """
    + 44        self.x_name: str = x_name
    + 45        """
    + 46        Label for `x` measurements
    + 47        """
    + 48        self.y_name: str = y_name
    + 49        """
    + 50        Label for `y` measurements
    + 51        """
    + 52        self.target = target
    + 53        """
    + 54        Measurand in `y` to calibrate against
    + 55        """
    + 56        self.models: dict[
    + 57            str, dict[  # Scaling Method
    + 58                str, dict[  # Variables used
    + 59                    str, dict[  # Fold
    + 60                        int, Pipeline]]]] = models
    + 61        """
    + 62        The precalibrated models. They are stored in a nested structure as
    + 63        follows:
    + 64        1. Primary Key, name of the technique (e.g Lasso Regression).
    + 65        2. Scaling technique (e.g Yeo-Johnson Transform).
    + 66        3. Combination of variables used or `target` if calibration is
    + 67        univariate (e.g "`target` + a + b).
    + 68        4. Fold, which fold was used excluded from the calibration. If data
    + 69        if 5-fold cross validated, a key of 4 indicates the data was trained on
    + 70        folds 0-3.
    + 71
    + 72        ```mermaid
    + 73            stateDiagram-v2
    + 74              models --> Technique
    + 75              state Technique {
    + 76                [*] --> Scaling
    + 77                [*]: The calibration technique used
    + 78                [*]: (e.g "Lasso Regression")
    + 79                state Scaling {
    + 80                  [*] --> Variables
    + 81                  [*]: The scaling technique used
    + 82                  [*]: (e.g "Yeo-Johnson Transform")
    + 83                  state Variables {
    + 84                    [*] : The combination of variables used
    + 85                    [*] : (e.g "x + a + b")
    + 86                    [*] --> Fold
    + 87                    state Fold {
    + 88                     [*] : Which fold was excluded from training data
    + 89                     [*] : (e.g 4 indicates folds 0-3 were used to train)
    + 90                    }
    + 91                  }
    + 92                }
    + 93              }
    + 94        ```
    + 95
    + 96        """
    + 97        self.plots: dict[str,  # Technique
    + 98                         dict[str,  # Scaling Method
    + 99                              dict[str,  # Variables used
    +100                                   dict[str,  # Plot Name
    +101                                        matplotlib.figure.Figure]]]] = dict()
    +102        """
    +103        The plotted data, stored in a similar structure to `models`
    +104        1. Primary Key, name of the technique (e.g Lasso Regression).
    +105        2. Scaling technique (e.g Yeo-Johnson Transform).
    +106        3. Combination of variables used or `target` if calibration is
    +107        univariate (e.g "`target` + a + b).
    +108        4. Name of the plot (e.g. 'Bland-Altman')
    +109
    +110        ```mermaid
    +111            stateDiagram-v2
    +112              models --> Technique
    +113              state Technique {
    +114                [*] --> Scaling
    +115                [*]: The calibration technique used
    +116                [*]: (e.g "Lasso Regression")
    +117                state Scaling {
    +118                  [*] --> Variables
    +119                  [*]: The scaling technique used
    +120                  [*]: (e.g "Yeo-Johnson Transform")
    +121                  state Variables {
    +122                    [*] : The combination of variables used
    +123                    [*] : (e.g "x + a + b")
    +124                    [*] --> pn
    +125                    state "Plot Name" as pn {
    +126                     [*] : Name of the plot
    +127                     [*] : (e.g Bland-Altman)
    +128                    }
    +129                  }
    +130                }
    +131              }
    +132        ```
    +133
    +134        """
    +135        self.style: Union[str, Path] = style
    +136        """
    +137        Name of in-built matplotlib style or path to stylesheet
    +138        """
    +139        self.backend = backend
    +140        """
    +141        Matplotlib backend to use
    +142        """
    +143
    +144    def plot_meta(
    +145        self,
    +146        plot_func: Callable[
    +147            ...,
    +148            matplotlib.figure.Figure
    +149        ],
    +150        name: str,
    +151        **kwargs
    +152    ):
    +153        """
    +154        Iterates over the data and creates plots using the function
    +155        specified in `plot_func`.
    +156
    +157        Should not be accessed directly; it should instead be called by
    +158        another method.
    +159
    +160        Parameters
    +161        ----------
    +162        plot_func : Callable
    +163            Function that returns matplotlib figure
    +164        name : str
    +165            Name to give plot, used as key in `plots` dict
    +166        **kwargs
    +167            Additional arguments passed to `plot_func`
    +168        """
    +169        if not self.x.sort_index().index.to_series().eq(
    +170            self.y.sort_index().index.to_series()
    +171        ).all():
    +172            raise ValueError(
    +173                'Index of x and y do not match. Output of Calibrate class '
    +174                'in calidhayte should have matching indexes'
    +175            )
    +176        for technique, scaling_methods in self.models.items():
    +177            if self.plots.get(technique) is None:
    +178                self.plots[technique] = dict()
    +179            for scaling_method, var_combos in scaling_methods.items():
    +180                if self.plots[technique].get(scaling_method) is None:
    +181                    self.plots[technique][scaling_method] = dict()
    +182                for vars, folds in var_combos.items():
    +183                    if self.plots[technique][scaling_method].get(vars) is None:
    +184                        self.plots[technique][scaling_method][vars] = dict()
    +185                    pred = pd.Series()
    +186                    for fold, model in folds.items():
    +187                        x_data = self.x.loc[
    +188                                self.y[self.y.loc[:, 'Fold'] == fold].index,
    +189                                :
    +190                                ]
    +191                        pred = pd.concat(
    +192                                [
    +193                                    pred,
    +194                                    pd.Series(
    +195                                        index=x_data.index,
    +196                                        data=model.predict(x_data)
    +197                                        )
    +198                                ]
     199                            )
    -200                    self.plots[technique][scaling_method][vars][name] = fig
    -201
    -202    def bland_altman_plot(self, title=None):
    -203        with plt.rc_context({'backend': self.backend}), \
    -204                plt.style.context(self.style):
    -205            self.plot_meta(bland_altman_plot, 'Bland-Altman', title=title)
    -206
    -207    def ecdf_plot(self, title=None):
    -208        with plt.rc_context({'backend': self.backend}), \
    -209                plt.style.context(self.style):
    -210            self.plot_meta(ecdf_plot, 'eCDF', title=title)
    -211
    -212    def lin_reg_plot(self, title=None):
    -213        with plt.rc_context({'backend': self.backend}), \
    -214                plt.style.context(self.style):
    -215            self.plot_meta(lin_reg_plot, 'Linear Regression', title=title)
    -216
    -217    def save_plots(
    -218        self,
    -219        path: str,
    -220        filetype: Union[
    -221           Literal['png', 'pgf', 'pdf'],
    -222           Iterable[Literal['png', 'pgf', 'pdf']]
    -223            ] = 'png'
    -224    ):
    -225        for technique, scaling_methods in self.plots.items():
    -226            for scaling_method, var_combos in scaling_methods.items():
    -227                for vars, figures in var_combos.items():
    -228                    for plot_type, fig in figures.items():
    -229                        plot_path = Path(
    -230                                f'{path}/{technique}/{plot_type}'
    -231                                )
    -232                        plot_path.mkdir(parents=True, exist_ok=True)
    -233                        if isinstance(filetype, str):
    -234                            fig.savefig(
    -235                                plot_path /
    -236                                f'{scaling_method} {vars}.{filetype}'
    -237                            )
    -238                        elif isinstance(filetype, Iterable):
    -239                            for ftype in filetype:
    -240                                fig.savefig(
    -241                                    plot_path /
    -242                                    f'{scaling_method} {vars}.{ftype}'
    -243                                )
    -244                        plt.close(fig)
    +200                    x = pred
    +201                    y = self.y.loc[:, self.target].reindex(x.index)
    +202                    fig = plot_func(
    +203                            x=x,
    +204                            y=y,
    +205                            x_name=self.x_name,
    +206                            y_name=self.y_name,
    +207                            **kwargs
    +208                            )
    +209                    self.plots[technique][scaling_method][vars][name] = fig
    +210
    +211    def bland_altman_plot(self, title=None):
    +212        with plt.rc_context({'backend': self.backend}), \
    +213                plt.style.context(self.style):
    +214            self.plot_meta(bland_altman_plot, 'Bland-Altman', title=title)
    +215
    +216    def ecdf_plot(self, title=None):
    +217        with plt.rc_context({'backend': self.backend}), \
    +218                plt.style.context(self.style):
    +219            self.plot_meta(ecdf_plot, 'eCDF', title=title)
    +220
    +221    def lin_reg_plot(self, title=None):
    +222        with plt.rc_context({'backend': self.backend}), \
    +223                plt.style.context(self.style):
    +224            self.plot_meta(lin_reg_plot, 'Linear Regression', title=title)
    +225
    +226    def shap(self, pipeline_keys: list[str], title=None):
    +227        x = self.x
    +228        y = self.y
    +229        pipeline = self.models[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]]
    +230        
    +231        if not self.plots.get(pipeline_keys[0]):
    +232            self.plots[pipeline_keys[0]] = dict()
    +233        if not self.plots[pipeline_keys[0]].get(pipeline_keys[1]):
    +234            self.plots[pipeline_keys[0]][pipeline_keys[1]] = dict()
    +235        if not self.plots[pipeline_keys[0]][pipeline_keys[1]].get(pipeline_keys[2]):
    +236            self.plots[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]] = dict()
    +237        with plt.rc_context({'backend': self.backend}), \
    +238                plt.style.context(self.style):
    +239            shap_df = get_shap(x, y, pipeline)
    +240            self.plots[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]]['Shap'] = shap_plot(shap_df, x)
    +241
    +242
    +243
    +244    def save_plots(
    +245        self,
    +246        path: str,
    +247        filetype: Union[
    +248           Literal['png', 'pgf', 'pdf'],
    +249           Iterable[Literal['png', 'pgf', 'pdf']]
    +250            ] = 'png'
    +251    ):
    +252        for technique, scaling_methods in self.plots.items():
    +253            for scaling_method, var_combos in scaling_methods.items():
    +254                for vars, figures in var_combos.items():
    +255                    for plot_type, fig in figures.items():
    +256                        plot_path = Path(
    +257                                f'{path}/{technique}/{plot_type}'
    +258                                )
    +259                        plot_path.mkdir(parents=True, exist_ok=True)
    +260                        if isinstance(filetype, str):
    +261                            fig.savefig(
    +262                                plot_path /
    +263                                f'{scaling_method} {vars}.{filetype}'
    +264                            )
    +265                        elif isinstance(filetype, Iterable):
    +266                            for ftype in filetype:
    +267                                fig.savefig(
    +268                                    plot_path /
    +269                                    f'{scaling_method} {vars}.{ftype}'
    +270                                )
    +271                        plt.close(fig)
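To make the expected inputs concrete, here is a minimal usage sketch rather than package output: it fabricates a small dataset, fits one pipeline per test fold, and hands everything to `Graphs`. The import path, column names, and dictionary keys are all assumptions for illustration only; in practice `y` (with its `Fold` column) and `models` would come from the calibration step.

```python
# Illustrative sketch only; the import path, keys and column names are assumed.
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.pipeline import Pipeline

from calidhayte.graphs import Graphs  # module path assumed

rng = np.random.default_rng(62)
x = pd.DataFrame({
    "x": rng.normal(10, 2, 100),
    "a": rng.normal(5, 1, 100),
    "b": rng.normal(1, 0.2, 100),
})
y = pd.DataFrame({
    "Target": x["x"] * 1.1 + rng.normal(0, 0.5, 100),
    "Fold": np.repeat(np.arange(5), 20),  # fold labels, as the calibration step would provide
})

# Technique -> Scaling -> Variables -> Fold -> fitted Pipeline
models = {
    "Lasso Regression": {
        "None": {
            "x + a + b": {
                fold: Pipeline([("lasso", Lasso())]).fit(
                    x.loc[y["Fold"] != fold],
                    y.loc[y["Fold"] != fold, "Target"],
                )
                for fold in range(5)
            }
        }
    }
}

graphs = Graphs(
    x=x,
    x_name="Sensor",
    y=y,
    y_name="Reference",
    target="Target",
    models=models,
)
```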
     
    @@ -792,134 +950,134 @@

    - Graphs( x: pandas.core.frame.DataFrame, x_name: str, y: pandas.core.frame.DataFrame, y_name: str, target: str, models: dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]], style: str = 'bmh', backend: str = 'agg')
    + Graphs( x: pandas.core.frame.DataFrame, x_name: str, y: pandas.core.frame.DataFrame, y_name: str, target: str, models: dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]], style: str = 'bmh', backend: str = 'TkAgg')
    -
     20    def __init__(
    - 21        self,
    - 22        x: pd.DataFrame,
    - 23        x_name: str,
    - 24        y: pd.DataFrame,
    - 25        y_name: str,
    - 26        target: str,
    - 27        models: dict[str, dict[str, dict[str, dict[int, Pipeline]]]],
    - 28        style: str = 'bmh',
    - 29        backend: str = str(get_backend())
    - 30    ):
    - 31        """
    - 32        """
    - 33        self.x: pd.DataFrame = x
    - 34        """
    - 35        Independent variable(s) that are calibrated against `y`, the independent
    - 36        variable. Index should match `y`.
    - 37        """
    - 38        self.y: pd.DataFrame = y
    - 39        """
    - 40        Dependent variable used to calibrate the independent variables `x`.
    - 41        Index should match `x`.
    - 42        """
    - 43        self.x_name: str = x_name
    - 44        """
    - 45        Label for `x` measurements
    - 46        """
    - 47        self.y_name: str = y_name
    - 48        """
    - 49        Label for `y` measurements
    - 50        """
    - 51        self.target = target
    - 52        """
    - 53        Measurand in `y` to calibrate against
    - 54        """
    - 55        self.models: dict[str,
    - 56                         dict[str,  # Scaling Method
    - 57                              dict[str,  # Variables used
    - 58                                   dict[int,  # Fold
    - 59                                        Pipeline]]]] = models
    - 60        """
    - 61        The precalibrated models. They are stored in a nested structure as
    - 62        follows:
    - 63        1. Primary Key, name of the technique (e.g Lasso Regression).
    - 64        2. Scaling technique (e.g Yeo-Johnson Transform).
    - 65        3. Combination of variables used or `target` if calibration is
    - 66        univariate (e.g "`target` + a + b).
    - 67        4. Fold, which fold was used excluded from the calibration. If data
    - 68        if 5-fold cross validated, a key of 4 indicates the data was trained on
    - 69        folds 0-3.
    - 70
    - 71        ```mermaid
    - 72            stateDiagram-v2
    - 73              models --> Technique
    - 74              state Technique {
    - 75                [*] --> Scaling
    - 76                [*]: The calibration technique used
    - 77                [*]: (e.g "Lasso Regression")
    - 78                state Scaling {
    - 79                  [*] --> Variables
    - 80                  [*]: The scaling technique used
    - 81                  [*]: (e.g "Yeo-Johnson Transform")
    - 82                  state Variables {
    - 83                    [*] : The combination of variables used
    - 84                    [*] : (e.g "x + a + b")
    - 85                    [*] --> Fold
    - 86                    state Fold {
    - 87                     [*] : Which fold was excluded from training data
    - 88                     [*] : (e.g 4 indicates folds 0-3 were used to train)
    - 89                    }
    - 90                  }
    - 91                }
    - 92              }
    - 93        ```
    - 94
    - 95        """
    - 96        self.plots: dict[str,  # Technique
    - 97                         dict[str,  # Scaling Method
    - 98                              dict[str,  # Variables used
    - 99                                   dict[str,  # Plot Name
    -100                                        plt.figure.Figure]]]] = dict()
    -101        """
    -102        The plotted data, stored in a similar structure to `models`
    -103        1. Primary Key, name of the technique (e.g Lasso Regression).
    -104        2. Scaling technique (e.g Yeo-Johnson Transform).
    -105        3. Combination of variables used or `target` if calibration is
    -106        univariate (e.g "`target` + a + b).
    -107        4. Name of the plot (e.g. 'Bland-Altman')
    -108
    -109        ```mermaid
    -110            stateDiagram-v2
    -111              models --> Technique
    -112              state Technique {
    -113                [*] --> Scaling
    -114                [*]: The calibration technique used
    -115                [*]: (e.g "Lasso Regression")
    -116                state Scaling {
    -117                  [*] --> Variables
    -118                  [*]: The scaling technique used
    -119                  [*]: (e.g "Yeo-Johnson Transform")
    -120                  state Variables {
    -121                    [*] : The combination of variables used
    -122                    [*] : (e.g "x + a + b")
    -123                    [*] --> pn
    -124                    state "Plot Name" as pn {
    -125                     [*] : Name of the plot
    -126                     [*] : (e.g Bland-Altman)
    -127                    }
    -128                  }
    -129                }
    -130              }
    -131        ```
    -132
    -133        """
    -134        self.style: Union[str, Path] = style
    -135        """
    -136        Name of in-built matplotlib style or path to stylesheet
    -137        """
    -138        self.backend = backend
    -139        """
    -140        Matplotlib backend to use
    -141        """
    +            
     21    def __init__(
    + 22        self,
    + 23        x: pd.DataFrame,
    + 24        x_name: str,
    + 25        y: pd.DataFrame,
    + 26        y_name: str,
    + 27        target: str,
    + 28        models: dict[str, dict[str, dict[str, dict[int, Pipeline]]]],
    + 29        style: str = 'bmh',
    + 30        backend: str = str(get_backend())
    + 31    ):
    + 32        """
    + 33        """
    + 34        self.x: pd.DataFrame = x
    + 35        """
    + 36        Independent variable(s) that are calibrated against `y`,
    + 37        the dependent variable. Index should match `y`.
    + 38        """
    + 39        self.y: pd.DataFrame = y
    + 40        """
    + 41        Dependent variable used to calibrate the independent variables `x`.
    + 42        Index should match `x`.
    + 43        """
    + 44        self.x_name: str = x_name
    + 45        """
    + 46        Label for `x` measurements
    + 47        """
    + 48        self.y_name: str = y_name
    + 49        """
    + 50        Label for `y` measurements
    + 51        """
    + 52        self.target = target
    + 53        """
    + 54        Measurand in `y` to calibrate against
    + 55        """
    + 56        self.models: dict[
    + 57            str, dict[  # Scaling Method
    + 58                str, dict[  # Variables used
    + 59                    str, dict[  # Fold
    + 60                        int, Pipeline]]]] = models
    + 61        """
    + 62        The precalibrated models. They are stored in a nested structure as
    + 63        follows:
    + 64        1. Primary Key, name of the technique (e.g. Lasso Regression).
    + 65        2. Scaling technique (e.g. Yeo-Johnson Transform).
    + 66        3. Combination of variables used, or `target` if calibration is
    + 67        univariate (e.g. "`target` + a + b").
    + 68        4. Fold, i.e. which fold was excluded from the calibration. If the
    + 69        data is 5-fold cross validated, a key of 4 indicates the model was
    + 70        trained on folds 0-3.
    + 71
    + 72        ```mermaid
    + 73            stateDiagram-v2
    + 74              models --> Technique
    + 75              state Technique {
    + 76                [*] --> Scaling
    + 77                [*]: The calibration technique used
    + 78                [*]: (e.g "Lasso Regression")
    + 79                state Scaling {
    + 80                  [*] --> Variables
    + 81                  [*]: The scaling technique used
    + 82                  [*]: (e.g "Yeo-Johnson Transform")
    + 83                  state Variables {
    + 84                    [*] : The combination of variables used
    + 85                    [*] : (e.g "x + a + b")
    + 86                    [*] --> Fold
    + 87                    state Fold {
    + 88                     [*] : Which fold was excluded from training data
    + 89                     [*] : (e.g 4 indicates folds 0-3 were used to train)
    + 90                    }
    + 91                  }
    + 92                }
    + 93              }
    + 94        ```
    + 95
    + 96        """
    + 97        self.plots: dict[str,  # Technique
    + 98                         dict[str,  # Scaling Method
    + 99                              dict[str,  # Variables used
    +100                                   dict[str,  # Plot Name
    +101                                        matplotlib.figure.Figure]]]] = dict()
    +102        """
    +103        The plotted data, stored in a similar structure to `models`:
    +104        1. Primary Key, name of the technique (e.g. Lasso Regression).
    +105        2. Scaling technique (e.g. Yeo-Johnson Transform).
    +106        3. Combination of variables used, or `target` if calibration is
    +107        univariate (e.g. "`target` + a + b").
    +108        4. Name of the plot (e.g. 'Bland-Altman')
    +109
    +110        ```mermaid
    +111            stateDiagram-v2
    +112              models --> Technique
    +113              state Technique {
    +114                [*] --> Scaling
    +115                [*]: The calibration technique used
    +116                [*]: (e.g "Lasso Regression")
    +117                state Scaling {
    +118                  [*] --> Variables
    +119                  [*]: The scaling technique used
    +120                  [*]: (e.g "Yeo-Johnson Transform")
    +121                  state Variables {
    +122                    [*] : The combination of variables used
    +123                    [*] : (e.g "x + a + b")
    +124                    [*] --> pn
    +125                    state "Plot Name" as pn {
    +126                     [*] : Name of the plot
    +127                     [*] : (e.g Bland-Altman)
    +128                    }
    +129                  }
    +130                }
    +131              }
    +132        ```
    +133
    +134        """
    +135        self.style: Union[str, Path] = style
    +136        """
    +137        Name of in-built matplotlib style or path to stylesheet
    +138        """
    +139        self.backend = backend
    +140        """
    +141        Matplotlib backend to use
    +142        """
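Reading one entry back out of the nested dictionary follows the four key levels described in the docstring above; the key strings below reuse the illustrative values from the earlier sketch and are not fixed names.

```python
# Technique -> Scaling -> Variables -> Fold: a fold key of 4 means folds 0-3
# were used for training, so rows with y["Fold"] == 4 are its held-out test set.
pipeline = graphs.models["Lasso Regression"]["None"]["x + a + b"][4]
test_idx = graphs.y[graphs.y["Fold"] == 4].index
held_out_pred = pipeline.predict(graphs.x.loc[test_idx, :])
```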
     
    @@ -934,8 +1092,8 @@

    -

    Independent variable(s) that are calibrated against y, the independent variable. Index should match y.

    +

    Independent variable(s) that are calibrated against y, the dependent variable. Index should match y.

    @@ -1042,7 +1200,7 @@

    - plots: 'dict[str, dict[str, dict[str, dict[str, plt.figure.Figure]]]]'
    + plots: dict[str, dict[str, dict[str, dict[str, matplotlib.figure.Figure]]]]
    @@ -1115,70 +1273,78 @@

    def
    - plot_meta(self, plot_func: Callable, name: str, **kwargs):
    + plot_meta( self, plot_func: Callable[..., matplotlib.figure.Figure], name: str, **kwargs):
    -
    143    def plot_meta(self, plot_func: Callable, name: str, **kwargs):
    -144        """
    -145        Iterates over data and creates plots using function specified in
    -146        `plot_func`
    -147
    -148        Should not be accessed directly, should instead be called by
    -149        another method
    -150
    -151        Parameters
    -152        ----------
    -153        plot_func : Callable
    -154            Function that returns matplotlib figure
    -155        name : str
    -156            Name to give plot, used as key in `plots` dict
    -157        **kwargs
    -158            Additional arguments passed to `plot_func`  
    -159        """
    -160        if not self.x.sort_index().index.to_series().eq(
    -161            self.y.sort_index().index.to_series()
    -162        ).all():
    -163            raise ValueError(
    -164                'Index of x and y do not match. Output of Calibrate class '
    -165                'in calidhayte should have matching indexes'
    -166            )
    -167        for technique, scaling_methods in self.models.items():
    -168            if self.plots.get(technique) is None:
    -169                self.plots[technique] = dict()
    -170            for scaling_method, var_combos in scaling_methods.items():
    -171                if self.plots[technique].get(scaling_method) is None:
    -172                    self.plots[technique][scaling_method] = dict()
    -173                for vars, folds in var_combos.items():
    -174                    if self.plots[technique][scaling_method].get(vars) is None:
    -175                        self.plots[technique][scaling_method][vars] = dict()
    -176                    pred = pd.Series()
    -177                    for fold, model in folds.items():
    -178                        x_data = self.x.loc[
    -179                                self.y[self.y.loc[:, 'Fold'] == fold].index,
    -180                                :
    -181                                ]
    -182                        pred = pd.concat(
    -183                                [
    -184                                    pred,
    -185                                    pd.Series(
    -186                                        index=x_data.index,
    -187                                        data=model.predict(x_data)
    -188                                        )
    -189                                ]
    -190                            )
    -191                    x = pred
    -192                    y = self.y.loc[:, self.target].reindex(x.index)
    -193                    fig = plot_func(
    -194                            x=x,
    -195                            y=y,
    -196                            x_name=self.x_name,
    -197                            y_name=self.y_name,
    -198                            **kwargs
    +            
    144    def plot_meta(
    +145        self,
    +146        plot_func: Callable[
    +147            ...,
    +148            matplotlib.figure.Figure
    +149        ],
    +150        name: str,
    +151        **kwargs
    +152    ):
    +153        """
    +154        Iterates over the data and creates plots using the function
    +155        specified in `plot_func`.
    +156
    +157        Should not be accessed directly; it should instead be called by
    +158        another method.
    +159
    +160        Parameters
    +161        ----------
    +162        plot_func : Callable
    +163            Function that returns matplotlib figure
    +164        name : str
    +165            Name to give plot, used as key in `plots` dict
    +166        **kwargs
    +167            Additional arguments passed to `plot_func`
    +168        """
    +169        if not self.x.sort_index().index.to_series().eq(
    +170            self.y.sort_index().index.to_series()
    +171        ).all():
    +172            raise ValueError(
    +173                'Index of x and y do not match. Output of Calibrate class '
    +174                'in calidhayte should have matching indexes'
    +175            )
    +176        for technique, scaling_methods in self.models.items():
    +177            if self.plots.get(technique) is None:
    +178                self.plots[technique] = dict()
    +179            for scaling_method, var_combos in scaling_methods.items():
    +180                if self.plots[technique].get(scaling_method) is None:
    +181                    self.plots[technique][scaling_method] = dict()
    +182                for vars, folds in var_combos.items():
    +183                    if self.plots[technique][scaling_method].get(vars) is None:
    +184                        self.plots[technique][scaling_method][vars] = dict()
    +185                    pred = pd.Series()
    +186                    for fold, model in folds.items():
    +187                        x_data = self.x.loc[
    +188                                self.y[self.y.loc[:, 'Fold'] == fold].index,
    +189                                :
    +190                                ]
    +191                        pred = pd.concat(
    +192                                [
    +193                                    pred,
    +194                                    pd.Series(
    +195                                        index=x_data.index,
    +196                                        data=model.predict(x_data)
    +197                                        )
    +198                                ]
     199                            )
    -200                    self.plots[technique][scaling_method][vars][name] = fig
    +200                    x = pred
    +201                    y = self.y.loc[:, self.target].reindex(x.index)
    +202                    fig = plot_func(
    +203                            x=x,
    +204                            y=y,
    +205                            x_name=self.x_name,
    +206                            y_name=self.y_name,
    +207                            **kwargs
    +208                            )
    +209                    self.plots[technique][scaling_method][vars][name] = fig
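`plot_meta` will accept any callable that takes `x`, `y`, `x_name`, `y_name` (plus optional keyword arguments) and returns a `matplotlib.figure.Figure`, so custom plots can be registered under their own key. Although the docstring notes it is normally called via the wrapper methods, calling it directly is the simplest way to add such a plot. A hedged sketch, with the function name and key made up for illustration:

```python
import matplotlib.pyplot as plt
import pandas as pd


def residual_plot(x: pd.Series, y: pd.Series, x_name: str, y_name: str, title=None):
    """Scatter the out-of-fold residuals (predicted minus reference)."""
    fig, ax = plt.subplots(figsize=(4, 4), dpi=200)
    ax.scatter(y, x - y, marker=".", alpha=0.75)
    ax.axhline(0.0, color="C1")
    ax.set_xlabel(y_name)
    ax.set_ylabel(f"{x_name} - {y_name}")
    if isinstance(title, str):
        fig.suptitle(title)
    return fig


# Stores a figure under the 'Residuals' key for every model combination
graphs.plot_meta(residual_plot, "Residuals", title="Out-of-fold residuals")
```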
     
    @@ -1212,10 +1378,10 @@
    Parameters
    -
    202    def bland_altman_plot(self, title=None):
    -203        with plt.rc_context({'backend': self.backend}), \
    -204                plt.style.context(self.style):
    -205            self.plot_meta(bland_altman_plot, 'Bland-Altman', title=title)
    +            
    211    def bland_altman_plot(self, title=None):
    +212        with plt.rc_context({'backend': self.backend}), \
    +213                plt.style.context(self.style):
    +214            self.plot_meta(bland_altman_plot, 'Bland-Altman', title=title)
     
    @@ -1233,10 +1399,10 @@
    Parameters
    -
    207    def ecdf_plot(self, title=None):
    -208        with plt.rc_context({'backend': self.backend}), \
    -209                plt.style.context(self.style):
    -210            self.plot_meta(ecdf_plot, 'eCDF', title=title)
    +            
    216    def ecdf_plot(self, title=None):
    +217        with plt.rc_context({'backend': self.backend}), \
    +218                plt.style.context(self.style):
    +219            self.plot_meta(ecdf_plot, 'eCDF', title=title)
     
    @@ -1254,10 +1420,42 @@
    Parameters
    -
    212    def lin_reg_plot(self, title=None):
    -213        with plt.rc_context({'backend': self.backend}), \
    -214                plt.style.context(self.style):
    -215            self.plot_meta(lin_reg_plot, 'Linear Regression', title=title)
    +            
    221    def lin_reg_plot(self, title=None):
    +222        with plt.rc_context({'backend': self.backend}), \
    +223                plt.style.context(self.style):
    +224            self.plot_meta(lin_reg_plot, 'Linear Regression', title=title)
    +
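Each wrapper method simply calls `plot_meta` with the corresponding plotting function and a fixed plot-name key, inside the configured style and backend contexts:

```python
graphs.bland_altman_plot(title="Lasso calibration")
graphs.ecdf_plot()
graphs.lin_reg_plot()

# The figures then sit under the fixed keys used by each wrapper, e.g.
# graphs.plots[technique][scaling][variables]["Bland-Altman"]
# graphs.plots[technique][scaling][variables]["eCDF"]
# graphs.plots[technique][scaling][variables]["Linear Regression"]
```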
    + def shap(self, pipeline_keys: list[str], title=None):
    226    def shap(self, pipeline_keys: list[str], title=None):
    +227        x = self.x
    +228        y = self.y
    +229        pipeline = self.models[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]]
    +230        
    +231        if not self.plots.get(pipeline_keys[0]):
    +232            self.plots[pipeline_keys[0]] = dict()
    +233        if not self.plots[pipeline_keys[0]].get(pipeline_keys[1]):
    +234            self.plots[pipeline_keys[0]][pipeline_keys[1]] = dict()
    +235        if not self.plots[pipeline_keys[0]][pipeline_keys[1]].get(pipeline_keys[2]):
    +236            self.plots[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]] = dict()
    +237        with plt.rc_context({'backend': self.backend}), \
    +238                plt.style.context(self.style):
    +239            shap_df = get_shap(x, y, pipeline)
    +240            self.plots[pipeline_keys[0]][pipeline_keys[1]][pipeline_keys[2]]['Shap'] = shap_plot(shap_df, x)
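`shap` explains one technique/scaling/variable combination at a time; `pipeline_keys` is the three-element path into `models`. The sketch below reuses the illustrative keys from earlier and assumes the `shap` package is installed (`KernelExplainer` can be slow on large datasets):

```python
graphs.shap(pipeline_keys=["Lasso Regression", "None", "x + a + b"])
shap_fig = graphs.plots["Lasso Regression"]["None"]["x + a + b"]["Shap"]
```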
     
    @@ -1275,34 +1473,34 @@
    Parameters
    -
    217    def save_plots(
    -218        self,
    -219        path: str,
    -220        filetype: Union[
    -221           Literal['png', 'pgf', 'pdf'],
    -222           Iterable[Literal['png', 'pgf', 'pdf']]
    -223            ] = 'png'
    -224    ):
    -225        for technique, scaling_methods in self.plots.items():
    -226            for scaling_method, var_combos in scaling_methods.items():
    -227                for vars, figures in var_combos.items():
    -228                    for plot_type, fig in figures.items():
    -229                        plot_path = Path(
    -230                                f'{path}/{technique}/{plot_type}'
    -231                                )
    -232                        plot_path.mkdir(parents=True, exist_ok=True)
    -233                        if isinstance(filetype, str):
    -234                            fig.savefig(
    -235                                plot_path /
    -236                                f'{scaling_method} {vars}.{filetype}'
    -237                            )
    -238                        elif isinstance(filetype, Iterable):
    -239                            for ftype in filetype:
    -240                                fig.savefig(
    -241                                    plot_path /
    -242                                    f'{scaling_method} {vars}.{ftype}'
    -243                                )
    -244                        plt.close(fig)
    +            
    244    def save_plots(
    +245        self,
    +246        path: str,
    +247        filetype: Union[
    +248           Literal['png', 'pgf', 'pdf'],
    +249           Iterable[Literal['png', 'pgf', 'pdf']]
    +250            ] = 'png'
    +251    ):
    +252        for technique, scaling_methods in self.plots.items():
    +253            for scaling_method, var_combos in scaling_methods.items():
    +254                for vars, figures in var_combos.items():
    +255                    for plot_type, fig in figures.items():
    +256                        plot_path = Path(
    +257                                f'{path}/{technique}/{plot_type}'
    +258                                )
    +259                        plot_path.mkdir(parents=True, exist_ok=True)
    +260                        if isinstance(filetype, str):
    +261                            fig.savefig(
    +262                                plot_path /
    +263                                f'{scaling_method} {vars}.{filetype}'
    +264                            )
    +265                        elif isinstance(filetype, Iterable):
    +266                            for ftype in filetype:
    +267                                fig.savefig(
    +268                                    plot_path /
    +269                                    f'{scaling_method} {vars}.{ftype}'
    +270                                )
    +271                        plt.close(fig)
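`save_plots` writes every stored figure to `<path>/<technique>/<plot name>/`, with the scaling method and variable combination forming the filename; passing an iterable of filetypes saves each figure in every format:

```python
graphs.save_plots("Output", filetype=["png", "pdf"])
# e.g. Output/Lasso Regression/Bland-Altman/None x + a + b.png
#      Output/Lasso Regression/Bland-Altman/None x + a + b.pdf
```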
     
    @@ -1321,10 +1519,10 @@
    Parameters
    -
    247def ecdf(data):
    -248    x = np.sort(data)
    -249    y = np.arange(1, len(data) + 1) / len(data)
    -250    return x, y
    +            
    274def ecdf(data):
    +275    x = np.sort(data)
    +276    y = np.arange(1, len(data) + 1) / len(data)
    +277    return x, y
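A worked example of the `ecdf` helper (assuming it is imported from the same module): the sorted values are paired with cumulative proportions 1/n, 2/n, ..., 1.

```python
import numpy as np

vals, props = ecdf(np.array([3.0, 1.0, 2.0, 4.0]))
# vals  -> array([1., 2., 3., 4.])
# props -> array([0.25, 0.5 , 0.75, 1.  ])
```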
     
    @@ -1342,53 +1540,53 @@
    Parameters
    -
    253def lin_reg_plot(
    -254        x: pd.Series,
    -255        y: pd.Series,
    -256        x_name: str,
    -257        y_name: str,
    -258        title: Optional[str] = None
    -259        ):
    -260    """
    -261    """
    -262    fig = plt.figure(figsize=(4, 4), dpi=200)
    -263    fig_gs = fig.add_gridspec(
    -264        2,
    -265        2,
    -266        width_ratios=(7, 2),
    -267        height_ratios=(2, 7),
    -268        left=0.1,
    -269        right=0.9,
    -270        bottom=0.1,
    -271        top=0.9,
    -272        wspace=0.0,
    -273        hspace=0.0,
    -274    )
    -275
    -276    scatter_ax = fig.add_subplot(fig_gs[1, 0])
    -277    histx_ax = fig.add_subplot(fig_gs[0, 0], sharex=scatter_ax)
    -278    histx_ax.axis("off")
    -279    histy_ax = fig.add_subplot(fig_gs[1, 1], sharey=scatter_ax)
    -280    histy_ax.axis("off")
    -281
    -282    max_value = max((y.max(), x.max()))
    -283    min_value = min((y.min(), x.min()))
    -284    scatter_ax.set_xlim(min_value - 3, max_value + 3)
    -285    scatter_ax.set_ylim(min_value - 3, max_value + 3)
    -286    scatter_ax.set_xlabel(x_name)
    -287    scatter_ax.set_ylabel(y_name)
    -288    scatter_ax.scatter(x, y, color="C0", marker='.', alpha=0.75)
    -289
    -290    binwidth = 7.5
    -291    xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))
    -292    lim = (int(xymax / binwidth) + 1) * binwidth
    -293
    -294    bins = np.arange(-lim, lim + binwidth, binwidth)
    -295    histx_ax.hist(x, bins=bins, color="C0")
    -296    histy_ax.hist(y, bins=bins, orientation="horizontal", color="C0")
    -297    if isinstance(title, str):
    -298        fig.suptitle(title)
    -299    return fig
    +            
    280def lin_reg_plot(
    +281        x: pd.Series,
    +282        y: pd.Series,
    +283        x_name: str,
    +284        y_name: str,
    +285        title: Optional[str] = None
    +286        ):
    +287    """
    +288    """
    +289    fig = plt.figure(figsize=(4, 4), dpi=200)
    +290    fig_gs = fig.add_gridspec(
    +291        2,
    +292        2,
    +293        width_ratios=(7, 2),
    +294        height_ratios=(2, 7),
    +295        left=0.1,
    +296        right=0.9,
    +297        bottom=0.1,
    +298        top=0.9,
    +299        wspace=0.0,
    +300        hspace=0.0,
    +301    )
    +302
    +303    scatter_ax = fig.add_subplot(fig_gs[1, 0])
    +304    histx_ax = fig.add_subplot(fig_gs[0, 0], sharex=scatter_ax)
    +305    histx_ax.axis("off")
    +306    histy_ax = fig.add_subplot(fig_gs[1, 1], sharey=scatter_ax)
    +307    histy_ax.axis("off")
    +308
    +309    max_value = max((y.max(), x.max()))
    +310    min_value = min((y.min(), x.min()))
    +311    scatter_ax.set_xlim(min_value - 3, max_value + 3)
    +312    scatter_ax.set_ylim(min_value - 3, max_value + 3)
    +313    scatter_ax.set_xlabel(x_name)
    +314    scatter_ax.set_ylabel(y_name)
    +315    scatter_ax.scatter(x, y, color="C0", marker='.', alpha=0.75)
    +316
    +317    binwidth = 7.5
    +318    xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))
    +319    lim = (int(xymax / binwidth) + 1) * binwidth
    +320
    +321    bins = list(np.arange(-lim, lim + binwidth, binwidth))
    +322    histx_ax.hist(x, bins=bins, color="C0")
    +323    histy_ax.hist(y, bins=bins, orientation="horizontal", color="C0")
    +324    if isinstance(title, str):
    +325        fig.suptitle(title)
    +326    return fig
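`lin_reg_plot` can also be called directly with any pair of aligned series; a small synthetic example (the data below is fabricated for illustration, and the function is assumed to be imported from this module):

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
reference = pd.Series(rng.normal(50, 10, 200))
measured = reference * 0.9 + rng.normal(0, 3, 200)

fig = lin_reg_plot(
    x=measured,
    y=reference,
    x_name="Measured",
    y_name="Reference",
    title="Synthetic comparison",
)
```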
     
    @@ -1406,58 +1604,58 @@
    Parameters
    -
    302def bland_altman_plot(
    -303        x: pd.DataFrame,
    -304        y: pd.Series,
    -305        title: Optional[str] = None,
    -306        **kwargs
    -307        ):
    -308    """
    -309    """
    -310    fig, ax = plt.subplots(figsize=(4, 4), dpi=200)
    -311    x_data = np.mean(np.vstack((x, y)).T, axis=1)
    -312    y_data = np.array(x) - np.array(y)
    -313    y_mean = np.mean(y_data)
    -314    y_sd = 1.96 * np.std(y_data)
    -315    max_diff_from_mean = max(
    -316        (y_data - y_mean).min(), (y_data - y_mean).max(), key=abs
    -317    )
    -318    text_adjust = (12 * max_diff_from_mean) / 300
    -319    ax.set_ylim(y_mean - max_diff_from_mean, y_mean + max_diff_from_mean)
    -320    ax.set_xlabel("Average of Measured and Reference")
    -321    ax.set_ylabel("Difference Between Measured and Reference")
    -322    ax.scatter(x_data, y_data, alpha=0.75)
    -323    ax.axline((0, y_mean), (1, y_mean), color="xkcd:vermillion")
    -324    ax.text(
    -325        max(x_data),
    -326        y_mean + text_adjust,
    -327        f"Mean: {y_mean:.2f}",
    -328        verticalalignment="bottom",
    -329        horizontalalignment="right",
    -330    )
    -331    ax.axline(
    -332        (0, y_mean + y_sd), (1, y_mean + y_sd), color="xkcd:fresh green"
    -333    )
    -334    ax.text(
    -335        max(x_data),
    -336        y_mean + y_sd + text_adjust,
    -337        f"1.96$\\sigma$: {y_mean + y_sd:.2f}",
    -338        verticalalignment="bottom",
    -339        horizontalalignment="right",
    -340    )
    -341    ax.axline(
    -342        (0, y_mean - y_sd), (1, y_mean - y_sd), color="xkcd:fresh green"
    -343    )
    -344    ax.text(
    -345        max(x_data),
    -346        y_mean - y_sd + text_adjust,
    -347        f"1.96$\\sigma$: -{y_sd:.2f}",
    -348        verticalalignment="bottom",
    -349        horizontalalignment="right",
    -350    )
    -351    if isinstance(title, str):
    -352        fig.suptitle(title)
    -353    return fig
    +            
    329def bland_altman_plot(
    +330        x: pd.DataFrame,
    +331        y: pd.Series,
    +332        title: Optional[str] = None,
    +333        **kwargs
    +334        ):
    +335    """
    +336    """
    +337    fig, ax = plt.subplots(figsize=(4, 4), dpi=200)
    +338    x_data = np.mean(np.vstack((x, y)).T, axis=1)
    +339    y_data = np.array(x) - np.array(y)
    +340    y_mean = np.mean(y_data)
    +341    y_sd = 1.96 * np.std(y_data)
    +342    max_diff_from_mean = max(
    +343        (y_data - y_mean).min(), (y_data - y_mean).max(), key=abs
    +344    )
    +345    text_adjust = (12 * max_diff_from_mean) / 300
    +346    ax.set_ylim(y_mean - max_diff_from_mean, y_mean + max_diff_from_mean)
    +347    ax.set_xlabel("Average of Measured and Reference")
    +348    ax.set_ylabel("Difference Between Measured and Reference")
    +349    ax.scatter(x_data, y_data, alpha=0.75)
    +350    ax.axline((0, y_mean), (1, y_mean), color="xkcd:vermillion")
    +351    ax.text(
    +352        max(x_data),
    +353        y_mean + text_adjust,
    +354        f"Mean: {y_mean:.2f}",
    +355        verticalalignment="bottom",
    +356        horizontalalignment="right",
    +357    )
    +358    ax.axline(
    +359        (0, y_mean + y_sd), (1, y_mean + y_sd), color="xkcd:fresh green"
    +360    )
    +361    ax.text(
    +362        max(x_data),
    +363        y_mean + y_sd + text_adjust,
    +364        f"1.96$\\sigma$: {y_mean + y_sd:.2f}",
    +365        verticalalignment="bottom",
    +366        horizontalalignment="right",
    +367    )
    +368    ax.axline(
    +369        (0, y_mean - y_sd), (1, y_mean - y_sd), color="xkcd:fresh green"
    +370    )
    +371    ax.text(
    +372        max(x_data),
    +373        y_mean - y_sd + text_adjust,
    +374        f"1.96$\\sigma$: -{y_sd:.2f}",
    +375        verticalalignment="bottom",
    +376        horizontalalignment="right",
    +377    )
    +378    if isinstance(title, str):
    +379        fig.suptitle(title)
    +380    return fig
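The horizontal lines drawn by `bland_altman_plot` are the mean difference and the limits of agreement at mean ± 1.96σ of the differences; recomputing them by hand for the synthetic series from the previous sketch:

```python
diff = np.array(measured) - np.array(reference)
mean_bias = diff.mean()
limits = (mean_bias - 1.96 * diff.std(), mean_bias + 1.96 * diff.std())

fig = bland_altman_plot(x=measured, y=reference, title="Synthetic Bland-Altman")
```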
     
    @@ -1475,34 +1673,162 @@
    Parameters
    -
    356def ecdf_plot(
    -357        x: pd.DataFrame,
    -358        y: pd.Series,
    -359        x_name: str,
    -360        y_name: str,
    -361        title: Optional[str] = None
    -362        ):
    -363    """
    -364    """
    -365    fig, ax = plt.subplots(figsize=(4, 4), dpi=200)
    -366    true_x, true_y = ecdf(y)
    -367    pred_x, pred_y = ecdf(x)
    -368    ax.set_ylim(0, 1)
    -369    ax.set_xlabel("Measurement")
    -370    ax.set_ylabel("Cumulative Total")
    -371    ax.plot(true_x, true_y, linestyle="none", marker=".", label=y_name)
    -372    ax.plot(
    -373        pred_x,
    -374        pred_y,
    -375        linestyle="none",
    -376        marker=".",
    -377        alpha=0.8,
    -378        label=x_name,
    -379    )
    -380    ax.legend()
    -381    if isinstance(title, str):
    -382        fig.suptitle(title)
    -383    return fig
    +            
    383def ecdf_plot(
    +384        x: pd.DataFrame,
    +385        y: pd.Series,
    +386        x_name: str,
    +387        y_name: str,
    +388        title: Optional[str] = None
    +389        ):
    +390    """
    +391    """
    +392    fig, ax = plt.subplots(figsize=(4, 4), dpi=200)
    +393    true_x, true_y = ecdf(y)
    +394    pred_x, pred_y = ecdf(x)
    +395    ax.set_ylim(0, 1)
    +396    ax.set_xlabel("Measurement")
    +397    ax.set_ylabel("Cumulative Total")
    +398    ax.plot(true_x, true_y, linestyle="none", marker=".", label=y_name)
    +399    ax.plot(
    +400        pred_x,
    +401        pred_y,
    +402        linestyle="none",
    +403        marker=".",
    +404        alpha=0.8,
    +405        label=x_name,
    +406    )
    +407    ax.legend()
    +408    if isinstance(title, str):
    +409        fig.suptitle(title)
    +410    return fig
    +
    + def shap_plot(shaps: pandas.core.frame.DataFrame, x: pandas.core.frame.DataFrame):
    412def shap_plot(shaps: pd.DataFrame, x: pd.DataFrame):
    +413    """
    +414    """
    +415    shaps_min = shaps.drop(['Fold'], axis=1).min(axis=None)
    +416    shaps_max = shaps.drop(['Fold'], axis=1).max(axis=None)
    +417    shaps_range = shaps_max - shaps_min
    +418    shaps_lims = (
    +419        shaps_min - (shaps_range * 0.1),
    +420        shaps_max + (shaps_range * 0.1)
    +421    )
    +422
    +423    num_of_cols = shaps.drop(['Fold'], axis=1).shape[1]
    +424
    +425    shape_of_scatters = (
    +426        int(np.ceil(num_of_cols / 2)),
    +427        (min(2, int(num_of_cols)))
    +428    )
    +429
    +430    fig, ax = plt.subplots(
    +431        *shape_of_scatters,
    +432        figsize=(
    +433           4 * shape_of_scatters[0],
    +434           4 * shape_of_scatters[1]
    +435        ),
    +436        dpi=200
    +437    )
    +438
    +439    for col_ind, col in enumerate(shaps.drop(['Fold'], axis=1).columns):
    +440        scatter_data = pd.concat(
    +441            [
    +442                x.loc[:, col].rename('Value'),
    +443                shaps.loc[:, col].rename('Shap'),
    +444                shaps.loc[:, 'Fold'].rename('Fold')
    +445            ],
    +446            axis=1
    +447        )
    +448        x_min = scatter_data.loc[:, 'Value'].min()
    +449        x_max = scatter_data.loc[:, 'Value'].max()
    +450        x_range = x_max - x_min
    +451        x_lims = (x_min - (x_range * 0.1), x_max + (x_range * 0.1))
    +452
    +453        row_num = int(np.floor(col_ind / 2))
    +454        col_num = col_ind % 2
    +455        for i, fold in enumerate(sorted(shaps.loc[:, 'Fold'].unique())):
    +456            scat_fold = scatter_data[scatter_data.loc[:, 'Fold'] == fold]
    +457            ax[row_num, col_num].scatter(
    +458                scat_fold['Value'],
    +459                scat_fold['Shap'],
    +460                c=f'C{i}',
    +461                label=f'Fold {fold}',
    +462                marker='.'
    +463            )
    +464        ax[row_num, col_num].set_title(col)
    +465        ax[row_num, col_num].set_xlabel('Value')
    +466        ax[row_num, col_num].set_xlim(x_lims)
    +467        ax[row_num, col_num].set_ylabel('Shap')
    +468        ax[row_num, col_num].set_ylim(shaps_lims)
    +469
    +470    ax[0, 0].legend(loc='best')
    +471    plt.tight_layout()
    +472    return fig
    +
    + def get_shap( x: pandas.core.frame.DataFrame, y: pandas.core.frame.DataFrame, pipeline: dict[int, sklearn.pipeline.Pipeline]):
    474def get_shap(
    +475    x: pd.DataFrame,
    +476    y: pd.DataFrame,
    +477    pipeline: dict[int, Pipeline]
    +478    ):
    +479    shaps = pd.DataFrame()
    +480    for fold in pipeline.keys():
    +481        if len(pipeline.keys()) > 1:
    +482            fold_index = y[y.loc[:, 'Fold'] == fold].index
    +483            x_data = x.loc[fold_index, :]
    +484        else:
    +485            x_data = x
    +486        explainer = shap.KernelExplainer(
    +487            model=pipeline[fold][-1].predict,
    +488            data=x_data,
    +489            link='identity'
    +490        )
    +491        shaps = pd.concat(
    +492            [
    +493                shaps,
    +494                pd.DataFrame(
    +495                    explainer.shap_values(x_data),
    +496                    index=x_data.index,
    +497                    columns=x_data.columns
    +498                )
    +499            ]
    +500        )
    +501        if len(pipeline.keys()) > 1:
    +502            shaps.loc[x_data.index, 'Fold'] = y.loc[x_data.index, 'Fold']
    +503        else:
    +504            shaps.loc[:, 'Fold'] = 'Cross-Validated'
    +505        shaps = shaps.sort_index()
    +506    return shaps
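`get_shap` fits a `shap.KernelExplainer` on the final estimator of each fold's pipeline, using that fold's held-out rows as both background and explained data, and concatenates the SHAP values alongside a `Fold` column; `shap_plot` then scatters feature value against SHAP value per feature, coloured by fold. A hedged end-to-end sketch reusing the earlier illustrative keys (the three-column `x` gives the 2×2 subplot grid that `shap_plot` indexes into):

```python
# Requires the shap package; KernelExplainer can be slow on large datasets.
fold_pipelines = graphs.models["Lasso Regression"]["None"]["x + a + b"]
shap_df = get_shap(x=graphs.x, y=graphs.y, pipeline=fold_pipelines)
fig = shap_plot(shap_df, graphs.x)
```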
     
diff --git a/docs/calidhayte/summary.html b/docs/calidhayte/summary.html
index d31c22b..e5156f4 100644
--- a/docs/calidhayte/summary.html
+++ b/docs/calidhayte/summary.html
@@ -125,95 +125,96 @@

     1import pathlib
      2
      3from matplotlib import get_backend
    - 4import matplotlib.pyplot as plt
    - 5import pandas as pd
    - 6
    + 4import matplotlib.figure
    + 5import matplotlib.pyplot as plt
    + 6import pandas as pd
      7
    - 8class Summary:
    - 9    """
    -10    """
    -11    def __init__(
    -12            self,
    -13            results: pd.DataFrame,
    -14            cols: list[str],
    -15            style: str = 'bmh',
    -16            backend: str = str(get_backend())
    -17            ):
    -18        """
    -19        """
    -20        self.results = results
    -21        print(self.results)
    -22        self.plots: dict[str, dict[str, plt.figure.Figure]] = dict()
    -23        self.cols: list[str] = cols
    -24        self.style = style
    -25        self.backend = backend
    -26
    -27    def boxplots(self):
    -28        """
    -29        """
    -30        self.plots["Box Plots"] = dict()
    -31        for label in self.results.index.names[:-1]:
    -32            for col in self.cols:
    -33                with plt.rc_context(
    -34                        {
    -35                            'backend': self.backend,
    -36                            'figure.dpi': 200
    -37                        }
    -38                    ), \
    -39                    plt.style.context(self.style):
    -40                    plot = self.results.loc[
    -41                            :, [col]
    -42                            ].boxplot(
    -43                                by=label,
    -44                                figsize=(
    -45                                    len(self.cols),
    -46                                    2 * round(len(self.cols)/2)
    -47                                    ),
    -48                                rot=90,
    -49                                fontsize=8,
    -50                                sym='.',
    -51                            )
    -52                    plot.title.set_size(8)
    -53                    plt.tight_layout()
    -54                    self.plots["Box Plots"][f'{label} {col}'] = plot
    -55                    plt.close()
    -56
    -57    def histograms(self):
    -58        """
    -59        """
    -60        self.plots["Histograms"] = dict()
    -61        for col in self.cols:
    -62            with plt.rc_context(
    -63                    {
    -64                        'backend': self.backend,
    -65                        'figure.dpi': 200
    -66                    }
    -67                ), \
    -68                plt.style.context(self.style):
    -69                plot = self.results.loc[
    -70                        :, col
    -71                        ].plot.hist(
    -72                        bins=30,
    -73                        figsize=(8, 4)
    -74                    )
    -75                plot.set_xlabel(col)
    -76                plot.title.set_size(8)
    -77                plt.tight_layout()
    -78                self.plots["Histograms"][col] = plot
    -79                plt.close()
    -80
    -81    def save_plots(self, path, filetype: str = 'png'):
    -82        """
    -83        """
    -84        for plot_type, plots in self.plots.items():
    -85            for variable, ax in plots.items():
    -86                plot_path = pathlib.Path(
    -87                        f'{path}/Summary'
    -88                        )
    -89                fig = ax.figure
    -90                plot_path.mkdir(parents=True, exist_ok=True)
    -91                fig.savefig(plot_path / f'{plot_type} {variable}.{filetype}')
    -92                plt.close(fig)
    + 8
    + 9class Summary:
    +10    """
    +11    """
    +12    def __init__(
    +13            self,
    +14            results: pd.DataFrame,
    +15            cols: list[str],
    +16            style: str = 'bmh',
    +17            backend: str = str(get_backend())
    +18            ):
    +19        """
    +20        """
    +21        self.results = results
    +22        print(self.results)
    +23        self.plots: dict[str, dict[str, matplotlib.figure.Figure]] = dict()
    +24        self.cols: list[str] = cols
    +25        self.style = style
    +26        self.backend = backend
    +27
    +28    def boxplots(self):
    +29        """
    +30        """
    +31        self.plots["Box Plots"] = dict()
    +32        for label in self.results.index.names[:-1]:
    +33            for col in self.cols:
    +34                with plt.rc_context(
    +35                        {
    +36                            'backend': self.backend,
    +37                            'figure.dpi': 200
    +38                        }
    +39                        ), \
    +40                        plt.style.context(self.style):
    +41                    plot = self.results.loc[
    +42                            :, [col]
    +43                            ].boxplot(
    +44                                by=label,
    +45                                figsize=(
    +46                                    len(self.cols),
    +47                                    2 * round(len(self.cols)/2)
    +48                                    ),
    +49                                rot=90,
    +50                                fontsize=8,
    +51                                sym='.',
    +52                            )
    +53                    plot.title.set_size(8)
    +54                    plt.tight_layout()
    +55                    self.plots["Box Plots"][f'{label} {col}'] = plot
    +56                    plt.close()
    +57
    +58    def histograms(self):
    +59        """
    +60        """
    +61        self.plots["Histograms"] = dict()
    +62        for col in self.cols:
    +63            with plt.rc_context(
    +64                    {
    +65                        'backend': self.backend,
    +66                        'figure.dpi': 200
    +67                    }
    +68                    ), \
    +69                    plt.style.context(self.style):
    +70                plot = self.results.loc[
    +71                        :, col
    +72                        ].plot.hist(
    +73                        bins=30,
    +74                        figsize=(8, 4)
    +75                    )
    +76                plot.set_xlabel(col)
    +77                plot.title.set_size(8)
    +78                plt.tight_layout()
    +79                self.plots["Histograms"][col] = plot
    +80                plt.close()
    +81
    +82    def save_plots(self, path, filetype: str = 'png'):
    +83        """
    +84        """
    +85        for plot_type, plots in self.plots.items():
    +86            for variable, ax in plots.items():
    +87                plot_path = pathlib.Path(
    +88                        f'{path}/Summary'
    +89                        )
    +90                fig = ax.figure
    +91                plot_path.mkdir(parents=True, exist_ok=True)
    +92                fig.savefig(plot_path / f'{plot_type} {variable}.{filetype}')
    +93                plt.close(fig)
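A usage sketch for `Summary`: `results` is assumed to be a table of error metrics indexed by (Technique, Scaling Method, Variables, Fold), as the box plot grouping over `results.index.names[:-1]` suggests; the metric column names, index levels and import path are assumptions, not fixed by the package.

```python
import numpy as np
import pandas as pd

from calidhayte.summary import Summary  # module path assumed

index = pd.MultiIndex.from_product(
    [["Lasso Regression", "Random Forest"], ["None"], ["x + a + b"], range(5)],
    names=["Technique", "Scaling Method", "Variables", "Fold"],
)
rng = np.random.default_rng(1)
results = pd.DataFrame(
    {
        "r2": rng.uniform(0.7, 0.99, len(index)),
        "Mean Absolute Error": rng.uniform(0.5, 2.0, len(index)),
    },
    index=index,
)

summary = Summary(results=results, cols=["r2", "Mean Absolute Error"])
summary.boxplots()    # one box plot per (index level, metric) pair
summary.histograms()  # one histogram per metric
summary.save_plots("Output")
```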
     
    @@ -229,91 +230,91 @@

    -
     9class Summary:
    -10    """
    -11    """
    -12    def __init__(
    -13            self,
    -14            results: pd.DataFrame,
    -15            cols: list[str],
    -16            style: str = 'bmh',
    -17            backend: str = str(get_backend())
    -18            ):
    -19        """
    -20        """
    -21        self.results = results
    -22        print(self.results)
    -23        self.plots: dict[str, dict[str, plt.figure.Figure]] = dict()
    -24        self.cols: list[str] = cols
    -25        self.style = style
    -26        self.backend = backend
    -27
    -28    def boxplots(self):
    -29        """
    -30        """
    -31        self.plots["Box Plots"] = dict()
    -32        for label in self.results.index.names[:-1]:
    -33            for col in self.cols:
    -34                with plt.rc_context(
    -35                        {
    -36                            'backend': self.backend,
    -37                            'figure.dpi': 200
    -38                        }
    -39                    ), \
    -40                    plt.style.context(self.style):
    -41                    plot = self.results.loc[
    -42                            :, [col]
    -43                            ].boxplot(
    -44                                by=label,
    -45                                figsize=(
    -46                                    len(self.cols),
    -47                                    2 * round(len(self.cols)/2)
    -48                                    ),
    -49                                rot=90,
    -50                                fontsize=8,
    -51                                sym='.',
    -52                            )
    -53                    plot.title.set_size(8)
    -54                    plt.tight_layout()
    -55                    self.plots["Box Plots"][f'{label} {col}'] = plot
    -56                    plt.close()
    -57
    -58    def histograms(self):
    -59        """
    -60        """
    -61        self.plots["Histograms"] = dict()
    -62        for col in self.cols:
    -63            with plt.rc_context(
    -64                    {
    -65                        'backend': self.backend,
    -66                        'figure.dpi': 200
    -67                    }
    -68                ), \
    -69                plt.style.context(self.style):
    -70                plot = self.results.loc[
    -71                        :, col
    -72                        ].plot.hist(
    -73                        bins=30,
    -74                        figsize=(8, 4)
    -75                    )
    -76                plot.set_xlabel(col)
    -77                plot.title.set_size(8)
    -78                plt.tight_layout()
    -79                self.plots["Histograms"][col] = plot
    -80                plt.close()
    -81
    -82    def save_plots(self, path, filetype: str = 'png'):
    -83        """
    -84        """
    -85        for plot_type, plots in self.plots.items():
    -86            for variable, ax in plots.items():
    -87                plot_path = pathlib.Path(
    -88                        f'{path}/Summary'
    -89                        )
    -90                fig = ax.figure
    -91                plot_path.mkdir(parents=True, exist_ok=True)
    -92                fig.savefig(plot_path / f'{plot_type} {variable}.{filetype}')
    -93                plt.close(fig)
    +            
    10class Summary:
    +11    """
    +12    """
    +13    def __init__(
    +14            self,
    +15            results: pd.DataFrame,
    +16            cols: list[str],
    +17            style: str = 'bmh',
    +18            backend: str = str(get_backend())
    +19            ):
    +20        """
    +21        """
    +22        self.results = results
    +23        print(self.results)
    +24        self.plots: dict[str, dict[str, matplotlib.figure.Figure]] = dict()
    +25        self.cols: list[str] = cols
    +26        self.style = style
    +27        self.backend = backend
    +28
    +29    def boxplots(self):
    +30        """
    +31        """
    +32        self.plots["Box Plots"] = dict()
    +33        for label in self.results.index.names[:-1]:
    +34            for col in self.cols:
    +35                with plt.rc_context(
    +36                        {
    +37                            'backend': self.backend,
    +38                            'figure.dpi': 200
    +39                        }
    +40                        ), \
    +41                        plt.style.context(self.style):
    +42                    plot = self.results.loc[
    +43                            :, [col]
    +44                            ].boxplot(
    +45                                by=label,
    +46                                figsize=(
    +47                                    len(self.cols),
    +48                                    2 * round(len(self.cols)/2)
    +49                                    ),
    +50                                rot=90,
    +51                                fontsize=8,
    +52                                sym='.',
    +53                            )
    +54                    plot.title.set_size(8)
    +55                    plt.tight_layout()
    +56                    self.plots["Box Plots"][f'{label} {col}'] = plot
    +57                    plt.close()
    +58
    +59    def histograms(self):
    +60        """
    +61        """
    +62        self.plots["Histograms"] = dict()
    +63        for col in self.cols:
    +64            with plt.rc_context(
    +65                    {
    +66                        'backend': self.backend,
    +67                        'figure.dpi': 200
    +68                    }
    +69                    ), \
    +70                    plt.style.context(self.style):
    +71                plot = self.results.loc[
    +72                        :, col
    +73                        ].plot.hist(
    +74                        bins=30,
    +75                        figsize=(8, 4)
    +76                    )
    +77                plot.set_xlabel(col)
    +78                plot.title.set_size(8)
    +79                plt.tight_layout()
    +80                self.plots["Histograms"][col] = plot
    +81                plt.close()
    +82
    +83    def save_plots(self, path, filetype: str = 'png'):
    +84        """
    +85        """
    +86        for plot_type, plots in self.plots.items():
    +87            for variable, ax in plots.items():
    +88                plot_path = pathlib.Path(
    +89                        f'{path}/Summary'
    +90                        )
    +91                fig = ax.figure
    +92                plot_path.mkdir(parents=True, exist_ok=True)
    +93                fig.savefig(plot_path / f'{plot_type} {variable}.{filetype}')
    +94                plt.close(fig)
     
    @@ -323,27 +324,27 @@

    - Summary( results: pandas.core.frame.DataFrame, cols: list[str], style: str = 'bmh', backend: str = 'agg') + Summary( results: pandas.core.frame.DataFrame, cols: list[str], style: str = 'bmh', backend: str = 'TkAgg')
    -
    12    def __init__(
    -13            self,
    -14            results: pd.DataFrame,
    -15            cols: list[str],
    -16            style: str = 'bmh',
    -17            backend: str = str(get_backend())
    -18            ):
    -19        """
    -20        """
    -21        self.results = results
    -22        print(self.results)
    -23        self.plots: dict[str, dict[str, plt.figure.Figure]] = dict()
    -24        self.cols: list[str] = cols
    -25        self.style = style
    -26        self.backend = backend
    +            
    13    def __init__(
    +14            self,
    +15            results: pd.DataFrame,
    +16            cols: list[str],
    +17            style: str = 'bmh',
    +18            backend: str = str(get_backend())
    +19            ):
    +20        """
    +21        """
    +22        self.results = results
    +23        print(self.results)
    +24        self.plots: dict[str, dict[str, matplotlib.figure.Figure]] = dict()
    +25        self.cols: list[str] = cols
    +26        self.style = style
    +27        self.backend = backend
     
    @@ -363,7 +364,7 @@

    - plots: 'dict[str, dict[str, plt.figure.Figure]]' + plots: dict[str, dict[str, matplotlib.figure.Figure]]
    @@ -416,35 +417,35 @@

    -
    28    def boxplots(self):
    -29        """
    -30        """
    -31        self.plots["Box Plots"] = dict()
    -32        for label in self.results.index.names[:-1]:
    -33            for col in self.cols:
    -34                with plt.rc_context(
    -35                        {
    -36                            'backend': self.backend,
    -37                            'figure.dpi': 200
    -38                        }
    -39                    ), \
    -40                    plt.style.context(self.style):
    -41                    plot = self.results.loc[
    -42                            :, [col]
    -43                            ].boxplot(
    -44                                by=label,
    -45                                figsize=(
    -46                                    len(self.cols),
    -47                                    2 * round(len(self.cols)/2)
    -48                                    ),
    -49                                rot=90,
    -50                                fontsize=8,
    -51                                sym='.',
    -52                            )
    -53                    plot.title.set_size(8)
    -54                    plt.tight_layout()
    -55                    self.plots["Box Plots"][f'{label} {col}'] = plot
    -56                    plt.close()
    +            
    29    def boxplots(self):
    +30        """
    +31        """
    +32        self.plots["Box Plots"] = dict()
    +33        for label in self.results.index.names[:-1]:
    +34            for col in self.cols:
    +35                with plt.rc_context(
    +36                        {
    +37                            'backend': self.backend,
    +38                            'figure.dpi': 200
    +39                        }
    +40                        ), \
    +41                        plt.style.context(self.style):
    +42                    plot = self.results.loc[
    +43                            :, [col]
    +44                            ].boxplot(
    +45                                by=label,
    +46                                figsize=(
    +47                                    len(self.cols),
    +48                                    2 * round(len(self.cols)/2)
    +49                                    ),
    +50                                rot=90,
    +51                                fontsize=8,
    +52                                sym='.',
    +53                            )
    +54                    plot.title.set_size(8)
    +55                    plt.tight_layout()
    +56                    self.plots["Box Plots"][f'{label} {col}'] = plot
    +57                    plt.close()
     
    @@ -462,29 +463,29 @@

    -
    58    def histograms(self):
    -59        """
    -60        """
    -61        self.plots["Histograms"] = dict()
    -62        for col in self.cols:
    -63            with plt.rc_context(
    -64                    {
    -65                        'backend': self.backend,
    -66                        'figure.dpi': 200
    -67                    }
    -68                ), \
    -69                plt.style.context(self.style):
    -70                plot = self.results.loc[
    -71                        :, col
    -72                        ].plot.hist(
    -73                        bins=30,
    -74                        figsize=(8, 4)
    -75                    )
    -76                plot.set_xlabel(col)
    -77                plot.title.set_size(8)
    -78                plt.tight_layout()
    -79                self.plots["Histograms"][col] = plot
    -80                plt.close()
    +            
    59    def histograms(self):
    +60        """
    +61        """
    +62        self.plots["Histograms"] = dict()
    +63        for col in self.cols:
    +64            with plt.rc_context(
    +65                    {
    +66                        'backend': self.backend,
    +67                        'figure.dpi': 200
    +68                    }
    +69                    ), \
    +70                    plt.style.context(self.style):
    +71                plot = self.results.loc[
    +72                        :, col
    +73                        ].plot.hist(
    +74                        bins=30,
    +75                        figsize=(8, 4)
    +76                    )
    +77                plot.set_xlabel(col)
    +78                plot.title.set_size(8)
    +79                plt.tight_layout()
    +80                self.plots["Histograms"][col] = plot
    +81                plt.close()
     
    @@ -502,18 +503,18 @@

    -
    82    def save_plots(self, path, filetype: str = 'png'):
    -83        """
    -84        """
    -85        for plot_type, plots in self.plots.items():
    -86            for variable, ax in plots.items():
    -87                plot_path = pathlib.Path(
    -88                        f'{path}/Summary'
    -89                        )
    -90                fig = ax.figure
    -91                plot_path.mkdir(parents=True, exist_ok=True)
    -92                fig.savefig(plot_path / f'{plot_type} {variable}.{filetype}')
    -93                plt.close(fig)
    +            
    83    def save_plots(self, path, filetype: str = 'png'):
    +84        """
    +85        """
    +86        for plot_type, plots in self.plots.items():
    +87            for variable, ax in plots.items():
    +88                plot_path = pathlib.Path(
    +89                        f'{path}/Summary'
    +90                        )
    +91                fig = ax.figure
    +92                plot_path.mkdir(parents=True, exist_ok=True)
    +93                fig.savefig(plot_path / f'{plot_type} {variable}.{filetype}')
    +94                plt.close(fig)
     
diff --git a/docs/search.js b/docs/search.js index be17cce..7166616 100644 --- a/docs/search.js +++ b/docs/search.js @@ -1,6 +1,6 @@ window.pdocSearch = (function(){ /* minified elasticlunr search library omitted; the indexed documentation content follows */

calidhayte

    \n\n

    Contact: CaderIdrisGH@outlook.com

    \n\n

    \"Tests\"

    \n\n
    \n\n

    Table of Contents

    \n\n
1. Summary
2. Main Features
3. How to Install
4. Dependencies
5. Example Usage
6. Acknowledgements
    \n\n
    \n\n

    Summary

    \n\n

calidhayte calibrates one set of measurements against another, using a variety of parametric and non-parametric techniques.\nThe datasets are split by stratified k-fold cross-validation, so the distribution of 'true' measurements is consistent across all folds.\nIt can then perform multiple error calculations to validate the calibrations, as well as produce several graphs to visualise them.

    \n\n
    \n\n

    Main Features

    \n\n
• Calibrate one set of measurements (cross-comparing all available secondary variables) against a 'true' set
  • A suite of calibration methods is available, including Bayesian regression
• Perform a suite of error calculations on the resulting calibration
• Visualise the results of the calibration
• Summarise calibrations to highlight the best-performing techniques
    \n\n
    \n\n

    How to install

    \n\n

    pip

    \n\n
    \n
    pip install git+https://github.com/CaderIdris/calidhayte@release_tag\n
    \n
    \n\n

    conda

    \n\n
    \n
    conda install git pip\npip install git+https://github.com/CaderIdris/calidhayte@release_tag \n
    \n
    \n\n

    The release tags can be found in the sidebar

    \n\n
    \n\n

    Dependencies

    \n\n

    Please see Pipfile.

    \n\n
    \n\n

    Example Usage

    \n\n

    This module requires two dataframes as a prerequisite.

    \n\n

    Independent Measurements

Example x_df: a date-indexed dataframe (2022-01-01 to 2022-09-30) with numeric columns x, a, b, c, d and e; nan values are permitted.
    \n\n

    Dependent Measurements

|            | x   |
| 2022-01-02 | 1   |
| 2022-01-05 | 3   |
| ...        | ... |
| 2022-09-29 | nan |
| 2022-09-30 | 37  |
| 2022-10-01 | 3   |
    \n\n
• The two dataframes are joined on the index with an inner join, so the indices do not have to match initially
• nan values can be present
• More than one column can be present for the dependent measurements, but only 'Values' will be used
• The index can contain date objects, datetime objects or integers. The index values should be unique. Strings are untested and may cause unexpected behaviour
    \n\n
    \n
from calidhayte import Calibrate, Results, Graphs, Summary\n\n# x_df is a dataframe containing multiple columns of independent measurements.\n# The primary measurement here is the 'x' column; the other measurement columns can have any name.\n# y_df is a dataframe containing the dependent measurement in its 'x' column.\n\ncoeffs = Calibrate(\n    x_data=x_df,\n    y_data=y_df,\n    target='x'\n)\n\ncoeffs.linreg()\ncoeffs.theil_sen()\ncoeffs.random_forest(n_estimators=500, max_features=1.0)\n\nmodels = coeffs.return_models()\n\nresults = Results(\n    x=x_df,\n    y=y_df,\n    target='x',\n    models=models\n)\n\nresults.r2()\nresults.median_absolute()\nresults.max()\n\nresults_df = results.return_errors()\nresults_df.to_csv('results.csv')\n\ngraphs = Graphs(\n    x=x_df,\n    y=y_df,\n    target='x',\n    models=models,\n    x_name='x',\n    y_name='y'\n)\ngraphs.ecdf_plot()\ngraphs.lin_reg_plot()\ngraphs.save_plots()\n
    \n
    \n\n
    \n\n
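The example above imports Summary but never calls it. Going by the Summary class shown in the diff earlier (a constructor taking a results DataFrame and a list of metric columns, plus boxplots, histograms and save_plots methods), a minimal sketch of how it could follow on from results_df is given below; the output path is illustrative.

summary = Summary(
    results=results_df,
    cols=list(results_df.columns)  # error metrics to summarise
)
summary.boxplots()    # box plots of each metric, grouped by each index level
summary.histograms()  # histogram of each metric
summary.save_plots(path='Output')  # figures are written to Output/Summary/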

    Acknowledgements

    \n\n

Many thanks to James Murphy at mCoding, whose excellent tutorial Automated Testing in Python and associated repository helped a lot when structuring this package.

    \n"}, "calidhayte.calibrate": {"fullname": "calidhayte.calibrate", "modulename": "calidhayte.calibrate", "kind": "module", "doc": "

    Contains code used to perform a range of univariate and multivariate\nregressions on provided data.

    \n\n

    Acts as a wrapper for scikit-learn 1, XGBoost 2 and PyMC (via Bambi)\n3

    \n\n\n"}, "calidhayte.calibrate.cont_strat_folds": {"fullname": "calidhayte.calibrate.cont_strat_folds", "modulename": "calidhayte.calibrate", "qualname": "cont_strat_folds", "kind": "function", "doc": "

    Creates stratified k-folds on continuous variable

    \n\n

df : pd.DataFrame\n Target data to stratify on.\ntarget_var : str\n Target feature name.\nsplits : int, default=5\n Number of folds to make.\nstrat_groups : int, default=5\n Number of groups to split the data into for stratification.\nseed : int, default=62\n Random state to use.

    \n\n
    Returns
    \n\n
      \n
• pd.DataFrame: y_df with an added 'Fold' column, specifying which test-data fold\neach row corresponds to.
    • \n
    \n\n
    Examples
    \n\n
    \n
>>> df = pd.read_csv('data.csv')\n>>> df\n|    | x | a | b |\n|    |   |   |   |\n|  0 |2.3|1.8|7.2|\n|  1 |3.2|9.6|4.5|\n|....|...|...|...|\n|1000|2.3|4.5|2.2|\n>>> df_with_folds = cont_strat_folds(\n        df=df,\n        target_var='a',\n        splits=3,\n        strat_groups=3,\n        seed=78\n    )\n>>> df_with_folds\n|    | x | a | b |Fold|\n|    |   |   |   |    |\n|  0 |2.3|1.8|7.2| 2  |\n|  1 |3.2|9.6|4.5| 1  |\n|....|...|...|...|....|\n|1000|2.3|4.5|2.2| 0  |\n
    \n
    \n\n

    All folds should have a roughly equal distribution of values for 'a'

    \n", "signature": "(\tdf: pandas.core.frame.DataFrame,\ttarget_var: str,\tsplits: int = 5,\tstrat_groups: int = 5,\tseed: int = 62) -> pandas.core.frame.DataFrame:", "funcdef": "def"}, "calidhayte.calibrate.Calibrate": {"fullname": "calidhayte.calibrate.Calibrate", "modulename": "calidhayte.calibrate", "qualname": "Calibrate", "kind": "class", "doc": "

    Calibrate x against y using a range of different methods provided by\nscikit-learn1, xgboost2 and PyMC (via Bambi)3.

    \n\n
    Examples
    \n\n
    \n
    >>> from calidhayte.calibrate import Calibrate\n>>> import pandas as pd\n>>>\n>>> x = pd.read_csv('independent.csv')\n>>> x\n|   | a | b |\n| 0 |2.3|3.2|\n| 1 |3.4|3.1|\n|...|...|...|\n|100|3.7|2.1|\n>>>\n>>> y = pd.read_csv('dependent.csv')\n>>> y\n|   | a |\n| 0 |7.8|\n| 1 |9.9|\n|...|...|\n|100|9.5|\n>>>\n>>> calibration = Calibrate(\n    x_data=x,\n    y_data=y,\n    target='a',\n    folds=5,\n    strat_groups=5,\n    scaler = [\n        'Standard Scale',\n        'MinMax Scale'\n        ],\n    seed=62\n)\n>>> calibration.linreg()\n>>> calibration.lars()\n>>> calibration.omp()\n>>> calibration.ransac()\n>>> calibration.random_forest()\n>>>\n>>> models = calibration.return_models()\n>>> list(models.keys())\n[\n    'Linear Regression',\n    'Least Angle Regression',\n    'Orthogonal Matching Pursuit',\n    'RANSAC',\n    'Random Forest'\n]\n>>> list(models['Linear Regression'].keys())\n['Standard Scale', 'MinMax Scale']\n>>> list(models['Linear Regression']['Standard Scale'].keys())\n['a', 'a + b']\n>>> list(models['Linear Regression']['Standard Scale']['a'].keys())\n[0, 1, 2, 3, 4]\n>>> type(models['Linear Regression']['Standard Scale']['a'][0])\n<class sklearn.pipeline.Pipeline>\n>>> pipeline = models['Linear Regression']['Standard Scale']['a'][0]\n>>> x_new = pd.read_csv('independent_new.csv')\n>>> x_new\n|   | a | b |\n| 0 |3.5|2.7|\n| 1 |4.0|1.1|\n|...|...|...|\n|100|2.3|2.1|\n>>> pipeline.transform(x_new)\n|   | a |\n| 0 |9.7|\n| 1 |9.1|\n|...|...|\n|100|6.7|\n
    \n
    \n\n\n"}, "calidhayte.calibrate.Calibrate.__init__": {"fullname": "calidhayte.calibrate.Calibrate.__init__", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.__init__", "kind": "function", "doc": "

    Initialises class

    \n\n

    Used to compare one set of measurements against another.\nIt can perform both univariate and multivariate regression, though\nsome techniques can only do one or the other. Multivariate regression\ncan only be performed when secondary variables are provided.

    \n\n
    Parameters
    \n\n
• x_data (pd.DataFrame):\nData to be calibrated.
• y_data (pd.DataFrame):\n'True' data to calibrate against.
• target (str):\nColumn name of the primary feature to use in calibration; must be\nthe name of a column in both x_data and y_data.
• folds (int, default=5):\nNumber of folds to split the data into, using stratified k-fold.
• strat_groups (int, default=10):\nNumber of groups to stratify against; the data will be split into\nn equally sized bins where n is the value of strat_groups.
• scaler (iterable of {'None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson Transform', 'Box-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)'} or {'All', 'None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson Transform', 'Box-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)'}, default='None'):\nThe scaling/transform method (or list of methods) to apply to the data.
• seed (int, default=62):\nRandom state to use when shuffling and splitting the data into n\nfolds. Ensures repeatability.
    \n\n
    Raises
    \n\n
      \n
• ValueError: Raised if the target variable (e.g. 'NO2') is not a column name in\nboth dataframes, or if scaler is not a str, tuple or list.
    • \n
    \n", "signature": "(\tx_data: pandas.core.frame.DataFrame,\ty_data: pandas.core.frame.DataFrame,\ttarget: str,\tfolds: int = 5,\tstrat_groups: int = 10,\tscaler: Union[collections.abc.Iterable[Literal['None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson TransformBox-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)']], Literal['All', 'None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson TransformBox-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)']] = 'None',\tseed: int = 62)"}, "calidhayte.calibrate.Calibrate.x_data": {"fullname": "calidhayte.calibrate.Calibrate.x_data", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.x_data", "kind": "variable", "doc": "

    The data to be calibrated.

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.calibrate.Calibrate.target": {"fullname": "calidhayte.calibrate.Calibrate.target", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.target", "kind": "variable", "doc": "

    The name of the column in both x_data and y_data that\nwill be used as the x and y variables in the calibration.

    \n", "annotation": ": str"}, "calidhayte.calibrate.Calibrate.scaler_list": {"fullname": "calidhayte.calibrate.Calibrate.scaler_list", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.scaler_list", "kind": "variable", "doc": "

    Keys for scaling algorithms available in the pipelines

    \n", "annotation": ": dict[str, typing.Any]"}, "calidhayte.calibrate.Calibrate.scaler": {"fullname": "calidhayte.calibrate.Calibrate.scaler", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.scaler", "kind": "variable", "doc": "

    The scaling algorithm(s) to preprocess the data with

    \n", "annotation": ": list[str]"}, "calidhayte.calibrate.Calibrate.y_data": {"fullname": "calidhayte.calibrate.Calibrate.y_data", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.y_data", "kind": "variable", "doc": "

The data that x_data will be calibrated against. A 'Fold'\ncolumn is added using the cont_strat_folds function which splits\nthe data into k stratified folds (where k is the value of\nfolds). It splits the continuous measurements into n bins (where n\nis the value of strat_groups) and distributes each bin equally\nacross all folds. This significantly reduces the chances of one fold\ncontaining a skewed distribution relative to the whole dataset.
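One quick check of this behaviour is to compare the target's distribution per fold. This is a sketch only; it assumes calibration is an initialised Calibrate instance, that the target column is 'x' as in the earlier example, and that the 'Fold' column added to y_data is retained by return_measurements().

measurements = calibration.return_measurements()
y = measurements['y']
print(y.groupby('Fold')['x'].describe())  # per-fold summary of the target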

    \n"}, "calidhayte.calibrate.Calibrate.models": {"fullname": "calidhayte.calibrate.Calibrate.models", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.models", "kind": "variable", "doc": "

    The calibrated models. They are stored in a nested structure as\nfollows:

    \n\n
1. Primary key: name of the technique (e.g. Lasso Regression).
2. Scaling technique (e.g. Yeo-Johnson Transform).
3. Combination of variables used, or the target alone if the calibration is univariate (e.g. \"target + a + b\").
4. Fold: which fold was excluded from the calibration. If the data is 5-fold cross-validated, a key of 4 indicates the model was trained on folds 0-3.
    \n\n
    stateDiagram-v2\n models --> Technique\n state Technique {\n [*] --> Scaling\n [*]: The calibration technique used\n [*]: (e.g \"Lasso Regression\")\n state Scaling {\n [*] --> Variables\n [*]: The scaling technique used\n [*]: (e.g \"Yeo-Johnson Transform\")\n state Variables {\n [*] : The combination of variables used\n [*] : (e.g \"x + a + b\")\n [*] --> Fold\n state Fold {\n [*] : Which fold was excluded from training data\n [*] : (e.g 4 indicates folds 0-3 were used to train)\n }\n }\n }\n }\n
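A minimal sketch of walking this structure, assuming calibration is a Calibrate instance whose fitting methods have already been called:

# technique -> scaling -> variable combination -> fold -> Pipeline
for technique, scalers in calibration.models.items():
    for scaling, var_combos in scalers.items():
        for variables, folds in var_combos.items():
            for fold, pipeline in folds.items():
                print(technique, scaling, variables, fold, type(pipeline).__name__)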
    \n", "annotation": ": dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]"}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"fullname": "calidhayte.calibrate.Calibrate.pymc_bayesian", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.pymc_bayesian", "kind": "function", "doc": "

    Performs bayesian linear regression (either uni or multivariate)\nfitting x on y.

    \n\n

Performs bayesian linear regression, both univariate and multivariate,\non X against y. More details can be found at:\nhttps://pymc.io/projects/examples/en/latest/generalized_linear_models/GLM-robust.html

    \n\n
    Parameters
    \n\n
      \n
    • family ({'Gaussian', 'Student T'}, default='Gaussian'):\nStatistical distribution to fit measurements to. Options are:\n - Gaussian\n - Student T
    • \n
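As a usage sketch (assuming calibration is an initialised Calibrate instance), a heavier-tailed likelihood can be requested with:

calibration.pymc_bayesian(family='Student T')  # family defaults to 'Gaussian'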
    \n", "signature": "(\tself,\tfamily: Literal['Gaussian', 'Student T'] = 'Gaussian',\tname: str = ' PyMC Bayesian',\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.linreg": {"fullname": "calidhayte.calibrate.Calibrate.linreg", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.linreg", "kind": "function", "doc": "

    Fit x on y via linear regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Linear Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Linear Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.ridge": {"fullname": "calidhayte.calibrate.Calibrate.ridge", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.ridge", "kind": "function", "doc": "

    Fit x on y via ridge regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Ridge Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Ridge Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.ridge_cv": {"fullname": "calidhayte.calibrate.Calibrate.ridge_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.ridge_cv", "kind": "function", "doc": "

    Fit x on y via cross-validated ridge regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Ridge Regression (Cross Validated)\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Ridge Regression (Cross Validated)', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.lasso": {"fullname": "calidhayte.calibrate.Calibrate.lasso", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.lasso", "kind": "function", "doc": "

    Fit x on y via lasso regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Lasso Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Lasso Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.lasso_cv": {"fullname": "calidhayte.calibrate.Calibrate.lasso_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.lasso_cv", "kind": "function", "doc": "

    Fit x on y via cross-validated lasso regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Lasso Regression (Cross Validated)\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Lasso Regression (Cross Validated)', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"fullname": "calidhayte.calibrate.Calibrate.multi_task_lasso", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.multi_task_lasso", "kind": "function", "doc": "

    Fit x on y via multitask lasso regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Multi-task Lasso Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Multi-task Lasso Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"fullname": "calidhayte.calibrate.Calibrate.multi_task_lasso_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.multi_task_lasso_cv", "kind": "function", "doc": "

    Fit x on y via cross validated multitask lasso regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Multi-task Lasso Regression (Cross Validated)\"):\nName of regression technique.
    • \n
    \n", "signature": "(\tself,\tname: str = 'Multi-task Lasso Regression (Cross Validated)',\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.elastic_net": {"fullname": "calidhayte.calibrate.Calibrate.elastic_net", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.elastic_net", "kind": "function", "doc": "

    Fit x on y via elastic net regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Elastic Net Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Elastic Net Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"fullname": "calidhayte.calibrate.Calibrate.elastic_net_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.elastic_net_cv", "kind": "function", "doc": "

    Fit x on y via cross validated elastic net regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Elastic Net Regression (Cross Validated)\"):\nName of regression technique.
    • \n
    \n", "signature": "(\tself,\tname: str = 'Elastic Net Regression (Cross Validated)',\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"fullname": "calidhayte.calibrate.Calibrate.multi_task_elastic_net", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.multi_task_elastic_net", "kind": "function", "doc": "

    Fit x on y via multi-task elastic net regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Multi-task Elastic Net Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Multi-Task Elastic Net Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"fullname": "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.multi_task_elastic_net_cv", "kind": "function", "doc": "

    Fit x on y via cross validated multi-task elastic net regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Multi-Task Elastic Net Regression (Cross Validated)\"):\nName of regression technique.
    • \n
    \n", "signature": "(\tself,\tname: str = 'Multi-Task Elastic Net Regression (Cross Validated)',\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.lars": {"fullname": "calidhayte.calibrate.Calibrate.lars", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.lars", "kind": "function", "doc": "

    Fit x on y via least angle regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Least Angle Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Least Angle Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.lars_lasso": {"fullname": "calidhayte.calibrate.Calibrate.lars_lasso", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.lars_lasso", "kind": "function", "doc": "

    Fit x on y via lasso least angle regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Least Angle Regression (Lasso)\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Least Angle Regression (Lasso)', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.omp": {"fullname": "calidhayte.calibrate.Calibrate.omp", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.omp", "kind": "function", "doc": "

    Fit x on y via orthogonal matching pursuit regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Orthogonal Matching Pursuit\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Orthogonal Matching Pursuit', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"fullname": "calidhayte.calibrate.Calibrate.bayesian_ridge", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.bayesian_ridge", "kind": "function", "doc": "

    Fit x on y via bayesian ridge regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Bayesian Ridge Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Bayesian Ridge Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"fullname": "calidhayte.calibrate.Calibrate.bayesian_ard", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.bayesian_ard", "kind": "function", "doc": "

    Fit x on y via bayesian automatic relevance detection

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Bayesian Automatic Relevance Detection\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Bayesian Automatic Relevance Detection', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.tweedie": {"fullname": "calidhayte.calibrate.Calibrate.tweedie", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.tweedie", "kind": "function", "doc": "

    Fit x on y via tweedie regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Tweedie Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Tweedie Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"fullname": "calidhayte.calibrate.Calibrate.stochastic_gradient_descent", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.stochastic_gradient_descent", "kind": "function", "doc": "

    Fit x on y via stochastic gradient descent regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Stochastic Gradient Descent\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Stochastic Gradient Descent', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"fullname": "calidhayte.calibrate.Calibrate.passive_aggressive", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.passive_aggressive", "kind": "function", "doc": "

    Fit x on y via passive aggressive regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Passive Agressive Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Passive Agressive Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.ransac": {"fullname": "calidhayte.calibrate.Calibrate.ransac", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.ransac", "kind": "function", "doc": "

    Fit x on y via RANSAC regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"RANSAC\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'RANSAC', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.theil_sen": {"fullname": "calidhayte.calibrate.Calibrate.theil_sen", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.theil_sen", "kind": "function", "doc": "

    Fit x on y via theil-sen regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Theil-Sen Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Theil-Sen Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.huber": {"fullname": "calidhayte.calibrate.Calibrate.huber", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.huber", "kind": "function", "doc": "

    Fit x on y via huber regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Huber Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Huber Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.quantile": {"fullname": "calidhayte.calibrate.Calibrate.quantile", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.quantile", "kind": "function", "doc": "

    Fit x on y via quantile regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Quantile Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Quantile Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.decision_tree": {"fullname": "calidhayte.calibrate.Calibrate.decision_tree", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.decision_tree", "kind": "function", "doc": "

    Fit x on y using a decision tree

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Decision Tree\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Decision Tree', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.extra_tree": {"fullname": "calidhayte.calibrate.Calibrate.extra_tree", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.extra_tree", "kind": "function", "doc": "

    Fit x on y using an extra tree

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Extra Tree\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Extra Tree', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.random_forest": {"fullname": "calidhayte.calibrate.Calibrate.random_forest", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.random_forest", "kind": "function", "doc": "

    Fit x on y using a random forest

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Random Forest\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Random Forest', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"fullname": "calidhayte.calibrate.Calibrate.extra_trees_ensemble", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.extra_trees_ensemble", "kind": "function", "doc": "

    Fit x on y using an ensemble of extra trees

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Extra Trees Ensemble\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Extra Trees Ensemble', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"fullname": "calidhayte.calibrate.Calibrate.gradient_boost_regressor", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.gradient_boost_regressor", "kind": "function", "doc": "

    Fit x on y using gradient boosting regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Gradient Boosting Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Gradient Boosting Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"fullname": "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.hist_gradient_boost_regressor", "kind": "function", "doc": "

    Fit x on y using histogram-based gradient boosting regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Histogram-Based Gradient Boosting Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(\tself,\tname: str = 'Histogram-Based Gradient Boosting Regression',\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"fullname": "calidhayte.calibrate.Calibrate.mlp_regressor", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.mlp_regressor", "kind": "function", "doc": "

    Fit x on y using multi-layer perceptrons

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Multi-Layer Perceptron Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Multi-Layer Perceptron Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.svr": {"fullname": "calidhayte.calibrate.Calibrate.svr", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.svr", "kind": "function", "doc": "

    Fit x on y using support vector regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Support Vector Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Support Vector Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.linear_svr": {"fullname": "calidhayte.calibrate.Calibrate.linear_svr", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.linear_svr", "kind": "function", "doc": "

    Fit x on y using linear support vector regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Linear Support Vector Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Linear Support Vector Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.nu_svr": {"fullname": "calidhayte.calibrate.Calibrate.nu_svr", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.nu_svr", "kind": "function", "doc": "

    Fit x on y using nu-support vector regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Nu-Support Vector Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Nu-Support Vector Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.gaussian_process": {"fullname": "calidhayte.calibrate.Calibrate.gaussian_process", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.gaussian_process", "kind": "function", "doc": "

    Fit x on y using gaussian process regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Gaussian Process Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Gaussian Process Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.pls": {"fullname": "calidhayte.calibrate.Calibrate.pls", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.pls", "kind": "function", "doc": "

    Fit x on y using pls regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"PLS Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'PLS Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.isotonic": {"fullname": "calidhayte.calibrate.Calibrate.isotonic", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.isotonic", "kind": "function", "doc": "

    Fit x on y using isotonic regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"Isotonic Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'Isotonic Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.xgboost": {"fullname": "calidhayte.calibrate.Calibrate.xgboost", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.xgboost", "kind": "function", "doc": "

    Fit x on y using xgboost regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"XGBoost Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'XGBoost Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"fullname": "calidhayte.calibrate.Calibrate.xgboost_rf", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.xgboost_rf", "kind": "function", "doc": "

    Fit x on y using xgboosted random forest regression

    \n\n
    Parameters
    \n\n
      \n
• name (str, default=\"XGBoost Random Forest Regression\"):\nName of regression technique.
    • \n
    \n", "signature": "(self, name: str = 'XGBoost Random Forest Regression', **kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.return_measurements": {"fullname": "calidhayte.calibrate.Calibrate.return_measurements", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.return_measurements", "kind": "function", "doc": "

    Returns the measurements used, with missing values and\nnon-overlapping measurements excluded

    \n\n
    Returns
    \n\n
      \n
    • dict[str, pd.DataFrame]: Dictionary with 2 keys:
    • \n
    \n\n\n\n\n \n \n\n\n\n\n \n \n\n\n \n \n\n\n
    KeyValue
    xx_data
    yy_data
    \n", "signature": "(self) -> dict[str, pandas.core.frame.DataFrame]:", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.return_models": {"fullname": "calidhayte.calibrate.Calibrate.return_models", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.return_models", "kind": "function", "doc": "

    Returns the models stored in the object

    \n\n
    Returns
    \n\n
• dict[str, dict[str, dict[str, dict[int, Pipeline]]]]: The calibrated models, stored in a nested structure as follows (a short usage sketch follows this list):
  1. Primary key: name of the technique (e.g. Lasso Regression).
  2. Scaling technique (e.g. Yeo-Johnson Transform).
  3. Combination of variables used, or the target alone if the calibration is univariate (e.g. \"target + a + b\").
  4. Fold: which fold was excluded from the calibration. If the data is 5-fold cross-validated, a key of 4 indicates the model was trained on folds 0-3.
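A minimal usage sketch with illustrative keys: pull one trained Pipeline out of the returned structure and apply it to new measurements with matching column names. The class example earlier calls transform() on a pipeline; for a Pipeline whose final step is a regressor, scikit-learn's predict() is the usual call, so use whichever your installed version exposes.

models = calibration.return_models()
pipeline = models['Linear Regression']['Standard Scale']['x'][0]  # fold 0; keys are illustrative
x_new = pd.read_csv('independent_new.csv')
predicted = pipeline.predict(x_new)  # or pipeline.transform(x_new)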
    \n", "signature": "(\tself) -> dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]:", "funcdef": "def"}, "calidhayte.graphs": {"fullname": "calidhayte.graphs", "modulename": "calidhayte.graphs", "kind": "module", "doc": "

    \n"}, "calidhayte.graphs.Graphs": {"fullname": "calidhayte.graphs.Graphs", "modulename": "calidhayte.graphs", "qualname": "Graphs", "kind": "class", "doc": "

    Calculates errors between \"true\" and \"predicted\" measurements, plots\ngraphs and returns all results

    \n"}, "calidhayte.graphs.Graphs.__init__": {"fullname": "calidhayte.graphs.Graphs.__init__", "modulename": "calidhayte.graphs", "qualname": "Graphs.__init__", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.frame.DataFrame,\tx_name: str,\ty: pandas.core.frame.DataFrame,\ty_name: str,\ttarget: str,\tmodels: dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]],\tstyle: str = 'bmh',\tbackend: str = 'agg')"}, "calidhayte.graphs.Graphs.x": {"fullname": "calidhayte.graphs.Graphs.x", "modulename": "calidhayte.graphs", "qualname": "Graphs.x", "kind": "variable", "doc": "

    Independent variable(s) that are calibrated against y, the dependent\nvariable. Index should match y.

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.graphs.Graphs.y": {"fullname": "calidhayte.graphs.Graphs.y", "modulename": "calidhayte.graphs", "qualname": "Graphs.y", "kind": "variable", "doc": "

    Dependent variable used to calibrate the independent variables x.\nIndex should match x.

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.graphs.Graphs.x_name": {"fullname": "calidhayte.graphs.Graphs.x_name", "modulename": "calidhayte.graphs", "qualname": "Graphs.x_name", "kind": "variable", "doc": "

    Label for x measurements

    \n", "annotation": ": str"}, "calidhayte.graphs.Graphs.y_name": {"fullname": "calidhayte.graphs.Graphs.y_name", "modulename": "calidhayte.graphs", "qualname": "Graphs.y_name", "kind": "variable", "doc": "

    Label for y measurements

    \n", "annotation": ": str"}, "calidhayte.graphs.Graphs.target": {"fullname": "calidhayte.graphs.Graphs.target", "modulename": "calidhayte.graphs", "qualname": "Graphs.target", "kind": "variable", "doc": "

    Measurand in y to calibrate against

    \n"}, "calidhayte.graphs.Graphs.models": {"fullname": "calidhayte.graphs.Graphs.models", "modulename": "calidhayte.graphs", "qualname": "Graphs.models", "kind": "variable", "doc": "

    The precalibrated models. They are stored in a nested structure as\nfollows:

    \n\n
      \n
    1. Primary Key, name of the technique (e.g. Lasso Regression).
    2. Scaling technique (e.g. Yeo-Johnson Transform).
    3. Combination of variables used, or the target alone if the calibration is univariate (e.g. \"target + a + b\").
    4. Fold, which fold was excluded from the calibration. If the data is 5-fold cross validated, a key of 4 indicates the model was trained on folds 0-3.
    \n\n
    stateDiagram-v2\n models --> Technique\n state Technique {\n [*] --> Scaling\n [*]: The calibration technique used\n [*]: (e.g \"Lasso Regression\")\n state Scaling {\n [*] --> Variables\n [*]: The scaling technique used\n [*]: (e.g \"Yeo-Johnson Transform\")\n state Variables {\n [*] : The combination of variables used\n [*] : (e.g \"x + a + b\")\n [*] --> Fold\n state Fold {\n [*] : Which fold was excluded from training data\n [*] : (e.g 4 indicates folds 0-3 were used to train)\n }\n }\n }\n }\n
    \n", "annotation": ": dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]"}, "calidhayte.graphs.Graphs.plots": {"fullname": "calidhayte.graphs.Graphs.plots", "modulename": "calidhayte.graphs", "qualname": "Graphs.plots", "kind": "variable", "doc": "

    The plotted data, stored in a similar structure to models

    \n\n
      \n
    1. Primary Key, name of the technique (e.g. Lasso Regression).
    2. Scaling technique (e.g. Yeo-Johnson Transform).
    3. Combination of variables used, or the target alone if the calibration is univariate (e.g. \"target + a + b\").
    4. Name of the plot (e.g. 'Bland-Altman').
    \n\n
    stateDiagram-v2\n models --> Technique\n state Technique {\n [*] --> Scaling\n [*]: The calibration technique used\n [*]: (e.g \"Lasso Regression\")\n state Scaling {\n [*] --> Variables\n [*]: The scaling technique used\n [*]: (e.g \"Yeo-Johnson Transform\")\n state Variables {\n [*] : The combination of variables used\n [*] : (e.g \"x + a + b\")\n [*] --> pn\n state \"Plot Name\" as pn {\n [*] : Name of the plot\n [*] : (e.g Bland-Altman)\n }\n }\n }\n }\n
    \n", "annotation": ": 'dict[str, dict[str, dict[str, dict[str, plt.figure.Figure]]]]'"}, "calidhayte.graphs.Graphs.style": {"fullname": "calidhayte.graphs.Graphs.style", "modulename": "calidhayte.graphs", "qualname": "Graphs.style", "kind": "variable", "doc": "

    Name of in-built matplotlib style or path to stylesheet

    \n", "annotation": ": Union[str, pathlib.Path]"}, "calidhayte.graphs.Graphs.backend": {"fullname": "calidhayte.graphs.Graphs.backend", "modulename": "calidhayte.graphs", "qualname": "Graphs.backend", "kind": "variable", "doc": "

    Matplotlib backend to use

    \n"}, "calidhayte.graphs.Graphs.plot_meta": {"fullname": "calidhayte.graphs.Graphs.plot_meta", "modulename": "calidhayte.graphs", "qualname": "Graphs.plot_meta", "kind": "function", "doc": "

    Iterates over data and creates plots using function specified in\nplot_func

    \n\n

    Should not be accessed directly; it should instead be called by\nanother method.

    \n\n
    Parameters
    \n\n
      \n
    • plot_func (Callable):\nFunction that returns matplotlib figure
    • \n
    • name (str):\nName to give plot, used as key in plots dict
    • \n
    • **kwargs: Additional arguments passed to plot_func
    • \n
    \n", "signature": "(self, plot_func: Callable, name: str, **kwargs):", "funcdef": "def"}, "calidhayte.graphs.Graphs.bland_altman_plot": {"fullname": "calidhayte.graphs.Graphs.bland_altman_plot", "modulename": "calidhayte.graphs", "qualname": "Graphs.bland_altman_plot", "kind": "function", "doc": "

    \n", "signature": "(self, title=None):", "funcdef": "def"}, "calidhayte.graphs.Graphs.ecdf_plot": {"fullname": "calidhayte.graphs.Graphs.ecdf_plot", "modulename": "calidhayte.graphs", "qualname": "Graphs.ecdf_plot", "kind": "function", "doc": "

    \n", "signature": "(self, title=None):", "funcdef": "def"}, "calidhayte.graphs.Graphs.lin_reg_plot": {"fullname": "calidhayte.graphs.Graphs.lin_reg_plot", "modulename": "calidhayte.graphs", "qualname": "Graphs.lin_reg_plot", "kind": "function", "doc": "

    \n", "signature": "(self, title=None):", "funcdef": "def"}, "calidhayte.graphs.Graphs.save_plots": {"fullname": "calidhayte.graphs.Graphs.save_plots", "modulename": "calidhayte.graphs", "qualname": "Graphs.save_plots", "kind": "function", "doc": "

    \n", "signature": "(\tself,\tpath: str,\tfiletype: Union[Literal['png', 'pgf', 'pdf'], collections.abc.Iterable[Literal['png', 'pgf', 'pdf']]] = 'png'):", "funcdef": "def"}, "calidhayte.graphs.ecdf": {"fullname": "calidhayte.graphs.ecdf", "modulename": "calidhayte.graphs", "qualname": "ecdf", "kind": "function", "doc": "

    \n", "signature": "(data):", "funcdef": "def"}, "calidhayte.graphs.lin_reg_plot": {"fullname": "calidhayte.graphs.lin_reg_plot", "modulename": "calidhayte.graphs", "qualname": "lin_reg_plot", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.series.Series,\ty: pandas.core.series.Series,\tx_name: str,\ty_name: str,\ttitle: Optional[str] = None):", "funcdef": "def"}, "calidhayte.graphs.bland_altman_plot": {"fullname": "calidhayte.graphs.bland_altman_plot", "modulename": "calidhayte.graphs", "qualname": "bland_altman_plot", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.frame.DataFrame,\ty: pandas.core.series.Series,\ttitle: Optional[str] = None,\t**kwargs):", "funcdef": "def"}, "calidhayte.graphs.ecdf_plot": {"fullname": "calidhayte.graphs.ecdf_plot", "modulename": "calidhayte.graphs", "qualname": "ecdf_plot", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.frame.DataFrame,\ty: pandas.core.series.Series,\tx_name: str,\ty_name: str,\ttitle: Optional[str] = None):", "funcdef": "def"}, "calidhayte.results": {"fullname": "calidhayte.results", "modulename": "calidhayte.results", "kind": "module", "doc": "

    Determine the performance of different calibration techniques using a range of\ndifferent metrics.

    \n\n

    Acts as a wrapper for scikit-learn performance metrics 1.

    \n\n\n"}, "calidhayte.results.CoefficientPipelineDict": {"fullname": "calidhayte.results.CoefficientPipelineDict", "modulename": "calidhayte.results", "qualname": "CoefficientPipelineDict", "kind": "variable", "doc": "

    Type alias for the nested dictionaries that the models are stored in

    \n", "annotation": ": TypeAlias", "default_value": "dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]"}, "calidhayte.results.Results": {"fullname": "calidhayte.results.Results", "modulename": "calidhayte.results", "qualname": "Results", "kind": "class", "doc": "

    Determine performance of models using a range of metrics.

    \n\n

    Used to compare a range of different models that were fitted in the\nCalibrate class in calibrate.py.

    \n"}, "calidhayte.results.Results.__init__": {"fullname": "calidhayte.results.Results.__init__", "modulename": "calidhayte.results", "qualname": "Results.__init__", "kind": "function", "doc": "

    Initialises the class

    \n\n
    Parameters
    \n\n
      \n
    • x_data (pd.DataFrame):\nIndependent measurements
    • \n
    • y_data (pd.DataFrame):\nDependent measurements
    • \n
    • target (str):\nColumn name of the primary feature to use in calibration, must be\nthe name of a column in both x_data and y_data.
    • \n
    • models (CoefficientPipelineDict):\nThe calibrated models.
    • \n
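    A minimal usage sketch (x_df, y_df, 'NO2' and cal are illustrative placeholders), chaining the models returned by Calibrate into Results:

    >>> results = Results(
    ...     x_data=x_df,
    ...     y_data=y_df,
    ...     target='NO2',
    ...     models=cal.return_models()
    ... )
    >>> results.r2()
    >>> errors = results.return_errors()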
    \n", "signature": "(\tx_data: pandas.core.frame.DataFrame,\ty_data: pandas.core.frame.DataFrame,\ttarget: str,\tmodels: dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]])"}, "calidhayte.results.Results.x": {"fullname": "calidhayte.results.Results.x", "modulename": "calidhayte.results", "qualname": "Results.x", "kind": "variable", "doc": "

    Independent measurements

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.results.Results.y": {"fullname": "calidhayte.results.Results.y", "modulename": "calidhayte.results", "qualname": "Results.y", "kind": "variable", "doc": "

    Dependent measurements

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.results.Results.target": {"fullname": "calidhayte.results.Results.target", "modulename": "calidhayte.results", "qualname": "Results.target", "kind": "variable", "doc": "

    Column name of primary feature to use in calibration

    \n", "annotation": ": str"}, "calidhayte.results.Results.models": {"fullname": "calidhayte.results.Results.models", "modulename": "calidhayte.results", "qualname": "Results.models", "kind": "variable", "doc": "

    They are stored in a nested structure as\nfollows:

    \n\n
      \n
    1. Primary Key, name of the technique (e.g. Lasso Regression).
    2. Scaling technique (e.g. Yeo-Johnson Transform).
    3. Combination of variables used, or the target alone if the calibration is univariate (e.g. \"target + a + b\").
    4. Fold, which fold was excluded from the calibration. If the data is 5-fold cross validated, a key of 4 indicates the model was trained on folds 0-3.
    \n\n
    stateDiagram-v2\n models --> Technique\n state Technique {\n [*] --> Scaling\n [*]: The calibration technique used\n [*]: (e.g \"Lasso Regression\")\n state Scaling {\n [*] --> Variables\n [*]: The scaling technique used\n [*]: (e.g \"Yeo-Johnson Transform\")\n state Variables {\n [*] : The combination of variables used\n [*] : (e.g \"x + a + b\")\n [*] --> Fold\n state Fold {\n [*] : Which fold was excluded from training data\n [*] : (e.g 4 indicates folds 0-3 were used to train)\n }\n }\n }\n }\n
    \n", "annotation": ": dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]"}, "calidhayte.results.Results.errors": {"fullname": "calidhayte.results.Results.errors", "modulename": "calidhayte.results", "qualname": "Results.errors", "kind": "variable", "doc": "

    Results of error metric calculations. Index increases sequentially\nby 1, columns contain the technique, scaling method, variables and\nfold for each row. It also contains a column for each metric.

    |     | Technique     | Scaling Method      | Variables | Fold | Explained Variance Score | ... | Mean Absolute Percentage Error |
    | 0   | Random Forest | Standard Scaling    | x + a     | 0    | 0.95 | ... | 0.05 |
    | 1   | Theil-Sen     | Yeo-Johnson Scaling | x + a + b | 1    | 0.98 | ... | 0.01 |
    | ... | ...           | ...                 | ...       | ...  | ...  | ... | ...  |
    | 55  | Extra Trees   | None                | x         | 2    | 0.43 | ... | 0.52 |
    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.results.Results.explained_variance_score": {"fullname": "calidhayte.results.Results.explained_variance_score", "modulename": "calidhayte.results", "qualname": "Results.explained_variance_score", "kind": "function", "doc": "

    Calculate the explained variance score between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.max": {"fullname": "calidhayte.results.Results.max", "modulename": "calidhayte.results", "qualname": "Results.max", "kind": "function", "doc": "

    Calculate the max error between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_absolute": {"fullname": "calidhayte.results.Results.mean_absolute", "modulename": "calidhayte.results", "qualname": "Results.mean_absolute", "kind": "function", "doc": "

    Calculate the mean absolute error between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.root_mean_squared": {"fullname": "calidhayte.results.Results.root_mean_squared", "modulename": "calidhayte.results", "qualname": "Results.root_mean_squared", "kind": "function", "doc": "

    Calculate the root mean squared error between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.root_mean_squared_log": {"fullname": "calidhayte.results.Results.root_mean_squared_log", "modulename": "calidhayte.results", "qualname": "Results.root_mean_squared_log", "kind": "function", "doc": "

    Calculate the root mean squared log error between the true values\n(y) and predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.median_absolute": {"fullname": "calidhayte.results.Results.median_absolute", "modulename": "calidhayte.results", "qualname": "Results.median_absolute", "kind": "function", "doc": "

    Calculate the median absolute error between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_absolute_percentage": {"fullname": "calidhayte.results.Results.mean_absolute_percentage", "modulename": "calidhayte.results", "qualname": "Results.mean_absolute_percentage", "kind": "function", "doc": "

    Calculate the mean absolute percentage error between the true\nvalues (y) and predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.r2": {"fullname": "calidhayte.results.Results.r2", "modulename": "calidhayte.results", "qualname": "Results.r2", "kind": "function", "doc": "

    Calculate the r2 between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_poisson_deviance": {"fullname": "calidhayte.results.Results.mean_poisson_deviance", "modulename": "calidhayte.results", "qualname": "Results.mean_poisson_deviance", "kind": "function", "doc": "

    Calculate the mean poisson deviance between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_gamma_deviance": {"fullname": "calidhayte.results.Results.mean_gamma_deviance", "modulename": "calidhayte.results", "qualname": "Results.mean_gamma_deviance", "kind": "function", "doc": "

    Calculate the mean gamma deviance between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_tweedie_deviance": {"fullname": "calidhayte.results.Results.mean_tweedie_deviance", "modulename": "calidhayte.results", "qualname": "Results.mean_tweedie_deviance", "kind": "function", "doc": "

    Calculate the mean tweedie deviance between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_pinball_loss": {"fullname": "calidhayte.results.Results.mean_pinball_loss", "modulename": "calidhayte.results", "qualname": "Results.mean_pinball_loss", "kind": "function", "doc": "

    Calculate the mean pinball loss between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. Link
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.return_errors": {"fullname": "calidhayte.results.Results.return_errors", "modulename": "calidhayte.results", "qualname": "Results.return_errors", "kind": "function", "doc": "

    Returns all calculated errors in dataframe format

    \n\n

    Initially the error dataframe has the following structure:

    |     | Technique     | Scaling Method      | Variables | Fold | Explained Variance Score | ... | Mean Absolute Percentage Error |
    | 0   | Random Forest | Standard Scaling    | x + a     | 0    | 0.95 | ... | 0.05 |
    | 1   | Theil-Sen     | Yeo-Johnson Scaling | x + a + b | 1    | 0.98 | ... | 0.01 |
    | ... | ...           | ...                 | ...       | ...  | ...  | ... | ...  |
    | 55  | Extra Trees   | None                | x         | 2    | 0.43 | ... | 0.52 |
    \n\n

    However, before returning the data, a new MultiIndex is built using\nthe Technique, Scaling Method, Variables and Fold columns. This\nallows easy comparison of the different techniques by grouping on one\nor multiple levels of the MultiIndex.

    \n\n
    Returns
    \n\n
      \n
    • pd.DataFrame: Results dataframe in the following format:
    • \n
    |               |                     |           |     | Explained Variance Score | ... | Mean Absolute Percentage Error |
    | Random Forest | Standard Scaling    | x + a     | 0   | 0.95 | ... | 0.05 |
    | Theil-Sen     | Yeo-Johnson Scaling | x + a + b | 1   | 0.98 | ... | 0.01 |
    | ...           | ...                 | ...       | ... | ...  | ... | ...  |
    | Extra Trees   | None                | x         | 2   | 0.43 | ... | 0.52 |
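    As a sketch (assuming results is a Results instance with at least one metric already calculated), the MultiIndex makes per-technique comparison a single groupby on the first level (Technique):

    >>> errors = results.return_errors()
    >>> errors.groupby(level=0).mean()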
    \n", "signature": "(self) -> pandas.core.frame.DataFrame:", "funcdef": "def"}, "calidhayte.summary": {"fullname": "calidhayte.summary", "modulename": "calidhayte.summary", "kind": "module", "doc": "

    \n"}, "calidhayte.summary.Summary": {"fullname": "calidhayte.summary.Summary", "modulename": "calidhayte.summary", "qualname": "Summary", "kind": "class", "doc": "

    \n"}, "calidhayte.summary.Summary.__init__": {"fullname": "calidhayte.summary.Summary.__init__", "modulename": "calidhayte.summary", "qualname": "Summary.__init__", "kind": "function", "doc": "

    \n", "signature": "(\tresults: pandas.core.frame.DataFrame,\tcols: list[str],\tstyle: str = 'bmh',\tbackend: str = 'agg')"}, "calidhayte.summary.Summary.results": {"fullname": "calidhayte.summary.Summary.results", "modulename": "calidhayte.summary", "qualname": "Summary.results", "kind": "variable", "doc": "

    \n"}, "calidhayte.summary.Summary.plots": {"fullname": "calidhayte.summary.Summary.plots", "modulename": "calidhayte.summary", "qualname": "Summary.plots", "kind": "variable", "doc": "

    \n", "annotation": ": 'dict[str, dict[str, plt.figure.Figure]]'"}, "calidhayte.summary.Summary.cols": {"fullname": "calidhayte.summary.Summary.cols", "modulename": "calidhayte.summary", "qualname": "Summary.cols", "kind": "variable", "doc": "

    \n", "annotation": ": list[str]"}, "calidhayte.summary.Summary.style": {"fullname": "calidhayte.summary.Summary.style", "modulename": "calidhayte.summary", "qualname": "Summary.style", "kind": "variable", "doc": "

    \n"}, "calidhayte.summary.Summary.backend": {"fullname": "calidhayte.summary.Summary.backend", "modulename": "calidhayte.summary", "qualname": "Summary.backend", "kind": "variable", "doc": "

    \n"}, "calidhayte.summary.Summary.boxplots": {"fullname": "calidhayte.summary.Summary.boxplots", "modulename": "calidhayte.summary", "qualname": "Summary.boxplots", "kind": "function", "doc": "

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.summary.Summary.histograms": {"fullname": "calidhayte.summary.Summary.histograms", "modulename": "calidhayte.summary", "qualname": "Summary.histograms", "kind": "function", "doc": "

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.summary.Summary.save_plots": {"fullname": "calidhayte.summary.Summary.save_plots", "modulename": "calidhayte.summary", "qualname": "Summary.save_plots", "kind": "function", "doc": "

    \n", "signature": "(self, path, filetype: str = 'png'):", "funcdef": "def"}}, "docInfo": {"calidhayte": {"qualname": 0, "fullname": 1, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 1226}, "calidhayte.calibrate": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 111}, "calidhayte.calibrate.cont_strat_folds": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 117, "bases": 0, "doc": 336}, "calidhayte.calibrate.Calibrate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 986}, "calidhayte.calibrate.Calibrate.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 318, "bases": 0, "doc": 334}, "calidhayte.calibrate.Calibrate.x_data": {"qualname": 3, "fullname": 5, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "calidhayte.calibrate.Calibrate.target": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 32}, "calidhayte.calibrate.Calibrate.scaler_list": {"qualname": 3, "fullname": 5, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "calidhayte.calibrate.Calibrate.scaler": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "calidhayte.calibrate.Calibrate.y_data": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 89}, "calidhayte.calibrate.Calibrate.models": {"qualname": 2, "fullname": 4, "annotation": 8, "default_value": 0, "signature": 0, "bases": 0, "doc": 192}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 75}, "calidhayte.calibrate.Calibrate.linreg": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "calidhayte.calibrate.Calibrate.ridge": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 30}, "calidhayte.calibrate.Calibrate.ridge_cv": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 34}, "calidhayte.calibrate.Calibrate.lasso": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 30}, "calidhayte.calibrate.Calibrate.lasso_cv": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 34}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 33}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 37}, "calidhayte.calibrate.Calibrate.elastic_net": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 32}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 36}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 36}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"qualname": 6, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 48, 
"bases": 0, "doc": 40}, "calidhayte.calibrate.Calibrate.lars": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "calidhayte.calibrate.Calibrate.lars_lasso": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 34}, "calidhayte.calibrate.Calibrate.omp": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 35}, "calidhayte.calibrate.Calibrate.tweedie": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 34}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "calidhayte.calibrate.Calibrate.ransac": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 30}, "calidhayte.calibrate.Calibrate.theil_sen": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 40}, "calidhayte.calibrate.Calibrate.huber": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "calidhayte.calibrate.Calibrate.quantile": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "calidhayte.calibrate.Calibrate.decision_tree": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 32}, "calidhayte.calibrate.Calibrate.extra_tree": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 32}, "calidhayte.calibrate.Calibrate.random_forest": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 32}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 35}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 46}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 42}, "calidhayte.calibrate.Calibrate.svr": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "calidhayte.calibrate.Calibrate.linear_svr": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 35}, "calidhayte.calibrate.Calibrate.nu_svr": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 43}, "calidhayte.calibrate.Calibrate.gaussian_process": {"qualname": 3, "fullname": 5, "annotation": 0, 
"default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "calidhayte.calibrate.Calibrate.pls": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "calidhayte.calibrate.Calibrate.isotonic": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "calidhayte.calibrate.Calibrate.xgboost": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 35}, "calidhayte.calibrate.Calibrate.return_measurements": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 80}, "calidhayte.calibrate.Calibrate.return_models": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 119}, "calidhayte.graphs": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 15}, "calidhayte.graphs.Graphs.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 198, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.x": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 22}, "calidhayte.graphs.Graphs.y": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 20}, "calidhayte.graphs.Graphs.x_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "calidhayte.graphs.Graphs.y_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "calidhayte.graphs.Graphs.target": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "calidhayte.graphs.Graphs.models": {"qualname": 2, "fullname": 4, "annotation": 8, "default_value": 0, "signature": 0, "bases": 0, "doc": 192}, "calidhayte.graphs.Graphs.plots": {"qualname": 2, "fullname": 4, "annotation": 11, "default_value": 0, "signature": 0, "bases": 0, "doc": 164}, "calidhayte.graphs.Graphs.style": {"qualname": 2, "fullname": 4, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 12}, "calidhayte.graphs.Graphs.backend": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 6}, "calidhayte.graphs.Graphs.plot_meta": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 82}, "calidhayte.graphs.Graphs.bland_altman_plot": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.ecdf_plot": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.lin_reg_plot": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.save_plots": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 127, "bases": 0, "doc": 3}, "calidhayte.graphs.ecdf": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, 
"calidhayte.graphs.lin_reg_plot": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "calidhayte.graphs.bland_altman_plot": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 3}, "calidhayte.graphs.ecdf_plot": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "calidhayte.results": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 61}, "calidhayte.results.CoefficientPipelineDict": {"qualname": 1, "fullname": 3, "annotation": 2, "default_value": 7, "signature": 0, "bases": 0, "doc": 14}, "calidhayte.results.Results": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 35}, "calidhayte.results.Results.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 132, "bases": 0, "doc": 81}, "calidhayte.results.Results.x": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 4}, "calidhayte.results.Results.y": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 4}, "calidhayte.results.Results.target": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "calidhayte.results.Results.models": {"qualname": 2, "fullname": 4, "annotation": 8, "default_value": 0, "signature": 0, "bases": 0, "doc": 189}, "calidhayte.results.Results.errors": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 220}, "calidhayte.results.Results.explained_variance_score": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.max": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 46}, "calidhayte.results.Results.mean_absolute": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.root_mean_squared": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 48}, "calidhayte.results.Results.root_mean_squared_log": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 49}, "calidhayte.results.Results.median_absolute": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.mean_absolute_percentage": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 48}, "calidhayte.results.Results.r2": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 45}, "calidhayte.results.Results.mean_poisson_deviance": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.mean_gamma_deviance": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.mean_tweedie_deviance": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.mean_pinball_loss": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, 
"signature": 11, "bases": 0, "doc": 46}, "calidhayte.results.Results.return_errors": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 435}, "calidhayte.summary": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 91, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.results": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.plots": {"qualname": 2, "fullname": 4, "annotation": 9, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.cols": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.style": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.backend": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.boxplots": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.histograms": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.save_plots": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 37, "bases": 0, "doc": 3}}, "length": 106, "save": true}, "index": {"qualname": {"root": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.cols": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 49}}}}}}}}, "v": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.summary.Summary.style": {"tf": 1}}, "df": 2}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": 
{"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.summary.Summary": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}, "calidhayte.summary.Summary.results": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.cols": {"tf": 1}, "calidhayte.summary.Summary.style": {"tf": 1}, "calidhayte.summary.Summary.backend": {"tf": 1}, "calidhayte.summary.Summary.boxplots": {"tf": 1}, "calidhayte.summary.Summary.histograms": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 10}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 4}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}}, "df": 4, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": 
{"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 3}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 2}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}, "n": {"docs": {"calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 5}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}}, "df": 4}, "m": {"docs": {}, 
"df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 8}}, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.median_absolute": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.results.Results.max": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pls": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 7, "s": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, 
"n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 3}}}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.summary.Summary.backend": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 2}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.boxplots": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 2}}}}}, "r": {"2": {"docs": {"calidhayte.results.Results.r2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": 
{"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.results": {"tf": 1}}, "df": 21}}}}}}, "f": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, 
"df": 1}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 2}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}}, "df": 3}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 3}}}}}, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 16}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": 
{"calidhayte.summary.Summary.histograms": {"tf": 1}}, "df": 1}}}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}}}}}}}, "fullname": {"root": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary": {"tf": 1}, "calidhayte.summary.Summary": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}, "calidhayte.summary.Summary.results": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.cols": {"tf": 1}, "calidhayte.summary.Summary.style": {"tf": 1}, "calidhayte.summary.Summary.backend": {"tf": 1}, "calidhayte.summary.Summary.boxplots": {"tf": 1}, "calidhayte.summary.Summary.histograms": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 106}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 
1.4142135623730951}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}}, "df": 51}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.cols": {"tf": 1}}, "df": 1}}}, "v": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.summary.Summary.style": {"tf": 1}}, "df": 2}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.summary": {"tf": 1}, "calidhayte.summary.Summary": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.__init__": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.results": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.plots": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.cols": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.style": {"tf": 1.4142135623730951}, 
"calidhayte.summary.Summary.backend": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.boxplots": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.histograms": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.save_plots": {"tf": 1.4142135623730951}}, "df": 11}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 4}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}}, "df": 4, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 3}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 
1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 2}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}, "n": {"docs": {"calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 5}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}}, "df": 4}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, 
"calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 8}}, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.median_absolute": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.results.Results.max": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pls": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 7, "s": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 3}}}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.summary.Summary.backend": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": 
{"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 2}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.boxplots": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 2}}}}}, "r": {"2": {"docs": {"calidhayte.results.Results.r2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.x": {"tf": 1.4142135623730951}, "calidhayte.results.Results.y": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.explained_variance_score": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1.4142135623730951}, "calidhayte.results.Results.median_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1.4142135623730951}, "calidhayte.results.Results.r2": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1.4142135623730951}, 
"calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.results": {"tf": 1}}, "df": 23}}}}}}, "f": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": 
{"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 2}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}}, "df": 3}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 3}}}}}, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x_name": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y_name": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.target": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.style": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.backend": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 21}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.histograms": {"tf": 1}}, "df": 1}}}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, 
"df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}}}}}}}, "annotation": {"root": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.cols": {"tf": 1}}, "df": 20, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 6}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}}, "df": 2}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 6}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 6}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.plots": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 6}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.plots": {"tf": 1.4142135623730951}}, "df": 6}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 3}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 4}}, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.summary.Summary.cols": {"tf": 1}}, "df": 2}}}}}}}}, "x": {"2": {"7": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.plots": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1}}}}}}}}}}}, "default_value": {"root": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 
1.7320508075688772}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "signature": {"root": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"9": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 5.291502622129181}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.__init__": {"tf": 2}, "calidhayte.graphs.Graphs.save_plots": {"tf": 3.7416573867739413}, "calidhayte.summary.Summary.__init__": {"tf": 2}, "calidhayte.summary.Summary.save_plots": {"tf": 1.4142135623730951}}, "df": 44}, "docs": {}, "df": 0}, "5": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}, "6": {"2": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 9.746794344808963}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 15}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 8.12403840463596}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 5.916079783099616}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 5.916079783099616}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 5.916079783099616}, "calidhayte.calibrate.Calibrate.lars": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.omp": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.huber": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 5.916079783099616}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.svr": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 5.656854249492381}, 
"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.pls": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 5.830951894845301}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 7.416198487095663}, "calidhayte.graphs.Graphs.__init__": {"tf": 12.569805089976535}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 5.656854249492381}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 4.242640687119285}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 4.242640687119285}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 4.242640687119285}, "calidhayte.graphs.Graphs.save_plots": {"tf": 9.797958971132712}, "calidhayte.graphs.ecdf": {"tf": 3.1622776601683795}, "calidhayte.graphs.lin_reg_plot": {"tf": 9.273618495495704}, "calidhayte.graphs.bland_altman_plot": {"tf": 8.660254037844387}, "calidhayte.graphs.ecdf_plot": {"tf": 9.273618495495704}, "calidhayte.results.Results.__init__": {"tf": 10.295630140987}, "calidhayte.results.Results.explained_variance_score": {"tf": 3.1622776601683795}, "calidhayte.results.Results.max": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_absolute": {"tf": 3.1622776601683795}, "calidhayte.results.Results.root_mean_squared": {"tf": 3.1622776601683795}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 3.1622776601683795}, "calidhayte.results.Results.median_absolute": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 3.1622776601683795}, "calidhayte.results.Results.r2": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 3.1622776601683795}, "calidhayte.results.Results.return_errors": {"tf": 4.898979485566356}, "calidhayte.summary.Summary.__init__": {"tf": 8.54400374531753}, "calidhayte.summary.Summary.boxplots": {"tf": 3.1622776601683795}, "calidhayte.summary.Summary.histograms": {"tf": 3.1622776601683795}, "calidhayte.summary.Summary.save_plots": {"tf": 5.477225575051661}}, "df": 71, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}}, "df": 3, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 
1}}, "df": 9}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 2}, "calidhayte.graphs.Graphs.__init__": {"tf": 2}, "calidhayte.results.Results.__init__": {"tf": 2}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 10}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}}, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pls": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, 
"calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}, "g": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1.4142135623730951}}, "df": 1}}, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 10}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 1}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 9}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}, "a": 
{"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}}}}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 2.449489742783178}}, "df": 1, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 6}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.__init__": {"tf": 2.8284271247461903}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1.7320508075688772}, "calidhayte.results.Results.__init__": {"tf": 2}, "calidhayte.summary.Summary.__init__": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 52, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, 
"df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.boxplots": {"tf": 1}, "calidhayte.summary.Summary.histograms": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 62}}, "n": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.lin_reg_plot": {"tf": 2}, "calidhayte.graphs.bland_altman_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 2}}, "df": 1, "r": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3}}}}}}, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 3}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 5}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}}, "df": 1}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 3}}}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 3}}}}}}}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 6, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}}, "y": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 2}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": 
{"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 5}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 2}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 7}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}}, "df": 43}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 5}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 2}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 2}}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": 
{"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 2}}}}}}}, "m": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 2}}}, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, 
"calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 41}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 30}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 3}}}}}}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "bases": {"root": {"docs": {}, "df": 0}}, "doc": {"root": {"0": {"1": {"docs": {"calidhayte": {"tf": 2.6457513110645907}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "5": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}, "9": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1}, "docs": {"calidhayte": {"tf": 2.449489742783178}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 2.8284271247461903}, "calidhayte.results.Results.return_errors": {"tf": 3.872983346207417}}, "df": 9, "|": {"1": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "1": {"0": {"0": {"0": {"docs": {}, "df": 0, "|": {"2": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "|": {"2": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "3": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "6": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "9": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {"calidhayte": {"tf": 
1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}, "6": {"0": {"docs": {"calidhayte.calibrate": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 15}, "docs": {}, "df": 0}, "docs": {"calidhayte": {"tf": 2.6457513110645907}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 3.3166247903554}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.7320508075688772}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.7320508075688772}}, "df": 19}, "2": {"0": {"2": {"2": {"docs": {"calidhayte": {"tf": 3}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 7, "|": {"9": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "3": {"0": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}, "7": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "9": {"docs": {"calidhayte": {"tf": 4}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 7.3484692283495345}}, "df": 3}, "docs": {"calidhayte": {"tf": 2.23606797749979}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 8, "|": {"1": {"docs": 
{"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "2": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "3": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "4": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "4": {"3": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 6, "|": {"3": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "5": {"0": {"0": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}, "5": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}, "docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 8, "|": {"2": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}}, "6": {"2": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0, "|": {"4": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "7": {"8": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}, "docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 2}}, "df": 2, "|": {"2": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "8": {"6": {"1": {"7": {"docs": {"calidhayte.calibrate": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 15}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, 
"df": 2, "|": {"7": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "9": {"5": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}, "8": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 2}}, "df": 2}, "docs": {"calidhayte": {"tf": 27.294688127912362}, "calidhayte.calibrate": {"tf": 7.937253933193772}, "calidhayte.calibrate.cont_strat_folds": {"tf": 13.114877048604}, "calidhayte.calibrate.Calibrate": {"tf": 25.03996805109783}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 10.295630140987}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.models": {"tf": 7.416198487095663}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 4.47213595499958}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.lars": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.omp": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 4.358898943540674}, "calidhayte.calibrate.Calibrate.huber": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 4.358898943540674}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 
4.358898943540674}, "calidhayte.calibrate.Calibrate.svr": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 4.358898943540674}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.pls": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 7.211102550927978}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 5.744562646538029}, "calidhayte.graphs": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.x": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.y": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.x_name": {"tf": 2}, "calidhayte.graphs.Graphs.y_name": {"tf": 2}, "calidhayte.graphs.Graphs.target": {"tf": 2}, "calidhayte.graphs.Graphs.models": {"tf": 7.416198487095663}, "calidhayte.graphs.Graphs.plots": {"tf": 7.54983443527075}, "calidhayte.graphs.Graphs.style": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.backend": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 5.477225575051661}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1.7320508075688772}, "calidhayte.graphs.ecdf": {"tf": 1.7320508075688772}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.bland_altman_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.ecdf_plot": {"tf": 1.7320508075688772}, "calidhayte.results": {"tf": 5.656854249492381}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 2.8284271247461903}, "calidhayte.results.Results.__init__": {"tf": 5.830951894845301}, "calidhayte.results.Results.x": {"tf": 1.4142135623730951}, "calidhayte.results.Results.y": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 7.416198487095663}, "calidhayte.results.Results.errors": {"tf": 11.958260743101398}, "calidhayte.results.Results.explained_variance_score": {"tf": 5.385164807134504}, "calidhayte.results.Results.max": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_absolute": {"tf": 5.385164807134504}, "calidhayte.results.Results.root_mean_squared": {"tf": 5.385164807134504}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 5.385164807134504}, "calidhayte.results.Results.median_absolute": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 5.385164807134504}, "calidhayte.results.Results.r2": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 5.385164807134504}, "calidhayte.results.Results.return_errors": {"tf": 16.97056274847714}, "calidhayte.summary": {"tf": 
1.7320508075688772}, "calidhayte.summary.Summary": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.__init__": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.results": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.plots": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.cols": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.style": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.backend": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.boxplots": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.histograms": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.save_plots": {"tf": 1.7320508075688772}}, "df": 106, "c": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}}, "df": 6, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 7}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}}, "df": 12, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, 
"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 12, "s": {"docs": {"calidhayte.graphs.Graphs": {"tf": 1}}, "df": 1}, "d": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "@": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "n": {"docs": {"calidhayte": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 3}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}, "d": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}, "m": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}}, "df": 2}}}}, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, 
"df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "@": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 5}}}}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 8, "s": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.__init__": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}}}}}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": 
{"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 10}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 2.449489742783178}}, "df": 3}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 38}}}}}}}}}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "g": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 10}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}}, "df": 4}}}, "o": {"docs": {"calidhayte": {"tf": 2.8284271247461903}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 3}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 20}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 2.449489742783178}, "calidhayte.graphs.Graphs.plots": {"tf": 2.449489742783178}, "calidhayte.results.Results.models": {"tf": 2.449489742783178}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 45, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 4}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 4}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 3.1622776601683795}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 2}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 2.449489742783178}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.plots": {"tf": 2.6457513110645907}, "calidhayte.results": {"tf": 1}, 
"calidhayte.results.CoefficientPipelineDict": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2.449489742783178}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1.4142135623730951}, "calidhayte.results.Results.median_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1.4142135623730951}, "calidhayte.results.Results.r2": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 2.6457513110645907}}, "df": 33, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "m": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 5}, "i": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}}, "df": 6}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, 
"calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 15}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 7}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}, "w": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte": {"tf": 2.8284271247461903}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2.23606797749979}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1.7320508075688772}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 2}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 57}, "n": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": 
{"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 47, "e": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 4}, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "r": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 10, "g": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": 
{"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}}, "df": 3, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}}, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.7320508075688772}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 2}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 43, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 4, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 4}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 6}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2}}, "df": 6, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 
1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 6}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}}, "df": 5}}}}, "u": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2}}, "df": 2, "r": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 
2}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2}, "calidhayte.results.Results.errors": {"tf": 1.7320508075688772}, "calidhayte.results.Results.return_errors": {"tf": 2}}, "df": 9, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 3}}}}}}}}, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "x": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1}}, "df": 2}, "n": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"calidhayte": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": 
{"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}}, "df": 11}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.target": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 10}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.results.Results.errors": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}}, "df": 2}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}}, "df": 5, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, 
"t": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 11}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 2}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 4, "s": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2.449489742783178}, "calidhayte.results.Results.models": {"tf": 2.449489742783178}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 10, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 2}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 8}}, "l": {"docs": {}, 
"df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 2}}}, "r": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 6}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 6}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1.7320508075688772}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 39, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 2}}, "s": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, 
"df": 3}}}}}}, "x": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 4}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 18, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 2.23606797749979}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}}, "df": 6}}}}}}}, "x": 
{"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 4}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 9, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "f": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}}, "df": 6}}, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}}, "df": 5}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, 
"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 41}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": 
{"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 3}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 3.1622776601683795}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 17, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 2}}, "df": 6, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}, "f": {"docs": {"calidhayte": {"tf": 3.1622776601683795}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2.8284271247461903}}, "df": 2}}, "e": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.plots": {"tf": 2.8284271247461903}, "calidhayte.results.Results.models": {"tf": 2.6457513110645907}}, "df": 7, "x": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 5}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, 
"calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.7320508075688772}}, "df": 9, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}}, "df": 2}}, "s": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 23}}}, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 5, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 2.449489742783178}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 2.449489742783178}}, "df": 13}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 8}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}}, "a": {"docs": {"calidhayte": {"tf": 3}, "calidhayte.calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate": {"tf": 3.3166247903554}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2}, 
"calidhayte.graphs.Graphs.plots": {"tf": 1.7320508075688772}, "calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 2}, "calidhayte.results.Results.errors": {"tf": 1.7320508075688772}, "calidhayte.results.Results.return_errors": {"tf": 2.23606797749979}}, "df": 18, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}}, "df": 7}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 3, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 3}}}}}, "d": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, 
"calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 24}, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 9}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 6, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.scaler": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 2}}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 6}}}}}}}, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 2}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}}}, "v": {"2": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}, "docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1}}, "df": 3, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, 
"calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 11}}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 9}}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 3, "s": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 15}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": 
{"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 25}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}}, "df": 3}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 42}}}}}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 
1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.scaler": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 13}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.models": {"tf": 1}}, "df": 1}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 9}}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte": {"tf": 2}}, "df": 1, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": 
{"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 2}}, "df": 3, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}}, "df": 3}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"calidhayte.calibrate.Calibrate.pls": {"tf": 1.4142135623730951}}, "df": 1}}, "y": {"docs": {"calidhayte.results.Results": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 6}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "n": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}}, "df": 1}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 3, "o": {"2": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 2, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 2.449489742783178}}, "df": 1}, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": 
{"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 51}}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}}, "df": 1, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, 
"calidhayte.results.Results.models": {"tf": 1}}, "df": 5}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}}, "df": 4}}}, "b": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 10, "y": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 5}, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.backend": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {"calidhayte": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 8, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, 
"calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 13}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 4}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "k": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 4, "e": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 7, "s": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 3}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 4}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}}, "df": 3}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 6}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 2}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}}, "df": 4}}}, "g": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.plots": {"tf": 2.8284271247461903}, "calidhayte.results.Results.models": {"tf": 2.6457513110645907}}, "df": 6, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs": {"tf": 1}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "+": 
{"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 3.4641016151377544}, "calidhayte.calibrate.Calibrate": {"tf": 8.888194417315589}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2}}, "df": 6}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "r": {"2": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 42, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"calidhayte": {"tf": 3.3166247903554}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 4}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 6}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}}}, 
"p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}}, "df": 2, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 8}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results": {"tf": 1.4142135623730951}}, "df": 4}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "x": {"docs": {"calidhayte": {"tf": 3.7416573867739413}, 
"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.7320508075688772}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 
1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 2.449489742783178}}, "df": 67, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}}}}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 6, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {"calidhayte": {"tf": 3}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.pls": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.explained_variance_score": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1.4142135623730951}, "calidhayte.results.Results.median_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1.4142135623730951}, "calidhayte.results.Results.r2": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1.4142135623730951}}, "df": 61, "e": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 8}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}}, "df": 4}}}, "k": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 
1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 12}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "g": {"docs": {"calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.results": {"tf": 1.4142135623730951}}, "df": 3}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}}, "df": 3}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 10}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.7320508075688772}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}, "pipeline": ["trimmer"], "_isPrebuiltIndex": true}; + /** pdoc search index */const docs = {"version": "0.9.5", "fields": ["qualname", "fullname", "annotation", 
"default_value", "signature", "bases", "doc"], "ref": "fullname", "documentStore": {"docs": {"calidhayte": {"fullname": "calidhayte", "modulename": "calidhayte", "kind": "module", "doc": "

    \n calidhayte\n

    \n\n

    Contact: CaderIdrisGH@outlook.com

    \n\n

    \"Tests\"

    \n\n
    \n\n

    Table of Contents

    \n\n
      \n
1. Summary
2. Main Features
3. How to Install
4. Dependencies
5. Example Usage
6. Acknowledgements
    \n\n
    \n\n

    Summary

    \n\n

calidhayte calibrates one set of measurements against another, using a variety of parametric and non-parametric techniques.\nThe datasets are split by k-fold cross validation and stratified so the distribution of 'true' measurements is consistent in all folds.\nIt can then perform multiple error calculations to validate the calibrations, as well as produce several graphs to visualise them.

    \n\n
    \n\n

    Main Features

    \n\n
      \n
• Calibrate one set of measurements (cross-comparing all available secondary variables) against a 'true' set
  • A suite of calibration methods is available, including Bayesian regression
• Perform a suite of error calculations on the resulting calibration
• Visualise the results of the calibration
• Summarise calibrations to highlight the best performing techniques
    \n\n
    \n\n

    How to install

    \n\n

    pip

    \n\n
    \n
    pip install git+https://github.com/CaderIdris/calidhayte@release_tag\n
    \n
    \n\n

    conda

    \n\n
    \n
    conda install git pip\npip install git+https://github.com/CaderIdris/calidhayte@release_tag \n
    \n
    \n\n

    The release tags can be found in the sidebar

    \n\n
    \n\n

    Dependencies

    \n\n

    Please see Pipfile.

    \n\n
    \n\n

    Example Usage

    \n\n

    This module requires two dataframes as a prerequisite.

    \n\n

    Independent Measurements

[Table: independent measurements indexed by date (2022-01-01 to 2022-09-30) with columns x, a, b, c, d and e; cell values are floats and may be nan]
    \n\n

    Dependent Measurements

|            |  x  |
| 2022-01-02 |  1  |
| 2022-01-05 |  3  |
| ...        | ... |
| 2022-09-29 | nan |
| 2022-09-30 | 37  |
| 2022-10-01 |  3  |
    \n\n
      \n
• The two dataframes are joined on the index as an inner join, so the indices do not have to match initially
• nan values can be present
• More than one column can be present in the dependent measurements, but only the column named by target will be used
• The index can contain date objects, datetime objects or integers. Indices should be unique. Strings are untested and may cause unexpected behaviour
    \n\n
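To make these requirements concrete, here is a minimal sketch of two acceptable dataframes; the column names and values are illustrative only and are not taken from the package:

import numpy as np
import pandas as pd

# Hypothetical independent measurements: several co-located variables,
# indexed by date. Values are made up for illustration.
x_df = pd.DataFrame(
    {
        "x": [0.1, 0.7, np.nan, 0.5],
        "a": [2.2, 3.1, 1.0, 2.7],
        "b": [7.0, 8.9, np.nan, 4.0],
    },
    index=pd.to_datetime(["2022-01-01", "2022-01-02", "2022-01-03", "2022-09-30"]),
)

# Hypothetical dependent ('true') measurements sharing the target column name.
# nan values and non-overlapping dates are fine because the join is inner.
y_df = pd.DataFrame(
    {"x": [1.0, 3.0, np.nan, 37.0]},
    index=pd.to_datetime(["2022-01-02", "2022-01-05", "2022-09-29", "2022-09-30"]),
)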
    \n
from calidhayte import Calibrate, Results, Graphs, Summary\n\n# x_df is a dataframe containing multiple columns of independent measurements.\n# The primary measurement is the column passed as target ('x' here); the other measurement columns can have any name.\n# y_df is a dataframe containing the dependent measurement in its 'x' column.\n\ncoeffs = Calibrate(\n    x_data=x_df,\n    y_data=y_df,\n    target='x'\n)\n\ncoeffs.linreg()\ncoeffs.theil_sen()\ncoeffs.random_forest(n_estimators=500, max_features=1.0)\n\nmodels = coeffs.return_models()\n\nresults = Results(\n    x=x_df,\n    y=y_df,\n    target='x',\n    models=models\n)\n\nresults.r2()\nresults.median_absolute()\nresults.max()\n\nresults_df = results.return_errors()\nresults_df.to_csv('results.csv')\n\ngraphs = Graphs(\n    x=x_df,\n    y=y_df,\n    target='x',\n    models=models,\n    x_name='x',\n    y_name='y'\n)\ngraphs.ecdf_plot()\ngraphs.lin_reg_plot()\ngraphs.save_plots()\n
    \n
    \n\n
    \n\n

    Acknowledgements

    \n\n

Many thanks to James Murphy at Mcoding, whose excellent tutorial Automated Testing in Python and its associated repository helped a lot when structuring this package.

    \n"}, "calidhayte.calibrate": {"fullname": "calidhayte.calibrate", "modulename": "calidhayte.calibrate", "kind": "module", "doc": "

    Contains code used to perform a range of univariate and multivariate\nregressions on provided data.

    \n\n

Acts as a wrapper for scikit-learn [1], XGBoost [2] and PyMC (via Bambi) [3]

    \n\n\n"}, "calidhayte.calibrate.cont_strat_folds": {"fullname": "calidhayte.calibrate.cont_strat_folds", "modulename": "calidhayte.calibrate", "qualname": "cont_strat_folds", "kind": "function", "doc": "

    Creates stratified k-folds on continuous variable

    \n\n

df : pd.DataFrame\n Target data to stratify on.\ntarget_var : str\n Target feature name.\nsplits : int, default=5\n Number of folds to make.\nstrat_groups : int, default=5\n Number of groups to split the data into for stratification.\nseed : int, default=62\n Random state to use.

    \n\n
    Returns
    \n\n
      \n
• pd.DataFrame: df with an added 'Fold' column specifying which test-data fold\neach row corresponds to.
    • \n
    \n\n
    Examples
    \n\n
    \n
>>> df = pd.read_csv('data.csv')\n>>> df\n|    | x | a | b |\n|    |   |   |   |\n|  0 |2.3|1.8|7.2|\n|  1 |3.2|9.6|4.5|\n|....|...|...|...|\n|1000|2.3|4.5|2.2|\n>>> df_with_folds = cont_strat_folds(\n        df=df,\n        target_var='a',\n        splits=3,\n        strat_groups=3,\n        seed=78\n    )\n>>> df_with_folds\n|    | x | a | b |Fold|\n|    |   |   |   |    |\n|  0 |2.3|1.8|7.2| 2  |\n|  1 |3.2|9.6|4.5| 1  |\n|....|...|...|...|....|\n|1000|2.3|4.5|2.2| 0  |\n
    \n
    \n\n

    All folds should have a roughly equal distribution of values for 'a'
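As a quick sanity check of this property, a minimal sketch (assuming a dataframe `df` with a continuous column 'a', as in the example above):

from calidhayte.calibrate import cont_strat_folds

# `df` is assumed to already exist and contain a continuous column 'a'.
df_with_folds = cont_strat_folds(df, target_var='a', splits=3, strat_groups=3, seed=78)
# Each fold should show a similar mean and spread for 'a' if stratification worked.
print(df_with_folds.groupby('Fold')['a'].describe())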

    \n", "signature": "(\tdf: pandas.core.frame.DataFrame,\ttarget_var: str,\tsplits: int = 5,\tstrat_groups: int = 5,\tseed: int = 62) -> pandas.core.frame.DataFrame:", "funcdef": "def"}, "calidhayte.calibrate.Calibrate": {"fullname": "calidhayte.calibrate.Calibrate", "modulename": "calidhayte.calibrate", "qualname": "Calibrate", "kind": "class", "doc": "

Calibrate x against y using a range of different methods provided by\nscikit-learn [1], xgboost [2] and PyMC (via Bambi) [3].

    \n\n
    Examples
    \n\n
    \n
>>> from calidhayte.calibrate import Calibrate\n>>> import pandas as pd\n>>>\n>>> x = pd.read_csv('independent.csv')\n>>> x\n|   | a | b |\n| 0 |2.3|3.2|\n| 1 |3.4|3.1|\n|...|...|...|\n|100|3.7|2.1|\n>>>\n>>> y = pd.read_csv('dependent.csv')\n>>> y\n|   | a |\n| 0 |7.8|\n| 1 |9.9|\n|...|...|\n|100|9.5|\n>>>\n>>> calibration = Calibrate(\n    x_data=x,\n    y_data=y,\n    target='a',\n    folds=5,\n    strat_groups=5,\n    scaler = [\n        'Standard Scale',\n        'MinMax Scale'\n        ],\n    seed=62\n)\n>>> calibration.linreg()\n>>> calibration.lars()\n>>> calibration.omp()\n>>> calibration.ransac()\n>>> calibration.random_forest()\n>>>\n>>> models = calibration.return_models()\n>>> list(models.keys())\n[\n    'Linear Regression',\n    'Least Angle Regression',\n    'Orthogonal Matching Pursuit',\n    'RANSAC',\n    'Random Forest'\n]\n>>> list(models['Linear Regression'].keys())\n['Standard Scale', 'MinMax Scale']\n>>> list(models['Linear Regression']['Standard Scale'].keys())\n['a', 'a + b']\n>>> list(models['Linear Regression']['Standard Scale']['a'].keys())\n[0, 1, 2, 3, 4]\n>>> type(models['Linear Regression']['Standard Scale']['a'][0])\n<class sklearn.pipeline.Pipeline>\n>>> pipeline = models['Linear Regression']['Standard Scale']['a'][0]\n>>> x_new = pd.read_csv('independent_new.csv')\n>>> x_new\n|   | a | b |\n| 0 |3.5|2.7|\n| 1 |4.0|1.1|\n|...|...|...|\n|100|2.3|2.1|\n>>> pipeline.predict(x_new)\n|   | a |\n| 0 |9.7|\n| 1 |9.1|\n|...|...|\n|100|6.7|\n
    \n
    \n\n\n"}, "calidhayte.calibrate.Calibrate.__init__": {"fullname": "calidhayte.calibrate.Calibrate.__init__", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.__init__", "kind": "function", "doc": "

    Initialises class

    \n\n

    Used to compare one set of measurements against another.\nIt can perform both univariate and multivariate regression, though\nsome techniques can only do one or the other. Multivariate regression\ncan only be performed when secondary variables are provided.

    \n\n
    Parameters
    \n\n
      \n
• x_data (pd.DataFrame): Data to be calibrated.
• y_data (pd.DataFrame): 'True' data to calibrate against.
• target (str): Column name of the primary feature to use in calibration; must be the name of a column in both x_data and y_data.
• folds (int, default=5): Number of folds to split the data into, using stratified k-fold.
• strat_groups (int, default=10): Number of groups to stratify against; the data will be split into n equally sized bins, where n is the value of strat_groups.
• scaler (str or iterable of str, default='None'): The scaling/transform method (or list of methods) to apply to the data. Valid options are 'None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson Transform', 'Box-Cox Transform', 'Quantile Transform (Uniform)' and 'Quantile Transform (Gaussian)'; 'All' is also accepted as a single value. A standalone mapping of these names to scikit-learn transformers is sketched after this list.
• seed (int, default=62): Random state to use when shuffling and splitting the data into n folds. Ensures repeatability.
    \n\n
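These scaler names correspond to standard scikit-learn preprocessing transformers. The mapping below is an illustrative sketch of plausible equivalents, not the package's internal configuration:

from sklearn import preprocessing as pre

# Plausible scikit-learn equivalents for each scaler key (an assumption,
# not the package's exact scaler_list).
scaler_equivalents = {
    "None": None,  # no preprocessing step
    "Standard Scale": pre.StandardScaler(),
    "MinMax Scale": pre.MinMaxScaler(),
    "Yeo-Johnson Transform": pre.PowerTransformer(method="yeo-johnson"),
    "Box-Cox Transform": pre.PowerTransformer(method="box-cox"),
    "Quantile Transform (Uniform)": pre.QuantileTransformer(output_distribution="uniform"),
    "Quantile Transform (Gaussian)": pre.QuantileTransformer(output_distribution="normal"),
}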
    Raises
    \n\n
      \n
• ValueError: Raised if the target variable (e.g. 'NO2') is not a column name in\nboth dataframes.\nAlso raised if scaler is not a str, tuple or list.
    • \n
    \n", "signature": "(\tx_data: pandas.core.frame.DataFrame,\ty_data: pandas.core.frame.DataFrame,\ttarget: str,\tfolds: int = 5,\tstrat_groups: int = 10,\tscaler: Union[collections.abc.Iterable[Literal['None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson Transform', 'Box-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)']], Literal['All', 'None', 'Standard Scale', 'MinMax Scale', 'Yeo-Johnson Transform', 'Box-Cox Transform', 'Quantile Transform (Uniform)', 'Quantile Transform (Gaussian)']] = 'None',\tseed: int = 62)"}, "calidhayte.calibrate.Calibrate.x_data": {"fullname": "calidhayte.calibrate.Calibrate.x_data", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.x_data", "kind": "variable", "doc": "

    The data to be calibrated.

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.calibrate.Calibrate.target": {"fullname": "calidhayte.calibrate.Calibrate.target", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.target", "kind": "variable", "doc": "

    The name of the column in both x_data and y_data that\nwill be used as the x and y variables in the calibration.

    \n", "annotation": ": str"}, "calidhayte.calibrate.Calibrate.scaler_list": {"fullname": "calidhayte.calibrate.Calibrate.scaler_list", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.scaler_list", "kind": "variable", "doc": "

    Keys for scaling algorithms available in the pipelines

    \n", "annotation": ": dict[str, typing.Any]"}, "calidhayte.calibrate.Calibrate.scaler": {"fullname": "calidhayte.calibrate.Calibrate.scaler", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.scaler", "kind": "variable", "doc": "

    The scaling algorithm(s) to preprocess the data with

    \n", "annotation": ": list[str]"}, "calidhayte.calibrate.Calibrate.y_data": {"fullname": "calidhayte.calibrate.Calibrate.y_data", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.y_data", "kind": "variable", "doc": "

The data that x_data will be calibrated against. A 'Fold'\ncolumn is added using the cont_strat_folds function, which splits\nthe data into k stratified folds (where k is the value of\nfolds). It splits the continuous measurements into n bins (where n\nis the value of strat_groups) and distributes each bin equally\nacross all folds. This significantly reduces the chances of one fold\ncontaining a skewed distribution relative to the whole dataset.

    \n"}, "calidhayte.calibrate.Calibrate.models": {"fullname": "calidhayte.calibrate.Calibrate.models", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.models", "kind": "variable", "doc": "

    The calibrated models. They are stored in a nested structure as\nfollows:

    \n\n
      \n
1. Primary key: name of the technique (e.g. Lasso Regression).
2. Scaling technique (e.g. Yeo-Johnson Transform).
3. Combination of variables used, or the target alone if the calibration is\nunivariate (e.g. "target + a + b").
4. Fold: which fold was excluded from the calibration. If the data\nis 5-fold cross validated, a key of 4 indicates the model was trained on\nfolds 0-3.
    \n\n
    stateDiagram-v2\n models --> Technique\n state Technique {\n [*] --> Scaling\n [*]: The calibration technique used\n [*]: (e.g \"Lasso Regression\")\n state Scaling {\n [*] --> Variables\n [*]: The scaling technique used\n [*]: (e.g \"Yeo-Johnson Transform\")\n state Variables {\n [*] : The combination of variables used\n [*] : (e.g \"x + a + b\")\n [*] --> Fold\n state Fold {\n [*] : Which fold was excluded from training data\n [*] : (e.g 4 indicates folds 0-3 were used to train)\n }\n }\n }\n }\n
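To make the nesting concrete, a hedged sketch of walking this structure and applying each fitted pipeline to new data; `models` is the returned dictionary and `x_new` is assumed to contain the columns used in training:

# models: dict[technique][scaling][variable combination][fold] -> sklearn Pipeline
for technique, by_scaler in models.items():
    for scaling, by_vars in by_scaler.items():
        for variables, by_fold in by_vars.items():
            for fold, pipeline in by_fold.items():
                # Each leaf is a fitted sklearn Pipeline; predict gives calibrated values.
                predictions = pipeline.predict(x_new)
                print(technique, scaling, variables, fold, predictions[:3])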
    \n", "annotation": ": dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]"}, "calidhayte.calibrate.Calibrate.folds": {"fullname": "calidhayte.calibrate.Calibrate.folds", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.folds", "kind": "variable", "doc": "

    The number of folds used in k-fold cross validation

    \n", "annotation": ": int"}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"fullname": "calidhayte.calibrate.Calibrate.pymc_bayesian", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.pymc_bayesian", "kind": "function", "doc": "

Performs Bayesian linear regression (either univariate or multivariate),\nfitting x on y.

    \n\n

    Performs bayesian linear regression, both univariate and multivariate,\non X against y. More details can be found at:\nhttps://pymc.io/projects/examples/en/latest/generalized_linear_models/\nGLM-robust.html

    \n\n
    Parameters
    \n\n
      \n
• family ({'Gaussian', 'Student T'}, default='Gaussian'):\nStatistical distribution to fit measurements to. Options are:\n - Gaussian\n - Student T
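For orientation only, a rough standalone sketch of the kind of Bambi model this wraps; the call below is an assumption about typical Bambi usage rather than the package's exact code, and `measurements_df` is a hypothetical dataframe with 'x' and 'y' columns:

import bambi as bmb

# Hypothetical example: robust (Student T) Bayesian regression of y on x,
# roughly the sort of model a PyMC/Bambi-backed calibration fits.
model = bmb.Model("y ~ x", data=measurements_df, family="t")
fitted = model.fit(draws=1000)  # returns an ArviZ InferenceData object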
    \n", "signature": "(\tself,\tfamily: Literal['Gaussian', 'Student T'] = 'Gaussian',\tname: str = ' PyMC Bayesian',\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.linreg": {"fullname": "calidhayte.calibrate.Calibrate.linreg", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.linreg", "kind": "function", "doc": "

    Fit x on y via linear regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Linear Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[):\nstr,\n Union[\n scipy.stats.rv_continuous,\n List[Union[int, str, float]]\n ]\n], default=Preset distributions\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Linear Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.ridge": {"fullname": "calidhayte.calibrate.Calibrate.ridge", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.ridge", "kind": "function", "doc": "

    Fit x on y via ridge regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Ridge Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[):\nstr,\n Union[\n scipy.stats.rv_continuous,\n List[Union[int, str, float]]\n ]\n], default=Preset distributions\nThe parameters used in RandomizedSearchCV
    • \n
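The parameters mapping mirrors scikit-learn's RandomizedSearchCV interface: continuous hyperparameters are supplied as scipy distributions to sample from, and discrete ones as lists. A self-contained sketch of that pattern (not the package's internal code), using ridge regression on synthetic data:

from scipy.stats import uniform
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV

# Synthetic data purely for illustration.
X, y = make_regression(n_samples=200, n_features=3, noise=1.0, random_state=62)

param_distributions = {
    "alpha": uniform(0, 2),                  # sampled from a continuous distribution
    "solver": ["svd", "cholesky", "lsqr"],   # sampled uniformly from a list
}
search = RandomizedSearchCV(Ridge(), param_distributions, n_iter=20, cv=5, random_state=62)
search.fit(X, y)
print(search.best_params_)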
    \n", "signature": "(\tself,\tname: str = 'Ridge Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe30d50>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe31950>, 'solver': ['svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga', 'lbfgs']},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.ridge_cv": {"fullname": "calidhayte.calibrate.Calibrate.ridge_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.ridge_cv", "kind": "function", "doc": "

    Fit x on y via cross-validated ridge regression.\nAlready cross validated so random search not required

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Ridge Regression (Cross Validated)\"):\nName of classification technique
    • \n
    • random_search (bool, default=False):\nNot used
    • \n
    \n", "signature": "(\tself,\tname: str = 'Ridge Regression (Cross Validated)',\trandom_search: bool = False,\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.lasso": {"fullname": "calidhayte.calibrate.Calibrate.lasso", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.lasso", "kind": "function", "doc": "

    Fit x on y via lasso regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Lasso Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[):\nstr,\n Union[\n scipy.stats.rv_continuous,\n List[Union[int, str, float]]\n ]\n], default=Preset distributions\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Lasso Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe32550>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe32650>, 'selection': ['cyclic', 'random']},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.lasso_cv": {"fullname": "calidhayte.calibrate.Calibrate.lasso_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.lasso_cv", "kind": "function", "doc": "

    Fit x on y via cross-validated lasso regression.\nAlready cross validated so random search not required

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Lasso Regression (Cross Validated)\"):\nName of classification technique
    • \n
    • random_search (bool, default=False):\nNot used
    • \n
    \n", "signature": "(\tself,\tname: str = 'Lasso Regression (Cross Validated)',\trandom_search: bool = False,\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"fullname": "calidhayte.calibrate.Calibrate.multi_task_lasso", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.multi_task_lasso", "kind": "function", "doc": "

    Fit x on y via multitask lasso regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Multi-task Lasso Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[):\nstr,\n Union[\n scipy.stats.rv_continuous,\n List[Union[int, str, float]]\n ]\n], default=Preset distributions\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Multi-task Lasso Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe32c10>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe33310>, 'selection': ['cyclic', 'random']},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"fullname": "calidhayte.calibrate.Calibrate.multi_task_lasso_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.multi_task_lasso_cv", "kind": "function", "doc": "

    Fit x on y via cross-validated multitask lasso regression.\nAlready cross validated so random search not required

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Multi-task Lasso Regression (Cross Validated)\"):\nName of classification technique
    • \n
    • random_search (bool, default=False):\nNot used
    • \n
    \n", "signature": "(\tself,\tname: str = 'Multi-task Lasso Regression (Cross Validated)',\trandom_search: bool = False,\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.elastic_net": {"fullname": "calidhayte.calibrate.Calibrate.elastic_net", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.elastic_net", "kind": "function", "doc": "

    Fit x on y via elastic net regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Elastic Net Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Elastic Net Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe338d0>, 'l1_ratio': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbe33fd0>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3c5d0>, 'selection': ['cyclic', 'random']},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"fullname": "calidhayte.calibrate.Calibrate.elastic_net_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.elastic_net_cv", "kind": "function", "doc": "

    Fit x on y via cross-validated elastic net regression.\nAlready cross-validated, so a random search is not required

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Lasso Regression (Cross Validated)\"):\nName of classification technique
    • \n
    • random_search (bool, default=False):\nNot used
    • \n
    \n", "signature": "(\tself,\tname: str = 'Elastic Net Regression (Cross Validated)',\trandom_search: bool = False,\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"fullname": "calidhayte.calibrate.Calibrate.multi_task_elastic_net", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.multi_task_elastic_net", "kind": "function", "doc": "

    Fit x on y via multi-task elastic net regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Multi-task Elastic Net Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Multi-task Elastic Net Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3cbd0>, 'l1_ratio': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3d310>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3da10>, 'selection': ['cyclic', 'random']},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"fullname": "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.multi_task_elastic_net_cv", "kind": "function", "doc": "

    Fit x on y via cross-validated multi-task elastic net regression.\nAlready cross-validated, so a random search is not required

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Multi-Task Elastic Net Regression (Cross Validated)\"):\nName of classification technique
    • \n
    • random_search (bool, default=False):\nNot used
    • \n
    \n", "signature": "(\tself,\tname: str = 'Multi-Task Elastic Net Regression (Cross Validated)',\trandom_search: bool = False,\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.lars": {"fullname": "calidhayte.calibrate.Calibrate.lars", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.lars", "kind": "function", "doc": "

    Fit x on y via least angle regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Least Angle Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Least Angle Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_nonzero_coefs': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.lars_lasso": {"fullname": "calidhayte.calibrate.Calibrate.lars_lasso", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.lars_lasso", "kind": "function", "doc": "

    Fit x on y via least angle lasso regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Least Angle Lasso Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Least Angle Lasso Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3e710>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.omp": {"fullname": "calidhayte.calibrate.Calibrate.omp", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.omp", "kind": "function", "doc": "

    Fit x on y via orthogonal matching pursuit regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Orthogonal Matching Pursuit\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Orthogonal Matching Pursuit',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_nonzero_coefs': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"fullname": "calidhayte.calibrate.Calibrate.bayesian_ridge", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.bayesian_ridge", "kind": "function", "doc": "

    Fit x on y via bayesian ridge regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Bayesian Ridge Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Bayesian Ridge Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3f010>, 'alpha_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3f2d0>, 'alpha_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc3f9d0>, 'lambda_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc48110>, 'lambda_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc48810>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"fullname": "calidhayte.calibrate.Calibrate.bayesian_ard", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.bayesian_ard", "kind": "function", "doc": "

    Fit x on y via bayesian automatic relevance detection

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Bayesian Automatic Relevance Detection\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Bayesian Automatic Relevance Detection',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc48f10>, 'alpha_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc496d0>, 'alpha_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc49dd0>, 'lambda_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc4a4d0>, 'lambda_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc4abd0>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.tweedie": {"fullname": "calidhayte.calibrate.Calibrate.tweedie", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.tweedie", "kind": "function", "doc": "

    Fit x on y via tweedie regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Tweedie Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Tweedie Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'power': [0, 1, 1.5, 2, 2.5, 3], 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc4b2d0>, 'solver': ['lbfgs', 'newton-cholesky'], 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc4bb10>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"fullname": "calidhayte.calibrate.Calibrate.stochastic_gradient_descent", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.stochastic_gradient_descent", "kind": "function", "doc": "

    Fit x on y via stochastic gradient descent

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Stochastic Gradient Descent\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Stochastic Gradient Descent',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc502d0>, 'loss': ['squared_error', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'], 'penalty': ['l2', 'l1', 'elasticnet', None], 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc50ad0>, 'l1_ratio': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc51290>, 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc519d0>, 'learning_rate': ['constant', 'optimal', 'invscaling', 'adaptive'], 'eta0': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc52110>, 'power_t': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc52890>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"fullname": "calidhayte.calibrate.Calibrate.passive_aggressive", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.passive_aggressive", "kind": "function", "doc": "

    Fit x on y via passive aggressive regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Passive Aggressive Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Passive Aggressive Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'C': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc52fd0>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc537d0>, 'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive'], 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc53f10>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.ransac": {"fullname": "calidhayte.calibrate.Calibrate.ransac", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.ransac", "kind": "function", "doc": "

    Fit x on y via RANSAC (random sample consensus) regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"RANSAC\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'RANSAC',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'estimator': [LinearRegression()]},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.theil_sen": {"fullname": "calidhayte.calibrate.Calibrate.theil_sen", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.theil_sen", "kind": "function", "doc": "

    Fit x on y via theil-sen regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Theil-Sen Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Theil-Sen Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc68cd0>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.huber": {"fullname": "calidhayte.calibrate.Calibrate.huber", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.huber", "kind": "function", "doc": "

    Fit x on y via huber regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Huber Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Huber Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc69010>, 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc69810>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc69f50>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.quantile": {"fullname": "calidhayte.calibrate.Calibrate.quantile", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.quantile", "kind": "function", "doc": "

    Fit x on y via quantile regression

    \n\n

    Parameters
    \n\n
      \n
    • name (str, default=\"Quantile Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Quantile Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'quantile': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6a690>, 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6ae90>, 'tol': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6b5d0>, 'solver': ['highs-ds', 'highs-ipm', 'highs', 'revised simplex']},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.decision_tree": {"fullname": "calidhayte.calibrate.Calibrate.decision_tree", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.decision_tree", "kind": "function", "doc": "

    Fit x on y via decision tree

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Decision Tree\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Decision Tree',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'splitter': ['best', 'random'], 'max_features': [None, 'sqrt', 'log2'], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6bd10>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.extra_tree": {"fullname": "calidhayte.calibrate.Calibrate.extra_tree", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.extra_tree", "kind": "function", "doc": "

    Fit x on y via extra tree

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Extra Tree\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Extra Tree',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'splitter': ['best', 'random'], 'max_features': [None, 'sqrt', 'log2'], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6c650>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.random_forest": {"fullname": "calidhayte.calibrate.Calibrate.random_forest", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.random_forest", "kind": "function", "doc": "

    Fit x on y via random forest

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Random Forest\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
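    As an illustrative sketch (assuming an existing `Calibrate` instance `cal`), list-valued entries restrict the random search to the listed candidates:

    >>> cal.random_forest(
    ...     random_search=True,
    ...     parameters={
    ...         'n_estimators': [50, 100, 200],          # candidate ensemble sizes
    ...         'max_features': [None, 'sqrt', 'log2'],  # feature-subsetting options
    ...     },
    ... )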
    \n", "signature": "(\tself,\tname: str = 'Random Forest',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'bootstrap': [True, False], 'max_samples': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6d510>, 'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'max_features': [None, 'sqrt', 'log2'], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6d7d0>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"fullname": "calidhayte.calibrate.Calibrate.extra_trees_ensemble", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.extra_trees_ensemble", "kind": "function", "doc": "

    Fit x on y via extra trees ensemble

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Extra Trees Ensemble\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Extra Trees Ensemble',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'bootstrap': [True, False], 'max_samples': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6e590>, 'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'max_features': [None, 'sqrt', 'log2'], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6e850>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"fullname": "calidhayte.calibrate.Calibrate.gradient_boost_regressor", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.gradient_boost_regressor", "kind": "function", "doc": "

    Fit x on y via gradient boosting regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Gradient Boosting Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Gradient Boosting Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'loss': ['squared_error', 'absolute_error', 'huber', 'quantile'], 'learning_rate': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6f010>, 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'subsample': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc6fe50>, 'criterion': ['friedman_mse', 'squared_error'], 'max_features': [None, 'sqrt', 'log2'], 'init': [None, 'zero', <class 'sklearn.linear_model._base.LinearRegression'>, <class 'sklearn.linear_model._theil_sen.TheilSenRegressor'>], 'ccp_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc74050>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"fullname": "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.hist_gradient_boost_regressor", "kind": "function", "doc": "

    Fit x on y via histogram-based gradient boosting regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Histogram-Based Gradient Boosting Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Histogram-Based Gradient Boosting Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'loss': ['squared_error', 'absolute_error', 'gamma', 'poisson', 'quantile'], 'quantile': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc74850>, 'learning_rate': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc75090>, 'max_iter': [5, 10, 25, 50, 100, 200, 250, 500], 'l2_regularization': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc75dd0>, 'max_bins': [1, 3, 7, 15, 31, 63, 127, 255]},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"fullname": "calidhayte.calibrate.Calibrate.mlp_regressor", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.mlp_regressor", "kind": "function", "doc": "

    Fit x on y via multi-layer perceptron regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Multi-Layer Perceptron Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Multi-Layer Perceptron Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'hidden_layer_sizes': [(100,), (100, 200), (10,), (200, 400), (100, 200, 300)], 'activation': ['identity', 'logistic', 'tanh', 'relu'], 'solver': ['lbfgs', 'sgd', 'adam'], 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc76590>, 'batch_size': ['auto', 20, 200, 500, 1000, 5000, 10000], 'learning_rate': ['constant', 'invscaling', 'adaptive'], 'learning_rate_init': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc76ed0>, 'power_t': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc770d0>, 'max_iter': [5, 10, 25, 50, 100, 200, 250, 500], 'shuffle': [True, False], 'momentum': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc77e10>, 'beta_1': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc84050>, 'beta_2': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc84790>, 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc84ed0>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.svr": {"fullname": "calidhayte.calibrate.Calibrate.svr", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.svr", "kind": "function", "doc": "

    Fit x on y via support vector regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Support Vector Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Support Vector Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'degree': [2, 3, 4], 'gamma': ['scale', 'auto'], 'coef0': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc85610>, 'C': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc85ed0>, 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc86610>, 'shrinking': [True, False]},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.linear_svr": {"fullname": "calidhayte.calibrate.Calibrate.linear_svr", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.linear_svr", "kind": "function", "doc": "

    Fit x on y via linear support vector regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Linear Support Vector Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Linear Support Vector Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'C': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc86d50>, 'epsilon': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc87590>, 'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive']},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.nu_svr": {"fullname": "calidhayte.calibrate.Calibrate.nu_svr", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.nu_svr", "kind": "function", "doc": "

    Fit x on y via nu-support vector regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Nu-Support Vector Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Nu-Support Vector Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'degree': [2, 3, 4], 'gamma': ['scale', 'auto'], 'coef0': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc87cd0>, 'shrinking': [True, False], 'nu': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9c610>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.gaussian_process": {"fullname": "calidhayte.calibrate.Calibrate.gaussian_process", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.gaussian_process", "kind": "function", "doc": "

    Fit x on y via gaussian process regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Gaussian Process Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Gaussian Process Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'kernel': [None, <class 'sklearn.gaussian_process.kernels.RBF'>, <class 'sklearn.gaussian_process.kernels.Matern'>, <class 'sklearn.gaussian_process.kernels.DotProduct'>, <class 'sklearn.gaussian_process.kernels.WhiteKernel'>, <class 'sklearn.gaussian_process.kernels.CompoundKernel'>, <class 'sklearn.gaussian_process.kernels.ExpSineSquared'>], 'alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9cd90>, 'normalize_y': [True, False]},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.isotonic": {"fullname": "calidhayte.calibrate.Calibrate.isotonic", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.isotonic", "kind": "function", "doc": "

    Fit x on y via isotonic regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"Isotonic Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'Isotonic Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'increasing': [True, False]},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.xgboost": {"fullname": "calidhayte.calibrate.Calibrate.xgboost", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.xgboost", "kind": "function", "doc": "

    Fit x on y via xgboost regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"XGBoost Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'XGBoost Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'max_bins': [1, 3, 7, 15, 31, 63, 127, 255], 'grow_policy': ['depthwise', 'lossguide'], 'learning_rate': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9d9d0>, 'tree_method': ['exact', 'approx', 'hist'], 'gamma': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9e050>, 'subsample': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9e7d0>, 'reg_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9ef10>, 'reg_lambda': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9f650>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"fullname": "calidhayte.calibrate.Calibrate.xgboost_rf", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.xgboost_rf", "kind": "function", "doc": "

    Fit x on y via XGBoost random forest regression

    \n\n
    Parameters
    \n\n
      \n
    • name (str, default=\"XGBoost Random Forest Regression\"):\nName of classification technique.
    • \n
    • random_search (bool, default=False):\nWhether to perform RandomizedSearch to optimise parameters
    • \n
    • parameters (dict[ str, Union[ scipy.stats.rv_continuous, List[Union[int, str, float]] ] ], default=Preset distributions):\nThe parameters used in RandomizedSearchCV
    • \n
    \n", "signature": "(\tself,\tname: str = 'XGBoost Random Forest Regression',\trandom_search: bool = False,\tparameters: dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_continuous, typing.List[typing.Union[int, str, float]]]] = {'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], 'max_bin': [1, 3, 7, 15, 31, 63, 127, 255], 'grow_policy': ['depthwise', 'lossguide'], 'learning_rate': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbc9ff90>, 'tree_method': ['exact', 'approx', 'hist'], 'gamma': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbca0710>, 'subsample': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbca0e90>, 'reg_alpha': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbca15d0>, 'reg_lambda': <scipy.stats._distn_infrastructure.rv_continuous_frozen object at 0x7fc3dbca1d10>},\t**kwargs):", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.return_measurements": {"fullname": "calidhayte.calibrate.Calibrate.return_measurements", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.return_measurements", "kind": "function", "doc": "

    Returns the measurements used, with missing values and\nnon-overlapping measurements excluded

    \n\n
    Returns
    \n\n
      \n
    • dict[str, pd.DataFrame]: Dictionary with 2 keys:
    • \n
    |Key|Value |
    | x |x_data|
    | y |y_data|
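    A minimal usage sketch, assuming `cal` is a fitted `Calibrate` instance:

    >>> measurements = cal.return_measurements()
    >>> sorted(measurements.keys())
    ['x', 'y']
    >>> measurements['x'].head()  # x_data with missing and non-overlapping rows removed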
    \n", "signature": "(self) -> dict[str, pandas.core.frame.DataFrame]:", "funcdef": "def"}, "calidhayte.calibrate.Calibrate.return_models": {"fullname": "calidhayte.calibrate.Calibrate.return_models", "modulename": "calidhayte.calibrate", "qualname": "Calibrate.return_models", "kind": "function", "doc": "

    Returns the models stored in the object

    \n\n
    Returns
    \n\n
      \n
    • dict[str, dict[str, dict[str, dict[int, Pipeline]]]]: The calibrated models. They are stored in a nested structure as\nfollows:\n
        \n
      1. Primary Key, name of the technique (e.g Lasso Regression).
      2. \n
      3. Scaling technique (e.g Yeo-Johnson Transform).
      4. \n
      5. Combination of variables used or target if calibration is\nunivariate (e.g \"target + a + b).
      6. \n
      7. Fold, which fold was excluded from the calibration. If data\nis 5-fold cross validated, a key of 4 indicates the data was\ntrained on folds 0-3.
      8. \n
    • \n
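    For example, the nested dictionary can be walked down to each fold's Pipeline (a sketch assuming a fitted `Calibrate` instance `cal`):

    >>> models = cal.return_models()
    >>> for technique, scalers in models.items():
    ...     for scaler, var_combos in scalers.items():
    ...         for variables, folds in var_combos.items():
    ...             for fold, pipeline in folds.items():
    ...                 print(technique, scaler, variables, fold, type(pipeline))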
    \n", "signature": "(\tself) -> dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]:", "funcdef": "def"}, "calidhayte.graphs": {"fullname": "calidhayte.graphs", "modulename": "calidhayte.graphs", "kind": "module", "doc": "

    \n"}, "calidhayte.graphs.Graphs": {"fullname": "calidhayte.graphs.Graphs", "modulename": "calidhayte.graphs", "qualname": "Graphs", "kind": "class", "doc": "

    Calculates errors between \"true\" and \"predicted\" measurements, plots\ngraphs and returns all results
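    A usage sketch, assuming `x_df` and `y_df` are the measurement DataFrames and `models` is the nested dictionary returned by `Calibrate.return_models()` (all variable names and the target below are illustrative):

    >>> from calidhayte.graphs import Graphs
    >>> graphs = Graphs(
    ...     x=x_df, x_name='Sensor',
    ...     y=y_df, y_name='Reference',
    ...     target='NO2',      # illustrative measurand present in y
    ...     models=models,
    ...     style='bmh',
    ...     backend='Agg',     # non-interactive matplotlib backend
    ... )
    >>> graphs.bland_altman_plot()
    >>> graphs.save_plots(path='plots', filetype='png')  # 'plots' is an illustrative output path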

    \n"}, "calidhayte.graphs.Graphs.__init__": {"fullname": "calidhayte.graphs.Graphs.__init__", "modulename": "calidhayte.graphs", "qualname": "Graphs.__init__", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.frame.DataFrame,\tx_name: str,\ty: pandas.core.frame.DataFrame,\ty_name: str,\ttarget: str,\tmodels: dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]],\tstyle: str = 'bmh',\tbackend: str = 'TkAgg')"}, "calidhayte.graphs.Graphs.x": {"fullname": "calidhayte.graphs.Graphs.x", "modulename": "calidhayte.graphs", "qualname": "Graphs.x", "kind": "variable", "doc": "

    Independent variable(s) that are calibrated against y,\nthe dependent variable. Index should match y.

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.graphs.Graphs.y": {"fullname": "calidhayte.graphs.Graphs.y", "modulename": "calidhayte.graphs", "qualname": "Graphs.y", "kind": "variable", "doc": "

    Dependent variable used to calibrate the independent variables x.\nIndex should match x.

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.graphs.Graphs.x_name": {"fullname": "calidhayte.graphs.Graphs.x_name", "modulename": "calidhayte.graphs", "qualname": "Graphs.x_name", "kind": "variable", "doc": "

    Label for x measurements

    \n", "annotation": ": str"}, "calidhayte.graphs.Graphs.y_name": {"fullname": "calidhayte.graphs.Graphs.y_name", "modulename": "calidhayte.graphs", "qualname": "Graphs.y_name", "kind": "variable", "doc": "

    Label for y measurements

    \n", "annotation": ": str"}, "calidhayte.graphs.Graphs.target": {"fullname": "calidhayte.graphs.Graphs.target", "modulename": "calidhayte.graphs", "qualname": "Graphs.target", "kind": "variable", "doc": "

    Measurand in y to calibrate against

    \n"}, "calidhayte.graphs.Graphs.models": {"fullname": "calidhayte.graphs.Graphs.models", "modulename": "calidhayte.graphs", "qualname": "Graphs.models", "kind": "variable", "doc": "

    The precalibrated models. They are stored in a nested structure as\nfollows:

    \n\n
      \n
    1. Primary Key, name of the technique (e.g Lasso Regression).
    2. \n
    3. Scaling technique (e.g Yeo-Johnson Transform).
    4. \n
    5. Combination of variables used or target if calibration is\nunivariate (e.g \"target + a + b).
    6. \n
    7. Fold, which fold was excluded from the calibration. If data\nis 5-fold cross validated, a key of 4 indicates the data was trained on\nfolds 0-3.
    8. \n
    \n\n
    stateDiagram-v2\n models --> Technique\n state Technique {\n [*] --> Scaling\n [*]: The calibration technique used\n [*]: (e.g \"Lasso Regression\")\n state Scaling {\n [*] --> Variables\n [*]: The scaling technique used\n [*]: (e.g \"Yeo-Johnson Transform\")\n state Variables {\n [*] : The combination of variables used\n [*] : (e.g \"x + a + b\")\n [*] --> Fold\n state Fold {\n [*] : Which fold was excluded from training data\n [*] : (e.g 4 indicates folds 0-3 were used to train)\n }\n }\n }\n }\n
    \n", "annotation": ": dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]"}, "calidhayte.graphs.Graphs.plots": {"fullname": "calidhayte.graphs.Graphs.plots", "modulename": "calidhayte.graphs", "qualname": "Graphs.plots", "kind": "variable", "doc": "

    The plotted data, stored in a similar structure to models

    \n\n
      \n
    1. Primary Key, name of the technique (e.g Lasso Regression).
    2. \n
    3. Scaling technique (e.g Yeo-Johnson Transform).
    4. \n
    5. Combination of variables used or target if calibration is\nunivariate (e.g \"target + a + b).
    6. \n
    7. Name of the plot (e.g. 'Bland-Altman')
    8. \n
    \n\n
    stateDiagram-v2\n models --> Technique\n state Technique {\n [*] --> Scaling\n [*]: The calibration technique used\n [*]: (e.g \"Lasso Regression\")\n state Scaling {\n [*] --> Variables\n [*]: The scaling technique used\n [*]: (e.g \"Yeo-Johnson Transform\")\n state Variables {\n [*] : The combination of variables used\n [*] : (e.g \"x + a + b\")\n [*] --> pn\n state \"Plot Name\" as pn {\n [*] : Name of the plot\n [*] : (e.g Bland-Altman)\n }\n }\n }\n }\n
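    For instance, a single stored figure could be retrieved and saved with illustrative keys (the exact keys depend on the techniques, scalers, variable combinations and plots that were generated):

    >>> fig = graphs.plots['Lasso Regression']['Yeo-Johnson Transform']['x + a + b']['Bland-Altman']
    >>> fig.savefig('lasso_bland_altman.png')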
    \n", "annotation": ": dict[str, dict[str, dict[str, dict[str, matplotlib.figure.Figure]]]]"}, "calidhayte.graphs.Graphs.style": {"fullname": "calidhayte.graphs.Graphs.style", "modulename": "calidhayte.graphs", "qualname": "Graphs.style", "kind": "variable", "doc": "

    Name of in-built matplotlib style or path to stylesheet

    \n", "annotation": ": Union[str, pathlib.Path]"}, "calidhayte.graphs.Graphs.backend": {"fullname": "calidhayte.graphs.Graphs.backend", "modulename": "calidhayte.graphs", "qualname": "Graphs.backend", "kind": "variable", "doc": "

    Matplotlib backend to use

    \n"}, "calidhayte.graphs.Graphs.plot_meta": {"fullname": "calidhayte.graphs.Graphs.plot_meta", "modulename": "calidhayte.graphs", "qualname": "Graphs.plot_meta", "kind": "function", "doc": "

    Iterates over data and creates plots using function specified in\nplot_func

    \n\n

    Should not be accessed directly, should instead be called by\nanother method

    \n\n
    Parameters
    \n\n
      \n
    • plot_func (Callable):\nFunction that returns matplotlib figure
    • \n
    • name (str):\nName to give plot, used as key in plots dict
    • \n
    • **kwargs: Additional arguments passed to plot_func
    • \n
    \n", "signature": "(\tself,\tplot_func: Callable[..., matplotlib.figure.Figure],\tname: str,\t**kwargs):", "funcdef": "def"}, "calidhayte.graphs.Graphs.bland_altman_plot": {"fullname": "calidhayte.graphs.Graphs.bland_altman_plot", "modulename": "calidhayte.graphs", "qualname": "Graphs.bland_altman_plot", "kind": "function", "doc": "

    \n", "signature": "(self, title=None):", "funcdef": "def"}, "calidhayte.graphs.Graphs.ecdf_plot": {"fullname": "calidhayte.graphs.Graphs.ecdf_plot", "modulename": "calidhayte.graphs", "qualname": "Graphs.ecdf_plot", "kind": "function", "doc": "

    \n", "signature": "(self, title=None):", "funcdef": "def"}, "calidhayte.graphs.Graphs.lin_reg_plot": {"fullname": "calidhayte.graphs.Graphs.lin_reg_plot", "modulename": "calidhayte.graphs", "qualname": "Graphs.lin_reg_plot", "kind": "function", "doc": "

    \n", "signature": "(self, title=None):", "funcdef": "def"}, "calidhayte.graphs.Graphs.shap": {"fullname": "calidhayte.graphs.Graphs.shap", "modulename": "calidhayte.graphs", "qualname": "Graphs.shap", "kind": "function", "doc": "

    \n", "signature": "(self, pipeline_keys: list[str], title=None):", "funcdef": "def"}, "calidhayte.graphs.Graphs.save_plots": {"fullname": "calidhayte.graphs.Graphs.save_plots", "modulename": "calidhayte.graphs", "qualname": "Graphs.save_plots", "kind": "function", "doc": "

    \n", "signature": "(\tself,\tpath: str,\tfiletype: Union[Literal['png', 'pgf', 'pdf'], collections.abc.Iterable[Literal['png', 'pgf', 'pdf']]] = 'png'):", "funcdef": "def"}, "calidhayte.graphs.ecdf": {"fullname": "calidhayte.graphs.ecdf", "modulename": "calidhayte.graphs", "qualname": "ecdf", "kind": "function", "doc": "

    \n", "signature": "(data):", "funcdef": "def"}, "calidhayte.graphs.lin_reg_plot": {"fullname": "calidhayte.graphs.lin_reg_plot", "modulename": "calidhayte.graphs", "qualname": "lin_reg_plot", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.series.Series,\ty: pandas.core.series.Series,\tx_name: str,\ty_name: str,\ttitle: Optional[str] = None):", "funcdef": "def"}, "calidhayte.graphs.bland_altman_plot": {"fullname": "calidhayte.graphs.bland_altman_plot", "modulename": "calidhayte.graphs", "qualname": "bland_altman_plot", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.frame.DataFrame,\ty: pandas.core.series.Series,\ttitle: Optional[str] = None,\t**kwargs):", "funcdef": "def"}, "calidhayte.graphs.ecdf_plot": {"fullname": "calidhayte.graphs.ecdf_plot", "modulename": "calidhayte.graphs", "qualname": "ecdf_plot", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.frame.DataFrame,\ty: pandas.core.series.Series,\tx_name: str,\ty_name: str,\ttitle: Optional[str] = None):", "funcdef": "def"}, "calidhayte.graphs.shap_plot": {"fullname": "calidhayte.graphs.shap_plot", "modulename": "calidhayte.graphs", "qualname": "shap_plot", "kind": "function", "doc": "

    \n", "signature": "(shaps: pandas.core.frame.DataFrame, x: pandas.core.frame.DataFrame):", "funcdef": "def"}, "calidhayte.graphs.get_shap": {"fullname": "calidhayte.graphs.get_shap", "modulename": "calidhayte.graphs", "qualname": "get_shap", "kind": "function", "doc": "

    \n", "signature": "(\tx: pandas.core.frame.DataFrame,\ty: pandas.core.frame.DataFrame,\tpipeline: dict[int, sklearn.pipeline.Pipeline]):", "funcdef": "def"}, "calidhayte.results": {"fullname": "calidhayte.results", "modulename": "calidhayte.results", "kind": "module", "doc": "

    Determine the performance of different calibration techniques using a range of\ndifferent metrics.

    \n\n

    Acts as a wrapper for scikit-learn performance metrics 1.

    \n\n\n"}, "calidhayte.results.CoefficientPipelineDict": {"fullname": "calidhayte.results.CoefficientPipelineDict", "modulename": "calidhayte.results", "qualname": "CoefficientPipelineDict", "kind": "variable", "doc": "

    Type alias for the nested dictionaries that the models are stored in

    \n", "annotation": ": TypeAlias", "default_value": "dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]"}, "calidhayte.results.Results": {"fullname": "calidhayte.results.Results", "modulename": "calidhayte.results", "qualname": "Results", "kind": "class", "doc": "

    Determine performance of models using a range of metrics.

    \n\n

    Used to compare a range of different models that were fitted in the\nCalibrate class in calibrate.py.
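    A short usage sketch, assuming the measurement DataFrames and the calibrated models come from a fitted `Calibrate` instance (all variable names and the target below are illustrative):

    >>> from calidhayte.results import Results
    >>> results = Results(
    ...     x_data=x_df,
    ...     y_data=y_df,
    ...     target='NO2',               # column present in both x_data and y_data
    ...     models=cal.return_models(),
    ... )
    >>> results.r2()                    # calculate r2 for every technique/scaler/variables/fold
    >>> errors = results.return_errors()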

    \n"}, "calidhayte.results.Results.__init__": {"fullname": "calidhayte.results.Results.__init__", "modulename": "calidhayte.results", "qualname": "Results.__init__", "kind": "function", "doc": "

    Initialises the class

    \n\n
    Parameters
    \n\n
      \n
    • x_data (pd.DataFrame):\nDependent measurements
    • \n
    • y_data (pd.DataFrame):\nIndependent measurements
    • \n
    • target (str):\nColumn name of the primary feature to use in calibration, must be\nthe name of a column in both x_data and y_data.
    • \n
    • models (CoefficientPipelineDict):\nThe calibrated models.
    • \n
    \n", "signature": "(\tx_data: pandas.core.frame.DataFrame,\ty_data: pandas.core.frame.DataFrame,\ttarget: str,\tmodels: dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]])"}, "calidhayte.results.Results.x": {"fullname": "calidhayte.results.Results.x", "modulename": "calidhayte.results", "qualname": "Results.x", "kind": "variable", "doc": "

    Dependent measurements

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.results.Results.y": {"fullname": "calidhayte.results.Results.y", "modulename": "calidhayte.results", "qualname": "Results.y", "kind": "variable", "doc": "

    Independent Measurements

    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.results.Results.target": {"fullname": "calidhayte.results.Results.target", "modulename": "calidhayte.results", "qualname": "Results.target", "kind": "variable", "doc": "

    Column name of primary feature to use in calibration

    \n", "annotation": ": str"}, "calidhayte.results.Results.models": {"fullname": "calidhayte.results.Results.models", "modulename": "calidhayte.results", "qualname": "Results.models", "kind": "variable", "doc": "

    The calibrated models. They are stored in a nested structure as\nfollows:

    \n\n
      \n
    1. Primary Key, name of the technique (e.g Lasso Regression).
    2. \n
    3. Scaling technique (e.g Yeo-Johnson Transform).
    4. \n
    5. Combination of variables used or target if calibration is\nunivariate (e.g \"target + a + b).
    6. \n
    7. Fold, which fold was excluded from the calibration. If data\nis 5-fold cross validated, a key of 4 indicates the data was\ntrained on folds 0-3.
    8. \n
    \n\n
    stateDiagram-v2\n models --> Technique\n state Technique {\n [*] --> Scaling\n [*]: The calibration technique used\n [*]: (e.g \"Lasso Regression\")\n state Scaling {\n [*] --> Variables\n [*]: The scaling technique used\n [*]: (e.g \"Yeo-Johnson Transform\")\n state Variables {\n [*] : The combination of variables used\n [*] : (e.g \"x + a + b\")\n [*] --> Fold\n state Fold {\n [*] : Which fold was excluded from training data\n [*] : (e.g 4 indicates folds 0-3 were used to train)\n }\n }\n }\n }\n
    \n", "annotation": ": dict[str, dict[str, dict[str, dict[int, sklearn.pipeline.Pipeline]]]]"}, "calidhayte.results.Results.errors": {"fullname": "calidhayte.results.Results.errors", "modulename": "calidhayte.results", "qualname": "Results.errors", "kind": "variable", "doc": "

    Results of error metric calculations. Index increases sequentially\nby 1, columns contain the technique, scaling method, variables and\nfold for each row. It also contains a column for each metric.

    |    |Technique    |Scaling Method     |Variables|Fold|Explained Variance Score|...|Mean Absolute Percentage Error|
    |  0 |Random Forest|Standard Scaling   |x + a    |  0 |0.95                    |...|0.05                          |
    |  1 |Theil-Sen    |Yeo-Johnson Scaling|x + a + b|  1 |0.98                    |...|0.01                          |
    |... |...          |...                |...      |... |...                     |...|...                           |
    | 55 |Extra Trees  |None               |x        |  2 |0.43                    |...|0.52                          |
    \n", "annotation": ": pandas.core.frame.DataFrame"}, "calidhayte.results.Results.explained_variance_score": {"fullname": "calidhayte.results.Results.explained_variance_score", "modulename": "calidhayte.results", "qualname": "Results.explained_variance_score", "kind": "function", "doc": "

    Calculate the explained variance score between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. \n

      Link 

      \n
    2. \n
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.max": {"fullname": "calidhayte.results.Results.max", "modulename": "calidhayte.results", "qualname": "Results.max", "kind": "function", "doc": "

    Calculate the max error between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. \n

      Link 

      \n
    2. \n
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_absolute": {"fullname": "calidhayte.results.Results.mean_absolute", "modulename": "calidhayte.results", "qualname": "Results.mean_absolute", "kind": "function", "doc": "

    Calculate the mean absolute error between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. \n

      Link 

      \n
    2. \n
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.root_mean_squared": {"fullname": "calidhayte.results.Results.root_mean_squared", "modulename": "calidhayte.results", "qualname": "Results.root_mean_squared", "kind": "function", "doc": "

    Calculate the root mean squared error between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. \n

      Link 

      \n
    2. \n
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.root_mean_squared_log": {"fullname": "calidhayte.results.Results.root_mean_squared_log", "modulename": "calidhayte.results", "qualname": "Results.root_mean_squared_log", "kind": "function", "doc": "

    Calculate the root mean squared log error between the true values\n(y) and predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. \n

      Link 

      \n
    2. \n
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.median_absolute": {"fullname": "calidhayte.results.Results.median_absolute", "modulename": "calidhayte.results", "qualname": "Results.median_absolute", "kind": "function", "doc": "

    Calculate the median absolute error between the true values (y)\nand predicted y (x) 1.

    \n\n
    \n
    \n
      \n
    1. \n

      Link 

      \n
    2. \n
    \n
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_absolute_percentage": {"fullname": "calidhayte.results.Results.mean_absolute_percentage", "modulename": "calidhayte.results", "qualname": "Results.mean_absolute_percentage", "kind": "function", "doc": "

    Calculate the mean absolute percentage error between the true\nvalues (y) and predicted y (x) 1.

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.r2": {"fullname": "calidhayte.results.Results.r2", "modulename": "calidhayte.results", "qualname": "Results.r2", "kind": "function", "doc": "

    Calculate the r2 (coefficient of determination) between the true values (y) and predicted y (x).

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_poisson_deviance": {"fullname": "calidhayte.results.Results.mean_poisson_deviance", "modulename": "calidhayte.results", "qualname": "Results.mean_poisson_deviance", "kind": "function", "doc": "

    Calculate the mean Poisson deviance between the true values (y) and predicted y (x).

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_gamma_deviance": {"fullname": "calidhayte.results.Results.mean_gamma_deviance", "modulename": "calidhayte.results", "qualname": "Results.mean_gamma_deviance", "kind": "function", "doc": "

    Calculate the mean gamma deviance between the true values (y) and predicted y (x).

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_tweedie_deviance": {"fullname": "calidhayte.results.Results.mean_tweedie_deviance", "modulename": "calidhayte.results", "qualname": "Results.mean_tweedie_deviance", "kind": "function", "doc": "

    Calculate the mean Tweedie deviance between the true values (y) and predicted y (x).

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.mean_pinball_loss": {"fullname": "calidhayte.results.Results.mean_pinball_loss", "modulename": "calidhayte.results", "qualname": "Results.mean_pinball_loss", "kind": "function", "doc": "

    Calculate the mean pinball loss between the true values (y) and the predicted y (x).

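    All of the metric methods above follow the same pattern: each is called with no arguments and the calculated metric appears as a column of the errors attribute described earlier. A hedged usage sketch (assuming results is an already-constructed calidhayte.results.Results instance; constructor arguments are omitted here):

        # Assumed: `results` is a calidhayte.results.Results instance.
        results.explained_variance_score()  # adds an 'Explained Variance Score' column
        results.mean_absolute()             # adds a mean absolute error column
        results.r2()                        # adds an r2 column

        error_df = results.return_errors()  # collect everything calculated so far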
    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.results.Results.return_errors": {"fullname": "calidhayte.results.Results.return_errors", "modulename": "calidhayte.results", "qualname": "Results.return_errors", "kind": "function", "doc": "

    Returns all calculated errors as a single DataFrame.


    Initially the error dataframe has the following structure:

    |  |Technique|Scaling Method|Variables|Fold|Explained Variance Score|...|Mean Absolute Percentage Error|
    | 0|Random Forest|Standard Scaling|x + a|0|0.95|...|0.05|
    | 1|Theil-Sen|Yeo-Johnson Scaling|x + a + b|1|0.98|...|0.01|
    |..|...|...|...|...|...|...|...|
    |55|Extra Trees|None|x|2|0.43|...|0.52|

    However, before returning the data, a new MultiIndex is built using the Technique, Scaling Method, Variables and Fold columns. This allows easy comparison of the different techniques by grouping on one or multiple levels of the MultiIndex.

    Returns

    • pd.DataFrame: Results dataframe in the following format:
    |Technique|Scaling Method|Variables|Fold|Explained Variance Score|...|Mean Absolute Percentage Error|
    |Random Forest|Standard Scaling|x + a|0|0.95|...|0.05|
    |Theil-Sen|Yeo-Johnson Scaling|x + a + b|1|0.98|...|0.01|
    |...|...|...|...|...|...|...|
    |Extra Trees|None|x|2|0.43|...|0.52|
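    As an illustrative, self-contained sketch of that grouping step (the toy frame below is built by hand; only the final groupby call reflects the intended use of the returned MultiIndex):

        import pandas as pd

        # Hand-built stand-in for the frame returned by return_errors().
        idx = pd.MultiIndex.from_tuples(
            [
                ("Random Forest", "Standard Scaling", "x + a", 0),
                ("Theil-Sen", "Yeo-Johnson Scaling", "x + a + b", 1),
                ("Extra Trees", "None", "x", 2),
            ],
            names=["Technique", "Scaling Method", "Variables", "Fold"],
        )
        error_df = pd.DataFrame(
            {
                "Explained Variance Score": [0.95, 0.98, 0.43],
                "Mean Absolute Percentage Error": [0.05, 0.01, 0.52],
            },
            index=idx,
        )

        # Average each metric per technique, collapsing scaler, variable set and fold.
        print(error_df.groupby(level="Technique").mean())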
    \n", "signature": "(self) -> pandas.core.frame.DataFrame:", "funcdef": "def"}, "calidhayte.summary": {"fullname": "calidhayte.summary", "modulename": "calidhayte.summary", "kind": "module", "doc": "

    \n"}, "calidhayte.summary.Summary": {"fullname": "calidhayte.summary.Summary", "modulename": "calidhayte.summary", "qualname": "Summary", "kind": "class", "doc": "

    \n"}, "calidhayte.summary.Summary.__init__": {"fullname": "calidhayte.summary.Summary.__init__", "modulename": "calidhayte.summary", "qualname": "Summary.__init__", "kind": "function", "doc": "

    \n", "signature": "(\tresults: pandas.core.frame.DataFrame,\tcols: list[str],\tstyle: str = 'bmh',\tbackend: str = 'TkAgg')"}, "calidhayte.summary.Summary.results": {"fullname": "calidhayte.summary.Summary.results", "modulename": "calidhayte.summary", "qualname": "Summary.results", "kind": "variable", "doc": "

    \n"}, "calidhayte.summary.Summary.plots": {"fullname": "calidhayte.summary.Summary.plots", "modulename": "calidhayte.summary", "qualname": "Summary.plots", "kind": "variable", "doc": "

    \n", "annotation": ": dict[str, dict[str, matplotlib.figure.Figure]]"}, "calidhayte.summary.Summary.cols": {"fullname": "calidhayte.summary.Summary.cols", "modulename": "calidhayte.summary", "qualname": "Summary.cols", "kind": "variable", "doc": "

    \n", "annotation": ": list[str]"}, "calidhayte.summary.Summary.style": {"fullname": "calidhayte.summary.Summary.style", "modulename": "calidhayte.summary", "qualname": "Summary.style", "kind": "variable", "doc": "

    \n"}, "calidhayte.summary.Summary.backend": {"fullname": "calidhayte.summary.Summary.backend", "modulename": "calidhayte.summary", "qualname": "Summary.backend", "kind": "variable", "doc": "

    \n"}, "calidhayte.summary.Summary.boxplots": {"fullname": "calidhayte.summary.Summary.boxplots", "modulename": "calidhayte.summary", "qualname": "Summary.boxplots", "kind": "function", "doc": "

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.summary.Summary.histograms": {"fullname": "calidhayte.summary.Summary.histograms", "modulename": "calidhayte.summary", "qualname": "Summary.histograms", "kind": "function", "doc": "

    \n", "signature": "(self):", "funcdef": "def"}, "calidhayte.summary.Summary.save_plots": {"fullname": "calidhayte.summary.Summary.save_plots", "modulename": "calidhayte.summary", "qualname": "Summary.save_plots", "kind": "function", "doc": "

    \n", "signature": "(self, path, filetype: str = 'png'):", "funcdef": "def"}}, "docInfo": {"calidhayte": {"qualname": 0, "fullname": 1, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 1226}, "calidhayte.calibrate": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 111}, "calidhayte.calibrate.cont_strat_folds": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 117, "bases": 0, "doc": 336}, "calidhayte.calibrate.Calibrate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 986}, "calidhayte.calibrate.Calibrate.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 338, "bases": 0, "doc": 334}, "calidhayte.calibrate.Calibrate.x_data": {"qualname": 3, "fullname": 5, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "calidhayte.calibrate.Calibrate.target": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 32}, "calidhayte.calibrate.Calibrate.scaler_list": {"qualname": 3, "fullname": 5, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "calidhayte.calibrate.Calibrate.scaler": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "calidhayte.calibrate.Calibrate.y_data": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 89}, "calidhayte.calibrate.Calibrate.models": {"qualname": 2, "fullname": 4, "annotation": 8, "default_value": 0, "signature": 0, "bases": 0, "doc": 192}, "calidhayte.calibrate.Calibrate.folds": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 12}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 75}, "calidhayte.calibrate.Calibrate.linreg": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 155, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.ridge": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 337, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.ridge_cv": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 64, "bases": 0, "doc": 54}, "calidhayte.calibrate.Calibrate.lasso": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 286, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.lasso_cv": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 64, "bases": 0, "doc": 54}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 288, "bases": 0, "doc": 77}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 57}, "calidhayte.calibrate.Calibrate.elastic_net": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 339, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 54}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 341, "bases": 0, 
"doc": 78}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"qualname": 6, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 60}, "calidhayte.calibrate.Calibrate.lars": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 227, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.lars_lasso": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 206, "bases": 0, "doc": 78}, "calidhayte.calibrate.Calibrate.omp": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 227, "bases": 0, "doc": 77}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 413, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 414, "bases": 0, "doc": 78}, "calidhayte.calibrate.Calibrate.tweedie": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 336, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 616, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 341, "bases": 0, "doc": 77}, "calidhayte.calibrate.Calibrate.ransac": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 170, "bases": 0, "doc": 72}, "calidhayte.calibrate.Calibrate.theil_sen": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 205, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.huber": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 306, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.quantile": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 360, "bases": 0, "doc": 65}, "calidhayte.calibrate.Calibrate.decision_tree": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 328, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.extra_tree": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 328, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.random_forest": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 432, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 433, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 222, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 494, "bases": 0, "doc": 80}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 873, "bases": 0, "doc": 78}, "calidhayte.calibrate.Calibrate.svr": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 441, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.linear_svr": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 291, "bases": 0, "doc": 78}, "calidhayte.calibrate.Calibrate.nu_svr": {"qualname": 3, "fullname": 
5, "annotation": 0, "default_value": 0, "signature": 391, "bases": 0, "doc": 78}, "calidhayte.calibrate.Calibrate.gaussian_process": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 170, "bases": 0, "doc": 76}, "calidhayte.calibrate.Calibrate.isotonic": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 176, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.xgboost": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 605, "bases": 0, "doc": 74}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 607, "bases": 0, "doc": 78}, "calidhayte.calibrate.Calibrate.return_measurements": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 80}, "calidhayte.calibrate.Calibrate.return_models": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 119}, "calidhayte.graphs": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 15}, "calidhayte.graphs.Graphs.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 198, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.x": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 22}, "calidhayte.graphs.Graphs.y": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 20}, "calidhayte.graphs.Graphs.x_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "calidhayte.graphs.Graphs.y_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "calidhayte.graphs.Graphs.target": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "calidhayte.graphs.Graphs.models": {"qualname": 2, "fullname": 4, "annotation": 8, "default_value": 0, "signature": 0, "bases": 0, "doc": 192}, "calidhayte.graphs.Graphs.plots": {"qualname": 2, "fullname": 4, "annotation": 8, "default_value": 0, "signature": 0, "bases": 0, "doc": 164}, "calidhayte.graphs.Graphs.style": {"qualname": 2, "fullname": 4, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 12}, "calidhayte.graphs.Graphs.backend": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 6}, "calidhayte.graphs.Graphs.plot_meta": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 82}, "calidhayte.graphs.Graphs.bland_altman_plot": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.ecdf_plot": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.lin_reg_plot": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.shap": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 38, "bases": 0, "doc": 3}, "calidhayte.graphs.Graphs.save_plots": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, 
"signature": 127, "bases": 0, "doc": 3}, "calidhayte.graphs.ecdf": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "calidhayte.graphs.lin_reg_plot": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "calidhayte.graphs.bland_altman_plot": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 3}, "calidhayte.graphs.ecdf_plot": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "calidhayte.graphs.shap_plot": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 56, "bases": 0, "doc": 3}, "calidhayte.graphs.get_shap": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 91, "bases": 0, "doc": 3}, "calidhayte.results": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 61}, "calidhayte.results.CoefficientPipelineDict": {"qualname": 1, "fullname": 3, "annotation": 2, "default_value": 7, "signature": 0, "bases": 0, "doc": 14}, "calidhayte.results.Results": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 35}, "calidhayte.results.Results.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 132, "bases": 0, "doc": 81}, "calidhayte.results.Results.x": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 4}, "calidhayte.results.Results.y": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 4}, "calidhayte.results.Results.target": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "calidhayte.results.Results.models": {"qualname": 2, "fullname": 4, "annotation": 8, "default_value": 0, "signature": 0, "bases": 0, "doc": 189}, "calidhayte.results.Results.errors": {"qualname": 2, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 220}, "calidhayte.results.Results.explained_variance_score": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.max": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 46}, "calidhayte.results.Results.mean_absolute": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.root_mean_squared": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 48}, "calidhayte.results.Results.root_mean_squared_log": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 49}, "calidhayte.results.Results.median_absolute": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.mean_absolute_percentage": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 48}, "calidhayte.results.Results.r2": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 45}, "calidhayte.results.Results.mean_poisson_deviance": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 
0, "doc": 47}, "calidhayte.results.Results.mean_gamma_deviance": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.mean_tweedie_deviance": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 47}, "calidhayte.results.Results.mean_pinball_loss": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 46}, "calidhayte.results.Results.return_errors": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 435}, "calidhayte.summary": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 91, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.results": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.plots": {"qualname": 2, "fullname": 4, "annotation": 6, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.cols": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.style": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.backend": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.boxplots": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.histograms": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "calidhayte.summary.Summary.save_plots": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 37, "bases": 0, "doc": 3}}, "length": 109, "save": true}, "index": {"qualname": {"root": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.cols": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 49}}}}}}}}, "v": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, 
"df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.summary.Summary.style": {"tf": 1}}, "df": 2}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.shap_plot": {"tf": 1}, "calidhayte.graphs.get_shap": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.summary.Summary": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}, "calidhayte.summary.Summary.results": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.cols": {"tf": 1}, "calidhayte.summary.Summary.style": {"tf": 1}, "calidhayte.summary.Summary.backend": {"tf": 1}, "calidhayte.summary.Summary.boxplots": {"tf": 1}, "calidhayte.summary.Summary.histograms": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 10}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 4}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}}, "df": 4, "g": {"docs": 
{}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 3}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 2}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}, "n": {"docs": {"calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 5}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}}, "df": 4}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 8}}, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.median_absolute": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.results.Results.max": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, 
"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.shap_plot": {"tf": 1}}, "df": 8, "s": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 3}}}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.summary.Summary.backend": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 2}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.boxplots": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 2}}}}}, "r": {"2": {"docs": {"calidhayte.results.Results.r2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.results": {"tf": 1}}, "df": 21}}}}}}, "f": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.ecdf": {"tf": 1}, 
"calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 2}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}}, "df": 3}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 3}}}}}, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 17}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, 
"df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.get_shap": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.histograms": {"tf": 1}}, "df": 1}}}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}}}}}}}, "fullname": {"root": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.shap_plot": {"tf": 1}, "calidhayte.graphs.get_shap": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, 
"calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary": {"tf": 1}, "calidhayte.summary.Summary": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}, "calidhayte.summary.Summary.results": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.cols": {"tf": 1}, "calidhayte.summary.Summary.style": {"tf": 1}, "calidhayte.summary.Summary.backend": {"tf": 1}, "calidhayte.summary.Summary.boxplots": {"tf": 1}, "calidhayte.summary.Summary.histograms": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 109}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}}, "df": 51}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.cols": {"tf": 1}}, "df": 1}}}, "v": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.summary.Summary.style": {"tf": 1}}, "df": 2}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3}}, "h": {"docs": {}, "df": 0, "a": {"docs": 
{}, "df": 0, "p": {"docs": {"calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.shap_plot": {"tf": 1}, "calidhayte.graphs.get_shap": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.summary": {"tf": 1}, "calidhayte.summary.Summary": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.__init__": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.results": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.plots": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.cols": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.style": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.backend": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.boxplots": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.histograms": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.save_plots": {"tf": 1.4142135623730951}}, "df": 11}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 4}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}}, "df": 4, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 3}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 2}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}, "n": {"docs": {"calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 5}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}}, "df": 4}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 
1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 8}}, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.median_absolute": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.results.Results.max": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.shap_plot": {"tf": 1}}, "df": 8, "s": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}}, "df": 1}}}}}}}}}, "o": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 3}}}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.summary.Summary.backend": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 2}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.boxplots": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 2}}}}}, "r": {"2": {"docs": {"calidhayte.results.Results.r2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1.4142135623730951}, 
"calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.x": {"tf": 1.4142135623730951}, "calidhayte.results.Results.y": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.explained_variance_score": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1.4142135623730951}, "calidhayte.results.Results.median_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1.4142135623730951}, "calidhayte.results.Results.r2": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.results": {"tf": 1}}, "df": 23}}}}}}, "f": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 2}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}}, "df": 3}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 3}}}}}, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x_name": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y_name": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.target": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.style": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.backend": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.shap": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.shap_plot": {"tf": 1}, "calidhayte.graphs.get_shap": {"tf": 1}}, "df": 24}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, 
"s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.get_shap": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.histograms": {"tf": 1}}, "df": 1}}}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}}, "df": 1}}}}}}}}}}, "annotation": {"root": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}, "calidhayte.summary.Summary.cols": {"tf": 1}}, "df": 21, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 6}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, 
"calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 6}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 6}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.summary.Summary.plots": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 6}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.plots": {"tf": 1.4142135623730951}}, "df": 6}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 3}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 4}}, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 
0, "n": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.summary.Summary.cols": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.folds": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.summary.Summary.plots": {"tf": 1}}, "df": 2}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1}}}}}}}}}}}, "default_value": {"root": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1.7320508075688772}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "signature": {"root": {"0": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}}, "df": 1, "x": {"7": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "c": {"3": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"3": {"0": {"docs": {}, "df": 0, "d": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "1": {"9": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "c": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "3": {"3": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}}, "df": 1}, 
"docs": {}, "df": 0}}, "docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "c": {"3": {"docs": {}, "df": 0, "c": {"5": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "d": {"3": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "a": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "e": {"7": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "f": {"0": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "9": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}, "4": {"8": {"1": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "f": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "9": {"6": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0, "a": {"4": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "b": {"2": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "b": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "5": {"0": {"2": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "1": {"2": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 
1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "2": {"1": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "3": {"7": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "f": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "6": {"8": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "9": {"0": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "f": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "a": {"6": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "e": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "b": {"5": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "d": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "c": {"6": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "d": {"5": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "e": {"5": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "f": {"0": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "e": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "7": {"4": {"0": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, 
"docs": {}, "df": 0}, "5": {"0": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "6": {"5": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "7": {"0": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "e": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "8": {"4": {"0": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "5": {"6": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "6": {"6": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "d": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "7": {"5": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0}, "9": {"docs": {}, "df": 0, "c": {"6": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "d": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "d": {"9": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "e": {"0": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "f": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "f": {"6": {"5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "f": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 
1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0, "a": {"0": {"7": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "e": {"9": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "1": {"5": {"docs": {}, "df": 0, "d": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "d": {"1": {"0": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}}}}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0}}, "1": {"0": {"0": {"0": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 7}, "docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 10}, "2": {"7": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "5": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 3}, "docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 9}, "2": {"0": {"0": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 
7}, "docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "5": {"0": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 7}, "5": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 3}, "docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 7}, "docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 8}, "3": {"0": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "1": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 3}, "9": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 5.656854249492381}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 4.69041575982343}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 3.4641016151377544}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 3.4641016151377544}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 2}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 2}, "calidhayte.calibrate.Calibrate.omp": {"tf": 2}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 3.4641016151377544}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 3.4641016151377544}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 6.48074069840786}, 
"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 3.7416573867739413}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 2}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 2}, "calidhayte.calibrate.Calibrate.huber": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 4.242640687119285}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 4.898979485566356}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 7.211102550927978}, "calidhayte.calibrate.Calibrate.svr": {"tf": 5.291502622129181}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 3.4641016151377544}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 2}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 5.477225575051661}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 5.477225575051661}, "calidhayte.graphs.Graphs.__init__": {"tf": 2}, "calidhayte.graphs.Graphs.save_plots": {"tf": 3.7416573867739413}, "calidhayte.summary.Summary.__init__": {"tf": 2}, "calidhayte.summary.Summary.save_plots": {"tf": 1.4142135623730951}}, "df": 43}, "docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 8}, "4": {"0": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 4}, "5": {"0": {"0": {"0": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}, "docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 7}, "docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 7}, "docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 12}, "6": {"2": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}, "3": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 3}, "docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 2}, "7": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 5}, "8": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 2}, "9": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 2}, "docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 9.746794344808963}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 15.459624833740307}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 8.12403840463596}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 11.180339887498949}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 15.874507866387544}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 7.0710678118654755}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 14.730919862656235}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 7.0710678118654755}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 14.730919862656235}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 7.0710678118654755}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 15.905973720586866}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 7.0710678118654755}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 15.905973720586866}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 7.0710678118654755}, "calidhayte.calibrate.Calibrate.lars": {"tf": 13.45362404707371}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 12.609520212918492}, "calidhayte.calibrate.Calibrate.omp": {"tf": 13.45362404707371}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 17.406895185529212}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 17.406895185529212}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 15.968719422671311}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 21.095023109728988}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 15.905973720586866}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 11.704699910719626}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 12.609520212918492}, "calidhayte.calibrate.Calibrate.huber": {"tf": 
15.198684153570664}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 16.34013463836819}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 15.620499351813308}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 15.620499351813308}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 18}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 18}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 8.306623862918075}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 19.235384061671343}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 25.436194683953808}, "calidhayte.calibrate.Calibrate.svr": {"tf": 18.16590212458495}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 14.730919862656235}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 17.146428199482248}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 7.615773105863909}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 11.874342087037917}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 21.213203435596427}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 21.213203435596427}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 5.830951894845301}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 7.416198487095663}, "calidhayte.graphs.Graphs.__init__": {"tf": 12.569805089976535}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 7.483314773547883}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 4.242640687119285}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 4.242640687119285}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 4.242640687119285}, "calidhayte.graphs.Graphs.shap": {"tf": 5.5677643628300215}, "calidhayte.graphs.Graphs.save_plots": {"tf": 9.797958971132712}, "calidhayte.graphs.ecdf": {"tf": 3.1622776601683795}, "calidhayte.graphs.lin_reg_plot": {"tf": 9.273618495495704}, "calidhayte.graphs.bland_altman_plot": {"tf": 8.660254037844387}, "calidhayte.graphs.ecdf_plot": {"tf": 9.273618495495704}, "calidhayte.graphs.shap_plot": {"tf": 6.782329983125268}, "calidhayte.graphs.get_shap": {"tf": 8.660254037844387}, "calidhayte.results.Results.__init__": {"tf": 10.295630140987}, "calidhayte.results.Results.explained_variance_score": {"tf": 3.1622776601683795}, "calidhayte.results.Results.max": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_absolute": {"tf": 3.1622776601683795}, "calidhayte.results.Results.root_mean_squared": {"tf": 3.1622776601683795}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 3.1622776601683795}, "calidhayte.results.Results.median_absolute": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 3.1622776601683795}, "calidhayte.results.Results.r2": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 3.1622776601683795}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 3.1622776601683795}, "calidhayte.results.Results.return_errors": {"tf": 4.898979485566356}, "calidhayte.summary.Summary.__init__": {"tf": 8.54400374531753}, "calidhayte.summary.Summary.boxplots": {"tf": 3.1622776601683795}, "calidhayte.summary.Summary.histograms": {"tf": 3.1622776601683795}, "calidhayte.summary.Summary.save_plots": {"tf": 5.477225575051661}}, "df": 73, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}}, "df": 3, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.shap_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.get_shap": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 11}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 2}, "calidhayte.graphs.Graphs.__init__": {"tf": 2}, "calidhayte.graphs.get_shap": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 2}}, "df": 35, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 
1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 2}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 2}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.449489742783178}}, "df": 32}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": 
{"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.shap_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.get_shap": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 12}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}}, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 
1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 5}}}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 2.6457513110645907}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.get_shap": {"tf": 1.7320508075688772}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}}, "df": 5}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}, "g": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1.4142135623730951}}, "df": 1}}, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 3, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.shap_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.get_shap": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 12}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 1}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 2}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 2}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.449489742783178}}, "df": 32}}}}}}}, "s": {"docs": {}, "df": 
0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "f": {"0": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}}, "df": 2}}}}}}}, "g": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}}, "df": 1}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 5}}}}}}}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}}, "df": 4}}}}}, "c": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 5}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 2.449489742783178}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 
0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.shap_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.get_shap": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 11}}}, "o": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.23606797749979}}, "df": 27}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 5}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": 
{"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 37}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 5}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 2}}}}}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}}, "n": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 2.8284271247461903}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": 
{"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 4, "s": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}}, "df": 7}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.7320508075688772}}, "df": 32}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 13}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "k": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 7}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, 
"calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.__init__": {"tf": 2.8284271247461903}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1.7320508075688772}, "calidhayte.results.Results.__init__": {"tf": 2}, "calidhayte.summary.Summary.__init__": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 52, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, 
"s": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 2}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 2}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.449489742783178}}, "df": 32}}}, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}}, "df": 2}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}, "calidhayte.summary.Summary.boxplots": {"tf": 1}, 
"calidhayte.summary.Summary.histograms": {"tf": 1}, "calidhayte.summary.Summary.save_plots": {"tf": 1}}, "df": 62}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}}, "df": 4}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 37}}}}, "n": {"docs": {"calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.lin_reg_plot": {"tf": 2}, "calidhayte.graphs.bland_altman_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3, "r": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}, 
"i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 2}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 2}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.449489742783178}}, "df": 32}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 4}}}}}, "v": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}}, "df": 1, "a": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 2}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 9}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 3}}}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3}}}}}}, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.graphs.get_shap": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 6}}}}}}, "g": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.shap_plot": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 
1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.graphs.get_shap": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 36}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 2}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 2}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 
1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.449489742783178}}, "df": 32}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "v": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 2}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 2}}}}}}}, "p": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}}, "df": 1}}}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": 
{"tf": 1}}, "df": 2}}}, "w": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 3}}}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 2.6457513110645907}}, "df": 3}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 5}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.23606797749979}}, "df": 27}}, "x": {"2": {"7": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 6.164414002968976}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 4.242640687119285}}, "df": 2}, "docs": {}, "df": 0}, 
"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.shap_plot": {"tf": 1}, "calidhayte.graphs.get_shap": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 8, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}}, "y": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.get_shap": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 8, "e": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 
1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 32, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1}}, "df": 2}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 6}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 21}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": 
{"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.23606797749979}}, "df": 27}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 3, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}, "l": {"1": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 
1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}}, "df": 5, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 32, "[": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, 
"g": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 2}}}}}}}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.23606797749979}}, "df": 27}, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 5}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 4}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": 
{"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 6}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}}, "df": 5, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}}, "g": {"2": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 5}, "docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 7, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.Graphs.shap": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 15}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 2}}}}}, "r": {"docs": {}, "df": 0, "m": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.4142135623730951}, "calidhayte.graphs.ecdf_plot": {"tf": 1.4142135623730951}}, "df": 42}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4}, "w": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}}, "df": 1}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}, 
"u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 5}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 9}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 5}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 2}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "o": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 37}, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 1, "d": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, 
"t": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.graphs.Graphs.__init__": {"tf": 1}, "calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 2}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}}, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, 
"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}}, "df": 40}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 3, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 2.449489742783178}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs.shap": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 29}}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.summary.Summary.__init__": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}}, "df": 37}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 3}}, "e": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 6}}}, "v": {"docs": 
{"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 2}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 2}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.449489742783178}}, "df": 32}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}}, "df": 3}}}}, "b": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 3}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2.449489742783178}, 
"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2.23606797749979}}, "df": 27}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.omp": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.graphs.lin_reg_plot": {"tf": 1}, "calidhayte.graphs.bland_altman_plot": {"tf": 1}, "calidhayte.graphs.ecdf_plot": {"tf": 1}}, "df": 3}}}}}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 4, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.7320508075688772}, 
"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}}, "df": 6}}}}}}, "t": {"docs": {}, "df": 0, "a": {"0": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 5}}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}}, "df": 1}}}}}}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}}, "df": 1}}}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": 
{}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "bases": {"root": {"docs": {}, "df": 0}}, "doc": {"root": {"0": {"1": {"docs": {"calidhayte": {"tf": 2.6457513110645907}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "5": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}, "9": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1}, "docs": {"calidhayte": {"tf": 2.449489742783178}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 2.8284271247461903}, "calidhayte.results.Results.return_errors": {"tf": 3.872983346207417}}, "df": 9, "|": {"1": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "1": {"0": {"0": {"0": {"docs": {}, "df": 0, "|": {"2": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "|": {"2": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "3": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "6": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "9": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}, "6": {"0": {"docs": {"calidhayte.calibrate": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 15}, "docs": {}, "df": 0}, "docs": {"calidhayte": {"tf": 2.6457513110645907}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 3.3166247903554}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.7320508075688772}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, 
"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.7320508075688772}}, "df": 19}, "2": {"0": {"2": {"2": {"docs": {"calidhayte": {"tf": 3}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 7, "|": {"9": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "3": {"0": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}, "7": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "9": {"docs": {"calidhayte": {"tf": 4}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 7.3484692283495345}}, "df": 3}, "docs": {"calidhayte": {"tf": 2.23606797749979}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 8, "|": {"1": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "2": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "3": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "4": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "4": {"3": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 6, "|": {"3": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "5": {"0": {"0": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}, "5": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}, "docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}, 
"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 8, "|": {"2": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}}, "6": {"2": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0, "|": {"4": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "7": {"8": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}, "docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 2}}, "df": 2, "|": {"2": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "8": {"6": {"1": {"7": {"docs": {"calidhayte.calibrate": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 15}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2, "|": {"7": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}}, "9": {"5": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}, "8": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 2}}, "df": 2}, "docs": {"calidhayte": {"tf": 27.294688127912362}, "calidhayte.calibrate": {"tf": 7.937253933193772}, "calidhayte.calibrate.cont_strat_folds": {"tf": 13.114877048604}, "calidhayte.calibrate.Calibrate": {"tf": 25.03996805109783}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 10.295630140987}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 3.605551275463989}, "calidhayte.calibrate.Calibrate.models": {"tf": 7.416198487095663}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 4.47213595499958}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 
5.0990195135927845}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 4.242640687119285}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 4.242640687119285}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 4.242640687119285}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 4.242640687119285}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 4.242640687119285}, "calidhayte.calibrate.Calibrate.lars": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.omp": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.huber": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 3.872983346207417}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.svr": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 5.0990195135927845}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 7.211102550927978}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 5.744562646538029}, "calidhayte.graphs": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.__init__": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.x": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.y": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.x_name": {"tf": 2}, "calidhayte.graphs.Graphs.y_name": {"tf": 2}, "calidhayte.graphs.Graphs.target": {"tf": 2}, "calidhayte.graphs.Graphs.models": {"tf": 7.416198487095663}, "calidhayte.graphs.Graphs.plots": {"tf": 7.54983443527075}, "calidhayte.graphs.Graphs.style": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.backend": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 5.477225575051661}, 
"calidhayte.graphs.Graphs.bland_altman_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.ecdf_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.lin_reg_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.shap": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.save_plots": {"tf": 1.7320508075688772}, "calidhayte.graphs.ecdf": {"tf": 1.7320508075688772}, "calidhayte.graphs.lin_reg_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.bland_altman_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.ecdf_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.shap_plot": {"tf": 1.7320508075688772}, "calidhayte.graphs.get_shap": {"tf": 1.7320508075688772}, "calidhayte.results": {"tf": 5.656854249492381}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 2.8284271247461903}, "calidhayte.results.Results.__init__": {"tf": 5.830951894845301}, "calidhayte.results.Results.x": {"tf": 1.4142135623730951}, "calidhayte.results.Results.y": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 7.416198487095663}, "calidhayte.results.Results.errors": {"tf": 11.958260743101398}, "calidhayte.results.Results.explained_variance_score": {"tf": 5.385164807134504}, "calidhayte.results.Results.max": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_absolute": {"tf": 5.385164807134504}, "calidhayte.results.Results.root_mean_squared": {"tf": 5.385164807134504}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 5.385164807134504}, "calidhayte.results.Results.median_absolute": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 5.385164807134504}, "calidhayte.results.Results.r2": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 5.385164807134504}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 5.385164807134504}, "calidhayte.results.Results.return_errors": {"tf": 16.97056274847714}, "calidhayte.summary": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.__init__": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.results": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.plots": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.cols": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.style": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.backend": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.boxplots": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.histograms": {"tf": 1.7320508075688772}, "calidhayte.summary.Summary.save_plots": {"tf": 1.7320508075688772}}, "df": 109, "c": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, 
"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}}, "df": 6, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 7}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}}, "df": 12, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 12, "s": {"docs": {"calidhayte.graphs.Graphs": {"tf": 1}}, "df": 1}, "d": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "@": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "n": {"docs": {"calidhayte": {"tf": 2.449489742783178}, 
"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 3}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 34}}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}, "d": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}, "m": 
{"docs": {"calidhayte": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}}, "df": 2}}}}, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "@": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 5}}}}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 8, "s": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.__init__": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}}}}}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 11}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 2.449489742783178}}, "df": 3}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": 
{"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 37}}}}}}}}}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "g": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 10}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}}, "df": 4}}}, "o": {"docs": {"calidhayte": {"tf": 2.8284271247461903}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 3}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 52}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 2.449489742783178}, "calidhayte.graphs.Graphs.plots": {"tf": 2.449489742783178}, "calidhayte.results.Results.models": {"tf": 2.449489742783178}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 44, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 4}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 4}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 3.1622776601683795}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 2}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 2.449489742783178}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.plots": {"tf": 2.6457513110645907}, "calidhayte.results": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2.449489742783178}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1.4142135623730951}, "calidhayte.results.Results.median_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1.4142135623730951}, "calidhayte.results.Results.r2": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 2.6457513110645907}}, "df": 66, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "m": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 5}, "i": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "t": {"docs": 
{"calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}}, "df": 6}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 15}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 7}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}, "w": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 
0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte": {"tf": 2.8284271247461903}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2.23606797749979}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1.7320508075688772}, "calidhayte.results.Results.__init__": {"tf": 
1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 2}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 57}, "n": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 46, "e": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 4}, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, 
"df": 1}}}}}}, "r": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 10, "g": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}}, "df": 3, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}}, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 37}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 2}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, 
"calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 42, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 4, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 4}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 6}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2}}, "df": 6, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 6}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 6, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}}, "df": 5}}}}, "u": {"docs": {}, "df": 
0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 3}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2}}, "df": 2, "r": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2}, "calidhayte.results.Results.errors": {"tf": 1.7320508075688772}, "calidhayte.results.Results.return_errors": {"tf": 2}}, "df": 9, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, 
"df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 3}}}}}}}}, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "x": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1}}, "df": 2}, "n": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"calidhayte": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.x": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}}, "df": 11}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.target": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, 
"calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 10}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.results.Results.errors": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}}, "df": 2}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}}, "df": 5, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {"calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.models": 
{"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 11}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 2}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "x": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 4, "s": {"docs": {"calidhayte": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2.449489742783178}, "calidhayte.results.Results.models": {"tf": 2.449489742783178}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 11, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 2}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 9}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.return_errors": 
{"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 2}}}, "r": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 6}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 6}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1.7320508075688772}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 38, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 
1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 37}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 2}}, "s": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": 
{}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 3}}}}}}, "x": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 4}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 51, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 2.23606797749979}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.y": {"tf": 1}}, "df": 6}}}}}}}, "x": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 4}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, 
"calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 9, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, 
"df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "f": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}}, "df": 6}}, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.x": {"tf": 1}}, "df": 5}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 
1.7320508075688772}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.7320508075688772}}, "df": 40}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}}, "df": 3}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 3.1622776601683795}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.y_data": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 17, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 2}}, "df": 6, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 4, "s": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 33, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 
0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 2}, "f": {"docs": {"calidhayte": {"tf": 3.1622776601683795}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2.8284271247461903}}, "df": 2}}, "e": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.plots": {"tf": 2.8284271247461903}, "calidhayte.results.Results.models": {"tf": 2.6457513110645907}}, "df": 7, "x": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 5}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 4}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.7320508075688772}}, "df": 9, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 3}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}}, "df": 2}}, "s": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 8}}}, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.backend": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}}, "df": 5, "d": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 2.449489742783178}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 2.449489742783178}}, "df": 51}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 8}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}}, "a": {"docs": {"calidhayte": {"tf": 3}, "calidhayte.calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate": {"tf": 3.3166247903554}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 1.7320508075688772}, "calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 2}, "calidhayte.results.Results.errors": {"tf": 1.7320508075688772}, "calidhayte.results.Results.return_errors": {"tf": 2.23606797749979}}, "df": 16, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 2}}, "r": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}}, "df": 7}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 3}}}}}, "d": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 24}, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.x": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 
1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 9}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 6, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.scaler": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}}}}, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.CoefficientPipelineDict": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 2}}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, 
"calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 6}}}}}}}, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 2}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}}}}}, "v": {"2": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}, "docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1}}, "df": 3, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.y": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2}, "calidhayte.results.Results.errors": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 11}}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 
1.7320508075688772}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 9}}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 3, "s": {"docs": {"calidhayte": {"tf": 2}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 15}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 39}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 2}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 2}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 2}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 2}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 2}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 2}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 2}, "calidhayte.calibrate.Calibrate.omp": {"tf": 2}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 2}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 2}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 2}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 2}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 2}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 2}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 2}, "calidhayte.calibrate.Calibrate.huber": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 2}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 2}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 2}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 2}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 2}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 2}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 2}, 
"calidhayte.calibrate.Calibrate.svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 2}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 2}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 2}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 2}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 2}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 41}}}}}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 35, "s": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": 
{"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results": {"tf": 1.4142135623730951}, "calidhayte.results.Results": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": 
{"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.scaler": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 13}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.models": {"tf": 1}}, "df": 1}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 9}}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {"calidhayte": {"tf": 2}}, "df": 1, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 2}}, "df": 3, "s": {"docs": {"calidhayte": {"tf": 1}, 
"calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}}, "df": 3}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"calidhayte.results.Results": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 6}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.quantile": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 3, "o": {"2": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 2, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 3}}, "t": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 8}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 2.449489742783178}}, "df": 1}, "m": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1.7320508075688772}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.target": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 50}}}, "u": {"docs": {"calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}}, "df": 1, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.CoefficientPipelineDict": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 5}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}}, "df": 3}}}, "b": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 10, "y": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 5}, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.backend": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {"calidhayte": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.x_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1.4142135623730951}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 8, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, 
"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 13}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.graphs.Graphs.style": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1}}, "df": 4}}, "x": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "o": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 37}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "k": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.folds": {"tf": 1}}, "df": 5, "e": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 7, "s": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.scaler_list": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1}}, "df": 3}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.results.Results": {"tf": 1}, "calidhayte.results.Results.models": {"tf": 1}}, "df": 4}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.target": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 4}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.scaler": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}}, "df": 3}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2}, "r": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1.4142135623730951}}, "df": 2}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 6}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}}, "df": 2}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.models": {"tf": 1.7320508075688772}, "calidhayte.results.Results.models": {"tf": 1.7320508075688772}}, "df": 4}}}, "g": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.7320508075688772}, "calidhayte.graphs.Graphs.models": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs.plots": {"tf": 2.8284271247461903}, "calidhayte.results.Results.models": {"tf": 2.6457513110645907}}, "df": 6, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 2.6457513110645907}, "calidhayte.graphs.Graphs": {"tf": 1}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "+": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}}}, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.graphs.Graphs.plot_meta": {"tf": 1}}, "df": 1}}}, "t": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 3.4641016151377544}, "calidhayte.calibrate.Calibrate": {"tf": 8.888194417315589}, "calidhayte.calibrate.Calibrate.models": {"tf": 2}, "calidhayte.graphs.Graphs.models": {"tf": 2}, "calidhayte.graphs.Graphs.plots": {"tf": 2}, "calidhayte.results.Results.models": {"tf": 2}}, "df": 6}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}, "r": {"2": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 2.6457513110645907}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, 
"calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 39, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"calidhayte": {"tf": 3.3166247903554}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 4}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "d": {"docs": 
{"calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}}, "df": 5}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2, "s": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs": {"tf": 1}, "calidhayte.graphs.Graphs.plot_meta": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 6}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.7320508075688772}}, "df": 2, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}}}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.y_data": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1.7320508075688772}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 43, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, 
"df": 32, "c": {"docs": {}, "df": 0, "v": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.results": {"tf": 1}, "calidhayte.results.Results": {"tf": 1.4142135623730951}}, "df": 4}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1.4142135623730951}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1}}, "df": 1}, "d": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"calidhayte.calibrate.cont_strat_folds": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 2}}}, "v": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.ridge": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "x": {"docs": {"calidhayte": {"tf": 3.7416573867739413}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 2.8284271247461903}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.y_data": {"tf": 1}, "calidhayte.calibrate.Calibrate.models": {"tf": 1}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1}, "calidhayte.graphs.Graphs.plots": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1}, "calidhayte.results.Results.errors": {"tf": 1.7320508075688772}, "calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 2.449489742783178}}, "df": 66, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 1}}}}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 6, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {"calidhayte": {"tf": 3}, "calidhayte.calibrate.cont_strat_folds": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 2.23606797749979}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.target": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, "calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}, "calidhayte.calibrate.Calibrate.return_measurements": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.x": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}, "calidhayte.graphs.Graphs.target": {"tf": 1}, "calidhayte.results.Results.__init__": {"tf": 1.4142135623730951}, "calidhayte.results.Results.explained_variance_score": {"tf": 1.4142135623730951}, "calidhayte.results.Results.max": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared": {"tf": 1.4142135623730951}, "calidhayte.results.Results.root_mean_squared_log": 
{"tf": 1.4142135623730951}, "calidhayte.results.Results.median_absolute": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1.4142135623730951}, "calidhayte.results.Results.r2": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1.4142135623730951}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1.4142135623730951}}, "df": 60, "e": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}, "calidhayte.results.Results.errors": {"tf": 1}, "calidhayte.results.Results.return_errors": {"tf": 1.4142135623730951}}, "df": 8}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"calidhayte": {"tf": 1}, "calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2.449489742783178}, "calidhayte.calibrate.Calibrate.pymc_bayesian": {"tf": 1.7320508075688772}, "calidhayte.calibrate.Calibrate.linreg": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1.4142135623730951}}, "df": 4}}}, "k": {"docs": {"calidhayte.results.Results.explained_variance_score": {"tf": 1}, "calidhayte.results.Results.max": {"tf": 1}, "calidhayte.results.Results.mean_absolute": {"tf": 1}, "calidhayte.results.Results.root_mean_squared": {"tf": 1}, "calidhayte.results.Results.root_mean_squared_log": {"tf": 1}, "calidhayte.results.Results.median_absolute": {"tf": 1}, "calidhayte.results.Results.mean_absolute_percentage": {"tf": 1}, "calidhayte.results.Results.r2": {"tf": 1}, "calidhayte.results.Results.mean_poisson_deviance": {"tf": 1}, "calidhayte.results.Results.mean_gamma_deviance": {"tf": 1}, "calidhayte.results.Results.mean_tweedie_deviance": {"tf": 1}, "calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 12}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 2}, "calidhayte.calibrate.Calibrate.__init__": {"tf": 1.4142135623730951}}, "df": 2, "[": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate.linreg": {"tf": 1}, "calidhayte.calibrate.Calibrate.ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.multi_task_elastic_net": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1}, "calidhayte.calibrate.Calibrate.omp": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ridge": {"tf": 1}, "calidhayte.calibrate.Calibrate.bayesian_ard": {"tf": 1}, 
"calidhayte.calibrate.Calibrate.tweedie": {"tf": 1}, "calidhayte.calibrate.Calibrate.stochastic_gradient_descent": {"tf": 1}, "calidhayte.calibrate.Calibrate.passive_aggressive": {"tf": 1}, "calidhayte.calibrate.Calibrate.ransac": {"tf": 1}, "calidhayte.calibrate.Calibrate.theil_sen": {"tf": 1}, "calidhayte.calibrate.Calibrate.huber": {"tf": 1}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1}, "calidhayte.calibrate.Calibrate.decision_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_tree": {"tf": 1}, "calidhayte.calibrate.Calibrate.random_forest": {"tf": 1}, "calidhayte.calibrate.Calibrate.extra_trees_ensemble": {"tf": 1}, "calidhayte.calibrate.Calibrate.gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.hist_gradient_boost_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1}, "calidhayte.calibrate.Calibrate.svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.linear_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.nu_svr": {"tf": 1}, "calidhayte.calibrate.Calibrate.gaussian_process": {"tf": 1}, "calidhayte.calibrate.Calibrate.isotonic": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost": {"tf": 1}, "calidhayte.calibrate.Calibrate.xgboost_rf": {"tf": 1}}, "df": 32}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte": {"tf": 1}}, "df": 1}, "g": {"docs": {"calidhayte.results.Results.root_mean_squared_log": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.mean_pinball_loss": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"calidhayte.calibrate": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate": {"tf": 1.4142135623730951}, "calidhayte.results": {"tf": 1.4142135623730951}}, "df": 3}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}}, "df": 3}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.results.Results.return_errors": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"calidhayte.calibrate.Calibrate": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {"calidhayte.calibrate.Calibrate.models": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.multi_task_lasso_cv": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.elastic_net_cv": {"tf": 1}, "calidhayte.calibrate.Calibrate.lars_lasso": {"tf": 1.4142135623730951}, "calidhayte.calibrate.Calibrate.return_models": {"tf": 1}, "calidhayte.graphs.Graphs.models": {"tf": 1.4142135623730951}, "calidhayte.graphs.Graphs.plots": {"tf": 1.4142135623730951}, "calidhayte.results.Results.models": {"tf": 1.4142135623730951}}, "df": 11}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"calidhayte.calibrate.Calibrate.mlp_regressor": {"tf": 1.4142135623730951}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"calidhayte.graphs.Graphs.x_name": {"tf": 1}, "calidhayte.graphs.Graphs.y_name": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {"calidhayte.calibrate.Calibrate": 
{"tf": 1}}, "df": 1}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"calidhayte.calibrate.Calibrate.__init__": {"tf": 2}, "calidhayte.calibrate.Calibrate.quantile": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}, "pipeline": ["trimmer"], "_isPrebuiltIndex": true}; // mirrored in build-search-index.js (part 1) // Also split on html tags. this is a cheap heuristic, but good enough. diff --git a/src/calidhayte/calibrate.py b/src/calidhayte/calibrate.py index bc582d9..5d1964c 100644 --- a/src/calidhayte/calibrate.py +++ b/src/calidhayte/calibrate.py @@ -30,6 +30,7 @@ from sklearn import neural_network as nn from sklearn import svm from sklearn import tree +from sklearn.gaussian_process import kernels as kern import sklearn.preprocessing as pre from sklearn.model_selection import StratifiedKFold, RandomizedSearchCV from sklearn.compose import ColumnTransformer @@ -1645,6 +1646,22 @@ def decision_tree( List[Union[int, str, float]] ] ] = { + 'criterion': [ + 'squared_error', + 'friedman_mse', + 'absolute_error', + 'poisson' + ], + 'splitter': [ + 'best', + 'random' + ], + 'max_features': [ + None, + 'sqrt', + 'log2' + ], + 'ccp_alpha': uniform(loc=0, scale=2), }, **kwargs ): @@ -1691,6 +1708,22 @@ def extra_tree( List[Union[int, str, float]] ] ] = { + 'criterion': [ + 'squared_error', + 'friedman_mse', + 'absolute_error', + 'poisson' + ], + 'splitter': [ + 'best', + 'random' + ], + 'max_features': [ + None, + 'sqrt', + 'log2' + ], + 'ccp_alpha': uniform(loc=0, scale=2), }, **kwargs ): @@ -1737,6 +1770,21 @@ def random_forest( List[Union[int, str, float]] ] ] = { + 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], + 'bootstrap': [True, False], + 'max_samples': uniform(loc=0.01, scale=0.99), + 'criterion': [ + 'squared_error', + 'friedman_mse', + 'absolute_error', + 'poisson' + ], + 'max_features': [ + None, + 'sqrt', + 'log2' + ], + 'ccp_alpha': uniform(loc=0, scale=2), }, **kwargs ): @@ -1783,6 +1831,21 @@ def extra_trees_ensemble( List[Union[int, str, float]] ] ] = { + 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], + 'bootstrap': [True, False], + 'max_samples': uniform(loc=0.01, scale=0.99), + 'criterion': [ + 'squared_error', + 'friedman_mse', + 'absolute_error', + 'poisson' + ], + 'max_features': [ + None, + 'sqrt', + 'log2' + ], + 'ccp_alpha': uniform(loc=0, scale=2), }, **kwargs ): @@ -1829,6 +1892,31 @@ def gradient_boost_regressor( List[Union[int, str, float]] ] ] = { + 'loss': [ + 'squared_error', + 'absolute_error', + 'huber', + 'quantile' + ], + 'learning_rate': uniform(loc=0, scale=2), + 'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500], + 'subsample': uniform(loc=0.01, scale=0.99), + 'criterion': [ + 'friedman_mse', + 'squared_error' + ], + 'max_features': [ + None, + 'sqrt', + 'log2' + ], + 'init': [ + None, + 'zero', + lm.LinearRegression, + lm.TheilSenRegressor + ], + 'ccp_alpha': uniform(loc=0, scale=2) }, **kwargs ): @@ -1875,6 +1963,18 @@ def hist_gradient_boost_regressor( List[Union[int, str, float]] ] ] = { + 'loss': [ + 'squared_error', + 'absolute_error', + 'gamma', + 'poisson', + 'quantile' + ], + 'quantile': uniform(loc=0, scale=1), + 'learning_rate': uniform(loc=0, scale=2), + 'max_iter': [5, 10, 25, 50, 100, 200, 250, 500], + 'l2_regularization': uniform(loc=0, scale=2), + 'max_bins': [1, 3, 7, 15, 31, 63, 127, 255] }, **kwargs ): @@ -1921,6 +2021,48 @@ def mlp_regressor( List[Union[int, str, float]] ] 
@@ -1921,6 +2021,48 @@ def mlp_regressor(
                 List[Union[int, str, float]]
             ]
         ] = {
+            'hidden_layer_sizes': [
+                (100, ),
+                (100, 200),
+                (10, ),
+                (200, 400),
+                (100, 200, 300)
+            ],
+            'activation': [
+                'identity',
+                'logistic',
+                'tanh',
+                'relu'
+            ],
+            'solver': [
+                'lbfgs',
+                'sgd',
+                'adam'
+            ],
+            'alpha': uniform(loc=0, scale=0.1),
+            'batch_size': [
+                'auto',
+                20,
+                200,
+                500,
+                1000,
+                5000,
+                10000
+            ],
+            'learning_rate': [
+                'constant',
+                'invscaling',
+                'adaptive'
+            ],
+            'learning_rate_init': uniform(loc=0, scale=0.1),
+            'power_t': uniform(loc=0.1, scale=0.9),
+            'max_iter': [5, 10, 25, 50, 100, 200, 250, 500],
+            'shuffle': [True, False],
+            'momentum': uniform(loc=0.1, scale=0.9),
+            'beta_1': uniform(loc=0.1, scale=0.9),
+            'beta_2': uniform(loc=0.1, scale=0.9),
+            'epsilon': uniform(loc=1E8, scale=1E6),
         },
         **kwargs
     ):
@@ -1967,6 +2109,18 @@ def svr(
                 List[Union[int, str, float]]
             ]
         ] = {
+            'kernel': [
+                'linear',
+                'poly',
+                'rbf',
+                'sigmoid',
+            ],
+            'degree': [2, 3, 4],
+            'gamma': ['scale', 'auto'],
+            'coef0': uniform(loc=0, scale=1),
+            'C': uniform(loc=0.1, scale=1.9),
+            'epsilon': uniform(loc=1E8, scale=1),
+            'shrinking': [True, False]
         },
         **kwargs
     ):
@@ -2013,6 +2167,9 @@ def linear_svr(
                 List[Union[int, str, float]]
             ]
         ] = {
+            'C': uniform(loc=0.1, scale=1.9),
+            'epsilon': uniform(loc=1E8, scale=1),
+            'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive']
         },
         **kwargs
     ):
@@ -2059,6 +2216,17 @@ def nu_svr(
                 List[Union[int, str, float]]
             ]
         ] = {
+            'kernel': [
+                'linear',
+                'poly',
+                'rbf',
+                'sigmoid',
+            ],
+            'degree': [2, 3, 4],
+            'gamma': ['scale', 'auto'],
+            'coef0': uniform(loc=0, scale=1),
+            'shrinking': [True, False],
+            'nu': uniform(loc=0, scale=1),
         },
         **kwargs
     ):
@@ -2105,6 +2273,17 @@ def gaussian_process(
                 List[Union[int, str, float]]
             ]
         ] = {
+            'kernel': [
+                None,
+                kern.RBF,
+                kern.Matern,
+                kern.DotProduct,
+                kern.WhiteKernel,
+                kern.CompoundKernel,
+                kern.ExpSineSquared
+            ],
+            'alpha': uniform(loc=0, scale=1E8),
+            'normalize_y': [True, False]
         },
         **kwargs
     ):
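The same [loc, loc + scale) convention applies to the support-vector presets above: 'C': uniform(loc=0.1, scale=1.9) searches C over roughly [0.1, 2.0). The sketch below is illustrative only: it wraps an SVR in a scaling Pipeline so the searched parameter names gain a step prefix ('svr__C' and so on). The step names, synthetic data and epsilon range are assumptions for the example; the pipeline that Calibrate actually builds is not shown in these hunks.

# Sketch only: searching SVR hyperparameters through a scaling pipeline.
# The 'svr' step name (and the 'svr__' prefixes) are illustrative, not the
# names used internally by calidhayte.
import numpy as np
from scipy.stats import uniform
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

rng = np.random.default_rng(62)
x = rng.normal(size=(200, 4))
y = x[:, 0] - 0.5 * x[:, 1] + rng.normal(scale=0.1, size=200)

pipe = Pipeline([('scale', StandardScaler()), ('svr', SVR())])

parameters = {
    'svr__kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
    'svr__degree': [2, 3, 4],
    'svr__gamma': ['scale', 'auto'],
    'svr__C': uniform(loc=0.1, scale=1.9),         # C sampled from [0.1, 2.0)
    'svr__epsilon': uniform(loc=0.01, scale=0.2),  # width of the epsilon-tube
}

search = RandomizedSearchCV(pipe, parameters, n_iter=20, cv=5)
search.fit(x, y)
print(search.best_params_)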
@@ -2140,52 +2319,6 @@ def gaussian_process(
             random_search=random_search
         )
 
-    def pls(
-        self,
-        name: str = "PLS Regression",
-        random_search: bool = False,
-        parameters: dict[
-            str,
-            Union[
-                scipy.stats.rv_continuous,
-                List[Union[int, str, float]]
-            ]
-        ] = {
-        },
-        **kwargs
-    ):
-        """
-        Fit x on y via pls regression
-
-        Parameters
-        ----------
-        name : str, default="Gaussian Process Regression"
-            Name of classification technique.
-        random_search : bool, default=False
-            Whether to perform RandomizedSearch to optimise parameters
-        parameters : dict[\
-            str,\
-            Union[\
-                scipy.stats.rv_continuous,\
-                List[Union[int, str, float]]\
-            ]\
-        ], default=Preset distributions
-            The parameters used in RandomizedSearchCV
-        """
-        if random_search:
-            classifier = RandomizedSearchCV(
-                cd.PLSRegression(**kwargs),
-                parameters,
-                cv=self.folds
-            )
-        else:
-            classifier = cd.PLSRegression(**kwargs)
-        self._sklearn_regression_meta(
-            classifier,
-            f'{name}{" (Random Search)" if random_search else ""}',
-            random_search=random_search
-        )
-
     def isotonic(
         self,
         name: str = "Isotonic Regression",
@@ -2197,6 +2330,7 @@ def isotonic(
                 List[Union[int, str, float]]
             ]
         ] = {
+            'increasing': [True, False]
         },
         **kwargs
     ):
@@ -2244,6 +2378,18 @@ def xgboost(
                 List[Union[int, str, float]]
             ]
         ] = {
+            'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500],
+            'max_bins': [1, 3, 7, 15, 31, 63, 127, 255],
+            'grow_policy': [
+                'depthwise',
+                'lossguide'
+            ],
+            'learning_rate': uniform(loc=0, scale=2),
+            'tree_method': ['exact', 'approx', 'hist'],
+            'gamma': uniform(loc=0, scale=1),
+            'subsample': uniform(loc=0, scale=1),
+            'reg_alpha': uniform(loc=0, scale=1),
+            'reg_lambda': uniform(loc=0, scale=1)
         },
         **kwargs
     ):
@@ -2290,6 +2436,18 @@ def xgboost_rf(
                 List[Union[int, str, float]]
             ]
         ] = {
+            'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500],
+            'max_bin': [1, 3, 7, 15, 31, 63, 127, 255],
+            'grow_policy': [
+                'depthwise',
+                'lossguide'
+            ],
+            'learning_rate': uniform(loc=0, scale=2),
+            'tree_method': ['exact', 'approx', 'hist'],
+            'gamma': uniform(loc=0, scale=1),
+            'subsample': uniform(loc=0, scale=1),
+            'reg_alpha': uniform(loc=0, scale=1),
+            'reg_lambda': uniform(loc=0, scale=1)
         },
         **kwargs
     ):
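xgboost's XGBRegressor follows the scikit-learn estimator API, so the xgboost presets above drop into the same RandomizedSearchCV pattern. The sketch below is a loose illustration with placeholder data: it narrows subsample and tree_method so every sampled candidate stays valid and omits the binning parameter, so it is not a faithful reproduction of the preset or of how calidhayte wires the search internally.

# Sketch only: an xgboost-style preset used with RandomizedSearchCV.
import numpy as np
from scipy.stats import uniform
from sklearn.model_selection import RandomizedSearchCV
import xgboost as xgb

rng = np.random.default_rng(62)
x = rng.normal(size=(300, 4))
y = 2 * x[:, 0] + x[:, 1] ** 2 + rng.normal(scale=0.1, size=300)

parameters = {
    'n_estimators': [5, 10, 25, 50, 100, 200, 250, 500],
    'grow_policy': ['depthwise', 'lossguide'],
    'learning_rate': uniform(loc=0, scale=2),
    'tree_method': ['approx', 'hist'],
    'gamma': uniform(loc=0, scale=1),
    'subsample': uniform(loc=0.5, scale=0.5),  # keep a workable fraction of rows
    'reg_alpha': uniform(loc=0, scale=1),
    'reg_lambda': uniform(loc=0, scale=1),
}

search = RandomizedSearchCV(xgb.XGBRegressor(), parameters, n_iter=10, cv=3)
search.fit(x, y)
print(search.best_params_)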