diff --git a/data/any_env.py b/data/any_env.py index 2074dac9..c54e16c5 100644 --- a/data/any_env.py +++ b/data/any_env.py @@ -12,10 +12,10 @@ def create_data(X=None): @staticmethod def create_data_popen(X=None): # Specify the python package dependencies. Will be installed in order of list - pyversion = "3.8" + pyversion = "3.11" _install_h2oaicore = False _install_datatable = True - _modules_needed_by_name = ["pandas==1.1.5"] + _modules_needed_by_name = ["pandas==1.5.3"] import os from h2oaicore.data import DataContribLoader diff --git a/data/any_env2.py b/data/any_env2.py index ce0781e2..fea01eee 100644 --- a/data/any_env2.py +++ b/data/any_env2.py @@ -4,7 +4,7 @@ import functools -def wrap_create(pyversion="3.8", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=[], +def wrap_create(pyversion="3.11", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=[], cache_env=False, file=None, id=None, **kwargs_wrapper): """ Decorate a function to create_data in popen in isolated env @@ -23,7 +23,7 @@ def wrapper(*args, **kwargs): return actual_decorator -def create_data_popen(func, *args, pyversion="3.8", install_h2oaicore=False, install_datatable=True, +def create_data_popen(func, *args, pyversion="3.11", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=[], cache_env=False, file=None, id=None, X=None, **kwargs): """ Run recipe in popen in isolated env @@ -119,9 +119,9 @@ class FreshEnvData(CustomData): # NOTE: Keep @wrap_create on a single line # NOTE: If want to share cache across recipes, can set cache_env=True and set id= # Below caches the env into "id" folder - # @wrap_create(pyversion="3.8", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.1.5"], cache_env=True, file=__file__, id="myrecipe12345") + # @wrap_create(pyversion="3.11", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.5.3"], cache_env=True, file=__file__, 
id="myrecipe12345") # Below does not cache the env - @wrap_create(pyversion="3.8", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.1.5"], file=__file__) + @wrap_create(pyversion="3.11", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.5.3"], file=__file__) def create_data(X=None): import os import datatable as dt diff --git a/data/any_env3.py b/data/any_env3.py index 8f073b51..608703b7 100644 --- a/data/any_env3.py +++ b/data/any_env3.py @@ -10,9 +10,9 @@ class FreshEnvData(CustomData): # NOTE: Keep @wrap_create on a single line # NOTE: If want to share cache across recipes, can set cache_env=True and set id= # Below caches the env into "id" folder - # @wrap_create(pyversion="3.6", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.1.5"], cache_env=True, file=__file__, id="myrecipe12345") + # @wrap_create(pyversion="3.11", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.5.3"], cache_env=True, file=__file__, id="myrecipe12345") # Below does not cache the env - @wrap_create(pyversion="3.8", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.1.5"], file=__file__) + @wrap_create(pyversion="3.11", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.5.3"], file=__file__) def create_data(X=None): import os import datatable as dt diff --git a/data/any_env4.py b/data/any_env4.py index 0b0c0c81..ae612d1b 100644 --- a/data/any_env4.py +++ b/data/any_env4.py @@ -6,10 +6,10 @@ class FreshEnvData(CustomData): # Specify the python package dependencies. 
Will be installed in order of list # Below caches the env into "id" folder - # isolate_env = dict(pyversion="3.6", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.1.5"], cache_env=True, id="myrecipe12345") + # isolate_env = dict(pyversion="3.11", install_h2oaicore=False, install_datatable=True, modules_needed_by_name=["pandas==1.5.3"], cache_env=True, id="myrecipe12345") # Below does not cache the env - isolate_env = dict(pyversion="3.8", install_h2oaicore=False, install_datatable=True, - modules_needed_by_name=["pandas==1.1.5"]) + isolate_env = dict(pyversion="3.11", install_h2oaicore=False, install_datatable=True, + modules_needed_by_name=["pandas==1.5.3"]) @staticmethod def create_data(X=None): diff --git a/data/audio_to_image.py b/data/audio_to_image.py index 96a664d0..a01c6fa8 100644 --- a/data/audio_to_image.py +++ b/data/audio_to_image.py @@ -38,7 +38,7 @@ class AudioToMelSpectogram: For Ubuntu, required to do: sudo apt-get install libsndfile1 libsndfile1-dev For Centos, required to do: sudo yum install libsndfile libsndfile-dev """ - _modules_needed_by_name = ["librosa==0.8.1"] + _modules_needed_by_name = ["librosa==0.10.2.post1"] def __init__( self, min_seconds=2, sampling_rate=44100, n_mels=128, hop_length=345 * 2 diff --git a/data/nlp/text_summarization.py b/data/nlp/text_summarization.py index 9d10f3ff..4cc4f4a5 100644 --- a/data/nlp/text_summarization.py +++ b/data/nlp/text_summarization.py @@ -10,7 +10,7 @@ # output dataset name output_dataset_name = "data_with_summary" -_global_modules_needed_by_name = ["gensim==3.8.0"] +_global_modules_needed_by_name = ["gensim==4.3.2"] class TextSummarizationClass(CustomData): diff --git a/data/nlp/topic_modeling.py b/data/nlp/topic_modeling.py index 497ac262..61f4d985 100644 --- a/data/nlp/topic_modeling.py +++ b/data/nlp/topic_modeling.py @@ -20,7 +20,7 @@ # number of top words to be represented in the column name n_words_colname = 10 -_global_modules_needed_by_name = 
["gensim==3.8.0"] +_global_modules_needed_by_name = ["gensim==4.3.2"] stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', diff --git a/data/video_to_image.py b/data/video_to_image.py index 11d03d49..2fdf2f8c 100644 --- a/data/video_to_image.py +++ b/data/video_to_image.py @@ -35,7 +35,7 @@ class VideoToFrames: Additionally detects all faces for each frame. """ - _modules_needed_by_name = ["torchvision==0.4.1", "facenet-pytorch==2.2.9"] + _modules_needed_by_name = ["torchvision==0.18.0+rocm6.0", "facenet-pytorch==2.5.3"] def __init__(self, num_frames_per_video=3, face_additional_area=0.5): self.num_frames_per_video = num_frames_per_video diff --git a/data/wav2txt.py b/data/wav2txt.py index 4da251fc..b0a06420 100644 --- a/data/wav2txt.py +++ b/data/wav2txt.py @@ -29,7 +29,7 @@ class AzureWav2Txt(BaseData): """ """Specify the python package dependencies (will be installed via pip install mypackage==1.3.37)""" - _modules_needed_by_name = ["azure-cognitiveservices-speech==1.16.0"] + _modules_needed_by_name = ["azure-cognitiveservices-speech==1.37.0"] @staticmethod def create_data(X: dt.Frame = None) -> dt.Frame: diff --git a/explainers/explainers/morris_sensitivity_explainer.py b/explainers/explainers/morris_sensitivity_explainer.py index a9fe0a16..e53e0d16 100644 --- a/explainers/explainers/morris_sensitivity_explainer.py +++ b/explainers/explainers/morris_sensitivity_explainer.py @@ -48,7 +48,7 @@ class MorrisSensitivityLeExplainer(CustomExplainer, CustomDaiExplainer): # declaration of explanation types this explainer creates e.g. 
feature importance _explanation_types = [GlobalFeatImpExplanation] # Python package dependencies (can be installed using pip) - _modules_needed_by_name = ["interpret==0.3.2"] + _modules_needed_by_name = ["interpret==0.6.1"] # explainer constructor must not have any required parameters def __init__(self): diff --git a/models/algorithms/autogluon.py b/models/algorithms/autogluon.py index 77341316..58bab8c0 100644 --- a/models/algorithms/autogluon.py +++ b/models/algorithms/autogluon.py @@ -27,7 +27,7 @@ class AutoGluonModel(CustomModel): # autogluon depends upon slightly different package versions than DAI has, or could work with xgboost but needs official xgboost <1.5 # so use isolated env - isolate_env = dict(pyversion="3.8", install_h2oaicore=False, install_datatable=True, cache_env=True, + isolate_env = dict(pyversion="3.11", install_h2oaicore=False, install_datatable=True, cache_env=True, - cache_by_full_module_name=False, install_pip="latest", modules_needed_by_name=['autogluon==0.3.1']) + cache_by_full_module_name=False, install_pip="latest", modules_needed_by_name=['autogluon==1.1.0']) diff --git a/models/algorithms/calibratedClassifier.py b/models/algorithms/calibratedClassifier.py index f97c2111..0d2c9e67 100644 --- a/models/algorithms/calibratedClassifier.py +++ b/models/algorithms/calibratedClassifier.py @@ -34,7 +34,7 @@ class CalibratedClassifierModel: le = LabelEncoder() - _modules_needed_by_name = ['ml_insights==0.1.4'] # for SplineCalibration + _modules_needed_by_name = ['ml_insights==1.0.3'] # for SplineCalibration @staticmethod def is_enabled(): diff --git a/models/algorithms/catboost.py b/models/algorithms/catboost.py index 152aeaca..ce1844f0 100644 --- a/models/algorithms/catboost.py +++ b/models/algorithms/catboost.py @@ -87,7 +87,7 @@ def has_pred_contribs(self): def has_output_margin(self): return True - _modules_needed_by_name = ['catboost==1.0.5'] + _modules_needed_by_name = ['catboost==1.2.5'] def set_default_params(self, accuracy=10, time_tolerance=10, interpretability=1, diff --git a/models/algorithms/catboost_regression_uncertanity.py 
b/models/algorithms/catboost_regression_uncertanity.py index 485af05a..728aa7ef 100644 --- a/models/algorithms/catboost_regression_uncertanity.py +++ b/models/algorithms/catboost_regression_uncertanity.py @@ -46,7 +46,7 @@ class CatBoostRegressionUncertanityModel(CustomModel): _display_name = "CatBoostRegressionUncertanity" _description = "Yandex CatBoost GBM" - _modules_needed_by_name = ["catboost==1.0.4"] + _modules_needed_by_name = ["catboost==1.2.5"] @staticmethod def do_acceptance_test(): diff --git a/models/algorithms/h2o-3-gbm-poisson.py b/models/algorithms/h2o-3-gbm-poisson.py index ef475030..e1727f0c 100644 --- a/models/algorithms/h2o-3-gbm-poisson.py +++ b/models/algorithms/h2o-3-gbm-poisson.py @@ -11,7 +11,7 @@ import numpy as np -_global_modules_needed_by_name = ['h2o==3.34.0.7'] +_global_modules_needed_by_name = ['h2o==3.46.0.2'] import h2o import os diff --git a/models/algorithms/h2o-3-models.py b/models/algorithms/h2o-3-models.py index 15fcd3d2..a66539b2 100644 --- a/models/algorithms/h2o-3-models.py +++ b/models/algorithms/h2o-3-models.py @@ -12,7 +12,7 @@ import numpy as np import pandas as pd -_global_modules_needed_by_name = ['h2o==3.34.0.7'] +_global_modules_needed_by_name = ['h2o==3.46.0.2'] import h2o import os diff --git a/models/algorithms/h2o-glm-poisson.py b/models/algorithms/h2o-glm-poisson.py index bd3ccb57..5453f46c 100644 --- a/models/algorithms/h2o-glm-poisson.py +++ b/models/algorithms/h2o-glm-poisson.py @@ -9,7 +9,7 @@ from h2oaicore.systemutils import config, user_dir, remove, IgnoreEntirelyError import numpy as np -_global_modules_needed_by_name = ['h2o==3.34.0.7'] +_global_modules_needed_by_name = ['h2o==3.46.0.2'] import h2o import os diff --git a/models/mli/model_ebm.py b/models/mli/model_ebm.py index 18ec3f9e..4db7f41f 100644 --- a/models/mli/model_ebm.py +++ b/models/mli/model_ebm.py @@ -32,7 +32,7 @@ class EBMModel(CustomModel): "Unified Framework for Machine Learning Interpretability. 
" "URL https://arxiv.org/pdf/1909.09223.pdf" ) - _modules_needed_by_name = ["pillow==8.3.2", "interpret==0.1.20"] + _modules_needed_by_name = ["pillow==10.3.0", "interpret==0.6.1"] @staticmethod def do_acceptance_test(): diff --git a/models/mli/model_gam.py b/models/mli/model_gam.py index 98279878..6df78178 100755 --- a/models/mli/model_gam.py +++ b/models/mli/model_gam.py @@ -17,7 +17,7 @@ class GAM(CustomModel): _multiclass = False _display_name = "GAM" _description = "Generalized Additive Model" - _modules_needed_by_name = ['pygam==0.8.0'] + _modules_needed_by_name = ['pygam==0.9.1'] _testing_can_skip_failure = False # ensure tested as if shouldn't fail @staticmethod diff --git a/models/timeseries/autoarima_parallel.py b/models/timeseries/autoarima_parallel.py index 9ff86824..9a592452 100644 --- a/models/timeseries/autoarima_parallel.py +++ b/models/timeseries/autoarima_parallel.py @@ -79,24 +79,20 @@ def do_acceptance_test(): froms3 = True if froms3: _root_path = "https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes" - _suffix = "-cp38-cp38-linux_x86_64.whl" + _suffix = "-cp311-cp311-linux_x86_64.whl" _modules_needed_by_name = [ '%s/setuptools_git-1.2%s' % (_root_path, _suffix), '%s/LunarCalendar-0.0.9%s' % (_root_path, _suffix), - '%s/ephem-3.7.7.1%s' % (_root_path, _suffix), - '%s/cmdstanpy-0.9.5%s' % (_root_path, _suffix), - '%s/pystan-2.19.1.1%s' % (_root_path, _suffix), - '%s/httpstan-4.5.0%s' % (_root_path, _suffix), - '%s/fbprophet-0.7.1%s' % (_root_path, _suffix), - "statsforecast==0.6.0", - "prophet==1.1", + '%s/ephem-4.1.5%s' % (_root_path, _suffix), + '%s/cmdstanpy-1.2.2%s' % (_root_path, _suffix), + '%s/pystan-3.9.1%s' % (_root_path, _suffix), + '%s/httpstan-4.12.0%s' % (_root_path, _suffix), + '%s/prophet-1.1.5%s' % (_root_path, _suffix), + "statsforecast==1.7.4", ] else: - _modules_needed_by_name = ['holidays==0.11.1', 'convertdate', 'lunarcalendar', 'pystan==2.19.1.1', - 'fbprophet==0.7.1', - "statsforecast==0.6.0", - "prophet==1.1", - ] + 
_modules_needed_by_name = ['holidays==0.47', 'convertdate', 'lunarcalendar', 'pystan==3.9.1', + 'prophet==1.1.5', 'statsforecast==1.7.4'] def set_default_params( self, accuracy=None, time_tolerance=None, interpretability=None, **kwargs diff --git a/models/timeseries/fb_prophet.py b/models/timeseries/fb_prophet.py index c3d7820a..55e4689f 100644 --- a/models/timeseries/fb_prophet.py +++ b/models/timeseries/fb_prophet.py @@ -53,19 +53,19 @@ def do_acceptance_test(): froms3 = True if froms3: _root_path = "https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes" - _suffix = "-cp38-cp38-linux_x86_64.whl" + _suffix = "-cp311-cp311-linux_x86_64.whl" _modules_needed_by_name = [ '%s/setuptools_git-1.2%s' % (_root_path, _suffix), '%s/LunarCalendar-0.0.9%s' % (_root_path, _suffix), - '%s/ephem-3.7.7.1%s' % (_root_path, _suffix), - '%s/cmdstanpy-0.9.5%s' % (_root_path, _suffix), - '%s/pystan-2.19.1.1%s' % (_root_path, _suffix), - '%s/httpstan-4.5.0%s' % (_root_path, _suffix), - '%s/fbprophet-0.7.1%s' % (_root_path, _suffix), + '%s/ephem-4.1.5%s' % (_root_path, _suffix), + '%s/cmdstanpy-1.2.2%s' % (_root_path, _suffix), + '%s/pystan-3.9.1%s' % (_root_path, _suffix), + '%s/httpstan-4.12.0%s' % (_root_path, _suffix), + '%s/prophet-1.1.5%s' % (_root_path, _suffix), ] else: - _modules_needed_by_name = ['holidays==0.11.1', 'convertdate', 'lunarcalendar', 'pystan==2.19.1.1', - 'fbprophet==0.7.1'] + _modules_needed_by_name = ['holidays==0.47', 'convertdate', 'lunarcalendar', 'pystan==3.9.1', + 'prophet==1.1.5'] def set_default_params(self, accuracy=None, time_tolerance=None, interpretability=None, @@ -192,7 +192,7 @@ def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=No priors = {} models = {} - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") # Fit 1 FB Prophet model per time group columns diff --git a/models/timeseries/fb_prophet_parallel.py b/models/timeseries/fb_prophet_parallel.py 
index 0b5cbe2c..c826c05e 100644 --- a/models/timeseries/fb_prophet_parallel.py +++ b/models/timeseries/fb_prophet_parallel.py @@ -67,19 +67,19 @@ def do_acceptance_test(): froms3 = True if froms3: _root_path = "https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes" - _suffix = "-cp38-cp38-linux_x86_64.whl" + _suffix = "-cp311-cp311-linux_x86_64.whl" _modules_needed_by_name = [ '%s/setuptools_git-1.2%s' % (_root_path, _suffix), '%s/LunarCalendar-0.0.9%s' % (_root_path, _suffix), - '%s/ephem-3.7.7.1%s' % (_root_path, _suffix), - '%s/cmdstanpy-0.9.5%s' % (_root_path, _suffix), - '%s/pystan-2.19.1.1%s' % (_root_path, _suffix), - '%s/httpstan-4.5.0%s' % (_root_path, _suffix), - '%s/fbprophet-0.7.1%s' % (_root_path, _suffix), + '%s/ephem-4.1.5%s' % (_root_path, _suffix), + '%s/cmdstanpy-1.2.2%s' % (_root_path, _suffix), + '%s/pystan-3.9.1%s' % (_root_path, _suffix), + '%s/httpstan-4.12.0%s' % (_root_path, _suffix), + '%s/prophet-1.1.5%s' % (_root_path, _suffix), ] else: - _modules_needed_by_name = ['holidays==0.11.1', 'convertdate', 'lunarcalendar', 'pystan==2.19.1.1', - 'fbprophet==0.7.1'] + _modules_needed_by_name = ['holidays==0.47', 'convertdate', 'lunarcalendar', 'pystan==3.9.1', + 'prophet==1.1.5'] def set_default_params(self, accuracy=None, time_tolerance=None, interpretability=None, @@ -178,7 +178,7 @@ def _fit_async(X_path, grp_hash, tmp_folder): # print("prophet - small data work-around for group: %s" % grp_hash) return grp_hash, None # Import FB Prophet package - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") nrows = X[['ds', 'y']].shape[0] n_changepoints = max(1, int(nrows * (2 / 3))) @@ -264,7 +264,7 @@ def _fit_async(X_path, grp_hash, tmp_folder, params, cap): return grp_hash, None # Import FB Prophet package - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") # Fit current model and prior @@ -398,7 +398,7 @@ 
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=No X_avg = X[['ds', 'y']].groupby('ds').mean().reset_index() # Send that to Prophet - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") nrows = X[['ds', 'y']].shape[0] n_changepoints = max(1, int(nrows * (2 / 3))) diff --git a/models/timeseries/nixtla_arimax.py b/models/timeseries/nixtla_arimax.py index b8dd2e62..81d302d9 100644 --- a/models/timeseries/nixtla_arimax.py +++ b/models/timeseries/nixtla_arimax.py @@ -74,7 +74,7 @@ class AutoARIMAParallelModel(CustomTimeSeriesModel): _parallel_task = True _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ['statsforecast==1.5.0'] + _modules_needed_by_name = ['statsforecast==1.7.4'] @staticmethod def is_enabled(): diff --git a/models/timeseries/nixtla_ces.py b/models/timeseries/nixtla_ces.py index c2f3201c..81780acb 100644 --- a/models/timeseries/nixtla_ces.py +++ b/models/timeseries/nixtla_ces.py @@ -74,7 +74,7 @@ class AutoCESParallelModel(CustomTimeSeriesModel): _parallel_task = True _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ['statsforecast==1.5.0'] + _modules_needed_by_name = ['statsforecast==1.7.4'] @staticmethod def is_enabled(): diff --git a/models/timeseries/nixtla_ets.py b/models/timeseries/nixtla_ets.py index 0f8afe29..597a98be 100644 --- a/models/timeseries/nixtla_ets.py +++ b/models/timeseries/nixtla_ets.py @@ -74,7 +74,7 @@ class AutoETSParallelModel(CustomTimeSeriesModel): _parallel_task = True _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ['statsforecast==1.5.0'] + _modules_needed_by_name = ['statsforecast==1.7.4'] @staticmethod def is_enabled(): diff --git a/models/timeseries/nixtla_theta.py b/models/timeseries/nixtla_theta.py index acee7058..ea9bf34d 100644 --- a/models/timeseries/nixtla_theta.py +++ 
b/models/timeseries/nixtla_theta.py @@ -74,7 +74,7 @@ class AutoThetaParallelModel(CustomTimeSeriesModel): _parallel_task = True _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ['statsforecast==1.5.0'] + _modules_needed_by_name = ['statsforecast==1.7.4'] @staticmethod def is_enabled(): diff --git a/models/unsupervised/TextKMeansIsolationForest.py b/models/unsupervised/TextKMeansIsolationForest.py index 53074b4a..7b4c13ec 100644 --- a/models/unsupervised/TextKMeansIsolationForest.py +++ b/models/unsupervised/TextKMeansIsolationForest.py @@ -214,7 +214,7 @@ class TextLDATopicUnsupervisedTransformer(CustomUnsupervisedTransformer): """Transformer to extract topics from text column using LDA""" _is_reproducible = False _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ["gensim==3.8.0"] + _modules_needed_by_name = ["gensim==4.3.2"] def __init__(self, n_topics, **kwargs): super().__init__(**kwargs) diff --git a/models/unsupervised/lda.py b/models/unsupervised/lda.py index e7387c8c..8d5b7f97 100644 --- a/models/unsupervised/lda.py +++ b/models/unsupervised/lda.py @@ -11,7 +11,7 @@ class TextLDATopicTransformer(CustomUnsupervisedTransformer): """Transformer to extract topics from text column using LDA""" _is_reproducible = False _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ["gensim==3.8.0"] + _modules_needed_by_name = ["gensim==4.3.2"] def __init__(self, n_topics, **kwargs): super().__init__(**kwargs) diff --git a/transformers/anomaly /isolation_forest.py b/transformers/anomaly /isolation_forest.py index 24336d86..b1f57c78 100644 --- a/transformers/anomaly /isolation_forest.py +++ b/transformers/anomaly /isolation_forest.py @@ -15,7 +15,7 @@ from h2oaicore.systemutils import config, user_dir, remove, IgnoreEntirelyError, print_debug from h2oaicore.transformer_utils import CustomTransformer -_global_modules_needed_by_name = 
['h2o==3.34.0.7'] +_global_modules_needed_by_name = ['h2o==3.46.0.2'] import h2o from h2o import H2OFrame from h2o.estimators import H2OEstimator diff --git a/transformers/augmentation/uszipcode_features_database.py b/transformers/augmentation/uszipcode_features_database.py index a82018dc..51b5b86f 100644 --- a/transformers/augmentation/uszipcode_features_database.py +++ b/transformers/augmentation/uszipcode_features_database.py @@ -4,7 +4,7 @@ import datatable as dt import numpy as np -_global_modules_needed_by_name = ['pycodestyle==2.7.0', 'uszipcode==0.2.6'] +_global_modules_needed_by_name = ['pycodestyle==2.11.1', 'uszipcode==1.0.1'] from uszipcode import SearchEngine diff --git a/transformers/augmentation/uszipcode_features_light.py b/transformers/augmentation/uszipcode_features_light.py index 17a5b42d..ec1ccd28 100644 --- a/transformers/augmentation/uszipcode_features_light.py +++ b/transformers/augmentation/uszipcode_features_light.py @@ -4,7 +4,7 @@ import numpy as np from abc import ABC, abstractmethod -_global_modules_needed_by_name = ['zipcodes==1.0.5'] +_global_modules_needed_by_name = ['zipcodes==1.2.0'] import zipcodes diff --git a/transformers/datetime/days_until_dec2020.py b/transformers/datetime/days_until_dec2020.py index b81dde61..105c4cf4 100644 --- a/transformers/datetime/days_until_dec2020.py +++ b/transformers/datetime/days_until_dec2020.py @@ -16,7 +16,7 @@ import pandas as pd import dateparser -_global_modules_needed_by_name = ['regex==2021.4.4', 'tzlocal==3.0.0', 'dateparser==0.7.1'] +_global_modules_needed_by_name = ['regex==2024.5.15', 'tzlocal==5.2', 'dateparser==1.2.0'] def convert_to_age(ts): diff --git a/transformers/executables/pe_data_directory_features.py b/transformers/executables/pe_data_directory_features.py index 4131e5d4..05b76abc 100644 --- a/transformers/executables/pe_data_directory_features.py +++ b/transformers/executables/pe_data_directory_features.py @@ -7,7 +7,7 @@ class PEDataDirectoryFeatures(CustomTransformer): 
_unsupervised = True - _modules_needed_by_name = ['lief==0.11.4'] + _modules_needed_by_name = ['lief==0.14.1'] _regression = True _binary = True _multiclass = True diff --git a/transformers/executables/pe_exports_features.py b/transformers/executables/pe_exports_features.py index 3ab7644e..5c27d973 100644 --- a/transformers/executables/pe_exports_features.py +++ b/transformers/executables/pe_exports_features.py @@ -7,7 +7,7 @@ class PEExportsFeatures(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ['lief==0.11.4'] + _modules_needed_by_name = ['lief==0.14.1'] _regression = True _binary = True _multiclass = True diff --git a/transformers/executables/pe_general_features.py b/transformers/executables/pe_general_features.py index d47442a9..7a9075a2 100644 --- a/transformers/executables/pe_general_features.py +++ b/transformers/executables/pe_general_features.py @@ -7,7 +7,7 @@ class PEGeneralFeatures(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ['lief==0.11.4'] + _modules_needed_by_name = ['lief==0.14.1'] _regression = True _binary = True _multiclass = True diff --git a/transformers/executables/pe_header_features.py b/transformers/executables/pe_header_features.py index 9f36b165..8b570238 100644 --- a/transformers/executables/pe_header_features.py +++ b/transformers/executables/pe_header_features.py @@ -7,7 +7,7 @@ class PEHeaderFeatures(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ['lief==0.11.4'] + _modules_needed_by_name = ['lief==0.14.1'] _regression = True _binary = True _multiclass = True diff --git a/transformers/executables/pe_imports_features.py b/transformers/executables/pe_imports_features.py index 433fc46f..9901a040 100644 --- a/transformers/executables/pe_imports_features.py +++ b/transformers/executables/pe_imports_features.py @@ -7,7 +7,7 @@ class PEImportsFeatures(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ['lief==0.11.4'] + _modules_needed_by_name = 
['lief==0.14.1'] _regression = True _binary = True _multiclass = True diff --git a/transformers/executables/pe_section_characteristics.py b/transformers/executables/pe_section_characteristics.py index fb0f45d1..25b65e6e 100644 --- a/transformers/executables/pe_section_characteristics.py +++ b/transformers/executables/pe_section_characteristics.py @@ -7,7 +7,7 @@ class PESectionCharacteristics(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ['lief==0.11.4'] + _modules_needed_by_name = ['lief==0.14.1'] _regression = True _binary = True _multiclass = True diff --git a/transformers/image/image_ocr_transformer.py b/transformers/image/image_ocr_transformer.py index 676ed502..a55aa763 100644 --- a/transformers/image/image_ocr_transformer.py +++ b/transformers/image/image_ocr_transformer.py @@ -7,7 +7,7 @@ class ImageOCRTextTransformer(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ['pillow==8.3.2', "pytesseract==0.3.0"] + _modules_needed_by_name = ['pillow==10.3.0', "pytesseract==0.3.10"] _parallel_task = True # if enabled, params_base['n_jobs'] will be >= 1 (adaptive to system), otherwise 1 _can_use_gpu = True # if enabled, will use special job scheduler for GPUs _can_use_multi_gpu = True # if enabled, can get access to multiple GPUs for single transformer (experimental) diff --git a/transformers/image/image_url_transformer.py b/transformers/image/image_url_transformer.py index 999033eb..de9148ef 100644 --- a/transformers/image/image_url_transformer.py +++ b/transformers/image/image_url_transformer.py @@ -16,7 +16,7 @@ class MyImgTransformer(TensorFlowModel, CustomTransformer): # Need Pillow before nlp imports keras, else when here too late. # I.e. 
wasn't enough to put keras imports inside fit/transform to delay after Pillow installed - _modules_needed_by_name = ['pillow==8.3.2'] + _modules_needed_by_name = ['pillow==10.3.0'] _tensorflow = True _mojo = False _parallel_task = True # assumes will use n_jobs in params_base diff --git a/transformers/nlp/text_embedding_similarity_transformers.py b/transformers/nlp/text_embedding_similarity_transformers.py index 4f761213..e2e16ea1 100644 --- a/transformers/nlp/text_embedding_similarity_transformers.py +++ b/transformers/nlp/text_embedding_similarity_transformers.py @@ -9,7 +9,7 @@ class EmbeddingSimilarityTransformer(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ["gensim==3.8.0", 'regex==2021.4.4', 'flair==0.4.1', 'segtok==1.5.7'] + _modules_needed_by_name = ["gensim==4.3.2", 'regex==2024.5.15', 'flair==0.12.2', 'segtok==1.5.11'] _is_reproducible = False _can_use_gpu = True _repl_val = 0 diff --git a/transformers/nlp/text_named_entities_transformer.py b/transformers/nlp/text_named_entities_transformer.py index 8e23f34c..998c4a8f 100644 --- a/transformers/nlp/text_named_entities_transformer.py +++ b/transformers/nlp/text_named_entities_transformer.py @@ -12,10 +12,11 @@ class TextNamedEntityTransformer(CustomTransformer): """Transformer to extract the count of Named Entities""" _testing_can_skip_failure = False # ensure tested as if shouldn't fail _root_path = "https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes" - _suffix = "-cp38-cp38-linux_x86_64.whl" - froms3 = True + _suffix = "-cp311-cp311-linux_x86_64.whl" + froms3 = False _is_reproducible = False # some issue with deepcopy and refit, do not get same result if froms3: + # TODO: upload the wheel files to S3 _modules_needed_by_name = [ '%s/blis-0.4.1%s' % (_root_path, _suffix), '%s/catalogue-1.0.0%s' % (_root_path, _suffix), @@ -30,7 +31,7 @@ class TextNamedEntityTransformer(CustomTransformer): '%s/wasabi-0.8.2%s' % (_root_path, _suffix), ] else: - _modules_needed_by_name = 
["spacy==2.2.3", + _modules_needed_by_name = ["spacy==3.7.4", "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.5/en_core_web_sm-2.2.5.tar.gz#egg=en_core_web_sm==2.2.5"] def __init__(self, **kwargs): diff --git a/transformers/nlp/text_pos_tagging_transformer.py b/transformers/nlp/text_pos_tagging_transformer.py index e49a014c..785cd450 100644 --- a/transformers/nlp/text_pos_tagging_transformer.py +++ b/transformers/nlp/text_pos_tagging_transformer.py @@ -12,7 +12,7 @@ class POSTagTransformer: """Transformer to extract the count of POS tags""" _method = NotImplemented - _modules_needed_by_name = ["nltk==3.4.3"] + _modules_needed_by_name = ["nltk==3.8.1"] _testing_can_skip_failure = False # ensure tested as if shouldn't fail def set_tagger(self): diff --git a/transformers/nlp/text_preprocessing_transformer.py b/transformers/nlp/text_preprocessing_transformer.py index 813cf469..15bdaf36 100644 --- a/transformers/nlp/text_preprocessing_transformer.py +++ b/transformers/nlp/text_preprocessing_transformer.py @@ -17,7 +17,7 @@ class TextPreprocessingTransformer(CustomTransformer): """Transformer to preprocess the text""" _numeric_output = False _is_reproducible = True - _modules_needed_by_name = ["nltk==3.4.3"] + _modules_needed_by_name = ["nltk==3.8.1"] _testing_can_skip_failure = False # ensure tested as if shouldn't fail def __init__(self, **kwargs): diff --git a/transformers/nlp/text_sentiment_transformer.py b/transformers/nlp/text_sentiment_transformer.py index be681c82..be946281 100644 --- a/transformers/nlp/text_sentiment_transformer.py +++ b/transformers/nlp/text_sentiment_transformer.py @@ -9,7 +9,7 @@ class TextSentimentTransformer(CustomTransformer): _unsupervised = True _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ['nltk==3.4.3', 'textblob'] + _modules_needed_by_name = ['nltk==3.8.1', 'textblob'] @staticmethod def get_default_properties(): diff --git 
a/transformers/nlp/text_similarity_transformers.py b/transformers/nlp/text_similarity_transformers.py index 55f7a7b2..91a2a181 100644 --- a/transformers/nlp/text_similarity_transformers.py +++ b/transformers/nlp/text_similarity_transformers.py @@ -3,7 +3,7 @@ import datatable as dt import numpy as np -_global_modules_needed_by_name = ['nltk==3.4.3'] +_global_modules_needed_by_name = ['nltk==3.8.1'] import nltk @@ -130,7 +130,7 @@ def transform(self, X: dt.Frame): class EditDistanceTransformer(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ['editdistance==0.5.3'] + _modules_needed_by_name = ['editdistance==0.8.1'] @staticmethod def get_default_properties(): diff --git a/transformers/nlp/text_topic_modeling_transformer.py b/transformers/nlp/text_topic_modeling_transformer.py index 4a2ce07f..0302ff6a 100644 --- a/transformers/nlp/text_topic_modeling_transformer.py +++ b/transformers/nlp/text_topic_modeling_transformer.py @@ -11,7 +11,7 @@ class TextLDATopicTransformer(CustomTransformer): """Transformer to extract topics from text column using LDA""" _is_reproducible = False _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ["gensim==3.8.0"] + _modules_needed_by_name = ["gensim==4.3.2"] def __init__(self, n_topics, **kwargs): super().__init__(**kwargs) diff --git a/transformers/nlp/text_url_summary_transformer.py b/transformers/nlp/text_url_summary_transformer.py index f46db057..aeb76fb0 100644 --- a/transformers/nlp/text_url_summary_transformer.py +++ b/transformers/nlp/text_url_summary_transformer.py @@ -9,7 +9,7 @@ class TextURLSummaryTransformer(CustomTransformer): _numeric_output = False _testing_can_skip_failure = False # ensure tested as if shouldn't fail - _modules_needed_by_name = ["gensim==3.8.0", "beautifulsoup4==4.9.2"] + _modules_needed_by_name = ["gensim==4.3.2", "beautifulsoup4==4.12.3"] _display_name = 'TextURLSummaryTransformer' @staticmethod diff --git 
a/transformers/speech/audio_MFCC_transformer.py b/transformers/speech/audio_MFCC_transformer.py index 0baadf68..25c9fe5c 100644 --- a/transformers/speech/audio_MFCC_transformer.py +++ b/transformers/speech/audio_MFCC_transformer.py @@ -7,7 +7,7 @@ class AudioMFCCTransformer(CustomTransformer): _unsupervised = True - _modules_needed_by_name = ['librosa==0.8.1'] + _modules_needed_by_name = ['librosa==0.10.2.post1'] _parallel_task = True # if enabled, params_base['n_jobs'] will be >= 1 (adaptive to system), otherwise 1 _can_use_gpu = True # if enabled, will use special job scheduler for GPUs _can_use_multi_gpu = True # if enabled, can get access to multiple GPUs for single transformer (experimental) diff --git a/transformers/speech/azure_speech_to_text.py b/transformers/speech/azure_speech_to_text.py index b402a959..0773935f 100644 --- a/transformers/speech/azure_speech_to_text.py +++ b/transformers/speech/azure_speech_to_text.py @@ -31,7 +31,7 @@ class AzureSpeechToText(CustomTransformer): _numeric_output = False _display_name = 'AzureSpeechToTextTransformer' - _modules_needed_by_name = ["azure-cognitiveservices-speech==1.16.0"] + _modules_needed_by_name = ["azure-cognitiveservices-speech==1.37.0"] @staticmethod def get_default_properties(): diff --git a/transformers/survival/h2o-3-coxph-pretransformer.py b/transformers/survival/h2o-3-coxph-pretransformer.py index 4e118e68..2d41a04e 100644 --- a/transformers/survival/h2o-3-coxph-pretransformer.py +++ b/transformers/survival/h2o-3-coxph-pretransformer.py @@ -10,7 +10,7 @@ import os import uuid -_global_modules_needed_by_name = ['h2o==3.34.0.7'] +_global_modules_needed_by_name = ['h2o==3.46.0.2'] import h2o from h2oaicore.systemutils import temporary_files_path, config, remove from h2o.estimators.coxph import H2OCoxProportionalHazardsEstimator diff --git a/transformers/timeseries/auto_arima_forecast.py b/transformers/timeseries/auto_arima_forecast.py index 1fa59be0..1d3ff2fd 100644 --- 
a/transformers/timeseries/auto_arima_forecast.py +++ b/transformers/timeseries/auto_arima_forecast.py @@ -16,7 +16,7 @@ class MyAutoArimaTransformer(CustomTimeSeriesTransformer): _binary = False _multiclass = False - _modules_needed_by_name = ['pmdarima==1.8.3'] + _modules_needed_by_name = ['pmdarima==2.0.4'] _included_model_classes = None _testing_can_skip_failure = False # ensure tested as if shouldn't fail _lag_recipe_allowed = True diff --git a/transformers/timeseries/parallel_auto_arima_forecast.py b/transformers/timeseries/parallel_auto_arima_forecast.py index 0b832bd9..0357cc04 100644 --- a/transformers/timeseries/parallel_auto_arima_forecast.py +++ b/transformers/timeseries/parallel_auto_arima_forecast.py @@ -52,7 +52,7 @@ class MyParallelAutoArimaTransformer(CustomTimeSeriesTransformer): """Implementation of the ARIMA transformer using a pool of processes to fit models in parallel""" _binary = False _multiclass = False - _modules_needed_by_name = ['pmdarima==1.8.3'] + _modules_needed_by_name = ['pmdarima==2.0.4'] _included_model_classes = None _testing_can_skip_failure = False # ensure tested as if shouldn't fail _lag_recipe_allowed = True diff --git a/transformers/timeseries/parallel_prophet_forecast.py b/transformers/timeseries/parallel_prophet_forecast.py index eabe5292..cd341684 100644 --- a/transformers/timeseries/parallel_prophet_forecast.py +++ b/transformers/timeseries/parallel_prophet_forecast.py @@ -80,19 +80,19 @@ class MyParallelProphetTransformer(CustomTimeSeriesTransformer): froms3 = True if froms3: _root_path = "https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes" - _suffix = "-cp38-cp38-linux_x86_64.whl" + _suffix = "-cp311-cp311-linux_x86_64.whl" _modules_needed_by_name = [ '%s/setuptools_git-1.2%s' % (_root_path, _suffix), '%s/LunarCalendar-0.0.9%s' % (_root_path, _suffix), - '%s/ephem-3.7.7.1%s' % (_root_path, _suffix), - '%s/cmdstanpy-0.9.5%s' % (_root_path, _suffix), - '%s/pystan-2.19.1.1%s' % (_root_path, _suffix), - 
'%s/httpstan-4.5.0%s' % (_root_path, _suffix), - '%s/fbprophet-0.7.1%s' % (_root_path, _suffix), + '%s/ephem-4.1.5%s' % (_root_path, _suffix), + '%s/cmdstanpy-1.2.2%s' % (_root_path, _suffix), + '%s/pystan-3.9.1%s' % (_root_path, _suffix), + '%s/httpstan-4.12.0%s' % (_root_path, _suffix), + '%s/prophet-1.1.5%s' % (_root_path, _suffix), ] else: - _modules_needed_by_name = ['holidays==0.11.1', 'convertdate', 'lunarcalendar', 'pystan==2.19.1.1', - 'fbprophet==0.7.1'] + _modules_needed_by_name = ['holidays==0.47', 'convertdate', 'lunarcalendar', 'pystan==3.9.1', + 'prophet==1.1.5'] _included_model_classes = None # ["gblinear"] for strong trends - can extrapolate _testing_can_skip_failure = False # ensure tested as if shouldn't fail @@ -151,7 +151,7 @@ def _fit_async(X_path, grp_hash, tmp_folder, params): # print("prophet - small data work-around for group: %s" % grp_hash) return grp_hash, None # Import FB Prophet package - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") model = Prophet(yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True) @@ -311,7 +311,7 @@ def fit(self, X: dt.Frame, y: np.array = None, **kwargs): "country_holidays": self.country_holidays, "monthly_seasonality": self.monthly_seasonality } - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") self.model = Prophet(yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True) diff --git a/transformers/timeseries/parallel_prophet_forecast_using_individual_groups.py b/transformers/timeseries/parallel_prophet_forecast_using_individual_groups.py index 6586958a..5da3c51d 100644 --- a/transformers/timeseries/parallel_prophet_forecast_using_individual_groups.py +++ b/transformers/timeseries/parallel_prophet_forecast_using_individual_groups.py @@ -102,19 +102,19 @@ class MyProphetOnSingleGroupsTransformer(CustomTimeSeriesTransformer): froms3 = 
True if froms3: _root_path = "https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes" - _suffix = "-cp38-cp38-linux_x86_64.whl" + _suffix = "-cp311-cp311-linux_x86_64.whl" _modules_needed_by_name = [ '%s/setuptools_git-1.2%s' % (_root_path, _suffix), '%s/LunarCalendar-0.0.9%s' % (_root_path, _suffix), - '%s/ephem-3.7.7.1%s' % (_root_path, _suffix), - '%s/cmdstanpy-0.9.5%s' % (_root_path, _suffix), - '%s/pystan-2.19.1.1%s' % (_root_path, _suffix), - '%s/httpstan-4.5.0%s' % (_root_path, _suffix), - '%s/fbprophet-0.7.1%s' % (_root_path, _suffix), + '%s/ephem-4.1.5%s' % (_root_path, _suffix), + '%s/cmdstanpy-1.2.2%s' % (_root_path, _suffix), + '%s/pystan-3.9.1%s' % (_root_path, _suffix), + '%s/httpstan-4.12.0%s' % (_root_path, _suffix), + '%s/prophet-1.1.5%s' % (_root_path, _suffix), ] else: - _modules_needed_by_name = ['holidays==0.11.1', 'convertdate', 'lunarcalendar', 'pystan==2.19.1.1', - 'fbprophet==0.7.1'] + _modules_needed_by_name = ['holidays==0.47', 'convertdate', 'lunarcalendar', 'pystan==3.9.1', + 'prophet==1.1.5'] _included_model_classes = None # ["gblinear"] for strong trends - can extrapolate _testing_can_skip_failure = False # ensure tested as if shouldn't fail @@ -172,7 +172,7 @@ def _fit_async(data_path, grp_hash, tmp_folder, params): # if X.shape[0] < 20: # return grp_hash, None # Import FB Prophet package - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") model = fit_prophet_model(Prophet, X, params) model_path = os.path.join(tmp_folder, "fbprophet_model" + str(uuid.uuid4())) @@ -334,7 +334,7 @@ def fit_prophet_model_on_average_target(self, X): "country_holidays": self.country_holidays, "monthly_seasonality": self.monthly_seasonality } - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") avg_model = fit_prophet_model(Prophet, X_avg, params, force=True) diff --git 
a/transformers/timeseries/serial_prophet_forecast.py b/transformers/timeseries/serial_prophet_forecast.py index dbcd95de..7b254eff 100644 --- a/transformers/timeseries/serial_prophet_forecast.py +++ b/transformers/timeseries/serial_prophet_forecast.py @@ -36,19 +36,19 @@ class MySerialProphetTransformer(CustomTimeSeriesTransformer): froms3 = True if froms3: _root_path = "https://s3.amazonaws.com/artifacts.h2o.ai/deps/dai/recipes" - _suffix = "-cp38-cp38-linux_x86_64.whl" + _suffix = "-cp311-cp311-linux_x86_64.whl" _modules_needed_by_name = [ '%s/setuptools_git-1.2%s' % (_root_path, _suffix), '%s/LunarCalendar-0.0.9%s' % (_root_path, _suffix), - '%s/ephem-3.7.7.1%s' % (_root_path, _suffix), - '%s/cmdstanpy-0.9.5%s' % (_root_path, _suffix), - '%s/pystan-2.19.1.1%s' % (_root_path, _suffix), - '%s/httpstan-4.5.0%s' % (_root_path, _suffix), - '%s/fbprophet-0.7.1%s' % (_root_path, _suffix), + '%s/ephem-4.1.5%s' % (_root_path, _suffix), + '%s/cmdstanpy-1.2.2%s' % (_root_path, _suffix), + '%s/pystan-3.9.1%s' % (_root_path, _suffix), + '%s/httpstan-4.12.0%s' % (_root_path, _suffix), + '%s/prophet-1.1.5%s' % (_root_path, _suffix), ] else: - _modules_needed_by_name = ['holidays==0.11.1', 'convertdate', 'lunarcalendar', 'pystan==2.19.1.1', - 'fbprophet==0.7.1'] + _modules_needed_by_name = ['holidays==0.47', 'convertdate', 'lunarcalendar', 'pystan==3.9.1', + 'prophet==1.1.5'] # _modules_needed_by_name = ['fbprophet'] _included_model_classes = None # ["gblinear"] for strong trends - can extrapolate _testing_can_skip_failure = False # ensure tested as if shouldn't fail @@ -70,7 +70,7 @@ def acceptance_test_timeout(): return 20.0 def fit(self, X: dt.Frame, y: np.array = None): - mod = importlib.import_module('fbprophet') + mod = importlib.import_module('prophet') Prophet = getattr(mod, "Prophet") # from fbprophet import Prophet self.models = {}