Commit e3e5466

Python 3.11 support #227. Reorganized short list / competition logs.
antoinecarme committed Mar 9, 2023
1 parent 7227165 commit e3e5466
Showing 3 changed files with 46 additions and 15 deletions.
37 changes: 25 additions & 12 deletions pyaf/TS/ModelSelection_Voting.py
@@ -39,6 +39,7 @@ def collectPerformanceIndices_ModelSelection(self, iSignal, iSigDecs) :
         logger = tsutil.get_pyaf_logger();
         lTimer = tsutil.cTimer(("MODEL_SELECTION", {"Signal" : iSignal, "Transformations" : sorted(list(iSigDecs.keys()))}))
         lVotingScores = self.compute_voting_scores(iSigDecs)
+        lCriterion = self.mOptions.mModelSelection_Criterion
         rows_list = []
         lPerfsByModel = {}
         for (lName, sigdec) in iSigDecs.items():
@@ -47,26 +48,38 @@ def collectPerformanceIndices_ModelSelection(self, iSignal, iSigDecs) :
             lTranformName = sigdec.mSignal;
             lDecompType = model[1];
             lModelFormula = model
-            lModelCategory = value[0][2].get_model_category()
+            tsmodel = value[0][2]
+            lModelCategory = tsmodel.get_model_category()
             lSplit = value[0][2].mTimeInfo.mOptions.mCustomSplit
             # value format : self.mPerfsByModel[lModel.mOutName] = [lModel, lComplexity, lFitPerf , lForecastPerf, lTestPerf];
             lComplexity = value[1];
-            lFitPerf = value[2];
-            lForecastPerf = value[3];
-            lTestPerf = value[4];
+            model_perfs = tsmodel.get_perfs_summary()
+            lFitPerf = model_perfs["Fit"];
+            lForecastPerf = model_perfs["Forecast"]
+            lTestPerf = model_perfs["Test"]
+            H = tsmodel.mTimeInfo.mHorizon
+
             lVoting = lVotingScores[ lModelFormula[3] ]
             row = [lSplit, lTranformName, lDecompType, lModelFormula[3], lModelFormula, lModelCategory, lComplexity,
-                   lFitPerf,
-                   lForecastPerf,
-                   lTestPerf, lVoting]
+                   lFitPerf[1].getCriterionValue(lCriterion),
+                   lFitPerf[H].getCriterionValue(lCriterion),
+                   lForecastPerf[1].getCriterionValue(lCriterion),
+                   lForecastPerf[H].getCriterionValue(lCriterion),
+                   lTestPerf[1].getCriterionValue(lCriterion),
+                   lTestPerf[H].getCriterionValue(lCriterion),
+                   lVoting]
             rows_list.append(row);
 
         self.mTrPerfDetails = pd.DataFrame(rows_list, columns=
                                            ('Split', 'Transformation', 'DecompositionType',
                                             'Model', 'DetailedFormula', 'Category', 'Complexity',
-                                            'Fit' + self.mOptions.mModelSelection_Criterion,
-                                            'Forecast' + self.mOptions.mModelSelection_Criterion,
-                                            'Test' + self.mOptions.mModelSelection_Criterion, "Voting"))
+                                            'Fit_' + self.mOptions.mModelSelection_Criterion + "_1",
+                                            'Fit_' + self.mOptions.mModelSelection_Criterion + "_H",
+                                            'Forecast_' + self.mOptions.mModelSelection_Criterion + "_1",
+                                            'Forecast_' + self.mOptions.mModelSelection_Criterion + "_H",
+                                            'Test_' + self.mOptions.mModelSelection_Criterion + "_1",
+                                            'Test_' + self.mOptions.mModelSelection_Criterion + "_H",
+                                            "Voting"))
         # print(self.mTrPerfDetails.head(self.mTrPerfDetails.shape[0]));
         lIndicator = 'Voting';
         lBestPerf = self.mTrPerfDetails[ lIndicator ].max();
@@ -88,8 +101,8 @@ def collectPerformanceIndices_ModelSelection(self, iSignal, iSigDecs) :
         # print("BEST_MODEL", lBestName, lBestModel)
         self.mBestModel = lBestModel
         self.mPerfsByModel = lPerfsByModel
-        self.mModelShortList = lInterestingModels[['Transformation', 'DecompositionType', 'Model', lIndicator, 'Complexity']]
-        print(self.mModelShortList.head());
+        self.mModelShortList = lInterestingModels[['Transformation', 'DecompositionType', 'Model', lIndicator, 'Complexity', 'Forecast_' + self.mOptions.mModelSelection_Criterion + "_1", 'Forecast_' + self.mOptions.mModelSelection_Criterion + "_H"]]
+        # print(self.mModelShortList.head());
         return (iSignal, lPerfsByModel, lBestModel, self.mModelShortList)


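Note (an illustration, not part of the commit): the detail table now carries one criterion column per horizon, "_1" for one step ahead and "_H" for the full horizon, instead of a single Perf object per phase. A minimal sketch of the resulting column names, assuming the selection criterion is "MAPE" (a hypothetical stand-in for self.mOptions.mModelSelection_Criterion):

lCriterion = "MAPE"  # hypothetical; the real code reads mOptions.mModelSelection_Criterion
columns = ['Split', 'Transformation', 'DecompositionType',
           'Model', 'DetailedFormula', 'Category', 'Complexity']
for lPhase in ('Fit_', 'Forecast_', 'Test_'):
    for lSuffix in ('_1', '_H'):
        columns.append(lPhase + lCriterion + lSuffix)
columns.append('Voting')
# columns ends with: 'Fit_MAPE_1', 'Fit_MAPE_H', 'Forecast_MAPE_1',
# 'Forecast_MAPE_H', 'Test_MAPE_1', 'Test_MAPE_H', 'Voting'
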
12 changes: 10 additions & 2 deletions pyaf/TS/SignalDecomposition.py
@@ -96,8 +96,6 @@ def train(self , iInputDS, iTimes, iSignals, iHorizons, iExogenousData = None):
         self.mModelShortListBySignal = lTrainer.mModelShortList
         # some backward compatibility
         lFirstSignal = self.mSignals[0]
-        self.mTrPerfDetails = lTrainer.mTrPerfDetails[lFirstSignal]
-        self.mModelShortList = lTrainer.mModelShortList[lFirstSignal]
         self.mBestModel = self.mBestModels[lFirstSignal]
         self.mTrainingTime = lTimer.get_elapsed_time()
 
@@ -116,12 +114,22 @@ def getModelFormula(self):
             lFormula[lSignal] = self.mBestModel.getFormula();
         return lFormula;
 
+    def get_competition_details(self):
+        logger = tsutil.get_pyaf_logger();
+        for lSignal in self.mSignals:
+            logger.info("COMPETITION_DETAIL_START '" + lSignal + "'");
+            lShortList_Dict = self.mModelShortListBySignal[lSignal].to_dict(orient = 'index')
+            # print(lShortList_Dict)
+            for (k, v) in lShortList_Dict.items():
+                logger.info("COMPETITION_DETAIL_SHORT_LIST '" + lSignal + "' " + str(k) + " " + str(v));
+            logger.info("COMPETITION_DETAIL_END '" + lSignal + "'");
+
     def getModelInfo(self):
         for lSignal in self.mSignals:
             self.mBestModels[lSignal].getInfo()
         logger = tsutil.get_pyaf_logger();
         logger.info("TRAINING_TIME_IN_SECONDS " + str(self.mTrainingTime));
+        self.get_competition_details()
 
     def to_dict(self, iWithOptions = False):
         dict1 = {}
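Note (an illustration, not part of the commit): get_competition_details walks each signal's short list and logs one COMPETITION_DETAIL_SHORT_LIST line per short-listed model between START/END markers. A self-contained sketch with a hypothetical one-row short list mirroring the new column set; the model name and values are made up, and a plain logger stands in for tsutil.get_pyaf_logger():

import logging
import pandas as pd

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("pyaf.std")

lSignal = "Signal"
# hypothetical one-row short list with the columns selected by the new code
lShortList = pd.DataFrame(
    [["None", "T+S+R", "_Signal_ConstantTrend", 0.79, 16, 0.0221, 0.0305]],
    columns=['Transformation', 'DecompositionType', 'Model', 'Voting',
             'Complexity', 'Forecast_MAPE_1', 'Forecast_MAPE_H'])

logger.info("COMPETITION_DETAIL_START '" + lSignal + "'")
for (k, v) in lShortList.to_dict(orient = 'index').items():
    logger.info("COMPETITION_DETAIL_SHORT_LIST '" + lSignal + "' " + str(k) + " " + str(v))
logger.info("COMPETITION_DETAIL_END '" + lSignal + "'")
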
12 changes: 11 additions & 1 deletion pyaf/TS/TimeSeriesModel.py
@@ -87,7 +87,17 @@ def updatePerfs(self, compute_all_indicators = False):
         self.mForecastPerfs = lPredictionIntervalsEstimator.mForecastPerformances
         self.mTestPerfs = lPredictionIntervalsEstimator.mTestPerformances
 
-
+    def get_perfs_summary(self):
+        output = {"Fit" : {}, "Forecast" : {}, "Test" : {}}
+        lForecastColumn = str(self.mOriginalSignal) + "_Forecast";
+        lCriterion = self.mTimeInfo.mOptions.mModelSelection_Criterion
+        for h in [1, self.mTimeInfo.mHorizon]:
+            lHorizonName = lForecastColumn + "_" + str(h);
+            output["Fit"][h] = self.mFitPerfs[lHorizonName]
+            output["Forecast"][h] = self.mForecastPerfs[lHorizonName]
+            output["Test"][h] = self.mTestPerfs[lHorizonName]
+        return output
+
     def aggregate_criteria(self, criteria):
         lAggregated = criteria[0]
         return lAggregated
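Note (an illustration, not part of the commit): get_perfs_summary() keys each phase's performances by horizon, which is what lets the caller in ModelSelection_Voting.py index with 1 and H. A sketch of the returned shape for a trained model lModel, assuming a hypothetical horizon of 12:

lSummary = lModel.get_perfs_summary()
# {"Fit"      : {1 : <perf>, 12 : <perf>},
#  "Forecast" : {1 : <perf>, 12 : <perf>},
#  "Test"     : {1 : <perf>, 12 : <perf>}}
# Each <perf> is the performance object stored under the "<Signal>_Forecast_<h>"
# key; a criterion value is then read as in the caller:
lValue = lSummary["Forecast"][12].getCriterionValue("MAPE")  # criterion name hypothetical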
