Skip to content

Commit

Permalink
Revert "silencing warnings"
Browse files Browse the repository at this point in the history
This reverts commit f5878b5.
  • Loading branch information
Michael ZBYSZYNSKI committed Nov 14, 2023
1 parent f5878b5 commit 0196c3f
Show file tree
Hide file tree
Showing 7 changed files with 506 additions and 531 deletions.
2 changes: 1 addition & 1 deletion dependencies/jsoncpp.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4206,7 +4206,7 @@ JSONCPP_STRING valueToString(double value, bool useSpecialFloats, unsigned int p
int len = -1;

char formatString[6];
snprintf(formatString, 6, "%%.%dg", precision);
sprintf(formatString, "%%.%dg", precision);

// Print into the buffer. We need not request the alternative representation
that always has a decimal point because JSON doesn't distinguish the
Expand Down
2 changes: 1 addition & 1 deletion dependencies/libsvm/libsvm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ namespace LIBSVM {
char buf[BUFSIZ];
va_list ap;
va_start(ap,fmt);
vsnprintf(buf,BUFSIZ,fmt,ap);
vsprintf(buf,fmt,ap);
va_end(ap);
(*svm_print_string)(buf);
}
Expand Down
188 changes: 93 additions & 95 deletions src/classification.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,127 +14,125 @@
#endif

template<typename T>
classificationTemplate<T>::classificationTemplate() :
classificationType(knn) //this is the default algorithm
classificationTemplate<T>::classificationTemplate()
{
modelSet<T>::numInputs = -1;
modelSet<T>::numOutputs = -1;
modelSet<T>::isTraining = false;
}
modelSet<T>::numInputs = -1;
modelSet<T>::numOutputs = -1;
modelSet<T>::isTraining = false;
classificationType = knn; //this is the default algorithm
};

template<typename T>
classificationTemplate<T>::classificationTemplate(classificationTypes classification_type) :
classificationType(classification_type)
classificationTemplate<T>::classificationTemplate(classificationTypes classification_type)
{
modelSet<T>::numInputs = -1;
modelSet<T>::numOutputs = -1;
modelSet<T>::isTraining = false;
modelSet<T>::numInputs = -1;
modelSet<T>::numOutputs = -1;
modelSet<T>::isTraining = false;
classificationType = classification_type;
};

template<typename T>
classificationTemplate<T>::classificationTemplate(const int &num_inputs, const int &num_outputs) //TODO: this feature isn't really useful
{
modelSet<T>::numInputs = num_inputs;
modelSet<T>::numOutputs = num_outputs;
modelSet<T>::isTraining = false;
std::vector<size_t> whichInputs;

for (size_t i {}; i < modelSet<T>::numInputs; ++i)
{
whichInputs.push_back(i);
}

std::vector<trainingExampleTemplate<T> > trainingSet;

for (size_t i {}; i < modelSet<T>::numOutputs; ++i)
{
modelSet<T>::myModelSet.push_back(new knnClassification<T>(modelSet<T>::numInputs, whichInputs, trainingSet, 1));
}
{
modelSet<T>::numInputs = num_inputs;
modelSet<T>::numOutputs = num_outputs;
modelSet<T>::isTraining = false;
std::vector<size_t> whichInputs;

for (size_t i = 0; i < modelSet<T>::numInputs; ++i)
{
whichInputs.push_back(i);
}
std::vector<trainingExampleTemplate<T> > trainingSet;

for (size_t i = 0; i < modelSet<T>::numOutputs; ++i)
{
modelSet<T>::myModelSet.push_back(new knnClassification<T>(modelSet<T>::numInputs, whichInputs, trainingSet, 1));
}
};

template<typename T>
classificationTemplate<T>::classificationTemplate(const std::vector<trainingExampleTemplate<T> > &trainingSet)
classificationTemplate<T>::classificationTemplate(const std::vector<trainingExampleTemplate<T> > &trainingSet)
{
modelSet<T>::numInputs = -1;
modelSet<T>::numOutputs = -1;
modelSet<T>::isTraining = false;
train(trainingSet);
modelSet<T>::numInputs = -1;
modelSet<T>::numOutputs = -1;
modelSet<T>::isTraining = false;
train(trainingSet);
};

template<typename T>
bool classificationTemplate<T>::train(const std::vector<trainingExampleTemplate<T> > &training_set)
bool classificationTemplate<T>::train(const std::vector<trainingExampleTemplate<T> > &training_set)
{
//TODO: time this process?
modelSet<T>::reset();

if (training_set.size() > 0)
{
//create model(s) here
modelSet<T>::numInputs = static_cast<int>(training_set[0].input.size());
modelSet<T>::numOutputs = static_cast<int>(training_set[0].output.size());

for (int i {}; i < modelSet<T>::numInputs; ++i)
{
modelSet<T>::inputNames.push_back("inputs-" + std::to_string(i + 1));
}
modelSet<T>::numOutputs = int(training_set[0].output.size());

for (auto example : training_set)
{
if (example.input.size() != modelSet<T>::numInputs)
{
throw std::length_error("unequal feature vectors in input.");
return false;
}

if (example.output.size() != modelSet<T>::numOutputs)
{
throw std::length_error("unequal output vectors.");
return false;
}
}
std::vector<size_t> whichInputs;

for (int inputNum {}; inputNum < modelSet<T>::numInputs; ++inputNum)
{
whichInputs.push_back(inputNum);
}

for (int i {}; i < modelSet<T>::numOutputs; ++i)
//TODO: time this process?
modelSet<T>::reset();

if (training_set.size() > 0)
{
if (classificationType == svm)
{
modelSet<T>::myModelSet.push_back(new svmClassification<T>(modelSet<T>::numInputs));
}
else
{
modelSet<T>::myModelSet.push_back(new knnClassification<T>(modelSet<T>::numInputs, whichInputs, training_set, 1));
}
//create model(s) here
modelSet<T>::numInputs = int(training_set[0].input.size());
modelSet<T>::numOutputs = int(training_set[0].output.size());

for (int i = 0; i < modelSet<T>::numInputs; ++i)
{
modelSet<T>::inputNames.push_back("inputs-" + std::to_string(i + 1));
}
modelSet<T>::numOutputs = int(training_set[0].output.size());

for ( auto example : training_set)
{
if (example.input.size() != modelSet<T>::numInputs)
{
throw std::length_error("unequal feature vectors in input.");
return false;
}
if (example.output.size() != modelSet<T>::numOutputs)
{
throw std::length_error("unequal output vectors.");
return false;
}
}
std::vector<size_t> whichInputs;

for (int j = 0; j < modelSet<T>::numInputs; ++j)
{
whichInputs.push_back(j);
}

for (int i = 0; i < modelSet<T>::numOutputs; ++i)
{
if (classificationType == svm)
{
modelSet<T>::myModelSet.push_back(new svmClassification<T>(modelSet<T>::numInputs));
}
else
{
modelSet<T>::myModelSet.push_back(new knnClassification<T>(modelSet<T>::numInputs, whichInputs, training_set, 1));
}
}

return modelSet<T>::train(training_set);
}

return modelSet<T>::train(training_set);
}

return false;
return false;
}

template<typename T>
std::vector<int> classificationTemplate<T>::getK()
std::vector<int> classificationTemplate<T>::getK()
{
std::vector<int> kVector;
for (const baseModel<T>* model : modelSet<T>::myModelSet)
{
kVector.push_back(dynamic_cast<const knnClassification<T>*>(model)->getK()); //FIXME: I really dislike this design
}

return kVector;
std::vector<int> kVector;

for (baseModel<T>* model : modelSet<T>::myModelSet)
{
knnClassification<T>* kNNModel = dynamic_cast<knnClassification<T>*>(model); //FIXME: I really dislike this design
kVector.push_back(kNNModel->getK());
}
return kVector;
}

template<typename T>
void classificationTemplate<T>::setK(const int whichModel, const int newK)
void classificationTemplate<T>::setK(const int whichModel, const int newK)
{
dynamic_cast<knnClassification<T>*>(modelSet<T>::myModelSet[whichModel])->setK(newK); //FIXME: I really dislike this design
knnClassification<T>* kNNModel = dynamic_cast<knnClassification<T>*>(modelSet<T>::myModelSet[whichModel]); //FIXME: I really dislike this design
kNNModel->setK(newK);
}

//explicit instantiation
Expand Down
86 changes: 39 additions & 47 deletions src/fastDTW.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,70 +19,62 @@ fastDTW<T>::~fastDTW() {};
template<typename T>
warpInfo<T> fastDTW<T>::fullFastDTW(const std::vector<std::vector<T>> &seriesX, const std::vector<std::vector<T > > &seriesY, int searchRadius)
{

#ifndef EMSCRIPTEN
if (seriesY.size() > seriesX.size())
{
return fullFastDTW(seriesY, seriesX, searchRadius); //TODO: I'm not sure why I need this. Also, not sure why it fails with Emscripten.
}
if (seriesY.size() > seriesX.size())
{
return fullFastDTW(seriesY, seriesX, searchRadius); //TODO: I'm not sure why I need this. Also, not sure why it fails with Emscripten.
}
#endif

dtw<T> dtw;
searchRadius = std::max(0, searchRadius);
const int minSeries { searchRadius + 2 };

if (seriesX.size() <= minSeries || seriesY.size() <= minSeries)
{
return dtw.dynamicTimeWarp(seriesX, seriesY);
}

const T resolution = 2.0; //TODO: Just hardcode this?

const std::vector<std::vector<T>> shrunkenX { downsample(seriesX, resolution) };
const std::vector<std::vector<T>> shrunkenY { downsample(seriesY, resolution) };

//some nice recursion here
const searchWindow<T> window(static_cast<int>(seriesX.size()), static_cast<int>(seriesY.size()), getWarpPath(shrunkenX, shrunkenY, searchRadius), searchRadius);
return dtw.constrainedDTW(seriesX, seriesY, window);

dtw<T> dtw;
searchRadius = (searchRadius < 0) ? 0 : searchRadius;
int minSeries = searchRadius + 2;
if (seriesX.size() <= minSeries || seriesY.size() <= minSeries)
{
return dtw.dynamicTimeWarp(seriesX, seriesY);
}

T resolution = 2.0;//TODO: Just hardcode this?
std::vector<std::vector<T>> shrunkenX = downsample(seriesX, resolution);
std::vector<std::vector<T>> shrunkenY = downsample(seriesY, resolution);

//some nice recursion here
searchWindow<T> window(int(seriesX.size()), int(seriesY.size()), getWarpPath(shrunkenX, shrunkenY, searchRadius), searchRadius);
return dtw.constrainedDTW(seriesX, seriesY, window);
};

template<typename T>
T fastDTW<T>::getCost(const std::vector<std::vector<T>> &seriesX, const std::vector<std::vector<T > > &seriesY, int searchRadius)
{
const warpInfo<T> info { fullFastDTW(seriesX, seriesY, searchRadius) };
return info.cost;
warpInfo<T> info = fullFastDTW(seriesX, seriesY, searchRadius);
return info.cost;
};

template<typename T>
warpPath fastDTW<T>::getWarpPath(const std::vector<std::vector<T>> &seriesX, const std::vector<std::vector<T > > &seriesY, int searchRadius)
{
const warpInfo<T> info { fullFastDTW(seriesX, seriesY, searchRadius) };
return info.path;
warpInfo<T> info = fullFastDTW(seriesX, seriesY, searchRadius);
return info.path;
};

template<typename T>
inline std::vector<std::vector<T> > fastDTW<T>::downsample(const std::vector<std::vector<T>> &series, T resolution)
inline std::vector<std::vector<T> > fastDTW<T>::downsample(const std::vector<std::vector<T>> &series, T resolution)
{
std::vector<std::vector<T> > shrunkenSeries;

for (std::size_t i {}; i < series.size(); ++i)
{
if (i % 2 == 0)
{
shrunkenSeries.push_back(series[i]);
}
else
{
const int shrunkIndex { static_cast<int>(i * 0.5) };
for (std::size_t j {}; j < series[i].size(); ++j)
{
shrunkenSeries[shrunkIndex][j] = (shrunkenSeries[shrunkIndex][j] + series[i][j]) * (T)0.5;
}
std::vector<std::vector<T> > shrunkenSeries;

for (std::size_t i = 0; i < series.size(); ++i) {
if (i % 2 == 0) {
shrunkenSeries.push_back(series[i]);
} else {
int shrunkIndex = int(i * 0.5);
for (std::size_t j = 0; j < series[i].size(); ++j) {
shrunkenSeries[shrunkIndex][j] = (shrunkenSeries[shrunkIndex][j] + series[i][j]) * (T)0.5;
}
}
}
}

//TODO: implement downsampling by resolution
return shrunkenSeries;
//TODO: implement downsampling by resolution
return shrunkenSeries;
}

//explicit instantiation
Expand Down
Loading

0 comments on commit 0196c3f

Please sign in to comment.