From 50b8371c71f29e0e2ad7afdcdc984612d4225b83 Mon Sep 17 00:00:00 2001
From: Michael ZBYSZYNSKI
Date: Tue, 14 Nov 2023 11:31:51 +0000
Subject: [PATCH] silence warnings, style

---
 dependencies/jsoncpp.cpp       |   2 +-
 dependencies/libsvm/libsvm.cpp |   2 +-
 src/rapidStream.cpp            | 449 +++++++++++++++++----------------
 src/regression.cpp             | 316 ++++++++++++-----------
 4 files changed, 384 insertions(+), 385 deletions(-)

diff --git a/dependencies/jsoncpp.cpp b/dependencies/jsoncpp.cpp
index 85abbab6..4c1b04c1 100644
--- a/dependencies/jsoncpp.cpp
+++ b/dependencies/jsoncpp.cpp
@@ -4206,7 +4206,7 @@ JSONCPP_STRING valueToString(double value, bool useSpecialFloats, unsigned int p
   int len = -1;
 
   char formatString[6];
-  sprintf(formatString, "%%.%dg", precision);
+  snprintf(formatString, 6, "%%.%dg", precision);
 
   // Print into the buffer. We need not request the alternative representation
   // that always has a decimal point because JSON doesn't distingish the
diff --git a/dependencies/libsvm/libsvm.cpp b/dependencies/libsvm/libsvm.cpp
index 7186344e..44b949d7 100644
--- a/dependencies/libsvm/libsvm.cpp
+++ b/dependencies/libsvm/libsvm.cpp
@@ -52,7 +52,7 @@ namespace LIBSVM {
   char buf[BUFSIZ];
   va_list ap;
   va_start(ap,fmt);
-  vsprintf(buf,fmt,ap);
+  vsnprintf(buf,BUFSIZ,fmt,ap);
   va_end(ap);
   (*svm_print_string)(buf);
 }
diff --git a/src/rapidStream.cpp b/src/rapidStream.cpp
index fcada46e..f121a43e 100644
--- a/src/rapidStream.cpp
+++ b/src/rapidStream.cpp
@@ -17,256 +17,259 @@ namespace rapidLib
 {
-    template<typename T>
-    rapidStream<T>::rapidStream(std::size_t window_size)
-    {
-        windowSize = window_size;
-        windowIndex = 0;
-        circularWindow.resize(windowSize);
-        std::fill(circularWindow.begin(), circularWindow.end(), 0);
-
-        //Baysian Filter setup
-        bayesFilt.diffusion = powf(10., -2);
-        bayesFilt.jump_rate = powf(10., -10);
-        bayesFilt.mvc[0] = 1.;
-        bayesFilt.init();
-    }
+template<typename T>
+rapidStream<T>::rapidStream(std::size_t window_size) :
+windowSize(window_size),
+windowIndex(0)
+{
+    circularWindow.resize(windowSize);
+    std::fill(circularWindow.begin(), circularWindow.end(), 0);
+
+    //Bayesian Filter setup
+    bayesFilt.diffusion = powf(10., -2);
+    bayesFilt.jump_rate = powf(10., -10);
+    bayesFilt.mvc[0] = 1.;
+    bayesFilt.init();
+}

-    template<typename T>
-    rapidStream<T>::rapidStream()
-    {
-        windowSize = 3;
-        windowIndex = 0;
-        circularWindow.resize(windowSize);
-        std::fill(circularWindow.begin(), circularWindow.end(), 0);
-
-        //Baysian Filter setup
-        bayesFilt.diffusion = powf(10., -2);
-        bayesFilt.jump_rate = powf(10., -10);
-        bayesFilt.mvc[0] = 1.;
-        bayesFilt.init();
-    }
+template<typename T>
+rapidStream<T>::rapidStream() :
+windowSize(3),
+windowIndex(0)
+{
+    circularWindow.resize(windowSize);
+    std::fill(circularWindow.begin(), circularWindow.end(), 0);
+
+    //Bayesian Filter setup
+    bayesFilt.diffusion = powf(10., -2);
+    bayesFilt.jump_rate = powf(10., -10);
+    bayesFilt.mvc[0] = 1.;
+    bayesFilt.init();
+}

-    template<typename T>
-    rapidStream<T>::~rapidStream()
-    {
-        //delete[]circularWindow;
-    }
+template<typename T>
+rapidStream<T>::~rapidStream()
+{
+    //delete[]circularWindow;
+}

-    template<typename T>
-    void rapidStream<T>::clear()
-    {
-        windowIndex = 0;
-        circularWindow.resize(windowSize);
-        std::fill(circularWindow.begin(), circularWindow.end(), 0);
-    }
+template<typename T>
+void rapidStream<T>::clear()
+{
+    windowIndex = 0;
+    circularWindow.resize(windowSize);
+    std::fill(circularWindow.begin(), circularWindow.end(), 0);
+}

-    template<typename T>
-    void rapidStream<T>::pushToWindow(T input)
-    {
-        circularWindow[windowIndex] = input;
-        windowIndex = (windowIndex + 1) % windowSize;
-    }
+template<typename T>
+void rapidStream<T>::pushToWindow(T input)
+{
+    circularWindow[windowIndex] = input;
+    windowIndex = (windowIndex + 1) % windowSize;
+}

-    template<typename T>
-    inline T rapidStream<T>::calcCurrentVel(std::size_t i) const
-    {
-        return circularWindow[(i + windowIndex) % windowSize] - circularWindow[(i + windowIndex - 1) % windowSize];
-    }
+template<typename T>
+inline T rapidStream<T>::calcCurrentVel(std::size_t i) const
+{
+    return circularWindow[(i + windowIndex) % windowSize] - circularWindow[(i + windowIndex - 1) % windowSize];
+}

-    template<typename T>
-    T rapidStream<T>::velocity() const
-    {
-        return calcCurrentVel(-1);
-    };
+template<typename T>
+T rapidStream<T>::velocity() const
+{
+    return calcCurrentVel(-1);
+};

-    template<typename T>
-    T rapidStream<T>::acceleration() const
-    {
-        return calcCurrentVel(-2) - calcCurrentVel(-3);
-    };
+template<typename T>
+T rapidStream<T>::acceleration() const
+{
+    return calcCurrentVel(-2) - calcCurrentVel(-3);
+};

-    template<typename T>
-    T rapidStream<T>::minimum() const
-    {
-        return *std::min_element(circularWindow.begin(), circularWindow.end());
-    }
+template<typename T>
+T rapidStream<T>::minimum() const
+{
+    return *std::min_element(circularWindow.begin(), circularWindow.end());
+}

-    template<typename T>
-    T rapidStream<T>::maximum() const
-    {
-        return *std::max_element(circularWindow.begin(), circularWindow.end());;
-    }
+template<typename T>
+T rapidStream<T>::maximum() const
+{
+    return *std::max_element(circularWindow.begin(), circularWindow.end());
+}

-    template<typename T>
-    uint32_t rapidStream<T>::numZeroCrossings() const
-    {
-        uint32_t zeroCrossings = 0;
-        //Is the begininng positive, negative, or 0?
-        int previous = 1;
-        if (circularWindow[windowIndex] < 0)
-        {
-            previous = -1;
-        }
-        else if (circularWindow[windowIndex] == 0)
-        {
-            ++zeroCrossings;
-            previous = 0;
-        }
-
-        for (std::size_t i = 1; i < windowSize; ++i)
-        {
-            std::size_t index = (windowIndex + i) % windowSize;
-            if (circularWindow[index] < 0 && previous >= 0)
-            { //Transition to negative
-                ++zeroCrossings;
-                previous = -1;
-            }
-            else if (circularWindow[index] > 0 && previous <= 0)
-            { //Transition to positive
-                ++zeroCrossings;
-                previous = 1;
-            }
-            else
-            { //Sample == 0
-                previous = 0;
-            }
-        }
-        return zeroCrossings;
-    }
+template<typename T>
+uint32_t rapidStream<T>::numZeroCrossings() const
+{
+    uint32_t zeroCrossings {};
+
+    //Is the beginning positive, negative, or 0?
+    int previous { 1 };
+    if (circularWindow[windowIndex] < 0)
+    {
+        previous = -1;
+    }
+    else if (circularWindow[windowIndex] == 0)
+    {
+        ++zeroCrossings;
+        previous = 0;
+    }
+
+    for (std::size_t i { 1 }; i < windowSize; ++i)
+    {
+        const std::size_t index { (windowIndex + i) % windowSize };
+
+        if (circularWindow[index] < 0 && previous >= 0) //Transition to negative
+        {
+            ++zeroCrossings;
+            previous = -1;
+        }
+        else if (circularWindow[index] > 0 && previous <= 0) //Transition to positive
+        {
+            ++zeroCrossings;
+            previous = 1;
+        }
+        else //Sample == 0
+        {
+            previous = 0;
+        }
+    }
+    return zeroCrossings;
+}

-    template<typename T>
-    T rapidStream<T>::sum() const
-    {
-        return std::accumulate(circularWindow.begin(), circularWindow.end(), 0);
-    }
+template<typename T>
+T rapidStream<T>::sum() const
+{
+    return std::accumulate(circularWindow.begin(), circularWindow.end(), 0);
+}

-    template<typename T>
-    T rapidStream<T>::mean() const
-    {
-        return sum() / windowSize;
-    }
+template<typename T>
+T rapidStream<T>::mean() const
+{
+    return sum() / windowSize;
+}

-    template<typename T>
-    T rapidStream<T>::standardDeviation() const
-    {
-        T newMean = mean();
-        T standardDeviation = 0.;
-        for (auto value:circularWindow)
-        {
-            standardDeviation += (T)pow(value - newMean, 2);
-        }
-        return sqrt(standardDeviation / windowSize);
-    }
+template<typename T>
+T rapidStream<T>::standardDeviation() const
+{
+    const T newMean { mean() };
+    T standardDeviation {};
+
+    for (auto value : circularWindow)
+    {
+        standardDeviation += static_cast<T>(pow(value - newMean, 2));
+    }
+    return sqrt(standardDeviation / windowSize);
+}

-    template<typename T>
-    T rapidStream<T>::rms() const
-    {
-        T rms = 0;
-        for (auto value:circularWindow)
-        {
-            rms += value * value;
-        }
-        rms = rms / windowSize;
-        return sqrt(rms);
-    }
+template<typename T>
+T rapidStream<T>::rms() const
+{
+    T rms {};
+
+    for (auto value : circularWindow)
+    {
+        rms += value * value;
+    }
+
+    rms = rms / windowSize;
+    return sqrt(rms);
+}

-    template<typename T>
-    T rapidStream<T>::bayesFilter(T input)
-    {
-        std::vector<float> inputVec = { float(input) };
-        bayesFilt.update(inputVec);
-        return T(bayesFilt.output[0]);
-    }
+template<typename T>
+T rapidStream<T>::bayesFilter(T input)
+{
+    std::vector<float> inputVec = { float(input) };
+    bayesFilt.update(inputVec);
+    return static_cast<T>(bayesFilt.output[0]);
+}

-    template<typename T>
-    void rapidStream<T>::bayesSetDiffusion(float diffusion)
-    {
-        bayesFilt.diffusion = powf(10., diffusion);
-        bayesFilt.init();
-    }
+template<typename T>
+void rapidStream<T>::bayesSetDiffusion(float diffusion)
+{
+    bayesFilt.diffusion = powf(10., diffusion);
+    bayesFilt.init();
+}

-    template<typename T>
-    void rapidStream<T>::bayesSetJumpRate(float jump_rate)
-    {
-        bayesFilt.jump_rate = powf(10., jump_rate);
-        bayesFilt.init();
-    }
+template<typename T>
+void rapidStream<T>::bayesSetJumpRate(float jump_rate)
+{
+    bayesFilt.jump_rate = powf(10., jump_rate);
+    bayesFilt.init();
+}

-    template<typename T>
-    void rapidStream<T>::bayesSetMVC(float mvc)
-    {
-        bayesFilt.mvc[0] = mvc;
-        bayesFilt.init();
-    }
+template<typename T>
+void rapidStream<T>::bayesSetMVC(float mvc)
+{
+    bayesFilt.mvc[0] = mvc;
+    bayesFilt.init();
+}

-    template<typename T>
-    T rapidStream<T>::minVelocity() const
-    {
-        T minVel = std::numeric_limits<T>::infinity();
-        for (std::size_t i = 0; i < windowSize; ++i)
-        {
-            T currentVel = calcCurrentVel(i);
-            if (currentVel < minVel) {
-                minVel = currentVel;
-            }
-        }
-        return minVel;
-    }
+template<typename T>
+T rapidStream<T>::minVelocity() const
+{
+    T minVel { std::numeric_limits<T>::infinity() };
+
+    for (std::size_t i {}; i < windowSize; ++i)
+    {
+        const T currentVel { calcCurrentVel(i) };
+        if (currentVel < minVel) minVel = currentVel;
+    }
+
+    return minVel;
+}

-    template<typename T>
-    T rapidStream<T>::maxVelocity() const
-    {
-        T maxVel = std::numeric_limits<T>::lowest();
-        for (std::size_t i = 0; i < windowSize; ++i)
-        {
-            T currentVel = calcCurrentVel(i);
-            if (currentVel > maxVel)
-            {
-                maxVel = currentVel;
-            }
-        }
-        return maxVel;
-    }
+template<typename T>
+T rapidStream<T>::maxVelocity() const
+{
+    T maxVel { std::numeric_limits<T>::lowest() };
+
+    for (std::size_t i {}; i < windowSize; ++i)
+    {
+        const T currentVel { calcCurrentVel(i) };
+        if (currentVel > maxVel) maxVel = currentVel;
+    }
+
+    return maxVel;
+}

-    template<typename T>
-    T rapidStream<T>::minAcceleration() const
-    {
-        T minAccel = std::numeric_limits<T>::infinity();
-        T lastVel = calcCurrentVel(1);
-        for (std::size_t i = 2; i < windowSize; ++i)
-        {
-            T currentVel = calcCurrentVel(i);
-            T currentAccel = currentVel - lastVel;
-            lastVel = currentVel;
-            if (currentAccel < minAccel)
-            {
-                minAccel = currentAccel;
-            }
-        }
-        return minAccel;
-    }
+template<typename T>
+T rapidStream<T>::minAcceleration() const
+{
+    T minAccel { std::numeric_limits<T>::infinity() };
+    T lastVel { calcCurrentVel(1) };
+
+    for (std::size_t i { 2 }; i < windowSize; ++i)
+    {
+        const T currentVel { calcCurrentVel(i) };
+        const T currentAccel { currentVel - lastVel };
+        lastVel = currentVel;
+        if (currentAccel < minAccel) minAccel = currentAccel;
+    }
+
+    return minAccel;
+}

-    template<typename T>
-    T rapidStream<T>::maxAcceleration() const
-    {
-        T maxAccel = std::numeric_limits<T>::lowest();
-        T lastVel = calcCurrentVel(1);
-        for (std::size_t i = 2; i < windowSize; ++i)
-        {
-            T currentVel = calcCurrentVel(i);
-            T currentAccel = currentVel - lastVel;
-            lastVel = currentVel;
-            if (currentAccel > maxAccel)
-            {
-                maxAccel = currentAccel;
-            }
-        }
-        return maxAccel;
-    }
+template<typename T>
+T rapidStream<T>::maxAcceleration() const
+{
+    T maxAccel { std::numeric_limits<T>::lowest() };
+    T lastVel { calcCurrentVel(1) };
+
+    for (std::size_t i { 2 }; i < windowSize; ++i)
+    {
+        const T currentVel { calcCurrentVel(i) };
+        const T currentAccel { currentVel - lastVel };
+        lastVel = currentVel;
+        if (currentAccel > maxAccel) maxAccel = currentAccel;
+    }
+
+    return maxAccel;
+}

-    //explicit instantiation
-    template class rapidStream<double>;
-    template class rapidStream<float>;
+//explicit instantiation
+template class rapidStream<double>;
+template class rapidStream<float>;
 };
diff --git a/src/regression.cpp b/src/regression.cpp
index 4e9b1ad2..3616c61b 100644
--- a/src/regression.cpp
+++ b/src/regression.cpp
@@ -18,234 +18,230 @@
 #endif
 
 template<typename T>
-regressionTemplate<T>::regressionTemplate()
+regressionTemplate<T>::regressionTemplate() :
+numHiddenLayers(1),
+numHiddenNodes(0), //this will be changed by training
+numEpochs(500)
 {
-    modelSet<T>::numInputs = -1;
-    modelSet<T>::numOutputs = -1;
-    numHiddenLayers = 1;
-    numHiddenNodes = 0; //this will be changed by training
-    numEpochs = 500;
-    modelSet<T>::isTraining = false;
+    modelSet<T>::numInputs = -1;
+    modelSet<T>::numOutputs = -1;
+    modelSet<T>::isTraining = false;
 };
 
 template<typename T>
 regressionTemplate<T>::regressionTemplate(const int &num_inputs, const int &num_outputs)
 {
-    modelSet<T>::numInputs = num_inputs;
-    modelSet<T>::numOutputs = num_outputs;
-    numHiddenLayers = 1;
-    numEpochs = 500;
-    numHiddenNodes = num_inputs;
-    modelSet<T>::isTraining = false;
-    created = false;
-    std::vector<int> whichInputs;
-
-    for (int i = 0; i < modelSet<T>::numInputs; ++i)
-    {
-        whichInputs.push_back(i);
-    }
-
-    for (int i = 0; i < modelSet<T>::numOutputs; ++i)
-    {
-        modelSet<T>::myModelSet.push_back(new neuralNetwork<T>(modelSet<T>::numInputs, whichInputs, numHiddenLayers, numHiddenNodes));
-    }
-    created = true;
+    modelSet<T>::numInputs = num_inputs;
+    modelSet<T>::numOutputs = num_outputs;
+    numHiddenLayers = 1;
+    numEpochs = 500;
+    numHiddenNodes = num_inputs;
+    modelSet<T>::isTraining = false;
+    created = false;
+    std::vector<int> whichInputs;
+
+    for (int i {}; i < modelSet<T>::numInputs; ++i) whichInputs.push_back(i);
+
+    for (int i {}; i < modelSet<T>::numOutputs; ++i)
+    {
+        modelSet<T>::myModelSet.push_back(new neuralNetwork<T>(modelSet<T>::numInputs, whichInputs, numHiddenLayers, numHiddenNodes));
+    }
+
+    created = true;
 };
 
 template<typename T>
-regressionTemplate<T>::regressionTemplate(const std::vector<trainingExampleTemplate<T> > &training_set)
+regressionTemplate<T>::regressionTemplate(const std::vector<trainingExampleTemplate<T> > &training_set)
 {
-    modelSet<T>::numInputs = -1;
-    modelSet<T>::numOutputs = -1;
-    modelSet<T>::isTraining = false;
-    train(training_set);
+    modelSet<T>::numInputs = -1;
+    modelSet<T>::numOutputs = -1;
+    modelSet<T>::isTraining = false;
+    train(training_set);
 };
 
 template<typename T>
 std::vector<int> regressionTemplate<T>::getNumHiddenLayers() const
 {
-    std::vector<int> vecNumHiddenLayers;
-
-    if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet))
-    {
-        for (const baseModel<T>* model : modelSet<T>::myModelSet)
-        {
-            vecNumHiddenLayers.push_back(dynamic_cast<const neuralNetwork<T>*>(model)->getNumHiddenLayers()); //FIXME: I really dislike this design
-        }
-    }
-    else
-    {
-        vecNumHiddenLayers = { numHiddenLayers };
-    }
+    std::vector<int> vecNumHiddenLayers {};
+
+    if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet))
+    {
+        for (const baseModel<T>* model : modelSet<T>::myModelSet)
+        {
+            vecNumHiddenLayers.push_back(dynamic_cast<const neuralNetwork<T>*>(model)->getNumHiddenLayers()); //FIXME: I really dislike this design
+        }
+    }
+    else
+    {
+        vecNumHiddenLayers = { numHiddenLayers };
+    }
 
     return vecNumHiddenLayers;
 }
 
 template<typename T>
 void regressionTemplate<T>::setNumHiddenLayers(const int &num_hidden_layers)
 {
     numHiddenLayers = num_hidden_layers;
     //Set any existing models
     if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet))
     {
         for (baseModel<T>* model : modelSet<T>::myModelSet)
         {
-            neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
-            nnModel->setNumHiddenLayers(num_hidden_layers);
+            dynamic_cast<neuralNetwork<T>*>(model)->setNumHiddenLayers(num_hidden_layers); //FIXME: I really dislike this design
         }
     }
 }
 
 template<typename T>
 std::vector<int> regressionTemplate<T>::getNumHiddenNodes() const
 {
-    std::vector<int> vecNumHiddenNodes;
-
-    if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet))
-    {
-        for (const baseModel<T>* model : modelSet<T>::myModelSet)
-        {
-            vecNumHiddenNodes.push_back(dynamic_cast<const neuralNetwork<T>*>(model)->getNumHiddenNodes()); //FIXME: I really dislike this design
-        }
-    }
-    else
-    {
-        vecNumHiddenNodes = { numHiddenNodes };
-    }
+    std::vector<int> vecNumHiddenNodes {};
+
+    if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet))
+    {
+        for (const baseModel<T>* model : modelSet<T>::myModelSet)
+        {
+            vecNumHiddenNodes.push_back(dynamic_cast<const neuralNetwork<T>*>(model)->getNumHiddenNodes()); //FIXME: I really dislike this design
+        }
+    }
+    else
+    {
+        vecNumHiddenNodes = { numHiddenNodes };
+    }
 
     return vecNumHiddenNodes;
 }
 
 template<typename T>
 void regressionTemplate<T>::setNumHiddenNodes(const int &num_hidden_nodes)
 {
     numHiddenNodes = num_hidden_nodes;
 
     //Set any existing models
     if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet))
     {
         for (baseModel<T>* model : modelSet<T>::myModelSet)
         {
-            neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
-            nnModel->setNumHiddenNodes(num_hidden_nodes);
+            dynamic_cast<neuralNetwork<T>*>(model)->setNumHiddenNodes(num_hidden_nodes); //FIXME: I really dislike this design
         }
     }
 }
 
 template<typename T>
 std::vector<size_t> regressionTemplate<T>::getNumEpochs() const
 {
     std::vector<size_t> vecEpochs;
     if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet))
     {
-        for (baseModel<T>* model : modelSet<T>::myModelSet)
-        {
-            neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
-            vecEpochs.push_back(nnModel->getEpochs());
-        }
+        for (const baseModel<T>* model : modelSet<T>::myModelSet)
+        {
+            vecEpochs.push_back(dynamic_cast<const neuralNetwork<T>*>(model)->getEpochs()); //FIXME: I really dislike this design
+        }
     }
     else
     {
         vecEpochs = { numEpochs };
     }
     return vecEpochs;
 }
 
 template<typename T>
 void regressionTemplate<T>::setNumEpochs(const size_t &epochs)
 {
     numEpochs = epochs;
 
     //set any existing models
     if (std::begin(modelSet<T>::myModelSet) != std::end(modelSet<T>::myModelSet))
     {
         for (baseModel<T>* model : modelSet<T>::myModelSet)
         {
-            neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
-            nnModel->setEpochs(epochs);
+            dynamic_cast<neuralNetwork<T>*>(model)->setEpochs(epochs); //FIXME: I really dislike this design
         }
     }
 }
 
 template<typename T>
-bool regressionTemplate<T>::train(const std::vector<trainingExampleTemplate<T> > &training_set)
+bool regressionTemplate<T>::train(const std::vector<trainingExampleTemplate<T> > &training_set)
 {
-    //clock_t timer;
-    //timer = clock();
-    modelSet<T>::reset(); //FIXME: Should use modelSet if already created?
-    if (training_set.size() > 0)
-    {
-        //create model(s) here
-        modelSet<T>::numInputs = int(training_set[0].input.size());
-        modelSet<T>::numOutputs = int(training_set[0].output.size());
-
-        for (int i = 0; i < modelSet<T>::numInputs; ++i)
-        {
-            modelSet<T>::inputNames.push_back("inputs-" + std::to_string(i + 1));
-        }
-        modelSet<T>::numOutputs = int(training_set[0].output.size());
-
-        for ( auto example : training_set)
-        {
-            if (example.input.size() != modelSet<T>::numInputs)
-            {
-                throw std::length_error("unequal feature vectors in input.");
-                return false;
-            }
-            if (example.output.size() != modelSet<T>::numOutputs)
-            {
-                throw std::length_error("unequal output vectors.");
-                return false;
-            }
-        }
-
-        if(numHiddenNodes == 0) numHiddenNodes = modelSet<T>::numInputs;
-        std::vector<int> whichInputs;
-
-        for (int j = 0; j < modelSet<T>::numInputs; ++j)
-        {
-            whichInputs.push_back(j);
-        }
-
-        for (int i = 0; i < modelSet<T>::numOutputs; ++i)
-        {
-            modelSet<T>::myModelSet.push_back(new neuralNetwork<T>(modelSet<T>::numInputs, whichInputs, numHiddenLayers, numHiddenNodes));
-        }
-
-        if (numEpochs != 500)
-        {
-            for (baseModel<T>* model : modelSet<T>::myModelSet)
-            {
-                neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
-                nnModel->setEpochs(numEpochs);
-            }
-        }
-
-        //timer = clock() - timer;
-        bool result = modelSet<T>::train(training_set);
-        //std::cout << "Regression trained in " << (float)timer/CLOCKS_PER_SEC << " ms." << std::endl;
-        return result;
-    }
-    throw std::length_error("empty training set.");
-    return false;
+    //clock_t timer;
+    //timer = clock();
+    modelSet<T>::reset(); //FIXME: Should use modelSet if already created?
+
+    if (training_set.size() > 0)
+    {
+        //create model(s) here
+        modelSet<T>::numInputs = int(training_set[0].input.size());
+        modelSet<T>::numOutputs = int(training_set[0].output.size());
+
+        for (int i = 0; i < modelSet<T>::numInputs; ++i)
+        {
+            modelSet<T>::inputNames.push_back("inputs-" + std::to_string(i + 1));
+        }
+        modelSet<T>::numOutputs = int(training_set[0].output.size());
+
+        for (auto example : training_set)
+        {
+            if (example.input.size() != modelSet<T>::numInputs)
+            {
+                throw std::length_error("unequal feature vectors in input.");
+                return false;
+            }
+            if (example.output.size() != modelSet<T>::numOutputs)
+            {
+                throw std::length_error("unequal output vectors.");
+                return false;
+            }
+        }
+
+        if (numHiddenNodes == 0) numHiddenNodes = modelSet<T>::numInputs;
+        std::vector<int> whichInputs;
+
+        for (int j = 0; j < modelSet<T>::numInputs; ++j)
+        {
+            whichInputs.push_back(j);
+        }
+
+        for (int i = 0; i < modelSet<T>::numOutputs; ++i)
+        {
+            modelSet<T>::myModelSet.push_back(new neuralNetwork<T>(modelSet<T>::numInputs, whichInputs, numHiddenLayers, numHiddenNodes));
+        }
+
+        if (numEpochs != 500)
+        {
+            for (baseModel<T>* model : modelSet<T>::myModelSet)
+            {
+                neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
+                nnModel->setEpochs(numEpochs);
+            }
+        }
+
+        //timer = clock() - timer;
+        bool result = modelSet<T>::train(training_set);
+        //std::cout << "Regression trained in " << (float)timer/CLOCKS_PER_SEC << " ms." << std::endl;
+        return result;
+    }
+    throw std::length_error("empty training set.");
+    return false;
 }
 
 template<typename T>
 float regressionTemplate<T>::getTrainingProgress()
 {
-    float progress = modelSet<T>::isTrained ? 1.f : 0.f;
-
-    if (modelSet<T>::isTraining)
-    {
-        for (baseModel<T>* model : modelSet<T>::myModelSet)
-        {
-            neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
-            progress += (nnModel->getCurrentEpoch() / nnModel->getEpochs());
-        }
-
-        progress /= modelSet<T>::myModelSet.size();
-    }
-
-    return progress;
+    float progress = modelSet<T>::isTrained ? 1.f : 0.f;
+
+    if (modelSet<T>::isTraining)
+    {
+        for (baseModel<T>* model : modelSet<T>::myModelSet)
+        {
+            neuralNetwork<T>* nnModel = dynamic_cast<neuralNetwork<T>*>(model); //FIXME: I really dislike this design
+            progress += (nnModel->getCurrentEpoch() / nnModel->getEpochs());
+        }
+
+        progress /= modelSet<T>::myModelSet.size();
+    }
+
+    return progress;
 }
 
 //explicit instantiation
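
Note on the two dependency fixes: both apply the same hardening pattern, replacing an unbounded sprintf/vsprintf with its size-checked counterpart, which is what silences the overflow/deprecation warnings. A minimal standalone sketch of the pattern, mirroring the jsoncpp call site (the variable names here are illustrative, not part of the patch):

#include <cstdio>

int main()
{
    const int precision = 17; // mirrors jsoncpp's precision parameter
    char formatString[6];     // "%.17g" plus NUL terminator fits exactly

    // snprintf writes at most sizeof(formatString) bytes, terminator
    // included, so an oversized precision can no longer overflow the
    // buffer the way sprintf could.
    std::snprintf(formatString, sizeof(formatString), "%%.%dg", precision);

    char out[32];
    std::snprintf(out, sizeof(out), formatString, 0.1);
    std::printf("%s -> %s\n", formatString, out);
    return 0;
}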
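
For reviewers unfamiliar with the class being restyled in src/rapidStream.cpp, here is a minimal usage sketch of the rapidStream API as it appears in this diff: push samples into the circular window, then query statistics over it. The header path is assumed from the repo layout; the example is illustrative only and not part of the patch.

#include <iostream>
#include "rapidStream.h" // assumed header for src/rapidStream.cpp

int main()
{
    rapidLib::rapidStream<double> stream(4); // 4-sample circular window

    for (double sample : { 0.1, -0.2, 0.4, -0.3 })
        stream.pushToWindow(sample);

    // Each statistic is computed over the current window contents.
    std::cout << "mean: " << stream.mean() << "\n"
              << "rms: " << stream.rms() << "\n"
              << "zero crossings: " << stream.numZeroCrossings() << "\n";
    return 0;
}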