From b6360c37a22ea1efe0581ce8832ee9e3d06a9b75 Mon Sep 17 00:00:00 2001 From: David Hirvonen Date: Sat, 21 Oct 2017 17:47:53 -0400 Subject: [PATCH] simplify emulator tests (#597) * * refactor test image vector (various resolutions) to use map for sparse sampling of image width * use GAUZE_ANDROID_USE_EMULATOR preprocessor switch to reduce testing on emulator * clang-format * clang-format src/app --- src/app/acf/mat2cpb.cpp | 4 +- src/app/cpr/train_cpr.cpp | 2 +- src/app/dlib/facexml.cpp | 4 +- src/app/face/face.cpp | 4 +- src/app/hci/hci.cpp | 30 +-- src/app/qt/facefilter/CameraListener.cpp | 9 +- src/app/qt/facefilter/CameraListener.h | 12 +- src/app/qt/facefilter/FPSCalculator.cpp | 2 +- src/app/qt/facefilter/FrameHandler.cpp | 16 +- src/app/qt/facefilter/FrameHandler.h | 13 +- src/app/qt/facefilter/GLVersion.h | 10 +- src/app/qt/facefilter/ImageLogger.cpp | 21 +- src/app/qt/facefilter/ImageLogger.h | 1 + src/app/qt/facefilter/QMLCameraManager.cpp | 24 +- src/app/qt/facefilter/QMLCameraManager.h | 3 +- .../qt/facefilter/QtFaceDetectorFactory.cpp | 2 +- .../QtFaceDetectorFactoryCereal.cpp | 2 +- src/app/qt/facefilter/VideoFilterRunnable.cpp | 2 +- src/app/qt/facefilter/main.cpp | 25 +-- src/lib/drishti/acf/ACFField.h | 2 +- src/lib/drishti/acf/GPUACF.cpp | 16 +- src/lib/drishti/acf/GPUACF.h | 2 +- src/lib/drishti/acf/ut/test-drishti-acf.cpp | 31 +-- src/lib/drishti/core/Field.h | 15 +- src/lib/drishti/core/ImageView.h | 12 +- src/lib/drishti/core/Shape.cpp | 2 +- src/lib/drishti/core/drishti_stdlib_string.h | 6 +- src/lib/drishti/core/hungarian.h | 1 - src/lib/drishti/core/ut/test-drishti-core.cpp | 15 +- src/lib/drishti/drishti/Array.hpp | 4 +- src/lib/drishti/drishti/Context.cpp | 3 +- src/lib/drishti/drishti/Context.hpp | 10 +- src/lib/drishti/drishti/ContextImpl.h | 2 +- src/lib/drishti/drishti/Face.hpp | 6 +- src/lib/drishti/drishti/FaceMonitorAdapter.h | 44 ++-- src/lib/drishti/drishti/FaceTracker.hpp | 28 ++- src/lib/drishti/drishti/Image.hpp | 18 +- src/lib/drishti/drishti/Sensor.cpp | 6 +- src/lib/drishti/drishti/Sensor.hpp | 11 +- src/lib/drishti/drishti/SensorImpl.h | 2 +- .../drishti/drishti/ut/test-EyeSegmenter.cpp | 210 +++++++++--------- .../drishti/drishti/ut/test-FaceTracker.cpp | 30 +-- src/lib/drishti/eye/EyeModelIris.cpp | 4 +- src/lib/drishti/eye/ut/test-drishti-eye.cpp | 167 +++++++------- src/lib/drishti/face/Face.cpp | 4 +- src/lib/drishti/face/FaceDetector.cpp | 34 +-- .../drishti/face/FaceDetectorFactoryJson.cpp | 11 +- .../drishti/face/FaceDetectorFactoryJson.h | 2 +- src/lib/drishti/face/FaceTracker.cpp | 51 ++--- src/lib/drishti/face/FaceTracker.h | 17 +- src/lib/drishti/face/gpu/EyeFilter.cpp | 2 +- src/lib/drishti/face/gpu/EyeFilter.h | 3 +- src/lib/drishti/face/gpu/FaceStabilizer.cpp | 4 +- src/lib/drishti/face/ut/test-drishti-face.cpp | 8 +- src/lib/drishti/hci/FaceFinder.cpp | 129 ++++++----- src/lib/drishti/hci/FaceFinder.h | 23 +- src/lib/drishti/hci/FaceFinderImpl.h | 6 +- src/lib/drishti/hci/FaceFinderPainter.cpp | 6 +- src/lib/drishti/hci/FaceFinderPainter.h | 5 +- src/lib/drishti/hci/FaceMonitor.h | 26 +-- src/lib/drishti/hci/ut/FaceMonitorHCITest.h | 4 +- src/lib/drishti/hci/ut/test-drishti-hci.cpp | 8 +- src/lib/drishti/ml/ObjectDetector.cpp | 3 +- src/lib/drishti/ml/XGBoosterImpl.h | 2 +- src/lib/drishti/ml/shape_predictor.h | 17 +- src/lib/drishti/ml/shape_predictor_archive.h | 2 +- src/lib/drishti/rcpr/CPR.h | 2 +- src/lib/drishti/rcpr/CPRIOArchive.h | 2 +- src/lib/drishti/rcpr/cprTrain.cpp | 2 +- src/lib/drishti/rcpr/poseGt.cpp | 23 +- 
src/lib/drishti/sensor/Sensor.h | 5 +- src/tests/drishti/CMakeLists.txt | 4 + 72 files changed, 591 insertions(+), 647 deletions(-) diff --git a/src/app/acf/mat2cpb.cpp b/src/app/acf/mat2cpb.cpp index c0f828dc..da6eb9a4 100644 --- a/src/app/acf/mat2cpb.cpp +++ b/src/app/acf/mat2cpb.cpp @@ -69,7 +69,7 @@ int gauze_main(int argc, char** argv) else { std::ofstream ofs(sOutput); - if(ofs) + if (ofs) { remove(sOutput.c_str()); } @@ -78,7 +78,7 @@ int gauze_main(int argc, char** argv) logger->error("Unable to open {} for writing", sOutput); } } - + drishti::acf::Detector acf(sInput); save_cpb(sOutput, acf); diff --git a/src/app/cpr/train_cpr.cpp b/src/app/cpr/train_cpr.cpp index a204896e..0268a7e8 100644 --- a/src/app/cpr/train_cpr.cpp +++ b/src/app/cpr/train_cpr.cpp @@ -241,7 +241,7 @@ int gauze_main(int argc, char** argv) cprPrm.L = L; cprPrm.T = T; cprPrm.model = model; - + // Note: feature parameters can be overriden cprPrm.ftrPrm->type = 2; cprPrm.ftrPrm->F = F; diff --git a/src/app/dlib/facexml.cpp b/src/app/dlib/facexml.cpp index 31608cb4..01e5bdca 100644 --- a/src/app/dlib/facexml.cpp +++ b/src/app/dlib/facexml.cpp @@ -240,9 +240,9 @@ int gauze_main(int argc, char** argv) } logger->info("Have {} faces", faces.size()); - + int code = drishtiFaceToDlib(faces, sOutput); - if(code != 0) + if (code != 0) { logger->error("Failed to create file {} for writing"); } diff --git a/src/app/face/face.cpp b/src/app/face/face.cpp index 94d0a20f..7ce925e1 100644 --- a/src/app/face/face.cpp +++ b/src/app/face/face.cpp @@ -160,7 +160,7 @@ int gauze_main(int argc, char** argv) auto factory = std::make_shared(); float minZ = 0.1f, maxZ = 1.f; - + cxxopts::Options options("drishti-acf", "Command line interface for ACF object detection (see Piotr's toolbox)"); // clang-format off @@ -236,7 +236,7 @@ int gauze_main(int argc, char** argv) return 1; } - if(!sFactory.empty()) + if (!sFactory.empty()) { factory = std::make_shared(sFactory); } diff --git a/src/app/hci/hci.cpp b/src/app/hci/hci.cpp index ce1b3c81..3972375a 100644 --- a/src/app/hci/hci.cpp +++ b/src/app/hci/hci.cpp @@ -72,7 +72,7 @@ int gauze_main(int argc, char** argv) auto factory = std::make_shared(); cxxopts::Options options("drishti-hci", "Command line interface for video sequence FaceFinder processing."); - + float minZ = 0.1f, maxZ = 2.f; // clang-format off @@ -148,11 +148,11 @@ int gauze_main(int argc, char** argv) return 1; } - if(!sFactory.empty()) + if (!sFactory.empty()) { factory = std::make_shared(sFactory); } - + // Check for valid models std::vector> config{ { factory->sFaceDetector, "face-detector" }, @@ -173,11 +173,11 @@ int gauze_main(int argc, char** argv) // Events will not be handled correctly. This is probably because _TSGetMainThread // was called for the first time off the main thread. - // NOTE: We can create the OpenGL context prior to AVFoundation use as a workaround + // NOTE: We can create the OpenGL context prior to AVFoundation use as a workaround auto opengl = aglet::GLContext::create(aglet::GLContext::kAuto, doWindow ? 
"hci" : "", 640, 480); #if defined(_WIN32) || defined(_WIN64) - CV_Assert(!glewInit()); -#endif + CV_Assert(!glewInit()); +#endif auto video = drishti::videoio::VideoSourceCV::create(sInput); video->setOutputFormat(drishti::videoio::VideoSourceCV::ARGB); // be explicit, fail on error @@ -190,8 +190,8 @@ int gauze_main(int argc, char** argv) logger->info("No frames available in video"); return -1; } - - if(maxZ < minZ) + + if (maxZ < minZ) { logger->error("max distance must be > min distance"); return -1; @@ -212,13 +212,13 @@ int gauze_main(int argc, char** argv) settings.faceFinderInterval = 0.f; settings.regressorCropScale = scale; settings.acfCalibration = cascCal; - settings.renderFaces = true; // *** rendering *** - settings.renderPupils = true; // *** rendering *** - settings.renderCorners = false; // *** rendering *** + settings.renderFaces = true; // *** rendering *** + settings.renderPupils = true; // *** rendering *** + settings.renderCorners = false; // *** rendering *** settings.minDetectionDistance = minZ; settings.maxDetectionDistance = maxZ; { // Create a sensor specification - if(fx == 0.f) + if (fx == 0.f) { fx = frame.image.cols; // use a sensible default guess } @@ -231,9 +231,9 @@ int gauze_main(int argc, char** argv) // Allocate the detector: auto detector = drishti::hci::FaceFinderPainter::create(factory, settings, nullptr); - detector->setLetterboxHeight(1.0); // *** rendering *** - detector->setShowMotionAxes(false); // *** rendering *** - detector->setShowDetectionScales(false); // *** rendering *** + detector->setLetterboxHeight(1.0); // *** rendering *** + detector->setShowMotionAxes(false); // *** rendering *** + detector->setShowDetectionScales(false); // *** rendering *** ogles_gpgpu::VideoSource source; ogles_gpgpu::SwizzleProc swizzle(ogles_gpgpu::SwizzleProc::kSwizzleGRAB); // kSwizzleRGBA diff --git a/src/app/qt/facefilter/CameraListener.cpp b/src/app/qt/facefilter/CameraListener.cpp index 834a3a1b..46395ac8 100644 --- a/src/app/qt/facefilter/CameraListener.cpp +++ b/src/app/qt/facefilter/CameraListener.cpp @@ -4,7 +4,7 @@ #include "drishti/core/make_unique.h" -CameraListener::CameraListener(QCamera *camera, LoggerPtr &logger, const GLVersion &glVersion) +CameraListener::CameraListener(QCamera* camera, LoggerPtr& logger, const GLVersion& glVersion) : m_camera(camera) , m_logger(logger) , m_glVersion(glVersion) @@ -19,7 +19,7 @@ void CameraListener::setUsePBO(bool flag) void CameraListener::updateCameraStatus(QCamera::Status status) { - if(status == QCamera::LoadedStatus) + if (status == QCamera::LoadedStatus) { configureCamera(); } @@ -28,7 +28,7 @@ void CameraListener::updateCameraStatus(QCamera::Status status) void CameraListener::configureCamera() { m_logger->info("Configuring camera"); - + // Create a QMLCameraManager and configure camera auto qmlCameraManager = drishti::core::make_unique(m_camera, m_logger); auto size = qmlCameraManager->configure(); @@ -38,7 +38,7 @@ void CameraListener::configureCamera() m_logger->error("Unable to configure valid camera resolution"); throw std::runtime_error("Unable to configure valid camera resolution"); } - + m_logger->info("device: {}", qmlCameraManager->getDeviceName()); m_logger->info("description: {}", qmlCameraManager->getDescription()); m_logger->info("resolution: {} {}", qmlCameraManager->getSize().width, qmlCameraManager->getSize().height); @@ -59,5 +59,4 @@ void CameraListener::configureCamera() frameHandler->setSize(qmlCameraManager->getSize()); } } - } diff --git a/src/app/qt/facefilter/CameraListener.h 
b/src/app/qt/facefilter/CameraListener.h index 2edff797..cac08a8b 100644 --- a/src/app/qt/facefilter/CameraListener.h +++ b/src/app/qt/facefilter/CameraListener.h @@ -14,25 +14,23 @@ class CameraListener : public QObject Q_OBJECT public: - using LoggerPtr = std::shared_ptr; - CameraListener(QCamera *camera, LoggerPtr &logger, const GLVersion &version); - + CameraListener(QCamera* camera, LoggerPtr& logger, const GLVersion& version); + void setUsePBO(bool flag); - + void configureCamera(); protected: - - QCamera *m_camera = nullptr; + QCamera* m_camera = nullptr; std::shared_ptr m_logger; GLVersion m_glVersion; bool m_usePBO = false; public slots: - void updateCameraStatus(QCamera::Status status); + void updateCameraStatus(QCamera::Status status); }; #endif // _facefilter_CameraListener_h_ diff --git a/src/app/qt/facefilter/FPSCalculator.cpp b/src/app/qt/facefilter/FPSCalculator.cpp index f8b4f7e6..26c4a55b 100644 --- a/src/app/qt/facefilter/FPSCalculator.cpp +++ b/src/app/qt/facefilter/FPSCalculator.cpp @@ -36,5 +36,5 @@ float FPSCalculator::fps() fps_ = (count_ > 0) ? ((fps_ * alpha_) + ((1.0 - alpha_) * fps)) : fps; last_ = now; ++count_; - return fps_; + return fps_; } diff --git a/src/app/qt/facefilter/FrameHandler.cpp b/src/app/qt/facefilter/FrameHandler.cpp index 640aecbc..9497cbdd 100644 --- a/src/app/qt/facefilter/FrameHandler.cpp +++ b/src/app/qt/facefilter/FrameHandler.cpp @@ -48,8 +48,8 @@ std::unique_ptr loadJSON(spdlog::logger& logger); FrameHandlerManager* FrameHandlerManager::m_instance = nullptr; -FrameHandlerManager::FrameHandlerManager(const std::string& name, const std::string& description, const GLVersion &glVersion) : - m_glVersion(glVersion) +FrameHandlerManager::FrameHandlerManager(const std::string& name, const std::string& description, const GLVersion& glVersion) + : m_glVersion(glVersion) { m_logger = drishti::core::Logger::create("drishti"); m_logger->info("FrameHandlerManager #################################################################"); @@ -72,7 +72,7 @@ FrameHandlerManager::FrameHandlerManager(const std::string& name, const std::str { std::string host = address["host"]; std::string port = address["port"]; - + float frequency = address["frequency"]; m_imageLogger = std::make_shared(host, port); m_imageLogger->setMaxFramesPerSecond(frequency); // throttle network traffic @@ -97,7 +97,7 @@ FrameHandlerManager::FrameHandlerManager(const std::string& name, const std::str const auto& intrinsic = sensor["intrinsic"]; cv::Size size; - const auto &sizeParams = intrinsic["size"]; + const auto& sizeParams = intrinsic["size"]; if (!sizeParams.empty()) { size.width = sizeParams["width"].get(); @@ -105,7 +105,7 @@ FrameHandlerManager::FrameHandlerManager(const std::string& name, const std::str } cv::Point2f p; - const auto &principalParams = intrinsic["principal"]; + const auto& principalParams = intrinsic["principal"]; if (!principalParams.empty()) { p.x = principalParams["x"].get(); @@ -189,7 +189,7 @@ cv::Size FrameHandlerManager::getSize() const } FrameHandlerManager* -FrameHandlerManager::get(const std::string& name, const std::string& description, const GLVersion &glVersion) +FrameHandlerManager::get(const std::string& name, const std::string& description, const GLVersion& glVersion) { if (!m_instance) { @@ -225,8 +225,6 @@ std::unique_ptr loadJSON(spdlog::logger& logger) { throw std::runtime_error("loadJSON: Can't read file"); } - + return json; } - - diff --git a/src/app/qt/facefilter/FrameHandler.h b/src/app/qt/facefilter/FrameHandler.h index 
908344e0..85b4f9f6 100644 --- a/src/app/qt/facefilter/FrameHandler.h +++ b/src/app/qt/facefilter/FrameHandler.h @@ -51,24 +51,24 @@ class FrameHandlerManager using Settings = nlohmann::json; using FrameHandler = std::function; - FrameHandlerManager(const std::string& name, const std::string& description, const GLVersion &glVersion); + FrameHandlerManager(const std::string& name, const std::string& description, const GLVersion& glVersion); ~FrameHandlerManager(); bool good() const; - static FrameHandlerManager* get(const std::string& name = {}, const std::string& description = {}, const GLVersion &glVersion = {}); + static FrameHandlerManager* get(const std::string& name = {}, const std::string& description = {}, const GLVersion& glVersion = {}); void setUsePBO(bool flag) { m_usePBO = flag; } - + bool getUsePBO() { return m_usePBO; } - + int getOrientation() const { return m_orientation; @@ -141,13 +141,12 @@ class FrameHandlerManager const Settings* getSettings() const { return m_settings.get(); } protected: - std::unique_ptr m_settings; - + DetectionParams m_detectionParams; GLVersion m_glVersion; - + bool m_usePBO = false; int m_orientation = 0; std::string m_deviceName; diff --git a/src/app/qt/facefilter/GLVersion.h b/src/app/qt/facefilter/GLVersion.h index 84c31774..b5ecfd0a 100644 --- a/src/app/qt/facefilter/GLVersion.h +++ b/src/app/qt/facefilter/GLVersion.h @@ -2,17 +2,21 @@ #define _facefilter_gl_version_h_ #if defined(major) -# undef major +#undef major #endif #if defined(minor) -# undef minor +#undef minor #endif struct GLVersion { GLVersion() = default; - GLVersion(int major, int minor) : major(major), minor(minor) {} + GLVersion(int major, int minor) + : major(major) + , minor(minor) + { + } int major = 2; int minor = 0; }; diff --git a/src/app/qt/facefilter/ImageLogger.cpp b/src/app/qt/facefilter/ImageLogger.cpp index 42236672..558922b9 100644 --- a/src/app/qt/facefilter/ImageLogger.cpp +++ b/src/app/qt/facefilter/ImageLogger.cpp @@ -31,11 +31,10 @@ DRISHTI_CORE_NAMESPACE_BEGIN struct ImageLogger::Impl { public: - using Clock = std::chrono::high_resolution_clock; using Duration = Clock::duration; using TimePoint = Clock::time_point; - + Impl(const std::string& host, const std::string& port) : host(host) , port(port) @@ -43,16 +42,16 @@ struct ImageLogger::Impl } ~Impl() = default; - - float getElapsed(const TimePoint &now) const + + float getElapsed(const TimePoint& now) const { return std::chrono::duration_cast>(now - timeOfLastImage).count(); } - - bool isTooSoon(const TimePoint &now) + + bool isTooSoon(const TimePoint& now) { - const float fps = 1.f/(getElapsed(Clock::now())+1e-6f); - if(fps < maxFramesPerSecond) + const float fps = 1.f / (getElapsed(Clock::now()) + 1e-6f); + if (fps < maxFramesPerSecond) { timeOfLastImage = now; return false; @@ -62,7 +61,7 @@ struct ImageLogger::Impl return true; } } - + std::string host; std::string port; float maxFramesPerSecond = std::numeric_limits::max(); @@ -95,11 +94,11 @@ static std::vector create(const cv::Mat& image) void ImageLogger::operator()(const cv::Mat& image) { - if(impl->isTooSoon(Impl::Clock::now())) + if (impl->isTooSoon(Impl::Clock::now())) { return; } - + // Normal boost::asio setup boost::asio::io_service ios; boost::asio::ip::tcp::resolver r{ ios }; diff --git a/src/app/qt/facefilter/ImageLogger.h b/src/app/qt/facefilter/ImageLogger.h index 3568b34d..80b63985 100644 --- a/src/app/qt/facefilter/ImageLogger.h +++ b/src/app/qt/facefilter/ImageLogger.h @@ -29,6 +29,7 @@ class ImageLogger void 
setMaxFramesPerSecond(float value); float getMaxFramesPerSecond() const; void operator()(const cv::Mat& image); + protected: struct Impl; std::unique_ptr impl; diff --git a/src/app/qt/facefilter/QMLCameraManager.cpp b/src/app/qt/facefilter/QMLCameraManager.cpp index 02e7019d..9f938c37 100644 --- a/src/app/qt/facefilter/QMLCameraManager.cpp +++ b/src/app/qt/facefilter/QMLCameraManager.cpp @@ -78,25 +78,24 @@ cv::Size QMLCameraManager::configureCamera() #else desiredFormats = { QVideoFrame::Format_ARGB32 }; #endif - + auto viewfinderSettings = m_camera->supportedViewfinderSettings(); QCameraImageProcessing* processing = m_camera->imageProcessing(); if (processing->isAvailable()) { processing->setWhiteBalanceMode(QCameraImageProcessing::WhiteBalanceAuto); - } + } auto pExposure = m_camera->exposure(); if (pExposure) { - std::vector exposureModes = - { + std::vector exposureModes = { QCameraExposure::ExposureAuto, QCameraExposure::ExposurePortrait, QCameraExposure::ExposureBacklight }; - for (const auto &mode : exposureModes) + for (const auto& mode : exposureModes) { if (pExposure->isExposureModeSupported(mode)) { @@ -104,22 +103,21 @@ cv::Size QMLCameraManager::configureCamera() break; } } - - std::vector meteringModes = - { - QCameraExposure::MeteringSpot, + + std::vector meteringModes = { + QCameraExposure::MeteringSpot, QCameraExposure::MeteringMatrix, QCameraExposure::MeteringAverage }; - for (const auto &mode : meteringModes) + for (const auto& mode : meteringModes) { if (pExposure->isMeteringModeSupported(mode)) { m_logger->info("Exposure metering mode supported: {}", int(mode)); pExposure->setMeteringMode(mode); - if(mode == QCameraExposure::MeteringSpot) + if (mode == QCameraExposure::MeteringSpot) { - pExposure->setSpotMeteringPoint({0.5, 0.5}); + pExposure->setSpotMeteringPoint({ 0.5, 0.5 }); } break; } @@ -132,7 +130,7 @@ cv::Size QMLCameraManager::configureCamera() m_logger->info("Image processing: {}", int(pImageProcessing->isAvailable())); pImageProcessing->setWhiteBalanceMode(QCameraImageProcessing::WhiteBalanceAuto); } - + m_logger->info("# of settings: {}", viewfinderSettings.size()); m_logger->warn("Limiting video resolution to <= 2048"); diff --git a/src/app/qt/facefilter/QMLCameraManager.h b/src/app/qt/facefilter/QMLCameraManager.h index d79f0800..b540d93c 100644 --- a/src/app/qt/facefilter/QMLCameraManager.h +++ b/src/app/qt/facefilter/QMLCameraManager.h @@ -31,8 +31,9 @@ class QMLCameraManager virtual int getOrientation() const; virtual cv::Size getSize() const; virtual cv::Size configure(); + protected: - virtual cv::Size configureCamera(); + virtual cv::Size configureCamera(); cv::Size m_size; QCamera* m_camera = nullptr; diff --git a/src/app/qt/facefilter/QtFaceDetectorFactory.cpp b/src/app/qt/facefilter/QtFaceDetectorFactory.cpp index 6cc57aab..08b786c1 100644 --- a/src/app/qt/facefilter/QtFaceDetectorFactory.cpp +++ b/src/app/qt/facefilter/QtFaceDetectorFactory.cpp @@ -84,7 +84,7 @@ QtFaceDetectorFactory::QtFaceDetectorFactory() return false; }; // clang-format on - + load("drishti_assets.json", loader); } diff --git a/src/app/qt/facefilter/QtFaceDetectorFactoryCereal.cpp b/src/app/qt/facefilter/QtFaceDetectorFactoryCereal.cpp index fa4d1ff7..e0cc0431 100644 --- a/src/app/qt/facefilter/QtFaceDetectorFactoryCereal.cpp +++ b/src/app/qt/facefilter/QtFaceDetectorFactoryCereal.cpp @@ -22,7 +22,7 @@ drishti::face::FaceModel QtFaceDetectorFactory::getMeanFace() { drishti::face::FaceModel face; - + // clang-format off LoaderFunction loader = [&](std::istream& is, const 
std::string& hint) { diff --git a/src/app/qt/facefilter/VideoFilterRunnable.cpp b/src/app/qt/facefilter/VideoFilterRunnable.cpp index 208715b7..7c78df0b 100644 --- a/src/app/qt/facefilter/VideoFilterRunnable.cpp +++ b/src/app/qt/facefilter/VideoFilterRunnable.cpp @@ -258,7 +258,7 @@ GLuint VideoFilterRunnable::createTextureForFrame(QVideoFrame* input) { m_pImpl->m_logger->info("VideoFilterRunnable::createTextureForFrame()"); m_pImpl->m_logger->info("OpenGL: {}", glGetString(GL_VERSION)); - + GLuint outTexture = detectFaces(input); return outTexture; } diff --git a/src/app/qt/facefilter/main.cpp b/src/app/qt/facefilter/main.cpp index 67c6b7ac..824b2d13 100644 --- a/src/app/qt/facefilter/main.cpp +++ b/src/app/qt/facefilter/main.cpp @@ -51,7 +51,7 @@ #include #if QT_VERSION >= QT_VERSION_CHECK(5, 6, 0) -# include +#include #endif // } @@ -74,13 +74,13 @@ static void printResources(); std::shared_ptr<CameraListener> -createCameraListener(QQuickItem* root, std::shared_ptr &logger, const GLVersion &glVersion) +createCameraListener(QQuickItem* root, std::shared_ptr& logger, const GLVersion& glVersion) { // Manage the camera: QObject* qmlCamera = root->findChild<QObject*>("CameraObject"); assert(qmlCamera != nullptr); CV_Assert(qmlCamera != nullptr); - + QCamera* camera = qvariant_cast<QCamera*>(qmlCamera->property("mediaObject")); assert(camera != nullptr); CV_Assert(camera != nullptr); @@ -88,7 +88,7 @@ createCameraListener(QQuickItem* root, std::shared_ptr &logger, return std::make_shared<CameraListener>(camera, logger, glVersion); } -static QSurfaceFormat createGLESSurfaceFormat(const GLVersion &glVersion) +static QSurfaceFormat createGLESSurfaceFormat(const GLVersion& glVersion) { QSurfaceFormat format; format.setSamples(4); @@ -107,9 +107,9 @@ int facefilter_main(int argc, char** argv, std::shared_ptr& logg #endif // https://stackoverflow.com/questions/40385482/why-cant-i-use-opengl-es-3-0-in-qt - GLVersion glVersion { 2, 0 }; + GLVersion glVersion{ 2, 0 }; bool usePBO = false; - + #if defined(QT_OPENGL_ES_3) && defined(DRISHTI_OPENGL_ES3) glVersion.major = 3; usePBO = true; @@ -121,7 +121,7 @@ int facefilter_main(int argc, char** argv, std::shared_ptr& logg logger->info("Start"); printResources(); - + QGuiApplication app(argc, argv); qmlRegisterType<VideoFilter>("facefilter.test", 1, 0, "VideoFilter"); @@ -153,19 +153,19 @@ int facefilter_main(int argc, char** argv, std::shared_ptr& logg // Create a CameraListener in order to trigger instantiation of classes // when the camera is in state QCamera::LoadedStatus. The QML Android // camera does not respond to manual camera->load() calls.
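// A minimal sketch of the wiring this comment assumes (the connect call is
// not part of this hunk; "camera" and "listener" name the QCamera and
// CameraListener objects involved, and the slot is
// CameraListener::updateCameraStatus from earlier in this patch):
//
//   QObject::connect(camera, &QCamera::statusChanged,
//                    listener.get(), &CameraListener::updateCameraStatus);
//
// updateCameraStatus() forwards QCamera::LoadedStatus to configureCamera(),
// which is why iOS, which never emits that status, calls it directly below.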
- // * QMLCameraManager : for camera customizations + // * QMLCameraManager : for camera customizations // * FrameHandlerManager : for application specific settings (depends on camera "name") auto listener = createCameraListener(root, logger, glVersion); listener->setUsePBO(usePBO); - + #if defined(Q_OS_IOS) // On iOS we do not receive the // * iOS does not receive QCamera::LoadedStatus, but we can configure the camera manually // * Android does receive QCamera::LoadedStatus,so we wait fot the signal - listener->configureCamera(); + listener->configureCamera(); #endif - - view.showFullScreen(); + + view.showFullScreen(); return app.exec(); } @@ -210,4 +210,3 @@ static void printResources() qDebug() << it.next(); } } - diff --git a/src/lib/drishti/acf/ACFField.h b/src/lib/drishti/acf/ACFField.h index 2718117d..523a64c9 100644 --- a/src/lib/drishti/acf/ACFField.h +++ b/src/lib/drishti/acf/ACFField.h @@ -58,7 +58,7 @@ struct Field { set(df.name, df.has, df.isLeaf, df.value); } - } + } void set(const std::string& name_, bool has_, bool isLeaf_, const T& value_) { diff --git a/src/lib/drishti/acf/GPUACF.cpp b/src/lib/drishti/acf/GPUACF.cpp index 8623d9d0..fd2d6979 100644 --- a/src/lib/drishti/acf/GPUACF.cpp +++ b/src/lib/drishti/acf/GPUACF.cpp @@ -329,7 +329,7 @@ struct ACF::Impl status |= m_doFlow && ~m_hasFlowOutput; return status; } - + std::shared_ptr m_logger; }; @@ -519,7 +519,7 @@ void ACF::operator()(const FrameInput& frame) VideoSource::operator()(frame); // call main method // Start the transfer asynchronously here: - if(impl->m_usePBO) + if (impl->m_usePBO) { beginTransfer(); } @@ -538,7 +538,7 @@ void ACF::beginTransfer() impl->reduceGradHistProcBSmooth->getResultData(nullptr); } break; - + case kM012345: { impl->mergeProcLG56->getResultData(nullptr); @@ -557,7 +557,7 @@ void ACF::beginTransfer() { impl->luvTransposeOut->getResultData(nullptr); } - + if (impl->m_doFlow) { impl->flowBgraInterface->getResultData(nullptr); @@ -850,7 +850,7 @@ cv::Mat ACF::getChannelsImpl() impl->m_logger->info("TIMING:{}:{};total={}", tag, ss.str(), elapsed); } }; // clang-format on - + if (impl->needsTextures()) { { // Create a scope for glFlush() timing @@ -874,7 +874,7 @@ cv::Mat ACF::getChannelsImpl() ACF::ChannelSpecification planeIndex; const auto& rgba = impl->m_rgba; // alias - + cv::Mat flow; MatP acf, gray, luv; @@ -902,7 +902,7 @@ cv::Mat ACF::getChannelsImpl() PlaneInfoVec luvInfo{ { luv[0], rgba[0], alpha }, { luv[1], rgba[1], alpha }, { luv[2], rgba[2], alpha } }; planeIndex.emplace_back(luvInfo, impl->luvTransposeOut.get()); } - + if (impl->m_doFlow) { const auto flowSize = impl->flowBgraInterface->getOutFrameSize(); @@ -961,7 +961,7 @@ cv::Mat ACF::getChannelsImpl() impl->m_luvPlanar = luv; impl->m_hasLuvOutput = true; } - + if (impl->m_doFlow) { impl->m_flow = flow; diff --git a/src/lib/drishti/acf/GPUACF.h b/src/lib/drishti/acf/GPUACF.h index 9cfb0013..9e760bfd 100644 --- a/src/lib/drishti/acf/GPUACF.h +++ b/src/lib/drishti/acf/GPUACF.h @@ -54,7 +54,7 @@ struct ACF : public ogles_gpgpu::VideoSource void setUsePBO(bool flag); bool getUsePBO() const; - + void setLogger(std::shared_ptr& logger); bool getChannelStatus(); bool getFlowStatus(); diff --git a/src/lib/drishti/acf/ut/test-drishti-acf.cpp b/src/lib/drishti/acf/ut/test-drishti-acf.cpp index 39c81839..0eb2b7b8 100644 --- a/src/lib/drishti/acf/ut/test-drishti-acf.cpp +++ b/src/lib/drishti/acf/ut/test-drishti-acf.cpp @@ -33,12 +33,6 @@ #include #include -// #!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#! 
-// #!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#! -// #!#!#!#!#!#!#!#!#!#!#!#!#!#!# Work in progress !#!#!#!#!#!#!#!#!#!#!#!#! -// #!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#! -// #!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#! - #include #include "drishti/core/drawing.h" @@ -123,8 +117,8 @@ class ACFTest : public ::testing::Test m_context = aglet::GLContext::create(aglet::GLContext::kAuto); CV_Assert(m_context && (*m_context)); #if defined(_WIN32) || defined(_WIN64) - CV_Assert(!glewInit()); -#endif + CV_Assert(!glewInit()); +#endif #endif } @@ -215,12 +209,12 @@ class ACFTest : public ::testing::Test static const bool doCorners = false; ogles_gpgpu::Size2d inputSize(image.cols, image.rows); - m_acf = std::make_shared(nullptr, inputSize, sizes, ogles_gpgpu::ACF::kLUVM012345, doGrayscale, doCorners, false); + m_acf = std::make_shared(nullptr, inputSize, sizes, ogles_gpgpu::ACF::kM012345, doGrayscale, doCorners, false); m_acf->setRotation(0); cv::Mat input = image; #if DRISHTI_ACF_TEST_WARM_UP_GPU - for (int i = 0; i < 10; i++) + for (int i = 0; i < 3; i++) { (*m_acf)({ input.cols, input.rows }, input.ptr(), true, 0, DFLT_TEXTURE_FORMAT); } @@ -228,23 +222,6 @@ class ACFTest : public ::testing::Test (*m_acf)({ input.cols, input.rows }, input.ptr(), true, 0, DFLT_TEXTURE_FORMAT); m_acf->fill(Pgpu, Pcpu); - - { - // This code block is a place holder for 7 channel ACF output, which conveniently fits in 2 textures - // for faster transfers on low performing Android devices. Currently there is no 7 channel ACF - // classifier/detector, so this is used as a place holder to illustrate the raw channel extraction - // and pyramid formatting until equivalent CPU formatting is in place. - auto acf7 = std::make_shared(nullptr, inputSize, sizes, ogles_gpgpu::ACF::kM012345, doGrayscale, doCorners, false); - (*acf7)({ input.cols, input.rows }, input.ptr(), true, 0, DFLT_TEXTURE_FORMAT); - - drishti::acf::Detector::Pyramid Pgpu7; - acf7->fill(Pgpu7, Pcpu); - - //cv::imshow("Pgpu7", draw(Pgpu7); - //cv::imshow("LM012345", acf7->getChannels()); - //cv::imshow("LUVM012345", m_acf->getChannels()); - //cv::waitKey(0); - } } std::shared_ptr m_context; diff --git a/src/lib/drishti/core/Field.h b/src/lib/drishti/core/Field.h index defb7ff0..1999c2da 100644 --- a/src/lib/drishti/core/Field.h +++ b/src/lib/drishti/core/Field.h @@ -24,8 +24,15 @@ DRISHTI_CORE_NAMESPACE_BEGIN template struct Field { - Field() : has(false) {} - Field(const T& t) : has(true), value(t) {} + Field() + : has(false) + { + } + Field(const T& t) + : has(true) + , value(t) + { + } ~Field() = default; Field& operator=(const T& src) @@ -48,7 +55,7 @@ struct Field has = has_; value = value_; } - + void clear() { has = false; @@ -93,7 +100,7 @@ struct Field ar& value; } - bool has = false; + bool has = false; T value; }; diff --git a/src/lib/drishti/core/ImageView.h b/src/lib/drishti/core/ImageView.h index 2bc19b8d..6e339202 100644 --- a/src/lib/drishti/core/ImageView.h +++ b/src/lib/drishti/core/ImageView.h @@ -18,24 +18,22 @@ DRISHTI_CORE_NAMESPACE_BEGIN struct Texture { Texture() = default; - Texture(const cv::Size &size, std::uint32_t texId) + Texture(const cv::Size& size, std::uint32_t texId) : size(size) , texId(texId) { - } - cv::Size size; //! Teture dimensions in pixels - std::uint32_t texId = 0; //! Identifier for the texture + cv::Size size; //! Teture dimensions in pixels + std::uint32_t texId = 0; //! 
Identifier for the texture }; - + struct ImageView { ImageView() = default; - ImageView(const Texture &texture, const cv::Mat4b &image) + ImageView(const Texture& texture, const cv::Mat4b& image) : texture(texture) , image(image) { - } Texture texture; //! Texture descriptor cv::Mat4b image; //! Image descriptor diff --git a/src/lib/drishti/core/Shape.cpp b/src/lib/drishti/core/Shape.cpp index 6311e286..37e27ea9 100644 --- a/src/lib/drishti/core/Shape.cpp +++ b/src/lib/drishti/core/Shape.cpp @@ -111,7 +111,7 @@ void fitSpline(const PointVec& controlPoints, PointVec& interpolatedPoints, int { float u = float(i) / count; PointType p = spline(u); - interpolatedPoints[i] = cv::Point2f(p(0,0), p(1,0)); + interpolatedPoints[i] = cv::Point2f(p(0, 0), p(1, 0)); } } } diff --git a/src/lib/drishti/core/drishti_stdlib_string.h b/src/lib/drishti/core/drishti_stdlib_string.h index 4b95a8c4..a5ea9e8e 100644 --- a/src/lib/drishti/core/drishti_stdlib_string.h +++ b/src/lib/drishti/core/drishti_stdlib_string.h @@ -81,15 +81,15 @@ inline long double stold(const std::string& s) } // strto* -inline unsigned long strtoll( const char *str, char **str_end, int base) +inline unsigned long strtoll(const char* str, char** str_end, int base) { return ::strtoll(str, str_end, base); } -inline unsigned long long strtoull( const char *str, char **str_end, int base) +inline unsigned long long strtoull(const char* str, char** str_end, int base) { return ::strtoull(str, str_end, base); } -inline float strtof(const char *str, char **str_end) +inline float strtof(const char* str, char** str_end) { return ::strtof(str, str_end); } diff --git a/src/lib/drishti/core/hungarian.h b/src/lib/drishti/core/hungarian.h index 091242e6..0c136010 100644 --- a/src/lib/drishti/core/hungarian.h +++ b/src/lib/drishti/core/hungarian.h @@ -51,7 +51,6 @@ #ifndef __drishti_core_hungarian_h__ #define __drishti_core_hungarian_h__ - #include #include diff --git a/src/lib/drishti/core/ut/test-drishti-core.cpp b/src/lib/drishti/core/ut/test-drishti-core.cpp index 44867e66..da215312 100644 --- a/src/lib/drishti/core/ut/test-drishti-core.cpp +++ b/src/lib/drishti/core/ut/test-drishti-core.cpp @@ -23,20 +23,18 @@ BEGIN_EMPTY_NAMESPACE TEST(HungarianAssignment, hungarian) { - std::vector points1 - { + std::vector points1{ { 1.f, 1.f }, { 2.f, 2.f }, { 3.f, 3.f } }; - std::vector points2 - { + std::vector points2{ { 0.0f, 1.0f }, { 2.5f, 1.5f }, { 3.0f, 2.5f } }; - + std::unordered_map direct_assignment; std::unordered_map reverse_assignment; std::vector> C(points1.size(), std::vector(points2.size(), std::numeric_limits::max())); @@ -48,11 +46,11 @@ TEST(HungarianAssignment, hungarian) C[i][j] = cv::norm(points1[i] - points2[j]); } } - + drishti::core::MinimizeLinearAssignment(C, direct_assignment, reverse_assignment); - + ASSERT_EQ(direct_assignment.size(), 3); - for(const auto &m : direct_assignment) + for (const auto& m : direct_assignment) { ASSERT_EQ(m.first, m.second); } @@ -139,4 +137,3 @@ TEST(ChannelConversion, convert_rem_16) } END_EMPTY_NAMESPACE - diff --git a/src/lib/drishti/drishti/Array.hpp b/src/lib/drishti/drishti/Array.hpp index 9acb11ad..2a44fee1 100644 --- a/src/lib/drishti/drishti/Array.hpp +++ b/src/lib/drishti/drishti/Array.hpp @@ -178,8 +178,8 @@ class Array void resize(std::size_t size) { size_ = std::min(N, size); } std::size_t size() const { return size_; } - - constexpr std::size_t limit() const {return N; }; + + constexpr std::size_t limit() const { return N; }; T& operator[](std::size_t index) { return data_[index]; } const 
T& operator[](std::size_t index) const { return data_[index]; } diff --git a/src/lib/drishti/drishti/Context.cpp b/src/lib/drishti/drishti/Context.cpp index 19926826..2c940405 100644 --- a/src/lib/drishti/drishti/Context.cpp +++ b/src/lib/drishti/drishti/Context.cpp @@ -30,7 +30,6 @@ Context::Context(drishti::sdk::SensorModel& sensor) Context::~Context() = default; - void Context::setDoSingleFace(bool flag) { impl->doSingleFace = flag; @@ -40,7 +39,7 @@ bool Context::getDoSingleFace() const { return impl->doSingleFace; } - + void Context::setMinDetectionDistance(float value) { impl->minDetectionDistance = value; diff --git a/src/lib/drishti/drishti/Context.hpp b/src/lib/drishti/drishti/Context.hpp index 3f81cf72..04886bbc 100644 --- a/src/lib/drishti/drishti/Context.hpp +++ b/src/lib/drishti/drishti/Context.hpp @@ -27,9 +27,8 @@ _DRISHTI_SDK_BEGIN class DRISHTI_EXPORT Context { public: - struct Impl; - + Context(drishti::sdk::SensorModel& sensor); ~Context(); @@ -37,7 +36,7 @@ class DRISHTI_EXPORT Context void setDoSingleFace(bool flag); bool getDoSingleFace() const; - + void setMinDetectionDistance(float value); float getMinDetectionDistance() const; @@ -52,12 +51,12 @@ class DRISHTI_EXPORT Context void setRegressorCropScale(float value); float getRegressorCropScale() const; - + void setMinTrackHits(int hits); int getMinTrackHits() const; void setMaxTrackMisses(int hits); - int getMaxTrackMisses() const; + int getMaxTrackMisses() const; void setMinFaceSeparation(float value); float getMinFaceSeparation() const; @@ -66,7 +65,6 @@ class DRISHTI_EXPORT Context bool getDoOptimizedPipeline() const; protected: - std::unique_ptr impl; }; diff --git a/src/lib/drishti/drishti/ContextImpl.h b/src/lib/drishti/drishti/ContextImpl.h index 6bc547b1..c8593365 100644 --- a/src/lib/drishti/drishti/ContextImpl.h +++ b/src/lib/drishti/drishti/ContextImpl.h @@ -42,7 +42,7 @@ struct Context::Impl int maxTrackMisses = 1; float minFaceSeparation = 0.125f; bool doOptimizedPipeline = false; - + std::shared_ptr sensor; std::shared_ptr logger; std::shared_ptr> threads; diff --git a/src/lib/drishti/drishti/Face.hpp b/src/lib/drishti/drishti/Face.hpp index ca4b6e24..ff7d2e66 100644 --- a/src/lib/drishti/drishti/Face.hpp +++ b/src/lib/drishti/drishti/Face.hpp @@ -33,17 +33,17 @@ struct DRISHTI_EXPORT Face * A region of interest, typically provided by an object detector. */ drishti::sdk::Recti roi; - + /** * Detailed eye models for the subject's right and left eyes. */ drishti::sdk::Array eyes; - + /** * Face landmarks provided for the operative annotation style. */ drishti::sdk::Array landmarks; - + /** * Estimated 3D position for point between the eyes. 
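 *
 * An illustrative sketch of reading the populated struct (not part of this
 * header; the eye ordering follows the FaceMonitorAdapter conversion later
 * in this patch, which fills eyes[0] from eyeFullR and eyes[1] from
 * eyeFullL):
 *
 *   if (face.eyes.size() == 2)
 *   {
 *       const auto& rightEye = face.eyes[0]; // subject's right eye
 *       const auto& leftEye = face.eyes[1];  // subject's left eye
 *   }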
*/ diff --git a/src/lib/drishti/drishti/FaceMonitorAdapter.h b/src/lib/drishti/drishti/FaceMonitorAdapter.h index eca42ac7..f10277ba 100644 --- a/src/lib/drishti/drishti/FaceMonitorAdapter.h +++ b/src/lib/drishti/drishti/FaceMonitorAdapter.h @@ -20,20 +20,19 @@ _DRISHTI_SDK_BEGIN - // Maintain lightweight inline conversions in private header for internal use inline drishti::sdk::Face convert(const drishti::face::FaceModel& model) { drishti::sdk::Face f; - if(model.eyeFullR.has && model.eyeFullL.has) + if (model.eyeFullR.has && model.eyeFullL.has) { f.eyes.resize(2); f.eyes[0] = drishti::sdk::convert(*model.eyeFullR); f.eyes[1] = drishti::sdk::convert(*model.eyeFullL); } - if(model.points.has) + if (model.points.has) { f.landmarks = drishti::sdk::cvToDrishti(*model.points); } @@ -63,9 +62,9 @@ struct FaceMonitorAdapter : public drishti::hci::FaceMonitor public: using HighResolutionClock = std::chrono::high_resolution_clock; using TimePoint = HighResolutionClock::time_point; // ; - using Faces = std::vector; - - FaceMonitorAdapter(drishti_face_tracker_t& table, int n=std::numeric_limits::max()) + using Faces = std::vector; + + FaceMonitorAdapter(drishti_face_tracker_t& table, int n = std::numeric_limits::max()) : m_start(HighResolutionClock::now()) , m_table(table) , m_n(n) @@ -73,19 +72,19 @@ struct FaceMonitorAdapter : public drishti::hci::FaceMonitor } ~FaceMonitorAdapter() = default; - + virtual Request request(const Faces& faces, const TimePoint& timeStamp) { double elapsed = std::chrono::duration_cast>(timeStamp - m_start).count(); - + drishti_face_tracker_result_t result; convert(faces, result.faceModels); - + auto request = m_table.update(m_table.context, result, elapsed); - - return Request{request.n, request.getImage, request.getTexture}; + + return Request{ request.n, request.getImage, request.getTexture }; } - + virtual void grab(const std::vector& frames, bool isInitialized) { // Populate public API buffer using public SDK wrapper types w/ shallow copy: @@ -96,7 +95,7 @@ struct FaceMonitorAdapter : public drishti::hci::FaceMonitor // Copy the full frame "face" image and metadata: convert(frames[i].image, results[i].image); convert(frames[i].faceModels, results[i].faceModels); - + // Copy the eye images and metadata: convert(frames[i].eyes, results[i].eyes); results[i].eyeModels.resize(2); @@ -113,8 +112,7 @@ struct FaceMonitorAdapter : public drishti::hci::FaceMonitor } protected: - - static void convert(const Faces& facesIn, drishti::sdk::Array &facesOut) + static void convert(const Faces& facesIn, drishti::sdk::Array& facesOut) { facesOut.resize(std::min(facesOut.limit(), facesIn.size())); for (std::size_t i = 0; i < facesOut.size(); i++) @@ -122,13 +120,13 @@ struct FaceMonitorAdapter : public drishti::hci::FaceMonitor facesOut[i] = drishti::sdk::convert(facesIn[i]); } } - - static drishti::sdk::Texture convert(const core::Texture &texture) + + static drishti::sdk::Texture convert(const core::Texture& texture) { - return {{texture.size.width, texture.size.height}, texture.texId }; + return { { texture.size.width, texture.size.height }, texture.texId }; } - - static void convert(const core::ImageView &src, drishti_image_tex_t &dst) + + static void convert(const core::ImageView& src, drishti_image_tex_t& dst) { dst.texture = convert(src.texture); if (!src.image.empty()) @@ -136,10 +134,10 @@ struct FaceMonitorAdapter : public drishti::hci::FaceMonitor dst.image = cvToDrishti(src.image); } } - - TimePoint m_start; //! 
Timestamp for the start of tracking + + TimePoint m_start; //! Timestamp for the start of tracking drishti_face_tracker_t m_table; //! Table of callbacks for face tracker output - int m_n = 1; //! Number of frames to request for each callback + int m_n = 1; //! Number of frames to request for each callback }; _DRISHTI_SDK_END diff --git a/src/lib/drishti/drishti/FaceTracker.hpp b/src/lib/drishti/drishti/FaceTracker.hpp index 1335dc08..f0b93fc7 100644 --- a/src/lib/drishti/drishti/FaceTracker.hpp +++ b/src/lib/drishti/drishti/FaceTracker.hpp @@ -51,7 +51,7 @@ struct drishti_face_tracker_result_t * The image description (memory and/or OpenGL texture) */ drishti_image_tex_t image; - + /** * Face models for the current frame. */ @@ -61,7 +61,7 @@ struct drishti_face_tracker_result_t * A filtered eye image pair (subject's right and left eyes) for the nearest face in the scene. */ drishti_image_tex_t eyes; // eye pair image [ left | right ] - + /** * Detailed eye models corresponding to the filtered eye pair image. */ @@ -87,22 +87,21 @@ typedef struct drishti_image * Image width in pixels */ std::size_t width; - + /** * Image height in pixels */ std::size_t height; - + /** * Number of channels */ std::size_t channels; - + /** * Row stride in bytes */ std::size_t stride; - } drishti_image_t; @@ -117,17 +116,17 @@ typedef struct drishti_request * Retrieve the last N frames. */ int n; - + /** * Get frames in user memory. */ bool getImage; - + /** * Get OpenGL textures. */ bool getTexture; - + } drishti_request_t; /** @@ -193,19 +192,19 @@ typedef struct drishti_face_tracker * A pointer to allocated FaceTracker state. */ void* context; - + /** * A callback containing a list of all detected faces (if any) for each frame. * Note: This will be called for every frame (after an initialization period) * even if no faces are detected in the current frame. */ drishti_face_tracker_update_t update; - + /** * A callback returning frames requested by the update function. */ drishti_face_tracker_callback_t callback; - + /** * A callback for memory allocation. */ @@ -248,7 +247,7 @@ class DRISHTI_EXPORT FaceTracker FaceTracker(const FaceTracker&) = delete; FaceTracker& operator=(const FaceTracker&) = delete; FaceTracker(FaceTracker&&) = delete; - + /** * Destructor */ @@ -270,7 +269,7 @@ class DRISHTI_EXPORT FaceTracker * Return true if object was successfully allocated. */ bool good() const; - + /** * Returns true if object was successfully allocated.
*/ @@ -284,7 +283,6 @@ class DRISHTI_EXPORT FaceTracker void add(drishti_face_tracker_t& table); protected: - struct Impl; std::unique_ptr m_impl; }; diff --git a/src/lib/drishti/drishti/Image.hpp b/src/lib/drishti/drishti/Image.hpp index f20756ed..82f91840 100644 --- a/src/lib/drishti/drishti/Image.hpp +++ b/src/lib/drishti/drishti/Image.hpp @@ -87,20 +87,20 @@ template struct Matrix { Matrix() {} - Matrix(const Matrix &src) + Matrix(const Matrix& src) { for (int y = 0; y < rowDim; y++) { - for (int x = 0; x< colDim; x++) + for (int x = 0; x < colDim; x++) { - data[y][x] = src(y,x); + data[y][x] = src(y, x); } } } int rows() const { return rowDim; } int cols() const { return colDim; } - + T& operator()(int y, int x) { return data[y][x]; } const T& operator()(int y, int x) const { return data[y][x]; } @@ -111,13 +111,13 @@ struct Matrix { for (int x = 0; x < 3; x++) { - I(y,x) = 0.f; + I(y, x) = 0.f; } - I(y,y) = 1.f; + I(y, y) = 1.f; } return I; } - + T data[rowDim][colDim]; }; @@ -163,13 +163,13 @@ typedef Rect Rectf; struct DRISHTI_EXPORT Texture { Texture() = default; - Texture(const Vec2i &size, std::uint32_t texId) + Texture(const Vec2i& size, std::uint32_t texId) : size(size) , texId(texId) { // copy } - + Vec2i size; std::uint32_t texId; }; diff --git a/src/lib/drishti/drishti/Sensor.cpp b/src/lib/drishti/drishti/Sensor.cpp index 13e1cdb6..f15818d9 100644 --- a/src/lib/drishti/drishti/Sensor.cpp +++ b/src/lib/drishti/drishti/Sensor.cpp @@ -35,7 +35,7 @@ SensorModel::Intrinsic::Intrinsic(const Vec2f& c, float fx, const Vec2i& size) { } -SensorModel::Extrinsic::Extrinsic(const Matrix33f &R) +SensorModel::Extrinsic::Extrinsic(const Matrix33f& R) : m_R(R) { } @@ -53,11 +53,11 @@ SensorModel::SensorModel(const Intrinsic& intrinsic, const Extrinsic& extrinsic) { for (int x = 0; x < 3; x++) { - R(y,x) = extrinsic.m_R(y,x); + R(y, x) = extrinsic.m_R(y, x); } } drishti::sensor::SensorModel::Extrinsic extrinsic_(R); - + impl = make_unique(intrinsic_, extrinsic_); } diff --git a/src/lib/drishti/drishti/Sensor.hpp b/src/lib/drishti/drishti/Sensor.hpp index 5543c491..1d7d89d9 100644 --- a/src/lib/drishti/drishti/Sensor.hpp +++ b/src/lib/drishti/drishti/Sensor.hpp @@ -34,7 +34,7 @@ class DRISHTI_EXPORT SensorModel // ### Extrinsic camera parameters: struct Extrinsic { - Extrinsic(const Matrix33f &R); + Extrinsic(const Matrix33f& R); Matrix33f m_R; }; @@ -42,19 +42,18 @@ class DRISHTI_EXPORT SensorModel ~SensorModel(); struct Impl; - - std::unique_ptr &getImpl() + + std::unique_ptr& getImpl() { return impl; } - const std::unique_ptr &getImpl() const + const std::unique_ptr& getImpl() const { return impl; - } + } protected: - std::unique_ptr impl; }; diff --git a/src/lib/drishti/drishti/SensorImpl.h b/src/lib/drishti/drishti/SensorImpl.h index bbf41929..8a2886f2 100644 --- a/src/lib/drishti/drishti/SensorImpl.h +++ b/src/lib/drishti/drishti/SensorImpl.h @@ -18,7 +18,7 @@ _DRISHTI_SDK_BEGIN struct SensorModel::Impl { - Impl(const sensor::SensorModel::Intrinsic &intrinsic, const sensor::SensorModel::Extrinsic &extrinsic) + Impl(const sensor::SensorModel::Intrinsic& intrinsic, const sensor::SensorModel::Extrinsic& extrinsic) { sensor = std::make_shared(intrinsic, extrinsic); } diff --git a/src/lib/drishti/drishti/ut/test-EyeSegmenter.cpp b/src/lib/drishti/drishti/ut/test-EyeSegmenter.cpp index 7a876059..28b2a93b 100644 --- a/src/lib/drishti/drishti/ut/test-EyeSegmenter.cpp +++ b/src/lib/drishti/drishti/ut/test-EyeSegmenter.cpp @@ -109,6 +109,8 @@ class EyeSegmenterTest : public ::testing::Test bool 
isRight; }; + using EntryPair = std::pair; + // Setup EyeSegmenterTest() { @@ -144,24 +146,33 @@ class EyeSegmenterTest : public ::testing::Test cv::Mat padded; cv::Rect roi({ 0, 0 }, image.size()); - padToAspectRatio(image, padded, 4.0 / 3.0); + padToAspectRatio(image, padded, m_eyeSegmenter->getRequiredAspectRatio()); assert(!padded.empty()); - // Next create each possible size from 0 to 1000: - for (int width = 0; width < 512; width++) + std::vector widths = { 0 }; +#if defined(GAUZE_ANDROID_USE_EMULATOR) + for (int width = 16; width <= 256; width *= 2) + { + widths.push_back(width); + } +#else + for (int width = 16; width <= 256; width++) + { + widths.push_back(width); + } +#endif + for (const auto& width : widths) { cv::Mat resized; if (width > 0) { - int height(float(width) / m_eyeSegmenter->getRequiredAspectRatio() + 0.5f); + int height = static_cast(static_cast(width) / m_eyeSegmenter->getRequiredAspectRatio() + 0.5f); cv::resize(padded, resized, { width, height }, width); } - { - // Create right image - Entry entry{ drishti::sdk::cvToDrishti(resized), resized, true }; - m_images.emplace_back(entry); - } + // Create right image + Entry entry{ drishti::sdk::cvToDrishti(resized), resized, true }; + m_images[width] = entry; } } @@ -182,15 +193,20 @@ class EyeSegmenterTest : public ::testing::Test entry.storage.create(rows, cols, CV_8UC3); entry.image = drishti::sdk::cvToDrishti(entry.storage); entry.isRight = true; - entry.storage = cv::Scalar(color[0], color[1], color[2]); } + std::map::iterator getFirstValid() + { + const auto isok = [&](const EntryPair& e) { return e.first >= m_eyeSegmenter->getMinWidth(); }; + return std::find_if(m_images.begin(), m_images.end(), isok); + } + // Objects declared here can be used by all tests in the test case for EyeSegmenter. 
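// For reference, the sparse width sampling behind m_images below (a sketch
// of the pattern introduced by this patch; "makeEntry" is a hypothetical
// stand-in for the pad/resize logic in loadImages() above):
//
//   std::map<int, Entry> images;
//   images[0] = Entry{}; // the empty-image case exercised by ImageEmpty
//   #if defined(GAUZE_ANDROID_USE_EMULATOR)
//   for (int w = 16; w <= 256; w *= 2) // sparse: 16, 32, 64, 128, 256
//   #else
//   for (int w = 16; w <= 256; w++) // dense: every width
//   #endif
//   {
//       images[w] = makeEntry(w);
//   }
//
// Because std::map iterates in ascending key order, getFirstValid() can
// simply scan for the first width >= getMinWidth().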
std::shared_ptr m_eyeSegmenter; // Test images: - std::vector m_images; + std::map m_images; // Ground truth: std::shared_ptr m_eye; @@ -248,74 +264,66 @@ static void checkInvalid(const drishti::sdk::Eye& eye) TEST_F(EyeSegmenterTest, EyeSerialization) { - if (m_eyeSegmenter && sOutputDirectory) - { - int targetWidth = 127; - drishti::sdk::Eye eye; - /* int code = */ (*m_eyeSegmenter)(m_images[targetWidth].image, eye, m_images[targetWidth].isRight); + int targetWidth = 128; + drishti::sdk::Eye eye; + /* int code = */ (*m_eyeSegmenter)(m_images[targetWidth].image, eye, m_images[targetWidth].isRight); + { + drishti::sdk::EyeOStream adapter(eye, drishti::sdk::EyeStream::JSON); + std::string filename = sOutputDirectory; + filename += "/right_eye.json"; + std::ofstream os(filename); + if (os) { - drishti::sdk::EyeOStream adapter(eye, drishti::sdk::EyeStream::JSON); - std::string filename = sOutputDirectory; - filename += "/right_eye.json"; - std::ofstream os(filename); - if (os) - { - os << adapter; - } + os << adapter; } + } - { // Convert this for use by private API - auto privateEye = drishti::sdk::convert(eye); - privateEye.eyelids = privateEye.eyelidsSpline; + { // Convert this for use by private API + auto privateEye = drishti::sdk::convert(eye); + privateEye.eyelids = privateEye.eyelidsSpline; - std::string filename = sOutputDirectory; - filename += "/right_eye_private.json"; + std::string filename = sOutputDirectory; + filename += "/right_eye_private.json"; - std::ofstream os(filename); - if (os) - { - cereal::JSONOutputArchive oa(os); - typedef decltype(oa) Archive; - oa << GENERIC_NVP("eye", privateEye); - } + std::ofstream os(filename); + if (os) + { + cereal::JSONOutputArchive oa(os); + typedef decltype(oa) Archive; + oa << GENERIC_NVP("eye", privateEye); } } } TEST_F(EyeSegmenterTest, ImageEmpty) { - if (m_eyeSegmenter) + for (const auto& entry : m_images) { - // Requires at least 1 image: - ASSERT_GE(m_images.size(), 1); - - // Make sure image is empty: - ASSERT_EQ(m_images[0].image.getCols(), 0); + if (entry.first == 0) + { + drishti::sdk::Eye eye; + int code = (*m_eyeSegmenter)(m_images[0].image, eye, m_images[0].isRight); + EXPECT_EQ(code, 1); + checkInvalid(eye); - drishti::sdk::Eye eye; - int code = (*m_eyeSegmenter)(m_images[0].image, eye, m_images[0].isRight); - EXPECT_EQ(code, 1); - checkInvalid(eye); + break; + } } } TEST_F(EyeSegmenterTest, ImageTooSmall) { - if (m_eyeSegmenter) + for (const auto& entry : m_images) { - // Requires at least 1 image: - EXPECT_GE(m_images.size(), m_eyeSegmenter->getMinWidth()); - - for (int i = 1; i < m_eyeSegmenter->getMinWidth(); i++) + if ((entry.first > 0) && (entry.first < m_eyeSegmenter->getMinWidth())) { - // Make sure image has the expected size: - ASSERT_EQ(m_images[i].image.getCols(), i); - drishti::sdk::Eye eye; - int code = (*m_eyeSegmenter)(m_images[i].image, eye, m_images[i].isRight); + int code = (*m_eyeSegmenter)(entry.second.image, eye, entry.second.isRight); EXPECT_EQ(code, 1); checkInvalid(eye); + + break; } } } @@ -324,30 +332,24 @@ TEST_F(EyeSegmenterTest, ImageTooSmall) // * hamming distance for sclera and iris masks components TEST_F(EyeSegmenterTest, ImageValid) { - if (m_eyeSegmenter) + for (auto iter = getFirstValid(); iter != m_images.end(); iter++) { - // Requires at least m_eyeSegmenter->getMinWidth() + 1 - ASSERT_GT(m_images.size(), m_eyeSegmenter->getMinWidth()); - - for (int i = m_eyeSegmenter->getMinWidth(); i < m_images.size(); i++) - { - // Make sure image has the expected size: - 
EXPECT_EQ(m_images[i].image.getCols(), i); + // Make sure image has the expected size: + EXPECT_EQ(iter->second.image.getCols(), iter->first); - drishti::sdk::Eye eye; - int code = (*m_eyeSegmenter)(m_images[i].image, eye, m_images[i].isRight); + drishti::sdk::Eye eye; + int code = (*m_eyeSegmenter)(iter->second.image, eye, iter->second.isRight); - // Sanity check on each model: + // Sanity check on each model: - EXPECT_EQ(code, 0); - checkValid(eye, m_images[i].storage.size()); + EXPECT_EQ(code, 0); + checkValid(eye, iter->second.storage.size()); - // Ground truth comparison for reasonable resolutions - if (i > 128 && m_eye) - { - const float threshold = (i == m_eye->getRoi().width) ? m_scoreThreshold : 0.5; - ASSERT_GT(detectionScore(eye, *m_eye), threshold); - } + // Ground truth comparison for reasonable resolutions + if ((iter->first > 128) && m_eye) + { + const float threshold = (iter->first == m_eye->getRoi().width) ? m_scoreThreshold : 0.5; + ASSERT_GT(detectionScore(eye, *m_eye), threshold); } } } @@ -355,34 +357,29 @@ TEST_F(EyeSegmenterTest, ImageValid) // Currently there is no internal quality check, but this is included for regression: TEST_F(EyeSegmenterTest, ImageIsBlack) { - if (m_eyeSegmenter) - { - Entry entry; - int width = 256; - int height = int(float(width) / m_eyeSegmenter->getRequiredAspectRatio() + 0.5f); - createImage(entry, height, width, { 0, 0, 0 }); - - drishti::sdk::Eye eye; - int code = (*m_eyeSegmenter)(entry.image, eye, entry.isRight); - EXPECT_EQ(code, 0); - checkValid(eye, entry.storage.size()); - } + Entry entry; + const int width = 256; + const int height = static_cast(static_cast(width) / m_eyeSegmenter->getRequiredAspectRatio() + 0.5f); + createImage(entry, height, width, { 0, 0, 0 }); + + drishti::sdk::Eye eye; + int code = (*m_eyeSegmenter)(entry.image, eye, entry.isRight); + EXPECT_EQ(code, 0); + checkValid(eye, entry.storage.size()); } TEST_F(EyeSegmenterTest, ImageIsWhite) { - if (m_eyeSegmenter) - { - Entry entry; - int width = 256; - int height = int(float(width) / m_eyeSegmenter->getRequiredAspectRatio() + 0.5f); - createImage(entry, height, width, { 0, 0, 0 }); + Entry entry; - drishti::sdk::Eye eye; - int code = (*m_eyeSegmenter)(entry.image, eye, entry.isRight); - EXPECT_EQ(code, 0); - checkValid(eye, entry.storage.size()); - } + const int width = 256; + const int height = static_cast(static_cast(width) / m_eyeSegmenter->getRequiredAspectRatio() + 0.5f); + createImage(entry, height, width, { 0, 0, 0 }); + + drishti::sdk::Eye eye; + int code = (*m_eyeSegmenter)(entry.image, eye, entry.isRight); + EXPECT_EQ(code, 0); + checkValid(eye, entry.storage.size()); } #if defined(DRISHTI_BUILD_C_INTERFACE) @@ -391,27 +388,24 @@ TEST_F(EyeSegmenterTest, ExternCInterface) auto segmenter = createC(sEyeRegressor); ASSERT_NE(segmenter, nullptr); - ASSERT_NE(m_eye, nullptr); - - // Requires at least m_eyeSegmenter->getMinWidth() + 1 - ASSERT_GT(m_images.size(), m_eyeSegmenter->getMinWidth()); + ASSERT_NE(m_eye, nullptr); - for (int i = m_eyeSegmenter->getMinWidth(); i < m_images.size(); i++) + for (auto iter = getFirstValid(); iter != m_images.end(); iter++) { // Make sure image has the expected size: - EXPECT_EQ(m_images[i].image.getCols(), i); + EXPECT_EQ(iter->second.image.getCols(), iter->first); drishti::sdk::Eye eye; - int code = drishti_eye_segmenter_segment(segmenter.get(), m_images[i].image, eye, m_images[i].isRight); + int code = drishti_eye_segmenter_segment(segmenter.get(), iter->second.image, eye, iter->second.isRight); // Sanity check on each 
model: EXPECT_EQ(code, 0); - checkValid(eye, m_images[i].storage.size()); + checkValid(eye, iter->second.storage.size()); // Ground truth comparison for reasonable resolutions - if (i > 128 && m_eye) + if ((iter->first >= 128) && m_eye) { - const float threshold = (i == m_eye->getRoi().width) ? m_scoreThreshold : 0.5; + const float threshold = (iter->first == m_eye->getRoi().width) ? m_scoreThreshold : 0.5; ASSERT_GT(detectionScore(eye, *m_eye), threshold); } } @@ -493,7 +487,7 @@ static float detectionScore(const drishti::sdk::Eye& eyeA, const drishti::sdk::E } catch (...) { - // opencv throws if count is 0 + // opencv throws if count is 0 } float score = denominator ? float(numerator) / (denominator) : 0; diff --git a/src/lib/drishti/drishti/ut/test-FaceTracker.cpp b/src/lib/drishti/drishti/ut/test-FaceTracker.cpp index 9af490cd..1ae1dbe4 100644 --- a/src/lib/drishti/drishti/ut/test-FaceTracker.cpp +++ b/src/lib/drishti/drishti/ut/test-FaceTracker.cpp @@ -78,8 +78,8 @@ class FaceTest : public ::testing::Test #if defined(DRISHTI_DO_GPU_TESTING) m_context = aglet::GLContext::create(aglet::GLContext::kAuto); #if defined(_WIN32) || defined(_WIN64) - CV_Assert(!glewInit()); -#endif + CV_Assert(!glewInit()); +#endif #endif // TODO: we need to load ground truth output for each shader // (some combinations could be tested, but that is probably excessive!) @@ -105,7 +105,7 @@ class FaceTest : public ::testing::Test { const float fx = size.width; const drishti::sdk::Vec2f p(image.cols / 2, image.rows / 2); - drishti::sdk::SensorModel::Intrinsic intrinsic(p, fx, {size.width, size.height}); + drishti::sdk::SensorModel::Intrinsic intrinsic(p, fx, { size.width, size.height }); drishti::sdk::Matrix33f I = drishti::sdk::Matrix33f::eye(); drishti::sdk::SensorModel::Extrinsic extrinsic(I); drishti::sdk::SensorModel sensor(intrinsic, extrinsic); @@ -164,7 +164,7 @@ class FaceTest : public ::testing::Test drishti::sdk::VideoFrame frame({ image.cols, image.rows }, image.ptr(), true, 0, DFLT_TEXTURE_FORMAT); - const int iterations = 10; + const int iterations = 3; for (int i = 0; i < iterations; i++) { (*tracker)(frame); @@ -177,7 +177,7 @@ class FaceTest : public ::testing::Test { const float fx = size.width; const drishti::sdk::Vec2f p(image.cols / 2, image.rows / 2); - drishti::sdk::SensorModel::Intrinsic intrinsic(p, fx, {size.width, size.height}); + drishti::sdk::SensorModel::Intrinsic intrinsic(p, fx, { size.width, size.height }); drishti::sdk::Matrix33f R; drishti::sdk::SensorModel::Extrinsic extrinsic(R); @@ -223,13 +223,13 @@ class FaceTest : public ::testing::Test { m_logger->info("callback: Received results"); -// int count = 0; -// for (const auto& r : results) -// { -// std::stringstream ss; -// ss << "/tmp/image_" << count++ << ".png"; -// cv::imwrite(ss.str(), drishti::sdk::drishtiToCv(r.image)); -// } + // int count = 0; + // for (const auto& r : results) + // { + // std::stringstream ss; + // ss << "/tmp/image_" << count++ << ".png"; + // cv::imwrite(ss.str(), drishti::sdk::drishtiToCv(r.image)); + // } return 0; } @@ -238,7 +238,7 @@ class FaceTest : public ::testing::Test { m_logger->info("trigger: Received results at time {}}", timestamp); // if(some_condition_is_true(faces)) { - return {3, true, true}; // request last 3 images and textures + return { 3, true, true }; // request last 3 images and textures // } } @@ -267,7 +267,7 @@ class FaceTest : public ::testing::Test { return ft->trigger(faces, timestamp); } - return {0,false,false}; + return { 0, false, false }; } static int 
allocatorFunc(void* context, const drishti_image_t& spec, drishti::sdk::Image4b& image) @@ -298,7 +298,7 @@ class FaceTest : public ::testing::Test drishti::sdk::VideoFrame frame({ image.cols, image.rows }, image.ptr(), true, 0, DFLT_TEXTURE_FORMAT); - const int iterations = 10; + const int iterations = 3; for (int i = 0; i < iterations; i++) { drishti_face_tracker_track(tracker.get(), frame); diff --git a/src/lib/drishti/eye/EyeModelIris.cpp b/src/lib/drishti/eye/EyeModelIris.cpp index dcdc1e08..a2ea83d0 100644 --- a/src/lib/drishti/eye/EyeModelIris.cpp +++ b/src/lib/drishti/eye/EyeModelIris.cpp @@ -44,7 +44,7 @@ void EyeModelEstimator::Impl::segmentIris(const cv::Mat& I, EyeModel& eye) const } // Initial iris estimates: - EllipseVec irises {{ eye.irisEllipse.center, eye.irisEllipse.size, cpr->getPStar().angle }}; + EllipseVec irises{ { eye.irisEllipse.center, eye.irisEllipse.size, cpr->getPStar().angle } }; cv::RNG rng; if (m_irisInits > 1) @@ -100,7 +100,7 @@ EyeModelEstimator::Impl::estimateCentralIris(const cv::Mat& I, const cv::Mat& M, // XGBoost is not reentrant: harness({ 0, int(irises.size()) }); - //cv::parallel_for_({0, int(irises.size())}, harness, 2); +//cv::parallel_for_({0, int(irises.size())}, harness, 2); #if DRISHTI_CPR_DEBUG_PHI_ESTIMATE drawIrisEstimates(I, estimates, "iris-out"); diff --git a/src/lib/drishti/eye/ut/test-drishti-eye.cpp b/src/lib/drishti/eye/ut/test-drishti-eye.cpp index 6d8604b6..14b95777 100644 --- a/src/lib/drishti/eye/ut/test-drishti-eye.cpp +++ b/src/lib/drishti/eye/ut/test-drishti-eye.cpp @@ -25,6 +25,7 @@ #include +#include #include #include @@ -100,24 +101,32 @@ class EyeModelEstimatorTest : public ::testing::Test cv::Mat padded; cv::Rect roi({ 0, 0 }, image.size()); - padToAspectRatio(image, padded, 4.0 / 3.0); + padToAspectRatio(image, padded, EYE_ASPECT_RATIO); assert(!padded.empty()); - // Next create each possible size from 0 to 1000: - for (int width = 0; width < 256; width++) + std::vector widths = { 0 }; // first one is empty +#if defined(GAUZE_ANDROID_USE_EMULATOR) + for (int width = 16; width <= 256; width *= 2) + { + widths.push_back(width); + } +#else + for (int width = 16; width <= 256; width++) + { + widths.push_back(width); + } +#endif + + for (const auto& width : widths) { cv::Mat resized; if (width > 0) { - int height(float(width) / EYE_ASPECT_RATIO + 0.5f); + const int height = static_cast(static_cast(width) / EYE_ASPECT_RATIO + 0.5f); cv::resize(padded, resized, { width, height }, width); } - { - // Create right image - Entry entry{ resized, true }; - m_images.emplace_back(entry); - } + m_images[width] = { resized, true }; } } @@ -147,13 +156,19 @@ class EyeModelEstimatorTest : public ::testing::Test entry.isRight = true; } + std::map::iterator getFirstGreat(int width = 32) + { + auto isok = [=](const std::pair& e) { return e.second.image.cols >= width; }; + return std::find_if(m_images.begin(), m_images.end(), isok); + } + // Objects declared here can be used by all tests in the test case for EyeModelEstimator. 
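// [Editor's sketch] The fixtures above now key their test images by width in a
// std::map<int, Entry>, so the linear std::find_if scan in getFirstGreat() could
// equivalently use logarithmic key lookup. The helper name below is hypothetical
// and not part of the patch; it assumes m_images is the std::map<int, Entry>
// member introduced by this change:
std::map<int, Entry>::iterator getFirstAtLeast(int minWidth = 32)
{
    // first entry whose width key is >= minWidth, or end() if none exists
    return m_images.lower_bound(minWidth);
}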
std::shared_ptr m_eyeSegmenter; // Test images: - std::vector m_images; + std::map m_images; - int m_targetWidth = 127; + int m_targetWidth = 128; // Ground truth image: cv::Mat m_image; @@ -207,14 +222,11 @@ TEST(EyeModelEstimator, StreamConstructor) TEST_F(EyeModelEstimatorTest, CerealSerialization) { - if (m_eyeSegmenter) - { - std::string filename = std::string(sOutputDirectory) + "/eye.cpb"; - save_cpb(filename, *m_eyeSegmenter); + std::string filename = std::string(sOutputDirectory) + "/eye.cpb"; + save_cpb(filename, *m_eyeSegmenter); - drishti::eye::EyeModelEstimator segmenter2; - load_cpb(filename, segmenter2); - } + drishti::eye::EyeModelEstimator segmenter2; + load_cpb(filename, segmenter2); } /* @@ -223,21 +235,18 @@ TEST_F(EyeModelEstimatorTest, CerealSerialization) TEST_F(EyeModelEstimatorTest, EyeSerialization) { - if (m_eyeSegmenter) - { - drishti::eye::EyeModel eye; + drishti::eye::EyeModel eye; - assert(m_images[m_targetWidth].isRight); - /* int code = */ (*m_eyeSegmenter)(m_images[m_targetWidth].image, eye); + assert(m_images[m_targetWidth].isRight); + /* int code = */ (*m_eyeSegmenter)(m_images[m_targetWidth].image, eye); - std::string filename(sOutputDirectory); - filename += "/right_eye_2.json"; + std::string filename(sOutputDirectory); + filename += "/right_eye_test.json"; - std::ofstream os(filename); - cereal::JSONOutputArchive oa(os); - typedef decltype(oa) Archive; - oa << GENERIC_NVP("eye", eye); - } + std::ofstream os(filename); + cereal::JSONOutputArchive oa(os); + typedef decltype(oa) Archive; + oa << GENERIC_NVP("eye", eye); } // * hamming distance for sclera and iris masks components @@ -248,24 +257,24 @@ TEST_F(EyeModelEstimatorTest, ImageValid) return; } - for (int i = 32; i < m_images.size(); i++) + for (auto iter = getFirstGreat(); iter != m_images.end(); iter++) { // Make sure image has the expected size: - EXPECT_EQ(m_images[i].image.cols, i); + EXPECT_EQ(iter->second.image.cols, iter->first); - assert(m_images[i].isRight); + assert(iter->second.isRight); drishti::eye::EyeModel eye; - int code = (*m_eyeSegmenter)(m_images[i].image, eye); + int code = (*m_eyeSegmenter)(iter->second.image, eye); EXPECT_EQ(code, 0); eye.refine(); - checkValid(eye, m_images[i].image.size()); + checkValid(eye, iter->second.image.size()); // Ground truth comparison for reasonable resolutions - if (i > 100) + if (iter->first >= 128) { - const float scaleGroundTruthToCurrent = float(m_images[i].image.cols) / float(m_targetWidth); - const float score = detectionScore(*m_eye, eye, m_images[i].image.size(), scaleGroundTruthToCurrent); + const float scaleGroundTruthToCurrent = static_cast(iter->second.image.cols) / static_cast(m_targetWidth); + const float score = detectionScore(*m_eye, eye, iter->second.image.size(), scaleGroundTruthToCurrent); ASSERT_GT(score, m_scoreThreshold); } } @@ -278,23 +287,23 @@ TEST_F(EyeModelEstimatorTest, IsRepeatable) return; } - for (int i = 64; i < m_images.size(); i++) + for (auto iter = getFirstGreat(); iter != m_images.end(); iter++) { // Make sure image has the expected size: - EXPECT_EQ(m_images[i].image.cols, i); + EXPECT_EQ(iter->second.image.cols, iter->first); - assert(m_images[i].isRight); + assert(iter->second.isRight); drishti::eye::EyeModel eyeA, eyeB; - int codeA = (*m_eyeSegmenter)(m_images[i].image, eyeA); + int codeA = (*m_eyeSegmenter)(iter->second.image, eyeA); EXPECT_EQ(codeA, 0); eyeA.refine(); - checkValid(eyeA, m_images[i].image.size()); + checkValid(eyeA, iter->second.image.size()); - int codeB = 
(*m_eyeSegmenter)(m_images[i].image, eyeB); + int codeB = (*m_eyeSegmenter)(iter->second.image, eyeB); EXPECT_EQ(codeB, 0); eyeB.refine(); - checkValid(eyeB, m_images[i].image.size()); + checkValid(eyeB, iter->second.image.size()); EXPECT_EQ(isEqual(eyeA, eyeB), true); } @@ -303,36 +312,30 @@ TEST_F(EyeModelEstimatorTest, IsRepeatable) // Currently there is no internal quality check, but this is included for regression: TEST_F(EyeModelEstimatorTest, ImageIsBlack) { - if (m_eyeSegmenter) - { - Entry entry; - int width = 256; - int height = int(float(width) / EYE_ASPECT_RATIO + 0.5f); - createImage(entry, height, width, { 0, 0, 0 }); - - assert(entry.isRight); - drishti::eye::EyeModel eye; - int code = (*m_eyeSegmenter)(entry.image, eye); - EXPECT_EQ(code, 0); - checkValid(eye, entry.image.size()); - } + Entry entry; + const int width = 128; + const int height = static_cast<int>(static_cast<float>(width) / EYE_ASPECT_RATIO + 0.5f); + createImage(entry, height, width, { 0, 0, 0 }); + + assert(entry.isRight); + drishti::eye::EyeModel eye; + int code = (*m_eyeSegmenter)(entry.image, eye); + EXPECT_EQ(code, 0); + checkValid(eye, entry.image.size()); } TEST_F(EyeModelEstimatorTest, ImageIsWhite) { - if (m_eyeSegmenter) - { - Entry entry; - int width = 256; - int height = int(float(width) / EYE_ASPECT_RATIO + 0.5f); - createImage(entry, height, width, { 0, 0, 0 }); - - assert(entry.isRight); - drishti::eye::EyeModel eye; - int code = (*m_eyeSegmenter)(entry.image, eye); - EXPECT_EQ(code, 0); - checkValid(eye, entry.image.size()); - } + Entry entry; + const int width = 128; + const int height = static_cast<int>(static_cast<float>(width) / EYE_ASPECT_RATIO + 0.5f); + createImage(entry, height, width, { 255, 255, 255 }); + + assert(entry.isRight); + drishti::eye::EyeModel eye; + int code = (*m_eyeSegmenter)(entry.image, eye); + EXPECT_EQ(code, 0); + checkValid(eye, entry.image.size()); } // ####### @@ -349,13 +352,13 @@ static cv::Point padToAspectRatio(const cv::Mat& image, cv::Mat& padded, double int top = 0, left = 0, bottom = 0, right = 0; if (double(image.cols) / image.rows > aspectRatio) { - int padding = int(double(image.cols) / aspectRatio + 0.5) - image.rows; + int padding = static_cast<int>(static_cast<double>(image.cols) / aspectRatio + 0.5) - image.rows; top = padding / 2; bottom = padding - top; } else { - int padding = int(double(image.rows) * aspectRatio + 0.5) - image.cols; + int padding = static_cast<int>(static_cast<double>(image.rows) * aspectRatio + 0.5) - image.cols; left = padding / 2; right = padding - left; } @@ -382,6 +385,7 @@ detectionScore(const drishti::eye::EyeModel& eyeA, const drishti::eye::EyeModel& } catch (...) { + // opencv throws when empty } try { @@ -389,25 +393,10 @@ detectionScore(const drishti::eye::EyeModel& eyeA, const drishti::eye::EyeModel& } catch (...) { + // opencv throws when empty } float score = denominator ? float(numerator) / (denominator) : 0; -#define DEBUG_PASCAL 0 -#if DEBUG_PASCAL - { - std::cout << "SCORE: " << score << std::endl; - cv::imshow("maskA", maskGT); // opt - cv::imshow("maskB", maskB); // opt - int i = 0; - cv::Mat tmp[2] = { maskGT, maskB }; - do - { - cv::imshow("a", tmp[i++ % 2]); - } while (cv::waitKey(0) != int('q')); - cv::waitKey(0); - } -#endif - return score; } diff --git a/src/lib/drishti/face/Face.cpp b/src/lib/drishti/face/Face.cpp index 989379be..c601b0a8 100644 --- a/src/lib/drishti/face/Face.cpp +++ b/src/lib/drishti/face/Face.cpp @@ -65,11 +65,11 @@ std::vector<cv::Point2f> asSpline(const std::vector<cv::Point2f>& points, int n, { std::vector<cv::Point2f> spline; drishti::core::fitSpline(points, spline, n, closed); - if(closed) + if (closed) { spline.push_back(spline.front()); } - + return spline; } diff --git a/src/lib/drishti/face/FaceDetector.cpp b/src/lib/drishti/face/FaceDetector.cpp index bf59aa41..62170e77 100644 --- a/src/lib/drishti/face/FaceDetector.cpp +++ b/src/lib/drishti/face/FaceDetector.cpp @@ -118,7 +118,7 @@ class FaceDetector::Impl if (m_eyeRegressor.size() && m_eyeRegressor[0] && m_eyeRegressor[1] && m_doEyeRefinement && faces.size()) { - for(auto &f : faces) + for (auto& f : faces) { DRISHTI_EYE::EyeModel eyeR, eyeL; segmentEyes(Ib.Ib, f, eyeR, eyeL); @@ -136,9 +136,9 @@ class FaceDetector::Impl } } - using RectPair = std::array<cv::Rect, 2>; - using MatPair = std::array<cv::Mat, 2>; - static void extractCrops(const cv::Mat& Ib, const RectPair &eyes, const cv::Rect& bounds, MatPair &crops) + using RectPair = std::array<cv::Rect, 2>; + using MatPair = std::array<cv::Mat, 2>; + static void extractCrops(const cv::Mat& Ib, const RectPair& eyes, const cv::Rect& bounds, MatPair& crops) { for (int i = 0; i < 2; i++) { @@ -154,7 +154,7 @@ class FaceDetector::Impl crops[i].setTo(0); Ib(roi).copyTo(crops[i](roi - eyes[i].tl())); } - } + } } void segmentEyes(const cv::Mat1b& Ib, FaceModel& face, DRISHTI_EYE::EyeModel& eyeR, DRISHTI_EYE::EyeModel& eyeL) { @@ -174,9 +174,9 @@ class FaceDetector::Impl // clang-format on MatPair crops; - RectPair eyes = {{ roiR, roiL }}; + RectPair eyes = { { roiR, roiL } }; extractCrops(Ib, eyes, { { 0, 0 }, Ib.size() }, crops); - + cv::Mat flipped; cv::flip(crops[1], flipped, 1); // Flip left eye to right eye cs crops[1] = flipped; @@ -185,8 +185,8 @@ class FaceDetector::Impl float theta = std::atan2(v.y, v.x); eyeR.angle = theta; eyeL.angle = (-theta); - - std::array<DRISHTI_EYE::EyeModel*, 2> results {{ &eyeR, &eyeL }}; + + std::array<DRISHTI_EYE::EyeModel*, 2> results{ { &eyeR, &eyeL } }; for (int i = 0; i < 2; i++) { m_eyeRegressor[i]->setDoIndependentIrisAndPupil(m_doIrisRefinement); @@ -208,7 +208,7 @@ class FaceDetector::Impl eyeL.roi = eyes[1]; } } - + void findLandmarks(const PaddedImage& Ib, std::vector<FaceModel>& shapes, const cv::Matx33f& Hdr_, bool isDetection) { // Scope based eye segmentation timer: @@ -245,13 +245,13 @@ class FaceDetector::Impl // Perform an additional (optional) scaling that can be tuned easily by the user as some detection // scales will perform better than the mean mapping used above (experimentally). shapes[i].roi = scaleRoi(roi, m_scaling); - + // Crop the image such that the ROI to pixel geometry is preserved. For most cases this is // a simple shallow copy/view, but in cases where the border is clipped, then we will effectively // perform border padding to achieve this goal. This makes our prediction ROI closest to the ROI // used during training and ensures our cascaded pose regression has the best chance of success. cv::Mat crop = geometryPreservingCrop(shapes[i].roi, gray); - + std::vector<bool> mask; std::vector<cv::Point2f> points; (*m_regressor)(crop, points, mask); @@ -262,19 +262,19 @@ } } } - + static cv::Rect scaleRoi(const cv::Rect& roi, float scale) { cv::Point2f tl(roi.tl()), br(roi.br()), center((tl + br) * 0.5f), diag(br - center); return cv::Rect(center - (diag * scale), center + (diag * scale)); } - + // Return requested light weight copy if roi is contained in frame bounds, else perform // a deep copy that preserves the crop geometry via border padding. This ensures that // landmark regression has the best chance of success. - static cv::Mat geometryPreservingCrop(const cv::Rect &roi, const cv::Mat &gray) + static cv::Mat geometryPreservingCrop(const cv::Rect& roi, const cv::Mat& gray) { - const cv::Rect bounds({0,0}, gray.size()); + const cv::Rect bounds({ 0, 0 }, gray.size()); const cv::Rect clipped = roi & bounds; cv::Mat crop = gray(clipped); if (clipped.size() != roi.size()) { @@ -285,7 +285,7 @@ class FaceDetector::Impl } return crop; } - + // Notes on motion and coordinate systems: // // FaceDetector::m_Hrd : map normalized regressor mean face to normalized detector mean face diff --git a/src/lib/drishti/face/FaceDetectorFactoryJson.cpp b/src/lib/drishti/face/FaceDetectorFactoryJson.cpp index 46ce781c..b1ac22e0 100644 --- a/src/lib/drishti/face/FaceDetectorFactoryJson.cpp +++ b/src/lib/drishti/face/FaceDetectorFactoryJson.cpp @@ -21,9 +21,9 @@ namespace bfs = boost::filesystem; DRISHTI_FACE_NAMESPACE_BEGIN -static std::string cat(const std::string &a, const std::string &b) { return a + b; } +static std::string cat(const std::string& a, const std::string& b) { return a + b; } -FaceDetectorFactoryJson::FaceDetectorFactoryJson(const std::string &sModels) +FaceDetectorFactoryJson::FaceDetectorFactoryJson(const std::string& sModels) { std::ifstream ifs(sModels); if (!ifs) { @@ -34,8 +34,7 @@ FaceDetectorFactoryJson::FaceDetectorFactoryJson(const std::string &sModels) nlohmann::json json; ifs >> json; - std::vector< std::pair<const char*, std::string*> > bindings = - { + std::vector<std::pair<const char*, std::string*>> bindings = { { "face_detector", &sFaceDetector }, { "eye_model_regressor", &sEyeRegressor }, { "face_landmark_regressor", &sFaceRegressor }, @@ -44,11 +43,11 @@ // Get the directory name: auto path = bfs::path(sModels); - for(auto &binding : bindings) + for (auto& binding : bindings) { auto filename = path.parent_path() / json[binding.first].get<std::string>(); (*binding.second) = filename.string(); - if(binding.second->empty()) + if (binding.second->empty()) { throw std::runtime_error(cat("FaceDetectorFactoryJson::FaceDetectorFactoryJson()", binding.first)); } diff --git a/src/lib/drishti/face/FaceDetectorFactoryJson.h b/src/lib/drishti/face/FaceDetectorFactoryJson.h index 45afb56c..2b377643 100644 --- a/src/lib/drishti/face/FaceDetectorFactoryJson.h +++ b/src/lib/drishti/face/FaceDetectorFactoryJson.h @@ -20,7 +20,7 @@ DRISHTI_FACE_NAMESPACE_BEGIN class FaceDetectorFactoryJson : public FaceDetectorFactory { public: - FaceDetectorFactoryJson(const std::string &sModels); + FaceDetectorFactoryJson(const std::string& sModels); }; DRISHTI_FACE_NAMESPACE_END diff --git a/src/lib/drishti/face/FaceTracker.cpp b/src/lib/drishti/face/FaceTracker.cpp index c383d26c..ab0b314a 100644 --- a/src/lib/drishti/face/FaceTracker.cpp +++ b/src/lib/drishti/face/FaceTracker.cpp @@ -17,9 +17,9 @@ DRISHTI_FACE_NAMESPACE_BEGIN struct FaceTracker::Impl { using FaceTrack = std::pair<FaceModel, TrackInfo>; - using 
FaceTrackVec = std::vector; - using FaceModelVec = std::vector; - + using FaceTrackVec = std::vector; + using FaceModelVec = std::vector; + Impl(float costThreshold, std::size_t minTrackHits, std::size_t maxTrackMisses) : m_costThreshold(costThreshold) , m_minTrackHits(minTrackHits) @@ -27,12 +27,12 @@ struct FaceTracker::Impl { } - void update(const FaceModelVec &facesIn, FaceTrackVec &facesOut) + void update(const FaceModelVec& facesIn, FaceTrackVec& facesOut) { - if(m_tracks.size() == 0) + if (m_tracks.size() == 0) { // Initialize tracks: - for (const auto &f : facesIn) + for (const auto& f : facesIn) { m_tracks.emplace_back(f, TrackInfo(m_id++)); } @@ -42,13 +42,13 @@ struct FaceTracker::Impl std::unordered_map direct_assignment; std::unordered_map reverse_assignment; std::vector> C; - - if(facesIn.size()) + + if (facesIn.size()) { C = std::vector>(m_tracks.size(), std::vector(facesIn.size())); - for(int i = 0; i < m_tracks.size(); i++) + for (int i = 0; i < m_tracks.size(); i++) { - for(int j = 0; j < facesIn.size(); j++) + for (int j = 0; j < facesIn.size(); j++) { C[i][j] = cv::norm(*m_tracks[i].first.eyesCenter - *facesIn[j].eyesCenter); } @@ -57,10 +57,10 @@ struct FaceTracker::Impl } std::vector hits(m_tracks.size(), 0); - for(const auto &m : direct_assignment) + for (const auto& m : direct_assignment) { // Create a new track, or update an old track: - if(C[m.first][m.second] > m_costThreshold) + if (C[m.first][m.second] > m_costThreshold) { m_tracks.emplace_back(facesIn[m.second], TrackInfo(m_id++)); } @@ -71,33 +71,34 @@ struct FaceTracker::Impl hits[m.first] = 1; } } - - for(int i = 0; i < hits.size(); i++) + + for (int i = 0; i < hits.size(); i++) { - if(!hits[i]) + if (!hits[i]) { m_tracks[i].second.miss(); } } - + // Prune old tracks: - m_tracks.erase(std::remove_if(m_tracks.begin(), m_tracks.end(), [this](const FaceTrack &track) { - return (track.second.misses >= m_maxTrackMisses); - }), m_tracks.end()); + m_tracks.erase(std::remove_if(m_tracks.begin(), m_tracks.end(), [this](const FaceTrack& track) { + return (track.second.misses >= m_maxTrackMisses); + }), + m_tracks.end()); } - + // Return subset of remaining mature tracks: - std::copy_if(m_tracks.begin(), m_tracks.end(), std::back_inserter(facesOut), [this](const FaceTrack &track) { - return (track.second.age > m_minTrackHits); + std::copy_if(m_tracks.begin(), m_tracks.end(), std::back_inserter(facesOut), [this](const FaceTrack& track) { + return (track.second.age > m_minTrackHits); }); } float m_costThreshold = 0.15; // meters std::size_t m_minTrackHits = 3; std::size_t m_maxTrackMisses = 3; - + std::size_t m_id = 0; - + FaceTrackVec m_tracks; }; @@ -108,7 +109,7 @@ FaceTracker::FaceTracker(float costThreshold, std::size_t minTrackHits, std::siz FaceTracker::~FaceTracker() = default; -void FaceTracker::operator()(const FaceModelVec &facesIn, FaceTrackVec &facesOut) +void FaceTracker::operator()(const FaceModelVec& facesIn, FaceTrackVec& facesOut) { m_impl->update(facesIn, facesOut); } diff --git a/src/lib/drishti/face/FaceTracker.h b/src/lib/drishti/face/FaceTracker.h index bd2abcd0..159f4b07 100644 --- a/src/lib/drishti/face/FaceTracker.h +++ b/src/lib/drishti/face/FaceTracker.h @@ -21,7 +21,6 @@ DRISHTI_FACE_NAMESPACE_BEGIN class FaceTracker { public: - struct TrackInfo { TrackInfo() = default; @@ -30,40 +29,40 @@ class FaceTracker , age(1) , hits(1) , misses(0) - {} - + { + } + void hit() { age++; hits++; misses = 0; } - + void miss() { age++; hits = 0; misses++; } - + std::size_t identifier = 0; std::size_t age = 
0; - std::size_t hits = 0; // consecutive hits + std::size_t hits = 0; // consecutive hits std::size_t misses = 0; // consecutive misses }; using FaceTrack = std::pair; - using FaceTrackVec = std::vector; + using FaceTrackVec = std::vector; using FaceModelVec = std::vector; struct Impl; FaceTracker(float costThreshold = 0.15f, std::size_t minTrackHits = 3, std::size_t maxTrackMisses = 3); ~FaceTracker(); - void operator()(const FaceModelVec &facesIn, FaceTrackVec &facesOut); + void operator()(const FaceModelVec& facesIn, FaceTrackVec& facesOut); protected: - std::unique_ptr m_impl; }; diff --git a/src/lib/drishti/face/gpu/EyeFilter.cpp b/src/lib/drishti/face/gpu/EyeFilter.cpp index 79f4a363..668adfa7 100644 --- a/src/lib/drishti/face/gpu/EyeFilter.cpp +++ b/src/lib/drishti/face/gpu/EyeFilter.cpp @@ -92,7 +92,7 @@ void EyeFilter::dump(std::vector& frames, std::vector& eyes, eyes.resize(n); for (int i = 0; i < n; i++) { - if(getImage) + if (getImage) { // Pull frames in reverse order such that frames[0] is newest auto* filter = (*fifoProc)[length - i - 1]; diff --git a/src/lib/drishti/face/gpu/EyeFilter.h b/src/lib/drishti/face/gpu/EyeFilter.h index 81dac59e..b978e8c5 100644 --- a/src/lib/drishti/face/gpu/EyeFilter.h +++ b/src/lib/drishti/face/gpu/EyeFilter.h @@ -98,9 +98,8 @@ class EyeFilter : public ogles_gpgpu::MultiPassProc void renderIris(); protected: - int m_history = 3; - + std::vector m_faces; EyewWarpPair m_eyes; diff --git a/src/lib/drishti/face/gpu/FaceStabilizer.cpp b/src/lib/drishti/face/gpu/FaceStabilizer.cpp index dd2d4d7d..656bf598 100644 --- a/src/lib/drishti/face/gpu/FaceStabilizer.cpp +++ b/src/lib/drishti/face/gpu/FaceStabilizer.cpp @@ -22,13 +22,13 @@ FaceStabilizer::FaceStabilizer(const cv::Size& sizeOut) cv::Matx33f FaceStabilizer::stabilize(const drishti::face::FaceModel& face, const cv::Size& sizeOut, float span) { using PointPair = std::array; - + PointPair eyeCenters{ { face.eyeFullR->irisEllipse.center, face.eyeFullL->irisEllipse.center } }; if (std::min(face.eyeFullR->openness(), face.eyeFullL->openness()) < 0.1f) { eyeCenters = { { core::centroid(face.eyeRight), core::centroid(face.eyeLeft) } }; } - + // clang-format off const PointPair screenCenters {{ diff --git a/src/lib/drishti/face/ut/test-drishti-face.cpp b/src/lib/drishti/face/ut/test-drishti-face.cpp index 53472141..097c22f0 100644 --- a/src/lib/drishti/face/ut/test-drishti-face.cpp +++ b/src/lib/drishti/face/ut/test-drishti-face.cpp @@ -16,10 +16,10 @@ #include "drishti/face/FaceDetectorAndTracker.h" -extern const char * sFaceDetector; -extern const char * sFaceDetectorMean; -extern const char * sFaceRegressor; -extern const char * sEyeRegressor; +extern const char* sFaceDetector; +extern const char* sFaceDetectorMean; +extern const char* sFaceRegressor; +extern const char* sEyeRegressor; TEST(FaceDetectorAndTracker, Instantiation) { diff --git a/src/lib/drishti/hci/FaceFinder.cpp b/src/lib/drishti/hci/FaceFinder.cpp index 5a8f9dd5..b20bbc9b 100644 --- a/src/lib/drishti/hci/FaceFinder.cpp +++ b/src/lib/drishti/hci/FaceFinder.cpp @@ -107,7 +107,7 @@ FaceFinder::~FaceFinder() { try { - if(impl->threads && impl->doOptimizedPipeline) + if (impl->threads && impl->doOptimizedPipeline) { // If this has already been retrieved it will throw impl->scene.get(); // block on any abandoned calls @@ -164,7 +164,7 @@ void FaceFinder::dumpEyes(ImageViews& frames, EyeModelPairs& eyes, int n, bool g { std::vector images; impl->eyeFilter->dump(images, eyes, n, getImage); - + frames.resize(images.size()); for (int i = 0; i 
< images.size(); i++) { @@ -182,11 +182,11 @@ void FaceFinder::dumpFaces(ImageViews& frames, int n, bool getImage) { auto* filter = (*impl->fifo)[length - i - 1]; const auto size = filter->getOutFrameSize(); - + // Always assign texture (no cost) - frames[i].texture = { {size.width, size.height}, filter->getOutputTexId() }; - - if(getImage) + frames[i].texture = { { size.width, size.height }, filter->getOutputTexId() }; + + if (getImage) { frames[i].image.create(size.height, size.width); filter->getResultData(frames[i].image.ptr()); @@ -330,33 +330,33 @@ void FaceFinder::initPainter(const cv::Size& /* inputSizeUp */) impl->logger->info("Init painter"); } -void FaceFinder::initFaceFilters(const cv::Size &inputSizeUp) +void FaceFinder::initFaceFilters(const cv::Size& inputSizeUp) { const auto outputRenderOrientation = ::ogles_gpgpu::degreesToOrientation(360 - impl->outputOrientation); impl->rotater = drishti::core::make_unique(); impl->rotater->setOutputRenderOrientation(outputRenderOrientation); - + impl->warper = drishti::core::make_unique(); impl->warper->add(impl->rotater.get()); impl->warper->prepare(inputSizeUp.width, inputSizeUp.height, GL_RGBA); } -GLuint FaceFinder::stabilize(GLuint inputTexId, const cv::Size &inputSizeUp, const drishti::face::FaceModel &face) +GLuint FaceFinder::stabilize(GLuint inputTexId, const cv::Size& inputSizeUp, const drishti::face::FaceModel& face) { - if(face.points.has) + if (face.points.has) { const cv::Matx33f S = transformation::denormalize(inputSizeUp); const cv::Matx33f Sinv = S.inv(); const cv::Matx33f H = drishti::face::FaceStabilizer::stabilize(face, inputSizeUp, 0.33); - + cv::Matx44f MVP; transformation::R3x3To4x4(Sinv * H * S, MVP); - + ogles_gpgpu::Mat44f MVPt; - cv::Mat(MVP.t()).copyTo(cv::Mat(4,4,CV_32FC1,&MVPt.data[0][0])); + cv::Mat(MVP.t()).copyTo(cv::Mat(4, 4, CV_32FC1, &MVPt.data[0][0])); impl->warper->setTransformMatrix(MVPt); } - + impl->warper->process(inputTexId, 1, GL_TEXTURE_2D); return impl->rotater->getOutputTexId(); } @@ -377,10 +377,10 @@ void FaceFinder::init(const cv::Size& inputSize) impl->faceEstimator = std::make_shared(*impl->sensor); initColormap(); - initACF(inputSizeUp); // initialize ACF first (configure opengl platform extensions) + initACF(inputSizeUp); // initialize ACF first (configure opengl platform extensions) initFIFO(inputSizeUp, impl->history); // keep last N frames - initPainter(inputSizeUp); // {inputSizeUp.width/4, inputSizeUp.height/4} - initFaceFilters(inputSizeUp); // gpu "filter" (effects) + initPainter(inputSizeUp); // {inputSizeUp.width/4, inputSizeUp.height/4} + initFaceFilters(inputSizeUp); // gpu "filter" (effects) if (impl->doIris) { @@ -398,7 +398,7 @@ void FaceFinder::init(const cv::Size& inputSize) } template <typename Container> -void push_fifo(Container &container, const typename Container::value_type &value, int size) +void push_fifo(Container& container, const typename Container::value_type& value, int size) { container.push_front(value); if (container.size() > size) @@ -417,21 +417,21 @@ std::pair<GLuint, ScenePrimitives> FaceFinder::runFast(const FrameInput& frame2, { FrameInput frame1; frame1.size = frame2.size; - + ScenePrimitives scene2(impl->frameIndex), scene1, scene0, *outputScene = &scene2; - - if(impl->fifo->getBufferCount() > 0) + + if (impl->fifo->getBufferCount() > 0) { core::ScopeTimeLogger preprocessTimeLogger = [this](double t) { impl->timerInfo.acfProcessingTime = t; }; - + // read GPU results for frame n-1 - + // Here we always trigger GPU pipeline reads // to ensure upright + reduced grayscale images will // be available for regression, even if we won't be using ACF detection. impl->acf->getChannels(); - - if(impl->acf->getChannelStatus()) + + if (impl->acf->getChannelStatus()) { // If the ACF textures were loaded in the last call, then we know // that detections were requested for the last frame, and we will @@ -439,7 +439,7 @@ std::pair<GLuint, ScenePrimitives> FaceFinder::runFast(const FrameInput& frame2, scene1.m_P = std::make_shared<decltype(scene1.m_P)::element_type>(); fill(*scene1.m_P); } - + // ### Grayscale image ### if (impl->doLandmarks) { @@ -448,19 +448,19 @@ } // Start GPU pipeline for the current frame, immediately after we have - // retrieved results for the previous frame. + // retrieved results for the previous frame. computeAcf(frame2, false, doDetection); GLuint texture2 = impl->acf->first()->getOutputTexId(), texture0 = 0, outputTexture = texture2; - - if(impl->fifo->getBufferCount() > 0) + + if (impl->fifo->getBufferCount() > 0) { - if(impl->fifo->getBufferCount() > 1) + if (impl->fifo->getBufferCount() > 1) { // Retrieve CPU processing for frame n-2 scene0 = impl->scene.get(); // scene n-2 texture0 = (*impl->fifo)[-2]->getOutputTexId(); // texture n-2 - updateEyes(texture0, scene0); // update the eye texture - + updateEyes(texture0, scene0); // update the eye texture + outputTexture = paint(scene0, texture0); outputScene = &scene0; } @@ -477,15 +477,15 @@ return sceneOut; }); } - + // Add the current frame to FIFO impl->fifo->useTexture(texture2, 1); impl->fifo->render(); - + // Clear face motion estimate, update window: impl->faceMotion = { 0.f, 0.f, 0.f }; push_fifo(impl->scenePrimitives, *outputScene, impl->history); - + return std::make_pair(outputTexture, *outputScene); } @@ -502,11 +502,11 @@ std::pair<GLuint, ScenePrimitives> FaceFinder::runSimple(const FrameInput& frame GLuint texture1 = impl->acf->first()->getOutputTexId(), outputTexture = 0; detect(frame1, scene1, doDetection); - + if (doAnnotations()) { updateEyes(texture1, scene1); - + // Explicit output variable configuration: scene1.draw(impl->renderFaces, impl->renderPupils, impl->renderCorners); outputTexture = paint(scene1, texture1); // was 1 @@ -521,11 +521,11 @@ // Add the current frame to FIFO impl->fifo->useTexture(texture1, 1); impl->fifo->render(); - + // Clear face motion estimate, update window: impl->faceMotion = { 0.f, 0.f, 0.f }; push_fifo(impl->scenePrimitives, *outputScene, impl->history); - + return std::make_pair(outputTexture, *outputScene); } @@ -541,14 +541,14 @@ GLuint FaceFinder::operator()(const FrameInput& frame1) { } }; // clang-format on - + // Get current timestamp const auto& now = faceFinderTimeLogger.getTime(); const bool doDetection = needsDetection(now); - + GLuint outputTexture = 0; ScenePrimitives outputScene; - if(impl->threads && impl->doOptimizedPipeline) + if (impl->threads && impl->doOptimizedPipeline) { std::tie(outputTexture, outputScene) = runFast(frame1, doDetection); } @@ -556,14 +556,14 @@ { std::tie(outputTexture, outputScene) = runSimple(frame1, doDetection); } - + if (impl->imageLogger && outputScene.faces().size() && !outputScene.image().empty()) { (impl->imageLogger)(outputScene.image()); } - + impl->frameIndex++; // increment frame index - + if (impl->scenePrimitives.size() >= 2) { if (impl->scenePrimitives[0].faces().size() && impl->scenePrimitives[1].faces().size()) @@ -603,7 +603,7 @@ void 
FaceFinder::notifyListeners(const ScenePrimitives& scene, const TimePoint& std::vector<FaceMonitor::Request> requests(impl->faceMonitorCallback.size()); FaceMonitor::Request request; - + if (scene.faces().size()) { // 1) If any active face request is satisfied grab a frame+face buffer: @@ -703,7 +703,7 @@ std::shared_ptr FaceFinder::createAcfGpu(const FrameInpu // to ensure grayscale images will be available // for regression, even if we won't be using ACF detection. cv::Mat acf = impl->acf->getChannels(); - + if (doDetection) { assert(acf.type() == CV_8UC1); @@ -832,14 +832,14 @@ int FaceFinder::detectOnly(ScenePrimitives& scene, bool doDetection) return scene.objects().size(); } -void FaceFinder::scaleToFullResolution(std::vector<drishti::face::FaceModel> &faces) +void FaceFinder::scaleToFullResolution(std::vector<drishti::face::FaceModel>& faces) { const float Srf = 1.0f / impl->acf->getGrayscaleScale(); const cv::Matx33f Hrf = transformation::scale(Srf); for (auto& f : faces) { f = Hrf * f; - + // Tag each face w/ approximate distance: if (impl->faceEstimator) { @@ -861,7 +861,7 @@ int FaceFinder::detect(const FrameInput& frame, ScenePrimitives& scene, bool doD // Start with empty face detections: std::vector<drishti::face::FaceModel> faces; drishti::face::FaceDetector::PaddedImage Ib(scene.image(), { { 0, 0 }, scene.image().size() }); - + if (impl->detector && (!doDetection || scene.m_P)) { if (!scene.objects().size()) @@ -889,11 +889,11 @@ int FaceFinder::detect(const FrameInput& frame, ScenePrimitives& scene, bool doD // are all upright, but the output texture used for the display // is still in the native (potentially rotated) coordinate system, // so we need to perform scaling wrt that. - + scaleToFullResolution(faces); } } - + { // Perform simple prediction on every frame. This occurs on full resolution // FaceModel objects, for which approximate location is known. 
In some cases @@ -902,14 +902,14 @@ int FaceFinder::detect(const FrameInput& frame, ScenePrimitives& scene, bool doD // 1) map to regression image resolution // 2) refine the face model // 3) map back to the full resolution image - + const float Sfr = impl->acf->getGrayscaleScale(); // full->regression const cv::Matx33f Hfr = transformation::scale(Sfr); drishti::face::FaceTracker::FaceTrackVec tracksOut; (*impl->faceTracker)(faces, tracksOut); faces.clear(); - for(auto & f : tracksOut) + for (auto& f : tracksOut) { // For any missed face we need to update the landmarks from the last tracked model if (f.second.misses > 0) { @@ -921,21 +921,21 @@ scene.faces().emplace_back(f.first); // store output } } - + // Faces have been mapped to the regression image resolution: impl->faceDetector->refine(Ib, faces, cv::Matx33f::eye(), false); scaleToFullResolution(faces); - for (auto &f : faces) + for (auto& f : faces) { scene.faces().emplace_back(f); } - + // Sort near to far: - std::sort(scene.faces().begin(), scene.faces().end(), [](const face::FaceModel &a, const face::FaceModel &b) { + std::sort(scene.faces().begin(), scene.faces().end(), [](const face::FaceModel& a, const face::FaceModel& b) { return (a.eyesCenter->z < b.eyesCenter->z); }); } - + return 0; } @@ -1078,7 +1078,7 @@ void FaceFinder::init2(drishti::face::FaceDetectorFactory& resources) impl->detector = dynamic_cast<drishti::acf::Detector*>(impl->faceDetector->getDetector()); if (impl->detector) - { + { if (impl->acfCalibration != 0.f) { // Perform modification @@ -1112,21 +1112,20 @@ // clang-format on const cv::Point2f center = core::centroid(centers); - const cv::Matx33f H = transformation::scale(impl->regressorCropScale, impl->regressorCropScale*1.1, center); + const cv::Matx33f H = transformation::scale(impl->regressorCropScale, impl->regressorCropScale * 1.1, center); faceDetectorMean = H * faceDetectorMean; } impl->faceDetector->setFaceDetectorMean(faceDetectorMean); } - + impl->faceTracker = core::make_unique<drishti::face::FaceTracker>( impl->minFaceSeparation, impl->minTrackHits, - impl->maxTrackMisses - ); + impl->maxTrackMisses); } -static void smooth(double &t0, double t1, double alpha=0.95) +static void smooth(double& t0, double t1, double alpha = 0.95) { t0 = (t0 == 0.0) ? t1 : ((t0 * alpha) + (t1 * (1.0 - alpha))); } diff --git a/src/lib/drishti/hci/FaceFinder.h b/src/lib/drishti/hci/FaceFinder.h index 531babe9..32b85010 100644 --- a/src/lib/drishti/hci/FaceFinder.h +++ b/src/lib/drishti/hci/FaceFinder.h @@ -58,7 +58,7 @@ class FaceFinder std::function<void(double)> acfProcessingTimeLogger; std::function<void(double)> blobExtractionTimeLogger; std::function<void(double)> renderSceneTimeLogger; - + void init(); friend std::ostream& operator<<(std::ostream& stream, const TimerInfo& info); @@ -99,7 +99,7 @@ class FaceFinder bool renderFaces = true; bool renderPupils = true; bool renderCorners = true; - + int history = DRISHTI_HCI_FACEFINDER_HISTORY; }; @@ -114,7 +114,7 @@ class FaceFinder create(FaceDetectorFactoryPtr& factory, Settings& config, void* glContext = nullptr); virtual GLuint operator()(const FrameInput& frame); - + float getMaxDistance() const; float getMinDistance() const; @@ -133,26 +133,25 @@ class FaceFinder void setImageLogger(const ImageLogger& logger); protected: - using ImageViews = std::vector<core::ImageView>; using EyeModelPair = std::array<DRISHTI_EYE::EyeModel, 2>; using EyeModelPairs = std::vector<EyeModelPair>; - + std::pair<GLuint, ScenePrimitives> runFast(const FrameInput& frame, bool doDetection); std::pair<GLuint, ScenePrimitives> runSimple(const FrameInput& frame, bool doDetection); - + bool needsDetection(const TimePoint& ts) const; void computeGazePoints(); void updateEyes(GLuint inputTexId, const ScenePrimitives& scene); - void scaleToFullResolution(std::vector<drishti::face::FaceModel> &faces); - + void scaleToFullResolution(std::vector<drishti::face::FaceModel>& faces); + void notifyListeners(const ScenePrimitives& scene, const TimePoint& time, bool isFull); virtual void init(const cv::Size& inputSize); virtual void initPainter(const cv::Size& inputSizeUp); - void initFaceFilters(const cv::Size &inputSizeUp); + void initFaceFilters(const cv::Size& inputSizeUp); void initACF(const cv::Size& inputSizeUp); void initFIFO(const cv::Size& inputSize, std::size_t n); void initBlobFilter(); @@ -162,14 +161,14 @@ class FaceFinder void initTimeLoggers(); void init2(drishti::face::FaceDetectorFactory& resources); - void dumpEyes(ImageViews& frames, EyeModelPairs& eyes, int n=1, bool getImage = false); - void dumpFaces(ImageViews& frames, int n=1, bool getImage = false); + void dumpEyes(ImageViews& frames, EyeModelPairs& eyes, int n = 1, bool getImage = false); + void dumpFaces(ImageViews& frames, int n = 1, bool getImage = false); int detectOnly(ScenePrimitives& scene, bool doDetection); virtual int detect(const FrameInput& frame, ScenePrimitives& scene, bool doDetection); virtual GLuint paint(const ScenePrimitives& scene, GLuint inputTexture); virtual void preprocess(const FrameInput& frame, ScenePrimitives& scene, bool needsDetection); // compute acf - GLuint stabilize(GLuint inputTexId, const cv::Size &inputSizeUp, const drishti::face::FaceModel &face); + GLuint stabilize(GLuint inputTexId, const cv::Size& inputSizeUp, const drishti::face::FaceModel& face); int computeDetectionWidth(const cv::Size& inputSizeUp) const; void computeAcf(const FrameInput& frame, bool doLuv, bool doDetection); std::shared_ptr createAcfGpu(const FrameInput& frame, bool doDetection); diff --git a/src/lib/drishti/hci/FaceFinderImpl.h b/src/lib/drishti/hci/FaceFinderImpl.h index 794ec519..95209108 100644 --- a/src/lib/drishti/hci/FaceFinderImpl.h +++ b/src/lib/drishti/hci/FaceFinderImpl.h @@ -125,7 +125,7 @@ struct FaceFinder::Impl int outputOrientation = 0; float brightness = 1.f; - std::shared_ptr fifo; // store last N faces + std::shared_ptr fifo; // store last N faces // ::::::::::::::::::::::::::::::::::::::: // ::: ACF and detection parameters: ::: 
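Aside: the `history` fields above bound the scene FIFO maintained by the push_fifo() helper in FaceFinder.cpp. A minimal self-contained sketch of that idiom for reference (illustrative only; the std::deque usage example is an assumption, not part of the patch):

#include <deque>

// Keep the newest element at the front and evict anything beyond `size` entries.
template <typename Container>
void push_fifo(Container& container, const typename Container::value_type& value, std::size_t size)
{
    container.push_front(value);
    while (container.size() > size)
    {
        container.pop_back(); // drop the oldest frame
    }
}

// Usage sketch: std::deque<int> history; push_fifo(history, frameIndex, 3);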
@@ -150,7 +150,7 @@ struct FaceFinder::Impl float minFaceSeparation = 0.15; std::unique_ptr faceDetector; std::unique_ptr<drishti::face::FaceTracker> faceTracker; - + drishti::acf::Detector* detector = nullptr; // weak ref std::pair> objects; std::future<ScenePrimitives> scene; @@ -203,7 +203,7 @@ struct FaceFinder::Impl bool usePBO = false; bool doOptimizedPipeline = true; int history = 3; // frame history - + // ::::::::::::::::::::::: // ::: Filters/Effects ::: // ::::::::::::::::::::::: diff --git a/src/lib/drishti/hci/FaceFinderPainter.cpp b/src/lib/drishti/hci/FaceFinderPainter.cpp index 6bbdcc45..ac70dfe6 100644 --- a/src/lib/drishti/hci/FaceFinderPainter.cpp +++ b/src/lib/drishti/hci/FaceFinderPainter.cpp @@ -214,16 +214,14 @@ void cat(const Container& a, const Container& b, Container& c) std::copy(b.begin(), b.end(), std::back_inserter(c)); } - GLuint FaceFinderPainter::filter(const ScenePrimitives& scene, GLuint inputTexture) { // clang-format on MethodLog timeSummary(DRISHTI_LOCATION_SIMPLE); - core::ScopeTimeLogger paintLogger = [&](double ts) - { + core::ScopeTimeLogger paintLogger = [&](double ts) { impl->logger->info("TIMING:{}={};{}", timeSummary.name, ts, timeSummary.ss.str()); }; - // clang-format off +// clang-format off #if DRISHTI_HCI_FACE_FINDER_PAINTER_SHOW_CIRCLE { diff --git a/src/lib/drishti/hci/FaceFinderPainter.h b/src/lib/drishti/hci/FaceFinderPainter.h index 3027045f..fca3bed8 100644 --- a/src/lib/drishti/hci/FaceFinderPainter.h +++ b/src/lib/drishti/hci/FaceFinderPainter.h @@ -32,13 +32,12 @@ DRISHTI_HCI_NAMESPACE_BEGIN class FaceFinderPainter : public FaceFinder { public: - enum EffectKind { kWireframes, kStabilize }; - + class Impl; using FaceFinderPtr = std::unique_ptr; using FrameDelegate = std::function; @@ -56,7 +55,7 @@ class FaceFinderPainter : public FaceFinder void setShowDetectionScales(bool value); bool getShowDetectionScales() const; - + void setEffectKind(EffectKind kind); EffectKind getEffectKind() const; diff --git a/src/lib/drishti/hci/FaceMonitor.h b/src/lib/drishti/hci/FaceMonitor.h index f4f56f06..ebcbad10 100644 --- a/src/lib/drishti/hci/FaceMonitor.h +++ b/src/lib/drishti/hci/FaceMonitor.h @@ -27,24 +27,23 @@ DRISHTI_HCI_NAMESPACE_BEGIN class FaceMonitor { public: - //! An alias for the system high resolution clock using HighResolutionClock = std::chrono::high_resolution_clock; //! An alias for a time point from the high resolution clock using TimePoint = HighResolutionClock::time_point; - + //! An alias for a vector of positions using Positions = std::vector; //! An alias for a vector of faces using Faces = std::vector<drishti::face::FaceModel>; - + /** * A face image structure containing metadata with associated * landmarks and eye images. */ - + struct FaceImage { //! Timestamp corresponding to acquisition time. @@ -68,20 +67,19 @@ class FaceMonitor struct Request { - Request() = default; + Request() = default; Request(int n, bool getImage, bool getTexture) : n(n) , getImage(getImage) , getTexture(getTexture) { - } - - int n = 0; //! Number of frames requested (last n) - bool getImage = false; //! Request an image (typically incurs some overhead) - bool getTexture = false; //! Request a texture (typically no overhead) - - Request & operator |=(const Request &src) + + int n = 0; //! Number of frames requested (last n) + bool getImage = false; //! Request an image (typically incurs some overhead) + bool getTexture = false; //! Request a texture (typically no overhead) + + Request& operator|=(const Request& src) { n = std::max(n, src.n); getTexture |= src.getTexture; @@ -89,7 +87,7 @@ class FaceMonitor return (*this); } }; - + /** * A user defined virtual method callback that should report the number * of frames that should be captured from the FIFO buffer based on the @@ -106,7 +104,7 @@ class FaceMonitor * N is the number of frames requested in the preceding request callback. * @param frames A vector containing the last N consecutive FaceImage objects * @param isInitialized Return true if the FIFO buffer is fully initialized. - */ + */ virtual void grab(const std::vector<FaceImage>& frames, bool isInitialized) = 0; }; diff --git a/src/lib/drishti/hci/ut/FaceMonitorHCITest.h b/src/lib/drishti/hci/ut/FaceMonitorHCITest.h index 240a2dbd..2dfe957c 100644 --- a/src/lib/drishti/hci/ut/FaceMonitorHCITest.h +++ b/src/lib/drishti/hci/ut/FaceMonitorHCITest.h @@ -17,12 +17,12 @@ class FaceMonitorHCITest : public drishti::hci::FaceMonitor /* * API */ - + int m_index = 0; virtual Request request(const Faces& faces, const TimePoint& timeStamp) { - return Request {3, true, true}; + return Request{ 3, true, true }; } virtual void grab(const std::vector<FaceImage>& frames, bool isInitialized) diff --git a/src/lib/drishti/hci/ut/test-drishti-hci.cpp b/src/lib/drishti/hci/ut/test-drishti-hci.cpp index 85498a2f..59616137 100644 --- a/src/lib/drishti/hci/ut/test-drishti-hci.cpp +++ b/src/lib/drishti/hci/ut/test-drishti-hci.cpp @@ -117,14 +117,14 @@ class HCITest : public ::testing::Test m_settings.minTrackHits = 0; // allow all detections for testing m_settings.maxTrackMisses = 1; m_settings.minFaceSeparation = 0.15f; - + m_settings.history = 3; #if defined(DRISHTI_DO_GPU_TESTING) m_context = aglet::GLContext::create(aglet::GLContext::kAuto); #if defined(_WIN32) || defined(_WIN64) - CV_Assert(!glewInit()); -#endif + CV_Assert(!glewInit()); +#endif #endif } @@ -204,7 +204,7 @@ class HCITest : public ::testing::Test { cv::flip(image, image, 1); ogles_gpgpu::FrameInput frame({ image.cols, image.rows }, image.ptr(), true, 0, DFLT_TEXTURE_FORMAT); - + (*detector)(frame); // Wait on face request callback: diff --git a/src/lib/drishti/ml/ObjectDetector.cpp b/src/lib/drishti/ml/ObjectDetector.cpp index 10972724..dc8e431a 100644 --- a/src/lib/drishti/ml/ObjectDetector.cpp +++ b/src/lib/drishti/ml/ObjectDetector.cpp @@ -21,14 +21,13 @@ void ObjectDetector::prune(std::vector& objects, std::vector& { CV_Assert(objects.size() == scores.size()); - // Assum sorted detections: if (objects.size() > 1) { int cutoff = 1; for (int i = 1; i < std::min(m_maxDetectionCount, objects.size()); i++) { - cutoff = i+1; + cutoff = i + 1; if (scores[i] < (scores[0] * m_detectionScorePruneRatio)) { break; diff --git a/src/lib/drishti/ml/XGBoosterImpl.h b/src/lib/drishti/ml/XGBoosterImpl.h index a62822f5..1e345607 100644 --- a/src/lib/drishti/ml/XGBoosterImpl.h +++ b/src/lib/drishti/ml/XGBoosterImpl.h @@ -165,7 +165,7 @@ template <class Archive> void XGBooster::serialize(Archive& ar, const unsigned int version) { drishti_throw_assert(version == 1, "Incorrect XGBooster archive format, please update models"); - + ar& m_impl; } diff --git a/src/lib/drishti/ml/shape_predictor.h b/src/lib/drishti/ml/shape_predictor.h index 5a5b9ae4..e406417f 100644 --- a/src/lib/drishti/ml/shape_predictor.h +++ b/src/lib/drishti/ml/shape_predictor.h @@ -168,7 +168,7 @@ inline static void add32F(const drishti::ml::fshape& a, const fshape& b, fshape& template <typename T> T compute_npd(const T& a, const T& b) { - return 
(a-b)/(a+b+T(1e-6)); + return (a - b) / (a + b + T(1e-6)); //return (a - b)/(a + b); //return ((a + b) == T(0.0)) ? std::numeric_limits::lowest() : (a - b) / (a + b); } @@ -793,7 +793,7 @@ class shape_predictor { impl::create_shape_relative_encoding(initial_shape, pixel_coordinates[i], anchor_idx[i], deltas[i]); } - + m_num_workers = 4; //cv::getNumberOfCPUs(); } @@ -870,21 +870,20 @@ class shape_predictor const unsigned long block_size = std::max(1UL, (num + m_num_workers - 1) / m_num_workers); std::vector block_sums(m_num_workers); std::vector shape_accumulators(m_num_workers); - drishti::core::ParallelHomogeneousLambda harness = [&](int block) - { + drishti::core::ParallelHomogeneousLambda harness = [&](int block) { const unsigned long block_begin = block * block_size; - const unsigned long block_end = std::min(num, block_begin + block_size); + const unsigned long block_end = std::min(num, block_begin + block_size); for (unsigned long i = block_begin; i < block_end; ++i) { - auto &f = forests[iter][i]; + auto& f = forests[iter][i]; add16sAnd16s(shape_accumulators[block], f(feature_pixel_values, Fixed(), m_npd), shape_accumulators[block]); } }; //harness({0,static_cast(num_workers)}); - cv::parallel_for_({0,static_cast(m_num_workers)}, harness); + cv::parallel_for_({ 0, static_cast(m_num_workers) }, harness); - for(auto &s : shape_accumulators) + for (auto& s : shape_accumulators) { add16sAnd16s(shape_accumulator, s, shape_accumulator); } @@ -895,7 +894,7 @@ class shape_predictor add16sAnd16s(shape_accumulator, f(feature_pixel_values, Fixed(), m_npd), shape_accumulator); } #endif - + // fixed -> float active_shape.set_size(shape_accumulator.size()); for (int i = 0; i < shape_accumulator.size(); i++) diff --git a/src/lib/drishti/ml/shape_predictor_archive.h b/src/lib/drishti/ml/shape_predictor_archive.h index 1defff58..f1862a20 100644 --- a/src/lib/drishti/ml/shape_predictor_archive.h +++ b/src/lib/drishti/ml/shape_predictor_archive.h @@ -122,7 +122,7 @@ template void serialize(Archive& ar, drishti::ml::shape_predictor& sp, const unsigned int version) { drishti_throw_assert(version == 4, "Incorrect shape_predictor archive format, please update models"); - + drishti::ml::fshape& initial_shape = sp.initial_shape; std::vector>& forests = sp.forests; std::vector>& anchor_idx = sp.anchor_idx; diff --git a/src/lib/drishti/rcpr/CPR.h b/src/lib/drishti/rcpr/CPR.h index fcc65541..8adf59b3 100644 --- a/src/lib/drishti/rcpr/CPR.h +++ b/src/lib/drishti/rcpr/CPR.h @@ -180,7 +180,7 @@ class CPR : public drishti::ml::ShapeEstimator { core::Field model; core::Field pStar; - core::Field pDstr; + core::Field pDstr; core::Field T; core::Field pStar_; // CPR Verison 2 diff --git a/src/lib/drishti/rcpr/CPRIOArchive.h b/src/lib/drishti/rcpr/CPRIOArchive.h index 09570c40..a81ecbf4 100644 --- a/src/lib/drishti/rcpr/CPRIOArchive.h +++ b/src/lib/drishti/rcpr/CPRIOArchive.h @@ -124,7 +124,7 @@ template void CPR::serialize(Archive& ar, const unsigned int version) { drishti_throw_assert(version == 1, "Incorrect CPR archive format, please update models"); - + ar& cprPrm; ar& regModel; } diff --git a/src/lib/drishti/rcpr/cprTrain.cpp b/src/lib/drishti/rcpr/cprTrain.cpp index 6b9fc23e..54989606 100644 --- a/src/lib/drishti/rcpr/cprTrain.cpp +++ b/src/lib/drishti/rcpr/cprTrain.cpp @@ -272,7 +272,7 @@ int CPR::cprTrain(const ImageMaskPairVec& Is, const EllipseVec& pGtIn, const HVe params.maxDepth = recipe.maxDepth; params.featureSubsample = float(recipe.featureSampleSize) / recipe.featurePoolSize; - xgbdt[i] = 
std::make_shared(params); + xgbdt[i] = std::make_shared(params); xgbdt[i]->train(data, target, recipe.doMask ? mask : MatrixType()); predictions[i].resize(data.size()); diff --git a/src/lib/drishti/rcpr/poseGt.cpp b/src/lib/drishti/rcpr/poseGt.cpp index c947b61c..260111bd 100644 --- a/src/lib/drishti/rcpr/poseGt.cpp +++ b/src/lib/drishti/rcpr/poseGt.cpp @@ -24,7 +24,6 @@ DRISHTI_RCPR_NAMESPACE_BEGIN static Matx33Real getPose(const Vector1d& phi); static std::vector xsToPoints(const Matx33Real& HS, const PointVec& xs); - // function part = createPart( parent, wts ) // % Create single part for model (parent==0 implies root). // if(nargin<2), wts=[0.313 0.342 11.339 13.059 6.998]; end @@ -40,7 +39,7 @@ int createPart(int parent, CPR::Model::Parts& part) CV_Assert(parent == 0); // for now, just one part part.prn = parent; - part.lks = { + part.lks = { static_cast(0.0), static_cast(0.0), static_cast(0.0), @@ -108,7 +107,7 @@ int createModel(int type, CPR::Model& model) // ######## using FtrData = CPR::RegModel::Regs::FtrData; -using FtrResult = CPR::FeaturesResult; +using FtrResult = CPR::FeaturesResult; int featuresComp(const CPR::Model& model, const Vector1d& phi, const ImageMaskPair& Im, const FtrData& ftrData, FtrResult& result, bool useNPD) { const auto& I = Im.getImage(); @@ -118,14 +117,14 @@ int featuresComp(const CPR::Model& model, const Vector1d& phi, const ImageMaskPa Matx33Real HS = getPose(phi); // just single component model for now (don't need multiple parts) const auto& xs = *(ftrData.xs); auto points = xsToPoints(HS, xs); - + // Make sure we avoid out of bounds pixels, somewhat arbitrarily // we can just set these to the top left corner. - for (auto& p: points) + for (auto& p : points) { - if(!cv::Rect({0,0}, I.size()).contains(p)) + if (!cv::Rect({ 0, 0 }, I.size()).contains(p)) { - p = {0.f,0.f}; + p = { 0.f, 0.f }; } } @@ -136,8 +135,8 @@ int featuresComp(const CPR::Model& model, const Vector1d& phi, const ImageMaskPa ftrs.resize(points.size() / 2); for (int j = 0, i = 0; i < points.size(); j++, i += 2) { - double f1 = static_cast(I.at(points[i+0])) / 255.0; - double f2 = static_cast(I.at(points[i+1])) / 255.0; + double f1 = static_cast(I.at(points[i + 0])) / 255.0; + double f2 = static_cast(I.at(points[i + 1])) / 255.0; double d; if (useNPD) // possibly use functor (opt) @@ -159,8 +158,8 @@ int featuresComp(const CPR::Model& model, const Vector1d& phi, const ImageMaskPa auto& mask = result.ftrMask; if (!M.empty()) { - uint8_t m1 = M.at(points[i+0]); - uint8_t m2 = M.at(points[i+1]); + uint8_t m1 = M.at(points[i + 0]); + uint8_t m2 = M.at(points[i + 1]); uint8_t valid = m1 & m2; mask.push_back(valid); @@ -341,7 +340,7 @@ static std::vector xsToPoints(const Matx33Real& HS, const PointVec& for (int i = 0; i < points.size(); i++) { const auto q = HS * cv::Point3f(xs[i].x, xs[i].y, 1.f); - points[i] = {q.x, q.y}; // pure affine (i.e., no /z) + points[i] = { q.x, q.y }; // pure affine (i.e., no /z) } return points; } diff --git a/src/lib/drishti/sensor/Sensor.h b/src/lib/drishti/sensor/Sensor.h index db81590e..cbb2ad8f 100644 --- a/src/lib/drishti/sensor/Sensor.h +++ b/src/lib/drishti/sensor/Sensor.h @@ -53,7 +53,10 @@ class SensorModel struct Extrinsic { Extrinsic() {} - Extrinsic(const cv::Matx33f &R) : R(R) {} + Extrinsic(const cv::Matx33f& R) + : R(R) + { + } cv::Matx33f R; }; diff --git a/src/tests/drishti/CMakeLists.txt b/src/tests/drishti/CMakeLists.txt index 81a46a85..16d3187f 100644 --- a/src/tests/drishti/CMakeLists.txt +++ b/src/tests/drishti/CMakeLists.txt @@ -37,6 
+37,10 @@ if(DRISHTI_BUILD_C_INTERFACE) target_compile_definitions(${test_app} PUBLIC DRISHTI_BUILD_C_INTERFACE=1) endif() +if(GAUZE_ANDROID_USE_EMULATOR) + target_compile_definitions(${test_app} PUBLIC GAUZE_ANDROID_USE_EMULATOR=1) +endif() + target_include_directories(${test_app} PUBLIC "$") set_property(TARGET ${test_app} PROPERTY FOLDER "app/tests")
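With the target_compile_definitions() hook above, GAUZE_ANDROID_USE_EMULATOR reaches the unit tests as an ordinary preprocessor symbol. A short sketch of the sampling policy it selects, mirroring the test-drishti-eye.cpp SetUp hunk (the free-function wrapper name is illustrative, not part of the patch):

#include <vector>

std::vector<int> makeTestWidths()
{
    std::vector<int> widths = { 0 }; // width 0 preserves the empty-image case
#if defined(GAUZE_ANDROID_USE_EMULATOR)
    // Sparse geometric sampling on the (slow) Android emulator: 16, 32, 64, 128, 256
    for (int width = 16; width <= 256; width *= 2)
    {
        widths.push_back(width);
    }
#else
    // Dense sampling everywhere else: every integer width in [16, 256]
    for (int width = 16; width <= 256; width++)
    {
        widths.push_back(width);
    }
#endif
    return widths;
}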