diff --git a/.gitignore b/.gitignore
index 259148f..dfb8009 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,12 @@
 *.exe
 *.out
 *.app
+
+# cmake/hunter
+_*/
+
+# osx
+.DS_Store
+
+# emacs
+*~
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..2a84718
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,24 @@
+cmake_minimum_required(VERSION 3.8)
+
+option(HUNTER_KEEP_PACKAGE_SOURCES "Keep third-party package sources after install" ON)
+
+include("cmake/HunterGate.cmake")
+
+HunterGate(
+  URL "https://github.com/ruslo/hunter/archive/v0.22.16.tar.gz"
+  SHA1 "84153076a3cebf4869c904fa5c93ea309386b583"
+  LOCAL # cmake/Hunter/config.cmake
+)
+
+project(dlib_dnn_mmod_detect VERSION 0.0.1)
+
+hunter_add_package(dlib)
+find_package(dlib CONFIG REQUIRED)
+
+set(app_list dnn_mmod_find_thing_ex dnn_mmod_train_find_thing_ex)
+
+foreach(name ${app_list})
+  add_executable(${name} ${name}.cpp dnn_mmod_sample_detector.h)
+  target_link_libraries(${name} PUBLIC dlib::dlib)
+  install(TARGETS ${name} DESTINATION bin)
+endforeach()
diff --git a/cmake/Hunter/config.cmake b/cmake/Hunter/config.cmake
new file mode 100644
index 0000000..e91b773
--- /dev/null
+++ b/cmake/Hunter/config.cmake
@@ -0,0 +1,18 @@
+set(dlib_cmake_args
+  DLIB_HEADER_ONLY=OFF # header-only is the package default (used by previous builds); build the compiled library here
+  DLIB_ENABLE_ASSERTS=OFF # must be set explicitly, otherwise Debug and Release builds differ and the package config won't match
+  DLIB_NO_GUI_SUPPORT=ON
+  DLIB_ISO_CPP_ONLY=OFF # needed for directory navigation code (loading training data)
+  DLIB_LINK_WITH_SQLITE3=OFF
+  DLIB_USE_BLAS=OFF
+  DLIB_USE_LAPACK=OFF
+  DLIB_USE_CUDA=ON
+  DLIB_PNG_SUPPORT=ON
+  DLIB_JPEG_SUPPORT=ON # needed by save_jpeg() in the find example; see https://github.com/hunter-packages/dlib/blob/eb79843227d0be45e1efa68ef9cc6cc187338c8e/dlib/CMakeLists.txt#L422-L432
+  DLIB_GIF_SUPPORT=OFF
+  DLIB_USE_MKL_FFT=OFF
+  HUNTER_INSTALL_LICENSE_FILES=dlib/LICENSE.txt
+)
+
+hunter_config(dlib VERSION ${HUNTER_dlib_VERSION} CMAKE_ARGS ${dlib_cmake_args} DUMMY_SOURCES=2)
diff --git a/cmake/HunterGate.cmake b/cmake/HunterGate.cmake
new file mode 100644
index 0000000..c24c0e5
--- /dev/null
+++ b/cmake/HunterGate.cmake
@@ -0,0 +1,543 @@
+# Copyright (c) 2013-2017, Ruslan Baratov
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +# This is a gate file to Hunter package manager. +# Include this file using `include` command and add package you need, example: +# +# cmake_minimum_required(VERSION 3.0) +# +# include("cmake/HunterGate.cmake") +# HunterGate( +# URL "https://github.com/path/to/hunter/archive.tar.gz" +# SHA1 "798501e983f14b28b10cda16afa4de69eee1da1d" +# ) +# +# project(MyProject) +# +# hunter_add_package(Foo) +# hunter_add_package(Boo COMPONENTS Bar Baz) +# +# Projects: +# * https://github.com/hunter-packages/gate/ +# * https://github.com/ruslo/hunter + +option(HUNTER_ENABLED "Enable Hunter package manager support" ON) +if(HUNTER_ENABLED) + if(CMAKE_VERSION VERSION_LESS "3.0") + message(FATAL_ERROR "At least CMake version 3.0 required for hunter dependency management." + " Update CMake or set HUNTER_ENABLED to OFF.") + endif() +endif() + +include(CMakeParseArguments) # cmake_parse_arguments + +option(HUNTER_STATUS_PRINT "Print working status" ON) +option(HUNTER_STATUS_DEBUG "Print a lot info" OFF) +option(HUNTER_TLS_VERIFY "Enable/disable TLS certificate checking on downloads" ON) + +set(HUNTER_WIKI "https://github.com/ruslo/hunter/wiki") + +function(hunter_gate_status_print) + foreach(print_message ${ARGV}) + if(HUNTER_STATUS_PRINT OR HUNTER_STATUS_DEBUG) + message(STATUS "[hunter] ${print_message}") + endif() + endforeach() +endfunction() + +function(hunter_gate_status_debug) + foreach(print_message ${ARGV}) + if(HUNTER_STATUS_DEBUG) + string(TIMESTAMP timestamp) + message(STATUS "[hunter *** DEBUG *** ${timestamp}] ${print_message}") + endif() + endforeach() +endfunction() + +function(hunter_gate_wiki wiki_page) + message("------------------------------ WIKI -------------------------------") + message(" ${HUNTER_WIKI}/${wiki_page}") + message("-------------------------------------------------------------------") + message("") + message(FATAL_ERROR "") +endfunction() + +function(hunter_gate_internal_error) + message("") + foreach(print_message ${ARGV}) + message("[hunter ** INTERNAL **] ${print_message}") + endforeach() + message("[hunter ** INTERNAL **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") + message("") + hunter_gate_wiki("error.internal") +endfunction() + +function(hunter_gate_fatal_error) + cmake_parse_arguments(hunter "" "WIKI" "" "${ARGV}") + string(COMPARE EQUAL "${hunter_WIKI}" "" have_no_wiki) + if(have_no_wiki) + hunter_gate_internal_error("Expected wiki") + endif() + message("") + foreach(x ${hunter_UNPARSED_ARGUMENTS}) + message("[hunter ** FATAL ERROR **] ${x}") + endforeach() + message("[hunter ** FATAL ERROR **] [Directory:${CMAKE_CURRENT_LIST_DIR}]") + message("") + hunter_gate_wiki("${hunter_WIKI}") +endfunction() + +function(hunter_gate_user_error) + hunter_gate_fatal_error(${ARGV} WIKI "error.incorrect.input.data") +endfunction() + +function(hunter_gate_self root version sha1 result) + string(COMPARE EQUAL "${root}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("root is empty") + endif() + + string(COMPARE EQUAL "${version}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("version is empty") + endif() + + string(COMPARE EQUAL "${sha1}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("sha1 is empty") + endif() + + string(SUBSTRING "${sha1}" 0 7 archive_id) + + if(EXISTS "${root}/cmake/Hunter") + set(hunter_self "${root}") + else() + set( + hunter_self + "${root}/_Base/Download/Hunter/${version}/${archive_id}/Unpacked" + ) + endif() + + set("${result}" "${hunter_self}" PARENT_SCOPE) +endfunction() + +# Set HUNTER_GATE_ROOT cmake variable to suitable value. 
+function(hunter_gate_detect_root) + # Check CMake variable + string(COMPARE NOTEQUAL "${HUNTER_ROOT}" "" not_empty) + if(not_empty) + set(HUNTER_GATE_ROOT "${HUNTER_ROOT}" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT detected by cmake variable") + return() + endif() + + # Check environment variable + string(COMPARE NOTEQUAL "$ENV{HUNTER_ROOT}" "" not_empty) + if(not_empty) + set(HUNTER_GATE_ROOT "$ENV{HUNTER_ROOT}" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT detected by environment variable") + return() + endif() + + # Check HOME environment variable + string(COMPARE NOTEQUAL "$ENV{HOME}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{HOME}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug("HUNTER_ROOT set using HOME environment variable") + return() + endif() + + # Check SYSTEMDRIVE and USERPROFILE environment variable (windows only) + if(WIN32) + string(COMPARE NOTEQUAL "$ENV{SYSTEMDRIVE}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{SYSTEMDRIVE}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug( + "HUNTER_ROOT set using SYSTEMDRIVE environment variable" + ) + return() + endif() + + string(COMPARE NOTEQUAL "$ENV{USERPROFILE}" "" result) + if(result) + set(HUNTER_GATE_ROOT "$ENV{USERPROFILE}/.hunter" PARENT_SCOPE) + hunter_gate_status_debug( + "HUNTER_ROOT set using USERPROFILE environment variable" + ) + return() + endif() + endif() + + hunter_gate_fatal_error( + "Can't detect HUNTER_ROOT" + WIKI "error.detect.hunter.root" + ) +endfunction() + +macro(hunter_gate_lock dir) + if(NOT HUNTER_SKIP_LOCK) + if("${CMAKE_VERSION}" VERSION_LESS "3.2") + hunter_gate_fatal_error( + "Can't lock, upgrade to CMake 3.2 or use HUNTER_SKIP_LOCK" + WIKI "error.can.not.lock" + ) + endif() + hunter_gate_status_debug("Locking directory: ${dir}") + file(LOCK "${dir}" DIRECTORY GUARD FUNCTION) + hunter_gate_status_debug("Lock done") + endif() +endmacro() + +function(hunter_gate_download dir) + string( + COMPARE + NOTEQUAL + "$ENV{HUNTER_DISABLE_AUTOINSTALL}" + "" + disable_autoinstall + ) + if(disable_autoinstall AND NOT HUNTER_RUN_INSTALL) + hunter_gate_fatal_error( + "Hunter not found in '${dir}'" + "Set HUNTER_RUN_INSTALL=ON to auto-install it from '${HUNTER_GATE_URL}'" + "Settings:" + " HUNTER_ROOT: ${HUNTER_GATE_ROOT}" + " HUNTER_SHA1: ${HUNTER_GATE_SHA1}" + WIKI "error.run.install" + ) + endif() + string(COMPARE EQUAL "${dir}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("Empty 'dir' argument") + endif() + + string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("HUNTER_GATE_SHA1 empty") + endif() + + string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" is_bad) + if(is_bad) + hunter_gate_internal_error("HUNTER_GATE_URL empty") + endif() + + set(done_location "${dir}/DONE") + set(sha1_location "${dir}/SHA1") + + set(build_dir "${dir}/Build") + set(cmakelists "${dir}/CMakeLists.txt") + + hunter_gate_lock("${dir}") + if(EXISTS "${done_location}") + # while waiting for lock other instance can do all the job + hunter_gate_status_debug("File '${done_location}' found, skip install") + return() + endif() + + file(REMOVE_RECURSE "${build_dir}") + file(REMOVE_RECURSE "${cmakelists}") + + file(MAKE_DIRECTORY "${build_dir}") # check directory permissions + + # Disabling languages speeds up a little bit, reduces noise in the output + # and avoids path too long windows error + file( + WRITE + "${cmakelists}" + "cmake_minimum_required(VERSION 3.0)\n" + "project(HunterDownload LANGUAGES NONE)\n" + "include(ExternalProject)\n" + 
"ExternalProject_Add(\n" + " Hunter\n" + " URL\n" + " \"${HUNTER_GATE_URL}\"\n" + " URL_HASH\n" + " SHA1=${HUNTER_GATE_SHA1}\n" + " DOWNLOAD_DIR\n" + " \"${dir}\"\n" + " TLS_VERIFY\n" + " ${HUNTER_TLS_VERIFY}\n" + " SOURCE_DIR\n" + " \"${dir}/Unpacked\"\n" + " CONFIGURE_COMMAND\n" + " \"\"\n" + " BUILD_COMMAND\n" + " \"\"\n" + " INSTALL_COMMAND\n" + " \"\"\n" + ")\n" + ) + + if(HUNTER_STATUS_DEBUG) + set(logging_params "") + else() + set(logging_params OUTPUT_QUIET) + endif() + + hunter_gate_status_debug("Run generate") + + # Need to add toolchain file too. + # Otherwise on Visual Studio + MDD this will fail with error: + # "Could not find an appropriate version of the Windows 10 SDK installed on this machine" + if(EXISTS "${CMAKE_TOOLCHAIN_FILE}") + get_filename_component(absolute_CMAKE_TOOLCHAIN_FILE "${CMAKE_TOOLCHAIN_FILE}" ABSOLUTE) + set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=${absolute_CMAKE_TOOLCHAIN_FILE}") + else() + # 'toolchain_arg' can't be empty + set(toolchain_arg "-DCMAKE_TOOLCHAIN_FILE=") + endif() + + string(COMPARE EQUAL "${CMAKE_MAKE_PROGRAM}" "" no_make) + if(no_make) + set(make_arg "") + else() + # Test case: remove Ninja from PATH but set it via CMAKE_MAKE_PROGRAM + set(make_arg "-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}") + endif() + + execute_process( + COMMAND + "${CMAKE_COMMAND}" + "-H${dir}" + "-B${build_dir}" + "-G${CMAKE_GENERATOR}" + "${toolchain_arg}" + ${make_arg} + WORKING_DIRECTORY "${dir}" + RESULT_VARIABLE download_result + ${logging_params} + ) + + if(NOT download_result EQUAL 0) + hunter_gate_internal_error("Configure project failed") + endif() + + hunter_gate_status_print( + "Initializing Hunter workspace (${HUNTER_GATE_SHA1})" + " ${HUNTER_GATE_URL}" + " -> ${dir}" + ) + execute_process( + COMMAND "${CMAKE_COMMAND}" --build "${build_dir}" + WORKING_DIRECTORY "${dir}" + RESULT_VARIABLE download_result + ${logging_params} + ) + + if(NOT download_result EQUAL 0) + hunter_gate_internal_error("Build project failed") + endif() + + file(REMOVE_RECURSE "${build_dir}") + file(REMOVE_RECURSE "${cmakelists}") + + file(WRITE "${sha1_location}" "${HUNTER_GATE_SHA1}") + file(WRITE "${done_location}" "DONE") + + hunter_gate_status_debug("Finished") +endfunction() + +# Must be a macro so master file 'cmake/Hunter' can +# apply all variables easily just by 'include' command +# (otherwise PARENT_SCOPE magic needed) +macro(HunterGate) + if(HUNTER_GATE_DONE) + # variable HUNTER_GATE_DONE set explicitly for external project + # (see `hunter_download`) + set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) + endif() + + # First HunterGate command will init Hunter, others will be ignored + get_property(_hunter_gate_done GLOBAL PROPERTY HUNTER_GATE_DONE SET) + + if(NOT HUNTER_ENABLED) + # Empty function to avoid error "unknown function" + function(hunter_add_package) + endfunction() + + set( + _hunter_gate_disabled_mode_dir + "${CMAKE_CURRENT_LIST_DIR}/cmake/Hunter/disabled-mode" + ) + if(EXISTS "${_hunter_gate_disabled_mode_dir}") + hunter_gate_status_debug( + "Adding \"disabled-mode\" modules: ${_hunter_gate_disabled_mode_dir}" + ) + list(APPEND CMAKE_PREFIX_PATH "${_hunter_gate_disabled_mode_dir}") + endif() + elseif(_hunter_gate_done) + hunter_gate_status_debug("Secondary HunterGate (use old settings)") + hunter_gate_self( + "${HUNTER_CACHED_ROOT}" + "${HUNTER_VERSION}" + "${HUNTER_SHA1}" + _hunter_self + ) + include("${_hunter_self}/cmake/Hunter") + else() + set(HUNTER_GATE_LOCATION "${CMAKE_CURRENT_LIST_DIR}") + + string(COMPARE NOTEQUAL "${PROJECT_NAME}" "" 
_have_project_name) + if(_have_project_name) + hunter_gate_fatal_error( + "Please set HunterGate *before* 'project' command. " + "Detected project: ${PROJECT_NAME}" + WIKI "error.huntergate.before.project" + ) + endif() + + cmake_parse_arguments( + HUNTER_GATE "LOCAL" "URL;SHA1;GLOBAL;FILEPATH" "" ${ARGV} + ) + + string(COMPARE EQUAL "${HUNTER_GATE_SHA1}" "" _empty_sha1) + string(COMPARE EQUAL "${HUNTER_GATE_URL}" "" _empty_url) + string( + COMPARE + NOTEQUAL + "${HUNTER_GATE_UNPARSED_ARGUMENTS}" + "" + _have_unparsed + ) + string(COMPARE NOTEQUAL "${HUNTER_GATE_GLOBAL}" "" _have_global) + string(COMPARE NOTEQUAL "${HUNTER_GATE_FILEPATH}" "" _have_filepath) + + if(_have_unparsed) + hunter_gate_user_error( + "HunterGate unparsed arguments: ${HUNTER_GATE_UNPARSED_ARGUMENTS}" + ) + endif() + if(_empty_sha1) + hunter_gate_user_error("SHA1 suboption of HunterGate is mandatory") + endif() + if(_empty_url) + hunter_gate_user_error("URL suboption of HunterGate is mandatory") + endif() + if(_have_global) + if(HUNTER_GATE_LOCAL) + hunter_gate_user_error("Unexpected LOCAL (already has GLOBAL)") + endif() + if(_have_filepath) + hunter_gate_user_error("Unexpected FILEPATH (already has GLOBAL)") + endif() + endif() + if(HUNTER_GATE_LOCAL) + if(_have_global) + hunter_gate_user_error("Unexpected GLOBAL (already has LOCAL)") + endif() + if(_have_filepath) + hunter_gate_user_error("Unexpected FILEPATH (already has LOCAL)") + endif() + endif() + if(_have_filepath) + if(_have_global) + hunter_gate_user_error("Unexpected GLOBAL (already has FILEPATH)") + endif() + if(HUNTER_GATE_LOCAL) + hunter_gate_user_error("Unexpected LOCAL (already has FILEPATH)") + endif() + endif() + + hunter_gate_detect_root() # set HUNTER_GATE_ROOT + + # Beautify path, fix probable problems with windows path slashes + get_filename_component( + HUNTER_GATE_ROOT "${HUNTER_GATE_ROOT}" ABSOLUTE + ) + hunter_gate_status_debug("HUNTER_ROOT: ${HUNTER_GATE_ROOT}") + if(NOT HUNTER_ALLOW_SPACES_IN_PATH) + string(FIND "${HUNTER_GATE_ROOT}" " " _contain_spaces) + if(NOT _contain_spaces EQUAL -1) + hunter_gate_fatal_error( + "HUNTER_ROOT (${HUNTER_GATE_ROOT}) contains spaces." + "Set HUNTER_ALLOW_SPACES_IN_PATH=ON to skip this error" + "(Use at your own risk!)" + WIKI "error.spaces.in.hunter.root" + ) + endif() + endif() + + string( + REGEX + MATCH + "[0-9]+\\.[0-9]+\\.[0-9]+[-_a-z0-9]*" + HUNTER_GATE_VERSION + "${HUNTER_GATE_URL}" + ) + string(COMPARE EQUAL "${HUNTER_GATE_VERSION}" "" _is_empty) + if(_is_empty) + set(HUNTER_GATE_VERSION "unknown") + endif() + + hunter_gate_self( + "${HUNTER_GATE_ROOT}" + "${HUNTER_GATE_VERSION}" + "${HUNTER_GATE_SHA1}" + _hunter_self + ) + + set(_master_location "${_hunter_self}/cmake/Hunter") + if(EXISTS "${HUNTER_GATE_ROOT}/cmake/Hunter") + # Hunter downloaded manually (e.g. by 'git clone') + set(_unused "xxxxxxxxxx") + set(HUNTER_GATE_SHA1 "${_unused}") + set(HUNTER_GATE_VERSION "${_unused}") + else() + get_filename_component(_archive_id_location "${_hunter_self}/.." 
ABSOLUTE) + set(_done_location "${_archive_id_location}/DONE") + set(_sha1_location "${_archive_id_location}/SHA1") + + # Check Hunter already downloaded by HunterGate + if(NOT EXISTS "${_done_location}") + hunter_gate_download("${_archive_id_location}") + endif() + + if(NOT EXISTS "${_done_location}") + hunter_gate_internal_error("hunter_gate_download failed") + endif() + + if(NOT EXISTS "${_sha1_location}") + hunter_gate_internal_error("${_sha1_location} not found") + endif() + file(READ "${_sha1_location}" _sha1_value) + string(COMPARE EQUAL "${_sha1_value}" "${HUNTER_GATE_SHA1}" _is_equal) + if(NOT _is_equal) + hunter_gate_internal_error( + "Short SHA1 collision:" + " ${_sha1_value} (from ${_sha1_location})" + " ${HUNTER_GATE_SHA1} (HunterGate)" + ) + endif() + if(NOT EXISTS "${_master_location}") + hunter_gate_user_error( + "Master file not found:" + " ${_master_location}" + "try to update Hunter/HunterGate" + ) + endif() + endif() + include("${_master_location}") + set_property(GLOBAL PROPERTY HUNTER_GATE_DONE YES) + endif() +endmacro() diff --git a/dnn_mmod_find_thing_ex.cpp b/dnn_mmod_find_thing_ex.cpp new file mode 100644 index 0000000..f3c1b4c --- /dev/null +++ b/dnn_mmod_find_thing_ex.cpp @@ -0,0 +1,279 @@ +// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt +/* + + Modified from original source here: + + https://github.com/davisking/dlib/blob/master/examples/dnn_mmod_find_cars2_ex.cpp + + This example shows how to run a CNN based eye detector using dlib. The + example loads a pretrained model and uses it to find eyes in an image. + It does not rely on a first stage face detector step. This can be useful + in case where the iamges contain partial faces due to FOV, cropping or occlusion. + + The model used by this example was trained by dnn_mmod_train_find_things_ex.cpp + example. Also, since this is a CNN, you really should use a GPU to get the + best execution speed. For instance, when run on a NVIDIA 1080ti, this detector + runs at 98fps when run on the provided test image. That's more than an order + of magnitude faster than when run on the CPU. + + Users who are just learning about dlib's deep learning API should read + the dnn_introduction_ex.cpp and dnn_introduction2_ex.cpp examples to learn + how the API works. For an introduction to the object detection method you + should read dnn_mmod_ex.cpp. 
+
+*/
+
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <iterator>
+#include <thread>
+
+#include <dlib/cmd_line_parser.h>
+#include <dlib/dnn.h>
+#include <dlib/image_io.h>
+#include <dlib/image_transforms.h>
+#include <dlib/pipe.h>
+
+#if !defined(DLIB_NO_GUI_SUPPORT)
+#  include <dlib/gui_widgets.h>
+#endif
+
+using namespace std;
+using namespace dlib;
+
+#include "dnn_mmod_sample_detector.h"
+
+// Helper so std::istream_iterator extracts whole lines; see https://stackoverflow.com/a/1567703
+class Line
+{
+    std::string data;
+
+public:
+    friend std::istream& operator>>(std::istream& is, Line& l)
+    {
+        std::getline(is, l.data);
+        return is;
+    }
+    operator std::string() const { return data; }
+};
+
+// Expand the input argument to a list of image filenames: either the single image given,
+// or, if the argument is a *.txt file, one filename per line of that file.
+static void expand(const std::string& filename, std::vector<std::string>& filenames)
+{
+    filenames = { filename };
+    if (filename.find(".txt") != std::string::npos)
+    {
+        std::ifstream ifs(filename);
+        if (ifs)
+        {
+            filenames.clear();
+            std::copy(std::istream_iterator<Line>(ifs), std::istream_iterator<Line>(), std::back_inserter(filenames));
+        }
+        else
+        {
+            throw std::runtime_error("Unable to open file: " + filename);
+        }
+    }
+}
+
+// Return the filename without directory and without extension.
+static std::string basename(const std::string& name, const std::string& ext=".")
+{
+    size_t pos = name.rfind("/");
+
+    if (pos != std::string::npos)
+    {
+        pos += 1;
+    }
+    else
+    {
+        pos = 0;
+    }
+
+    std::string base = name.substr(pos);
+    return base.substr(0, std::min(base.size(), base.rfind(ext)));
+}
+
+// ----------------------------------------------------------------------------------------
+
+int main(int argc, char **argv) try
+{
+    command_line_parser parser;
+    parser.add_option("h", "Display this help message.");
+    parser.add_option("i", "Input image filename, or list of filenames in a file with a *.txt extension.", 1);
+    parser.add_option("m", "Detector model weights (*.dat); the network layout must match dnn_mmod_sample_detector.h.", 1);
+    parser.add_option("o", "Output directory for annotated images.", 1);
+
+    parser.parse(argc, argv);
+
+    if (parser.option("h") || (argc == 1))
+    {
+        cout << "Usage: dnn_mmod_find_thing_ex -i <image or list.txt> -m <model.dat> [-o <output directory>]\n";
+        parser.print_options();
+        return EXIT_SUCCESS;
+    }
+
+    if(!parser.option("i"))
+    {
+        std::cout << "Must specify input image (or *.txt list of images)" << std::endl;
+        return EXIT_SUCCESS;
+    }
+
+    std::string in_file = parser.option("i").argument();
+    std::vector<std::string> filenames;
+    expand(in_file, filenames);
+
+    if(!parser.option("m"))
+    {
+        std::cout << "Must specify network model weights (*.dat)" << std::endl;
+        return EXIT_SUCCESS;
+    }
+
+    const std::string network_filename = parser.option("m").argument();
+
+    std::string output_directory;
+    if(parser.option("o"))
+    {
+        output_directory = parser.option("o").argument();
+    }
+
+    net_type net;
+    deserialize(network_filename) >> net;
+
+    dlib::pipe<std::size_t> jobs(filenames.size());
+    for(std::size_t i = 0; i < filenames.size(); i++)
+    {
+        auto index = i; // enqueue() takes a non-const reference and may swap out its argument, so pass a copy
+        jobs.enqueue(index);
+    }
+
+    struct fs_image
+    {
+        std::size_t index;
+        matrix<rgb_pixel> image;
+        bool sentinel;
+    };
+
+    dlib::pipe<fs_image> images(4);
+
+    // Each loader thread pulls indices from the 'jobs' pipe, loads the corresponding image,
+    // and pushes it into the bounded 'images' pipe.  When the job queue is empty the thread
+    // pushes a sentinel so the consumer knows how many loaders have finished.
+    auto data_loader = [&images, &jobs, &filenames](int value)
+    {
+        std::size_t index;
+        while(jobs.dequeue_or_timeout(index, 0))
+        {
+            try
+            {
+                fs_image image { index };
+                load_image(image.image, filenames[image.index]);
+                images.enqueue(image);
+            }
+            catch(std::exception& e)
+            {
+                cerr << e.what() << endl;
+            }
+        }
+
+        fs_image sentinel { 0, {}, true };
+        images.enqueue(sentinel);
+
+        cout << "DONE" << std::endl;
+    };
+
+    std::thread data_loader1([data_loader](){ data_loader(1); });
+    std::thread data_loader2([data_loader](){ data_loader(2); });
+    std::thread data_loader3([data_loader](){ data_loader(3); });
+    std::thread data_loader4([data_loader](){ data_loader(4); });
+
+    int sentinels = 0;
+
+    // Run the network on a single thread.
+    fs_image image;
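+
+    // Illustrative note (added for clarity, not part of the original example): each processed
+    // image produces one line on stdout of the form
+    //     <index> <filename> <num detections> [<left> <top> <width> <height> ...]
+    // so a hypothetical run over a list file might look like:
+    //
+    //     ./dnn_mmod_find_thing_ex -m detector.dat -i images.txt -o /tmp/annotated
+    //     0 images/frame_000.png 2 118 64 41 23 305 71 44 25
+    //     1 images/frame_001.png 0
+    //
+    // The paths and numbers above are placeholders.  Results are printed in the order images
+    // come out of the pipe (not necessarily input order), and each loader thread prints
+    // "DONE" when it finishes.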
+    while(images.dequeue(image))
+    {
+        if(image.sentinel)
+        {
+            // Stop once every loader thread has signalled completion; otherwise skip the
+            // empty sentinel record and keep consuming.
+            if(++sentinels == 4)
+            {
+                break;
+            }
+            continue;
+        }
+
+#if !defined(DLIB_NO_GUI_SUPPORT)
+        image_window win;
+        win.set_image(image.image);
+#endif
+
+        std::string filename = filenames[image.index];
+
+        // Run the detector on the image and show us the output.
+        std::vector<mmod_rect> detections;
+        try
+        {
+            detections = net(image.image);
+        }
+        catch(std::exception &e)
+        {
+            cerr << e.what() << std::endl;
+            cerr << "Skipping image ..." << filename << std::endl;
+            continue;
+        }
+
+        std::cout << image.index << ' ' << filename << ' ' << detections.size();
+        for (const auto& d : detections)
+        {
+            const auto &roi = d.rect;
+            std::cout << ' ' << roi.left() << ' ' << roi.top() << ' ' << roi.width() << ' ' << roi.height();
+
+            if(!output_directory.empty())
+            {
+                draw_rectangle(image.image, roi, rgb_pixel(0,255,0), 4);
+            }
+
+#if !defined(DLIB_NO_GUI_SUPPORT)
+            win.add_overlay(roi, rgb_pixel(255,0,0));
+#endif
+        }
+        std::cout << std::endl;
+
+        if(!output_directory.empty())
+        {
+            std::string output_filename;
+            output_filename += output_directory;
+            output_filename += "/";
+            output_filename += basename(filename);
+            output_filename += ".jpg";
+            try
+            {
+                save_jpeg(image.image, output_filename);
+            }
+            catch(image_save_error &e)
+            {
+                cerr << e.what() << endl;
+                cerr << "Failed to write image at path: " << output_filename << std::endl;
+            }
+        }
+    }
+
+    jobs.disable();
+    images.disable();
+
+    data_loader1.join();
+    data_loader2.join();
+    data_loader3.join();
+    data_loader4.join();
+
+    return EXIT_SUCCESS;
+}
+catch(image_load_error& e)
+{
+    cerr << e.what() << endl;
+    cerr << "Failed to load an input image.  Check the paths given with the -i option." << endl;
+}
+catch(serialization_error& e)
+{
+    cerr << e.what() << endl;
+    cerr << "The correct model file can be obtained from: http://dlib.net/files/mmod_rear_end_vehicle_detector.dat.bz2" << endl;
+}
+catch(std::exception& e)
+{
+    cerr << e.what() << endl;
+}
diff --git a/dnn_mmod_sample_detector.h b/dnn_mmod_sample_detector.h
new file mode 100644
index 0000000..cc69c9d
--- /dev/null
+++ b/dnn_mmod_sample_detector.h
@@ -0,0 +1,10 @@
+#ifndef __dnn_mmod_sample_detector_h__
+#define __dnn_mmod_sample_detector_h__
+
+// Network layout shared by the training and detection programs.  The template arguments
+// below were reconstructed to match the dlib examples these programs were modified from
+// (dnn_mmod_train_find_cars_ex.cpp / dnn_mmod_find_cars2_ex.cpp); adjust them if your
+// layout differs.  This header assumes <dlib/dnn.h> and `using namespace dlib;` appear
+// before it is included.
+template <long num_filters, typename SUBNET> using con5d = con<num_filters,5,5,2,2,SUBNET>;
+template <long num_filters, typename SUBNET> using con5 = con<num_filters,5,5,1,1,SUBNET>;
+template <typename SUBNET> using downsampler = relu<bn_con<con5d<32, relu<bn_con<con5d<32, relu<bn_con<con5d<16,SUBNET>>>>>>>>>;
+template <typename SUBNET> using rcon5 = relu<bn_con<con5<55,SUBNET>>>;
+using net_type = loss_mmod<con<1,9,9,1,1,rcon5<rcon5<rcon5<downsampler<input_rgb_image_pyramid<pyramid_down<6>>>>>>>>;
+
+#endif // __dnn_mmod_sample_detector_h__
diff --git a/dnn_mmod_train_find_thing_ex.cpp b/dnn_mmod_train_find_thing_ex.cpp
new file mode 100644
index 0000000..2802932
--- /dev/null
+++ b/dnn_mmod_train_find_thing_ex.cpp
@@ -0,0 +1,450 @@
+// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
+/*
+
+    Modified from original source here:
+
+    https://github.com/davisking/dlib/blob/master/examples/dnn_mmod_train_find_cars_ex.cpp
+
+    This example shows how to train a CNN based object detector using dlib's
+    loss_mmod loss layer.  This loss layer implements the Max-Margin Object
+    Detection loss as described in the paper:
+        Max-Margin Object Detection by Davis E. King (http://arxiv.org/abs/1502.00046).
+    This is the same loss used by the popular SVM+HOG object detector in dlib
+    (see fhog_object_detector_ex.cpp) except here we replace the HOG features
+    with a CNN and train the entire detector end-to-end.  This allows us to make
+    much more powerful detectors.
+
+    It would be a good idea to become familiar with dlib's DNN tooling before reading this
+    example.  So you should read dnn_introduction_ex.cpp and dnn_introduction2_ex.cpp
+    before reading this example program.  You should also read the introductory DNN+MMOD
+    example dnn_mmod_ex.cpp as well before proceeding.
+
+    This example is essentially a more complex version of dnn_mmod_ex.cpp.  In it we train
+    a detector that finds eyes (without requiring face detection).  I will also discuss some
+    aspects of data preparation useful when training this kind of detector.
+
+*/
+
+
+#include <iostream>
+#include <dlib/dnn.h>
+#include <dlib/data_io.h>
+#include <dlib/image_transforms.h>
+
+#if !defined(DLIB_NO_GUI_SUPPORT)
+#  include <dlib/gui_widgets.h>
+#endif
+
+using namespace std;
+using namespace dlib;
+
+#include "dnn_mmod_sample_detector.h"
+
+// ----------------------------------------------------------------------------------------
+
+int ignore_overlapped_boxes(
+    std::vector<mmod_rect>& boxes,
+    const test_box_overlap& overlaps
+)
+/*!
+    ensures
+        - Whenever two rectangles in boxes overlap, according to overlaps(), we set the
+          smallest box to ignore.
+        - returns the number of newly ignored boxes.
+!*/
+{
+    int num_ignored = 0;
+    for (size_t i = 0; i < boxes.size(); ++i)
+    {
+        if (boxes[i].ignore)
+            continue;
+        for (size_t j = i+1; j < boxes.size(); ++j)
+        {
+            if (boxes[j].ignore)
+                continue;
+            if (overlaps(boxes[i], boxes[j]))
+            {
+                ++num_ignored;
+                if(boxes[i].rect.area() < boxes[j].rect.area())
+                    boxes[i].ignore = true;
+                else
+                    boxes[j].ignore = true;
+            }
+        }
+    }
+    return num_ignored;
+}
+
+// ----------------------------------------------------------------------------------------
+
+int main(int argc, char** argv) try
+{
+    if (argc != 2)
+    {
+        cout << "Give the path to a folder containing training.xml and testing.xml files." << endl;
+        cout << "This example program is specifically designed to run on the dlib vehicle " << endl;
+        cout << "detection dataset, which is available at this URL: " << endl;
+        cout << "   http://dlib.net/files/data/dlib_rear_end_vehicles_v1.tar" << endl;
+        cout << endl;
+        cout << "So download that dataset, extract it somewhere, and then run this program" << endl;
+        cout << "with the dlib_rear_end_vehicles folder as an argument.  E.g. if you extract" << endl;
+        cout << "the dataset to the current folder then you should run this example program" << endl;
+        cout << "by typing: " << endl;
+        cout << "   ./dnn_mmod_train_find_thing_ex dlib_rear_end_vehicles" << endl;
+        cout << endl;
+        cout << "It takes about a day to finish if run on a high end GPU like a 1080ti." << endl;
+        cout << endl;
+        return 0;
+    }
+    const std::string data_directory = argv[1];
+
+    std::vector<matrix<rgb_pixel>> images_train, images_test;
+    std::vector<std::vector<mmod_rect>> boxes_train, boxes_test;
+    load_image_dataset(images_train, boxes_train, data_directory+"/training.xml");
+    load_image_dataset(images_test, boxes_test, data_directory+"/testing.xml");
+
+    // When I was creating the dlib vehicle detection dataset I had to label all the things
+    // in each image.  MMOD requires all things to be labeled, since any unlabeled part of an
+    // image is implicitly assumed to be not a thing, and the algorithm will use it as
+    // negative training data.  So every thing must be labeled, either with a normal
+    // rectangle or an "ignore" rectangle that tells MMOD to simply ignore it (i.e. neither
+    // treat it as a thing to detect nor as negative training data).
+    //
+    // In our present case, many images contain very tiny things in the distance, ones that
+    // are essentially just dark smudges.
It's not reasonable to expect the CNN + // architecture we defined to detect such vehicles. However, I erred on the side of + // having more complete annotations when creating the dataset. So when I labeled these + // images I labeled many of these really difficult cases as vehicles to detect. + // + // So the first thing we are going to do is clean up our dataset a little bit. In + // particular, we are going to mark boxes smaller than 35*35 pixels as ignore since + // only really small and blurry things appear at those sizes. We will also mark boxes + // that are heavily overlapped by another box as ignore. We do this because we want to + // allow for stronger non-maximum suppression logic in the learned detector, since that + // will help make it easier to learn a good detector. + // + // To explain this non-max suppression idea further it's important to understand how + // the detector works. Essentially, sliding window detectors scan all image locations + // and ask "is there a thing here?". If there really is a thing in a specific location in + // an image then usually many slightly different sliding window locations will produce + // high detection scores, indicating that there is a thing at those locations. If we + // just stopped there then each thing would produce multiple detections. But that isn't + // what we want. We want each thing to produce just one detection. So it's common for + // detectors to include "non-maximum suppression" logic which simply takes the + // strongest detection and then deletes all detections "close to" the strongest. This + // is a simple post-processing step that can eliminate duplicate detections. However, + // we have to define what "close to" means. We can do this by looking at your training + // data and checking how close the closest target boxes are to each other, and then + // picking a "close to" measure that doesn't suppress those target boxes but is + // otherwise as tight as possible. This is exactly what the mmod_options object does + // by default. + // + // Importantly, this means that if your training dataset contains an image with two + // target boxes that really overlap a whole lot, then the non-maximum suppression + // "close to" measure will be configured to allow detections to really overlap a whole + // lot. On the other hand, if your dataset didn't contain any overlapped boxes at all, + // then the non-max suppression logic would be configured to filter out any boxes that + // overlapped at all, and thus would be performing a much stronger non-max suppression. + // + // Why does this matter? Well, remember that we want to avoid duplicate detections. + // If non-max suppression just kills everything in a really wide area around a thing then + // the CNN doesn't really need to learn anything about avoiding duplicate detections. + // However, if non-max suppression only suppresses a tiny area around each detection + // then the CNN will need to learn to output small detection scores for those areas of + // the image not suppressed. The smaller the non-max suppression region the more the + // CNN has to learn and the more difficult the learning problem will become. This is + // why we remove highly overlapped objects from the training dataset. That is, we do + // it so the non-max suppression logic will be able to be reasonably effective. Here + // we are ensuring that any boxes that are entirely contained by another are + // suppressed. 
We also ensure that boxes with an intersection over union of 0.5 or + // greater are suppressed. This will improve the resulting detector since it will be + // able to use more aggressive non-max suppression settings. + + int num_overlapped_ignored_test = 0; + for (auto& v : boxes_test) + num_overlapped_ignored_test += ignore_overlapped_boxes(v, test_box_overlap(0.50, 0.95)); + + int num_overlapped_ignored = 0; + int num_additional_ignored = 0; + for (auto& v : boxes_train) + { + num_overlapped_ignored += ignore_overlapped_boxes(v, test_box_overlap(0.50, 0.95)); + +/* + for (auto& bb : v) + { + if (bb.rect.width() < 35 && bb.rect.height() < 35) + { + if (!bb.ignore) + { + bb.ignore = true; + ++num_additional_ignored; + } + } + + // The dlib vehicle detection dataset doesn't contain any detections with + // really extreme aspect ratios. However, some datasets do, often because of + // bad labeling. So it's a good idea to check for that and either eliminate + // those boxes or set them to ignore. Although, this depends on your + // application. + // + // For instance, if your dataset has boxes with an aspect ratio + // of 10 then you should think about what that means for the network + // architecture. Does the receptive field even cover the entirety of the box + // in those cases? Do you thinge about these boxes? Are they labeling errors? + // I find that many people will download some dataset from the internet and + // just take it as given. They run it through some training algorithm and take + // the dataset as unchallengeable truth. But many datasets are full of + // labeling errors. There are also a lot of datasets that aren't full of + // errors, but are annotated in a sloppy and inconsistent way. Fixing those + // errors and inconsistencies can often greatly improve models trained from + // such data. It's almost always worth the time to try and improve your + // training dataset. + // + // In any case, my point is that there are other types of dataset cleaning you + // could put here. What exactly you need depends on your application. But you + // should thingefully consider it and not take your dataset as a given. The work + // of creating a good detector is largely about creating a high quality + // training dataset. + } + */ + } + + // When modifying a dataset like this, it's a really good idea to print a log of how + // many boxes you ignored. It's easy to accidentally ignore a huge block of data, so + // you should always look and see that things are doing what you expect. + cout << "num_overlapped_ignored: "<< num_overlapped_ignored << endl; + cout << "num_additional_ignored: "<< num_additional_ignored << endl; + cout << "num_overlapped_ignored_test: "<< num_overlapped_ignored_test << endl; + + + cout << "num training images: " << images_train.size() << endl; + cout << "num testing images: " << images_test.size() << endl; + + + // Our vehicle detection dataset has basically 3 different types of boxes. Square + // boxes, tall and skinny boxes (e.g. semi trucks), and short and wide boxes (e.g. + // sedans). Here we are telling the MMOD algorithm that a vehicle is recognizable as + // long as the longest box side is at least 70 pixels long and the shortest box side is + // at least 30 pixels long. mmod_options will use these parameters to decide how large + // each of the sliding windows needs to be so as to be able to detect all the vehicles. + // Since our dataset has basically these 3 different aspect ratios, it will decide to + // use 3 different sliding windows. 
This means the final con layer in the network will + // have 3 filters, one for each of these aspect ratios. + // + // Another thing to consider when setting the sliding window size is the "stride" of + // your network. The network we defined above downsamples the image by a factor of 8x + // in the first few layers. So when the sliding windows are scanning the image, they + // are stepping over it with a stride of 8 pixels. If you set the sliding window size + // too small then the stride will become an issue. For instance, if you set the + // sliding window size to 4 pixels, then it means a 4x4 window will be moved by 8 + // pixels at a time when scanning. This is obviously a problem since 75% of the image + // won't even be visited by the sliding window. So you need to set the window size to + // be big enough relative to the stride of your network. In our case, the windows are + // at least 30 pixels in length, so being moved by 8 pixel steps is fine. + mmod_options options(boxes_train, 60, 40); + + + // This setting is very important and dataset specific. The vehicle detection dataset + // contains boxes that are marked as "ignore", as we discussed above. Some of them are + // ignored because we set ignore to true in the above code. However, the xml files + // also contained a lot of ignore boxes. Some of them are large boxes that encompass + // large parts of an image and the intention is to have everything inside those boxes + // be ignored. Therefore, we need to tell the MMOD algorithm to do that, which we do + // by setting options.overlaps_ignore appropriately. + // + // But first, we need to understand exactly what this option does. The MMOD loss + // is essentially counting the number of false alarms + missed detections produced by + // the detector for each image. During training, the code is running the detector on + // each image in a mini-batch and looking at its output and counting the number of + // mistakes. The optimizer tries to find parameters settings that minimize the number + // of detector mistakes. + // + // This overlaps_ignore option allows you to tell the loss that some outputs from the + // detector should be totally ignored, as if they never happened. In particular, if a + // detection overlaps a box in the training data with ignore==true then that detection + // is ignored. This overlap is determined by calling + // options.overlaps_ignore(the_detection, the_ignored_training_box). If it returns + // true then that detection is ignored. + // + // You should read the documentation for test_box_overlap, the class type for + // overlaps_ignore for full details. However, the gist is that the default behavior is + // to only consider boxes as overlapping if their intersection over union is > 0.5. + // However, the dlib vehicle detection dataset contains large boxes that are meant to + // mask out large areas of an image. So intersection over union isn't an appropriate + // way to measure "overlaps with box" in this case. We want any box that is contained + // inside one of these big regions to be ignored, even if the detection box is really + // small. So we set overlaps_ignore to behave that way with this line. + options.overlaps_ignore = test_box_overlap(0.5, 0.95); + + net_type net(options); + + // The final layer of the network must be a con layer that contains + // options.detector_windows.size() filters. This is because these final filters are + // what perform the final "sliding window" detection in the network. 
For the dlib + // vehicle dataset, there will be 3 sliding window detectors, so we will be setting + // num_filters to 3 here. + net.subnet().layer_details().set_num_filters(options.detector_windows.size()); + + + dnn_trainer trainer(net,sgd(0.0001,0.9)); + trainer.set_learning_rate(0.1); + trainer.be_verbose(); + + + // While training, we are going to use early stopping. That is, we will be checking + // how good the detector is performing on our test data and when it stops getting + // better on the test data we will drop the learning rate. We will keep doing that + // until the learning rate is less than 1e-4. These two settings tell the trainer to + // do that. Essentially, we are setting the first argument to infinity, and only the + // test iterations without progress threshold will matter. In particular, it says that + // once we observe 1000 testing mini-batches where the test loss clearly isn't + // decreasing we will lower the learning rate. + trainer.set_iterations_without_progress_threshold(50000); + trainer.set_test_iterations_without_progress_threshold(500); + + const string sync_filename = "mmod_things_sync"; + trainer.set_synchronization_file(sync_filename, std::chrono::minutes(5)); + + std::vector> mini_batch_samples; + std::vector> mini_batch_labels; + random_cropper cropper; + cropper.set_seed(time(0)); + cropper.set_chip_dims(320, 320); + // Usually you want to give the cropper whatever min sizes you passed to the + // mmod_options constructor, or very slightly smaller sizes, which is what we do here. + cropper.set_min_object_size(60,40); + cropper.set_max_rotation_degrees(2); + dlib::rand rnd; + + // Log the training parameters to the console + cout << trainer << cropper << endl; + + int cnt = 1; + // Run the trainer until the learning rate gets small. + while(trainer.get_learning_rate() >= 1e-4) + { + + trainer.set_test_iterations_without_progress_threshold(500); + + // Every 30 mini-batches we do a testing mini-batch. + if (cnt%30 != 0 || images_test.size() == 0) + { + cropper(80, images_train, boxes_train, mini_batch_samples, mini_batch_labels); + // We can also randomly jitter the colors and that often helps a detector + // generalize better to new images. + for (auto&& img : mini_batch_samples) + disturb_colors(img, rnd); + + // It's a good idea to, at least once, put code here that displays the images + // and boxes the random cropper is generating. You should look at them and + // think about if the output makes sense for your problem. Most of the time + // it will be fine, but sometimes you will realize that the pattern of cropping + // isn't really appropriate for your problem and you will need to make some + // change to how the mini-batches are being generated. Maybe you will tweak + // some of the cropper's settings, or write your own entirely separate code to + // create mini-batches. But either way, if you don't look you will never know. + // An easy way to do this is to create a dlib::image_window to display the + // images and boxes. 
+ +// #if !defined(DLIB_NO_GUI_SUPPORT) +// image_window win; +// for(std::size_t i = 0; i < mini_batch_samples.size(); i++) +// { +// win.clear_overlay(); +// win.set_image(mini_batch_samples[i]); +// for(auto &r : mini_batch_labels[i]) +// { +// win.add_overlay(r); +// std::cout << "rect: " << r.rect << " label: " << r.label << " conf: " << r.detection_confidence << " ignore: " << int(r.ignore) << std::endl; +// } +// std::cout << "OK" << std::endl; +// cin.get(); +// } +// #endif + + + trainer.train_one_step(mini_batch_samples, mini_batch_labels); + } + else + { + cropper(80, images_test, boxes_test, mini_batch_samples, mini_batch_labels); + // We can also randomly jitter the colors and that often helps a detector + // generalize better to new images. + for (auto&& img : mini_batch_samples) + { + disturb_colors(img, rnd); + + } + + trainer.test_one_step(mini_batch_samples, mini_batch_labels); + } + + ++cnt; + } + // wait for training threads to stop + trainer.get_net(); + cout << "done training" << endl; + + // Save the network to disk + net.clean(); + serialize("mmod_rear_end_vehicle_detector.dat") << net; + + + // It's a really good idea to print the training parameters. This is because you will + // invariably be running multiple rounds of training and should be logging the output + // to a file. This print statement will include many of the training parameters in + // your log. + cout << trainer << cropper << endl; + + cout << "\nsync_filename: " << sync_filename << endl; + cout << "num training images: "<< images_train.size() << endl; + cout << "training results: " << test_object_detection_function(net, images_train, boxes_train, test_box_overlap(), 0, options.overlaps_ignore); + // Upsampling the data will allow the detector to find smaller things. Recall that + // we configured it to use a sliding window nominally 70 pixels in size. So upsampling + // here will let it find things nominally 35 pixels in size. Although we include a + // limit of 1800*1800 here which means "don't upsample an image if it's already larger + // than 1800*1800". We do this so we don't run out of RAM, which is a concern because + // some of the images in the dlib vehicle dataset are really high resolution. + upsample_image_dataset>(images_train, boxes_train, 1800*1800); + cout << "training upsampled results: " << test_object_detection_function(net, images_train, boxes_train, test_box_overlap(), 0, options.overlaps_ignore); + + + cout << "num testing images: "<< images_test.size() << endl; + cout << "testing results: " << test_object_detection_function(net, images_test, boxes_test, test_box_overlap(), 0, options.overlaps_ignore); + upsample_image_dataset>(images_test, boxes_test, 1800*1800); + cout << "testing upsampled results: " << test_object_detection_function(net, images_test, boxes_test, test_box_overlap(), 0, options.overlaps_ignore); + + /* + This program takes many hours to execute on a high end GPU. It took about a day to + train on a NVIDIA 1080ti. The resulting model file is available at + http://dlib.net/files/mmod_rear_end_vehicle_detector.dat.bz2 + It should be noted that this file on dlib.net has a dlib::shape_predictor appended + onto the end of it (see dnn_mmod_find_things_ex.cpp for an example of its use). This + explains why the model file on dlib.net is larger than the + mmod_rear_end_vehicle_detector.dat output by this program. 
+ + You can see some videos of this vehicle detector running on YouTube: + https://www.youtube.com/watch?v=4B3bzmxMAZU + https://www.youtube.com/watch?v=bP2SUo5vSlc + + Also, the training and testing accuracies were: + num training images: 2217 + training results: 0.990738 0.736431 0.736073 + training upsampled results: 0.986837 0.937694 0.936912 + num testing images: 135 + testing results: 0.988827 0.471372 0.470806 + testing upsampled results: 0.987879 0.651132 0.650399 + */ + + return 0; + +} +catch(std::exception& e) +{ + cout << e.what() << endl; +} + + + + diff --git a/sample_build.sh b/sample_build.sh new file mode 100644 index 0000000..2572491 --- /dev/null +++ b/sample_build.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Simple build script illustrating build command on OS X platform. +# 1) Replace 'xcode' below with an appropriate toolchain from the polly repository. +# 2) Update CUDNN_HOME to correct path for your system. +# +# See: https://github.com/ruslo/polly + +TOOLCHAIN=xcode +CONFIG=Release +CUDNN_HOME=${HOME}/pkg/cuDNN/v7.0.4 polly.py --toolchain ${TOOLCHAIN} --config-all ${CONFIG} --install ${*}
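+
+# Untested sketch of a direct CMake invocation without polly (the generator and cuDNN
+# handling below are assumptions; adapt to your system):
+#
+#   CUDNN_HOME=${HOME}/pkg/cuDNN/v7.0.4 cmake -H. -B_builds -DCMAKE_BUILD_TYPE=${CONFIG}
+#   cmake --build _builds
+#   cmake --build _builds --target install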