diff --git a/CMakeLists.txt b/CMakeLists.txt
index f46ca8de1..6c38a65be 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -26,10 +26,28 @@ if (NOT WIN32 AND NOT UNIX AND NOT APPLE)
endif (NOT WIN32 AND NOT UNIX AND NOT APPLE)
-### BUILD_TYPE
+### CMAKE_BUILD_TYPE
# Default: Release
-set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build." FORCE)
-set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
+# Bug fixed: By default, CMake was using a build type different from Release, which made OpenPose about 15% slower
+# than it should be.
+# Is CMAKE_BUILD_TYPE "Debug" or "MinSizeRel" or "RelWithDebInfo"?
+set(CMAKE_BUILD_TYPE_KNOWN FALSE)
+if (${CMAKE_BUILD_TYPE} MATCHES "Debug")
+ set(CMAKE_BUILD_TYPE_KNOWN TRUE)
+endif (${CMAKE_BUILD_TYPE} MATCHES "Debug")
+if (${CMAKE_BUILD_TYPE} MATCHES "MinSizeRel")
+ set(CMAKE_BUILD_TYPE_KNOWN TRUE)
+endif (${CMAKE_BUILD_TYPE} MATCHES "MinSizeRel")
+if (${CMAKE_BUILD_TYPE} MATCHES "RelWithDebInfo")
+ set(CMAKE_BUILD_TYPE_KNOWN TRUE)
+endif (${CMAKE_BUILD_TYPE} MATCHES "RelWithDebInfo")
+# Assign proper CMAKE_BUILD_TYPE
+if (${CMAKE_BUILD_TYPE_KNOWN})
+ set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build.")
+else (${CMAKE_BUILD_TYPE_KNOWN})
+ set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build." FORCE)
+endif (${CMAKE_BUILD_TYPE_KNOWN})
+set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Release" "Debug" "MinSizeRel" "RelWithDebInfo")
### FLAGS
diff --git a/README.md b/README.md
index 5fbf7792f..163274b80 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@
|-------------|
|[![Build Status](https://travis-ci.org/CMU-Perceptual-Computing-Lab/openpose.svg?branch=master)](https://travis-ci.org/CMU-Perceptual-Computing-Lab/openpose)|
-[OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) represents the **first real-time multi-person system to jointly detect human body, hand, and facial keypoints (in total 135 keypoints) on single images**.
+[OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) represents the **first real-time multi-person system to jointly detect human body, hand, facial, and foot keypoints (in total 135 keypoints) on single images**.
@@ -113,9 +113,9 @@ bin\OpenPoseDemo.exe --video examples\media\video.avi
- **Calibration toolbox**: To easily calibrate your cameras for 3-D OpenPose or any other stereo vision task. See [doc/modules/calibration_module.md](doc/modules/calibration_module.md).
-- **OpenPose Wrapper**: If you want to read a specific input, and/or add your custom post-processing function, and/or implement your own display/saving, check the `Wrapper` tutorial on [examples/tutorial_wrapper/](examples/tutorial_wrapper/). You can create your custom code on [examples/user_code/](examples/user_code/) and quickly compile it with CMake when compiling the whole OpenPose project. Quickly **add your custom code**: See [examples/user_code/README.md](examples/user_code/README.md) for further details.
+- **OpenPose C++ API**: If you want to read a specific input, and/or add your custom post-processing function, and/or implement your own display/saving, check the C++ API tutorial on [examples/tutorial_api_cpp/](examples/tutorial_api_cpp/) and [doc/library_introduction.md](doc/library_introduction.md). You can create your custom code on [examples/user_code/](examples/user_code/) and quickly compile it with CMake when compiling the whole OpenPose project. Quickly **add your custom code**: See [examples/user_code/README.md](examples/user_code/README.md) for further details.
-- **OpenPose C++ API**: See [doc/library_introduction.md](doc/library_introduction.md).
+- **OpenPose Python API**: Analogously to the C++ API, find the tutorial for the Python API on [examples/tutorial_api_python/](examples/tutorial_api_python/).
- **Adding an extra module**: Check [doc/library_add_new_module.md](./doc/library_add_new_module.md).
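To make the C++ API bullet above more concrete, below is a minimal sketch of the asynchronous usage pattern followed by the `examples/tutorial_api_cpp/` files. It is not part of this diff; the class and method names (`op::Wrapper`, `emplaceAndPop`, `poseKeypoints`) are assumed from the tutorial examples and should be verified against them.

```cpp
// Hedged sketch (assumption: API as used in examples/tutorial_api_cpp/).
#include <opencv2/opencv.hpp>
#include <openpose/headers.hpp>

int main()
{
    // Start OpenPose with default parameters in asynchronous (push/pop) mode.
    op::Wrapper opWrapper{op::ThreadManagerMode::Asynchronous};
    opWrapper.start();
    // Push one image and pop the processed result.
    const cv::Mat imageToProcess = cv::imread("examples/media/COCO_val2014_000000000192.jpg");
    auto datumProcessed = opWrapper.emplaceAndPop(imageToProcess);
    if (datumProcessed != nullptr && !datumProcessed->empty())
        op::log("Body keypoints: " + datumProcessed->at(0).poseKeypoints.toString());
    return 0;
}
```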
diff --git a/doc/installation.md b/doc/installation.md
index 9d5dff259..bd4a954d6 100644
--- a/doc/installation.md
+++ b/doc/installation.md
@@ -112,14 +112,20 @@ Any problem installing OpenPose? Check [doc/faq.md](./faq.md) and/or post a GitH
- Windows: download and install the latest CMake win64-x64 msi installer from the [CMake website](https://cmake.org/download/), called `cmake-X.X.X-win64-x64.msi`.
- Mac: `brew cask install cmake`.
3. Windows - **Microsoft Visual Studio (VS) 2015 Enterprise Update 3**:
- - If **Visual Studio 2017 Community** is desired, we do not officially support it, but it might be compiled by firstly [enabling CUDA 8.0 in VS2017](https://stackoverflow.com/questions/43745099/using-cuda-with-visual-studio-2017?answertab=active#tab-top) or use **VS2017 with CUDA 9** by checking the `.vcxproj` file and changing the necessary paths from CUDA 8 to 9.
- - VS 2015 Enterprise Update 1 will give some compiler errors and VS 2015 Community has not been tested.
+ - **IMPORTANT**: Enable all C++-related flags when selecting the components to install.
+ - Different VS versions:
+ - If **Visual Studio 2017 Community** is desired, we do not officially support it, but it might compile by first [enabling CUDA 8.0 in VS2017](https://stackoverflow.com/questions/43745099/using-cuda-with-visual-studio-2017?answertab=active#tab-top), or by using **VS2017 with CUDA 9** and editing the `.vcxproj` file to change the necessary paths from CUDA 8 to 9.
+ - VS 2015 Enterprise Update 1 will give some compiler errors.
+ - VS 2015 Community has not been tested.
4. Nvidia GPU version prerequisites:
1. [**CUDA 8**](https://developer.nvidia.com/cuda-80-ga2-download-archive):
- Ubuntu: Run `sudo ubuntu/install_cuda.sh` or alternatively download and install it from their website.
- - Windows: Install CUDA 8.0 after Visual Studio 2015 is installed to assure that the CUDA installation will generate all necessary files for VS. If CUDA was already installed, re-install - **IMPORTANT 1/2**: Nvidia V, any Nvidia with Volta architecture, and newer Nvidia model GPUs require at least CUDA 9.
- - **IMPORTANT 2/2**: As of a recent Windows update, you might want to download the Nvidia [drivers](http://www.nvidia.com/Download/index.aspx) first, and then install CUDA without the Graphics Driver flag or else your system might hang.
- 2. [**cuDNN 5.1**](https://developer.nvidia.com/cudnn):
+ - Windows: Install CUDA 8.0 after Visual Studio 2015 is installed to ensure that the CUDA installation will generate all necessary files for VS. If CUDA was already installed, re-install it.
+ - **Important installation tips**:
+ - Newer Nvidia GPU models (e.g., Nvidia V, GTX 2080, any Nvidia with Volta or Turing architecture, etc.) require at least CUDA 9.
+ - (Windows issue, reported Sep 2018): If your computer hangs when installing CUDA drivers, try installing the [Nvidia drivers](http://www.nvidia.com/Download/index.aspx) first, and then install CUDA without the Graphics Driver flag.
+ - (Windows): If CMake returns an error message similar to `CUDA_TOOLKIT_ROOT_DIR not found or specified`, or reports any other missing CUDA component, then: 1) Re-install Visual Studio 2015; 2) Reboot your PC; 3) Re-install CUDA.
+ 2. [**cuDNN 5.1**](https://developer.nvidia.com/rdp/cudnn-archive):
- Ubuntu: Run `sudo ubuntu/install_cudnn.sh` or alternatively download and install it from their website.
- Windows (and Ubuntu if manual installation): In order to manually install it, just unzip it and copy (merge) the contents on the CUDA folder, usually `/usr/local/cuda/` in Ubuntu and `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v8.0` in Windows.
5. AMD GPU version prerequisites:
diff --git a/doc/library_add_new_module.md b/doc/library_add_new_module.md
index a02228e3f..fadce0da8 100644
--- a/doc/library_add_new_module.md
+++ b/doc/library_add_new_module.md
@@ -8,7 +8,7 @@ In order to add a new module, these are the recommended steps in order to develo
2. Implement all the functionality in one `Worker` (i.e. inherit from `Worker` and implement all the functionality on that class).
1. The first letter of the class name should be `W` (e.g. `WHairExtractor`).
2. To initially simplify development:
- 1. Initialize the Worker class with the specific std::shared_ptr<std::vector<op::Datum>> instead of directly using a template class (following the `examples/tutorial_wrapper` synchronous examples).
+ 1. Initialize the Worker class with the specific std::shared_ptr<std::vector<op::Datum>> instead of directly using a template class (following the `examples/tutorial_api_cpp` synchronous examples).
2. Use the whole op::Datum as unique argument of your auxiliary functions.
3. Use the OpenPose Wrapper in ThreadManagerMode::SingleThread mode (e.g. it allows you to directly use cv::imshow).
4. If you are using your own custom Caffe -> initially change the Caffe for your version. It should directly work.
@@ -20,6 +20,8 @@ In order to add a new module, these are the recommended steps in order to develo
5. If the Workers need extra data from `Datum`, simply add into `Datum` the new variables required (without removing/modifying any previous variables!).
 6. Read also the release steps before starting this developing phase.
+
+
## Release Steps
In order to release the new module:
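As a reference for step 2 of the development phase above, a user Worker typically looks like the hedged sketch below. The `WHairExtractor` name comes from the doc itself; the base-class methods (`initializationOnThread`, `work`) are assumed from the OpenPose `Worker` interface as used in the tutorial examples.

```cpp
// Hedged sketch of a user Worker for the development phase described above.
#include <openpose/headers.hpp>

class WHairExtractor : public op::Worker<std::shared_ptr<std::vector<op::Datum>>>
{
public:
    void initializationOnThread() {}

    void work(std::shared_ptr<std::vector<op::Datum>>& datumsPtr)
    {
        try
        {
            if (datumsPtr != nullptr && !datumsPtr->empty())
                for (auto& datum : *datumsPtr)
                {
                    // Custom processing here, using the whole op::Datum as the
                    // unique argument of your auxiliary functions (step 2.2 above).
                    (void)datum;
                }
        }
        catch (const std::exception& e)
        {
            this->stop();
            op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    }
};
```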
diff --git a/doc/library_extend_functionality.md b/doc/library_extend_functionality.md
index deec7c53a..a11da00be 100644
--- a/doc/library_extend_functionality.md
+++ b/doc/library_extend_functionality.md
@@ -7,7 +7,7 @@ If you intend to extend the functionality of our library:
2. Check the basic library overview doc on [library_overview.md](library_overview.md).
-3. Read, understand and play with the basic real time pose demo source code [examples/openpose/openpose.cpp](../examples/openpose/openpose.cpp) and [examples/tutorial_wrapper](../examples/tutorial_wrapper). It includes all the functionality of our library, and it has been properly commented.
+3. Read, understand and play with the basic real time pose demo source code [examples/openpose/openpose.cpp](../examples/openpose/openpose.cpp) and [examples/tutorial_api_cpp](../examples/tutorial_api_cpp). It includes all the functionality of our library, and it has been properly commented.
4. Read, understand and play with the other tutorials in [examples/](../examples/). It includes more specific examples.
@@ -15,6 +15,6 @@ If you intend to extend the functionality of our library:
 6. Take a look to the structure of the already existing modules.
-7. The C++ headers files add documentation in [Doxygen](http://www.doxygen.org/) format. Create this documentation by compiling the [include](../include/) folder with Doxygen. This documentation will be completed during the next few weeks/months.
+7. The C++ header files include documentation in [Doxygen](http://www.doxygen.org/) format. Create this documentation by compiling the [include](../include/) folder with Doxygen. This documentation is slowly but continuously improved.
8. You can also take a look to the source code or ask us on GitHub.
diff --git a/doc/modules/3d_reconstruction_module.md b/doc/modules/3d_reconstruction_module.md
index d09493444..98418ead4 100644
--- a/doc/modules/3d_reconstruction_module.md
+++ b/doc/modules/3d_reconstruction_module.md
@@ -109,7 +109,7 @@ It should be similar to the following image.
You can copy and modify the OpenPose 3-D demo to use any camera brand by:
1. You can optionally turn off the `WITH_FLIR_CAMERA` while compiling CMake.
-2. Copy any of the `examples/tutorial_wrapper/*.cpp` examples (we recommend `2_user_synchronous.cpp`).
+2. Copy `examples/tutorial_api_cpp/7_synchronous_custom_input.cpp` (or `9_synchronous_custom_all.cpp`).
3. Modify `WUserInput` and add your custom code there. Your code should fill `Datum::name`, `Datum::cameraMatrix`, `Datum::cvInputData`, and `Datum::cvOutputData` (fill cvOutputData = cvInputData).
4. Remove `WUserPostProcessing` and `WUserOutput` (unless you want to have your custom post-processing and/or output).
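For step 3 above, a hedged sketch of what `WUserInput` might look like for a custom camera brand follows. The base class (`op::WorkerProducer`) and the `Datum` member names are assumed from the synchronous tutorial examples; `grabFrameFromMyCamera` is a hypothetical placeholder for your camera SDK call.

```cpp
// Hedged sketch: custom camera producer filling the Datum fields listed in step 3.
#include <opencv2/opencv.hpp>
#include <openpose/headers.hpp>

class WUserInput : public op::WorkerProducer<std::shared_ptr<std::vector<op::Datum>>>
{
public:
    void initializationOnThread() {}

    std::shared_ptr<std::vector<op::Datum>> workProducer()
    {
        // One Datum per camera view.
        auto datumsPtr = std::make_shared<std::vector<op::Datum>>(1);
        auto& datum = datumsPtr->at(0);
        datum.name = "customCamera_0";                   // Datum::name
        datum.cameraMatrix = cv::Mat::eye(3, 4, CV_64F); // Datum::cameraMatrix (use your real extrinsics)
        datum.cvInputData = grabFrameFromMyCamera();     // Datum::cvInputData
        datum.cvOutputData = datum.cvInputData;          // fill cvOutputData = cvInputData
        return datumsPtr;
    }

private:
    cv::Mat grabFrameFromMyCamera()
    {
        // Hypothetical placeholder: replace with your camera SDK read call.
        return cv::Mat(720, 1280, CV_8UC3, cv::Scalar(0, 0, 0));
    }
};
```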
diff --git a/doc/modules/python_module.md b/doc/modules/python_module.md
index 27b8892c3..496ff9a84 100644
--- a/doc/modules/python_module.md
+++ b/doc/modules/python_module.md
@@ -35,21 +35,21 @@ pip install opencv-python
## Testing
-Two examples can be found in `build/examples/tutorial_python` in your build folder. Navigate directly to this path to run examples.
+Two examples can be found in `build/examples/tutorial_api_python` in your build folder. Navigate directly to this path to run examples.
- `1_extract_pose` demonstrates a simple use of the API.
-- `2_pose_from_heatmaps` demonstrates constructing pose from heatmaps from the caffe network. (Requires Python Caffe to be installed seperately)
+- `2_pose_from_heatmaps` demonstrates constructing pose from heatmaps from the Caffe network (requires Python Caffe to be installed separately; only tested on Ubuntu).
```
# From command line
-cd build/examples/tutorial_python
+cd build/examples/tutorial_api_python
python 1_extract_pose.py
```
## Exporting Python OpenPose
-Note: This step is only required if you are moving the `*.py` files outside their original location, or writting new `*.py` scripts outside `build/examples/tutorial_python`.
+Note: This step is only required if you are moving the `*.py` files outside their original location, or writing new `*.py` scripts outside `build/examples/tutorial_api_python`.
- Option a, installing OpenPose: On an Ubuntu or OSX based system, you could install OpenPose by running `sudo make install`, you could then set the OpenPose path in your python scripts to the OpenPose installation path (default: `/usr/local/python`) and start using OpenPose at any location. Take a look at `build/examples/tutorial_pose/1_extract_pose.py` for an example.
-- Option b, not Installing OpenPose: To move the OpenPose Python API demos to a different folder, ensure that the line `sys.path.append('{OpenPose_path}/python')` is properly set in your `*.py` files, where `{OpenPose_path}` points to your build folder of OpenPose. Take a look at `build/examples/tutorial_pose/1_extract_pose.py` for an example.
+- Option b, not installing OpenPose: To move the OpenPose Python API demos to a different folder, ensure that the line `sys.path.append('{OpenPose_path}/python')` is properly set in your `*.py` files, where `{OpenPose_path}` points to your build folder of OpenPose. Take a look at `build/examples/tutorial_pose/1_extract_pose.py` for an example.
diff --git a/doc/release_notes.md b/doc/release_notes.md
index 790bd3003..b9153ef0d 100644
--- a/doc/release_notes.md
+++ b/doc/release_notes.md
@@ -57,7 +57,7 @@ OpenPose Library - Release Notes
9. WCocoJsonSaver finished and removed its 3599-image limit.
10. Added `--camera_fps` so generated video will use that frame rate.
11. Reduced the number of printed information messages. Default logging priority threshold increased to Priority::Max.
- 12. Google flags to OpenPose configuration parameters reader moved from each demo to utilities/flagsToOpenPose.
+ 12. GFlags to OpenPose configuration parameters reader moved from each demo to utilities/flagsToOpenPose.
13. Nms classes do not use `numberParts` for `Reshape`, they deduce the value.
14. Improved documentation.
2. Functions or parameters renamed:
@@ -259,15 +259,23 @@ OpenPose Library - Release Notes
-## Current version - future OpenPose 1.4.1
+## Current version - future OpenPose 1.5.0
1. Main improvements:
1. Added initial single-person tracker for further speed up or visual smoothing (`--tracking` flag).
2. Greedy body part connector implemented in CUDA: +~30% speed up in Nvidia (CUDA) version with default flags and +~10% in maximum accuracy configuration. In addition, it provides a small 0.5% boost in accuracy (default flags).
3. OpenPose can be built as Unity plugin: Added flag `BUILD_UNITY_SUPPORT` and special Unity code.
4. If camera is unplugged, OpenPose GUI and command line will display a warning and try to reconnect it.
+ 5. Wrapper classes simplified and renamed.
+ 6. API and examples improved:
+ 1. New header file `flags.hpp` that includes all OpenPose flags, removing the need to copy them repeatedly on each OpenPose example file.
+ 2. `tutorial_wrapper` renamed as `tutorial_api_cpp`, and new examples were added.
+ 3. `tutorial_python` renamed as `tutorial_api_python`, and new examples were added.
+ 4. `tutorial_pose` and `tutorial_thread` renamed as `tutorial_developer`; these are not meant to be used by users, but rather by OpenPose developers.
2. Functions or parameters renamed:
- 1. By default, python example `2_pose_from_heatmaps.py` was using 2 scales starting at -1x736, changed to 1 scale at -1x368.
+ 1. By default, the Python example `tutorial_developer/python_2_pose_from_heatmaps.py` was using 2 scales starting at -1x736; changed to 1 scale at -1x368.
+ 2. WrapperStructPose default parameters changed to match those of the OpenPose demo binary.
3. Main bugs fixed:
+ 1. CMake-GUI was forcing Release mode; Debug and the other build modes are now allowed too.
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index b2df7751a..167ec367f 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -1,10 +1,9 @@
add_subdirectory(calibration)
add_subdirectory(openpose)
add_subdirectory(tutorial_add_module)
-add_subdirectory(tutorial_pose)
-add_subdirectory(tutorial_python)
-add_subdirectory(tutorial_thread)
-add_subdirectory(tutorial_wrapper)
+add_subdirectory(tutorial_api_python)
+add_subdirectory(tutorial_api_cpp)
+add_subdirectory(tutorial_developer)
add_subdirectory(user_code)
if (UNIX OR APPLE)
add_subdirectory(tests)
diff --git a/examples/calibration/CMakeLists.txt b/examples/calibration/CMakeLists.txt
index 963f4bce2..bb19ea5d3 100644
--- a/examples/calibration/CMakeLists.txt
+++ b/examples/calibration/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(EXAMPLE_FILES
+set(EXAMPLE_FILES
calibration.cpp)
foreach(EXAMPLE_FILE ${EXAMPLE_FILES})
diff --git a/examples/calibration/calibration.cpp b/examples/calibration/calibration.cpp
index 80d8c79bb..4c0b935a4 100644
--- a/examples/calibration/calibration.cpp
+++ b/examples/calibration/calibration.cpp
@@ -3,26 +3,12 @@
// Implemented on top of OpenCV.
// It computes and saves the intrinsics parameters of the input images.
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_POSE
+#include <openpose/flags.hpp>
// OpenPose dependencies
#include <openpose/headers.hpp>
-// See all the available parameter options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
-// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
-// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while"
- " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for"
- " low priority messages and 4 for important ones.");
// Calibration
DEFINE_int32(mode, 1, "Select 1 for intrinsic camera parameter calibration, 2 for extrinsic calibration.");
DEFINE_string(calibration_image_dir, "images/intrinsics/", "Directory where the images for camera parameter calibration are placed.");
diff --git a/examples/openpose/CMakeLists.txt b/examples/openpose/CMakeLists.txt
index 5fa7691df..8f1d87c48 100644
--- a/examples/openpose/CMakeLists.txt
+++ b/examples/openpose/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(EXAMPLE_FILES
+set(EXAMPLE_FILES
openpose.cpp)
foreach(EXAMPLE_FILE ${EXAMPLE_FILES})
diff --git a/examples/openpose/openpose.cpp b/examples/openpose/openpose.cpp
index 3170c0a2e..1661c2f5c 100755
--- a/examples/openpose/openpose.cpp
+++ b/examples/openpose/openpose.cpp
@@ -1,7 +1,7 @@
// ------------------------- OpenPose Library Tutorial - Real Time Pose Estimation -------------------------
-// If the user wants to learn to use the OpenPose library, we highly recommend to start with the `examples/tutorial_*/`
-// folders.
-// This example summarizes all the funcitonality of the OpenPose library:
+// If the user wants to learn to use the OpenPose library, we highly recommend to start with the
+// examples in `examples/tutorial_api_cpp/`.
+// This example summarizes all the functionality of the OpenPose library:
// 1. Read folder of images / video / webcam (`producer` module)
// 2. Extract and render body keypoint / heatmap / PAF of that image (`pose` module)
// 3. Extract and render face keypoint / heatmap / PAF of that image (`face` module)
@@ -16,228 +16,11 @@
// 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
// This file should only be used for the user to take specific examples.
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
+// Command-line user interface
+#include <openpose/flags.hpp>
// OpenPose dependencies
#include <openpose/headers.hpp>
-// See all the available parameter options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
-// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
-// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while"
- " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for"
- " low priority messages and 4 for important ones.");
-DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful"
- " for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with"
- " low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the"
- " error.");
-DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some"
- " runtime statistics at this frame number.");
-// Producer
-DEFINE_int32(camera, -1, "The camera index for cv::VideoCapture. Integer in the range [0, 9]. Select a negative"
- " number (by default), to auto-detect and open the first available camera.");
-DEFINE_string(camera_resolution, "-1x-1", "Set the camera resolution (either `--camera` or `--flir_camera`). `-1x-1` will use the"
- " default 1280x720 for `--camera`, or the maximum flir camera resolution available for"
- " `--flir_camera`");
-DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (also used when saving video). Set this value to the minimum"
- " value between the OpenPose displayed speed and the webcam real frame rate.");
-DEFINE_string(video, "", "Use a video file instead of the camera. Use `examples/media/video.avi` for our default"
- " example video.");
-DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
- " images. Read all standard formats (jpg, png, bmp, etc.).");
-DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
-DEFINE_int32(flir_camera_index, -1, "Select -1 (default) to run on all detected flir cameras at once. Otherwise, select the flir"
- " camera index to run, where 0 corresponds to the detected flir camera with the lowest"
- " serial number, and `n` to the `n`-th lowest serial number camera.");
-DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
-DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
-DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to"
- " 10, it will process 11 frames (0-10).");
-DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g. for real time webcam demonstrations).");
-DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270.");
-DEFINE_bool(frames_repeat, false, "Repeat frames when finished.");
-DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
- " too long, it will skip frames. If it is too fast, it will slow it down.");
-DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located.");
-DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the"
- " `camera_parameter_folder` camera parameters; if true, it will not undistortionate, i.e.,"
- " it will leave it as it is.");
-// OpenPose
-DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
-DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
- " input image resolution.");
-DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
- " machine.");
-DEFINE_int32(num_gpu_start, 0, "GPU device start number.");
-DEFINE_int32(keypoint_scale, 0, "Scaling of the (x,y) coordinates of the final pose data array, i.e. the scale of the (x,y)"
- " coordinates that will be saved with the `write_json` & `write_keypoint` flags."
- " Select `0` to scale it to the original source resolution; `1`to scale it to the net output"
- " size (set with `net_resolution`); `2` to scale it to the final output size (set with"
- " `resolution`); `3` to scale it in the range [0,1], where (0,0) would be the top-left"
- " corner of the image, and (1,1) the bottom-right one; and 4 for range [-1,1], where"
- " (-1,-1) would be the top-left corner of the image, and (1,1) the bottom-right one. Non"
- " related with `scale_number` and `scale_gap`.");
-DEFINE_int32(number_people_max, -1, "This parameter will limit the maximum number of people detected, by keeping the people with"
- " top scores. The score is based in person area over the image, body part score, as well as"
- " joint score (between each pair of connected body parts). Useful if you know the exact"
- " number of people in the scene, so it can remove false positives (if all the people have"
- " been detected. However, it might also include false negatives by removing very small or"
- " highly occluded people. -1 will keep them all.");
-// OpenPose Body Pose
-DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face"
- " keypoint detection.");
-DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), "
- "`MPI_4_layers` (15 keypoints, even faster but less accurate).");
-DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is"
- " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the"
- " closest aspect ratio possible to the images or videos to be processed. Using `-1` in"
- " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's"
- " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions,"
- " e.g. full HD (1980x1080) and HD (1280x720) resolutions.");
-DEFINE_int32(scale_number, 1, "Number of scales to average.");
-DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1."
- " If you want to change the initial scale, you actually want to multiply the"
- " `net_resolution` by your desired initial scale.");
-// OpenPose Body Pose Heatmaps and Part Candidates
-DEFINE_bool(heatmaps_add_parts, false, "If true, it will fill op::Datum::poseHeatMaps array with the body part heatmaps, and"
- " analogously face & hand heatmaps to op::Datum::faceHeatMaps & op::Datum::handHeatMaps."
- " If more than one `add_heatmaps_X` flag is enabled, it will place then in sequential"
- " memory order: body parts + bkg + PAFs. It will follow the order on"
- " POSE_BODY_PART_MAPPING in `src/openpose/pose/poseParameters.cpp`. Program speed will"
- " considerably decrease. Not required for OpenPose, enable it only if you intend to"
- " explicitly use this information later.");
-DEFINE_bool(heatmaps_add_bkg, false, "Same functionality as `add_heatmaps_parts`, but adding the heatmap corresponding to"
- " background.");
-DEFINE_bool(heatmaps_add_PAFs, false, "Same functionality as `add_heatmaps_parts`, but adding the PAFs.");
-DEFINE_int32(heatmaps_scale, 2, "Set 0 to scale op::Datum::poseHeatMaps in the range [-1,1], 1 for [0,1]; 2 for integer"
- " rounded [0,255]; and 3 for no scaling.");
-DEFINE_bool(part_candidates, false, "Also enable `write_json` in order to save this information. If true, it will fill the"
- " op::Datum::poseCandidates array with the body part candidates. Candidates refer to all"
- " the detected body parts, before being assembled into people. Note that the number of"
- " candidates is equal or higher than the number of final body parts (i.e. after being"
- " assembled into people). The empty body parts are filled with 0s. Program speed will"
- " slightly decrease. Not required for OpenPose, enable it only if you intend to explicitly"
- " use this information.");
-// OpenPose Face
-DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Note that this will considerable slow down the performance and increse"
- " the required GPU memory. In addition, the greater number of people on the image, the"
- " slower OpenPose will be.");
-DEFINE_string(face_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the face keypoint"
- " detector. 320x320 usually works fine while giving a substantial speed up when multiple"
- " faces on the image.");
-// OpenPose Hand
-DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Analogously to `--face`, it will also slow down the performance, increase"
- " the required GPU memory and its speed depends on the number of people.");
-DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the hand keypoint"
- " detector.");
-DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results"
- " were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4.");
-DEFINE_double(hand_scale_range, 0.4, "Analogous purpose than `scale_gap` but applied to the hand keypoint detector. Total range"
- " between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if"
- " scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2.");
-DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate"
- " is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it"
- " simply looks for hands in positions at which hands were located in previous frames, but"
- " it does not guarantee the same person ID among frames.");
-// OpenPose 3-D Reconstruction
-DEFINE_bool(3d, false, "Running OpenPose 3-D reconstruction demo: 1) Reading from a stereo camera system."
- " 2) Performing 3-D reconstruction from the multiple views. 3) Displaying 3-D reconstruction"
- " results. Note that it will only display 1 person. If multiple people is present, it will"
- " fail.");
-DEFINE_int32(3d_min_views, -1, "Minimum number of views required to reconstruct each keypoint. By default (-1), it will"
- " require all the cameras to see the keypoint in order to reconstruct it.");
-DEFINE_int32(3d_views, 1, "Complementary option to `--image_dir` or `--video`. OpenPose will read as many images per"
- " iteration, allowing tasks such as stereo camera processing (`--3d`). Note that"
- " `--camera_parameters_folder` must be set. OpenPose must find as many `xml` files in the"
- " parameter folder as this number indicates.");
-// Extra algorithms
-DEFINE_bool(identification, false, "Experimental, not available yet. Whether to enable people identification across frames.");
-DEFINE_int32(tracking, -1, "Experimental, not available yet. Whether to enable people tracking across frames. The"
- " value indicates the number of frames where tracking is run between each OpenPose keypoint"
- " detection. Select -1 (default) to disable it or 0 to run simultaneously OpenPose keypoint"
- " detector and tracking for potentially higher accurary than only OpenPose.");
-DEFINE_int32(ik_threads, 0, "Experimental, not available yet. Whether to enable inverse kinematics (IK) from 3-D"
- " keypoints to obtain 3-D joint angles. By default (0 threads), it is disabled. Increasing"
- " the number of threads will increase the speed but also the global system latency.");
-// OpenPose Rendering
-DEFINE_int32(part_to_show, 0, "Prediction channel to visualize (default: 0). 0 for all the body parts, 1-18 for each body"
- " part heat map, 19 for the background heat map, 20 for all the body part heat maps"
- " together, 21 for all the PAFs, 22-40 for each body part pair PAF.");
-DEFINE_bool(disable_blending, false, "If enabled, it will render the results (keypoint skeletons or heatmaps) on a black"
- " background, instead of being rendered into the original image. Related: `part_to_show`,"
- " `alpha_pose`, and `alpha_pose`.");
-// OpenPose Rendering Pose
-DEFINE_double(render_threshold, 0.05, "Only estimated keypoints whose score confidences are higher than this threshold will be"
- " rendered. Generally, a high threshold (> 0.5) will only render very clear body parts;"
- " while small thresholds (~0.1) will also output guessed and occluded keypoints, but also"
- " more false positives (i.e. wrong detections).");
-DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering"
- " (slower but greater functionality, e.g. `alpha_X` flags). If -1, it will pick CPU if"
- " CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render"
- " both `outputData` and `cvOutputData` with the original image and desired body part to be"
- " shown (i.e. keypoints, heat maps or PAFs).");
-DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will"
- " hide it. Only valid for GPU rendering.");
-DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the"
- " heatmap, 0 will only show the frame. Only valid for GPU rendering.");
-// OpenPose Rendering Face
-DEFINE_double(face_render_threshold, 0.4, "Analogous to `render_threshold`, but applied to the face keypoints.");
-DEFINE_int32(face_render, -1, "Analogous to `render_pose` but applied to the face. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(face_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to face.");
-DEFINE_double(face_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
-// OpenPose Rendering Hand
-DEFINE_double(hand_render_threshold, 0.2, "Analogous to `render_threshold`, but applied to the hand keypoints.");
-DEFINE_int32(hand_render, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(hand_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to hand.");
-DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
-// Display
-DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle).");
-DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g. number of current frame and people). It"
- " does not affect the pose rendering.");
-DEFINE_int32(display, -1, "Display mode: -1 for automatic selection; 0 for no display (useful if there is no X server"
- " and/or to slightly speed up the processing if visual output is not required); 2 for 2-D"
- " display; 3 for 3-D display (if `--3d` enabled); and 1 for both 2-D and 3-D display.");
-// Result Saving
-DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
-DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
- " function cv::imwrite for all compatible extensions.");
-DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
- " final path does not finish in `.avi`. It internally uses cv::VideoWriter. Flag"
- " `camera_fps` controls FPS.");
-DEFINE_string(write_json, "", "Directory to write OpenPose output in JSON format. It includes body, hand, and face pose"
- " keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled).");
-DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format.");
-DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format.");
-DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag"
- " must be enabled.");
-DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`."
- " For lossless compression, recommended `png` for integer `heatmaps_scale` and `float` for"
- " floating values.");
-DEFINE_string(write_keypoint, "", "(Deprecated, use `write_json`) Directory to write the people pose keypoint data. Set format"
- " with `write_keypoint_format`.");
-DEFINE_string(write_keypoint_format, "yml", "(Deprecated, use `write_json`) File extension and format for `write_keypoint`: json, xml,"
- " yaml & yml. Json not available for OpenCV < 3.0, use `write_json` instead.");
-// Result Saving - Extra Algorithms
-DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g.: `~/Desktop/adamResult.avi`. Flag `camera_fps`"
- " controls FPS.");
-DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g.: `~/Desktop/mocapResult.bvh`.");
-// UDP communication
-DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`.");
-DEFINE_string(udp_port, "8051", "Experimental, not available yet. Port number for UDP communication.");
-
int openPoseDemo()
{
try
@@ -256,7 +39,7 @@ int openPoseDemo()
// // Print out speed values faster
// op::Profiler::setDefaultX(100);
- // Applying user defined configuration - Google flags to program variables
+ // Applying user defined configuration - GFlags to program variables
// outputSize
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
// netInputSize
@@ -325,7 +108,7 @@ int openPoseDemo()
// Configure wrapper
opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
wrapperStructInput, wrapperStructOutput);
- // Set to single-thread running (to debug and/or reduce latency)
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
if (FLAGS_disable_multi_thread)
opWrapper.disableMultiThreading();
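The net effect of the `openpose.cpp` changes above is that examples no longer repeat every `DEFINE_*` block; they include the new shared flags header instead. A hedged sketch of the resulting pattern follows (the header name is taken from this diff; `my_custom_flag` is a hypothetical example-specific flag):

```cpp
// Hedged sketch of the flags.hpp pattern introduced by this diff.
// #define OPENPOSE_FLAGS_DISABLE_POSE  // optional: skip pose flags if unused
#include <openpose/flags.hpp>   // shared OpenPose GFlags definitions
#include <openpose/headers.hpp> // OpenPose dependencies

// Only flags specific to this example still need a local DEFINE_*.
DEFINE_string(my_custom_flag, "", "Hypothetical example-specific flag.");

int main(int argc, char* argv[])
{
    // Parse the command line (GFlags call used throughout the OpenPose examples).
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    // ... configure and start op::Wrapper as in examples/openpose/openpose.cpp.
    return 0;
}
```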
diff --git a/examples/tests/README.md b/examples/tests/README.md
new file mode 100644
index 000000000..1dca5a95a
--- /dev/null
+++ b/examples/tests/README.md
@@ -0,0 +1,4 @@
+# Low Level API Examples
+**Disclaimer**: This folder is meant for internal OpenPose developers. These examples might change substantially, and we will not answer questions about them nor provide official support for them.
+
+**If the OpenPose library does not compile because of an error caused by a file in this folder, notify us**.
diff --git a/examples/tests/clTest.cpp b/examples/tests/clTest.cpp
index 8c102c653..91f2368d5 100644
--- a/examples/tests/clTest.cpp
+++ b/examples/tests/clTest.cpp
@@ -1,19 +1,14 @@
// ------------------------- OpenPose Resize Layer Testing -------------------------
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_POSE
+#include <openpose/flags.hpp>
+// OpenPose dependencies
#include <openpose/headers.hpp>
+// OpenCL dependencies
#ifdef USE_OPENCL
#include
#include
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
-namespace gflags = google;
-#endif
-#ifdef USE_CAFFE
-#include
-#endif
DEFINE_string(image_path, "examples/media/COCO_val2014_000000000192.jpg", "Process the desired image.");
diff --git a/examples/tests/handFromJsonTest.cpp b/examples/tests/handFromJsonTest.cpp
index 98e8c8ad4..c6f9e1353 100644
--- a/examples/tests/handFromJsonTest.cpp
+++ b/examples/tests/handFromJsonTest.cpp
@@ -1,19 +1,14 @@
// ------------------------- OpenPose Library Tutorial - Hand Keypoint Detection from JSON Ground-Truth Data -------------------------
// Example to test hands accuracy given ground-truth bounding boxes.
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_POSE
+#include <openpose/flags.hpp>
+// OpenPose dependencies
#include <openpose/headers.hpp>
#include "wrapperHandFromJsonTest.hpp"
// For info about the flags, check `examples/openpose/openpose.bin`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "");
// Producer
DEFINE_string(image_dir, "", "");
DEFINE_string(hand_ground_truth, "", "");
@@ -44,7 +39,7 @@ int handFromJsonTest()
__LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- // Applying user defined configuration - Google flags to program variables
+ // Applying user defined configuration - GFlags to program variables
// handNetInputSize
const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
// producerType
diff --git a/examples/tests/resizeTest.cpp b/examples/tests/resizeTest.cpp
index 568485d55..a19c826db 100644
--- a/examples/tests/resizeTest.cpp
+++ b/examples/tests/resizeTest.cpp
@@ -1,14 +1,12 @@
// ------------------------- OpenPose Resize Layer Testing -------------------------
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_POSE
+#include <openpose/flags.hpp>
+// OpenPose dependencies
#include <openpose/headers.hpp>
+
#ifdef USE_CUDA
- #include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
- // GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
- #include <gflags/gflags.h>
- // Allow Google Flags in Ubuntu 14
- #ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
- #endif
#ifdef USE_CAFFE
#include
#endif
diff --git a/examples/tutorial_add_module/1_custom_post_processing.cpp b/examples/tutorial_add_module/1_custom_post_processing.cpp
index 79ae80f3c..c1c2c19ca 100644
--- a/examples/tutorial_add_module/1_custom_post_processing.cpp
+++ b/examples/tutorial_add_module/1_custom_post_processing.cpp
@@ -20,233 +20,16 @@
// 4. If extra classes and files are required, add those extra files inside the OpenPose include and src folders,
// under a new folder (i.e. `include/newMethod/` and `src/newMethod/`), including `namespace op` on those files.
-// This example is a sub-case of `tutorial_wrapper/2_user_synchronous.cpp`, where only custom post-processing is
+// This example is a sub-case of `tutorial_api_cpp/6_synchronous_custom_postprocessing.cpp`, where only custom post-processing is
// considered.
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
+// Command-line user interface
+#include <openpose/flags.hpp>
// OpenPose dependencies
#include <openpose/headers.hpp>
#include "userDatum.hpp"
#include "wUserPostProcessing.hpp"
-// See all the available parameter options withe the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
-// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
-// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while"
- " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for"
- " low priority messages and 4 for important ones.");
-DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful"
- " for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with"
- " low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the"
- " error.");
-DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some"
- " runtime statistics at this frame number.");
-// Producer
-DEFINE_int32(camera, -1, "The camera index for cv::VideoCapture. Integer in the range [0, 9]. Select a negative"
- " number (by default), to auto-detect and open the first available camera.");
-DEFINE_string(camera_resolution, "-1x-1", "Set the camera resolution (either `--camera` or `--flir_camera`). `-1x-1` will use the"
- " default 1280x720 for `--camera`, or the maximum flir camera resolution available for"
- " `--flir_camera`");
-DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (also used when saving video). Set this value to the minimum"
- " value between the OpenPose displayed speed and the webcam real frame rate.");
-DEFINE_string(video, "", "Use a video file instead of the camera. Use `examples/media/video.avi` for our default"
- " example video.");
-DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
- " images. Read all standard formats (jpg, png, bmp, etc.).");
-DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
-DEFINE_int32(flir_camera_index, -1, "Select -1 (default) to run on all detected flir cameras at once. Otherwise, select the flir"
- " camera index to run, where 0 corresponds to the detected flir camera with the lowest"
- " serial number, and `n` to the `n`-th lowest serial number camera.");
-DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
-DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
-DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to"
- " 10, it will process 11 frames (0-10).");
-DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g. for real time webcam demonstrations).");
-DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270.");
-DEFINE_bool(frames_repeat, false, "Repeat frames when finished.");
-DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
- " too long, it will skip frames. If it is too fast, it will slow it down.");
-DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located.");
-DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the"
- " `camera_parameter_folder` camera parameters; if true, it will not undistortionate, i.e.,"
- " it will leave it as it is.");
-// OpenPose
-DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
-DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
- " input image resolution.");
-DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
- " machine.");
-DEFINE_int32(num_gpu_start, 0, "GPU device start number.");
-DEFINE_int32(keypoint_scale, 0, "Scaling of the (x,y) coordinates of the final pose data array, i.e. the scale of the (x,y)"
- " coordinates that will be saved with the `write_json` & `write_keypoint` flags."
- " Select `0` to scale it to the original source resolution; `1`to scale it to the net output"
- " size (set with `net_resolution`); `2` to scale it to the final output size (set with"
- " `resolution`); `3` to scale it in the range [0,1], where (0,0) would be the top-left"
- " corner of the image, and (1,1) the bottom-right one; and 4 for range [-1,1], where"
- " (-1,-1) would be the top-left corner of the image, and (1,1) the bottom-right one. Non"
- " related with `scale_number` and `scale_gap`.");
-DEFINE_int32(number_people_max, -1, "This parameter will limit the maximum number of people detected, by keeping the people with"
- " top scores. The score is based in person area over the image, body part score, as well as"
- " joint score (between each pair of connected body parts). Useful if you know the exact"
- " number of people in the scene, so it can remove false positives (if all the people have"
- " been detected. However, it might also include false negatives by removing very small or"
- " highly occluded people. -1 will keep them all.");
-// OpenPose Body Pose
-DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face"
- " keypoint detection.");
-DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), "
- "`MPI_4_layers` (15 keypoints, even faster but less accurate).");
-DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is"
- " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the"
- " closest aspect ratio possible to the images or videos to be processed. Using `-1` in"
- " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's"
- " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions,"
- " e.g. full HD (1980x1080) and HD (1280x720) resolutions.");
-DEFINE_int32(scale_number, 1, "Number of scales to average.");
-DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1."
- " If you want to change the initial scale, you actually want to multiply the"
- " `net_resolution` by your desired initial scale.");
-// OpenPose Body Pose Heatmaps and Part Candidates
-DEFINE_bool(heatmaps_add_parts, false, "If true, it will fill op::Datum::poseHeatMaps array with the body part heatmaps, and"
- " analogously face & hand heatmaps to op::Datum::faceHeatMaps & op::Datum::handHeatMaps."
- " If more than one `add_heatmaps_X` flag is enabled, it will place then in sequential"
- " memory order: body parts + bkg + PAFs. It will follow the order on"
- " POSE_BODY_PART_MAPPING in `src/openpose/pose/poseParameters.cpp`. Program speed will"
- " considerably decrease. Not required for OpenPose, enable it only if you intend to"
- " explicitly use this information later.");
-DEFINE_bool(heatmaps_add_bkg, false, "Same functionality as `add_heatmaps_parts`, but adding the heatmap corresponding to"
- " background.");
-DEFINE_bool(heatmaps_add_PAFs, false, "Same functionality as `add_heatmaps_parts`, but adding the PAFs.");
-DEFINE_int32(heatmaps_scale, 2, "Set 0 to scale op::Datum::poseHeatMaps in the range [-1,1], 1 for [0,1]; 2 for integer"
- " rounded [0,255]; and 3 for no scaling.");
-DEFINE_bool(part_candidates, false, "Also enable `write_json` in order to save this information. If true, it will fill the"
- " op::Datum::poseCandidates array with the body part candidates. Candidates refer to all"
- " the detected body parts, before being assembled into people. Note that the number of"
- " candidates is equal or higher than the number of final body parts (i.e. after being"
- " assembled into people). The empty body parts are filled with 0s. Program speed will"
- " slightly decrease. Not required for OpenPose, enable it only if you intend to explicitly"
- " use this information.");
-// OpenPose Face
-DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Note that this will considerable slow down the performance and increse"
- " the required GPU memory. In addition, the greater number of people on the image, the"
- " slower OpenPose will be.");
-DEFINE_string(face_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the face keypoint"
- " detector. 320x320 usually works fine while giving a substantial speed up when multiple"
- " faces on the image.");
-// OpenPose Hand
-DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Analogously to `--face`, it will also slow down the performance, increase"
- " the required GPU memory and its speed depends on the number of people.");
-DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the hand keypoint"
- " detector.");
-DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results"
- " were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4.");
-DEFINE_double(hand_scale_range, 0.4, "Analogous purpose than `scale_gap` but applied to the hand keypoint detector. Total range"
- " between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if"
- " scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2.");
-DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate"
- " is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it"
- " simply looks for hands in positions at which hands were located in previous frames, but"
- " it does not guarantee the same person ID among frames.");
-// OpenPose 3-D Reconstruction
-DEFINE_bool(3d, false, "Running OpenPose 3-D reconstruction demo: 1) Reading from a stereo camera system."
- " 2) Performing 3-D reconstruction from the multiple views. 3) Displaying 3-D reconstruction"
- " results. Note that it will only display 1 person. If multiple people is present, it will"
- " fail.");
-DEFINE_int32(3d_min_views, -1, "Minimum number of views required to reconstruct each keypoint. By default (-1), it will"
- " require all the cameras to see the keypoint in order to reconstruct it.");
-DEFINE_int32(3d_views, 1, "Complementary option to `--image_dir` or `--video`. OpenPose will read as many images per"
- " iteration, allowing tasks such as stereo camera processing (`--3d`). Note that"
- " `--camera_parameters_folder` must be set. OpenPose must find as many `xml` files in the"
- " parameter folder as this number indicates.");
-// Extra algorithms
-DEFINE_bool(identification, false, "Experimental, not available yet. Whether to enable people identification across frames.");
-DEFINE_int32(tracking, -1, "Experimental, not available yet. Whether to enable people tracking across frames. The"
- " value indicates the number of frames where tracking is run between each OpenPose keypoint"
- " detection. Select -1 (default) to disable it or 0 to run simultaneously OpenPose keypoint"
- " detector and tracking for potentially higher accurary than only OpenPose.");
-DEFINE_int32(ik_threads, 0, "Experimental, not available yet. Whether to enable inverse kinematics (IK) from 3-D"
- " keypoints to obtain 3-D joint angles. By default (0 threads), it is disabled. Increasing"
- " the number of threads will increase the speed but also the global system latency.");
-// OpenPose Rendering
-DEFINE_int32(part_to_show, 0, "Prediction channel to visualize (default: 0). 0 for all the body parts, 1-18 for each body"
- " part heat map, 19 for the background heat map, 20 for all the body part heat maps"
- " together, 21 for all the PAFs, 22-40 for each body part pair PAF.");
-DEFINE_bool(disable_blending, false, "If enabled, it will render the results (keypoint skeletons or heatmaps) on a black"
- " background, instead of being rendered into the original image. Related: `part_to_show`,"
- " `alpha_pose`, and `alpha_pose`.");
-// OpenPose Rendering Pose
-DEFINE_double(render_threshold, 0.05, "Only estimated keypoints whose score confidences are higher than this threshold will be"
- " rendered. Generally, a high threshold (> 0.5) will only render very clear body parts;"
- " while small thresholds (~0.1) will also output guessed and occluded keypoints, but also"
- " more false positives (i.e. wrong detections).");
-DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering"
- " (slower but greater functionality, e.g. `alpha_X` flags). If -1, it will pick CPU if"
- " CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render"
- " both `outputData` and `cvOutputData` with the original image and desired body part to be"
- " shown (i.e. keypoints, heat maps or PAFs).");
-DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will"
- " hide it. Only valid for GPU rendering.");
-DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the"
- " heatmap, 0 will only show the frame. Only valid for GPU rendering.");
-// OpenPose Rendering Face
-DEFINE_double(face_render_threshold, 0.4, "Analogous to `render_threshold`, but applied to the face keypoints.");
-DEFINE_int32(face_render, -1, "Analogous to `render_pose` but applied to the face. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(face_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to face.");
-DEFINE_double(face_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
-// OpenPose Rendering Hand
-DEFINE_double(hand_render_threshold, 0.2, "Analogous to `render_threshold`, but applied to the hand keypoints.");
-DEFINE_int32(hand_render, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(hand_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to hand.");
-DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
-// Display
-DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle).");
-DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g. number of current frame and people). It"
- " does not affect the pose rendering.");
-DEFINE_int32(display, -1, "Display mode: -1 for automatic selection; 0 for no display (useful if there is no X server"
- " and/or to slightly speed up the processing if visual output is not required); 2 for 2-D"
- " display; 3 for 3-D display (if `--3d` enabled); and 1 for both 2-D and 3-D display.");
-// Result Saving
-DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
-DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
- " function cv::imwrite for all compatible extensions.");
-DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
- " final path does not finish in `.avi`. It internally uses cv::VideoWriter. Flag"
- " `camera_fps` controls FPS.");
-DEFINE_string(write_json, "", "Directory to write OpenPose output in JSON format. It includes body, hand, and face pose"
- " keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled).");
-DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format.");
-DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format.");
-DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag"
- " must be enabled.");
-DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`."
- " For lossless compression, recommended `png` for integer `heatmaps_scale` and `float` for"
- " floating values.");
-DEFINE_string(write_keypoint, "", "(Deprecated, use `write_json`) Directory to write the people pose keypoint data. Set format"
- " with `write_keypoint_format`.");
-DEFINE_string(write_keypoint_format, "yml", "(Deprecated, use `write_json`) File extension and format for `write_keypoint`: json, xml,"
- " yaml & yml. Json not available for OpenCV < 3.0, use `write_json` instead.");
-// Result Saving - Extra Algorithms
-DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g.: `~/Desktop/adamResult.avi`. Flag `camera_fps`"
- " controls FPS.");
-DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g.: `~/Desktop/mocapResult.bvh`.");
-// UDP communication
-DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`.");
-DEFINE_string(udp_port, "8051", "Experimental, not available yet. Port number for UDP communication.");
-
int openPoseTutorialWrapper4()
{
try
@@ -260,7 +43,7 @@ int openPoseTutorialWrapper4()
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
op::Profiler::setDefaultX(FLAGS_profile_speed);
- // Applying user defined configuration - Google flags to program variables
+ // Applying user defined configuration - GFlags to program variables
// outputSize
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
// netInputSize
@@ -334,12 +117,12 @@ int openPoseTutorialWrapper4()
);
// Add custom processing
const auto workerProcessingOnNewThread = false;
- opWrapper.setWorkerPostProcessing(wUserPostProcessing, workerProcessingOnNewThread);
+ opWrapper.setWorker(op::WorkerType::PostProcessing, wUserPostProcessing, workerProcessingOnNewThread);
// Configure wrapper
opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
wrapperStructInput, wrapperStructOutput);
- // Set to single-thread running (to debug and/or reduce latency)
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
if (FLAGS_disable_multi_thread)
opWrapper.disableMultiThreading();
diff --git a/examples/tutorial_api_cpp/1_body_from_image.cpp b/examples/tutorial_api_cpp/1_body_from_image.cpp
new file mode 100644
index 000000000..98e038a36
--- /dev/null
+++ b/examples/tutorial_api_cpp/1_body_from_image.cpp
@@ -0,0 +1,103 @@
+// ------------------------- OpenPose API Tutorial - Example 1 - Body from image -------------------------
+// It reads an image, processes it, and displays it with the pose keypoints.
+
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_POSE
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// Custom OpenPose flags
+// Producer
+DEFINE_string(image_path, "examples/media/COCO_val2014_000000000192.jpg",
+ "Process an image. Read all standard formats (jpg, png, bmp, etc.).");
+
+// This function displays the rendered frame returned by OpenPose
+void display(const std::shared_ptr<std::vector<op::Datum>>& datumsPtr)
+{
+ // User's displaying/saving/other processing here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ // Display image
+ cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
+ cv::waitKey(0);
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High);
+}
+
+void printKeypoints(const std::shared_ptr<std::vector<op::Datum>>& datumsPtr)
+{
+ // Example: How to use the pose keypoints
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ // Alternative 1
+ op::log("Body keypoints: " + datumsPtr->at(0).poseKeypoints.toString());
+
+ // // Alternative 2
+ // op::log(datumsPtr->at(0).poseKeypoints);
+
+ // // Alternative 3
+ // std::cout << datumsPtr->at(0).poseKeypoints << std::endl;
+
+    // // Alternative 4 - Accessing each element of the keypoints
+ // op::log("\nKeypoints:");
+ // const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
+ // op::log("Person pose keypoints:");
+ // for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
+ // {
+ // op::log("Person " + std::to_string(person) + " (x, y, score):");
+ // for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
+ // {
+ // std::string valueToPrint;
+ // for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
+ // valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
+ // op::log(valueToPrint);
+ // }
+ // }
+ // op::log(" ");
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High);
+}
+
+int bodyFromImage()
+{
+ op::log("Starting OpenPose demo...", op::Priority::High);
+
+ // Configuring OpenPose
+ op::log("Configuring OpenPose...", op::Priority::High);
+    op::Wrapper<std::vector<op::Datum>> opWrapper{op::ThreadManagerMode::Asynchronous};
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+ // Starting OpenPose
+ op::log("Starting thread(s)...", op::Priority::High);
+ opWrapper.start();
+
+ // Process and display image
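+    // emplaceAndPop() queues the image for processing and blocks until the processed datum can be
+    // popped back; it returns nullptr if the image could not be processed.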
+ const auto imageToProcess = cv::imread(FLAGS_image_path);
+ auto datumProcessed = opWrapper.emplaceAndPop(imageToProcess);
+ if (datumProcessed != nullptr)
+ {
+ printKeypoints(datumProcessed);
+ display(datumProcessed);
+ }
+ else
+ op::log("Image could not be processed.", op::Priority::High);
+
+ // Return successful message
+ op::log("Stopping OpenPose...", op::Priority::High);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running bodyFromImage
+ return bodyFromImage();
+}
diff --git a/examples/tutorial_api_cpp/2_whole_body_from_image.cpp b/examples/tutorial_api_cpp/2_whole_body_from_image.cpp
new file mode 100644
index 000000000..92e111003
--- /dev/null
+++ b/examples/tutorial_api_cpp/2_whole_body_from_image.cpp
@@ -0,0 +1,85 @@
+// ------------------------- OpenPose API Tutorial - Example 2 - Whole body from image -------------------------
+// It reads an image, processes it, and displays it with the pose, hand, and face keypoints.
+
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_POSE
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// Custom OpenPose flags
+// Producer
+DEFINE_string(image_path, "examples/media/COCO_val2014_000000000241.jpg",
+ "Process an image. Read all standard formats (jpg, png, bmp, etc.).");
+
+// This function displays the rendered frame returned by OpenPose
+void display(const std::shared_ptr<std::vector<op::Datum>>& datumsPtr)
+{
+ // User's displaying/saving/other processing here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ // Display image
+ cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
+ cv::waitKey(0);
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High);
+}
+
+void printKeypoints(const std::shared_ptr<std::vector<op::Datum>>& datumsPtr)
+{
+ // Example: How to use the pose keypoints
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ op::log("Body keypoints: " + datumsPtr->at(0).poseKeypoints.toString());
+ op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
+ op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
+ op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High);
+}
+
+int wholeBodyFromImage()
+{
+ op::log("Starting OpenPose demo...", op::Priority::High);
+
+ // Configuring OpenPose
+ op::log("Configuring OpenPose...", op::Priority::High);
+    op::Wrapper<std::vector<op::Datum>> opWrapper{op::ThreadManagerMode::Asynchronous};
+ // Add hand and face
+ opWrapper.configure(op::WrapperStructFace{true});
+ opWrapper.configure(op::WrapperStructHand{true});
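+    // Both structs above keep their default parameters except for the first (enable) argument.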
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+ // Starting OpenPose
+ op::log("Starting thread(s)...", op::Priority::High);
+ opWrapper.start();
+
+ // Process and display image
+ const auto imageToProcess = cv::imread(FLAGS_image_path);
+ auto datumProcessed = opWrapper.emplaceAndPop(imageToProcess);
+ if (datumProcessed != nullptr)
+ {
+ printKeypoints(datumProcessed);
+ display(datumProcessed);
+ }
+ else
+ op::log("Image could not be processed.", op::Priority::High);
+
+ // Return successful message
+ op::log("Stopping OpenPose...", op::Priority::High);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running wholeBodyFromImage
+ return wholeBodyFromImage();
+}
diff --git a/examples/tutorial_api_cpp/3_keypoints_from_image_configurable.cpp b/examples/tutorial_api_cpp/3_keypoints_from_image_configurable.cpp
new file mode 100644
index 000000000..57054a849
--- /dev/null
+++ b/examples/tutorial_api_cpp/3_keypoints_from_image_configurable.cpp
@@ -0,0 +1,150 @@
+// ------------------------- OpenPose API Tutorial - Example 3 - Body from image configurable -------------------------
+// It reads an image, processes it, and displays it with the pose (and optionally hand and face) keypoints. In addition,
+// it includes all the OpenPose configuration flags (enable/disable hand, face, output saving, etc.).
+
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_PRODUCER
+#define OPENPOSE_FLAGS_DISABLE_DISPLAY
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// Custom OpenPose flags
+// Producer
+DEFINE_string(image_path, "examples/media/COCO_val2014_000000000294.jpg",
+ "Process an image. Read all standard formats (jpg, png, bmp, etc.).");
+
+// This function displays the rendered frame returned by OpenPose
+void display(const std::shared_ptr<std::vector<op::Datum>>& datumsPtr)
+{
+ // User's displaying/saving/other processing here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ // Display image
+ cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
+ cv::waitKey(0);
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High);
+}
+
+void printKeypoints(const std::shared_ptr<std::vector<op::Datum>>& datumsPtr)
+{
+ // Example: How to use the pose keypoints
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ op::log("Body keypoints: " + datumsPtr->at(0).poseKeypoints.toString());
+ op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
+ op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
+ op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High);
+}
+
+int wholeBodyFromImage()
+{
+ op::log("Starting OpenPose demo...", op::Priority::High);
+
+ // logging_level
+ op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
+ __LINE__, __FUNCTION__, __FILE__);
+ op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
+ op::Profiler::setDefaultX(FLAGS_profile_speed);
+
+ // Applying user defined configuration - GFlags to program variables
+ // outputSize
+ const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
+ // netInputSize
+ const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
+ // faceNetInputSize
+ const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
+ // handNetInputSize
+ const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
+ // poseModel
+ const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
+ // JSON saving
+ if (!FLAGS_write_keypoint.empty())
+ op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
+ " Please, use `write_json` instead.", op::Priority::Max);
+ // keypointScale
+ const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
+ // heatmaps to add
+ const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
+ FLAGS_heatmaps_add_PAFs);
+ const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
+ // >1 camera view?
+ const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1);
+ // Enabling Google Logging
+ const bool enableGoogleLogging = true;
+ // Logging
+ op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+
+ // Configuring OpenPose
+ op::log("Configuring OpenPose...", op::Priority::High);
+    op::Wrapper<std::vector<op::Datum>> opWrapper{op::ThreadManagerMode::Asynchronous};
+ // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
+ const op::WrapperStructPose wrapperStructPose{
+ !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
+ FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
+ poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
+ FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
+ (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
+ // Face configuration (use op::WrapperStructFace{} to disable it)
+ const op::WrapperStructFace wrapperStructFace{
+ FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
+ (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
+ // Hand configuration (use op::WrapperStructHand{} to disable it)
+ const op::WrapperStructHand wrapperStructHand{
+ FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
+ op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
+ (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
+ // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
+ const op::WrapperStructExtra wrapperStructExtra{
+ FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
+ // Consumer (comment or use default argument to disable any output)
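+    // The built-in GUI is disabled here (DisplayMode::NoDisplay) because this example shows the result
+    // manually via cv::imshow() in display().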
+ const auto displayMode = op::DisplayMode::NoDisplay;
+ const bool guiVerbose = false;
+ const bool fullScreen = false;
+ const op::WrapperStructOutput wrapperStructOutput{
+ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
+ op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
+ FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
+ FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
+ FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
+ // Configure wrapper
+ opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
+ op::WrapperStructInput{}, wrapperStructOutput);
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+ // Starting OpenPose
+ op::log("Starting thread(s)...", op::Priority::High);
+ opWrapper.start();
+
+ // Process and display image
+ const auto imageToProcess = cv::imread(FLAGS_image_path);
+ auto datumProcessed = opWrapper.emplaceAndPop(imageToProcess);
+ if (datumProcessed != nullptr)
+ {
+ printKeypoints(datumProcessed);
+ display(datumProcessed);
+ }
+ else
+ op::log("Image could not be processed.", op::Priority::High);
+
+ // Return successful message
+ op::log("Stopping OpenPose...", op::Priority::High);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running wholeBodyFromImage
+ return wholeBodyFromImage();
+}
diff --git a/examples/tutorial_api_cpp/4_asynchronous_loop_custom_input_and_output.cpp b/examples/tutorial_api_cpp/4_asynchronous_loop_custom_input_and_output.cpp
new file mode 100644
index 000000000..765bfb12a
--- /dev/null
+++ b/examples/tutorial_api_cpp/4_asynchronous_loop_custom_input_and_output.cpp
@@ -0,0 +1,314 @@
+// ------------------------- OpenPose API Tutorial - Example 4 - Asynchronous loop with custom input and output -------------------------
+// Asynchronous mode: ideal for fast prototyping when performance is not an issue. The user emplaces/pushes and pops frames from the OpenPose wrapper
+// when he desires to.
+
+// This example shows the user how to use the OpenPose wrapper class:
+ // 1. User reads images
+ // 2. Extract and render keypoint / heatmap / PAF of that image
+ // 3. Save the results on disk
+ // 4. User displays the rendered pose
+ // Everything in a multi-thread scenario
+// In addition to the previous OpenPose modules, we also need to use:
+ // 1. `core` module:
+ // For the Array class that the `pose` module needs
+ // For the Datum struct that the `thread` module sends between the queues
+ // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
+// This file is intended only as a reference from which the user can take specific examples.
+
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_PRODUCER
+#define OPENPOSE_FLAGS_DISABLE_DISPLAY
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// Custom OpenPose flags
+// Producer
+DEFINE_string(image_dir, "examples/media/",
+ "Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).");
+
+// If the user needs his own variables, he can inherit the op::Datum struct and add them
+// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define Wrapper<std::vector<UserDatum>> instead of
+// Wrapper<std::vector<op::Datum>>
+struct UserDatum : public op::Datum
+{
+ bool boolThatUserNeedsForSomeReason;
+
+ UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
+ boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
+ {}
+};
+
+// The W-classes can be implemented either as a template or as simple classes given
+// that the user usually knows which kind of data he will move between the queues,
+// in this case we assume a std::shared_ptr of a std::vector of UserDatum
+
+// This worker will just read and return all the jpg files in a directory
+class UserInputClass
+{
+public:
+ UserInputClass(const std::string& directoryPath) :
+ mImageFiles{op::getFilesOnDirectory(directoryPath, "jpg")},
+ // If we want "jpg" + "png" images
+        // mImageFiles{op::getFilesOnDirectory(directoryPath, std::vector<std::string>{"jpg", "png"})},
+ mCounter{0},
+ mClosed{false}
+ {
+ if (mImageFiles.empty())
+ op::error("No images found on: " + directoryPath, __LINE__, __FUNCTION__, __FILE__);
+ }
+
+    std::shared_ptr<std::vector<UserDatum>> createDatum()
+ {
+ // Close program when empty frame
+ if (mClosed || mImageFiles.size() <= mCounter)
+ {
+ op::log("Last frame read and added to queue. Closing program after it is processed.", op::Priority::High);
+            // This function stops this worker, which will eventually stop the whole thread system once all the frames
+ // have been processed
+ mClosed = true;
+ return nullptr;
+ }
+ else // if (!mClosed)
+ {
+ // Create new datum
+            auto datumsPtr = std::make_shared<std::vector<UserDatum>>();
+ datumsPtr->emplace_back();
+ auto& datum = datumsPtr->at(0);
+
+ // Fill datum
+ datum.cvInputData = cv::imread(mImageFiles.at(mCounter++));
+
+ // If empty frame -> return nullptr
+ if (datum.cvInputData.empty())
+ {
+ op::log("Empty frame detected on path: " + mImageFiles.at(mCounter-1) + ". Closing program.",
+ op::Priority::High);
+ mClosed = true;
+ datumsPtr = nullptr;
+ }
+
+ return datumsPtr;
+ }
+ }
+
+ bool isFinished() const
+ {
+ return mClosed;
+ }
+
+private:
+    const std::vector<std::string> mImageFiles;
+ unsigned long long mCounter;
+ bool mClosed;
+};
+
+// This worker will just read and return all the jpg files in a directory
+class UserOutputClass
+{
+public:
+    bool display(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
+ {
+ // User's displaying/saving/other processing here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ char key = ' ';
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
+            // Display the image and sleep at least 1 ms (it usually sleeps ~5-10 ms to display the image)
+ key = (char)cv::waitKey(1);
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
+ return (key == 27);
+ }
+    void printKeypoints(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
+ {
+ // Example: How to use the pose keypoints
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ op::log("\nKeypoints:");
+            // Accessing each element of the keypoints
+ const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
+ op::log("Person pose keypoints:");
+ for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
+ {
+ op::log("Person " + std::to_string(person) + " (x, y, score):");
+ for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
+ {
+ std::string valueToPrint;
+ for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
+ valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
+ op::log(valueToPrint);
+ }
+ }
+ op::log(" ");
+ // Alternative: just getting std::string equivalent
+ op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
+ op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
+ op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
+ // Heatmaps
+ const auto& poseHeatMaps = datumsPtr->at(0).poseHeatMaps;
+ if (!poseHeatMaps.empty())
+ {
+ op::log("Pose heatmaps size: [" + std::to_string(poseHeatMaps.getSize(0)) + ", "
+ + std::to_string(poseHeatMaps.getSize(1)) + ", "
+ + std::to_string(poseHeatMaps.getSize(2)) + "]");
+ const auto& faceHeatMaps = datumsPtr->at(0).faceHeatMaps;
+ op::log("Face heatmaps size: [" + std::to_string(faceHeatMaps.getSize(0)) + ", "
+ + std::to_string(faceHeatMaps.getSize(1)) + ", "
+ + std::to_string(faceHeatMaps.getSize(2)) + ", "
+ + std::to_string(faceHeatMaps.getSize(3)) + "]");
+ const auto& handHeatMaps = datumsPtr->at(0).handHeatMaps;
+ op::log("Left hand heatmaps size: [" + std::to_string(handHeatMaps[0].getSize(0)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(1)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(2)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(3)) + "]");
+ op::log("Right hand heatmaps size: [" + std::to_string(handHeatMaps[1].getSize(0)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(1)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(2)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(3)) + "]");
+ }
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
+ }
+};
+
+int openPoseTutorialWrapper3()
+{
+ try
+ {
+ op::log("Starting OpenPose demo...", op::Priority::High);
+ const auto timerBegin = std::chrono::high_resolution_clock::now();
+
+ // logging_level
+ op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
+ __LINE__, __FUNCTION__, __FILE__);
+ op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
+ op::Profiler::setDefaultX(FLAGS_profile_speed);
+
+ // Applying user defined configuration - GFlags to program variables
+ // outputSize
+ const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
+ // netInputSize
+ const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
+ // faceNetInputSize
+ const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
+ // handNetInputSize
+ const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
+ // poseModel
+ const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
+ // JSON saving
+ if (!FLAGS_write_keypoint.empty())
+ op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
+ " Please, use `write_json` instead.", op::Priority::Max);
+ // keypointScale
+ const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
+ // heatmaps to add
+ const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
+ FLAGS_heatmaps_add_PAFs);
+ const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
+ // >1 camera view?
+ const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1);
+ // Enabling Google Logging
+ const bool enableGoogleLogging = true;
+ // Logging
+ op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+
+ // Configure OpenPose
+ op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+        op::Wrapper<std::vector<UserDatum>> opWrapper{op::ThreadManagerMode::Asynchronous};
+ // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
+ const op::WrapperStructPose wrapperStructPose{
+ !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
+ FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
+ poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
+ FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
+ (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
+ // Face configuration (use op::WrapperStructFace{} to disable it)
+ const op::WrapperStructFace wrapperStructFace{
+ FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
+ (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
+ // Hand configuration (use op::WrapperStructHand{} to disable it)
+ const op::WrapperStructHand wrapperStructHand{
+ FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
+ op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
+ (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
+ // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
+ const op::WrapperStructExtra wrapperStructExtra{
+ FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
+ // Consumer (comment or use default argument to disable any output)
+ const auto displayMode = op::DisplayMode::NoDisplay;
+ const bool guiVerbose = false;
+ const bool fullScreen = false;
+ const op::WrapperStructOutput wrapperStructOutput{
+ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
+ op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
+ FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
+ FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
+ FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
+ // Configure wrapper
+ opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
+ op::WrapperStructInput{}, wrapperStructOutput);
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+
+ op::log("Starting thread(s)...", op::Priority::High);
+ opWrapper.start();
+
+ // User processing
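+        // waitAndEmplace() blocks until the datum can be pushed into the OpenPose input queue, and
+        // waitAndPop() blocks until a processed datum is available on the output queue.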
+ UserInputClass userInputClass(FLAGS_image_dir);
+ UserOutputClass userOutputClass;
+ bool userWantsToExit = false;
+ while (!userWantsToExit && !userInputClass.isFinished())
+ {
+ // Push frame
+ auto datumToProcess = userInputClass.createDatum();
+ if (datumToProcess != nullptr)
+ {
+ auto successfullyEmplaced = opWrapper.waitAndEmplace(datumToProcess);
+ // Pop frame
+                std::shared_ptr<std::vector<UserDatum>> datumProcessed;
+ if (successfullyEmplaced && opWrapper.waitAndPop(datumProcessed))
+ {
+ userWantsToExit = userOutputClass.display(datumProcessed);
+ userOutputClass.printKeypoints(datumProcessed);
+ }
+ else
+ op::log("Processed datum could not be emplaced.", op::Priority::High,
+ __LINE__, __FUNCTION__, __FILE__);
+ }
+ }
+
+ op::log("Stopping thread(s)", op::Priority::High);
+ opWrapper.stop();
+
+ // Measuring total time
+ const auto now = std::chrono::high_resolution_clock::now();
+        const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
+ * 1e-9;
+ const auto message = "OpenPose demo successfully finished. Total time: "
+ + std::to_string(totalTimeSec) + " seconds.";
+ op::log(message, op::Priority::High);
+
+ // Return successful message
+ return 0;
+ }
+ catch (const std::exception& e)
+ {
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ return -1;
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running openPoseTutorialWrapper3
+ return openPoseTutorialWrapper3();
+}
diff --git a/examples/tutorial_api_cpp/5_asynchronous_loop_custom_output.cpp b/examples/tutorial_api_cpp/5_asynchronous_loop_custom_output.cpp
new file mode 100644
index 000000000..4e1463737
--- /dev/null
+++ b/examples/tutorial_api_cpp/5_asynchronous_loop_custom_output.cpp
@@ -0,0 +1,251 @@
+// ------------------------- OpenPose API Tutorial - Example 5 - Asynchronous loop with custom output -------------------------
+// Asynchronous output mode: ideal for fast prototyping when performance is not an issue and user wants to use the output OpenPose format. The user
+// simply gets the processed frames from the OpenPose wrapper when he desires to.
+
+// This example shows the user how to use the OpenPose wrapper class:
+ // 1. Read folder of images / video / webcam
+ // 2. Extract and render keypoint / heatmap / PAF of that image
+ // 3. Save the results on disk
+ // 4. User displays the rendered pose
+ // Everything in a multi-thread scenario
+// In addition to the previous OpenPose modules, we also need to use:
+ // 1. `core` module:
+ // For the Array class that the `pose` module needs
+ // For the Datum struct that the `thread` module sends between the queues
+ // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
+// This file is intended only as a reference from which the user can take specific examples.
+
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_DISPLAY
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// If the user needs his own variables, he can inherit the op::Datum struct and add them
+// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
+// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
+struct UserDatum : public op::Datum
+{
+ bool boolThatUserNeedsForSomeReason;
+
+ UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
+ boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
+ {}
+};
+
+// The W-classes can be implemented either as a template or as simple classes given
+// that the user usually knows which kind of data he will move between the queues,
+// in this case we assume a std::shared_ptr of a std::vector of UserDatum
+
+// This worker will just read and return all the jpg files in a directory
+class UserOutputClass
+{
+public:
+    bool display(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
+ {
+ // User's displaying/saving/other processing here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ char key = ' ';
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
+            // Display the image and sleep at least 1 ms (it usually sleeps ~5-10 ms to display the image)
+ key = (char)cv::waitKey(1);
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
+ return (key == 27);
+ }
+    void printKeypoints(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
+ {
+ // Example: How to use the pose keypoints
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ op::log("\nKeypoints:");
+            // Accessing each element of the keypoints
+ const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
+ op::log("Person pose keypoints:");
+ for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
+ {
+ op::log("Person " + std::to_string(person) + " (x, y, score):");
+ for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
+ {
+ std::string valueToPrint;
+ for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
+ {
+ valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
+ }
+ op::log(valueToPrint);
+ }
+ }
+ op::log(" ");
+ // Alternative: just getting std::string equivalent
+ op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
+ op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
+ op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
+ // Heatmaps
+ const auto& poseHeatMaps = datumsPtr->at(0).poseHeatMaps;
+ if (!poseHeatMaps.empty())
+ {
+ op::log("Pose heatmaps size: [" + std::to_string(poseHeatMaps.getSize(0)) + ", "
+ + std::to_string(poseHeatMaps.getSize(1)) + ", "
+ + std::to_string(poseHeatMaps.getSize(2)) + "]");
+ const auto& faceHeatMaps = datumsPtr->at(0).faceHeatMaps;
+ op::log("Face heatmaps size: [" + std::to_string(faceHeatMaps.getSize(0)) + ", "
+ + std::to_string(faceHeatMaps.getSize(1)) + ", "
+ + std::to_string(faceHeatMaps.getSize(2)) + ", "
+ + std::to_string(faceHeatMaps.getSize(3)) + "]");
+ const auto& handHeatMaps = datumsPtr->at(0).handHeatMaps;
+ op::log("Left hand heatmaps size: [" + std::to_string(handHeatMaps[0].getSize(0)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(1)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(2)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(3)) + "]");
+ op::log("Right hand heatmaps size: [" + std::to_string(handHeatMaps[1].getSize(0)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(1)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(2)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(3)) + "]");
+ }
+ }
+ else
+ op::log("Nullptr or empty datumsPtr found.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
+ }
+};
+
+int openPoseTutorialWrapper1()
+{
+ try
+ {
+ op::log("Starting OpenPose demo...", op::Priority::High);
+ const auto timerBegin = std::chrono::high_resolution_clock::now();
+
+ // logging_level
+ op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
+ __LINE__, __FUNCTION__, __FILE__);
+ op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
+ op::Profiler::setDefaultX(FLAGS_profile_speed);
+
+ // Applying user defined configuration - GFlags to program variables
+ // outputSize
+ const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
+ // netInputSize
+ const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
+ // faceNetInputSize
+ const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
+ // handNetInputSize
+ const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
+ // producerType
+ const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
+ FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps,
+ FLAGS_camera_parameter_folder, !FLAGS_frame_keep_distortion,
+ (unsigned int) FLAGS_3d_views, FLAGS_flir_camera_index);
+ // poseModel
+ const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
+ // JSON saving
+ if (!FLAGS_write_keypoint.empty())
+ op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
+ " Please, use `write_json` instead.", op::Priority::Max);
+ // keypointScale
+ const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
+ // heatmaps to add
+ const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
+ FLAGS_heatmaps_add_PAFs);
+ const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
+ // >1 camera view?
+ const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1 || FLAGS_flir_camera);
+ // Enabling Google Logging
+ const bool enableGoogleLogging = true;
+ // Logging
+ op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+
+ // Configure OpenPose
+ op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+        op::Wrapper<std::vector<UserDatum>> opWrapper{op::ThreadManagerMode::AsynchronousOut};
+ // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
+ const op::WrapperStructPose wrapperStructPose{
+ !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
+ FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
+ poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
+ FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
+ (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
+ // Face configuration (use op::WrapperStructFace{} to disable it)
+ const op::WrapperStructFace wrapperStructFace{
+ FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
+ (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
+ // Hand configuration (use op::WrapperStructHand{} to disable it)
+ const op::WrapperStructHand wrapperStructHand{
+ FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
+ op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
+ (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
+ // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
+ const op::WrapperStructExtra wrapperStructExtra{
+ FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
+ // Producer (use default to disable any input)
+ const op::WrapperStructInput wrapperStructInput{
+ producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last, FLAGS_process_real_time, FLAGS_frame_flip,
+ FLAGS_frame_rotate, FLAGS_frames_repeat};
+ // Consumer (comment or use default argument to disable any output)
+ const auto displayMode = op::DisplayMode::NoDisplay;
+ const bool guiVerbose = false;
+ const bool fullScreen = false;
+ const op::WrapperStructOutput wrapperStructOutput{
+ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
+ op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
+ FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
+ FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
+ FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
+ // Configure wrapper
+ opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
+ wrapperStructInput, wrapperStructOutput);
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+
+ op::log("Starting thread(s)...", op::Priority::High);
+ opWrapper.start();
+
+ // User processing
+ UserOutputClass userOutputClass;
+ bool userWantsToExit = false;
+ while (!userWantsToExit)
+ {
+ // Pop frame
+            std::shared_ptr<std::vector<UserDatum>> datumProcessed;
+ if (opWrapper.waitAndPop(datumProcessed))
+ {
+                userWantsToExit = userOutputClass.display(datumProcessed);
+ userOutputClass.printKeypoints(datumProcessed);
+ }
+ else
+ op::log("Processed datum could not be emplaced.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
+ }
+
+ op::log("Stopping thread(s)", op::Priority::High);
+ opWrapper.stop();
+
+ // Measuring total time
+ const auto now = std::chrono::high_resolution_clock::now();
+        const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
+ * 1e-9;
+ const auto message = "OpenPose demo successfully finished. Total time: "
+ + std::to_string(totalTimeSec) + " seconds.";
+ op::log(message, op::Priority::High);
+
+ // Return successful message
+ return 0;
+ }
+ catch (const std::exception& e)
+ {
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ return -1;
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running openPoseTutorialWrapper1
+ return openPoseTutorialWrapper1();
+}
diff --git a/examples/tutorial_api_cpp/6_synchronous_custom_postprocessing.cpp b/examples/tutorial_api_cpp/6_synchronous_custom_postprocessing.cpp
new file mode 100644
index 000000000..539bd3fc4
--- /dev/null
+++ b/examples/tutorial_api_cpp/6_synchronous_custom_postprocessing.cpp
@@ -0,0 +1,222 @@
+// ------------------------- OpenPose Library Tutorial - Real Time Pose Estimation -------------------------
+// If the user wants to learn to use the OpenPose library, we highly recommend starting with the
+// examples in `examples/tutorial_api_cpp/`.
+// This example summarizes all the functionality of the OpenPose library:
+ // 1. Read folder of images / video / webcam (`producer` module)
+ // 2. Extract and render body keypoint / heatmap / PAF of that image (`pose` module)
+ // 3. Extract and render face keypoint / heatmap / PAF of that image (`face` module)
+ // 4. Save the results on disk (`filestream` module)
+ // 5. Display the rendered pose (`gui` module)
+ // Everything in a multi-thread scenario (`thread` module)
+ // Points 2 to 5 are included in the `wrapper` module
+// In addition to the previous OpenPose modules, we also need to use:
+ // 1. `core` module:
+ // For the Array class that the `pose` module needs
+ // For the Datum struct that the `thread` module sends between the queues
+ // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
+// This file is intended only as a reference from which the user can take specific examples.
+
+// Command-line user interface
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// If the user needs his own variables, he can inherit the op::Datum struct and add them
+// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
+// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
+struct UserDatum : public op::Datum
+{
+ bool boolThatUserNeedsForSomeReason;
+
+ UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
+ boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
+ {}
+};
+
+// The W-classes can be implemented either as a template or as simple classes given
+// that the user usually knows which kind of data he will move between the queues,
+// in this case we assume a std::shared_ptr of a std::vector of UserDatum
+
+// This worker will just invert the image
+class WUserPostProcessing : public op::Worker<std::shared_ptr<std::vector<UserDatum>>>
+{
+public:
+ WUserPostProcessing()
+ {
+ // User's constructor here
+ }
+
+ void initializationOnThread() {}
+
+    void work(std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
+ {
+ // User's post-processing (after OpenPose processing & before OpenPose outputs) here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ try
+ {
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ for (auto& datum : *datumsPtr)
+ cv::bitwise_not(datum.cvOutputData, datum.cvOutputData);
+ }
+ catch (const std::exception& e)
+ {
+ this->stop();
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ }
+ }
+};
+
+int openPoseDemo()
+{
+ try
+ {
+ op::log("Starting OpenPose demo...", op::Priority::High);
+ const auto timerBegin = std::chrono::high_resolution_clock::now();
+
+ // logging_level
+ op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
+ __LINE__, __FUNCTION__, __FILE__);
+ op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
+ op::Profiler::setDefaultX(FLAGS_profile_speed);
+ // // For debugging
+ // // Print all logging messages
+ // op::ConfigureLog::setPriorityThreshold(op::Priority::None);
+ // // Print out speed values faster
+ // op::Profiler::setDefaultX(100);
+
+ // Applying user defined configuration - GFlags to program variables
+ // outputSize
+ const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
+ // netInputSize
+ const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
+ // faceNetInputSize
+ const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
+ // handNetInputSize
+ const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
+ // producerType
+ const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
+ FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps,
+ FLAGS_camera_parameter_folder, !FLAGS_frame_keep_distortion,
+ (unsigned int) FLAGS_3d_views, FLAGS_flir_camera_index);
+ // poseModel
+ const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
+ // JSON saving
+ if (!FLAGS_write_keypoint.empty())
+ op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
+ " Please, use `write_json` instead.", op::Priority::Max);
+ // keypointScale
+ const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
+ // heatmaps to add
+ const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
+ FLAGS_heatmaps_add_PAFs);
+ const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
+ // >1 camera view?
+ const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1 || FLAGS_flir_camera);
+ // Enabling Google Logging
+ const bool enableGoogleLogging = true;
+ // Logging
+ op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+
+ // OpenPose wrapper
+ op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+        // op::Wrapper<std::vector<op::Datum>> opWrapper;
+        op::Wrapper<std::vector<UserDatum>> opWrapper;
+
+ // Initializing the user custom classes
+ // Processing
+        auto wUserPostProcessing = std::make_shared<WUserPostProcessing>();
+ // Add custom processing
+ const auto workerProcessingOnNewThread = true;
+ opWrapper.setWorker(op::WorkerType::PostProcessing, wUserPostProcessing, workerProcessingOnNewThread);
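+        // WorkerType::PostProcessing places wUserPostProcessing between the OpenPose processing and output
+        // steps; workerProcessingOnNewThread = true runs it on its own thread rather than sharing one.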
+
+ // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
+ const op::WrapperStructPose wrapperStructPose{
+ !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
+ FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
+ poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
+ FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
+ (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
+ // Face configuration (use op::WrapperStructFace{} to disable it)
+ const op::WrapperStructFace wrapperStructFace{
+ FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
+ (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
+ // Hand configuration (use op::WrapperStructHand{} to disable it)
+ const op::WrapperStructHand wrapperStructHand{
+ FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
+ op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
+ (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
+ // Producer (use default to disable any input)
+ const op::WrapperStructInput wrapperStructInput{
+ producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last, FLAGS_process_real_time, FLAGS_frame_flip,
+ FLAGS_frame_rotate, FLAGS_frames_repeat};
+ // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
+ const op::WrapperStructExtra wrapperStructExtra{
+ FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
+ // Consumer (comment or use default argument to disable any output)
+ const op::WrapperStructOutput wrapperStructOutput{
+ op::flagsToDisplayMode(FLAGS_display, FLAGS_3d), !FLAGS_no_gui_verbose, FLAGS_fullscreen,
+ FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json,
+ FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format,
+ FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format,
+ FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
+ // Configure wrapper
+ opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
+ wrapperStructInput, wrapperStructOutput);
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+
+ // Start processing
+        // Two different ways of running the program in a multi-threaded environment
+ op::log("Starting thread(s)...", op::Priority::High);
+ // Start, run & stop threads - it blocks this thread until all others have finished
+ opWrapper.exec();
+
+ // // Option b) Keeping this thread free in case you want to do something else meanwhile, e.g. profiling the GPU
+        // // memory
+ // // VERY IMPORTANT NOTE: if OpenCV is compiled with Qt support, this option will not work. Qt needs the main
+ // // thread to plot visual results, so the final GUI (which uses OpenCV) would return an exception similar to:
+ // // `QMetaMethod::invoke: Unable to invoke methods with return values in queued connections`
+ // // Start threads
+ // opWrapper.start();
+ // // Profile used GPU memory
+ // // 1: wait ~10sec so the memory has been totally loaded on GPU
+ // // 2: profile the GPU memory
+ // const auto sleepTimeMs = 10;
+ // for (auto i = 0 ; i < 10000/sleepTimeMs && opWrapper.isRunning() ; i++)
+ // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
+ // op::Profiler::profileGpuMemory(__LINE__, __FUNCTION__, __FILE__);
+ // // Keep program alive while running threads
+ // while (opWrapper.isRunning())
+ // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
+ // // Stop and join threads
+ // op::log("Stopping thread(s)", op::Priority::High);
+ // opWrapper.stop();
+
+ // Measuring total time
+ const auto now = std::chrono::high_resolution_clock::now();
+        const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
+ * 1e-9;
+ const auto message = "OpenPose demo successfully finished. Total time: "
+ + std::to_string(totalTimeSec) + " seconds.";
+ op::log(message, op::Priority::High);
+
+ // Return successful message
+ return 0;
+ }
+ catch (const std::exception& e)
+ {
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ return -1;
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running openPoseDemo
+ return openPoseDemo();
+}
diff --git a/examples/tutorial_api_cpp/7_synchronous_custom_input.cpp b/examples/tutorial_api_cpp/7_synchronous_custom_input.cpp
new file mode 100644
index 000000000..e35d729bf
--- /dev/null
+++ b/examples/tutorial_api_cpp/7_synchronous_custom_input.cpp
@@ -0,0 +1,266 @@
+// ------------------------- OpenPose Library Tutorial - Real Time Pose Estimation -------------------------
+// If the user wants to learn to use the OpenPose library, we highly recommend starting with the
+// examples in `examples/tutorial_api_cpp/`.
+// This example summarizes all the functionality of the OpenPose library:
+ // 1. Read folder of images / video / webcam (`producer` module)
+ // 2. Extract and render body keypoint / heatmap / PAF of that image (`pose` module)
+ // 3. Extract and render face keypoint / heatmap / PAF of that image (`face` module)
+ // 4. Save the results on disk (`filestream` module)
+ // 5. Display the rendered pose (`gui` module)
+ // Everything in a multi-thread scenario (`thread` module)
+ // Points 2 to 5 are included in the `wrapper` module
+// In addition to the previous OpenPose modules, we also need to use:
+ // 1. `core` module:
+ // For the Array class that the `pose` module needs
+ // For the Datum struct that the `thread` module sends between the queues
+ // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
+// This file should only be used as a reference from which the user can take specific examples.
+
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_PRODUCER
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// Custom OpenPose flags
+// Producer
+DEFINE_string(image_dir, "examples/media/",
+ "Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).");
+
+// If the user needs his own variables, he can inherit the op::Datum struct and add them
+// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
+// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
+struct UserDatum : public op::Datum
+{
+ bool boolThatUserNeedsForSomeReason;
+
+ UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
+ boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
+ {}
+};
+
+// The W-classes can be implemented either as a template or as simple classes given
+// that the user usually knows which kind of data he will move between the queues,
+// in this case we assume a std::shared_ptr of a std::vector of UserDatum
+
+// This worker will just read and return all the jpg files in a directory
+class WUserInput : public op::WorkerProducer<std::shared_ptr<std::vector<UserDatum>>>
+{
+public:
+ WUserInput(const std::string& directoryPath) :
+ mImageFiles{op::getFilesOnDirectory(directoryPath, "jpg")},
+ // If we want "jpg" + "png" images
+ // mImageFiles{op::getFilesOnDirectory(directoryPath, std::vector<std::string>{"jpg", "png"})},
+ mCounter{0}
+ {
+ if (mImageFiles.empty())
+ op::error("No images found on: " + directoryPath, __LINE__, __FUNCTION__, __FILE__);
+ }
+
+ void initializationOnThread() {}
+
+ std::shared_ptr<std::vector<UserDatum>> workProducer()
+ {
+ try
+ {
+ // Close program when empty frame
+ if (mImageFiles.size() <= mCounter)
+ {
+ op::log("Last frame read and added to queue. Closing program after it is processed.",
+ op::Priority::High);
+ // This function stops this worker, which will eventually stop the whole thread system once all the
+ // frames have been processed
+ this->stop();
+ return nullptr;
+ }
+ else
+ {
+ // Create new datum
+ auto datumsPtr = std::make_shared<std::vector<UserDatum>>();
+ datumsPtr->emplace_back();
+ auto& datum = datumsPtr->at(0);
+
+ // Fill datum
+ datum.cvInputData = cv::imread(mImageFiles.at(mCounter++));
+
+ // If empty frame -> return nullptr
+ if (datum.cvInputData.empty())
+ {
+ op::log("Empty frame detected on path: " + mImageFiles.at(mCounter-1) + ". Closing program.",
+ op::Priority::High);
+ this->stop();
+ datumsPtr = nullptr;
+ }
+
+ return datumsPtr;
+ }
+ }
+ catch (const std::exception& e)
+ {
+ this->stop();
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ return nullptr;
+ }
+ }
+
+private:
+ const std::vector<std::string> mImageFiles;
+ unsigned long long mCounter;
+};
+
+int openPoseDemo()
+{
+ try
+ {
+ op::log("Starting OpenPose demo...", op::Priority::High);
+ const auto timerBegin = std::chrono::high_resolution_clock::now();
+
+ // logging_level
+ op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
+ __LINE__, __FUNCTION__, __FILE__);
+ op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
+ op::Profiler::setDefaultX(FLAGS_profile_speed);
+ // // For debugging
+ // // Print all logging messages
+ // op::ConfigureLog::setPriorityThreshold(op::Priority::None);
+ // // Print out speed values faster
+ // op::Profiler::setDefaultX(100);
+
+ // Applying user defined configuration - GFlags to program variables
+ // outputSize
+ const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
+ // netInputSize
+ const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
+ // faceNetInputSize
+ const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
+ // handNetInputSize
+ const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
+ // // producerType
+ // const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
+ // FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps,
+ // FLAGS_camera_parameter_folder, !FLAGS_frame_keep_distortion,
+ // (unsigned int) FLAGS_3d_views, FLAGS_flir_camera_index);
+ // poseModel
+ const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
+ // JSON saving
+ if (!FLAGS_write_keypoint.empty())
+ op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
+ " Please, use `write_json` instead.", op::Priority::Max);
+ // keypointScale
+ const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
+ // heatmaps to add
+ const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
+ FLAGS_heatmaps_add_PAFs);
+ const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
+ // >1 camera view?
+ // const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1 || FLAGS_flir_camera);
+ const auto multipleView = false;
+ // Enabling Google Logging
+ const bool enableGoogleLogging = true;
+ // Logging
+ op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+
+ // OpenPose wrapper
+ op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+ // op::Wrapper<std::vector<op::Datum>> opWrapper;
+ op::Wrapper<std::vector<UserDatum>> opWrapper;
+
+ // Initializing the user custom classes
+ // Frames producer (e.g. video, webcam, ...)
+ auto wUserInput = std::make_shared<WUserInput>(FLAGS_image_dir);
+ // Add custom processing
+ const auto workerInputOnNewThread = true;
+ opWrapper.setWorker(op::WorkerType::Input, wUserInput, workerInputOnNewThread);
+
+ // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
+ const op::WrapperStructPose wrapperStructPose{
+ !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
+ FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
+ poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
+ FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
+ (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
+ // Face configuration (use op::WrapperStructFace{} to disable it)
+ const op::WrapperStructFace wrapperStructFace{
+ FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
+ (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
+ // Hand configuration (use op::WrapperStructHand{} to disable it)
+ const op::WrapperStructHand wrapperStructHand{
+ FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
+ op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
+ (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
+ // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
+ const op::WrapperStructExtra wrapperStructExtra{
+ FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
+ // Producer (use default to disable any input)
+ // const op::WrapperStructInput wrapperStructInput{producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last,
+ // FLAGS_process_real_time, FLAGS_frame_flip, FLAGS_frame_rotate,
+ // FLAGS_frames_repeat};
+ const op::WrapperStructInput wrapperStructInput;
+ // Consumer (comment or use default argument to disable any output)
+ const op::WrapperStructOutput wrapperStructOutput{
+ op::flagsToDisplayMode(FLAGS_display, FLAGS_3d), !FLAGS_no_gui_verbose, FLAGS_fullscreen,
+ FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json,
+ FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format,
+ FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format,
+ FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
+ // Configure wrapper
+
+ opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
+ wrapperStructInput, wrapperStructOutput);
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+
+ // Start processing
+ // Two different ways of running the program on multithread environment
+ op::log("Starting thread(s)...", op::Priority::High);
+ // Start, run & stop threads - it blocks this thread until all others have finished
+ opWrapper.exec();
+
+ // // Option b) Keeping this thread free in case you want to do something else meanwhile, e.g. profiling the GPU
+ // memory
+ // // VERY IMPORTANT NOTE: if OpenCV is compiled with Qt support, this option will not work. Qt needs the main
+ // // thread to plot visual results, so the final GUI (which uses OpenCV) would return an exception similar to:
+ // // `QMetaMethod::invoke: Unable to invoke methods with return values in queued connections`
+ // // Start threads
+ // opWrapper.start();
+ // // Profile used GPU memory
+ // // 1: wait ~10sec so the memory has been totally loaded on GPU
+ // // 2: profile the GPU memory
+ // const auto sleepTimeMs = 10;
+ // for (auto i = 0 ; i < 10000/sleepTimeMs && opWrapper.isRunning() ; i++)
+ // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
+ // op::Profiler::profileGpuMemory(__LINE__, __FUNCTION__, __FILE__);
+ // // Keep program alive while running threads
+ // while (opWrapper.isRunning())
+ // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
+ // // Stop and join threads
+ // op::log("Stopping thread(s)", op::Priority::High);
+ // opWrapper.stop();
+
+ // Measuring total time
+ const auto now = std::chrono::high_resolution_clock::now();
+ const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
+ * 1e-9;
+ const auto message = "OpenPose demo successfully finished. Total time: "
+ + std::to_string(totalTimeSec) + " seconds.";
+ op::log(message, op::Priority::High);
+
+ // Return successful message
+ return 0;
+ }
+ catch (const std::exception& e)
+ {
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ return -1;
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running openPoseDemo
+ return openPoseDemo();
+}
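
For quick reference, the synchronous custom-input pattern demonstrated in 7_synchronous_custom_input.cpp can be condensed to the sketch below. It only reuses calls that appear in the example above (setWorker, configure, exec, the default WrapperStruct constructors); the class name MinimalProducer and the hard-coded image path are illustrative placeholders, not part of the OpenPose API, and error handling is trimmed for brevity.

#define OPENPOSE_FLAGS_DISABLE_PRODUCER
#include <openpose/flags.hpp>
#include <openpose/headers.hpp>

// Minimal custom producer: emits a single frame read with OpenCV, then stops the thread system
class MinimalProducer : public op::WorkerProducer<std::shared_ptr<std::vector<op::Datum>>>
{
public:
    void initializationOnThread() {}
    std::shared_ptr<std::vector<op::Datum>> workProducer()
    {
        if (mDone)
        {
            this->stop();      // no more frames -> stop this worker (and eventually the whole wrapper)
            return nullptr;
        }
        mDone = true;
        auto datumsPtr = std::make_shared<std::vector<op::Datum>>();
        datumsPtr->emplace_back();
        // Illustrative path; any image readable by cv::imread would do
        datumsPtr->at(0).cvInputData = cv::imread("examples/media/COCO_val2014_000000000192.jpg");
        return datumsPtr;
    }
private:
    bool mDone = false;
};

int main(int argc, char *argv[])
{
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    op::Wrapper<std::vector<op::Datum>> opWrapper;
    // Plug in the custom producer; every other module keeps its default (recommended) configuration
    opWrapper.setWorker(op::WorkerType::Input, std::make_shared<MinimalProducer>(), true);
    opWrapper.configure(op::WrapperStructPose{}, op::WrapperStructFace{}, op::WrapperStructHand{},
                        op::WrapperStructExtra{}, op::WrapperStructInput{}, op::WrapperStructOutput{});
    opWrapper.exec();          // blocks until all threads have finished
    return 0;
}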
diff --git a/examples/tutorial_api_cpp/8_synchronous_custom_output.cpp b/examples/tutorial_api_cpp/8_synchronous_custom_output.cpp
new file mode 100644
index 000000000..ade60a016
--- /dev/null
+++ b/examples/tutorial_api_cpp/8_synchronous_custom_output.cpp
@@ -0,0 +1,274 @@
+// ------------------------- OpenPose Library Tutorial - Real Time Pose Estimation -------------------------
+// If the user wants to learn to use the OpenPose library, we highly recommend starting with the
+// examples in `examples/tutorial_api_cpp/`.
+// This example summarizes all the functionality of the OpenPose library:
+ // 1. Read folder of images / video / webcam (`producer` module)
+ // 2. Extract and render body keypoint / heatmap / PAF of that image (`pose` module)
+ // 3. Extract and render face keypoint / heatmap / PAF of that image (`face` module)
+ // 4. Save the results on disk (`filestream` module)
+ // 5. Display the rendered pose (`gui` module)
+ // Everything in a multi-thread scenario (`thread` module)
+ // Points 2 to 5 are included in the `wrapper` module
+// In addition to the previous OpenPose modules, we also need to use:
+ // 1. `core` module:
+ // For the Array class that the `pose` module needs
+ // For the Datum struct that the `thread` module sends between the queues
+ // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
+// This file should only be used as a reference from which the user can take specific examples.
+
+// Command-line user interface
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// If the user needs his own variables, he can inherit the op::Datum struct and add them
+// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
+// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
+struct UserDatum : public op::Datum
+{
+ bool boolThatUserNeedsForSomeReason;
+
+ UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
+ boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
+ {}
+};
+
+// The W-classes can be implemented either as a template or as simple classes given
+// that the user usually knows which kind of data he will move between the queues,
+// in this case we assume a std::shared_ptr of a std::vector of UserDatum
+
+// This worker will just read and return all the jpg files in a directory
+class WUserOutput : public op::WorkerConsumer<std::shared_ptr<std::vector<UserDatum>>>
+{
+public:
+ void initializationOnThread() {}
+
+ void workConsumer(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
+ {
+ try
+ {
+ // User's displaying/saving/other processing here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ // Show in command line the resulting pose keypoints for body, face and hands
+ op::log("\nKeypoints:");
+ // Accessing each element of the keypoints
+ const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
+ op::log("Person pose keypoints:");
+ for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
+ {
+ op::log("Person " + std::to_string(person) + " (x, y, score):");
+ for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
+ {
+ std::string valueToPrint;
+ for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
+ {
+ valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
+ }
+ op::log(valueToPrint);
+ }
+ }
+ op::log(" ");
+ // Alternative: just getting std::string equivalent
+ op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
+ op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
+ op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
+ // Heatmaps
+ const auto& poseHeatMaps = datumsPtr->at(0).poseHeatMaps;
+ if (!poseHeatMaps.empty())
+ {
+ op::log("Pose heatmaps size: [" + std::to_string(poseHeatMaps.getSize(0)) + ", "
+ + std::to_string(poseHeatMaps.getSize(1)) + ", "
+ + std::to_string(poseHeatMaps.getSize(2)) + "]");
+ const auto& faceHeatMaps = datumsPtr->at(0).faceHeatMaps;
+ op::log("Face heatmaps size: [" + std::to_string(faceHeatMaps.getSize(0)) + ", "
+ + std::to_string(faceHeatMaps.getSize(1)) + ", "
+ + std::to_string(faceHeatMaps.getSize(2)) + ", "
+ + std::to_string(faceHeatMaps.getSize(3)) + "]");
+ const auto& handHeatMaps = datumsPtr->at(0).handHeatMaps;
+ op::log("Left hand heatmaps size: [" + std::to_string(handHeatMaps[0].getSize(0)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(1)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(2)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(3)) + "]");
+ op::log("Right hand heatmaps size: [" + std::to_string(handHeatMaps[1].getSize(0)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(1)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(2)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(3)) + "]");
+ }
+
+ // Display rendered output image
+ cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
+ // Display image and sleeps at least 1 ms (it usually sleeps ~5-10 msec to display the image)
+ const char key = (char)cv::waitKey(1);
+ if (key == 27)
+ this->stop();
+ }
+ }
+ catch (const std::exception& e)
+ {
+ this->stop();
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ }
+ }
+};
+
+int openPoseDemo()
+{
+ try
+ {
+ op::log("Starting OpenPose demo...", op::Priority::High);
+ const auto timerBegin = std::chrono::high_resolution_clock::now();
+
+ // logging_level
+ op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
+ __LINE__, __FUNCTION__, __FILE__);
+ op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
+ op::Profiler::setDefaultX(FLAGS_profile_speed);
+ // // For debugging
+ // // Print all logging messages
+ // op::ConfigureLog::setPriorityThreshold(op::Priority::None);
+ // // Print out speed values faster
+ // op::Profiler::setDefaultX(100);
+
+ // Applying user defined configuration - GFlags to program variables
+ // outputSize
+ const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
+ // netInputSize
+ const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
+ // faceNetInputSize
+ const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
+ // handNetInputSize
+ const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
+ // producerType
+ const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
+ FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps,
+ FLAGS_camera_parameter_folder, !FLAGS_frame_keep_distortion,
+ (unsigned int) FLAGS_3d_views, FLAGS_flir_camera_index);
+ // poseModel
+ const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
+ // JSON saving
+ if (!FLAGS_write_keypoint.empty())
+ op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
+ " Please, use `write_json` instead.", op::Priority::Max);
+ // keypointScale
+ const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
+ // heatmaps to add
+ const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
+ FLAGS_heatmaps_add_PAFs);
+ const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
+ // >1 camera view?
+ const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1 || FLAGS_flir_camera);
+ // Enabling Google Logging
+ const bool enableGoogleLogging = true;
+ // Logging
+ op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+
+ // OpenPose wrapper
+ op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+ // op::Wrapper<std::vector<op::Datum>> opWrapper;
+ op::Wrapper<std::vector<UserDatum>> opWrapper;
+
+ // Initializing the user custom classes
+ // GUI (Display)
+ auto wUserOutput = std::make_shared<WUserOutput>();
+ // Add custom processing
+ const auto workerOutputOnNewThread = true;
+ opWrapper.setWorker(op::WorkerType::Output, wUserOutput, workerOutputOnNewThread);
+
+ // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
+ const op::WrapperStructPose wrapperStructPose{
+ !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
+ FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
+ poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
+ FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
+ (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
+ // Face configuration (use op::WrapperStructFace{} to disable it)
+ const op::WrapperStructFace wrapperStructFace{
+ FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
+ (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
+ // Hand configuration (use op::WrapperStructHand{} to disable it)
+ const op::WrapperStructHand wrapperStructHand{
+ FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
+ op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
+ (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
+ // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
+ const op::WrapperStructExtra wrapperStructExtra{
+ FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
+ // Producer (use default to disable any input)
+ const op::WrapperStructInput wrapperStructInput{
+ producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last, FLAGS_process_real_time, FLAGS_frame_flip,
+ FLAGS_frame_rotate, FLAGS_frames_repeat};
+ // Consumer (comment or use default argument to disable any output)
+ // const op::WrapperStructOutput wrapperStructOutput{op::flagsToDisplayMode(FLAGS_display, FLAGS_3d),
+ // !FLAGS_no_gui_verbose, FLAGS_fullscreen, FLAGS_write_keypoint,
+ const auto displayMode = op::DisplayMode::NoDisplay;
+ const bool guiVerbose = false;
+ const bool fullScreen = false;
+ const op::WrapperStructOutput wrapperStructOutput{
+ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
+ op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
+ FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
+ FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
+ FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
+ // Configure wrapper
+ opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
+ wrapperStructInput, wrapperStructOutput);
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+
+ // Start processing
+ // Two different ways of running the program on multithread environment
+ op::log("Starting thread(s)...", op::Priority::High);
+ // Start, run & stop threads - it blocks this thread until all others have finished
+ opWrapper.exec();
+
+ // // Option b) Keeping this thread free in case you want to do something else meanwhile, e.g. profiling the GPU
+ // memory
+ // // VERY IMPORTANT NOTE: if OpenCV is compiled with Qt support, this option will not work. Qt needs the main
+ // // thread to plot visual results, so the final GUI (which uses OpenCV) would return an exception similar to:
+ // // `QMetaMethod::invoke: Unable to invoke methods with return values in queued connections`
+ // // Start threads
+ // opWrapper.start();
+ // // Profile used GPU memory
+ // // 1: wait ~10sec so the memory has been totally loaded on GPU
+ // // 2: profile the GPU memory
+ // const auto sleepTimeMs = 10;
+ // for (auto i = 0 ; i < 10000/sleepTimeMs && opWrapper.isRunning() ; i++)
+ // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
+ // op::Profiler::profileGpuMemory(__LINE__, __FUNCTION__, __FILE__);
+ // // Keep program alive while running threads
+ // while (opWrapper.isRunning())
+ // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
+ // // Stop and join threads
+ // op::log("Stopping thread(s)", op::Priority::High);
+ // opWrapper.stop();
+
+ // Measuring total time
+ const auto now = std::chrono::high_resolution_clock::now();
+ const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
+ * 1e-9;
+ const auto message = "OpenPose demo successfully finished. Total time: "
+ + std::to_string(totalTimeSec) + " seconds.";
+ op::log(message, op::Priority::High);
+
+ // Return successful message
+ return 0;
+ }
+ catch (const std::exception& e)
+ {
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ return -1;
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running openPoseDemo
+ return openPoseDemo();
+}
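
The keypoint-access idiom that WUserOutput uses above (getSize() plus the brace-indexed operator[] of the keypoint Array) can be isolated into a small helper. This is a sketch only: it assumes datum.poseKeypoints is an op::Array<float>, as the `core` module comments in these examples indicate, and printPose is an illustrative name.

#include <string>
#include <openpose/headers.hpp>

// Prints every (x, y, score) triplet of a pose-keypoint Array, one body part per line
void printPose(const op::Array<float>& poseKeypoints)
{
    for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
    {
        op::log("Person " + std::to_string(person) + " (x, y, score):");
        for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
        {
            std::string valueToPrint;
            for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
                valueToPrint += std::to_string(poseKeypoints[{person, bodyPart, xyscore}]) + " ";
            op::log(valueToPrint);
        }
    }
}

Inside workConsumer() it would be called as printPose(datumsPtr->at(0).poseKeypoints).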
diff --git a/examples/tutorial_api_cpp/9_synchronous_custom_all.cpp b/examples/tutorial_api_cpp/9_synchronous_custom_all.cpp
new file mode 100644
index 000000000..682176cfa
--- /dev/null
+++ b/examples/tutorial_api_cpp/9_synchronous_custom_all.cpp
@@ -0,0 +1,362 @@
+// ------------------------- OpenPose Library Tutorial - Wrapper - Example 2 - Synchronous -------------------------
+// Synchronous mode: ideal for performance. The user can add his own frames producer / post-processor / consumer to the OpenPose wrapper or use the
+// default ones.
+
+// This example shows the user how to use the OpenPose wrapper class:
+ // 1. User reads images
+ // 2. Extract and render keypoint / heatmap / PAF of that image
+ // 3. Save the results on disk
+ // 4. User displays the rendered pose
+ // Everything in a multi-thread scenario
+// In addition to the previous OpenPose modules, we also need to use:
+ // 1. `core` module:
+ // For the Array class that the `pose` module needs
+ // For the Datum struct that the `thread` module sends between the queues
+ // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
+// This file should only be used as a reference from which the user can take specific examples.
+
+// Command-line user interface
+#define OPENPOSE_FLAGS_DISABLE_PRODUCER
+#define OPENPOSE_FLAGS_DISABLE_DISPLAY
+#include <openpose/flags.hpp>
+// OpenPose dependencies
+#include <openpose/headers.hpp>
+
+// Custom OpenPose flags
+// Producer
+DEFINE_string(image_dir, "examples/media/",
+ "Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).");
+
+// If the user needs his own variables, he can inherit the op::Datum struct and add them
+// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
+// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
+struct UserDatum : public op::Datum
+{
+ bool boolThatUserNeedsForSomeReason;
+
+ UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
+ boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
+ {}
+};
+
+// The W-classes can be implemented either as a template or as simple classes given
+// that the user usually knows which kind of data he will move between the queues,
+// in this case we assume a std::shared_ptr of a std::vector of UserDatum
+
+// This worker will just read and return all the jpg files in a directory
+class WUserInput : public op::WorkerProducer<std::shared_ptr<std::vector<UserDatum>>>
+{
+public:
+ WUserInput(const std::string& directoryPath) :
+ mImageFiles{op::getFilesOnDirectory(directoryPath, "jpg")},
+ // If we want "jpg" + "png" images
+ // mImageFiles{op::getFilesOnDirectory(directoryPath, std::vector<std::string>{"jpg", "png"})},
+ mCounter{0}
+ {
+ if (mImageFiles.empty())
+ op::error("No images found on: " + directoryPath, __LINE__, __FUNCTION__, __FILE__);
+ }
+
+ void initializationOnThread() {}
+
+ std::shared_ptr<std::vector<UserDatum>> workProducer()
+ {
+ try
+ {
+ // Close program when empty frame
+ if (mImageFiles.size() <= mCounter)
+ {
+ op::log("Last frame read and added to queue. Closing program after it is processed.",
+ op::Priority::High);
+ // This function stops this worker, which will eventually stop the whole thread system once all the
+ // frames have been processed
+ this->stop();
+ return nullptr;
+ }
+ else
+ {
+ // Create new datum
+ auto datumsPtr = std::make_shared<std::vector<UserDatum>>();
+ datumsPtr->emplace_back();
+ auto& datum = datumsPtr->at(0);
+
+ // Fill datum
+ datum.cvInputData = cv::imread(mImageFiles.at(mCounter++));
+
+ // If empty frame -> return nullptr
+ if (datum.cvInputData.empty())
+ {
+ op::log("Empty frame detected on path: " + mImageFiles.at(mCounter-1) + ". Closing program.",
+ op::Priority::High);
+ this->stop();
+ datumsPtr = nullptr;
+ }
+
+ return datumsPtr;
+ }
+ }
+ catch (const std::exception& e)
+ {
+ this->stop();
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ return nullptr;
+ }
+ }
+
+private:
+ const std::vector<std::string> mImageFiles;
+ unsigned long long mCounter;
+};
+
+// This worker will just invert the image
+class WUserPostProcessing : public op::Worker<std::shared_ptr<std::vector<UserDatum>>>
+{
+public:
+ WUserPostProcessing()
+ {
+ // User's constructor here
+ }
+
+ void initializationOnThread() {}
+
+ void work(std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
+ {
+ // User's post-processing (after OpenPose processing & before OpenPose outputs) here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ try
+ {
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ for (auto& datum : *datumsPtr)
+ cv::bitwise_not(datum.cvOutputData, datum.cvOutputData);
+ }
+ catch (const std::exception& e)
+ {
+ this->stop();
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ }
+ }
+};
+
+// This worker will just read and return all the jpg files in a directory
+class WUserOutput : public op::WorkerConsumer<std::shared_ptr<std::vector<UserDatum>>>
+{
+public:
+ void initializationOnThread() {}
+
+ void workConsumer(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
+ {
+ try
+ {
+ // User's displaying/saving/other processing here
+ // datum.cvOutputData: rendered frame with pose or heatmaps
+ // datum.poseKeypoints: Array with the estimated pose
+ if (datumsPtr != nullptr && !datumsPtr->empty())
+ {
+ // Show in command line the resulting pose keypoints for body, face and hands
+ op::log("\nKeypoints:");
+ // Accessing each element of the keypoints
+ const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
+ op::log("Person pose keypoints:");
+ for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
+ {
+ op::log("Person " + std::to_string(person) + " (x, y, score):");
+ for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
+ {
+ std::string valueToPrint;
+ for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
+ {
+ valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
+ }
+ op::log(valueToPrint);
+ }
+ }
+ op::log(" ");
+ // Alternative: just getting std::string equivalent
+ op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
+ op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
+ op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
+ // Heatmaps
+ const auto& poseHeatMaps = datumsPtr->at(0).poseHeatMaps;
+ if (!poseHeatMaps.empty())
+ {
+ op::log("Pose heatmaps size: [" + std::to_string(poseHeatMaps.getSize(0)) + ", "
+ + std::to_string(poseHeatMaps.getSize(1)) + ", "
+ + std::to_string(poseHeatMaps.getSize(2)) + "]");
+ const auto& faceHeatMaps = datumsPtr->at(0).faceHeatMaps;
+ op::log("Face heatmaps size: [" + std::to_string(faceHeatMaps.getSize(0)) + ", "
+ + std::to_string(faceHeatMaps.getSize(1)) + ", "
+ + std::to_string(faceHeatMaps.getSize(2)) + ", "
+ + std::to_string(faceHeatMaps.getSize(3)) + "]");
+ const auto& handHeatMaps = datumsPtr->at(0).handHeatMaps;
+ op::log("Left hand heatmaps size: [" + std::to_string(handHeatMaps[0].getSize(0)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(1)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(2)) + ", "
+ + std::to_string(handHeatMaps[0].getSize(3)) + "]");
+ op::log("Right hand heatmaps size: [" + std::to_string(handHeatMaps[1].getSize(0)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(1)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(2)) + ", "
+ + std::to_string(handHeatMaps[1].getSize(3)) + "]");
+ }
+
+ // Display rendered output image
+ cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
+ // Display image and sleeps at least 1 ms (it usually sleeps ~5-10 msec to display the image)
+ const char key = (char)cv::waitKey(1);
+ if (key == 27)
+ this->stop();
+ }
+ }
+ catch (const std::exception& e)
+ {
+ this->stop();
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ }
+ }
+};
+
+int openPoseTutorialWrapper2()
+{
+ try
+ {
+ op::log("Starting OpenPose demo...", op::Priority::High);
+ const auto timerBegin = std::chrono::high_resolution_clock::now();
+
+ // logging_level
+ op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
+ __LINE__, __FUNCTION__, __FILE__);
+ op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
+ op::Profiler::setDefaultX(FLAGS_profile_speed);
+
+ // Applying user defined configuration - GFlags to program variables
+ // outputSize
+ const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
+ // netInputSize
+ const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
+ // faceNetInputSize
+ const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
+ // handNetInputSize
+ const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
+ // poseModel
+ const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
+ // JSON saving
+ if (!FLAGS_write_keypoint.empty())
+ op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
+ " Please, use `write_json` instead.", op::Priority::Max);
+ // keypointScale
+ const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
+ // heatmaps to add
+ const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
+ FLAGS_heatmaps_add_PAFs);
+ const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
+ // >1 camera view?
+ const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1);
+ // Enabling Google Logging
+ const bool enableGoogleLogging = true;
+ // Logging
+ op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+
+ // Initializing the user custom classes
+ // Frames producer (e.g. video, webcam, ...)
+ auto wUserInput = std::make_shared<WUserInput>(FLAGS_image_dir);
+ // Processing
+ auto wUserPostProcessing = std::make_shared<WUserPostProcessing>();
+ // GUI (Display)
+ auto wUserOutput = std::make_shared<WUserOutput>();
+
+ op::Wrapper<std::vector<UserDatum>> opWrapper;
+ // Add custom input
+ const auto workerInputOnNewThread = false;
+ opWrapper.setWorker(op::WorkerType::Input, wUserInput, workerInputOnNewThread);
+ // Add custom processing
+ const auto workerProcessingOnNewThread = false;
+ opWrapper.setWorker(op::WorkerType::PostProcessing, wUserPostProcessing, workerProcessingOnNewThread);
+ // Add custom output
+ const auto workerOutputOnNewThread = true;
+ opWrapper.setWorker(op::WorkerType::Output, wUserOutput, workerOutputOnNewThread);
+ // Configure OpenPose
+ op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
+ const op::WrapperStructPose wrapperStructPose{
+ !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
+ FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
+ poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
+ FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
+ (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
+ // Face configuration (use op::WrapperStructFace{} to disable it)
+ const op::WrapperStructFace wrapperStructFace{
+ FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
+ (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
+ // Hand configuration (use op::WrapperStructHand{} to disable it)
+ const op::WrapperStructHand wrapperStructHand{
+ FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
+ op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
+ (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
+ // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
+ const op::WrapperStructExtra wrapperStructExtra{
+ FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
+ // Consumer (comment or use default argument to disable any output)
+ const auto displayMode = op::DisplayMode::NoDisplay;
+ const bool guiVerbose = false;
+ const bool fullScreen = false;
+ const op::WrapperStructOutput wrapperStructOutput{
+ displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
+ op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
+ FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
+ FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
+ FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
+ // Configure wrapper
+ opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
+ op::WrapperStructInput{}, wrapperStructOutput);
+ // Set to single-thread (for sequential processing and/or debugging and/or reducing latency)
+ if (FLAGS_disable_multi_thread)
+ opWrapper.disableMultiThreading();
+
+ op::log("Starting thread(s)...", op::Priority::High);
+ // Two different ways of running the program on multithread environment
+ // Start, run & stop threads - it blocks this thread until all others have finished
+ opWrapper.exec();
+
+ // Option b) Keeping this thread free in case you want to do something else meanwhile, e.g. profiling the GPU memory
+ // // VERY IMPORTANT NOTE: if OpenCV is compiled with Qt support, this option will not work. Qt needs the main
+ // // thread to plot visual results, so the final GUI (which uses OpenCV) would return an exception similar to:
+ // // `QMetaMethod::invoke: Unable to invoke methods with return values in queued connections`
+ // // Start threads
+ // opWrapper.start();
+ // // Profile used GPU memory
+ // // 1: wait ~10sec so the memory has been totally loaded on GPU
+ // // 2: profile the GPU memory
+ // std::this_thread::sleep_for(std::chrono::milliseconds{1000});
+ // op::log("Random task here...", op::Priority::High);
+ // // Keep program alive while running threads
+ // while (opWrapper.isRunning())
+ // std::this_thread::sleep_for(std::chrono::milliseconds{33});
+ // // Stop and join threads
+ // op::log("Stopping thread(s)", op::Priority::High);
+ // opWrapper.stop();
+
+ // Measuring total time
+ const auto now = std::chrono::high_resolution_clock::now();
+ const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
+ * 1e-9;
+ const auto message = "OpenPose demo successfully finished. Total time: "
+ + std::to_string(totalTimeSec) + " seconds.";
+ op::log(message, op::Priority::High);
+
+ // Return successful message
+ return 0;
+ }
+ catch (const std::exception& e)
+ {
+ op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
+ return -1;
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ // Parsing command line flags
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+ // Running openPoseTutorialWrapper2
+ return openPoseTutorialWrapper2();
+}
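
As with the input and output workers, the post-processing stage of 9_synchronous_custom_all.cpp reduces to a small, self-contained class. The sketch below mirrors WUserPostProcessing (invert the rendered image with cv::bitwise_not); MinimalPostProcessing is an illustrative name, and error handling is trimmed for brevity.

#include <openpose/headers.hpp>

// Post-processing worker: runs after the OpenPose processing and before the OpenPose outputs
class MinimalPostProcessing : public op::Worker<std::shared_ptr<std::vector<op::Datum>>>
{
public:
    void initializationOnThread() {}
    void work(std::shared_ptr<std::vector<op::Datum>>& datumsPtr)
    {
        // Invert the rendered frame of each datum in the packet
        if (datumsPtr != nullptr && !datumsPtr->empty())
            for (auto& datum : *datumsPtr)
                cv::bitwise_not(datum.cvOutputData, datum.cvOutputData);
    }
};

It would be registered exactly as in the example above, e.g. opWrapper.setWorker(op::WorkerType::PostProcessing, std::make_shared<MinimalPostProcessing>(), false), with the Wrapper templated on std::vector<op::Datum> in this case.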
diff --git a/examples/tutorial_wrapper/CMakeLists.txt b/examples/tutorial_api_cpp/CMakeLists.txt
similarity index 74%
rename from examples/tutorial_wrapper/CMakeLists.txt
rename to examples/tutorial_api_cpp/CMakeLists.txt
index db1095b11..8d9dc1917 100644
--- a/examples/tutorial_wrapper/CMakeLists.txt
+++ b/examples/tutorial_api_cpp/CMakeLists.txt
@@ -1,34 +1,37 @@
-set(EXAMPLE_FILES
- 1_user_synchronous_postprocessing.cpp
- 2_user_synchronous_input.cpp
- 3_user_synchronous_output.cpp
- 4_user_synchronous_all.cpp
- 5_user_asynchronous.cpp
- 6_user_asynchronous_output.cpp)
-
-include(${CMAKE_SOURCE_DIR}/cmake/Utils.cmake)
-
-foreach(EXAMPLE_FILE ${EXAMPLE_FILES})
-
- get_filename_component(SOURCE_NAME ${EXAMPLE_FILE} NAME_WE)
-
- if (UNIX OR APPLE)
- set(EXE_NAME "${SOURCE_NAME}.bin")
- elseif (WIN32)
- set(EXE_NAME "${SOURCE_NAME}")
- endif ()
-
- message(STATUS "Adding Example ${EXE_NAME}")
- add_executable(${EXE_NAME} ${EXAMPLE_FILE})
- target_link_libraries(${EXE_NAME} openpose ${examples_3rdparty_libraries})
-
- if (WIN32)
- set_property(TARGET ${EXE_NAME} PROPERTY FOLDER "Examples/Tutorial/Wrapper")
- configure_file(${CMAKE_SOURCE_DIR}/cmake/OpenPose${VCXPROJ_FILE_GPU_MODE}.vcxproj.user
- ${CMAKE_CURRENT_BINARY_DIR}/${EXE_NAME}.vcxproj.user @ONLY)
- # Properties->General->Output Directory
- set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
- set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
- endif (WIN32)
-
-endforeach()
+set(EXAMPLE_FILES
+ 1_body_from_image.cpp
+ 2_whole_body_from_image.cpp
+ 3_keypoints_from_image_configurable.cpp
+ 4_asynchronous_loop_custom_input_and_output.cpp
+ 5_asynchronous_loop_custom_output.cpp
+ 6_synchronous_custom_postprocessing.cpp
+ 7_synchronous_custom_input.cpp
+ 8_synchronous_custom_output.cpp
+ 9_synchronous_custom_all.cpp)
+
+include(${CMAKE_SOURCE_DIR}/cmake/Utils.cmake)
+
+foreach(EXAMPLE_FILE ${EXAMPLE_FILES})
+
+ get_filename_component(SOURCE_NAME ${EXAMPLE_FILE} NAME_WE)
+
+ if (UNIX OR APPLE)
+ set(EXE_NAME "${SOURCE_NAME}.bin")
+ elseif (WIN32)
+ set(EXE_NAME "${SOURCE_NAME}")
+ endif ()
+
+ message(STATUS "Adding Example ${EXE_NAME}")
+ add_executable(${EXE_NAME} ${EXAMPLE_FILE})
+ target_link_libraries(${EXE_NAME} openpose ${examples_3rdparty_libraries})
+
+ if (WIN32)
+ set_property(TARGET ${EXE_NAME} PROPERTY FOLDER "Examples/Tutorial/C++ API")
+ configure_file(${CMAKE_SOURCE_DIR}/cmake/OpenPose${VCXPROJ_FILE_GPU_MODE}.vcxproj.user
+ ${CMAKE_CURRENT_BINARY_DIR}/${EXE_NAME}.vcxproj.user @ONLY)
+ # Properties->General->Output Directory
+ set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
+ set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
+ endif (WIN32)
+
+endforeach()
diff --git a/examples/tutorial_api_cpp/README.md b/examples/tutorial_api_cpp/README.md
new file mode 100644
index 000000000..88a12669d
--- /dev/null
+++ b/examples/tutorial_api_cpp/README.md
@@ -0,0 +1,2 @@
+# C++ API Examples
+This folder provides examples of the basic OpenPose C++ API. The analogous Python API examples can be found in [examples/tutorial_api_python/](../tutorial_api_python/).
diff --git a/examples/tutorial_python/1_extract_pose.py b/examples/tutorial_api_python/1_extract_pose.py
similarity index 100%
rename from examples/tutorial_python/1_extract_pose.py
rename to examples/tutorial_api_python/1_extract_pose.py
diff --git a/examples/tutorial_python/CMakeLists.txt b/examples/tutorial_api_python/CMakeLists.txt
similarity index 52%
rename from examples/tutorial_python/CMakeLists.txt
rename to examples/tutorial_api_python/CMakeLists.txt
index 834e2bb1b..437814b14 100644
--- a/examples/tutorial_python/CMakeLists.txt
+++ b/examples/tutorial_api_python/CMakeLists.txt
@@ -1,3 +1,2 @@
### Add Python Test
configure_file(1_extract_pose.py 1_extract_pose.py)
-configure_file(2_pose_from_heatmaps.py 2_pose_from_heatmaps.py)
diff --git a/examples/tutorial_api_python/README.md b/examples/tutorial_api_python/README.md
new file mode 100644
index 000000000..339caee14
--- /dev/null
+++ b/examples/tutorial_api_python/README.md
@@ -0,0 +1,2 @@
+# Python API Examples
+This folder provides examples of the basic OpenPose Python API. The analogous C++ API examples can be found in [examples/tutorial_api_cpp/](../tutorial_api_cpp/).
diff --git a/examples/tutorial_thread/CMakeLists.txt b/examples/tutorial_developer/CMakeLists.txt
similarity index 70%
rename from examples/tutorial_thread/CMakeLists.txt
rename to examples/tutorial_developer/CMakeLists.txt
index f905383f9..344109202 100644
--- a/examples/tutorial_thread/CMakeLists.txt
+++ b/examples/tutorial_developer/CMakeLists.txt
@@ -1,30 +1,35 @@
-set(EXAMPLE_FILES
- 1_openpose_read_and_display.cpp
- 2_user_processing_function.cpp
- 3_user_input_processing_and_output.cpp
- 4_user_input_processing_output_and_datum.cpp)
-
-foreach(EXAMPLE_FILE ${EXAMPLE_FILES})
-
- get_filename_component(SOURCE_NAME ${EXAMPLE_FILE} NAME_WE)
-
- if (UNIX OR APPLE)
- set(EXE_NAME "${SOURCE_NAME}.bin")
- elseif (WIN32)
- set(EXE_NAME "${SOURCE_NAME}")
- endif ()
-
- message(STATUS "Adding Example ${EXE_NAME}")
- add_executable(${EXE_NAME} ${EXAMPLE_FILE})
- target_link_libraries(${EXE_NAME} openpose ${examples_3rdparty_libraries})
-
- if (WIN32)
- set_property(TARGET ${EXE_NAME} PROPERTY FOLDER "Examples/Tutorial/Thread")
- configure_file(${CMAKE_SOURCE_DIR}/cmake/OpenPose${VCXPROJ_FILE_GPU_MODE}.vcxproj.user
- ${CMAKE_CURRENT_BINARY_DIR}/${EXE_NAME}.vcxproj.user @ONLY)
- # Properties->General->Output Directory
- set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
- set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
- endif (WIN32)
-
-endforeach()
+set(EXAMPLE_FILES
+ pose_1_extract_from_image.cpp
+ pose_2_extract_pose_or_heatmat_from_image.cpp
+ thread_1_openpose_read_and_display.cpp
+ thread_2_user_processing_function.cpp
+ thread_3_user_input_processing_and_output.cpp
+ thread_4_user_input_processing_output_and_datum.cpp)
+
+foreach(EXAMPLE_FILE ${EXAMPLE_FILES})
+
+ get_filename_component(SOURCE_NAME ${EXAMPLE_FILE} NAME_WE)
+
+ if (UNIX OR APPLE)
+ set(EXE_NAME "${SOURCE_NAME}.bin")
+ elseif (WIN32)
+ set(EXE_NAME "${SOURCE_NAME}")
+ endif ()
+
+ message(STATUS "Adding Example ${EXE_NAME}")
+ add_executable(${EXE_NAME} ${EXAMPLE_FILE})
+ target_link_libraries(${EXE_NAME} openpose ${examples_3rdparty_libraries})
+
+ if (WIN32)
+ set_property(TARGET ${EXE_NAME} PROPERTY FOLDER "Examples/Tutorial/Developer Examples")
+ configure_file(${CMAKE_SOURCE_DIR}/cmake/OpenPose${VCXPROJ_FILE_GPU_MODE}.vcxproj.user
+ ${CMAKE_CURRENT_BINARY_DIR}/${EXE_NAME}.vcxproj.user @ONLY)
+ # Properties->General->Output Directory
+ set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
+ set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
+ endif (WIN32)
+
+endforeach()
+
+### Add Python files
+configure_file(python_1_pose_from_heatmaps.py python_1_pose_from_heatmaps.py)
diff --git a/examples/tutorial_developer/README.md b/examples/tutorial_developer/README.md
new file mode 100644
index 000000000..4d6dab25a
--- /dev/null
+++ b/examples/tutorial_developer/README.md
@@ -0,0 +1,4 @@
+# Developer Examples
+**Disclaimer**: This folder is meant for internal OpenPose developers. These examples may change substantially, and we will not answer questions about them or provide official support for them.
+
+**If the OpenPose library does not compile because of an error caused by a file in this folder, notify us**.
diff --git a/examples/tutorial_pose/1_extract_from_image.cpp b/examples/tutorial_developer/pose_1_extract_from_image.cpp
similarity index 99%
rename from examples/tutorial_pose/1_extract_from_image.cpp
rename to examples/tutorial_developer/pose_1_extract_from_image.cpp
index f49364e54..1a93f0306 100644
--- a/examples/tutorial_pose/1_extract_from_image.cpp
+++ b/examples/tutorial_developer/pose_1_extract_from_image.cpp
@@ -74,7 +74,7 @@ int openPoseTutorialPose1()
__LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
- // Step 2 - Read Google flags (user defined configuration)
+ // Step 2 - Read GFlags (user defined configuration)
// outputSize
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
// netInputSize
diff --git a/examples/tutorial_pose/2_extract_pose_or_heatmat_from_image.cpp b/examples/tutorial_developer/pose_2_extract_pose_or_heatmat_from_image.cpp
similarity index 99%
rename from examples/tutorial_pose/2_extract_pose_or_heatmat_from_image.cpp
rename to examples/tutorial_developer/pose_2_extract_pose_or_heatmat_from_image.cpp
index 7b750e445..0f87b746b 100644
--- a/examples/tutorial_pose/2_extract_pose_or_heatmat_from_image.cpp
+++ b/examples/tutorial_developer/pose_2_extract_pose_or_heatmat_from_image.cpp
@@ -79,7 +79,7 @@ int openPoseTutorialPose2()
__LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
- // Step 2 - Read Google flags (user defined configuration)
+ // Step 2 - Read GFlags (user defined configuration)
// outputSize
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
// netInputSize
diff --git a/examples/tutorial_python/2_pose_from_heatmaps.py b/examples/tutorial_developer/python_1_pose_from_heatmaps.py
similarity index 99%
rename from examples/tutorial_python/2_pose_from_heatmaps.py
rename to examples/tutorial_developer/python_1_pose_from_heatmaps.py
index 066792a18..c822938a5 100644
--- a/examples/tutorial_python/2_pose_from_heatmaps.py
+++ b/examples/tutorial_developer/python_1_pose_from_heatmaps.py
@@ -6,7 +6,7 @@
print("This sample can only be run if Python Caffe if available on your system")
print("Currently OpenPose does not compile Python Caffe. This may be supported in the future")
sys.exit(-1)
-
+
import os
os.environ["GLOG_minloglevel"] = "1"
import caffe
diff --git a/examples/tutorial_thread/1_openpose_read_and_display.cpp b/examples/tutorial_developer/thread_1_openpose_read_and_display.cpp
similarity index 98%
rename from examples/tutorial_thread/1_openpose_read_and_display.cpp
rename to examples/tutorial_developer/thread_1_openpose_read_and_display.cpp
index e204ec7b9..8a6fc486e 100644
--- a/examples/tutorial_thread/1_openpose_read_and_display.cpp
+++ b/examples/tutorial_developer/thread_1_openpose_read_and_display.cpp
@@ -2,7 +2,7 @@
// This third example shows the user how to:
// 1. Read folder of images / video / webcam (`producer` module)
// 2. Display the rendered pose (`gui` module)
- // Everything in a multi-thread scenario (`thread` module)
+ // Everything in a multi-thread scenario (`thread` module)
// In addition to the previous OpenPose modules, we also need to use:
// 1. `core` module: for the Datum struct that the `thread` module sends between the queues
// 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
@@ -75,7 +75,7 @@ int openPoseTutorialThread1()
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
__LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- // Step 2 - Read Google flags (user defined configuration)
+ // Step 2 - Read GFlags (user defined configuration)
// outputSize
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
// producerType
diff --git a/examples/tutorial_thread/2_user_processing_function.cpp b/examples/tutorial_developer/thread_2_user_processing_function.cpp
similarity index 99%
rename from examples/tutorial_thread/2_user_processing_function.cpp
rename to examples/tutorial_developer/thread_2_user_processing_function.cpp
index 4f3841e45..2d7a05e16 100644
--- a/examples/tutorial_thread/2_user_processing_function.cpp
+++ b/examples/tutorial_developer/thread_2_user_processing_function.cpp
@@ -3,7 +3,7 @@
// 1. Read folder of images / video / webcam (`producer` module)
// 2. Use the processing implemented by the user
// 3. Display the rendered pose (`gui` module)
- // Everything in a multi-thread scenario (`thread` module)
+ // Everything in a multi-thread scenario (`thread` module)
// In addition to the previous OpenPose modules, we also need to use:
// 1. `core` module: for the Datum struct that the `thread` module sends between the queues
// 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
@@ -109,7 +109,7 @@ int openPoseTutorialThread2()
op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
__LINE__, __FUNCTION__, __FILE__);
op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- // Step 2 - Read Google flags (user defined configuration)
+ // Step 2 - Read GFlags (user defined configuration)
// outputSize
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
// producerType
diff --git a/examples/tutorial_thread/3_user_input_processing_and_output.cpp b/examples/tutorial_developer/thread_3_user_input_processing_and_output.cpp
similarity index 99%
rename from examples/tutorial_thread/3_user_input_processing_and_output.cpp
rename to examples/tutorial_developer/thread_3_user_input_processing_and_output.cpp
index 966c03879..d57689877 100644
--- a/examples/tutorial_thread/3_user_input_processing_and_output.cpp
+++ b/examples/tutorial_developer/thread_3_user_input_processing_and_output.cpp
@@ -3,7 +3,7 @@
// 1. Read folder of images / video / webcam (`producer` module)
// 2. Use the processing implemented by the user
// 3. Display the rendered pose (`gui` module)
- // Everything in a multi-thread scenario (`thread` module)
+ // Everything in a multi-thread scenario (`thread` module)
// In addition to the previous OpenPose modules, we also need to use:
// 1. `core` module: for the Datum struct that the `thread` module sends between the queues
// 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
diff --git a/examples/tutorial_thread/4_user_input_processing_output_and_datum.cpp b/examples/tutorial_developer/thread_4_user_input_processing_output_and_datum.cpp
similarity index 99%
rename from examples/tutorial_thread/4_user_input_processing_output_and_datum.cpp
rename to examples/tutorial_developer/thread_4_user_input_processing_output_and_datum.cpp
index 40328220a..ec51cf29a 100644
--- a/examples/tutorial_thread/4_user_input_processing_output_and_datum.cpp
+++ b/examples/tutorial_developer/thread_4_user_input_processing_output_and_datum.cpp
@@ -3,7 +3,7 @@
// 1. Read folder of images / video / webcam (`producer` module)
// 2. Use the processing implemented by the user
// 3. Display the rendered pose (`gui` module)
- // Everything in a multi-thread scenario (`thread` module)
+ // Everything in a multi-thread scenario (`thread` module)
// In addition to the previous OpenPose modules, we also need to use:
// 1. `core` module: for the Datum struct that the `thread` module sends between the queues
// 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
diff --git a/examples/tutorial_pose/CMakeLists.txt b/examples/tutorial_pose/CMakeLists.txt
deleted file mode 100644
index b78e83b31..000000000
--- a/examples/tutorial_pose/CMakeLists.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-set(EXAMPLE_FILES
- 1_extract_from_image.cpp
- 2_extract_pose_or_heatmat_from_image.cpp)
-
-foreach(EXAMPLE_FILE ${EXAMPLE_FILES})
-
- get_filename_component(SOURCE_NAME ${EXAMPLE_FILE} NAME_WE)
-
- if (UNIX OR APPLE)
- set(EXE_NAME "${SOURCE_NAME}.bin")
- elseif (WIN32)
- set(EXE_NAME "${SOURCE_NAME}")
- endif ()
-
- message(STATUS "Adding Example ${EXE_NAME}")
- add_executable(${EXE_NAME} ${EXAMPLE_FILE})
- target_link_libraries(${EXE_NAME} openpose ${examples_3rdparty_libraries})
-
- if (WIN32)
- set_property(TARGET ${EXE_NAME} PROPERTY FOLDER "Examples/Tutorial/Pose")
- configure_file(${CMAKE_SOURCE_DIR}/cmake/OpenPose${VCXPROJ_FILE_GPU_MODE}.vcxproj.user
- ${CMAKE_CURRENT_BINARY_DIR}/${EXE_NAME}.vcxproj.user @ONLY)
- # Properties->General->Output Directory
- set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
- set_property(TARGET ${EXE_NAME} PROPERTY RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_BINARY_DIR}/$(Platform)/$(Configuration))
- endif (WIN32)
-
-endforeach()
diff --git a/examples/tutorial_wrapper/2_user_synchronous_input.cpp b/examples/tutorial_wrapper/2_user_synchronous_input.cpp
deleted file mode 100644
index d58760cfa..000000000
--- a/examples/tutorial_wrapper/2_user_synchronous_input.cpp
+++ /dev/null
@@ -1,477 +0,0 @@
-// ------------------------- OpenPose Library Tutorial - Real Time Pose Estimation -------------------------
-// If the user wants to learn to use the OpenPose library, we highly recommend to start with the `examples/tutorial_*/`
-// folders.
-// This example summarizes all the functionality of the OpenPose library:
- // 1. Read folder of images / video / webcam (`producer` module)
- // 2. Extract and render body keypoint / heatmap / PAF of that image (`pose` module)
- // 3. Extract and render face keypoint / heatmap / PAF of that image (`face` module)
- // 4. Save the results on disk (`filestream` module)
- // 5. Display the rendered pose (`gui` module)
- // Everything in a multi-thread scenario (`thread` module)
- // Points 2 to 5 are included in the `wrapper` module
-// In addition to the previous OpenPose modules, we also need to use:
- // 1. `core` module:
- // For the Array class that the `pose` module needs
- // For the Datum struct that the `thread` module sends between the queues
- // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
-// This file should only be used for the user to take specific examples.
-
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
-// OpenPose dependencies
-#include <openpose/headers.hpp>
-
-// See all the available parameter options with the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
-// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
-// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while"
- " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for"
- " low priority messages and 4 for important ones.");
-DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful"
- " for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with"
- " low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the"
- " error.");
-DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some"
- " runtime statistics at this frame number.");
-// Producer
-DEFINE_int32(camera, -1, "The camera index for cv::VideoCapture. Integer in the range [0, 9]. Select a negative"
- " number (by default), to auto-detect and open the first available camera.");
-DEFINE_string(camera_resolution, "-1x-1", "Set the camera resolution (either `--camera` or `--flir_camera`). `-1x-1` will use the"
- " default 1280x720 for `--camera`, or the maximum flir camera resolution available for"
- " `--flir_camera`");
-DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (also used when saving video). Set this value to the minimum"
- " value between the OpenPose displayed speed and the webcam real frame rate.");
-DEFINE_string(video, "", "Use a video file instead of the camera. Use `examples/media/video.avi` for our default"
- " example video.");
-DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
- " images. Read all standard formats (jpg, png, bmp, etc.).");
-DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
-DEFINE_int32(flir_camera_index, -1, "Select -1 (default) to run on all detected flir cameras at once. Otherwise, select the flir"
- " camera index to run, where 0 corresponds to the detected flir camera with the lowest"
- " serial number, and `n` to the `n`-th lowest serial number camera.");
-DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
-DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
-DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to"
- " 10, it will process 11 frames (0-10).");
-DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g. for real time webcam demonstrations).");
-DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270.");
-DEFINE_bool(frames_repeat, false, "Repeat frames when finished.");
-DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
- " too long, it will skip frames. If it is too fast, it will slow it down.");
-DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located.");
-DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the"
- " `camera_parameter_folder` camera parameters; if true, it will not undistortionate, i.e.,"
- " it will leave it as it is.");
-// OpenPose
-DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
-DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
- " input image resolution.");
-DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
- " machine.");
-DEFINE_int32(num_gpu_start, 0, "GPU device start number.");
-DEFINE_int32(keypoint_scale, 0, "Scaling of the (x,y) coordinates of the final pose data array, i.e. the scale of the (x,y)"
- " coordinates that will be saved with the `write_json` & `write_keypoint` flags."
- " Select `0` to scale it to the original source resolution; `1`to scale it to the net output"
- " size (set with `net_resolution`); `2` to scale it to the final output size (set with"
- " `resolution`); `3` to scale it in the range [0,1], where (0,0) would be the top-left"
- " corner of the image, and (1,1) the bottom-right one; and 4 for range [-1,1], where"
- " (-1,-1) would be the top-left corner of the image, and (1,1) the bottom-right one. Non"
- " related with `scale_number` and `scale_gap`.");
-DEFINE_int32(number_people_max, -1, "This parameter will limit the maximum number of people detected, by keeping the people with"
- " top scores. The score is based in person area over the image, body part score, as well as"
- " joint score (between each pair of connected body parts). Useful if you know the exact"
- " number of people in the scene, so it can remove false positives (if all the people have"
- " been detected. However, it might also include false negatives by removing very small or"
- " highly occluded people. -1 will keep them all.");
-// OpenPose Body Pose
-DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face"
- " keypoint detection.");
-DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), "
- "`MPI_4_layers` (15 keypoints, even faster but less accurate).");
-DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is"
- " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the"
- " closest aspect ratio possible to the images or videos to be processed. Using `-1` in"
- " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's"
- " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions,"
- " e.g. full HD (1980x1080) and HD (1280x720) resolutions.");
-DEFINE_int32(scale_number, 1, "Number of scales to average.");
-DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1."
- " If you want to change the initial scale, you actually want to multiply the"
- " `net_resolution` by your desired initial scale.");
-// OpenPose Body Pose Heatmaps and Part Candidates
-DEFINE_bool(heatmaps_add_parts, false, "If true, it will fill op::Datum::poseHeatMaps array with the body part heatmaps, and"
- " analogously face & hand heatmaps to op::Datum::faceHeatMaps & op::Datum::handHeatMaps."
- " If more than one `add_heatmaps_X` flag is enabled, it will place then in sequential"
- " memory order: body parts + bkg + PAFs. It will follow the order on"
- " POSE_BODY_PART_MAPPING in `src/openpose/pose/poseParameters.cpp`. Program speed will"
- " considerably decrease. Not required for OpenPose, enable it only if you intend to"
- " explicitly use this information later.");
-DEFINE_bool(heatmaps_add_bkg, false, "Same functionality as `add_heatmaps_parts`, but adding the heatmap corresponding to"
- " background.");
-DEFINE_bool(heatmaps_add_PAFs, false, "Same functionality as `add_heatmaps_parts`, but adding the PAFs.");
-DEFINE_int32(heatmaps_scale, 2, "Set 0 to scale op::Datum::poseHeatMaps in the range [-1,1], 1 for [0,1]; 2 for integer"
- " rounded [0,255]; and 3 for no scaling.");
-DEFINE_bool(part_candidates, false, "Also enable `write_json` in order to save this information. If true, it will fill the"
- " op::Datum::poseCandidates array with the body part candidates. Candidates refer to all"
- " the detected body parts, before being assembled into people. Note that the number of"
- " candidates is equal or higher than the number of final body parts (i.e. after being"
- " assembled into people). The empty body parts are filled with 0s. Program speed will"
- " slightly decrease. Not required for OpenPose, enable it only if you intend to explicitly"
- " use this information.");
-// OpenPose Face
-DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Note that this will considerable slow down the performance and increse"
- " the required GPU memory. In addition, the greater number of people on the image, the"
- " slower OpenPose will be.");
-DEFINE_string(face_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the face keypoint"
- " detector. 320x320 usually works fine while giving a substantial speed up when multiple"
- " faces on the image.");
-// OpenPose Hand
-DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Analogously to `--face`, it will also slow down the performance, increase"
- " the required GPU memory and its speed depends on the number of people.");
-DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the hand keypoint"
- " detector.");
-DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results"
- " were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4.");
-DEFINE_double(hand_scale_range, 0.4, "Analogous purpose than `scale_gap` but applied to the hand keypoint detector. Total range"
- " between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if"
- " scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2.");
-DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate"
- " is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it"
- " simply looks for hands in positions at which hands were located in previous frames, but"
- " it does not guarantee the same person ID among frames.");
-// OpenPose 3-D Reconstruction
-DEFINE_bool(3d, false, "Running OpenPose 3-D reconstruction demo: 1) Reading from a stereo camera system."
- " 2) Performing 3-D reconstruction from the multiple views. 3) Displaying 3-D reconstruction"
- " results. Note that it will only display 1 person. If multiple people is present, it will"
- " fail.");
-DEFINE_int32(3d_min_views, -1, "Minimum number of views required to reconstruct each keypoint. By default (-1), it will"
- " require all the cameras to see the keypoint in order to reconstruct it.");
-DEFINE_int32(3d_views, 1, "Complementary option to `--image_dir` or `--video`. OpenPose will read as many images per"
- " iteration, allowing tasks such as stereo camera processing (`--3d`). Note that"
- " `--camera_parameters_folder` must be set. OpenPose must find as many `xml` files in the"
- " parameter folder as this number indicates.");
-// Extra algorithms
-DEFINE_bool(identification, false, "Experimental, not available yet. Whether to enable people identification across frames.");
-DEFINE_int32(tracking, -1, "Experimental, not available yet. Whether to enable people tracking across frames. The"
- " value indicates the number of frames where tracking is run between each OpenPose keypoint"
- " detection. Select -1 (default) to disable it or 0 to run simultaneously OpenPose keypoint"
- " detector and tracking for potentially higher accurary than only OpenPose.");
-DEFINE_int32(ik_threads, 0, "Experimental, not available yet. Whether to enable inverse kinematics (IK) from 3-D"
- " keypoints to obtain 3-D joint angles. By default (0 threads), it is disabled. Increasing"
- " the number of threads will increase the speed but also the global system latency.");
-// OpenPose Rendering
-DEFINE_int32(part_to_show, 0, "Prediction channel to visualize (default: 0). 0 for all the body parts, 1-18 for each body"
- " part heat map, 19 for the background heat map, 20 for all the body part heat maps"
- " together, 21 for all the PAFs, 22-40 for each body part pair PAF.");
-DEFINE_bool(disable_blending, false, "If enabled, it will render the results (keypoint skeletons or heatmaps) on a black"
- " background, instead of being rendered into the original image. Related: `part_to_show`,"
- " `alpha_pose`, and `alpha_pose`.");
-// OpenPose Rendering Pose
-DEFINE_double(render_threshold, 0.05, "Only estimated keypoints whose score confidences are higher than this threshold will be"
- " rendered. Generally, a high threshold (> 0.5) will only render very clear body parts;"
- " while small thresholds (~0.1) will also output guessed and occluded keypoints, but also"
- " more false positives (i.e. wrong detections).");
-DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering"
- " (slower but greater functionality, e.g. `alpha_X` flags). If -1, it will pick CPU if"
- " CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render"
- " both `outputData` and `cvOutputData` with the original image and desired body part to be"
- " shown (i.e. keypoints, heat maps or PAFs).");
-DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will"
- " hide it. Only valid for GPU rendering.");
-DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the"
- " heatmap, 0 will only show the frame. Only valid for GPU rendering.");
-// OpenPose Rendering Face
-DEFINE_double(face_render_threshold, 0.4, "Analogous to `render_threshold`, but applied to the face keypoints.");
-DEFINE_int32(face_render, -1, "Analogous to `render_pose` but applied to the face. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(face_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to face.");
-DEFINE_double(face_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
-// OpenPose Rendering Hand
-DEFINE_double(hand_render_threshold, 0.2, "Analogous to `render_threshold`, but applied to the hand keypoints.");
-DEFINE_int32(hand_render, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(hand_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to hand.");
-DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
-// Display
-DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle).");
-DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g. number of current frame and people). It"
- " does not affect the pose rendering.");
-DEFINE_int32(display, -1, "Display mode: -1 for automatic selection; 0 for no display (useful if there is no X server"
- " and/or to slightly speed up the processing if visual output is not required); 2 for 2-D"
- " display; 3 for 3-D display (if `--3d` enabled); and 1 for both 2-D and 3-D display.");
-// Result Saving
-DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
-DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
- " function cv::imwrite for all compatible extensions.");
-DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
- " final path does not finish in `.avi`. It internally uses cv::VideoWriter. Flag"
- " `camera_fps` controls FPS.");
-DEFINE_string(write_json, "", "Directory to write OpenPose output in JSON format. It includes body, hand, and face pose"
- " keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled).");
-DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format.");
-DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format.");
-DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag"
- " must be enabled.");
-DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`."
- " For lossless compression, recommended `png` for integer `heatmaps_scale` and `float` for"
- " floating values.");
-DEFINE_string(write_keypoint, "", "(Deprecated, use `write_json`) Directory to write the people pose keypoint data. Set format"
- " with `write_keypoint_format`.");
-DEFINE_string(write_keypoint_format, "yml", "(Deprecated, use `write_json`) File extension and format for `write_keypoint`: json, xml,"
- " yaml & yml. Json not available for OpenCV < 3.0, use `write_json` instead.");
-// Result Saving - Extra Algorithms
-DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g.: `~/Desktop/adamResult.avi`. Flag `camera_fps`"
- " controls FPS.");
-DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g.: `~/Desktop/mocapResult.bvh`.");
-// UDP communication
-DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`.");
-DEFINE_string(udp_port, "8051", "Experimental, not available yet. Port number for UDP communication.");
-
-
-// If the user needs his own variables, he can inherit the op::Datum struct and add them
-// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
-// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
-struct UserDatum : public op::Datum
-{
- bool boolThatUserNeedsForSomeReason;
-
- UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
- boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
- {}
-};
-
-// The W-classes can be implemented either as a template or as simple classes given
-// that the user usually knows which kind of data he will move between the queues,
-// in this case we assume a std::shared_ptr of a std::vector of UserDatum
-
-// This worker will just read and return all the jpg files in a directory
-class WUserInput : public op::WorkerProducer<std::shared_ptr<std::vector<UserDatum>>>
-{
-public:
- WUserInput(const std::string& directoryPath) :
- mImageFiles{op::getFilesOnDirectory(directoryPath, "jpg")},
- // If we want "jpg" + "png" images
- // mImageFiles{op::getFilesOnDirectory(directoryPath, std::vector<std::string>{"jpg", "png"})},
- mCounter{0}
- {
- if (mImageFiles.empty())
- op::error("No images found on: " + directoryPath, __LINE__, __FUNCTION__, __FILE__);
- }
-
- void initializationOnThread() {}
-
- std::shared_ptr<std::vector<UserDatum>> workProducer()
- {
- try
- {
- // Close program when empty frame
- if (mImageFiles.size() <= mCounter)
- {
- op::log("Last frame read and added to queue. Closing program after it is processed.",
- op::Priority::High);
- // This function stops this worker, which will eventually stop the whole thread system once all the
- // frames have been processed
- this->stop();
- return nullptr;
- }
- else
- {
- // Create new datum
- auto datumsPtr = std::make_shared<std::vector<UserDatum>>();
- datumsPtr->emplace_back();
- auto& datum = datumsPtr->at(0);
-
- // Fill datum
- datum.cvInputData = cv::imread(mImageFiles.at(mCounter++));
-
- // If empty frame -> return nullptr
- if (datum.cvInputData.empty())
- {
- op::log("Empty frame detected on path: " + mImageFiles.at(mCounter-1) + ". Closing program.",
- op::Priority::High);
- this->stop();
- datumsPtr = nullptr;
- }
-
- return datumsPtr;
- }
- }
- catch (const std::exception& e)
- {
- this->stop();
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- return nullptr;
- }
- }
-
-private:
- const std::vector<std::string> mImageFiles;
- unsigned long long mCounter;
-};
-
-int openPoseDemo()
-{
- try
- {
- op::log("Starting OpenPose demo...", op::Priority::High);
- const auto timerBegin = std::chrono::high_resolution_clock::now();
-
- // logging_level
- op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
- __LINE__, __FUNCTION__, __FILE__);
- op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- op::Profiler::setDefaultX(FLAGS_profile_speed);
- // // For debugging
- // // Print all logging messages
- // op::ConfigureLog::setPriorityThreshold(op::Priority::None);
- // // Print out speed values faster
- // op::Profiler::setDefaultX(100);
-
- // Applying user defined configuration - Google flags to program variables
- // outputSize
- const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
- // netInputSize
- const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
- // faceNetInputSize
- const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
- // handNetInputSize
- const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
- // producerType
- const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
- FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps,
- FLAGS_camera_parameter_folder, !FLAGS_frame_keep_distortion,
- (unsigned int) FLAGS_3d_views, FLAGS_flir_camera_index);
- // poseModel
- const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
- // JSON saving
- if (!FLAGS_write_keypoint.empty())
- op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
- " Please, use `write_json` instead.", op::Priority::Max);
- // keypointScale
- const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
- // heatmaps to add
- const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
- FLAGS_heatmaps_add_PAFs);
- const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
- // >1 camera view?
- const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1 || FLAGS_flir_camera);
- // Enabling Google Logging
- const bool enableGoogleLogging = true;
- // Logging
- op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
-
- // OpenPose wrapper
- op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
- // op::Wrapper<std::vector<op::Datum>> opWrapper;
- op::Wrapper<std::vector<UserDatum>> opWrapper;
-
- // Initializing the user custom classes
- // Frames producer (e.g. video, webcam, ...)
- auto wUserInput = std::make_shared<WUserInput>(FLAGS_image_dir);
- // Add custom processing
- const auto workerInputOnNewThread = true;
- opWrapper.setWorkerInput(wUserInput, workerInputOnNewThread);
-
- // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
- const op::WrapperStructPose wrapperStructPose{
- !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
- FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
- poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
- FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
- (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
- // Face configuration (use op::WrapperStructFace{} to disable it)
- const op::WrapperStructFace wrapperStructFace{
- FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
- (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
- // Hand configuration (use op::WrapperStructHand{} to disable it)
- const op::WrapperStructHand wrapperStructHand{
- FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
- op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
- (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
- // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
- const op::WrapperStructExtra wrapperStructExtra{
- FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
- // Producer (use default to disable any input)
- // const op::WrapperStructInput wrapperStructInput{producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last,
- // FLAGS_process_real_time, FLAGS_frame_flip, FLAGS_frame_rotate,
- // FLAGS_frames_repeat};
- const op::WrapperStructInput wrapperStructInput;
- // Consumer (comment or use default argument to disable any output)
- const op::WrapperStructOutput wrapperStructOutput{
- op::flagsToDisplayMode(FLAGS_display, FLAGS_3d), !FLAGS_no_gui_verbose, FLAGS_fullscreen,
- FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json,
- FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format,
- FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format,
- FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
- // Configure wrapper
-
- opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
- wrapperStructInput, wrapperStructOutput);
- // Set to single-thread running (to debug and/or reduce latency)
- if (FLAGS_disable_multi_thread)
- opWrapper.disableMultiThreading();
-
- // Start processing
- // Two different ways of running the program on multithread environment
- op::log("Starting thread(s)...", op::Priority::High);
- // Start, run & stop threads - it blocks this thread until all others have finished
- opWrapper.exec();
-
- // // Option b) Keeping this thread free in case you want to do something else meanwhile, e.g. profiling the GPU
- // memory
- // // VERY IMPORTANT NOTE: if OpenCV is compiled with Qt support, this option will not work. Qt needs the main
- // // thread to plot visual results, so the final GUI (which uses OpenCV) would return an exception similar to:
- // // `QMetaMethod::invoke: Unable to invoke methods with return values in queued connections`
- // // Start threads
- // opWrapper.start();
- // // Profile used GPU memory
- // // 1: wait ~10sec so the memory has been totally loaded on GPU
- // // 2: profile the GPU memory
- // const auto sleepTimeMs = 10;
- // for (auto i = 0 ; i < 10000/sleepTimeMs && opWrapper.isRunning() ; i++)
- // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
- // op::Profiler::profileGpuMemory(__LINE__, __FUNCTION__, __FILE__);
- // // Keep program alive while running threads
- // while (opWrapper.isRunning())
- // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
- // // Stop and join threads
- // op::log("Stopping thread(s)", op::Priority::High);
- // opWrapper.stop();
-
- // Measuring total time
- const auto now = std::chrono::high_resolution_clock::now();
- const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
- * 1e-9;
- const auto message = "OpenPose demo successfully finished. Total time: "
- + std::to_string(totalTimeSec) + " seconds.";
- op::log(message, op::Priority::High);
-
- // Return successful message
- return 0;
- }
- catch (const std::exception& e)
- {
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- return -1;
- }
-}
-
-int main(int argc, char *argv[])
-{
- // Parsing command line flags
- gflags::ParseCommandLineFlags(&argc, &argv, true);
-
- // Running openPoseDemo
- return openPoseDemo();
-}
diff --git a/examples/tutorial_wrapper/3_user_synchronous_output.cpp b/examples/tutorial_wrapper/3_user_synchronous_output.cpp
deleted file mode 100644
index f334a2064..000000000
--- a/examples/tutorial_wrapper/3_user_synchronous_output.cpp
+++ /dev/null
@@ -1,492 +0,0 @@
-// ------------------------- OpenPose Library Tutorial - Real Time Pose Estimation -------------------------
-// If the user wants to learn to use the OpenPose library, we highly recommend to start with the `examples/tutorial_*/`
-// folders.
-// This example summarizes all the functionality of the OpenPose library:
- // 1. Read folder of images / video / webcam (`producer` module)
- // 2. Extract and render body keypoint / heatmap / PAF of that image (`pose` module)
- // 3. Extract and render face keypoint / heatmap / PAF of that image (`face` module)
- // 4. Save the results on disk (`filestream` module)
- // 5. Display the rendered pose (`gui` module)
- // Everything in a multi-thread scenario (`thread` module)
- // Points 2 to 5 are included in the `wrapper` module
-// In addition to the previous OpenPose modules, we also need to use:
- // 1. `core` module:
- // For the Array class that the `pose` module needs
- // For the Datum struct that the `thread` module sends between the queues
- // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
-// This file should only be used for the user to take specific examples.
-
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
-// OpenPose dependencies
-#include <openpose/headers.hpp>
-
-// See all the available parameter options with the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
-// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
-// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while"
- " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for"
- " low priority messages and 4 for important ones.");
-DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful"
- " for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with"
- " low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the"
- " error.");
-DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some"
- " runtime statistics at this frame number.");
-// Producer
-DEFINE_int32(camera, -1, "The camera index for cv::VideoCapture. Integer in the range [0, 9]. Select a negative"
- " number (by default), to auto-detect and open the first available camera.");
-DEFINE_string(camera_resolution, "-1x-1", "Set the camera resolution (either `--camera` or `--flir_camera`). `-1x-1` will use the"
- " default 1280x720 for `--camera`, or the maximum flir camera resolution available for"
- " `--flir_camera`");
-DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (also used when saving video). Set this value to the minimum"
- " value between the OpenPose displayed speed and the webcam real frame rate.");
-DEFINE_string(video, "", "Use a video file instead of the camera. Use `examples/media/video.avi` for our default"
- " example video.");
-DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
- " images. Read all standard formats (jpg, png, bmp, etc.).");
-DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
-DEFINE_int32(flir_camera_index, -1, "Select -1 (default) to run on all detected flir cameras at once. Otherwise, select the flir"
- " camera index to run, where 0 corresponds to the detected flir camera with the lowest"
- " serial number, and `n` to the `n`-th lowest serial number camera.");
-DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
-DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
-DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to"
- " 10, it will process 11 frames (0-10).");
-DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g. for real time webcam demonstrations).");
-DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270.");
-DEFINE_bool(frames_repeat, false, "Repeat frames when finished.");
-DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
- " too long, it will skip frames. If it is too fast, it will slow it down.");
-DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located.");
-DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the"
- " `camera_parameter_folder` camera parameters; if true, it will not undistortionate, i.e.,"
- " it will leave it as it is.");
-// OpenPose
-DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
-DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
- " input image resolution.");
-DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
- " machine.");
-DEFINE_int32(num_gpu_start, 0, "GPU device start number.");
-DEFINE_int32(keypoint_scale, 0, "Scaling of the (x,y) coordinates of the final pose data array, i.e. the scale of the (x,y)"
- " coordinates that will be saved with the `write_json` & `write_keypoint` flags."
- " Select `0` to scale it to the original source resolution; `1`to scale it to the net output"
- " size (set with `net_resolution`); `2` to scale it to the final output size (set with"
- " `resolution`); `3` to scale it in the range [0,1], where (0,0) would be the top-left"
- " corner of the image, and (1,1) the bottom-right one; and 4 for range [-1,1], where"
- " (-1,-1) would be the top-left corner of the image, and (1,1) the bottom-right one. Non"
- " related with `scale_number` and `scale_gap`.");
-DEFINE_int32(number_people_max, -1, "This parameter will limit the maximum number of people detected, by keeping the people with"
- " top scores. The score is based in person area over the image, body part score, as well as"
- " joint score (between each pair of connected body parts). Useful if you know the exact"
- " number of people in the scene, so it can remove false positives (if all the people have"
- " been detected. However, it might also include false negatives by removing very small or"
- " highly occluded people. -1 will keep them all.");
-// OpenPose Body Pose
-DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face"
- " keypoint detection.");
-DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), "
- "`MPI_4_layers` (15 keypoints, even faster but less accurate).");
-DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is"
- " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the"
- " closest aspect ratio possible to the images or videos to be processed. Using `-1` in"
- " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's"
- " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions,"
- " e.g. full HD (1980x1080) and HD (1280x720) resolutions.");
-DEFINE_int32(scale_number, 1, "Number of scales to average.");
-DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1."
- " If you want to change the initial scale, you actually want to multiply the"
- " `net_resolution` by your desired initial scale.");
-// OpenPose Body Pose Heatmaps and Part Candidates
-DEFINE_bool(heatmaps_add_parts, false, "If true, it will fill op::Datum::poseHeatMaps array with the body part heatmaps, and"
- " analogously face & hand heatmaps to op::Datum::faceHeatMaps & op::Datum::handHeatMaps."
- " If more than one `add_heatmaps_X` flag is enabled, it will place then in sequential"
- " memory order: body parts + bkg + PAFs. It will follow the order on"
- " POSE_BODY_PART_MAPPING in `src/openpose/pose/poseParameters.cpp`. Program speed will"
- " considerably decrease. Not required for OpenPose, enable it only if you intend to"
- " explicitly use this information later.");
-DEFINE_bool(heatmaps_add_bkg, false, "Same functionality as `add_heatmaps_parts`, but adding the heatmap corresponding to"
- " background.");
-DEFINE_bool(heatmaps_add_PAFs, false, "Same functionality as `add_heatmaps_parts`, but adding the PAFs.");
-DEFINE_int32(heatmaps_scale, 2, "Set 0 to scale op::Datum::poseHeatMaps in the range [-1,1], 1 for [0,1]; 2 for integer"
- " rounded [0,255]; and 3 for no scaling.");
-DEFINE_bool(part_candidates, false, "Also enable `write_json` in order to save this information. If true, it will fill the"
- " op::Datum::poseCandidates array with the body part candidates. Candidates refer to all"
- " the detected body parts, before being assembled into people. Note that the number of"
- " candidates is equal or higher than the number of final body parts (i.e. after being"
- " assembled into people). The empty body parts are filled with 0s. Program speed will"
- " slightly decrease. Not required for OpenPose, enable it only if you intend to explicitly"
- " use this information.");
-// OpenPose Face
-DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Note that this will considerable slow down the performance and increse"
- " the required GPU memory. In addition, the greater number of people on the image, the"
- " slower OpenPose will be.");
-DEFINE_string(face_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the face keypoint"
- " detector. 320x320 usually works fine while giving a substantial speed up when multiple"
- " faces on the image.");
-// OpenPose Hand
-DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Analogously to `--face`, it will also slow down the performance, increase"
- " the required GPU memory and its speed depends on the number of people.");
-DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the hand keypoint"
- " detector.");
-DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results"
- " were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4.");
-DEFINE_double(hand_scale_range, 0.4, "Analogous purpose than `scale_gap` but applied to the hand keypoint detector. Total range"
- " between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if"
- " scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2.");
-DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate"
- " is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it"
- " simply looks for hands in positions at which hands were located in previous frames, but"
- " it does not guarantee the same person ID among frames.");
-// OpenPose 3-D Reconstruction
-DEFINE_bool(3d, false, "Running OpenPose 3-D reconstruction demo: 1) Reading from a stereo camera system."
- " 2) Performing 3-D reconstruction from the multiple views. 3) Displaying 3-D reconstruction"
- " results. Note that it will only display 1 person. If multiple people is present, it will"
- " fail.");
-DEFINE_int32(3d_min_views, -1, "Minimum number of views required to reconstruct each keypoint. By default (-1), it will"
- " require all the cameras to see the keypoint in order to reconstruct it.");
-DEFINE_int32(3d_views, 1, "Complementary option to `--image_dir` or `--video`. OpenPose will read as many images per"
- " iteration, allowing tasks such as stereo camera processing (`--3d`). Note that"
- " `--camera_parameters_folder` must be set. OpenPose must find as many `xml` files in the"
- " parameter folder as this number indicates.");
-// Extra algorithms
-DEFINE_bool(identification, false, "Experimental, not available yet. Whether to enable people identification across frames.");
-DEFINE_int32(tracking, -1, "Experimental, not available yet. Whether to enable people tracking across frames. The"
- " value indicates the number of frames where tracking is run between each OpenPose keypoint"
- " detection. Select -1 (default) to disable it or 0 to run simultaneously OpenPose keypoint"
- " detector and tracking for potentially higher accurary than only OpenPose.");
-DEFINE_int32(ik_threads, 0, "Experimental, not available yet. Whether to enable inverse kinematics (IK) from 3-D"
- " keypoints to obtain 3-D joint angles. By default (0 threads), it is disabled. Increasing"
- " the number of threads will increase the speed but also the global system latency.");
-// OpenPose Rendering
-DEFINE_int32(part_to_show, 0, "Prediction channel to visualize (default: 0). 0 for all the body parts, 1-18 for each body"
- " part heat map, 19 for the background heat map, 20 for all the body part heat maps"
- " together, 21 for all the PAFs, 22-40 for each body part pair PAF.");
-DEFINE_bool(disable_blending, false, "If enabled, it will render the results (keypoint skeletons or heatmaps) on a black"
- " background, instead of being rendered into the original image. Related: `part_to_show`,"
- " `alpha_pose`, and `alpha_pose`.");
-// OpenPose Rendering Pose
-DEFINE_double(render_threshold, 0.05, "Only estimated keypoints whose score confidences are higher than this threshold will be"
- " rendered. Generally, a high threshold (> 0.5) will only render very clear body parts;"
- " while small thresholds (~0.1) will also output guessed and occluded keypoints, but also"
- " more false positives (i.e. wrong detections).");
-DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering"
- " (slower but greater functionality, e.g. `alpha_X` flags). If -1, it will pick CPU if"
- " CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render"
- " both `outputData` and `cvOutputData` with the original image and desired body part to be"
- " shown (i.e. keypoints, heat maps or PAFs).");
-DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will"
- " hide it. Only valid for GPU rendering.");
-DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the"
- " heatmap, 0 will only show the frame. Only valid for GPU rendering.");
-// OpenPose Rendering Face
-DEFINE_double(face_render_threshold, 0.4, "Analogous to `render_threshold`, but applied to the face keypoints.");
-DEFINE_int32(face_render, -1, "Analogous to `render_pose` but applied to the face. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(face_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to face.");
-DEFINE_double(face_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
-// OpenPose Rendering Hand
-DEFINE_double(hand_render_threshold, 0.2, "Analogous to `render_threshold`, but applied to the hand keypoints.");
-DEFINE_int32(hand_render, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(hand_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to hand.");
-DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
-// Display
-DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle).");
-DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g. number of current frame and people). It"
- " does not affect the pose rendering.");
-DEFINE_int32(display, -1, "Display mode: -1 for automatic selection; 0 for no display (useful if there is no X server"
- " and/or to slightly speed up the processing if visual output is not required); 2 for 2-D"
- " display; 3 for 3-D display (if `--3d` enabled); and 1 for both 2-D and 3-D display.");
-// Result Saving
-DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
-DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
- " function cv::imwrite for all compatible extensions.");
-DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
- " final path does not finish in `.avi`. It internally uses cv::VideoWriter. Flag"
- " `camera_fps` controls FPS.");
-DEFINE_string(write_json, "", "Directory to write OpenPose output in JSON format. It includes body, hand, and face pose"
- " keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled).");
-DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format.");
-DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format.");
-DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag"
- " must be enabled.");
-DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`."
- " For lossless compression, recommended `png` for integer `heatmaps_scale` and `float` for"
- " floating values.");
-DEFINE_string(write_keypoint, "", "(Deprecated, use `write_json`) Directory to write the people pose keypoint data. Set format"
- " with `write_keypoint_format`.");
-DEFINE_string(write_keypoint_format, "yml", "(Deprecated, use `write_json`) File extension and format for `write_keypoint`: json, xml,"
- " yaml & yml. Json not available for OpenCV < 3.0, use `write_json` instead.");
-// Result Saving - Extra Algorithms
-DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g.: `~/Desktop/adamResult.avi`. Flag `camera_fps`"
- " controls FPS.");
-DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g.: `~/Desktop/mocapResult.bvh`.");
-// UDP communication
-DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`.");
-DEFINE_string(udp_port, "8051", "Experimental, not available yet. Port number for UDP communication.");
-
-
-// If the user needs his own variables, he can inherit the op::Datum struct and add them
-// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
-// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
-struct UserDatum : public op::Datum
-{
- bool boolThatUserNeedsForSomeReason;
-
- UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
- boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
- {}
-};
-
-// The W-classes can be implemented either as a template or as simple classes given
-// that the user usually knows which kind of data he will move between the queues,
-// in this case we assume a std::shared_ptr of a std::vector of UserDatum
-
-// This worker will just display the rendered output and print the estimated keypoints
-class WUserOutput : public op::WorkerConsumer<std::shared_ptr<std::vector<UserDatum>>>
-{
-public:
- void initializationOnThread() {}
-
- void workConsumer(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
- {
- try
- {
- // User's displaying/saving/other processing here
- // datum.cvOutputData: rendered frame with pose or heatmaps
- // datum.poseKeypoints: Array with the estimated pose
- if (datumsPtr != nullptr && !datumsPtr->empty())
- {
- // Show in command line the resulting pose keypoints for body, face and hands
- op::log("\nKeypoints:");
- // Accessing each element of the keypoints
- const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
- op::log("Person pose keypoints:");
- for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
- {
- op::log("Person " + std::to_string(person) + " (x, y, score):");
- for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
- {
- std::string valueToPrint;
- for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
- {
- valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
- }
- op::log(valueToPrint);
- }
- }
- op::log(" ");
- // Alternative: just getting std::string equivalent
- op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
- op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
- op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
- // Heatmaps
- const auto& poseHeatMaps = datumsPtr->at(0).poseHeatMaps;
- if (!poseHeatMaps.empty())
- {
- op::log("Pose heatmaps size: [" + std::to_string(poseHeatMaps.getSize(0)) + ", "
- + std::to_string(poseHeatMaps.getSize(1)) + ", "
- + std::to_string(poseHeatMaps.getSize(2)) + "]");
- const auto& faceHeatMaps = datumsPtr->at(0).faceHeatMaps;
- op::log("Face heatmaps size: [" + std::to_string(faceHeatMaps.getSize(0)) + ", "
- + std::to_string(faceHeatMaps.getSize(1)) + ", "
- + std::to_string(faceHeatMaps.getSize(2)) + ", "
- + std::to_string(faceHeatMaps.getSize(3)) + "]");
- const auto& handHeatMaps = datumsPtr->at(0).handHeatMaps;
- op::log("Left hand heatmaps size: [" + std::to_string(handHeatMaps[0].getSize(0)) + ", "
- + std::to_string(handHeatMaps[0].getSize(1)) + ", "
- + std::to_string(handHeatMaps[0].getSize(2)) + ", "
- + std::to_string(handHeatMaps[0].getSize(3)) + "]");
- op::log("Right hand heatmaps size: [" + std::to_string(handHeatMaps[1].getSize(0)) + ", "
- + std::to_string(handHeatMaps[1].getSize(1)) + ", "
- + std::to_string(handHeatMaps[1].getSize(2)) + ", "
- + std::to_string(handHeatMaps[1].getSize(3)) + "]");
- }
-
- // Display rendered output image
- cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
- // Display image and sleeps at least 1 ms (it usually sleeps ~5-10 msec to display the image)
- const char key = (char)cv::waitKey(1);
- if (key == 27)
- this->stop();
- }
- }
- catch (const std::exception& e)
- {
- this->stop();
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- }
- }
-};
-
-int openPoseDemo()
-{
- try
- {
- op::log("Starting OpenPose demo...", op::Priority::High);
- const auto timerBegin = std::chrono::high_resolution_clock::now();
-
- // logging_level
- op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
- __LINE__, __FUNCTION__, __FILE__);
- op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- op::Profiler::setDefaultX(FLAGS_profile_speed);
- // // For debugging
- // // Print all logging messages
- // op::ConfigureLog::setPriorityThreshold(op::Priority::None);
- // // Print out speed values faster
- // op::Profiler::setDefaultX(100);
-
- // Applying user defined configuration - Google flags to program variables
- // outputSize
- const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
- // netInputSize
- const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
- // faceNetInputSize
- const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
- // handNetInputSize
- const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
- // producerType
- const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
- FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps,
- FLAGS_camera_parameter_folder, !FLAGS_frame_keep_distortion,
- (unsigned int) FLAGS_3d_views, FLAGS_flir_camera_index);
- // poseModel
- const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
- // JSON saving
- if (!FLAGS_write_keypoint.empty())
- op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
- " Please, use `write_json` instead.", op::Priority::Max);
- // keypointScale
- const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
- // heatmaps to add
- const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
- FLAGS_heatmaps_add_PAFs);
- const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
- // >1 camera view?
- const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1 || FLAGS_flir_camera);
- // Enabling Google Logging
- const bool enableGoogleLogging = true;
- // Logging
- op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
-
- // OpenPose wrapper
- op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
- // op::Wrapper<std::vector<op::Datum>> opWrapper;
- op::Wrapper<std::vector<UserDatum>> opWrapper;
-
- // Initializing the user custom classes
- // GUI (Display)
- auto wUserOutput = std::make_shared<WUserOutput>();
- // Add custom processing
- const auto workerOutputOnNewThread = true;
- opWrapper.setWorkerOutput(wUserOutput, workerOutputOnNewThread);
-
- // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
- const op::WrapperStructPose wrapperStructPose{
- !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
- FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
- poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
- FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
- (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
- // Face configuration (use op::WrapperStructFace{} to disable it)
- const op::WrapperStructFace wrapperStructFace{
- FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
- (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
- // Hand configuration (use op::WrapperStructHand{} to disable it)
- const op::WrapperStructHand wrapperStructHand{
- FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
- op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
- (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
- // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
- const op::WrapperStructExtra wrapperStructExtra{
- FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
- // Producer (use default to disable any input)
- const op::WrapperStructInput wrapperStructInput{
- producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last, FLAGS_process_real_time, FLAGS_frame_flip,
- FLAGS_frame_rotate, FLAGS_frames_repeat};
- // Consumer (comment or use default argument to disable any output)
- // const op::WrapperStructOutput wrapperStructOutput{op::flagsToDisplayMode(FLAGS_display, FLAGS_3d),
- // !FLAGS_no_gui_verbose, FLAGS_fullscreen, FLAGS_write_keypoint,
- const auto displayMode = op::DisplayMode::NoDisplay;
- const bool guiVerbose = false;
- const bool fullScreen = false;
- const op::WrapperStructOutput wrapperStructOutput{
- displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
- op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
- FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
- FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
- FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
- // Configure wrapper
- opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
- wrapperStructInput, wrapperStructOutput);
- // Set to single-thread running (to debug and/or reduce latency)
- if (FLAGS_disable_multi_thread)
- opWrapper.disableMultiThreading();
-
- // Start processing
- // Two different ways of running the program on multithread environment
- op::log("Starting thread(s)...", op::Priority::High);
- // Start, run & stop threads - it blocks this thread until all others have finished
- opWrapper.exec();
-
- // // Option b) Keeping this thread free in case you want to do something else meanwhile, e.g. profiling the GPU
- // memory
- // // VERY IMPORTANT NOTE: if OpenCV is compiled with Qt support, this option will not work. Qt needs the main
- // // thread to plot visual results, so the final GUI (which uses OpenCV) would return an exception similar to:
- // // `QMetaMethod::invoke: Unable to invoke methods with return values in queued connections`
- // // Start threads
- // opWrapper.start();
- // // Profile used GPU memory
- // // 1: wait ~10sec so the memory has been totally loaded on GPU
- // // 2: profile the GPU memory
- // const auto sleepTimeMs = 10;
- // for (auto i = 0 ; i < 10000/sleepTimeMs && opWrapper.isRunning() ; i++)
- // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
- // op::Profiler::profileGpuMemory(__LINE__, __FUNCTION__, __FILE__);
- // // Keep program alive while running threads
- // while (opWrapper.isRunning())
- // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
- // // Stop and join threads
- // op::log("Stopping thread(s)", op::Priority::High);
- // opWrapper.stop();
-
- // Measuring total time
- const auto now = std::chrono::high_resolution_clock::now();
- const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
- * 1e-9;
- const auto message = "OpenPose demo successfully finished. Total time: "
- + std::to_string(totalTimeSec) + " seconds.";
- op::log(message, op::Priority::High);
-
- // Return successful message
- return 0;
- }
- catch (const std::exception& e)
- {
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- return -1;
- }
-}
-
-int main(int argc, char *argv[])
-{
- // Parsing command line flags
- gflags::ParseCommandLineFlags(&argc, &argv, true);
-
- // Running openPoseDemo
- return openPoseDemo();
-}
diff --git a/examples/tutorial_wrapper/4_user_synchronous_all.cpp b/examples/tutorial_wrapper/4_user_synchronous_all.cpp
deleted file mode 100644
index e15471945..000000000
--- a/examples/tutorial_wrapper/4_user_synchronous_all.cpp
+++ /dev/null
@@ -1,541 +0,0 @@
-// ------------------------- OpenPose Library Tutorial - Wrapper - Example 2 - Synchronous -------------------------
-// Synchronous mode: ideal for performance. The user can add his own frames producer / post-processor / consumer to the OpenPose wrapper or use the
-// default ones.
-
-// This example shows the user how to use the OpenPose wrapper class:
- // 1. User reads images
- // 2. Extract and render keypoint / heatmap / PAF of that image
- // 3. Save the results on disk
- // 4. User displays the rendered pose
- // Everything in a multi-thread scenario
-// In addition to the previous OpenPose modules, we also need to use:
- // 1. `core` module:
- // For the Array class that the `pose` module needs
- // For the Datum struct that the `thread` module sends between the queues
- // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
-// This file should only be used for the user to take specific examples.
-
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
-// OpenPose dependencies
-#include <openpose/headers.hpp>
-
-// See all the available parameter options with the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
-// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
-// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while"
- " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for"
- " low priority messages and 4 for important ones.");
-DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful"
- " for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with"
- " low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the"
- " error.");
-DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some"
- " runtime statistics at this frame number.");
-// Producer
-DEFINE_string(image_dir, "examples/media/", "Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).");
-DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (also used when saving video). Set this value to the minimum"
- " value between the OpenPose displayed speed and the webcam real frame rate.");
-// OpenPose
-DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
-DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
- " input image resolution.");
-DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
- " machine.");
-DEFINE_int32(num_gpu_start, 0, "GPU device start number.");
-DEFINE_int32(keypoint_scale, 0, "Scaling of the (x,y) coordinates of the final pose data array, i.e. the scale of the (x,y)"
- " coordinates that will be saved with the `write_json` & `write_keypoint` flags."
- " Select `0` to scale it to the original source resolution; `1`to scale it to the net output"
- " size (set with `net_resolution`); `2` to scale it to the final output size (set with"
- " `resolution`); `3` to scale it in the range [0,1], where (0,0) would be the top-left"
- " corner of the image, and (1,1) the bottom-right one; and 4 for range [-1,1], where"
- " (-1,-1) would be the top-left corner of the image, and (1,1) the bottom-right one. Non"
- " related with `scale_number` and `scale_gap`.");
-DEFINE_int32(number_people_max, -1, "This parameter will limit the maximum number of people detected, by keeping the people with"
- " top scores. The score is based in person area over the image, body part score, as well as"
- " joint score (between each pair of connected body parts). Useful if you know the exact"
- " number of people in the scene, so it can remove false positives (if all the people have"
- " been detected. However, it might also include false negatives by removing very small or"
- " highly occluded people. -1 will keep them all.");
-// OpenPose Body Pose
-DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face"
- " keypoint detection.");
-DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), "
- "`MPI_4_layers` (15 keypoints, even faster but less accurate).");
-DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is"
- " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the"
- " closest aspect ratio possible to the images or videos to be processed. Using `-1` in"
- " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's"
- " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions,"
- " e.g. full HD (1980x1080) and HD (1280x720) resolutions.");
-DEFINE_int32(scale_number, 1, "Number of scales to average.");
-DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1."
- " If you want to change the initial scale, you actually want to multiply the"
- " `net_resolution` by your desired initial scale.");
-// OpenPose Body Pose Heatmaps and Part Candidates
-DEFINE_bool(heatmaps_add_parts, false, "If true, it will fill op::Datum::poseHeatMaps array with the body part heatmaps, and"
- " analogously face & hand heatmaps to op::Datum::faceHeatMaps & op::Datum::handHeatMaps."
- " If more than one `add_heatmaps_X` flag is enabled, it will place then in sequential"
- " memory order: body parts + bkg + PAFs. It will follow the order on"
- " POSE_BODY_PART_MAPPING in `src/openpose/pose/poseParameters.cpp`. Program speed will"
- " considerably decrease. Not required for OpenPose, enable it only if you intend to"
- " explicitly use this information later.");
-DEFINE_bool(heatmaps_add_bkg, false, "Same functionality as `add_heatmaps_parts`, but adding the heatmap corresponding to"
- " background.");
-DEFINE_bool(heatmaps_add_PAFs, false, "Same functionality as `add_heatmaps_parts`, but adding the PAFs.");
-DEFINE_int32(heatmaps_scale, 2, "Set 0 to scale op::Datum::poseHeatMaps in the range [-1,1], 1 for [0,1]; 2 for integer"
- " rounded [0,255]; and 3 for no scaling.");
-DEFINE_bool(part_candidates, false, "Also enable `write_json` in order to save this information. If true, it will fill the"
- " op::Datum::poseCandidates array with the body part candidates. Candidates refer to all"
- " the detected body parts, before being assembled into people. Note that the number of"
- " candidates is equal or higher than the number of final body parts (i.e. after being"
- " assembled into people). The empty body parts are filled with 0s. Program speed will"
- " slightly decrease. Not required for OpenPose, enable it only if you intend to explicitly"
- " use this information.");
-// OpenPose Face
-DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Note that this will considerable slow down the performance and increse"
- " the required GPU memory. In addition, the greater number of people on the image, the"
- " slower OpenPose will be.");
-DEFINE_string(face_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the face keypoint"
- " detector. 320x320 usually works fine while giving a substantial speed up when multiple"
- " faces on the image.");
-// OpenPose Hand
-DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Analogously to `--face`, it will also slow down the performance, increase"
- " the required GPU memory and its speed depends on the number of people.");
-DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the hand keypoint"
- " detector.");
-DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results"
- " were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4.");
-DEFINE_double(hand_scale_range, 0.4, "Analogous purpose to `scale_gap` but applied to the hand keypoint detector. Total range"
- " between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if"
- " scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2.");
-DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate"
- " is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it"
- " simply looks for hands in positions at which hands were located in previous frames, but"
- " it does not guarantee the same person ID among frames.");
-// OpenPose 3-D Reconstruction
-DEFINE_bool(3d, false, "Running OpenPose 3-D reconstruction demo: 1) Reading from a stereo camera system."
- " 2) Performing 3-D reconstruction from the multiple views. 3) Displaying 3-D reconstruction"
- " results. Note that it will only display 1 person. If multiple people is present, it will"
- " fail.");
-DEFINE_int32(3d_min_views, -1, "Minimum number of views required to reconstruct each keypoint. By default (-1), it will"
- " require all the cameras to see the keypoint in order to reconstruct it.");
-DEFINE_int32(3d_views, 1, "Complementary option to `--image_dir` or `--video`. OpenPose will read as many images per"
- " iteration, allowing tasks such as stereo camera processing (`--3d`). Note that"
- " `--camera_parameters_folder` must be set. OpenPose must find as many `xml` files in the"
- " parameter folder as this number indicates.");
-// Extra algorithms
-DEFINE_bool(identification, false, "Experimental, not available yet. Whether to enable people identification across frames.");
-DEFINE_int32(tracking, -1, "Experimental, not available yet. Whether to enable people tracking across frames. The"
- " value indicates the number of frames where tracking is run between each OpenPose keypoint"
- " detection. Select -1 (default) to disable it or 0 to run simultaneously OpenPose keypoint"
- " detector and tracking for potentially higher accurary than only OpenPose.");
-DEFINE_int32(ik_threads, 0, "Experimental, not available yet. Whether to enable inverse kinematics (IK) from 3-D"
- " keypoints to obtain 3-D joint angles. By default (0 threads), it is disabled. Increasing"
- " the number of threads will increase the speed but also the global system latency.");
-// OpenPose Rendering
-DEFINE_int32(part_to_show, 0, "Prediction channel to visualize (default: 0). 0 for all the body parts, 1-18 for each body"
- " part heat map, 19 for the background heat map, 20 for all the body part heat maps"
- " together, 21 for all the PAFs, 22-40 for each body part pair PAF.");
-DEFINE_bool(disable_blending, false, "If enabled, it will render the results (keypoint skeletons or heatmaps) on a black"
- " background, instead of being rendered into the original image. Related: `part_to_show`,"
- " `alpha_pose`, and `alpha_pose`.");
-// OpenPose Rendering Pose
-DEFINE_double(render_threshold, 0.05, "Only estimated keypoints whose score confidences are higher than this threshold will be"
- " rendered. Generally, a high threshold (> 0.5) will only render very clear body parts;"
- " while small thresholds (~0.1) will also output guessed and occluded keypoints, but also"
- " more false positives (i.e. wrong detections).");
-DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering"
- " (slower but greater functionality, e.g. `alpha_X` flags). If -1, it will pick CPU if"
- " CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render"
- " both `outputData` and `cvOutputData` with the original image and desired body part to be"
- " shown (i.e. keypoints, heat maps or PAFs).");
-DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will"
- " hide it. Only valid for GPU rendering.");
-DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the"
- " heatmap, 0 will only show the frame. Only valid for GPU rendering.");
-// OpenPose Rendering Face
-DEFINE_double(face_render_threshold, 0.4, "Analogous to `render_threshold`, but applied to the face keypoints.");
-DEFINE_int32(face_render, -1, "Analogous to `render_pose` but applied to the face. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(face_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to face.");
-DEFINE_double(face_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
-// OpenPose Rendering Hand
-DEFINE_double(hand_render_threshold, 0.2, "Analogous to `render_threshold`, but applied to the hand keypoints.");
-DEFINE_int32(hand_render, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(hand_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to hand.");
-DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
-// Result Saving
-DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
-DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
- " function cv::imwrite for all compatible extensions.");
-DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
- " final path does not finish in `.avi`. It internally uses cv::VideoWriter. Flag"
- " `camera_fps` controls FPS.");
-DEFINE_string(write_json, "", "Directory to write OpenPose output in JSON format. It includes body, hand, and face pose"
- " keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled).");
-DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format.");
-DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format.");
-DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag"
- " must be enabled.");
-DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`."
- " For lossless compression, recommended `png` for integer `heatmaps_scale` and `float` for"
- " floating values.");
-DEFINE_string(write_keypoint, "", "(Deprecated, use `write_json`) Directory to write the people pose keypoint data. Set format"
- " with `write_keypoint_format`.");
-DEFINE_string(write_keypoint_format, "yml", "(Deprecated, use `write_json`) File extension and format for `write_keypoint`: json, xml,"
- " yaml & yml. Json not available for OpenCV < 3.0, use `write_json` instead.");
-// Result Saving - Extra Algorithms
-DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g.: `~/Desktop/adamResult.avi`. Flag `camera_fps`"
- " controls FPS.");
-DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g.: `~/Desktop/mocapResult.bvh`.");
-// UDP communication
-DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`.");
-DEFINE_string(udp_port, "8051", "Experimental, not available yet. Port number for UDP communication.");
-
-
-// If the user needs his own variables, he can inherit the op::Datum struct and add them
-// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
-// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
-struct UserDatum : public op::Datum
-{
- bool boolThatUserNeedsForSomeReason;
-
- UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
- boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
- {}
-};
-
-// The W-classes can be implemented either as a template or as simple classes given
-// that the user usually knows which kind of data he will move between the queues,
-// in this case we assume a std::shared_ptr of a std::vector of UserDatum
-
-// This worker will just read and return all the jpg files in a directory
-class WUserInput : public op::WorkerProducer<std::shared_ptr<std::vector<UserDatum>>>
-{
-public:
- WUserInput(const std::string& directoryPath) :
- mImageFiles{op::getFilesOnDirectory(directoryPath, "jpg")},
- // If we want "jpg" + "png" images
- // mImageFiles{op::getFilesOnDirectory(directoryPath, std::vector<std::string>{"jpg", "png"})},
- mCounter{0}
- {
- if (mImageFiles.empty())
- op::error("No images found on: " + directoryPath, __LINE__, __FUNCTION__, __FILE__);
- }
-
- void initializationOnThread() {}
-
- std::shared_ptr<std::vector<UserDatum>> workProducer()
- {
- try
- {
- // Close program when empty frame
- if (mImageFiles.size() <= mCounter)
- {
- op::log("Last frame read and added to queue. Closing program after it is processed.",
- op::Priority::High);
- // This function stops this worker, which will eventually stop the whole thread system once all the
- // frames have been processed
- this->stop();
- return nullptr;
- }
- else
- {
- // Create new datum
- auto datumsPtr = std::make_shared<std::vector<UserDatum>>();
- datumsPtr->emplace_back();
- auto& datum = datumsPtr->at(0);
-
- // Fill datum
- datum.cvInputData = cv::imread(mImageFiles.at(mCounter++));
-
- // If empty frame -> return nullptr
- if (datum.cvInputData.empty())
- {
- op::log("Empty frame detected on path: " + mImageFiles.at(mCounter-1) + ". Closing program.",
- op::Priority::High);
- this->stop();
- datumsPtr = nullptr;
- }
-
- return datumsPtr;
- }
- }
- catch (const std::exception& e)
- {
- this->stop();
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- return nullptr;
- }
- }
-
-private:
- const std::vector<std::string> mImageFiles;
- unsigned long long mCounter;
-};
-
-// This worker will just invert the image
-class WUserPostProcessing : public op::Worker<std::shared_ptr<std::vector<UserDatum>>>
-{
-public:
- WUserPostProcessing()
- {
- // User's constructor here
- }
-
- void initializationOnThread() {}
-
- void work(std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
- {
- // User's post-processing (after OpenPose processing & before OpenPose outputs) here
- // datum.cvOutputData: rendered frame with pose or heatmaps
- // datum.poseKeypoints: Array with the estimated pose
- try
- {
- if (datumsPtr != nullptr && !datumsPtr->empty())
- for (auto& datum : *datumsPtr)
- cv::bitwise_not(datum.cvOutputData, datum.cvOutputData);
- }
- catch (const std::exception& e)
- {
- this->stop();
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- }
- }
-};
-
-// This worker will just display the rendered output and print the estimated keypoints
-class WUserOutput : public op::WorkerConsumer<std::shared_ptr<std::vector<UserDatum>>>
-{
-public:
- void initializationOnThread() {}
-
- void workConsumer(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
- {
- try
- {
- // User's displaying/saving/other processing here
- // datum.cvOutputData: rendered frame with pose or heatmaps
- // datum.poseKeypoints: Array with the estimated pose
- if (datumsPtr != nullptr && !datumsPtr->empty())
- {
- // Show in command line the resulting pose keypoints for body, face and hands
- op::log("\nKeypoints:");
- // Accessing each element of the keypoints
- const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
- op::log("Person pose keypoints:");
- for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
- {
- op::log("Person " + std::to_string(person) + " (x, y, score):");
- for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
- {
- std::string valueToPrint;
- for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
- {
- valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
- }
- op::log(valueToPrint);
- }
- }
- op::log(" ");
- // Alternative: just getting std::string equivalent
- op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
- op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
- op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
- // Heatmaps
- const auto& poseHeatMaps = datumsPtr->at(0).poseHeatMaps;
- if (!poseHeatMaps.empty())
- {
- op::log("Pose heatmaps size: [" + std::to_string(poseHeatMaps.getSize(0)) + ", "
- + std::to_string(poseHeatMaps.getSize(1)) + ", "
- + std::to_string(poseHeatMaps.getSize(2)) + "]");
- const auto& faceHeatMaps = datumsPtr->at(0).faceHeatMaps;
- op::log("Face heatmaps size: [" + std::to_string(faceHeatMaps.getSize(0)) + ", "
- + std::to_string(faceHeatMaps.getSize(1)) + ", "
- + std::to_string(faceHeatMaps.getSize(2)) + ", "
- + std::to_string(faceHeatMaps.getSize(3)) + "]");
- const auto& handHeatMaps = datumsPtr->at(0).handHeatMaps;
- op::log("Left hand heatmaps size: [" + std::to_string(handHeatMaps[0].getSize(0)) + ", "
- + std::to_string(handHeatMaps[0].getSize(1)) + ", "
- + std::to_string(handHeatMaps[0].getSize(2)) + ", "
- + std::to_string(handHeatMaps[0].getSize(3)) + "]");
- op::log("Right hand heatmaps size: [" + std::to_string(handHeatMaps[1].getSize(0)) + ", "
- + std::to_string(handHeatMaps[1].getSize(1)) + ", "
- + std::to_string(handHeatMaps[1].getSize(2)) + ", "
- + std::to_string(handHeatMaps[1].getSize(3)) + "]");
- }
-
- // Display rendered output image
- cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
- // Display image and sleeps at least 1 ms (it usually sleeps ~5-10 msec to display the image)
- const char key = (char)cv::waitKey(1);
- if (key == 27)
- this->stop();
- }
- }
- catch (const std::exception& e)
- {
- this->stop();
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- }
- }
-};
-
-int openPoseTutorialWrapper2()
-{
- try
- {
- op::log("Starting OpenPose demo...", op::Priority::High);
- const auto timerBegin = std::chrono::high_resolution_clock::now();
-
- // logging_level
- op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
- __LINE__, __FUNCTION__, __FILE__);
- op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- op::Profiler::setDefaultX(FLAGS_profile_speed);
-
- // Applying user defined configuration - Google flags to program variables
- // outputSize
- const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
- // netInputSize
- const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
- // faceNetInputSize
- const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
- // handNetInputSize
- const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
- // poseModel
- const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
- // JSON saving
- if (!FLAGS_write_keypoint.empty())
- op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
- " Please, use `write_json` instead.", op::Priority::Max);
- // keypointScale
- const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
- // heatmaps to add
- const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
- FLAGS_heatmaps_add_PAFs);
- const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
- // >1 camera view?
- const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1);
- // Enabling Google Logging
- const bool enableGoogleLogging = true;
- // Logging
- op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
-
- // Initializing the user custom classes
- // Frames producer (e.g. video, webcam, ...)
- auto wUserInput = std::make_shared<WUserInput>(FLAGS_image_dir);
- // Processing
- auto wUserPostProcessing = std::make_shared<WUserPostProcessing>();
- // GUI (Display)
- auto wUserOutput = std::make_shared<WUserOutput>();
-
- op::Wrapper<std::vector<UserDatum>> opWrapper;
- // Add custom input
- const auto workerInputOnNewThread = false;
- opWrapper.setWorkerInput(wUserInput, workerInputOnNewThread);
- // Add custom processing
- const auto workerProcessingOnNewThread = false;
- opWrapper.setWorkerPostProcessing(wUserPostProcessing, workerProcessingOnNewThread);
- // Add custom output
- const auto workerOutputOnNewThread = true;
- opWrapper.setWorkerOutput(wUserOutput, workerOutputOnNewThread);
- // Configure OpenPose
- op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
- const op::WrapperStructPose wrapperStructPose{
- !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
- FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
- poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
- FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
- (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
- // Face configuration (use op::WrapperStructFace{} to disable it)
- const op::WrapperStructFace wrapperStructFace{
- FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
- (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
- // Hand configuration (use op::WrapperStructHand{} to disable it)
- const op::WrapperStructHand wrapperStructHand{
- FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
- op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
- (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
- // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
- const op::WrapperStructExtra wrapperStructExtra{
- FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
- // Consumer (comment or use default argument to disable any output)
- const auto displayMode = op::DisplayMode::NoDisplay;
- const bool guiVerbose = false;
- const bool fullScreen = false;
- const op::WrapperStructOutput wrapperStructOutput{
- displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
- op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
- FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
- FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
- FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
- // Configure wrapper
- opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
- op::WrapperStructInput{}, wrapperStructOutput);
- // Set to single-thread running (to debug and/or reduce latency)
- if (FLAGS_disable_multi_thread)
- opWrapper.disableMultiThreading();
-
- op::log("Starting thread(s)...", op::Priority::High);
- // Two different ways of running the program on multithread environment
- // Start, run & stop threads - it blocks this thread until all others have finished
- opWrapper.exec();
-
- // Option b) Keeping this thread free in case you want to do something else meanwhile, e.g. profiling the GPU memory
- // // VERY IMPORTANT NOTE: if OpenCV is compiled with Qt support, this option will not work. Qt needs the main
- // // thread to plot visual results, so the final GUI (which uses OpenCV) would return an exception similar to:
- // // `QMetaMethod::invoke: Unable to invoke methods with return values in queued connections`
- // // Start threads
- // opWrapper.start();
- // // Profile used GPU memory
- // // 1: wait ~10sec so the memory has been totally loaded on GPU
- // // 2: profile the GPU memory
- // std::this_thread::sleep_for(std::chrono::milliseconds{1000});
- // op::log("Random task here...", op::Priority::High);
- // // Keep program alive while running threads
- // while (opWrapper.isRunning())
- // std::this_thread::sleep_for(std::chrono::milliseconds{33});
- // // Stop and join threads
- // op::log("Stopping thread(s)", op::Priority::High);
- // opWrapper.stop();
-
- // Measuring total time
- const auto now = std::chrono::high_resolution_clock::now();
- const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
- * 1e-9;
- const auto message = "OpenPose demo successfully finished. Total time: "
- + std::to_string(totalTimeSec) + " seconds.";
- op::log(message, op::Priority::High);
-
- // Return successful message
- return 0;
- }
- catch (const std::exception& e)
- {
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- return -1;
- }
-}
-
-int main(int argc, char *argv[])
-{
- // Parsing command line flags
- gflags::ParseCommandLineFlags(&argc, &argv, true);
-
- // Running openPoseTutorialWrapper2
- return openPoseTutorialWrapper2();
-}
diff --git a/examples/tutorial_wrapper/5_user_asynchronous.cpp b/examples/tutorial_wrapper/5_user_asynchronous.cpp
deleted file mode 100644
index 30010bca3..000000000
--- a/examples/tutorial_wrapper/5_user_asynchronous.cpp
+++ /dev/null
@@ -1,493 +0,0 @@
-// ------------------------- OpenPose Library Tutorial - Wrapper - Example 1 - Asynchronous -------------------------
-// Asynchronous mode: ideal for fast prototyping when performance is not an issue. The user emplaces/pushes and pops frames from the OpenPose wrapper
-// when he desires to.
-
-// This example shows the user how to use the OpenPose wrapper class:
- // 1. User reads images
- // 2. Extract and render keypoint / heatmap / PAF of that image
- // 3. Save the results on disk
- // 4. User displays the rendered pose
- // Everything in a multi-thread scenario
-// In addition to the previous OpenPose modules, we also need to use:
- // 1. `core` module:
- // For the Array class that the `pose` module needs
- // For the Datum struct that the `thread` module sends between the queues
- // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
-// This file should only be used for the user to take specific examples.
-
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
-// OpenPose dependencies
-#include <openpose/headers.hpp>
-
-// See all the available parameter options with the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
-// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
-// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while"
- " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for"
- " low priority messages and 4 for important ones.");
-DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful"
- " for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with"
- " low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the"
- " error.");
-DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some"
- " runtime statistics at this frame number.");
-// Producer
-DEFINE_string(image_dir, "examples/media/", "Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).");
-DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (also used when saving video). Set this value to the minimum"
- " value between the OpenPose displayed speed and the webcam real frame rate.");
-// OpenPose
-DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
-DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
- " input image resolution.");
-DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
- " machine.");
-DEFINE_int32(num_gpu_start, 0, "GPU device start number.");
-DEFINE_int32(keypoint_scale, 0, "Scaling of the (x,y) coordinates of the final pose data array, i.e. the scale of the (x,y)"
- " coordinates that will be saved with the `write_json` & `write_keypoint` flags."
- " Select `0` to scale it to the original source resolution; `1`to scale it to the net output"
- " size (set with `net_resolution`); `2` to scale it to the final output size (set with"
- " `resolution`); `3` to scale it in the range [0,1], where (0,0) would be the top-left"
- " corner of the image, and (1,1) the bottom-right one; and 4 for range [-1,1], where"
- " (-1,-1) would be the top-left corner of the image, and (1,1) the bottom-right one. Non"
- " related with `scale_number` and `scale_gap`.");
-DEFINE_int32(number_people_max, -1, "This parameter will limit the maximum number of people detected, by keeping the people with"
- " top scores. The score is based in person area over the image, body part score, as well as"
- " joint score (between each pair of connected body parts). Useful if you know the exact"
- " number of people in the scene, so it can remove false positives (if all the people have"
- " been detected. However, it might also include false negatives by removing very small or"
- " highly occluded people. -1 will keep them all.");
-// OpenPose Body Pose
-DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face"
- " keypoint detection.");
-DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), "
- "`MPI_4_layers` (15 keypoints, even faster but less accurate).");
-DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is"
- " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the"
- " closest aspect ratio possible to the images or videos to be processed. Using `-1` in"
- " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's"
- " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions,"
- " e.g. full HD (1980x1080) and HD (1280x720) resolutions.");
-DEFINE_int32(scale_number, 1, "Number of scales to average.");
-DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1."
- " If you want to change the initial scale, you actually want to multiply the"
- " `net_resolution` by your desired initial scale.");
-// OpenPose Body Pose Heatmaps and Part Candidates
-DEFINE_bool(heatmaps_add_parts, false, "If true, it will fill op::Datum::poseHeatMaps array with the body part heatmaps, and"
- " analogously face & hand heatmaps to op::Datum::faceHeatMaps & op::Datum::handHeatMaps."
- " If more than one `add_heatmaps_X` flag is enabled, it will place then in sequential"
- " memory order: body parts + bkg + PAFs. It will follow the order on"
- " POSE_BODY_PART_MAPPING in `src/openpose/pose/poseParameters.cpp`. Program speed will"
- " considerably decrease. Not required for OpenPose, enable it only if you intend to"
- " explicitly use this information later.");
-DEFINE_bool(heatmaps_add_bkg, false, "Same functionality as `add_heatmaps_parts`, but adding the heatmap corresponding to"
- " background.");
-DEFINE_bool(heatmaps_add_PAFs, false, "Same functionality as `add_heatmaps_parts`, but adding the PAFs.");
-DEFINE_int32(heatmaps_scale, 2, "Set 0 to scale op::Datum::poseHeatMaps in the range [-1,1], 1 for [0,1]; 2 for integer"
- " rounded [0,255]; and 3 for no scaling.");
-DEFINE_bool(part_candidates, false, "Also enable `write_json` in order to save this information. If true, it will fill the"
- " op::Datum::poseCandidates array with the body part candidates. Candidates refer to all"
- " the detected body parts, before being assembled into people. Note that the number of"
- " candidates is equal or higher than the number of final body parts (i.e. after being"
- " assembled into people). The empty body parts are filled with 0s. Program speed will"
- " slightly decrease. Not required for OpenPose, enable it only if you intend to explicitly"
- " use this information.");
-// OpenPose Face
-DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Note that this will considerable slow down the performance and increse"
- " the required GPU memory. In addition, the greater number of people on the image, the"
- " slower OpenPose will be.");
-DEFINE_string(face_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the face keypoint"
- " detector. 320x320 usually works fine while giving a substantial speed up when multiple"
- " faces on the image.");
-// OpenPose Hand
-DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Analogously to `--face`, it will also slow down the performance, increase"
- " the required GPU memory and its speed depends on the number of people.");
-DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the hand keypoint"
- " detector.");
-DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results"
- " were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4.");
-DEFINE_double(hand_scale_range, 0.4, "Analogous purpose to `scale_gap` but applied to the hand keypoint detector. Total range"
- " between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if"
- " scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2.");
-DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate"
- " is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it"
- " simply looks for hands in positions at which hands were located in previous frames, but"
- " it does not guarantee the same person ID among frames.");
-// OpenPose 3-D Reconstruction
-DEFINE_bool(3d, false, "Running OpenPose 3-D reconstruction demo: 1) Reading from a stereo camera system."
- " 2) Performing 3-D reconstruction from the multiple views. 3) Displaying 3-D reconstruction"
- " results. Note that it will only display 1 person. If multiple people is present, it will"
- " fail.");
-DEFINE_int32(3d_min_views, -1, "Minimum number of views required to reconstruct each keypoint. By default (-1), it will"
- " require all the cameras to see the keypoint in order to reconstruct it.");
-DEFINE_int32(3d_views, 1, "Complementary option to `--image_dir` or `--video`. OpenPose will read as many images per"
- " iteration, allowing tasks such as stereo camera processing (`--3d`). Note that"
- " `--camera_parameters_folder` must be set. OpenPose must find as many `xml` files in the"
- " parameter folder as this number indicates.");
-// Extra algorithms
-DEFINE_bool(identification, false, "Experimental, not available yet. Whether to enable people identification across frames.");
-DEFINE_int32(tracking, -1, "Experimental, not available yet. Whether to enable people tracking across frames. The"
- " value indicates the number of frames where tracking is run between each OpenPose keypoint"
- " detection. Select -1 (default) to disable it or 0 to run simultaneously OpenPose keypoint"
- " detector and tracking for potentially higher accurary than only OpenPose.");
-DEFINE_int32(ik_threads, 0, "Experimental, not available yet. Whether to enable inverse kinematics (IK) from 3-D"
- " keypoints to obtain 3-D joint angles. By default (0 threads), it is disabled. Increasing"
- " the number of threads will increase the speed but also the global system latency.");
-// OpenPose Rendering
-DEFINE_int32(part_to_show, 0, "Prediction channel to visualize (default: 0). 0 for all the body parts, 1-18 for each body"
- " part heat map, 19 for the background heat map, 20 for all the body part heat maps"
- " together, 21 for all the PAFs, 22-40 for each body part pair PAF.");
-DEFINE_bool(disable_blending, false, "If enabled, it will render the results (keypoint skeletons or heatmaps) on a black"
- " background, instead of being rendered into the original image. Related: `part_to_show`,"
- " `alpha_pose`, and `alpha_pose`.");
-// OpenPose Rendering Pose
-DEFINE_double(render_threshold, 0.05, "Only estimated keypoints whose score confidences are higher than this threshold will be"
- " rendered. Generally, a high threshold (> 0.5) will only render very clear body parts;"
- " while small thresholds (~0.1) will also output guessed and occluded keypoints, but also"
- " more false positives (i.e. wrong detections).");
-DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering"
- " (slower but greater functionality, e.g. `alpha_X` flags). If -1, it will pick CPU if"
- " CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render"
- " both `outputData` and `cvOutputData` with the original image and desired body part to be"
- " shown (i.e. keypoints, heat maps or PAFs).");
-DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will"
- " hide it. Only valid for GPU rendering.");
-DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the"
- " heatmap, 0 will only show the frame. Only valid for GPU rendering.");
-// OpenPose Rendering Face
-DEFINE_double(face_render_threshold, 0.4, "Analogous to `render_threshold`, but applied to the face keypoints.");
-DEFINE_int32(face_render, -1, "Analogous to `render_pose` but applied to the face. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(face_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to face.");
-DEFINE_double(face_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
-// OpenPose Rendering Hand
-DEFINE_double(hand_render_threshold, 0.2, "Analogous to `render_threshold`, but applied to the hand keypoints.");
-DEFINE_int32(hand_render, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(hand_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to hand.");
-DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
-// Result Saving
-DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
-DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
- " function cv::imwrite for all compatible extensions.");
-DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
- " final path does not finish in `.avi`. It internally uses cv::VideoWriter. Flag"
- " `camera_fps` controls FPS.");
-DEFINE_string(write_json, "", "Directory to write OpenPose output in JSON format. It includes body, hand, and face pose"
- " keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled).");
-DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format.");
-DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format.");
-DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag"
- " must be enabled.");
-DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`."
- " For lossless compression, recommended `png` for integer `heatmaps_scale` and `float` for"
- " floating values.");
-DEFINE_string(write_keypoint, "", "(Deprecated, use `write_json`) Directory to write the people pose keypoint data. Set format"
- " with `write_keypoint_format`.");
-DEFINE_string(write_keypoint_format, "yml", "(Deprecated, use `write_json`) File extension and format for `write_keypoint`: json, xml,"
- " yaml & yml. Json not available for OpenCV < 3.0, use `write_json` instead.");
-// Result Saving - Extra Algorithms
-DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g.: `~/Desktop/adamResult.avi`. Flag `camera_fps`"
- " controls FPS.");
-DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g.: `~/Desktop/mocapResult.bvh`.");
-// UDP communication
-DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`.");
-DEFINE_string(udp_port, "8051", "Experimental, not available yet. Port number for UDP communication.");
-
-
-// If the user needs his own variables, he can inherit the op::Datum struct and add them
-// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define Wrapper<std::vector<UserDatum>> instead of
-// Wrapper<std::vector<op::Datum>>
-struct UserDatum : public op::Datum
-{
- bool boolThatUserNeedsForSomeReason;
-
- UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
- boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
- {}
-};
-
-// The W-classes can be implemented either as a template or as simple classes given
-// that the user usually knows which kind of data he will move between the queues,
-// in this case we assume a std::shared_ptr of a std::vector of UserDatum
-
-// This worker will just read and return all the jpg files in a directory
-class UserInputClass
-{
-public:
- UserInputClass(const std::string& directoryPath) :
- mImageFiles{op::getFilesOnDirectory(directoryPath, "jpg")},
- // If we want "jpg" + "png" images
- // mImageFiles{op::getFilesOnDirectory(directoryPath, std::vector<std::string>{"jpg", "png"})},
- mCounter{0},
- mClosed{false}
- {
- if (mImageFiles.empty())
- op::error("No images found on: " + directoryPath, __LINE__, __FUNCTION__, __FILE__);
- }
-
- std::shared_ptr<std::vector<UserDatum>> createDatum()
- {
- // Close program when empty frame
- if (mClosed || mImageFiles.size() <= mCounter)
- {
- op::log("Last frame read and added to queue. Closing program after it is processed.", op::Priority::High);
- // This function stops this worker, which will eventually stop the whole thread system once all the frames
- // have been processed
- mClosed = true;
- return nullptr;
- }
- else // if (!mClosed)
- {
- // Create new datum
- auto datumsPtr = std::make_shared<std::vector<UserDatum>>();
- datumsPtr->emplace_back();
- auto& datum = datumsPtr->at(0);
-
- // Fill datum
- datum.cvInputData = cv::imread(mImageFiles.at(mCounter++));
-
- // If empty frame -> return nullptr
- if (datum.cvInputData.empty())
- {
- op::log("Empty frame detected on path: " + mImageFiles.at(mCounter-1) + ". Closing program.",
- op::Priority::High);
- mClosed = true;
- datumsPtr = nullptr;
- }
-
- return datumsPtr;
- }
- }
-
- bool isFinished() const
- {
- return mClosed;
- }
-
-private:
- const std::vector<std::string> mImageFiles;
- unsigned long long mCounter;
- bool mClosed;
-};
-
-// This class will just display the rendered output and print the estimated keypoints
-class UserOutputClass
-{
-public:
- bool display(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
- {
- // User's displaying/saving/other processing here
- // datum.cvOutputData: rendered frame with pose or heatmaps
- // datum.poseKeypoints: Array with the estimated pose
- char key = ' ';
- if (datumsPtr != nullptr && !datumsPtr->empty())
- {
- cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
- // Display image and sleeps at least 1 ms (it usually sleeps ~5-10 msec to display the image)
- key = (char)cv::waitKey(1);
- }
- else
- op::log("Nullptr or empty datumsPtr found.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
- return (key == 27);
- }
- void printKeypoints(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
- {
- // Example: How to use the pose keypoints
- if (datumsPtr != nullptr && !datumsPtr->empty())
- {
- op::log("\nKeypoints:");
- // Accessing each element of the keypoints
- const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
- op::log("Person pose keypoints:");
- for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
- {
- op::log("Person " + std::to_string(person) + " (x, y, score):");
- for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
- {
- std::string valueToPrint;
- for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
- valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
- op::log(valueToPrint);
- }
- }
- op::log(" ");
- // Alternative: just getting std::string equivalent
- op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
- op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
- op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
- // Heatmaps
- const auto& poseHeatMaps = datumsPtr->at(0).poseHeatMaps;
- if (!poseHeatMaps.empty())
- {
- op::log("Pose heatmaps size: [" + std::to_string(poseHeatMaps.getSize(0)) + ", "
- + std::to_string(poseHeatMaps.getSize(1)) + ", "
- + std::to_string(poseHeatMaps.getSize(2)) + "]");
- const auto& faceHeatMaps = datumsPtr->at(0).faceHeatMaps;
- op::log("Face heatmaps size: [" + std::to_string(faceHeatMaps.getSize(0)) + ", "
- + std::to_string(faceHeatMaps.getSize(1)) + ", "
- + std::to_string(faceHeatMaps.getSize(2)) + ", "
- + std::to_string(faceHeatMaps.getSize(3)) + "]");
- const auto& handHeatMaps = datumsPtr->at(0).handHeatMaps;
- op::log("Left hand heatmaps size: [" + std::to_string(handHeatMaps[0].getSize(0)) + ", "
- + std::to_string(handHeatMaps[0].getSize(1)) + ", "
- + std::to_string(handHeatMaps[0].getSize(2)) + ", "
- + std::to_string(handHeatMaps[0].getSize(3)) + "]");
- op::log("Right hand heatmaps size: [" + std::to_string(handHeatMaps[1].getSize(0)) + ", "
- + std::to_string(handHeatMaps[1].getSize(1)) + ", "
- + std::to_string(handHeatMaps[1].getSize(2)) + ", "
- + std::to_string(handHeatMaps[1].getSize(3)) + "]");
- }
- }
- else
- op::log("Nullptr or empty datumsPtr found.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
- }
-};
-
-int openPoseTutorialWrapper3()
-{
- try
- {
- op::log("Starting OpenPose demo...", op::Priority::High);
- const auto timerBegin = std::chrono::high_resolution_clock::now();
-
- // logging_level
- op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
- __LINE__, __FUNCTION__, __FILE__);
- op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- op::Profiler::setDefaultX(FLAGS_profile_speed);
-
- // Applying user defined configuration - Google flags to program variables
- // outputSize
- const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
- // netInputSize
- const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
- // faceNetInputSize
- const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
- // handNetInputSize
- const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
- // poseModel
- const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
- // JSON saving
- if (!FLAGS_write_keypoint.empty())
- op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
- " Please, use `write_json` instead.", op::Priority::Max);
- // keypointScale
- const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
- // heatmaps to add
- const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
- FLAGS_heatmaps_add_PAFs);
- const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
- // >1 camera view?
- const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1);
- // Enabling Google Logging
- const bool enableGoogleLogging = true;
- // Logging
- op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
-
- // Configure OpenPose
- op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
- op::Wrapper<std::vector<UserDatum>> opWrapper{op::ThreadManagerMode::Asynchronous};
- // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
- const op::WrapperStructPose wrapperStructPose{
- !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
- FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
- poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
- FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
- (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
- // Face configuration (use op::WrapperStructFace{} to disable it)
- const op::WrapperStructFace wrapperStructFace{
- FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
- (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
- // Hand configuration (use op::WrapperStructHand{} to disable it)
- const op::WrapperStructHand wrapperStructHand{
- FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
- op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
- (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
- // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
- const op::WrapperStructExtra wrapperStructExtra{
- FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
- // Consumer (comment or use default argument to disable any output)
- const auto displayMode = op::DisplayMode::NoDisplay;
- const bool guiVerbose = false;
- const bool fullScreen = false;
- const op::WrapperStructOutput wrapperStructOutput{
- displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
- op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
- FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
- FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
- FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
- // Configure wrapper
- opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
- op::WrapperStructInput{}, wrapperStructOutput);
- // Set to single-thread running (to debug and/or reduce latency)
- if (FLAGS_disable_multi_thread)
- opWrapper.disableMultiThreading();
-
- op::log("Starting thread(s)...", op::Priority::High);
- opWrapper.start();
-
- // User processing
- UserInputClass userInputClass(FLAGS_image_dir);
- UserOutputClass userOutputClass;
- bool userWantsToExit = false;
- while (!userWantsToExit && !userInputClass.isFinished())
- {
- // Push frame
- auto datumToProcess = userInputClass.createDatum();
- if (datumToProcess != nullptr)
- {
- auto successfullyEmplaced = opWrapper.waitAndEmplace(datumToProcess);
- // Pop frame
- std::shared_ptr<std::vector<UserDatum>> datumProcessed;
- if (successfullyEmplaced && opWrapper.waitAndPop(datumProcessed))
- {
- userWantsToExit = userOutputClass.display(datumProcessed);
- userOutputClass.printKeypoints(datumProcessed);
- }
- else
- op::log("Processed datum could not be emplaced.", op::Priority::High,
- __LINE__, __FUNCTION__, __FILE__);
- }
- }
-
- op::log("Stopping thread(s)", op::Priority::High);
- opWrapper.stop();
-
- // Measuring total time
- const auto now = std::chrono::high_resolution_clock::now();
- const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
- * 1e-9;
- const auto message = "OpenPose demo successfully finished. Total time: "
- + std::to_string(totalTimeSec) + " seconds.";
- op::log(message, op::Priority::High);
-
- // Return successful message
- return 0;
- }
- catch (const std::exception& e)
- {
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- return -1;
- }
-}
-
-int main(int argc, char *argv[])
-{
- // Parsing command line flags
- gflags::ParseCommandLineFlags(&argc, &argv, true);
-
- // Running openPoseTutorialWrapper3
- return openPoseTutorialWrapper3();
-}
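The file removed above is the fully asynchronous (input and output) wrapper example: the user pushes frames with `waitAndEmplace` and retrieves results with `waitAndPop`. A minimal, condensed sketch of that pattern, assuming a default wrapper configuration; the `input.jpg` path and the field name are placeholders chosen only for illustration:

```cpp
#include <memory>                 // std::make_shared, std::shared_ptr
#include <vector>                 // std::vector
#include <opencv2/opencv.hpp>     // cv::imread
#include <openpose/headers.hpp>   // op::Wrapper, op::Datum, op::log

// Same idea as the UserDatum struct in the deleted example: extend op::Datum with custom fields
struct UserDatum : public op::Datum
{
    bool boolThatUserNeedsForSomeReason = false;
};

int main()
{
    // Asynchronous mode: the user is responsible for both feeding and retrieving datums
    op::Wrapper<std::vector<UserDatum>> opWrapper{op::ThreadManagerMode::Asynchronous};
    opWrapper.configure(op::WrapperStructPose{}, op::WrapperStructFace{}, op::WrapperStructHand{},
                        op::WrapperStructExtra{}, op::WrapperStructInput{}, op::WrapperStructOutput{});
    opWrapper.start();

    // Push one frame ("input.jpg" is a placeholder path)...
    auto datumToProcess = std::make_shared<std::vector<UserDatum>>(1);
    datumToProcess->at(0).cvInputData = cv::imread("input.jpg");
    // ...and pop the processed result
    std::shared_ptr<std::vector<UserDatum>> datumProcessed;
    if (opWrapper.waitAndEmplace(datumToProcess) && opWrapper.waitAndPop(datumProcessed))
        op::log("Pose keypoints: " + datumProcessed->at(0).poseKeypoints.toString(), op::Priority::High);

    opWrapper.stop();
    return 0;
}
```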
diff --git a/examples/tutorial_wrapper/6_user_asynchronous_output.cpp b/examples/tutorial_wrapper/6_user_asynchronous_output.cpp
deleted file mode 100644
index c7386192d..000000000
--- a/examples/tutorial_wrapper/6_user_asynchronous_output.cpp
+++ /dev/null
@@ -1,461 +0,0 @@
-// ------------------------- OpenPose Library Tutorial - Wrapper - Example 3 - Asynchronous Output -------------------------
-// Asynchronous output mode: ideal for fast prototyping when performance is not an issue and the user wants to use the
-// OpenPose output format. The user simply retrieves the processed frames from the OpenPose wrapper whenever desired.
-
-// This example shows the user how to use the OpenPose wrapper class:
- // 1. Read folder of images / video / webcam
- // 2. Extract and render keypoint / heatmap / PAF of that image
- // 3. Save the results on disk
- // 4. User displays the rendered pose
- // Everything in a multi-thread scenario
-// In addition to the previous OpenPose modules, we also need to use:
- // 1. `core` module:
- // For the Array class that the `pose` module needs
- // For the Datum struct that the `thread` module sends between the queues
- // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
-// This file is only intended to provide the user with specific examples.
-
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
-// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
-#include <gflags/gflags.h>
-// Allow Google Flags in Ubuntu 14
-#ifndef GFLAGS_GFLAGS_H_
- namespace gflags = google;
-#endif
-// OpenPose dependencies
-#include <openpose/headers.hpp>
-
-// See all the available parameter options with the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
-// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
-// executable. E.g. for `openpose.bin`, look for `Flags from examples/openpose/openpose.cpp:`.
-// Debugging/Other
-DEFINE_int32(logging_level, 3, "The logging level. Integer in the range [0, 255]. 0 will output any log() message, while"
- " 255 will not output any. Current OpenPose library messages are in the range 0-4: 1 for"
- " low priority messages and 4 for important ones.");
-DEFINE_bool(disable_multi_thread, false, "It would slightly reduce the frame rate in order to highly reduce the lag. Mainly useful"
- " for 1) Cases where it is needed a low latency (e.g. webcam in real-time scenarios with"
- " low-range GPU devices); and 2) Debugging OpenPose when it is crashing to locate the"
- " error.");
-DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some"
- " runtime statistics at this frame number.");
-// Producer
-DEFINE_int32(camera, -1, "The camera index for cv::VideoCapture. Integer in the range [0, 9]. Select a negative"
- " number (by default), to auto-detect and open the first available camera.");
-DEFINE_string(camera_resolution, "-1x-1", "Set the camera resolution (either `--camera` or `--flir_camera`). `-1x-1` will use the"
- " default 1280x720 for `--camera`, or the maximum flir camera resolution available for"
- " `--flir_camera`");
-DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (also used when saving video). Set this value to the minimum"
- " value between the OpenPose displayed speed and the webcam real frame rate.");
-DEFINE_string(video, "", "Use a video file instead of the camera. Use `examples/media/video.avi` for our default"
- " example video.");
-DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
- " images. Read all standard formats (jpg, png, bmp, etc.).");
-DEFINE_bool(flir_camera, false, "Whether to use FLIR (Point-Grey) stereo camera.");
-DEFINE_int32(flir_camera_index, -1, "Select -1 (default) to run on all detected flir cameras at once. Otherwise, select the flir"
- " camera index to run, where 0 corresponds to the detected flir camera with the lowest"
- " serial number, and `n` to the `n`-th lowest serial number camera.");
-DEFINE_string(ip_camera, "", "String with the IP camera URL. It supports protocols like RTSP and HTTP.");
-DEFINE_uint64(frame_first, 0, "Start on desired frame number. Indexes are 0-based, i.e. the first frame has index 0.");
-DEFINE_uint64(frame_last, -1, "Finish on desired frame number. Select -1 to disable. Indexes are 0-based, e.g. if set to"
- " 10, it will process 11 frames (0-10).");
-DEFINE_bool(frame_flip, false, "Flip/mirror each frame (e.g. for real time webcam demonstrations).");
-DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270.");
-DEFINE_bool(frames_repeat, false, "Repeat frames when finished.");
-DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
- " too long, it will skip frames. If it is too fast, it will slow it down.");
-DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String with the folder where the camera parameters are located.");
-DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the"
- " `camera_parameter_folder` camera parameters; if true, it will not undistortionate, i.e.,"
- " it will leave it as it is.");
-// OpenPose
-DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
-DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
- " input image resolution.");
-DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
- " machine.");
-DEFINE_int32(num_gpu_start, 0, "GPU device start number.");
-DEFINE_int32(keypoint_scale, 0, "Scaling of the (x,y) coordinates of the final pose data array, i.e. the scale of the (x,y)"
- " coordinates that will be saved with the `write_json` & `write_keypoint` flags."
- " Select `0` to scale it to the original source resolution; `1`to scale it to the net output"
- " size (set with `net_resolution`); `2` to scale it to the final output size (set with"
- " `resolution`); `3` to scale it in the range [0,1], where (0,0) would be the top-left"
- " corner of the image, and (1,1) the bottom-right one; and 4 for range [-1,1], where"
- " (-1,-1) would be the top-left corner of the image, and (1,1) the bottom-right one. Non"
- " related with `scale_number` and `scale_gap`.");
-DEFINE_int32(number_people_max, -1, "This parameter will limit the maximum number of people detected, by keeping the people with"
- " top scores. The score is based in person area over the image, body part score, as well as"
- " joint score (between each pair of connected body parts). Useful if you know the exact"
- " number of people in the scene, so it can remove false positives (if all the people have"
- " been detected. However, it might also include false negatives by removing very small or"
- " highly occluded people. -1 will keep them all.");
-// OpenPose Body Pose
-DEFINE_bool(body_disable, false, "Disable body keypoint detection. Option only possible for faster (but less accurate) face"
- " keypoint detection.");
-DEFINE_string(model_pose, "BODY_25", "Model to be used. E.g. `COCO` (18 keypoints), `MPI` (15 keypoints, ~10% faster), "
- "`MPI_4_layers` (15 keypoints, even faster but less accurate).");
-DEFINE_string(net_resolution, "-1x368", "Multiples of 16. If it is increased, the accuracy potentially increases. If it is"
- " decreased, the speed increases. For maximum speed-accuracy balance, it should keep the"
- " closest aspect ratio possible to the images or videos to be processed. Using `-1` in"
- " any of the dimensions, OP will choose the optimal aspect ratio depending on the user's"
- " input value. E.g. the default `-1x368` is equivalent to `656x368` in 16:9 resolutions,"
- " e.g. full HD (1980x1080) and HD (1280x720) resolutions.");
-DEFINE_int32(scale_number, 1, "Number of scales to average.");
-DEFINE_double(scale_gap, 0.3, "Scale gap between scales. No effect unless scale_number > 1. Initial scale is always 1."
- " If you want to change the initial scale, you actually want to multiply the"
- " `net_resolution` by your desired initial scale.");
-// OpenPose Body Pose Heatmaps and Part Candidates
-DEFINE_bool(heatmaps_add_parts, false, "If true, it will fill op::Datum::poseHeatMaps array with the body part heatmaps, and"
- " analogously face & hand heatmaps to op::Datum::faceHeatMaps & op::Datum::handHeatMaps."
- " If more than one `add_heatmaps_X` flag is enabled, it will place then in sequential"
- " memory order: body parts + bkg + PAFs. It will follow the order on"
- " POSE_BODY_PART_MAPPING in `src/openpose/pose/poseParameters.cpp`. Program speed will"
- " considerably decrease. Not required for OpenPose, enable it only if you intend to"
- " explicitly use this information later.");
-DEFINE_bool(heatmaps_add_bkg, false, "Same functionality as `add_heatmaps_parts`, but adding the heatmap corresponding to"
- " background.");
-DEFINE_bool(heatmaps_add_PAFs, false, "Same functionality as `add_heatmaps_parts`, but adding the PAFs.");
-DEFINE_int32(heatmaps_scale, 2, "Set 0 to scale op::Datum::poseHeatMaps in the range [-1,1], 1 for [0,1]; 2 for integer"
- " rounded [0,255]; and 3 for no scaling.");
-DEFINE_bool(part_candidates, false, "Also enable `write_json` in order to save this information. If true, it will fill the"
- " op::Datum::poseCandidates array with the body part candidates. Candidates refer to all"
- " the detected body parts, before being assembled into people. Note that the number of"
- " candidates is equal or higher than the number of final body parts (i.e. after being"
- " assembled into people). The empty body parts are filled with 0s. Program speed will"
- " slightly decrease. Not required for OpenPose, enable it only if you intend to explicitly"
- " use this information.");
-// OpenPose Face
-DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Note that this will considerable slow down the performance and increse"
- " the required GPU memory. In addition, the greater number of people on the image, the"
- " slower OpenPose will be.");
-DEFINE_string(face_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the face keypoint"
- " detector. 320x320 usually works fine while giving a substantial speed up when multiple"
- " faces on the image.");
-// OpenPose Hand
-DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
- " `model_folder`. Analogously to `--face`, it will also slow down the performance, increase"
- " the required GPU memory and its speed depends on the number of people.");
-DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16 and squared. Analogous to `net_resolution` but applied to the hand keypoint"
- " detector.");
-DEFINE_int32(hand_scale_number, 1, "Analogous to `scale_number` but applied to the hand keypoint detector. Our best results"
- " were found with `hand_scale_number` = 6 and `hand_scale_range` = 0.4.");
-DEFINE_double(hand_scale_range, 0.4, "Analogous purpose than `scale_gap` but applied to the hand keypoint detector. Total range"
- " between smallest and biggest scale. The scales will be centered in ratio 1. E.g. if"
- " scaleRange = 0.4 and scalesNumber = 2, then there will be 2 scales, 0.8 and 1.2.");
-DEFINE_bool(hand_tracking, false, "Adding hand tracking might improve hand keypoints detection for webcam (if the frame rate"
- " is high enough, i.e. >7 FPS per GPU) and video. This is not person ID tracking, it"
- " simply looks for hands in positions at which hands were located in previous frames, but"
- " it does not guarantee the same person ID among frames.");
-// OpenPose 3-D Reconstruction
-DEFINE_bool(3d, false, "Running OpenPose 3-D reconstruction demo: 1) Reading from a stereo camera system."
- " 2) Performing 3-D reconstruction from the multiple views. 3) Displaying 3-D reconstruction"
- " results. Note that it will only display 1 person. If multiple people is present, it will"
- " fail.");
-DEFINE_int32(3d_min_views, -1, "Minimum number of views required to reconstruct each keypoint. By default (-1), it will"
- " require all the cameras to see the keypoint in order to reconstruct it.");
-DEFINE_int32(3d_views, 1, "Complementary option to `--image_dir` or `--video`. OpenPose will read as many images per"
- " iteration, allowing tasks such as stereo camera processing (`--3d`). Note that"
- " `--camera_parameters_folder` must be set. OpenPose must find as many `xml` files in the"
- " parameter folder as this number indicates.");
-// Extra algorithms
-DEFINE_bool(identification, false, "Experimental, not available yet. Whether to enable people identification across frames.");
-DEFINE_int32(tracking, -1, "Experimental, not available yet. Whether to enable people tracking across frames. The"
- " value indicates the number of frames where tracking is run between each OpenPose keypoint"
- " detection. Select -1 (default) to disable it or 0 to run simultaneously OpenPose keypoint"
- " detector and tracking for potentially higher accurary than only OpenPose.");
-DEFINE_int32(ik_threads, 0, "Experimental, not available yet. Whether to enable inverse kinematics (IK) from 3-D"
- " keypoints to obtain 3-D joint angles. By default (0 threads), it is disabled. Increasing"
- " the number of threads will increase the speed but also the global system latency.");
-// OpenPose Rendering
-DEFINE_int32(part_to_show, 0, "Prediction channel to visualize (default: 0). 0 for all the body parts, 1-18 for each body"
- " part heat map, 19 for the background heat map, 20 for all the body part heat maps"
- " together, 21 for all the PAFs, 22-40 for each body part pair PAF.");
-DEFINE_bool(disable_blending, false, "If enabled, it will render the results (keypoint skeletons or heatmaps) on a black"
- " background, instead of being rendered into the original image. Related: `part_to_show`,"
- " `alpha_pose`, and `alpha_pose`.");
-// OpenPose Rendering Pose
-DEFINE_double(render_threshold, 0.05, "Only estimated keypoints whose score confidences are higher than this threshold will be"
- " rendered. Generally, a high threshold (> 0.5) will only render very clear body parts;"
- " while small thresholds (~0.1) will also output guessed and occluded keypoints, but also"
- " more false positives (i.e. wrong detections).");
-DEFINE_int32(render_pose, -1, "Set to 0 for no rendering, 1 for CPU rendering (slightly faster), and 2 for GPU rendering"
- " (slower but greater functionality, e.g. `alpha_X` flags). If -1, it will pick CPU if"
- " CPU_ONLY is enabled, or GPU if CUDA is enabled. If rendering is enabled, it will render"
- " both `outputData` and `cvOutputData` with the original image and desired body part to be"
- " shown (i.e. keypoints, heat maps or PAFs).");
-DEFINE_double(alpha_pose, 0.6, "Blending factor (range 0-1) for the body part rendering. 1 will show it completely, 0 will"
- " hide it. Only valid for GPU rendering.");
-DEFINE_double(alpha_heatmap, 0.7, "Blending factor (range 0-1) between heatmap and original frame. 1 will only show the"
- " heatmap, 0 will only show the frame. Only valid for GPU rendering.");
-// OpenPose Rendering Face
-DEFINE_double(face_render_threshold, 0.4, "Analogous to `render_threshold`, but applied to the face keypoints.");
-DEFINE_int32(face_render, -1, "Analogous to `render_pose` but applied to the face. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(face_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to face.");
-DEFINE_double(face_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
-// OpenPose Rendering Hand
-DEFINE_double(hand_render_threshold, 0.2, "Analogous to `render_threshold`, but applied to the hand keypoints.");
-DEFINE_int32(hand_render, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
- " configuration that `render_pose` is using.");
-DEFINE_double(hand_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to hand.");
-DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
-// Result Saving
-DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
-DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
- " function cv::imwrite for all compatible extensions.");
-DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
- " final path does not finish in `.avi`. It internally uses cv::VideoWriter. Flag"
- " `camera_fps` controls FPS.");
-DEFINE_string(write_json, "", "Directory to write OpenPose output in JSON format. It includes body, hand, and face pose"
- " keypoints (2-D and 3-D), as well as pose candidates (if `--part_candidates` enabled).");
-DEFINE_string(write_coco_json, "", "Full file path to write people pose data with JSON COCO validation format.");
-DEFINE_string(write_coco_foot_json, "", "Full file path to write people foot pose data with JSON COCO validation format.");
-DEFINE_string(write_heatmaps, "", "Directory to write body pose heatmaps in PNG format. At least 1 `add_heatmaps_X` flag"
- " must be enabled.");
-DEFINE_string(write_heatmaps_format, "png", "File extension and format for `write_heatmaps`, analogous to `write_images_format`."
- " For lossless compression, recommended `png` for integer `heatmaps_scale` and `float` for"
- " floating values.");
-DEFINE_string(write_keypoint, "", "(Deprecated, use `write_json`) Directory to write the people pose keypoint data. Set format"
- " with `write_keypoint_format`.");
-DEFINE_string(write_keypoint_format, "yml", "(Deprecated, use `write_json`) File extension and format for `write_keypoint`: json, xml,"
- " yaml & yml. Json not available for OpenCV < 3.0, use `write_json` instead.");
-// Result Saving - Extra Algorithms
-DEFINE_string(write_video_adam, "", "Experimental, not available yet. E.g.: `~/Desktop/adamResult.avi`. Flag `camera_fps`"
- " controls FPS.");
-DEFINE_string(write_bvh, "", "Experimental, not available yet. E.g.: `~/Desktop/mocapResult.bvh`.");
-// UDP communication
-DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`.");
-DEFINE_string(udp_port, "8051", "Experimental, not available yet. Port number for UDP communication.");
-
-
-// If the user needs his own variables, he can inherit the op::Datum struct and add them
-// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
-// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
-struct UserDatum : public op::Datum
-{
- bool boolThatUserNeedsForSomeReason;
-
- UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
- boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
- {}
-};
-
-// The W-classes can be implemented either as a template or as simple classes given
-// that the user usually knows which kind of data he will move between the queues,
-// in this case we assume a std::shared_ptr of a std::vector of UserDatum
-
-// This worker will just display the rendered output and print the estimated keypoints
-class UserOutputClass
-{
-public:
- bool display(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
- {
- // User's displaying/saving/other processing here
- // datum.cvOutputData: rendered frame with pose or heatmaps
- // datum.poseKeypoints: Array with the estimated pose
- char key = ' ';
- if (datumsPtr != nullptr && !datumsPtr->empty())
- {
- cv::imshow("User worker GUI", datumsPtr->at(0).cvOutputData);
- // Display image and sleeps at least 1 ms (it usually sleeps ~5-10 msec to display the image)
- key = (char)cv::waitKey(1);
- }
- else
- op::log("Nullptr or empty datumsPtr found.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
- return (key == 27);
- }
- void printKeypoints(const std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
- {
- // Example: How to use the pose keypoints
- if (datumsPtr != nullptr && !datumsPtr->empty())
- {
- op::log("\nKeypoints:");
- // Accessing each element of the keypoints
- const auto& poseKeypoints = datumsPtr->at(0).poseKeypoints;
- op::log("Person pose keypoints:");
- for (auto person = 0 ; person < poseKeypoints.getSize(0) ; person++)
- {
- op::log("Person " + std::to_string(person) + " (x, y, score):");
- for (auto bodyPart = 0 ; bodyPart < poseKeypoints.getSize(1) ; bodyPart++)
- {
- std::string valueToPrint;
- for (auto xyscore = 0 ; xyscore < poseKeypoints.getSize(2) ; xyscore++)
- {
- valueToPrint += std::to_string( poseKeypoints[{person, bodyPart, xyscore}] ) + " ";
- }
- op::log(valueToPrint);
- }
- }
- op::log(" ");
- // Alternative: just getting std::string equivalent
- op::log("Face keypoints: " + datumsPtr->at(0).faceKeypoints.toString());
- op::log("Left hand keypoints: " + datumsPtr->at(0).handKeypoints[0].toString());
- op::log("Right hand keypoints: " + datumsPtr->at(0).handKeypoints[1].toString());
- // Heatmaps
- const auto& poseHeatMaps = datumsPtr->at(0).poseHeatMaps;
- if (!poseHeatMaps.empty())
- {
- op::log("Pose heatmaps size: [" + std::to_string(poseHeatMaps.getSize(0)) + ", "
- + std::to_string(poseHeatMaps.getSize(1)) + ", "
- + std::to_string(poseHeatMaps.getSize(2)) + "]");
- const auto& faceHeatMaps = datumsPtr->at(0).faceHeatMaps;
- op::log("Face heatmaps size: [" + std::to_string(faceHeatMaps.getSize(0)) + ", "
- + std::to_string(faceHeatMaps.getSize(1)) + ", "
- + std::to_string(faceHeatMaps.getSize(2)) + ", "
- + std::to_string(faceHeatMaps.getSize(3)) + "]");
- const auto& handHeatMaps = datumsPtr->at(0).handHeatMaps;
- op::log("Left hand heatmaps size: [" + std::to_string(handHeatMaps[0].getSize(0)) + ", "
- + std::to_string(handHeatMaps[0].getSize(1)) + ", "
- + std::to_string(handHeatMaps[0].getSize(2)) + ", "
- + std::to_string(handHeatMaps[0].getSize(3)) + "]");
- op::log("Right hand heatmaps size: [" + std::to_string(handHeatMaps[1].getSize(0)) + ", "
- + std::to_string(handHeatMaps[1].getSize(1)) + ", "
- + std::to_string(handHeatMaps[1].getSize(2)) + ", "
- + std::to_string(handHeatMaps[1].getSize(3)) + "]");
- }
- }
- else
- op::log("Nullptr or empty datumsPtr found.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
- }
-};
-
-int openPoseTutorialWrapper1()
-{
- try
- {
- op::log("Starting OpenPose demo...", op::Priority::High);
- const auto timerBegin = std::chrono::high_resolution_clock::now();
-
- // logging_level
- op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
- __LINE__, __FUNCTION__, __FILE__);
- op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- op::Profiler::setDefaultX(FLAGS_profile_speed);
-
- // Applying user defined configuration - Google flags to program variables
- // outputSize
- const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
- // netInputSize
- const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
- // faceNetInputSize
- const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
- // handNetInputSize
- const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
- // producerType
- const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
- FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps,
- FLAGS_camera_parameter_folder, !FLAGS_frame_keep_distortion,
- (unsigned int) FLAGS_3d_views, FLAGS_flir_camera_index);
- // poseModel
- const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
- // JSON saving
- if (!FLAGS_write_keypoint.empty())
- op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
- " Please, use `write_json` instead.", op::Priority::Max);
- // keypointScale
- const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
- // heatmaps to add
- const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
- FLAGS_heatmaps_add_PAFs);
- const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
- // >1 camera view?
- const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1 || FLAGS_flir_camera);
- // Enabling Google Logging
- const bool enableGoogleLogging = true;
- // Logging
- op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
-
- // Configure OpenPose
- op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
- op::Wrapper<std::vector<UserDatum>> opWrapper{op::ThreadManagerMode::AsynchronousOut};
- // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
- const op::WrapperStructPose wrapperStructPose{
- !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
- FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
- poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
- FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
- (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
- // Face configuration (use op::WrapperStructFace{} to disable it)
- const op::WrapperStructFace wrapperStructFace{
- FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
- (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
- // Hand configuration (use op::WrapperStructHand{} to disable it)
- const op::WrapperStructHand wrapperStructHand{
- FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
- op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
- (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
- // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
- const op::WrapperStructExtra wrapperStructExtra{
- FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
- // Producer (use default to disable any input)
- const op::WrapperStructInput wrapperStructInput{
- producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last, FLAGS_process_real_time, FLAGS_frame_flip,
- FLAGS_frame_rotate, FLAGS_frames_repeat};
- // Consumer (comment or use default argument to disable any output)
- const auto displayMode = op::DisplayMode::NoDisplay;
- const bool guiVerbose = false;
- const bool fullScreen = false;
- const op::WrapperStructOutput wrapperStructOutput{
- displayMode, guiVerbose, fullScreen, FLAGS_write_keypoint,
- op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json, FLAGS_write_coco_json,
- FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
- FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format, FLAGS_write_video_adam,
- FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
- // Configure wrapper
- opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
- wrapperStructInput, wrapperStructOutput);
- // Set to single-thread running (to debug and/or reduce latency)
- if (FLAGS_disable_multi_thread)
- opWrapper.disableMultiThreading();
-
- op::log("Starting thread(s)...", op::Priority::High);
- opWrapper.start();
-
- // User processing
- UserOutputClass userOutputClass;
- bool userWantsToExit = false;
- while (!userWantsToExit)
- {
- // Pop frame
- std::shared_ptr<std::vector<UserDatum>> datumProcessed;
- if (opWrapper.waitAndPop(datumProcessed))
- {
- userWantsToExit = userOutputClass.display(datumProcessed);
- userOutputClass.printKeypoints(datumProcessed);
- }
- else
- op::log("Processed datum could not be emplaced.", op::Priority::High, __LINE__, __FUNCTION__, __FILE__);
- }
-
- op::log("Stopping thread(s)", op::Priority::High);
- opWrapper.stop();
-
- // Measuring total time
- const auto now = std::chrono::high_resolution_clock::now();
- const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
- * 1e-9;
- const auto message = "OpenPose demo successfully finished. Total time: "
- + std::to_string(totalTimeSec) + " seconds.";
- op::log(message, op::Priority::High);
-
- // Return successful message
- return 0;
- }
- catch (const std::exception& e)
- {
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- return -1;
- }
-}
-
-int main(int argc, char *argv[])
-{
- // Parsing command line flags
- gflags::ParseCommandLineFlags(&argc, &argv, true);
-
- // Running openPoseTutorialWrapper1
- return openPoseTutorialWrapper1();
-}
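The second removed file covers the output-only variant (`ThreadManagerMode::AsynchronousOut`): OpenPose reads the input itself through `WrapperStructInput`, and the user only pops processed frames. A minimal sketch of that consumption loop, written as a helper templated on the datum type; the wrapper is assumed to be already configured and started as in the deleted example, and the helper name is illustrative only:

```cpp
#include <memory>                 // std::shared_ptr
#include <vector>                 // std::vector
#include <opencv2/opencv.hpp>     // cv::imshow, cv::waitKey
#include <openpose/headers.hpp>   // op::Wrapper

// Pops and displays processed frames from a wrapper running in AsynchronousOut mode.
// Returns when the user presses ESC or when no more frames can be popped.
template <typename TDatum>
void popAndDisplayLoop(op::Wrapper<std::vector<TDatum>>& opWrapper)
{
    bool userWantsToExit = false;
    while (!userWantsToExit)
    {
        std::shared_ptr<std::vector<TDatum>> datumProcessed;
        // waitAndPop returns false if no processed datum could be retrieved (e.g. wrapper stopping)
        if (!opWrapper.waitAndPop(datumProcessed) || datumProcessed == nullptr || datumProcessed->empty())
            break;
        cv::imshow("User worker GUI", datumProcessed->at(0).cvOutputData);
        userWantsToExit = (cv::waitKey(1) == 27); // ESC key
    }
}
```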
diff --git a/examples/user_code/README.md b/examples/user_code/README.md
index 7bff0b6df..d907d1849 100644
--- a/examples/user_code/README.md
+++ b/examples/user_code/README.md
@@ -10,7 +10,7 @@ You can quickly add your custom code into this folder so that quick prototypes c
## How-to
1. Install/compile OpenPose as usual.
-2. Add your custom *.cpp / *.hpp files here,. Hint: You might want to start by copying the [OpenPoseDemo](../openpose/openpose.cpp) example or any of the [examples/tutorial_wrapper/](../tutorial_wrapper/) examples. Then, you can simply modify their content.
+2. Add your custom *.cpp / *.hpp files here. Hint: You might want to start by copying the [OpenPoseDemo](../openpose/openpose.cpp) example or any of the [examples/tutorial_api_cpp/](../tutorial_api_cpp/) examples. Then, you can simply modify their content.
3. Add the name of your custom *.cpp / *.hpp files at the top of the [examples/user_code/CMakeLists.txt](./CMakeLists.txt) file.
4. Re-compile OpenPose.
```
diff --git a/include/openpose/core/datum.hpp b/include/openpose/core/datum.hpp
index 1cb168eff..d1bc36227 100644
--- a/include/openpose/core/datum.hpp
+++ b/include/openpose/core/datum.hpp
@@ -106,7 +106,7 @@ namespace op
* Order heatmaps: body parts + background (as appears in POSE_BODY_PART_MAPPING) + (x,y) channel of each PAF
* (sorted as appears in POSE_BODY_PART_PAIRS). See `pose/poseParameters.hpp`.
* The user can choose the heatmaps normalization: ranges [0, 1], [-1, 1] or [0, 255]. Check the
- * `heatmaps_scale` flag in the examples/tutorial_wrapper/ for more details.
+ * `heatmaps_scale` flag in {OpenPose_path}doc/demo_overview.md for more details.
* Size: #heatmaps x output_net_height x output_net_width
*/
Array<float> poseHeatMaps;
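As a quick reference for the layout documented above, a hedged sketch of traversing `poseHeatMaps` once heatmaps are enabled (e.g. `--heatmaps_add_parts`); the helper name is illustrative only:

```cpp
#include <openpose/headers.hpp> // op::Datum, op::Array

// Illustrative helper: sums every value of the 3-D poseHeatMaps array
// (size: #heatmaps x output_net_height x output_net_width). The value range
// depends on the `heatmaps_scale` flag ([0,1], [-1,1] or [0,255]).
float sumPoseHeatMaps(const op::Datum& datum)
{
    const auto& heatMaps = datum.poseHeatMaps;
    auto sum = 0.f;
    if (!heatMaps.empty())
        for (auto map = 0; map < heatMaps.getSize(0); map++)
            for (auto y = 0; y < heatMaps.getSize(1); y++)
                for (auto x = 0; x < heatMaps.getSize(2); x++)
                    sum += heatMaps[{map, y, x}];
    return sum;
}
```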
diff --git a/include/openpose/core/macros.hpp b/include/openpose/core/macros.hpp
index 7b8f849d1..2755fb2e7 100644
--- a/include/openpose/core/macros.hpp
+++ b/include/openpose/core/macros.hpp
@@ -1,9 +1,11 @@
#ifndef OPENPOSE_CORE_MACROS_HPP
#define OPENPOSE_CORE_MACROS_HPP
+#include <chrono> // std::chrono:: functionality, e.g., std::chrono::milliseconds
#include <memory> // std::shared_ptr
#include
#include
+#include <thread> // std::this_thread
#include
// OpenPose name and version
diff --git a/include/openpose/face/faceExtractorNet.hpp b/include/openpose/face/faceExtractorNet.hpp
index b09cbcc72..a717fdba1 100644
--- a/include/openpose/face/faceExtractorNet.hpp
+++ b/include/openpose/face/faceExtractorNet.hpp
@@ -2,7 +2,6 @@
#define OPENPOSE_FACE_FACE_EXTRACTOR_HPP
#include
-#include
#include <opencv2/core/core.hpp> // cv::Mat
#include
#include
diff --git a/examples/tutorial_wrapper/1_user_synchronous_postprocessing.cpp b/include/openpose/flags.hpp
similarity index 68%
rename from examples/tutorial_wrapper/1_user_synchronous_postprocessing.cpp
rename to include/openpose/flags.hpp
index f2f3acba4..d4592e869 100644
--- a/examples/tutorial_wrapper/1_user_synchronous_postprocessing.cpp
+++ b/include/openpose/flags.hpp
@@ -1,33 +1,16 @@
-// ------------------------- OpenPose Library Tutorial - Real Time Pose Estimation -------------------------
-// If the user wants to learn to use the OpenPose library, we highly recommend to start with the `examples/tutorial_*/`
-// folders.
-// This example summarizes all the functionality of the OpenPose library:
- // 1. Read folder of images / video / webcam (`producer` module)
- // 2. Extract and render body keypoint / heatmap / PAF of that image (`pose` module)
- // 3. Extract and render face keypoint / heatmap / PAF of that image (`face` module)
- // 4. Save the results on disk (`filestream` module)
- // 5. Display the rendered pose (`gui` module)
- // Everything in a multi-thread scenario (`thread` module)
- // Points 2 to 5 are included in the `wrapper` module
-// In addition to the previous OpenPose modules, we also need to use:
- // 1. `core` module:
- // For the Array class that the `pose` module needs
- // For the Datum struct that the `thread` module sends between the queues
- // 2. `utilities` module: for the error & logging functions, i.e. op::error & op::log respectively
-// This file is only intended to provide the user with specific examples.
+#ifndef OPENPOSE_FLAGS_HPP
+#define OPENPOSE_FLAGS_HPP
+
+// Note: This header is not included within the basic OpenPose `headers.hpp` and must be explicitly included. In
+// addition, the Google Flags library must also be linked to the resulting binary or library. The OpenPose library
+// does not force the use of Google Flags, but the OpenPose examples do use it.
-// C++ std library dependencies
-#include <chrono> // `std::chrono::` functions and classes, e.g. std::chrono::milliseconds
-#include <thread> // std::this_thread
-// Other 3rdparty dependencies
// GFlags: DEFINE_bool, _int32, _int64, _uint64, _double, _string
#include <gflags/gflags.h>
// Allow Google Flags in Ubuntu 14
#ifndef GFLAGS_GFLAGS_H_
namespace gflags = google;
#endif
-// OpenPose dependencies
-#include <openpose/headers.hpp>
// See all the available parameter options with the `--help` flag. E.g. `build/examples/openpose/openpose.bin --help`
// Note: This command will show you flags for other unnecessary 3rdparty files. Check only the flags for the OpenPose
@@ -42,14 +25,18 @@ DEFINE_bool(disable_multi_thread, false, "It would slightly reduc
" error.");
DEFINE_int32(profile_speed, 1000, "If PROFILER_ENABLED was set in CMake or Makefile.config files, OpenPose will show some"
" runtime statistics at this frame number.");
+#ifndef OPENPOSE_FLAGS_DISABLE_POSE
+#ifndef OPENPOSE_FLAGS_DISABLE_PRODUCER
// Producer
DEFINE_int32(camera, -1, "The camera index for cv::VideoCapture. Integer in the range [0, 9]. Select a negative"
" number (by default), to auto-detect and open the first available camera.");
DEFINE_string(camera_resolution, "-1x-1", "Set the camera resolution (either `--camera` or `--flir_camera`). `-1x-1` will use the"
" default 1280x720 for `--camera`, or the maximum flir camera resolution available for"
" `--flir_camera`");
+#endif // OPENPOSE_FLAGS_DISABLE_PRODUCER
DEFINE_double(camera_fps, 30.0, "Frame rate for the webcam (also used when saving video). Set this value to the minimum"
" value between the OpenPose displayed speed and the webcam real frame rate.");
+#ifndef OPENPOSE_FLAGS_DISABLE_PRODUCER
DEFINE_string(video, "", "Use a video file instead of the camera. Use `examples/media/video.avi` for our default"
" example video.");
DEFINE_string(image_dir, "", "Process a directory of images. Use `examples/media/` for our default example folder with 20"
@@ -71,6 +58,7 @@ DEFINE_string(camera_parameter_folder, "models/cameraParameters/flir/", "String
DEFINE_bool(frame_keep_distortion, false, "If false (default), it will undistortionate the image based on the"
" `camera_parameter_folder` camera parameters; if true, it will not undistortionate, i.e.,"
" it will leave it as it is.");
+#endif // OPENPOSE_FLAGS_DISABLE_PRODUCER
// OpenPose
DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
DEFINE_string(output_resolution, "-1x-1", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
@@ -203,6 +191,7 @@ DEFINE_int32(hand_render, -1, "Analogous to `render_po
" configuration that `render_pose` is using.");
DEFINE_double(hand_alpha_pose, 0.6, "Analogous to `alpha_pose` but applied to hand.");
DEFINE_double(hand_alpha_heatmap, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
+#ifndef OPENPOSE_FLAGS_DISABLE_DISPLAY
// Display
DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle).");
DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g. number of current frame and people). It"
@@ -210,6 +199,7 @@ DEFINE_bool(no_gui_verbose, false, "Do not write text on ou
DEFINE_int32(display, -1, "Display mode: -1 for automatic selection; 0 for no display (useful if there is no X server"
" and/or to slightly speed up the processing if visual output is not required); 2 for 2-D"
" display; 3 for 3-D display (if `--3d` enabled); and 1 for both 2-D and 3-D display.");
+#endif // OPENPOSE_FLAGS_DISABLE_DISPLAY
// Result Saving
DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
@@ -237,204 +227,6 @@ DEFINE_string(write_bvh, "", "Experimental, not avail
// UDP communication
DEFINE_string(udp_host, "", "Experimental, not available yet. IP for UDP communication. E.g., `192.168.0.1`.");
DEFINE_string(udp_port, "8051", "Experimental, not available yet. Port number for UDP communication.");
+#endif // OPENPOSE_FLAGS_DISABLE_POSE
-
-// If the user needs his own variables, he can inherit the op::Datum struct and add them
-// UserDatum can be directly used by the OpenPose wrapper because it inherits from op::Datum, just define
-// Wrapper<std::vector<UserDatum>> instead of Wrapper<std::vector<op::Datum>>
-struct UserDatum : public op::Datum
-{
- bool boolThatUserNeedsForSomeReason;
-
- UserDatum(const bool boolThatUserNeedsForSomeReason_ = false) :
- boolThatUserNeedsForSomeReason{boolThatUserNeedsForSomeReason_}
- {}
-};
-
-// The W-classes can be implemented either as a template or as simple classes given
-// that the user usually knows which kind of data he will move between the queues,
-// in this case we assume a std::shared_ptr of a std::vector of UserDatum
-
-// This worker will just invert the image
-class WUserPostProcessing : public op::Worker<std::shared_ptr<std::vector<UserDatum>>>
-{
-public:
- WUserPostProcessing()
- {
- // User's constructor here
- }
-
- void initializationOnThread() {}
-
- void work(std::shared_ptr<std::vector<UserDatum>>& datumsPtr)
- {
- // User's post-processing (after OpenPose processing & before OpenPose outputs) here
- // datum.cvOutputData: rendered frame with pose or heatmaps
- // datum.poseKeypoints: Array with the estimated pose
- try
- {
- if (datumsPtr != nullptr && !datumsPtr->empty())
- for (auto& datum : *datumsPtr)
- cv::bitwise_not(datum.cvOutputData, datum.cvOutputData);
- }
- catch (const std::exception& e)
- {
- this->stop();
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- }
- }
-};
-
-int openPoseDemo()
-{
- try
- {
- op::log("Starting OpenPose demo...", op::Priority::High);
- const auto timerBegin = std::chrono::high_resolution_clock::now();
-
- // logging_level
- op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
- __LINE__, __FUNCTION__, __FILE__);
- op::ConfigureLog::setPriorityThreshold((op::Priority)FLAGS_logging_level);
- op::Profiler::setDefaultX(FLAGS_profile_speed);
- // // For debugging
- // // Print all logging messages
- // op::ConfigureLog::setPriorityThreshold(op::Priority::None);
- // // Print out speed values faster
- // op::Profiler::setDefaultX(100);
-
- // Applying user defined configuration - Google flags to program variables
- // outputSize
- const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");
- // netInputSize
- const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");
- // faceNetInputSize
- const auto faceNetInputSize = op::flagsToPoint(FLAGS_face_net_resolution, "368x368 (multiples of 16)");
- // handNetInputSize
- const auto handNetInputSize = op::flagsToPoint(FLAGS_hand_net_resolution, "368x368 (multiples of 16)");
- // producerType
- const auto producerSharedPtr = op::flagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_ip_camera, FLAGS_camera,
- FLAGS_flir_camera, FLAGS_camera_resolution, FLAGS_camera_fps,
- FLAGS_camera_parameter_folder, !FLAGS_frame_keep_distortion,
- (unsigned int) FLAGS_3d_views, FLAGS_flir_camera_index);
- // poseModel
- const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
- // JSON saving
- if (!FLAGS_write_keypoint.empty())
- op::log("Flag `write_keypoint` is deprecated and will eventually be removed."
- " Please, use `write_json` instead.", op::Priority::Max);
- // keypointScale
- const auto keypointScale = op::flagsToScaleMode(FLAGS_keypoint_scale);
- // heatmaps to add
- const auto heatMapTypes = op::flagsToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg,
- FLAGS_heatmaps_add_PAFs);
- const auto heatMapScale = op::flagsToHeatMapScaleMode(FLAGS_heatmaps_scale);
- // >1 camera view?
- const auto multipleView = (FLAGS_3d || FLAGS_3d_views > 1 || FLAGS_flir_camera);
- // Enabling Google Logging
- const bool enableGoogleLogging = true;
- // Logging
- op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
-
- // OpenPose wrapper
- op::log("Configuring OpenPose wrapper...", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
- // op::Wrapper<std::vector<op::Datum>> opWrapper;
- op::Wrapper<std::vector<UserDatum>> opWrapper;
-
- // Initializing the user custom classes
- // Processing
- auto wUserPostProcessing = std::make_shared();
- // Add custom processing
- const auto workerProcessingOnNewThread = true;
- opWrapper.setWorkerPostProcessing(wUserPostProcessing, workerProcessingOnNewThread);
-
- // Pose configuration (use WrapperStructPose{} for default and recommended configuration)
- const op::WrapperStructPose wrapperStructPose{
- !FLAGS_body_disable, netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
- FLAGS_scale_number, (float)FLAGS_scale_gap, op::flagsToRenderMode(FLAGS_render_pose, multipleView),
- poseModel, !FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
- FLAGS_part_to_show, FLAGS_model_folder, heatMapTypes, heatMapScale, FLAGS_part_candidates,
- (float)FLAGS_render_threshold, FLAGS_number_people_max, enableGoogleLogging};
- // Face configuration (use op::WrapperStructFace{} to disable it)
- const op::WrapperStructFace wrapperStructFace{
- FLAGS_face, faceNetInputSize, op::flagsToRenderMode(FLAGS_face_render, multipleView, FLAGS_render_pose),
- (float)FLAGS_face_alpha_pose, (float)FLAGS_face_alpha_heatmap, (float)FLAGS_face_render_threshold};
- // Hand configuration (use op::WrapperStructHand{} to disable it)
- const op::WrapperStructHand wrapperStructHand{
- FLAGS_hand, handNetInputSize, FLAGS_hand_scale_number, (float)FLAGS_hand_scale_range, FLAGS_hand_tracking,
- op::flagsToRenderMode(FLAGS_hand_render, multipleView, FLAGS_render_pose), (float)FLAGS_hand_alpha_pose,
- (float)FLAGS_hand_alpha_heatmap, (float)FLAGS_hand_render_threshold};
- // Producer (use default to disable any input)
- const op::WrapperStructInput wrapperStructInput{
- producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last, FLAGS_process_real_time, FLAGS_frame_flip,
- FLAGS_frame_rotate, FLAGS_frames_repeat};
- // Extra functionality configuration (use op::WrapperStructExtra{} to disable it)
- const op::WrapperStructExtra wrapperStructExtra{
- FLAGS_3d, FLAGS_3d_min_views, FLAGS_identification, FLAGS_tracking, FLAGS_ik_threads};
- // Consumer (comment or use default argument to disable any output)
- const op::WrapperStructOutput wrapperStructOutput{
- op::flagsToDisplayMode(FLAGS_display, FLAGS_3d), !FLAGS_no_gui_verbose, FLAGS_fullscreen,
- FLAGS_write_keypoint, op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_json,
- FLAGS_write_coco_json, FLAGS_write_coco_foot_json, FLAGS_write_images, FLAGS_write_images_format,
- FLAGS_write_video, FLAGS_camera_fps, FLAGS_write_heatmaps, FLAGS_write_heatmaps_format,
- FLAGS_write_video_adam, FLAGS_write_bvh, FLAGS_udp_host, FLAGS_udp_port};
- // Configure wrapper
- opWrapper.configure(wrapperStructPose, wrapperStructFace, wrapperStructHand, wrapperStructExtra,
- wrapperStructInput, wrapperStructOutput);
- // Set to single-thread running (to debug and/or reduce latency)
- if (FLAGS_disable_multi_thread)
- opWrapper.disableMultiThreading();
-
- // Start processing
- // Two different ways of running the program on multithread environment
- op::log("Starting thread(s)...", op::Priority::High);
- // Start, run & stop threads - it blocks this thread until all others have finished
- opWrapper.exec();
-
- // // Option b) Keeping this thread free in case you want to do something else meanwhile, e.g. profiling the GPU
- // memory
- // // VERY IMPORTANT NOTE: if OpenCV is compiled with Qt support, this option will not work. Qt needs the main
- // // thread to plot visual results, so the final GUI (which uses OpenCV) would return an exception similar to:
- // // `QMetaMethod::invoke: Unable to invoke methods with return values in queued connections`
- // // Start threads
- // opWrapper.start();
- // // Profile used GPU memory
- // // 1: wait ~10sec so the memory has been totally loaded on GPU
- // // 2: profile the GPU memory
- // const auto sleepTimeMs = 10;
- // for (auto i = 0 ; i < 10000/sleepTimeMs && opWrapper.isRunning() ; i++)
- // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
- // op::Profiler::profileGpuMemory(__LINE__, __FUNCTION__, __FILE__);
- // // Keep program alive while running threads
- // while (opWrapper.isRunning())
- // std::this_thread::sleep_for(std::chrono::milliseconds{sleepTimeMs});
- // // Stop and join threads
- // op::log("Stopping thread(s)", op::Priority::High);
- // opWrapper.stop();
-
- // Measuring total time
- const auto now = std::chrono::high_resolution_clock::now();
- const auto totalTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(now-timerBegin).count()
- * 1e-9;
- const auto message = "OpenPose demo successfully finished. Total time: "
- + std::to_string(totalTimeSec) + " seconds.";
- op::log(message, op::Priority::High);
-
- // Return successful message
- return 0;
- }
- catch (const std::exception& e)
- {
- op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
- return -1;
- }
-}
-
-int main(int argc, char *argv[])
-{
- // Parsing command line flags
- gflags::ParseCommandLineFlags(&argc, &argv, true);
-
- // Running openPoseDemo
- return openPoseDemo();
-}
+#endif // OPENPOSE_FLAGS_HPP
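With the flags moved into `include/openpose/flags.hpp`, user examples can now include that header instead of copy-pasting the `DEFINE_*` block, and can compile out whole groups of flags via the `OPENPOSE_FLAGS_DISABLE_*` guards added above. A minimal sketch of the intended usage; the printed flag and the pose-model lookup are only illustrative:

```cpp
// Producer and display flags are not needed by this hypothetical user binary,
// so they are compiled out with the guards introduced in this header.
#define OPENPOSE_FLAGS_DISABLE_PRODUCER
#define OPENPOSE_FLAGS_DISABLE_DISPLAY
#include <openpose/flags.hpp>   // shared DEFINE_* flags (GFlags must be linked by the user)
#include <openpose/headers.hpp> // op::log, op::flagsToPoseModel

int main(int argc, char* argv[])
{
    // The user binary is still responsible for parsing the command line
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    op::log("net_resolution: " + FLAGS_net_resolution, op::Priority::High);
    const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
    (void)poseModel; // configure op::Wrapper with it, as in the examples above
    return 0;
}
```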
diff --git a/include/openpose/hand/handExtractorNet.hpp b/include/openpose/hand/handExtractorNet.hpp
index e5d17ff82..10ef59c9c 100644
--- a/include/openpose/hand/handExtractorNet.hpp
+++ b/include/openpose/hand/handExtractorNet.hpp
@@ -2,7 +2,6 @@
#define OPENPOSE_HAND_HAND_EXTRACTOR_HPP
#include
-#include
#include <opencv2/core/core.hpp> // cv::Mat
#include
#include
diff --git a/include/openpose/pose/bodyPartConnectorBase.hpp b/include/openpose/net/bodyPartConnectorBase.hpp
similarity index 100%
rename from include/openpose/pose/bodyPartConnectorBase.hpp
rename to include/openpose/net/bodyPartConnectorBase.hpp
diff --git a/include/openpose/pose/bodyPartConnectorCaffe.hpp b/include/openpose/net/bodyPartConnectorCaffe.hpp
similarity index 100%
rename from include/openpose/pose/bodyPartConnectorCaffe.hpp
rename to include/openpose/net/bodyPartConnectorCaffe.hpp
diff --git a/include/openpose/net/headers.hpp b/include/openpose/net/headers.hpp
index 1d8cc4127..93fceae03 100644
--- a/include/openpose/net/headers.hpp
+++ b/include/openpose/net/headers.hpp
@@ -2,6 +2,8 @@
#define OPENPOSE_NET_HEADERS_HPP
// net module
+#include <openpose/net/bodyPartConnectorBase.hpp>
+#include <openpose/net/bodyPartConnectorCaffe.hpp>
#include