From e7b8c0be5f9916524122d3117df641429497ee95 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Wed, 8 May 2024 12:55:47 +0530 Subject: [PATCH 01/49] timelapse sample --- base/CMakeLists.txt | 1 + base/include/MotionVectorExtractor.h | 3 +- base/src/MotionVectorExtractor.cpp | 53 +++++++++-- ...tionvector_extractor_and_overlay_tests.cpp | 91 ++++++++++++++++++- base/test/timelapse.cpp | 76 ++++++++++++++++ 5 files changed, 209 insertions(+), 15 deletions(-) create mode 100644 base/test/timelapse.cpp diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 9f2cd1470..2b775d2bc 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -562,6 +562,7 @@ SET(UT_FILES test/overlaymodule_tests.cpp test/testSignalGeneratorSrc_tests.cpp test/audioToTextXform_tests.cpp + test/timelapse.cpp ${ARM64_UT_FILES} ${CUDA_UT_FILES} ) diff --git a/base/include/MotionVectorExtractor.h b/base/include/MotionVectorExtractor.h index a50cd1b55..e0e11af3f 100644 --- a/base/include/MotionVectorExtractor.h +++ b/base/include/MotionVectorExtractor.h @@ -15,7 +15,7 @@ class MotionVectorExtractorProps : public ModuleProps OPENH264 }; - MotionVectorExtractorProps(MVExtractMethod _MVExtractMethod = MVExtractMethod::FFMPEG, bool _sendDecodedFrame = false, int _motionVectorThreshold = 2) : MVExtract(_MVExtractMethod), sendDecodedFrame(_sendDecodedFrame), motionVectorThreshold(_motionVectorThreshold) + MotionVectorExtractorProps(MVExtractMethod _MVExtractMethod = MVExtractMethod::FFMPEG, bool _sendDecodedFrame = false, int _motionVectorThreshold = 2, bool _sendOverlayFrame = true) : MVExtract(_MVExtractMethod), sendDecodedFrame(_sendDecodedFrame), motionVectorThreshold(_motionVectorThreshold), sendOverlayFrame(_sendOverlayFrame) { } @@ -24,6 +24,7 @@ class MotionVectorExtractorProps : public ModuleProps return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + sizeof(motionVectorThreshold); } bool sendDecodedFrame = false; + bool sendOverlayFrame = false; int 
motionVectorThreshold; MVExtractMethod MVExtract = MVExtractMethod::FFMPEG; private: diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index e274351b7..471c19fc8 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -23,6 +23,7 @@ class MvExtractDetailAbs makeFrameWithPinId = _makeFrameWithPinId; makeframe = _makeframe; sendDecodedFrame = props.sendDecodedFrame; + sendOverlayFrame = props.sendOverlayFrame; threshold = props.motionVectorThreshold; }; ~MvExtractDetailAbs() @@ -31,10 +32,11 @@ class MvExtractDetailAbs virtual void setProps(MotionVectorExtractorProps props) { sendDecodedFrame = props.sendDecodedFrame; + sendOverlayFrame = props.sendOverlayFrame; } virtual void getMotionVectors(frame_container& frames, frame_sp& outFrame, frame_sp& decodedFrame) = 0; virtual void initDecoder() = 0; -public: +public: int mWidth = 0; int mHeight = 0; std::string rawFramePinId; @@ -42,8 +44,10 @@ class MvExtractDetailAbs std::function makeframe; std::function makeFrameWithPinId; bool sendDecodedFrame = false; + bool sendOverlayFrame = true; int threshold; cv::Mat bgrImg; + bool motionFound = false; }; class DetailFfmpeg : public MvExtractDetailAbs { @@ -171,7 +175,7 @@ int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket* pkt, frame_container& fram { const AVMotionVector* mv = &mvs[i]; - if (std::abs(mv->motion_x) > threshold || std::abs(mv->motion_y) > threshold) + if (sendOverlayFrame == true && std::abs(mv->motion_x) > threshold || std::abs(mv->motion_y) > threshold) { LineOverlay lineOverlay; lineOverlay.x1 = mv->src_x; @@ -180,6 +184,8 @@ int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket* pkt, frame_container& fram lineOverlay.y2 = mv->dst_y; lineOverlays.push_back(lineOverlay); + + motionFound = true; } } @@ -202,6 +208,19 @@ int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket* pkt, frame_container& fram { outFrame = makeFrameWithPinId(0, motionVectorPinId); } + if (sendOverlayFrame == 
false) { + const AVMotionVector* mvs = (const AVMotionVector*)sideData->data; + for (int i = 0; i < sideData->size / sizeof(*mvs); i++) + { + const AVMotionVector* mv = &mvs[i]; + + if (std::abs(mv->motion_x) > threshold || std::abs(mv->motion_y) > threshold) + { + motionFound = true; + break; + } + } + } av_packet_unref(pkt); av_frame_unref(avFrame); return 0; @@ -232,6 +251,7 @@ void DetailOpenH264::initDecoder() void DetailOpenH264::getMotionVectors(frame_container& frames, frame_sp& outFrame, frame_sp& decodedFrame) { + motionFound = false; uint8_t* pData[3] = { NULL }; pData[0] = NULL; pData[1] = NULL; @@ -244,8 +264,8 @@ void DetailOpenH264::getMotionVectors(frame_container& frames, frame_sp& outFram int32_t mMotionVectorSize = mWidth * mHeight * 8; int16_t* mMotionVectorData = nullptr; memset(&pDstInfo, 0, sizeof(SBufferInfo)); - outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); - mMotionVectorData = static_cast(outFrame->data()); + //outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); + mMotionVectorData = static_cast(malloc(mMotionVectorSize*sizeof(int16_t))); if (sDecParam.bParseOnly) { @@ -256,7 +276,7 @@ void DetailOpenH264::getMotionVectors(frame_container& frames, frame_sp& outFram pDecoder->DecodeFrameGetMotionVectorsNoDelay(pSrc, iSrcLen, ppDst, &pDstInfo, &mMotionVectorSize, &mMotionVectorData); } - if (mMotionVectorSize != mWidth * mHeight * 8) + if (mMotionVectorSize != mWidth * mHeight * 8 && sendOverlayFrame == true) { std::vector circleOverlays; CompositeOverlay compositeOverlay; @@ -273,6 +293,7 @@ void DetailOpenH264::getMotionVectors(frame_container& frames, frame_sp& outFram circleOverlay.radius = 1; circleOverlays.push_back(circleOverlay); + motionFound = true; } } @@ -290,8 +311,19 @@ void DetailOpenH264::getMotionVectors(frame_container& frames, frame_sp& outFram frames.insert(make_pair(motionVectorPinId, outFrame)); } } - - if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && 
(mMotionVectorSize != mWidth * mHeight * 8)) + if(sendOverlayFrame == false){ + for (int i = 0; i < mMotionVectorSize; i += 4) + { + auto motionX = mMotionVectorData[i]; + auto motionY = mMotionVectorData[i + 1]; + if (abs(motionX) > threshold || abs(motionY) > threshold) + { + motionFound = true; + break; + } + } + } + if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && (mMotionVectorSize != mWidth * mHeight * 8) && motionFound == true) { decodedFrame = makeFrameWithPinId(mHeight * 3 * mWidth, rawFramePinId); uint8_t* yuvImagePtr = (uint8_t*)malloc(mHeight * 1.5 * pDstInfo.UsrData.sSystemBuffer.iStride[0]); @@ -322,9 +354,12 @@ MotionVectorExtractor::MotionVectorExtractor(MotionVectorExtractorProps props) : { mDetail.reset(new DetailOpenH264(props, [&](size_t size, string& pinId) -> frame_sp { return makeFrame(size, pinId); }, [&](frame_sp& frame, size_t& size, string& pinId) -> frame_sp { return makeFrame(frame, size, pinId); })); } - auto motionVectorOutputMetadata = framemetadata_sp(new FrameMetadata(FrameMetadata::OVERLAY_INFO_IMAGE)); + if (props.sendOverlayFrame) + { + auto motionVectorOutputMetadata = framemetadata_sp(new FrameMetadata(FrameMetadata::OVERLAY_INFO_IMAGE)); + mDetail->motionVectorPinId = addOutputPin(motionVectorOutputMetadata); + } rawOutputMetadata = framemetadata_sp(new RawImageMetadata()); - mDetail->motionVectorPinId = addOutputPin(motionVectorOutputMetadata); mDetail->rawFramePinId = addOutputPin(rawOutputMetadata); } bool MotionVectorExtractor::init() diff --git a/base/test/motionvector_extractor_and_overlay_tests.cpp b/base/test/motionvector_extractor_and_overlay_tests.cpp index e999c7987..30816d5f4 100644 --- a/base/test/motionvector_extractor_and_overlay_tests.cpp +++ b/base/test/motionvector_extractor_and_overlay_tests.cpp @@ -148,9 +148,7 @@ void motionVectorExtractAndOverlaySetProps(MotionVectorExtractorProps::MVExtract p.term(); p.wait_for_all(); } - -void 
motionVectorExtractAndOverlay_Render(MotionVectorExtractorProps::MVExtractMethod MvExtract) -{ +void motionVectorExtractAndOverlay_Render(MotionVectorExtractorProps::MVExtractMethod MvExtract) { LoggerProps loggerProps; loggerProps.logLevel = boost::log::trivial::severity_level::info; Logger::setLogLevel(boost::log::trivial::severity_level::info); @@ -181,6 +179,83 @@ void motionVectorExtractAndOverlay_Render(MotionVectorExtractorProps::MVExtractM p.run_all_threaded(); boost::this_thread::sleep_for(boost::chrono::seconds(10)); + LOG_INFO << "profiling done - stopping the pipeline"; + p.stop(); + p.term(); + p.wait_for_all(); + } + +void motionVectorExtractAndOverlay_sendOverlayFrames_Render(MotionVectorExtractorProps::MVExtractMethod MvExtract) { + LoggerProps loggerProps; + loggerProps.logLevel = boost::log::trivial::severity_level::info; + Logger::setLogLevel(boost::log::trivial::severity_level::info); + Logger::initLogger(loggerProps); + + bool sendDecodedFrames = true; + bool sendOverlayFrames = true; + + FileReaderModuleProps fileReaderProps("./data/h264_data/FVDO_Freeway_4cif_???.H264"); + fileReaderProps.fps = 30; + fileReaderProps.readLoop = true; + auto fileReader = boost::shared_ptr(new FileReaderModule(fileReaderProps)); + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + fileReader->addOutputPin(h264ImageMetadata); + + auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MvExtract, sendDecodedFrames, 2, sendOverlayFrames))); + fileReader->setNext(motionExtractor); + + auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); + motionExtractor->setNext(overlay); + + auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("MotionVectorsOverlay"))); + overlay->setNext(sink); + + PipeLine p("test"); + p.appendModule(fileReader); + p.init(); + + p.run_all_threaded(); + boost::this_thread::sleep_for(boost::chrono::seconds(10)); + + LOG_INFO << "profiling done - 
stopping the pipeline"; + p.stop(); + p.term(); + p.wait_for_all(); +} + +void motionVectorExtractAndOverlay_dontSendOverlayFrames_Render(MotionVectorExtractorProps::MVExtractMethod MvExtract) +{ + LoggerProps loggerProps; + loggerProps.logLevel = boost::log::trivial::severity_level::info; + Logger::setLogLevel(boost::log::trivial::severity_level::info); + Logger::initLogger(loggerProps); + + bool sendDecodedFrames = true; + bool sendOverlayFrames = false; + + FileReaderModuleProps fileReaderProps("./data/h264_data/FVDO_Freeway_4cif_???.H264"); + fileReaderProps.fps = 30; + fileReaderProps.readLoop = true; + auto fileReader = boost::shared_ptr(new FileReaderModule(fileReaderProps)); + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + fileReader->addOutputPin(h264ImageMetadata); + + auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MvExtract, sendDecodedFrames, 2, sendOverlayFrames))); + fileReader->setNext(motionExtractor); + + auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); + motionExtractor->setNext(overlay); + + auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("MotionVectorsOverlay"))); + overlay->setNext(sink); + + PipeLine p("test"); + p.appendModule(fileReader); + p.init(); + + p.run_all_threaded(); + boost::this_thread::sleep_for(boost::chrono::seconds(10)); + LOG_INFO << "profiling done - stopping the pipeline"; p.stop(); p.term(); @@ -197,6 +272,7 @@ void rtspCamMotionVectorExtractAndOverlay_Render(MotionVectorExtractorProps::MVE rtsp_client_tests_data d; bool overlayFrames = false; + const std::string url = ""; std::string username = ""; std::string password = ""; @@ -261,9 +337,14 @@ BOOST_AUTO_TEST_CASE(extract_motion_vectors_and_overlay_openh264) motionVectorExtractAndOverlay(MotionVectorExtractorProps::OPENH264); } -BOOST_AUTO_TEST_CASE(extract_motion_vectors_and_overlay_render_openh264, *boost::unit_test::disabled()) 
+BOOST_AUTO_TEST_CASE(extract_motion_vectors_no_overlay_frames_and_overlay_render_openh264, *boost::unit_test::disabled()) +{ + motionVectorExtractAndOverlay_dontSendOverlayFrames_Render(MotionVectorExtractorProps::OPENH264); +} + +BOOST_AUTO_TEST_CASE(extract_motion_vectors_send_overlay_frames_and_overlay_render_openh264, *boost::unit_test::disabled()) { - motionVectorExtractAndOverlay_Render(MotionVectorExtractorProps::OPENH264); + motionVectorExtractAndOverlay_sendOverlayFrames_Render(MotionVectorExtractorProps::OPENH264); } BOOST_AUTO_TEST_CASE(rtspcam_extract_motion_vectors_and_overlay_render_openh264, *boost::unit_test::disabled()) diff --git a/base/test/timelapse.cpp b/base/test/timelapse.cpp new file mode 100644 index 000000000..105f95eb4 --- /dev/null +++ b/base/test/timelapse.cpp @@ -0,0 +1,76 @@ +#include + +#include "Mp4ReaderSource.h" +#include "MotionVectorExtractor.h" +#include "Mp4WriterSink.h" +#include "H264Metadata.h" +#include "PipeLine.h" +#include "OverlayModule.h" +#include "ImageViewerModule.h" +#include "Mp4VideoMetadata.h" +#include "H264EncoderNVCodec.h" +#include "CudaMemCopy.h" +#include "CudaStreamSynchronize.h" +#include "ColorConversionXForm.h" +#include "FileWriterModule.h" + +BOOST_AUTO_TEST_SUITE(timeplase_using_motion_detection) + +BOOST_AUTO_TEST_CASE(generateTimeLapseUsingMotionDetection) +{ + std::string outFolderPath = "./data/timeplase_videos/"; + auto cuContext = apracucontext_sp(new ApraCUcontext()); + uint32_t gopLength = 25; + uint32_t bitRateKbps = 1000; + uint32_t frameRate = 30; + H264EncoderNVCodecProps::H264CodecProfile profile = H264EncoderNVCodecProps::MAIN; + bool enableBFrames = false; + bool sendDecodedFrames = true; + bool sendOverlayFrames = false; + std::string videoPath = "./data/Mp4_videos/h264_video_metadata/20230514/0011/1707478361303.mp4"; + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); + 
mp4ReaderProps.fps = 24; + auto mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); + mp4Reader -> addOutPutPin(h264ImageMetadata); + auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); + mp4Reader->addOutPutPin(mp4Metadata); + std::vector mImagePin; + mImagePin = mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); + auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, 50, sendOverlayFrames))); + auto m2 = boost::shared_ptr(new FileWriterModule(FileWriterModuleProps("./data/testOutput/timelapseframe_????.raw"))); + mp4Reader -> setNext(motionExtractor, mImagePin); + + auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); + motionExtractor -> setNext(m2); + motionExtractor -> setNext(colorchange1); + + auto colorchange2 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); + colorchange1->setNext(colorchange2); + + cudastream_sp cudaStream_ = boost::shared_ptr(new ApraCudaStream()); + auto copy = boost::shared_ptr(new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); + colorchange2->setNext(copy); + + auto sync = boost::shared_ptr(new CudaStreamSynchronize(CudaStreamSynchronizeProps(cudaStream_))); + copy->setNext(sync); + auto encoder = boost::shared_ptr(new H264EncoderNVCodec(H264EncoderNVCodecProps(bitRateKbps, cuContext, gopLength, frameRate, profile, enableBFrames))); + sync->setNext(encoder); + auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, false); + auto mp4WriterSink = boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); + encoder -> setNext(mp4WriterSink); + + PipeLine p("test"); + p.appendModule(mp4Reader); + p.init(); + + p.run_all_threaded(); + boost::this_thread::sleep_for(boost::chrono::seconds(20)); + + LOG_INFO << "profiling done - 
stopping the pipeline"; + p.stop(); + p.term(); + p.wait_for_all(); +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From 0b02ac2b4f4d66ab0d9a295a8f49cd2e7f28e5d7 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Wed, 8 May 2024 16:32:06 +0530 Subject: [PATCH 02/49] timelaspe file changes --- base/test/timelapse.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/test/timelapse.cpp b/base/test/timelapse.cpp index 105f95eb4..a651d806f 100644 --- a/base/test/timelapse.cpp +++ b/base/test/timelapse.cpp @@ -27,7 +27,7 @@ BOOST_AUTO_TEST_CASE(generateTimeLapseUsingMotionDetection) bool enableBFrames = false; bool sendDecodedFrames = true; bool sendOverlayFrames = false; - std::string videoPath = "./data/Mp4_videos/h264_video_metadata/20230514/0011/1707478361303.mp4"; + std::string videoPath = "./data/Mp4_videos/h264_video_metadata/20230514/0011/video1.mp4"; auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); mp4ReaderProps.fps = 24; @@ -40,9 +40,9 @@ BOOST_AUTO_TEST_CASE(generateTimeLapseUsingMotionDetection) auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, 50, sendOverlayFrames))); auto m2 = boost::shared_ptr(new FileWriterModule(FileWriterModuleProps("./data/testOutput/timelapseframe_????.raw"))); mp4Reader -> setNext(motionExtractor, mImagePin); + // motionExtractor -> setNext(m2); auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); - motionExtractor -> setNext(m2); motionExtractor -> setNext(colorchange1); auto colorchange2 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); From 55235e40c48c223c4f8412cb0751a666db7fdf62 Mon Sep 17 00:00:00 2001 From: nsabale7 Date: Fri, 10 May 2024 
12:05:06 +0530 Subject: [PATCH 03/49] wip sample --- samples/timelapse_sample/CMakeLists.txt | 1 + .../timelapse_sample/timelapse_summary.cpp | 48 +++++++++++++++++++ samples/timelapse_sample/timelapse_summary.h | 12 +++++ .../timelapse_summary_test.cpp | 18 +++++++ 4 files changed, 79 insertions(+) create mode 100644 samples/timelapse_sample/CMakeLists.txt create mode 100644 samples/timelapse_sample/timelapse_summary.cpp create mode 100644 samples/timelapse_sample/timelapse_summary.h create mode 100644 samples/timelapse_sample/timelapse_summary_test.cpp diff --git a/samples/timelapse_sample/CMakeLists.txt b/samples/timelapse_sample/CMakeLists.txt new file mode 100644 index 000000000..e6c221c6e --- /dev/null +++ b/samples/timelapse_sample/CMakeLists.txt @@ -0,0 +1 @@ +cmake_minimum_required(VERSION 3.22) diff --git a/samples/timelapse_sample/timelapse_summary.cpp b/samples/timelapse_sample/timelapse_summary.cpp new file mode 100644 index 000000000..4fac1d661 --- /dev/null +++ b/samples/timelapse_sample/timelapse_summary.cpp @@ -0,0 +1,48 @@ +#include "Mp4ReaderSource.h" +#include "TimelapsePipeline.h" +#include "H264Metadata.h" + + +TimelapsePipeline::TimelapsePipeline():pipeline("test") { +} + + +TimelapsePipeline::~TimelapsePipeline() { +} + + +bool TimelapsePipeline::setupPipeline() { + bool overlayFrames = true; + + FileReaderModuleProps fileReaderProps("./data/h264_data/FVDO_Freeway_4cif_???.H264"); + fileReaderProps.fps = 30; + fileReaderProps.readLoop = true; + auto fileReader = boost::shared_ptr(new FileReaderModule(fileReaderProps)); + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + fileReader->addOutputPin(h264ImageMetadata); + + auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::OPENH264, overlayFrames))); + fileReader->setNext(motionExtractor); + + auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); + motionExtractor->setNext(overlay); + + 
auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("MotionVectorsOverlay"))); + overlay->setNext(sink); + + pipeline.appendModule(fileReader); + pipeline.init(); + return true +} + +bool TimelapsePipeline::startPipeline() { + pipeline.run_all_threaded(); + return true; +} + +bool TimelapsePipeline::stopPipeline() { + m_cameraPipeline.stop(); + m_cameraPipeline.term(); + m_cameraPipeline.wait_for_all(); + return true; +} \ No newline at end of file diff --git a/samples/timelapse_sample/timelapse_summary.h b/samples/timelapse_sample/timelapse_summary.h new file mode 100644 index 000000000..f595d3cd1 --- /dev/null +++ b/samples/timelapse_sample/timelapse_summary.h @@ -0,0 +1,12 @@ +#include + +class TimelapsePipeline : +{ +public: + bool setupPipeline(): + bool startPipeline(): + bool stopPipeline(): + +private: + PipeLine pipeline; +} \ No newline at end of file diff --git a/samples/timelapse_sample/timelapse_summary_test.cpp b/samples/timelapse_sample/timelapse_summary_test.cpp new file mode 100644 index 000000000..2332320f1 --- /dev/null +++ b/samples/timelapse_sample/timelapse_summary_test.cpp @@ -0,0 +1,18 @@ +#include + +#include "TimelapsePipeline.h" + +BOOST_AUTO_TEST_SUITE(timeplase_summary) + +BOOST_AUTO_TEST_CASE(generateTimeLapse) +{ + auto timelapsePipeline = boost::shared_ptr(new TimelapsePipeline()); + timelapsePipeline.setupPipeline(); + + timelapsePipeline.startPipeline(); + boost::this_thread::sleep_for(boost::chrono::seconds(10)); + + timelapsePipeline.stopPipeline(); +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From c116aadf5790f6d61589429c3e080ae6738d5301 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Mon, 13 May 2024 21:35:04 +0530 Subject: [PATCH 04/49] samples WIP. 
--- base/CMakeLists.txt | 3 +- base/include/RawImagePlanarMetadata.h | 4 +- base/test/timelapse.cpp | 6 +- base/vcpkg.json | 2 +- samples/CMakeLists.txt | 10 +++ samples/timelapse-sample/CMakeLists.txt | 53 +++++++++++++ .../timelapse-sample/timelapse_summary.cpp | 75 +++++++++++++++++++ samples/timelapse-sample/timelapse_summary.h | 13 ++++ .../timelapse_summary_test.cpp | 17 +++++ vcpkg | 2 +- 10 files changed, 177 insertions(+), 8 deletions(-) create mode 100644 samples/CMakeLists.txt create mode 100644 samples/timelapse-sample/CMakeLists.txt create mode 100644 samples/timelapse-sample/timelapse_summary.cpp create mode 100644 samples/timelapse-sample/timelapse_summary.h create mode 100644 samples/timelapse-sample/timelapse_summary_test.cpp diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 2b775d2bc..f895b1da4 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -562,7 +562,6 @@ SET(UT_FILES test/overlaymodule_tests.cpp test/testSignalGeneratorSrc_tests.cpp test/audioToTextXform_tests.cpp - test/timelapse.cpp ${ARM64_UT_FILES} ${CUDA_UT_FILES} ) @@ -619,3 +618,5 @@ IF(ENABLE_WINDOWS) file(COPY ${RUNTIME_DLLS} DESTINATION RelWithDebInfo/) ENDIF(GHA) ENDIF(ENABLE_WINDOWS) + +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../samples ${CMAKE_CURRENT_BINARY_DIR}/samples) diff --git a/base/include/RawImagePlanarMetadata.h b/base/include/RawImagePlanarMetadata.h index 30a3514a9..93d5da86a 100644 --- a/base/include/RawImagePlanarMetadata.h +++ b/base/include/RawImagePlanarMetadata.h @@ -28,8 +28,8 @@ class RawImagePlanarMetadata : public FrameMetadata break; case ImageMetadata::YUV420: _step[0] = _width + FrameMetadata::getPaddingLength(_width, alignLength); - _step[1] = _width >> 1; - _step[1] = _step[1] + FrameMetadata::getPaddingLength(_step[1], alignLength); + _step[1] = _step[0] >> 1; + // _step[1] = _step[1] + FrameMetadata::getPaddingLength(_step[1], alignLength); _step[2] = _step[1]; break; default: diff --git a/base/test/timelapse.cpp 
b/base/test/timelapse.cpp index a651d806f..5028e563d 100644 --- a/base/test/timelapse.cpp +++ b/base/test/timelapse.cpp @@ -40,9 +40,9 @@ BOOST_AUTO_TEST_CASE(generateTimeLapseUsingMotionDetection) auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, 50, sendOverlayFrames))); auto m2 = boost::shared_ptr(new FileWriterModule(FileWriterModuleProps("./data/testOutput/timelapseframe_????.raw"))); mp4Reader -> setNext(motionExtractor, mImagePin); - // motionExtractor -> setNext(m2); + motionExtractor -> setNext(m2); - auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); + /*auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); motionExtractor -> setNext(colorchange1); auto colorchange2 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); @@ -58,7 +58,7 @@ BOOST_AUTO_TEST_CASE(generateTimeLapseUsingMotionDetection) sync->setNext(encoder); auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, false); auto mp4WriterSink = boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); - encoder -> setNext(mp4WriterSink); + encoder -> setNext(mp4WriterSink);*/ PipeLine p("test"); p.appendModule(mp4Reader); diff --git a/base/vcpkg.json b/base/vcpkg.json index 4df4664c0..7a9afe9f8 100644 --- a/base/vcpkg.json +++ b/base/vcpkg.json @@ -2,7 +2,7 @@ "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg/master/scripts/vcpkg.schema.json", "name": "apra-pipes-cuda", "version": "0.0.1", - "builtin-baseline": "eac79fc7bda260819c646d10c97dec825305aecd", + "builtin-baseline": "3860aea7304f2b9eac61f37a3095fd5fd57428d8", "dependencies": [ { "name": "whisper", diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt new file mode 100644 index 000000000..080530fec --- /dev/null 
+++ b/samples/CMakeLists.txt @@ -0,0 +1,10 @@ +#dependencies + +find_package(Threads REQUIRED) + +include_directories( +../base +${CMAKE_CURRENT_SOURCE_DIR} +) + +add_subdirectory(timelapse-sample) \ No newline at end of file diff --git a/samples/timelapse-sample/CMakeLists.txt b/samples/timelapse-sample/CMakeLists.txt new file mode 100644 index 000000000..31ffe93ca --- /dev/null +++ b/samples/timelapse-sample/CMakeLists.txt @@ -0,0 +1,53 @@ +set(TARGET timelapse-sample) +SET(CORE_FILES + timelapse_summary.cpp +) +SET(CORE_FILES_H + timelapse_summary.h +) +SET(SOURCE + ${CORE_FILES} + ${CORE_FILES_H} +) +set(UT_FILE + timelapse_summary_test.cpp +) +find_library(APRA_PIPES_LIB NAMES aprapipes) +add_library(example_aprapipes STATIC ${SOURCE}) +add_executable(runner_test ${UT_FILE}) +target_include_directories ( example_aprapipes PRIVATE + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) +target_include_directories ( runner_test PUBLIC + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) +target_link_libraries( + runner_test + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + example_aprapipes + ) \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp new file mode 100644 index 000000000..5bff2c06d --- /dev/null +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -0,0 +1,75 @@ +#include "Mp4ReaderSource.h" +#include "MotionVectorExtractor.h" +#include "Mp4WriterSink.h" +#include "H264Metadata.h" +#include "PipeLine.h" 
+#include "OverlayModule.h" +#include "ImageViewerModule.h" +#include "Mp4VideoMetadata.h" +#include "H264EncoderNVCodec.h" +#include "CudaMemCopy.h" +#include "CudaStreamSynchronize.h" +#include "ColorConversionXForm.h" +#include "FileWriterModule.h" +#include "timelapse_summary.h" + +TimelapsePipeline::TimelapsePipeline() : pipeline("test") {} + +bool TimelapsePipeline::setupPipeline() { + std::string outFolderPath = "./data/timeplase_videos/"; + auto cuContext = apracucontext_sp(new ApraCUcontext()); + uint32_t gopLength = 25; + uint32_t bitRateKbps = 1000; + uint32_t frameRate = 30; + H264EncoderNVCodecProps::H264CodecProfile profile = H264EncoderNVCodecProps::MAIN; + bool enableBFrames = false; + bool sendDecodedFrames = true; + bool sendOverlayFrames = false; + std::string videoPath = "./data/Mp4_videos/h264_video_metadata/20230514/0011/video1.mp4"; + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); + mp4ReaderProps.fps = 24; + auto mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); + mp4Reader -> addOutPutPin(h264ImageMetadata); + auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); + mp4Reader->addOutPutPin(mp4Metadata); + std::vector mImagePin; + mImagePin = mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); + auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264,sendDecodedFrames, 50, sendOverlayFrames))); + mp4Reader -> setNext(motionExtractor, mImagePin); + + auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); + motionExtractor -> setNext(colorchange1); + + auto colorchange2 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); + colorchange1 -> setNext(colorchange2); + + cudastream_sp cudaStream_ = 
boost::shared_ptr(new ApraCudaStream()); + auto copy = boost::shared_ptr(new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); + colorchange2 -> setNext(copy); + + auto sync = boost::shared_ptr(new CudaStreamSynchronize(CudaStreamSynchronizeProps(cudaStream_))); + copy -> setNext(sync); + auto encoder = boost::shared_ptr(new H264EncoderNVCodec(H264EncoderNVCodecProps(bitRateKbps, cuContext, gopLength, frameRate, profile, enableBFrames))); + sync -> setNext(encoder); + auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, false); + auto mp4WriterSink = boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); + encoder -> setNext(mp4WriterSink); + + pipeline.appendModule(mp4Reader); + pipeline.init(); + return true; +} + +bool TimelapsePipeline::startPipeline() { + pipeline.run_all_threaded(); + boost::this_thread::sleep_for(boost::chrono::seconds(10)); + return true; +} + +bool TimelapsePipeline::stopPipeline() { + pipeline.stop(); + pipeline.term(); + pipeline.wait_for_all(); + return true; +} \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary.h b/samples/timelapse-sample/timelapse_summary.h new file mode 100644 index 000000000..d06d0aeb8 --- /dev/null +++ b/samples/timelapse-sample/timelapse_summary.h @@ -0,0 +1,13 @@ + +#include + +class TimelapsePipeline { +public: + TimelapsePipeline(); + bool setupPipeline(); + bool startPipeline(); + bool stopPipeline(); + +private: + PipeLine pipeline; +}; \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary_test.cpp b/samples/timelapse-sample/timelapse_summary_test.cpp new file mode 100644 index 000000000..3c579e511 --- /dev/null +++ b/samples/timelapse-sample/timelapse_summary_test.cpp @@ -0,0 +1,17 @@ +#include +#include +#include "timelapse_summary.h" + +BOOST_AUTO_TEST_SUITE(generate_timelapse) + +BOOST_AUTO_TEST_CASE(generateTimeLapse) +{ + auto timelapsePipeline = boost::shared_ptr(new TimelapsePipeline()); + 
timelapsePipeline -> setupPipeline(); + + timelapsePipeline -> startPipeline(); + + timelapsePipeline -> stopPipeline(); +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file diff --git a/vcpkg b/vcpkg index 7754d62d1..3860aea73 160000 --- a/vcpkg +++ b/vcpkg @@ -1 +1 @@ -Subproject commit 7754d62d19501a3bb4e2d4f2eab80e8de9703e41 +Subproject commit 3860aea7304f2b9eac61f37a3095fd5fd57428d8 From 213bf2c326efc51e34cfe79ac2427172080f5645 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 14 May 2024 10:25:33 +0530 Subject: [PATCH 05/49] added main to sample cpp file. --- .../timelapse-sample/timelapse_summary.cpp | 35 ++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index 5bff2c06d..29ada856c 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -16,7 +16,7 @@ TimelapsePipeline::TimelapsePipeline() : pipeline("test") {} bool TimelapsePipeline::setupPipeline() { - std::string outFolderPath = "./data/timeplase_videos/"; + std::string outFolderPath = "C:\\Workspace\\ApraPipesFork\\ApraPipes\\data\\timeplase_videos"; auto cuContext = apracucontext_sp(new ApraCUcontext()); uint32_t gopLength = 25; uint32_t bitRateKbps = 1000; @@ -25,7 +25,7 @@ bool TimelapsePipeline::setupPipeline() { bool enableBFrames = false; bool sendDecodedFrames = true; bool sendOverlayFrames = false; - std::string videoPath = "./data/Mp4_videos/h264_video_metadata/20230514/0011/video1.mp4"; + std::string videoPath = "C:\\Workspace\\ApraPipesFork\\ApraPipes\\data\\Mp4_videos\\h264_video_metadata\\20230514\\0011\\video1.mp4"; auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); mp4ReaderProps.fps = 24; @@ -35,7 +35,7 @@ bool TimelapsePipeline::setupPipeline() { mp4Reader->addOutPutPin(mp4Metadata); 
std::vector mImagePin; mImagePin = mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); - auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264,sendDecodedFrames, 50, sendOverlayFrames))); + auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264,sendDecodedFrames, 20, sendOverlayFrames))); mp4Reader -> setNext(motionExtractor, mImagePin); auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); @@ -72,4 +72,31 @@ bool TimelapsePipeline::stopPipeline() { pipeline.term(); pipeline.wait_for_all(); return true; -} \ No newline at end of file +} + +int main() { + TimelapsePipeline pipelineInstance; + + // Setup the pipeline + if (!pipelineInstance.setupPipeline()) { + std::cerr << "Failed to setup pipeline." << std::endl; + return 1; // Or any error code indicating failure + } + + // Start the pipeline + if (!pipelineInstance.startPipeline()) { + std::cerr << "Failed to start pipeline." << std::endl; + return 1; // Or any error code indicating failure + } + + // Wait for the pipeline to run for 10 seconds + boost::this_thread::sleep_for(boost::chrono::seconds(10)); + + // Stop the pipeline + if (!pipelineInstance.stopPipeline()) { + std::cerr << "Failed to stop pipeline." << std::endl; + return 1; // Or any error code indicating failure + } + + return 0; // Or any success code +} From 5cb1025c01fea708b370d7c0116441a07f237124 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 14 May 2024 10:27:17 +0530 Subject: [PATCH 06/49] removed sample test file from tests. 
--- base/test/timelapse.cpp | 76 ----------------------------------------- 1 file changed, 76 deletions(-) delete mode 100644 base/test/timelapse.cpp diff --git a/base/test/timelapse.cpp b/base/test/timelapse.cpp deleted file mode 100644 index 5028e563d..000000000 --- a/base/test/timelapse.cpp +++ /dev/null @@ -1,76 +0,0 @@ -#include - -#include "Mp4ReaderSource.h" -#include "MotionVectorExtractor.h" -#include "Mp4WriterSink.h" -#include "H264Metadata.h" -#include "PipeLine.h" -#include "OverlayModule.h" -#include "ImageViewerModule.h" -#include "Mp4VideoMetadata.h" -#include "H264EncoderNVCodec.h" -#include "CudaMemCopy.h" -#include "CudaStreamSynchronize.h" -#include "ColorConversionXForm.h" -#include "FileWriterModule.h" - -BOOST_AUTO_TEST_SUITE(timeplase_using_motion_detection) - -BOOST_AUTO_TEST_CASE(generateTimeLapseUsingMotionDetection) -{ - std::string outFolderPath = "./data/timeplase_videos/"; - auto cuContext = apracucontext_sp(new ApraCUcontext()); - uint32_t gopLength = 25; - uint32_t bitRateKbps = 1000; - uint32_t frameRate = 30; - H264EncoderNVCodecProps::H264CodecProfile profile = H264EncoderNVCodecProps::MAIN; - bool enableBFrames = false; - bool sendDecodedFrames = true; - bool sendOverlayFrames = false; - std::string videoPath = "./data/Mp4_videos/h264_video_metadata/20230514/0011/video1.mp4"; - auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); - auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); - mp4ReaderProps.fps = 24; - auto mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); - mp4Reader -> addOutPutPin(h264ImageMetadata); - auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); - mp4Reader->addOutPutPin(mp4Metadata); - std::vector mImagePin; - mImagePin = mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); - auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264, 
sendDecodedFrames, 50, sendOverlayFrames))); - auto m2 = boost::shared_ptr(new FileWriterModule(FileWriterModuleProps("./data/testOutput/timelapseframe_????.raw"))); - mp4Reader -> setNext(motionExtractor, mImagePin); - motionExtractor -> setNext(m2); - - /*auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); - motionExtractor -> setNext(colorchange1); - - auto colorchange2 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); - colorchange1->setNext(colorchange2); - - cudastream_sp cudaStream_ = boost::shared_ptr(new ApraCudaStream()); - auto copy = boost::shared_ptr(new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); - colorchange2->setNext(copy); - - auto sync = boost::shared_ptr(new CudaStreamSynchronize(CudaStreamSynchronizeProps(cudaStream_))); - copy->setNext(sync); - auto encoder = boost::shared_ptr(new H264EncoderNVCodec(H264EncoderNVCodecProps(bitRateKbps, cuContext, gopLength, frameRate, profile, enableBFrames))); - sync->setNext(encoder); - auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, false); - auto mp4WriterSink = boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); - encoder -> setNext(mp4WriterSink);*/ - - PipeLine p("test"); - p.appendModule(mp4Reader); - p.init(); - - p.run_all_threaded(); - boost::this_thread::sleep_for(boost::chrono::seconds(20)); - - LOG_INFO << "profiling done - stopping the pipeline"; - p.stop(); - p.term(); - p.wait_for_all(); -} - -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From 9c70041f377ea46b3825455b9e277fe26175d2a7 Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Tue, 14 May 2024 11:40:31 +0530 Subject: [PATCH 07/49] added sample example to create thumbnail from mp4 video --- base/CMakeLists.txt | 2 + samples/CMakeLists.txt | 10 ++ .../CMakeLists.txt | 52 ++++++++++ .../GenerateThumbnailsPipeline.h | 15 +++ 
.../generate_thumbnail_from_mp4_video.cpp | 94 +++++++++++++++++++ ...test_generate_thumbnail_from_mp4_video.cpp | 18 ++++ 6 files changed, 191 insertions(+) create mode 100644 samples/CMakeLists.txt create mode 100644 samples/create_thumbnail_from_mp4_video/CMakeLists.txt create mode 100644 samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h create mode 100644 samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp create mode 100644 samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 9f2cd1470..ba5a36776 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -588,6 +588,8 @@ ENDIF (ENABLE_CUDA) find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../samples ${CMAKE_CURRENT_BINARY_DIR}/samples) + target_link_libraries(aprapipesut aprapipes ${JPEG_LIBRARIES} diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt new file mode 100644 index 000000000..56e4855e6 --- /dev/null +++ b/samples/CMakeLists.txt @@ -0,0 +1,10 @@ +#dependencies + +find_package(Threads REQUIRED) + +include_directories( +../base +${CMAKE_CURRENT_SOURCE_DIR} +) + +add_subdirectory(create_thumbnail_from_mp4_video) \ No newline at end of file diff --git a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt new file mode 100644 index 000000000..f30f87f9f --- /dev/null +++ b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt @@ -0,0 +1,52 @@ +set(TARGET create_thumbnail_from_mp4_video) +SET(CORE_FILES + generate_thumbnail_from_mp4_video.cpp +) +SET(CORE_FILES_H + GenerateThumbnailsPipeline.h +) +SET(SOURCE + ${CORE_FILES} + ${CORE_FILES_H} +) +set(UT_FILE + test_generate_thumbnail_from_mp4_video.cpp +) +add_library(example_aprapipes STATIC ${SOURCE}) 
+add_executable(${TARGET} ${UT_FILE}) +target_include_directories ( example_aprapipes PRIVATE + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) +target_include_directories ( ${TARGET} PUBLIC + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) +target_link_libraries( + ${TARGET} + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + example_aprapipes + ) \ No newline at end of file diff --git a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h new file mode 100644 index 000000000..dbf4a6fe8 --- /dev/null +++ b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h @@ -0,0 +1,15 @@ +#include +#include + +class GenerateThumbnailsPipeline +{ +public: + GenerateThumbnailsPipeline(); + bool setUpPipeLine(); + bool startPipeLine(); + bool stopPipeLine(); + +private: + PipeLine pipeLine; + boost::shared_ptr valve; +}; \ No newline at end of file diff --git a/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp b/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp new file mode 100644 index 000000000..f280cf81c --- /dev/null +++ b/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp @@ -0,0 +1,94 @@ + +#include "GenerateThumbnailsPipeline.h" +#include "FrameMetadata.h" +#include "H264Metadata.h" +#include "Mp4ReaderSource.h" +#include "Mp4VideoMetadata.h" +#include "H264Decoder.h" +#include "ValveModule.h" +#include 
"ColorConversionXForm.h" +#include "CudaMemCopy.h" +#include "FileWriterModule.h" +#include "JPEGEncoderNVJPEG.h" + +GenerateThumbnailsPipeline::GenerateThumbnailsPipeline() : pipeLine("pipeline") { + +} +bool GenerateThumbnailsPipeline::setUpPipeLine() { + // Implementation + std::string videoPath = "../.././data/Mp4_videos/h264_video_metadata/20230514/0011/1686723796848.mp4"; + std::string outPath = "../.././data/mp4Reader_saveOrCompare/h264/frame_000???.jpg"; + + bool parseFS = false; + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + auto frameType = FrameMetadata::FrameType::H264_DATA; + auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, parseFS, 0, true, false, false); + auto mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); + mp4Reader->addOutPutPin(h264ImageMetadata); + + auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); + mp4Reader->addOutPutPin(mp4Metadata); + + std::vector mImagePin; + mImagePin = mp4Reader->getAllOutputPinsByType(frameType); + + auto decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); + mp4Reader->setNext(decoder, mImagePin); + + valve = boost::shared_ptr(new ValveModule(ValveModuleProps(2))); + decoder->setNext(valve); + + auto conversionType = ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; + auto metadata = framemetadata_sp(new RawImagePlanarMetadata(1280, 720, ImageMetadata::ImageType::YUV420, size_t(0), CV_8U)); + auto colorchange = boost::shared_ptr(new ColorConversion(ColorConversionProps(conversionType))); + valve->setNext(colorchange); + + auto stream = cudastream_sp(new ApraCudaStream); + auto copy = boost::shared_ptr(new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, stream))); + colorchange->setNext(copy); + + auto m2 = boost::shared_ptr(new JPEGEncoderNVJPEG(JPEGEncoderNVJPEGProps(stream))); + copy->setNext(m2); + + /*auto valve = boost::shared_ptr(new ValveModule(ValveModuleProps(10))); + m2->setNext(valve);*/ + + auto fileWriter = 
boost::shared_ptr(new FileWriterModule(FileWriterModuleProps(outPath))); + m2->setNext(fileWriter); + + pipeLine.appendModule(mp4Reader); + pipeLine.init(); + return true; +} + +bool GenerateThumbnailsPipeline::startPipeLine() { + pipeLine.run_all_threaded(); + valve->allowFrames(1); + return true; +} + +bool GenerateThumbnailsPipeline::stopPipeLine() { + pipeLine.stop(); + pipeLine.term(); + pipeLine.wait_for_all(); + return true; +} +int main() { + GenerateThumbnailsPipeline pipelineInstance; + if (!pipelineInstance.setUpPipeLine()) { + std::cerr << "Failed to setup pipeline." << std::endl; + return 1; + } + if (!pipelineInstance.startPipeLine()) { + std::cerr << "Failed to start pipeline." << std::endl; + return 1; // Or any error code indicating failure + } + // Wait for the pipeline to run for 10 seconds + boost::this_thread::sleep_for(boost::chrono::seconds(10)); + // Stop the pipeline + if (!pipelineInstance.stopPipeLine()) { + std::cerr << "Failed to stop pipeline." << std::endl; + return 1; // Or any error code indicating failure + } + return 0; // Or any success code +} diff --git a/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp b/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp new file mode 100644 index 000000000..74022d2a3 --- /dev/null +++ b/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp @@ -0,0 +1,18 @@ +#include + +#include "GenerateThumbnailsPipeline.h" + +BOOST_AUTO_TEST_SUITE(generateThumbnails) + +BOOST_AUTO_TEST_CASE(generateThumbnails_from_mp4) +{ + auto generateThumbnailPipeline = boost::shared_ptr(new GenerateThumbnailsPipeline()); + generateThumbnailPipeline->setUpPipeLine(); + generateThumbnailPipeline->startPipeLine(); + + boost::this_thread::sleep_for(boost::chrono::seconds(5)); + + generateThumbnailPipeline->stopPipeLine(); +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From 2f3adb6a0d85747e518d9993615b4dd5c6dd11f2 
Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 14 May 2024 12:33:03 +0530 Subject: [PATCH 08/49] Improved motion vector data code. --- base/src/MotionVectorExtractor.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index 471c19fc8..25d25eee6 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -264,8 +264,15 @@ void DetailOpenH264::getMotionVectors(frame_container& frames, frame_sp& outFram int32_t mMotionVectorSize = mWidth * mHeight * 8; int16_t* mMotionVectorData = nullptr; memset(&pDstInfo, 0, sizeof(SBufferInfo)); - //outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); - mMotionVectorData = static_cast(malloc(mMotionVectorSize*sizeof(int16_t))); + if (sendOverlayFrame) + { + outFrame = makeFrameWithPinId(mMotionVectorSize,motionVectorPinId); + mMotionVectorData = static_cast(outFrame->data()); + } + else + { + mMotionVectorData = static_cast(malloc(mMotionVectorSize * sizeof(int16_t))); + } if (sDecParam.bParseOnly) { From 738763b8e3cf91fb2293399fd6c46963e88fc95d Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Wed, 15 May 2024 12:46:04 +0530 Subject: [PATCH 09/49] reduced thread sleep seconds --- .../generate_thumbnail_from_mp4_video.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp b/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp index f280cf81c..393f38cb5 100644 --- a/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp +++ b/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp @@ -84,7 +84,7 @@ int main() { return 1; // Or any error code indicating failure } // Wait for the pipeline to run for 10 seconds - boost::this_thread::sleep_for(boost::chrono::seconds(10)); + 
boost::this_thread::sleep_for(boost::chrono::seconds(5)); // Stop the pipeline if (!pipelineInstance.stopPipeLine()) { std::cerr << "Failed to stop pipeline." << std::endl; From 5315e628b88e52e469825ea5286f855d7ca2d2c4 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Wed, 15 May 2024 16:08:37 +0530 Subject: [PATCH 10/49] using absolute path. --- samples/timelapse-sample/timelapse_summary.cpp | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index 29ada856c..bc9f0836e 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -16,7 +16,7 @@ TimelapsePipeline::TimelapsePipeline() : pipeline("test") {} bool TimelapsePipeline::setupPipeline() { - std::string outFolderPath = "C:\\Workspace\\ApraPipesFork\\ApraPipes\\data\\timeplase_videos"; + std::string outFolderPath = "../.././data/timeplase_videos"; auto cuContext = apracucontext_sp(new ApraCUcontext()); uint32_t gopLength = 25; uint32_t bitRateKbps = 1000; @@ -25,7 +25,7 @@ bool TimelapsePipeline::setupPipeline() { bool enableBFrames = false; bool sendDecodedFrames = true; bool sendOverlayFrames = false; - std::string videoPath = "C:\\Workspace\\ApraPipesFork\\ApraPipes\\data\\Mp4_videos\\h264_video_metadata\\20230514\\0011\\video1.mp4"; + std::string videoPath = "../.././data/Mp4_videos/h264_video_metadata/20230514/0011/1715748702000.mp4"; auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); mp4ReaderProps.fps = 24; @@ -35,8 +35,9 @@ bool TimelapsePipeline::setupPipeline() { mp4Reader->addOutPutPin(mp4Metadata); std::vector mImagePin; mImagePin = mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); - auto motionExtractor = boost::shared_ptr(new 
MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264,sendDecodedFrames, 20, sendOverlayFrames))); - mp4Reader -> setNext(motionExtractor, mImagePin); + auto motionExtractorProps = MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264,sendDecodedFrames, 10, sendOverlayFrames); + auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(motionExtractorProps)); + mp4Reader->setNext(motionExtractor, mImagePin); auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); motionExtractor -> setNext(colorchange1); @@ -63,7 +64,6 @@ bool TimelapsePipeline::setupPipeline() { bool TimelapsePipeline::startPipeline() { pipeline.run_all_threaded(); - boost::this_thread::sleep_for(boost::chrono::seconds(10)); return true; } @@ -75,6 +75,11 @@ bool TimelapsePipeline::stopPipeline() { } int main() { + LoggerProps loggerProps; + loggerProps.logLevel = boost::log::trivial::severity_level::info; + Logger::setLogLevel(boost::log::trivial::severity_level::info); + Logger::initLogger(loggerProps); + TimelapsePipeline pipelineInstance; // Setup the pipeline From b89e3a6bf8149d081ea6e28234df4b5958e3ffe0 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Wed, 15 May 2024 17:08:45 +0530 Subject: [PATCH 11/49] Reverting unwanted changes. 
--- base/include/RawImagePlanarMetadata.h | 4 ++-- base/vcpkg.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/base/include/RawImagePlanarMetadata.h b/base/include/RawImagePlanarMetadata.h index 93d5da86a..30a3514a9 100644 --- a/base/include/RawImagePlanarMetadata.h +++ b/base/include/RawImagePlanarMetadata.h @@ -28,8 +28,8 @@ class RawImagePlanarMetadata : public FrameMetadata break; case ImageMetadata::YUV420: _step[0] = _width + FrameMetadata::getPaddingLength(_width, alignLength); - _step[1] = _step[0] >> 1; - // _step[1] = _step[1] + FrameMetadata::getPaddingLength(_step[1], alignLength); + _step[1] = _width >> 1; + _step[1] = _step[1] + FrameMetadata::getPaddingLength(_step[1], alignLength); _step[2] = _step[1]; break; default: diff --git a/base/vcpkg.json b/base/vcpkg.json index 7a9afe9f8..4df4664c0 100644 --- a/base/vcpkg.json +++ b/base/vcpkg.json @@ -2,7 +2,7 @@ "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg/master/scripts/vcpkg.schema.json", "name": "apra-pipes-cuda", "version": "0.0.1", - "builtin-baseline": "3860aea7304f2b9eac61f37a3095fd5fd57428d8", + "builtin-baseline": "eac79fc7bda260819c646d10c97dec825305aecd", "dependencies": [ { "name": "whisper", From ab3f182647ee41e42448c93db912600ba354992d Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Fri, 17 May 2024 13:07:28 +0530 Subject: [PATCH 12/49] added new sample which will start video from beginning using flushAllQueues and seek feature --- samples/CMakeLists.txt | 3 +- samples/play_mp4_from_begining/CMakeLists.txt | 52 ++++++++++ .../PlayMp4VideoFromBeginning.h | 17 ++++ .../play_mp4_video_from_begining.cpp | 97 +++++++++++++++++++ .../test_play_mp4_video_from_begining.cpp | 18 ++++ 5 files changed, 186 insertions(+), 1 deletion(-) create mode 100644 samples/play_mp4_from_begining/CMakeLists.txt create mode 100644 samples/play_mp4_from_begining/PlayMp4VideoFromBeginning.h create mode 100644 samples/play_mp4_from_begining/play_mp4_video_from_begining.cpp 
create mode 100644 samples/play_mp4_from_begining/test_play_mp4_video_from_begining.cpp diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index 56e4855e6..e4364c078 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -7,4 +7,5 @@ include_directories( ${CMAKE_CURRENT_SOURCE_DIR} ) -add_subdirectory(create_thumbnail_from_mp4_video) \ No newline at end of file +add_subdirectory(create_thumbnail_from_mp4_video) +add_subdirectory(play_mp4_from_begining) diff --git a/samples/play_mp4_from_begining/CMakeLists.txt b/samples/play_mp4_from_begining/CMakeLists.txt new file mode 100644 index 000000000..8c825c4c8 --- /dev/null +++ b/samples/play_mp4_from_begining/CMakeLists.txt @@ -0,0 +1,52 @@ +set(TARGET play_mp4_from_begining) +SET(CORE_FILES + play_mp4_video_from_begining.cpp +) +SET(CORE_FILES_H + PlayMp4VideoFromBegining.h +) +SET(SOURCE + ${CORE_FILES} + ${CORE_FILES_H} +) +set(UT_FILE + test_play_mp4_video_from_begining.cpp +) +add_library(example_aprapipes1 STATIC ${SOURCE}) +add_executable(${TARGET} ${UT_FILE}) +target_include_directories ( example_aprapipes1 PRIVATE + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) +target_include_directories ( ${TARGET} PUBLIC + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) +target_link_libraries( + ${TARGET} + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + example_aprapipes1 + ) \ No newline at end of file diff --git a/samples/play_mp4_from_begining/PlayMp4VideoFromBeginning.h 
b/samples/play_mp4_from_begining/PlayMp4VideoFromBeginning.h new file mode 100644 index 000000000..f831aa0cc --- /dev/null +++ b/samples/play_mp4_from_begining/PlayMp4VideoFromBeginning.h @@ -0,0 +1,17 @@ +#include +#include "Mp4ReaderSource.h" +#include "ImageViewerModule.h" + +class PlayMp4VideoFromBeginning { +public: + PlayMp4VideoFromBeginning(); + bool setUpPipeLine(); + bool startPipeLine(); + bool stopPipeLine(); + bool flushQueuesAndSeek(); + +private: + PipeLine pipeLine; + boost::shared_ptr mp4Reader; + boost::shared_ptr sink; +}; \ No newline at end of file diff --git a/samples/play_mp4_from_begining/play_mp4_video_from_begining.cpp b/samples/play_mp4_from_begining/play_mp4_video_from_begining.cpp new file mode 100644 index 000000000..844b20ed2 --- /dev/null +++ b/samples/play_mp4_from_begining/play_mp4_video_from_begining.cpp @@ -0,0 +1,97 @@ + +#include "PlayMp4VideoFromBeginning.h" +#include "FrameMetadata.h" +#include "H264Metadata.h" +#include "Mp4ReaderSource.h" +#include "Mp4VideoMetadata.h" +#include "H264Decoder.h" +#include "ValveModule.h" +#include "ColorConversionXForm.h" +#include "CudaMemCopy.h" +#include "FileWriterModule.h" +#include "JPEGEncoderNVJPEG.h" +#include "ImageViewerModule.h" + + +PlayMp4VideoFromBeginning::PlayMp4VideoFromBeginning() : pipeLine("pipeline") { + +} +bool PlayMp4VideoFromBeginning::setUpPipeLine() { + // Implementation + std::string videoPath = "../.././data/Mp4_videos/h264_video_metadata/20230514/0011/1686723796848.mp4"; + std::string outPath = "../.././data/mp4Reader_saveOrCompare/h264/frame_000???.jpg"; + + bool parseFS = false; + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + auto frameType = FrameMetadata::FrameType::H264_DATA; + auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, parseFS, 0, true, true, false); + mp4ReaderProps.fps = 24; + mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); + mp4Reader->addOutPutPin(h264ImageMetadata); + + auto mp4Metadata = 
framemetadata_sp(new Mp4VideoMetadata("v_1")); + mp4Reader->addOutPutPin(mp4Metadata); + + std::vector mImagePin; + mImagePin = mp4Reader->getAllOutputPinsByType(frameType); + + auto decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); + mp4Reader->setNext(decoder, mImagePin); + + auto conversionType = ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; + auto metadata = framemetadata_sp(new RawImagePlanarMetadata(1280, 720, ImageMetadata::ImageType::YUV420, size_t(0), CV_8U)); + auto colorchange = boost::shared_ptr(new ColorConversion(ColorConversionProps(conversionType))); + decoder->setNext(colorchange); + + sink = boost::shared_ptr( + new ImageViewerModule(ImageViewerModuleProps("imageview"))); + colorchange->setNext(sink); + + pipeLine.appendModule(mp4Reader); + pipeLine.init(); + return true; +} + +bool PlayMp4VideoFromBeginning::startPipeLine() { + pipeLine.run_all_threaded(); + return true; +} + +bool PlayMp4VideoFromBeginning::stopPipeLine() { + pipeLine.stop(); + pipeLine.term(); + pipeLine.wait_for_all(); + return true; +} + +bool PlayMp4VideoFromBeginning::flushQueuesAndSeek() { + pipeLine.flushAllQueues(); + mp4Reader->randomSeek(1686723796848, false); + return true; +} + +int main() { + PlayMp4VideoFromBeginning pipelineInstance; + if (!pipelineInstance.setUpPipeLine()) { + std::cerr << "Failed to setup pipeline." << std::endl; + return 1; + } + if (!pipelineInstance.startPipeLine()) { + std::cerr << "Failed to start pipeline." << std::endl; + return 1; // Or any error code indicating failure + } + // Wait for the pipeline to run for 10 seconds + boost::this_thread::sleep_for(boost::chrono::seconds(3)); + if (!pipelineInstance.flushQueuesAndSeek()) { + std::cerr << "Failed to flush Queues." << std::endl; + return 1; + } + boost::this_thread::sleep_for(boost::chrono::seconds(5)); + // Stop the pipeline + if (!pipelineInstance.stopPipeLine()) { + std::cerr << "Failed to stop pipeline." 
<< std::endl; + return 1; // Or any error code indicating failure + } + + return 0; // Or any success code +} diff --git a/samples/play_mp4_from_begining/test_play_mp4_video_from_begining.cpp b/samples/play_mp4_from_begining/test_play_mp4_video_from_begining.cpp new file mode 100644 index 000000000..5dc3d8c5e --- /dev/null +++ b/samples/play_mp4_from_begining/test_play_mp4_video_from_begining.cpp @@ -0,0 +1,18 @@ +#include + +#include "PlayMp4VideoFromBeginning.h" + +BOOST_AUTO_TEST_SUITE(start_video_from_beginning) + +BOOST_AUTO_TEST_CASE(start_from_beginning) +{ + auto playMp4VideoFromBeginning = boost::shared_ptr(new PlayMp4VideoFromBeginning()); + playMp4VideoFromBeginning->setUpPipeLine(); + playMp4VideoFromBeginning->startPipeLine(); + + boost::this_thread::sleep_for(boost::chrono::seconds(10)); + + playMp4VideoFromBeginning->stopPipeLine(); +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From f1d8cc0690651e36e79dfe0d2f43fdf5281c4f9e Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Fri, 17 May 2024 16:21:45 +0530 Subject: [PATCH 13/49] refactored CMake list and renamed files --- samples/CMakeLists.txt | 2 +- .../create_thumbnail_from_mp4_video/CMakeLists.txt | 6 +++--- .../CMakeLists.txt | 14 +++++++------- .../PlayMp4VideoFromBeginning.h | 0 .../play_mp4_video_from_beginning.cpp} | 0 .../test_play_mp4_video_from_beginning.cpp} | 0 6 files changed, 11 insertions(+), 11 deletions(-) rename samples/{play_mp4_from_begining => play_mp4_from_beginning}/CMakeLists.txt (73%) rename samples/{play_mp4_from_begining => play_mp4_from_beginning}/PlayMp4VideoFromBeginning.h (100%) rename samples/{play_mp4_from_begining/play_mp4_video_from_begining.cpp => play_mp4_from_beginning/play_mp4_video_from_beginning.cpp} (100%) rename samples/{play_mp4_from_begining/test_play_mp4_video_from_begining.cpp => play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp} (100%) diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index e4364c078..9da737abf 
100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -8,4 +8,4 @@ ${CMAKE_CURRENT_SOURCE_DIR} ) add_subdirectory(create_thumbnail_from_mp4_video) -add_subdirectory(play_mp4_from_begining) +add_subdirectory(play_mp4_from_beginning) diff --git a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt index f30f87f9f..23aee9cfa 100644 --- a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt +++ b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt @@ -12,9 +12,9 @@ SET(SOURCE set(UT_FILE test_generate_thumbnail_from_mp4_video.cpp ) -add_library(example_aprapipes STATIC ${SOURCE}) +add_library(create_thumbnail_from_mp4_video_source STATIC ${SOURCE}) add_executable(${TARGET} ${UT_FILE}) -target_include_directories ( example_aprapipes PRIVATE +target_include_directories ( create_thumbnail_from_mp4_video_source PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} @@ -48,5 +48,5 @@ target_link_libraries( ${NVCODEC_LIB} ${NVJPEGLIB_L4T} ${CURSES_LIBRARIES} - example_aprapipes + create_thumbnail_from_mp4_video_source ) \ No newline at end of file diff --git a/samples/play_mp4_from_begining/CMakeLists.txt b/samples/play_mp4_from_beginning/CMakeLists.txt similarity index 73% rename from samples/play_mp4_from_begining/CMakeLists.txt rename to samples/play_mp4_from_beginning/CMakeLists.txt index 8c825c4c8..43e1b3063 100644 --- a/samples/play_mp4_from_begining/CMakeLists.txt +++ b/samples/play_mp4_from_beginning/CMakeLists.txt @@ -1,20 +1,20 @@ -set(TARGET play_mp4_from_begining) +set(TARGET play_mp4_from_beginning) SET(CORE_FILES - play_mp4_video_from_begining.cpp + play_mp4_video_from_beginning.cpp ) SET(CORE_FILES_H - PlayMp4VideoFromBegining.h + PlayMp4VideoFromBeginning.h ) SET(SOURCE ${CORE_FILES} ${CORE_FILES_H} ) set(UT_FILE - test_play_mp4_video_from_begining.cpp + test_play_mp4_video_from_beginning.cpp ) -add_library(example_aprapipes1 STATIC ${SOURCE}) 
+add_library(play_mp4_from_beginning_source STATIC ${SOURCE}) add_executable(${TARGET} ${UT_FILE}) -target_include_directories ( example_aprapipes1 PRIVATE +target_include_directories ( play_mp4_from_beginning_source PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} @@ -48,5 +48,5 @@ target_link_libraries( ${NVCODEC_LIB} ${NVJPEGLIB_L4T} ${CURSES_LIBRARIES} - example_aprapipes1 + play_mp4_from_beginning_source ) \ No newline at end of file diff --git a/samples/play_mp4_from_begining/PlayMp4VideoFromBeginning.h b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h similarity index 100% rename from samples/play_mp4_from_begining/PlayMp4VideoFromBeginning.h rename to samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h diff --git a/samples/play_mp4_from_begining/play_mp4_video_from_begining.cpp b/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp similarity index 100% rename from samples/play_mp4_from_begining/play_mp4_video_from_begining.cpp rename to samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp diff --git a/samples/play_mp4_from_begining/test_play_mp4_video_from_begining.cpp b/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp similarity index 100% rename from samples/play_mp4_from_begining/test_play_mp4_video_from_begining.cpp rename to samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp From da51b44eddbdd641b32a74831cf89c3a9c49d1de Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 20 May 2024 13:18:56 +0530 Subject: [PATCH 14/49] using resolved time stamp when seeking --- base/src/Mp4ReaderSource.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/base/src/Mp4ReaderSource.cpp b/base/src/Mp4ReaderSource.cpp index e67b26624..94e272f49 100644 --- a/base/src/Mp4ReaderSource.cpp +++ b/base/src/Mp4ReaderSource.cpp @@ -571,20 +571,20 @@ class Mp4ReaderDetailAbs { int seekedToFrame = -1; uint64_t skipMsecsInFile = 
0; - - if (!mState.startTimeStampFromFile) + + if (!mState.resolvedStartingTS) { LOG_ERROR << "Start timestamp is not saved in the file. Can't support seeking with timestamps."; return false; } - if (skipTS < mState.startTimeStampFromFile) + if (skipTS < mState.resolvedStartingTS) { LOG_INFO << "seek time outside range. Seeking to start of video."; skipMsecsInFile = 0; } else { - skipMsecsInFile = skipTS - mState.startTimeStampFromFile; + skipMsecsInFile = skipTS - mState.resolvedStartingTS; } LOG_INFO << "Attempting seek <" << mState.mVideoPath << "> @skipMsecsInFile <" << skipMsecsInFile << ">"; From ad0752f704751b07f200d712b68243b90031fe58 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Mon, 20 May 2024 16:58:24 +0530 Subject: [PATCH 15/49] Fixed the ghost channel issue in MotionVectorExtractor. --- base/src/MotionVectorExtractor.cpp | 35 +++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index 25d25eee6..52f920691 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -332,25 +332,40 @@ void DetailOpenH264::getMotionVectors(frame_container& frames, frame_sp& outFram } if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && (mMotionVectorSize != mWidth * mHeight * 8) && motionFound == true) { + int rowIndex; + unsigned char *pPtr = NULL; decodedFrame = makeFrameWithPinId(mHeight * 3 * mWidth, rawFramePinId); - uint8_t* yuvImagePtr = (uint8_t*)malloc(mHeight * 1.5 * pDstInfo.UsrData.sSystemBuffer.iStride[0]); + uint8_t* yuvImagePtr = (uint8_t*)malloc(mHeight * 1.5 * mWidth); auto yuvStartPointer = yuvImagePtr; - unsigned char* pY = pDstInfo.pDst[0]; - memcpy(yuvImagePtr, pY, pDstInfo.UsrData.sSystemBuffer.iStride[0] * mHeight); - unsigned char* pU = pDstInfo.pDst[1]; - yuvImagePtr += pDstInfo.UsrData.sSystemBuffer.iStride[0] * mHeight; - memcpy(yuvImagePtr, pU, 
pDstInfo.UsrData.sSystemBuffer.iStride[1] * mHeight / 2); - unsigned char* pV = pDstInfo.pDst[2]; - yuvImagePtr += pDstInfo.UsrData.sSystemBuffer.iStride[1] * mHeight / 2; - memcpy(yuvImagePtr, pV, pDstInfo.UsrData.sSystemBuffer.iStride[1] * mHeight / 2); + pPtr = pData[0]; + for (rowIndex = 0; rowIndex < mHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, mWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[0]; + yuvImagePtr += mWidth; + } + int halfHeight = mHeight / 2; + int halfWidth = mWidth / 2; + pPtr = pData[1]; + for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, halfWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; + yuvImagePtr += halfWidth; + } + pPtr = pData[2]; + for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, halfWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; + yuvImagePtr += halfWidth; + } - cv::Mat yuvImgCV = cv::Mat(mHeight + mHeight / 2, mWidth, CV_8UC1, yuvStartPointer, pDstInfo.UsrData.sSystemBuffer.iStride[0]); + cv::Mat yuvImgCV = cv::Mat(mHeight + mHeight / 2, mWidth, CV_8UC1, yuvStartPointer, mWidth); bgrImg.data = static_cast(decodedFrame->data()); cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); frames.insert(make_pair(rawFramePinId, decodedFrame)); } } + MotionVectorExtractor::MotionVectorExtractor(MotionVectorExtractorProps props) : Module(TRANSFORM, "MotionVectorExtractor", props) { if (props.MVExtract == MotionVectorExtractorProps::MVExtractMethod::FFMPEG) From 005cd59d27b4df1f162db89a6b6857ef744744ba Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Mon, 20 May 2024 19:02:15 +0530 Subject: [PATCH 16/49] revert vcpkg. 
--- base/vcpkg.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/vcpkg.json b/base/vcpkg.json index 4df4664c0..a8342901f 100644 --- a/base/vcpkg.json +++ b/base/vcpkg.json @@ -2,7 +2,7 @@ "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg/master/scripts/vcpkg.schema.json", "name": "apra-pipes-cuda", "version": "0.0.1", - "builtin-baseline": "eac79fc7bda260819c646d10c97dec825305aecd", + "builtin-baseline": "7754d62d19501a3bb4e2d4f2eab80e8de9703e41", "dependencies": [ { "name": "whisper", From e9ccde51bc2f5a4cc7eab660a01e7ece9e9a7ba9 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 21 May 2024 19:13:52 +0530 Subject: [PATCH 17/49] changes. --- base/include/MotionVectorExtractor.h | 3 +- base/src/MotionVectorExtractor.cpp | 850 +++++++++--------- .../timelapse-sample/timelapse_summary.cpp | 79 +- samples/timelapse-sample/timelapse_summary.h | 1 + .../timelapse_summary_test.cpp | 6 +- 5 files changed, 481 insertions(+), 458 deletions(-) diff --git a/base/include/MotionVectorExtractor.h b/base/include/MotionVectorExtractor.h index e0e11af3f..77744f553 100644 --- a/base/include/MotionVectorExtractor.h +++ b/base/include/MotionVectorExtractor.h @@ -21,7 +21,7 @@ class MotionVectorExtractorProps : public ModuleProps size_t getSerializeSize() { - return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + sizeof(motionVectorThreshold); + return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + sizeof(motionVectorThreshold) + sizeof(sendOverlayFrame); } bool sendDecodedFrame = false; bool sendOverlayFrame = false; @@ -36,6 +36,7 @@ class MotionVectorExtractorProps : public ModuleProps ar& boost::serialization::base_object(*this); ar& sendDecodedFrame; ar& motionVectorThreshold; + ar& sendOverlayFrame; } }; diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index 52f920691..236a33a16 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ 
-1,477 +1,477 @@ -#include #include -extern "C" -{ -#include +#include +extern "C" { #include #include #include -#include +#include #include +#include } -#include "MotionVectorExtractor.h" #include "H264Metadata.h" #include "H264ParserUtils.h" -#include "Utils.h" +#include "MotionVectorExtractor.h" #include "Overlay.h" +#include "Utils.h" + +class MvExtractDetailAbs { +public: + MvExtractDetailAbs( + MotionVectorExtractorProps props, + std::function _makeFrameWithPinId, + std::function + _makeframe) { + makeFrameWithPinId = _makeFrameWithPinId; + makeframe = _makeframe; + sendDecodedFrame = props.sendDecodedFrame; + sendOverlayFrame = props.sendOverlayFrame; + threshold = props.motionVectorThreshold; + }; + ~MvExtractDetailAbs() {} + virtual void setProps(MotionVectorExtractorProps props) { + sendDecodedFrame = props.sendDecodedFrame; + sendOverlayFrame = props.sendOverlayFrame; + } + virtual void getMotionVectors(frame_container &frames, frame_sp &outFrame, + frame_sp &decodedFrame) = 0; + virtual void initDecoder() = 0; -class MvExtractDetailAbs -{ public: - MvExtractDetailAbs(MotionVectorExtractorProps props, std::function _makeFrameWithPinId, std::function _makeframe) - { - makeFrameWithPinId = _makeFrameWithPinId; - makeframe = _makeframe; - sendDecodedFrame = props.sendDecodedFrame; - sendOverlayFrame = props.sendOverlayFrame; - threshold = props.motionVectorThreshold; - }; - ~MvExtractDetailAbs() - { - } - virtual void setProps(MotionVectorExtractorProps props) - { - sendDecodedFrame = props.sendDecodedFrame; - sendOverlayFrame = props.sendOverlayFrame; - } - virtual void getMotionVectors(frame_container& frames, frame_sp& outFrame, frame_sp& decodedFrame) = 0; - virtual void initDecoder() = 0; -public: - int mWidth = 0; - int mHeight = 0; - std::string rawFramePinId; - std::string motionVectorPinId; - std::function makeframe; - std::function makeFrameWithPinId; - bool sendDecodedFrame = false; - bool sendOverlayFrame = true; - int threshold; - cv::Mat 
bgrImg; - bool motionFound = false; + int mWidth = 0; + int mHeight = 0; + std::string rawFramePinId; + std::string motionVectorPinId; + std::function + makeframe; + std::function makeFrameWithPinId; + bool sendDecodedFrame = false; + bool sendOverlayFrame = true; + int threshold; + cv::Mat bgrImg; + bool motionFound = false; }; -class DetailFfmpeg : public MvExtractDetailAbs -{ +class DetailFfmpeg : public MvExtractDetailAbs { public: - DetailFfmpeg(MotionVectorExtractorProps props, std::function _makeFrameWithPinId, std::function _makeframe) : MvExtractDetailAbs(props, _makeFrameWithPinId, _makeframe) {} - ~DetailFfmpeg() - { - avcodec_free_context(&decoderContext); - } - void getMotionVectors(frame_container& frames, frame_sp& outFrame, frame_sp& decodedFrame); - void initDecoder(); - int decodeAndGetMotionVectors(AVPacket* pkt, frame_container& frames, frame_sp& outFrame, frame_sp& decodedFrame); + DetailFfmpeg( + MotionVectorExtractorProps props, + std::function _makeFrameWithPinId, + std::function + _makeframe) + : MvExtractDetailAbs(props, _makeFrameWithPinId, _makeframe) {} + ~DetailFfmpeg() { avcodec_free_context(&decoderContext); } + void getMotionVectors(frame_container &frames, frame_sp &outFrame, + frame_sp &decodedFrame); + void initDecoder(); + int decodeAndGetMotionVectors(AVPacket *pkt, frame_container &frames, + frame_sp &outFrame, frame_sp &decodedFrame); + private: - AVFrame* avFrame = NULL; - AVCodecContext* decoderContext = NULL; + AVFrame *avFrame = NULL; + AVCodecContext *decoderContext = NULL; }; -class DetailOpenH264 : public MvExtractDetailAbs -{ +class DetailOpenH264 : public MvExtractDetailAbs { public: - DetailOpenH264(MotionVectorExtractorProps props, std::function _makeFrameWithPinId, std::function _makeframe) : MvExtractDetailAbs(props, _makeFrameWithPinId, _makeframe) {} - ~DetailOpenH264() { - if (pDecoder) { - pDecoder->Uninitialize(); - WelsDestroyDecoder(pDecoder); - } - } - void getMotionVectors(frame_container& frames, 
frame_sp& outFrame, frame_sp& decodedFrame); - void initDecoder(); + DetailOpenH264( + MotionVectorExtractorProps props, + std::function _makeFrameWithPinId, + std::function + _makeframe) + : MvExtractDetailAbs(props, _makeFrameWithPinId, _makeframe) {} + ~DetailOpenH264() { + if (pDecoder) { + pDecoder->Uninitialize(); + WelsDestroyDecoder(pDecoder); + } + } + void getMotionVectors(frame_container &frames, frame_sp &outFrame, + frame_sp &decodedFrame); + void initDecoder(); + private: - ISVCDecoder* pDecoder; - SBufferInfo pDstInfo; - SDecodingParam sDecParam; - SParserBsInfo parseInfo; + ISVCDecoder *pDecoder; + SBufferInfo pDstInfo; + SDecodingParam sDecParam; + SParserBsInfo parseInfo; }; -void DetailFfmpeg::initDecoder() -{ - int ret; - AVCodec* dec = NULL; - AVDictionary* opts = NULL; - dec = avcodec_find_decoder(AV_CODEC_ID_H264); - decoderContext = avcodec_alloc_context3(dec); - if (!decoderContext) - { - throw AIPException(AIP_FATAL, "Failed to allocate codec"); - } - /* Init the decoder */ - av_dict_set(&opts, "flags2", "+export_mvs", 0); - ret = avcodec_open2(decoderContext, dec, &opts); - av_dict_free(&opts); - if (ret < 0) - { - throw AIPException(AIP_FATAL, "failed open decoder"); - } +void DetailFfmpeg::initDecoder() { + int ret; + AVCodec *dec = NULL; + AVDictionary *opts = NULL; + dec = avcodec_find_decoder(AV_CODEC_ID_H264); + decoderContext = avcodec_alloc_context3(dec); + if (!decoderContext) { + throw AIPException(AIP_FATAL, "Failed to allocate codec"); + } + /* Init the decoder */ + av_dict_set(&opts, "flags2", "+export_mvs", 0); + ret = avcodec_open2(decoderContext, dec, &opts); + av_dict_free(&opts); + if (ret < 0) { + throw AIPException(AIP_FATAL, "failed open decoder"); + } } -void DetailFfmpeg::getMotionVectors(frame_container& frames, frame_sp& outFrame, frame_sp& decodedFrame) -{ - int ret = 0; - AVPacket* pkt = NULL; - avFrame = av_frame_alloc(); - if (!avFrame) - { - LOG_ERROR << "Could not allocate frame\n"; - } - pkt = 
av_packet_alloc(); - if (!pkt) - { - LOG_ERROR << "Could not allocate AVPacket\n"; - } - ret = decodeAndGetMotionVectors(pkt, frames, outFrame, decodedFrame); - av_packet_free(&pkt); - av_frame_free(&avFrame); +void DetailFfmpeg::getMotionVectors(frame_container &frames, frame_sp &outFrame, + frame_sp &decodedFrame) { + int ret = 0; + AVPacket *pkt = NULL; + avFrame = av_frame_alloc(); + if (!avFrame) { + LOG_ERROR << "Could not allocate frame\n"; + } + pkt = av_packet_alloc(); + if (!pkt) { + LOG_ERROR << "Could not allocate AVPacket\n"; + } + ret = decodeAndGetMotionVectors(pkt, frames, outFrame, decodedFrame); + av_packet_free(&pkt); + av_frame_free(&avFrame); } -int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket* pkt, frame_container& frames, frame_sp& outFrame, frame_sp& decodedFrame) -{ - auto inFrame = frames.begin()->second; - pkt->data = (uint8_t*)inFrame->data(); - pkt->size = (int)inFrame->size(); - int ret = avcodec_send_packet(decoderContext, pkt); - if (ret < 0) - { - LOG_ERROR << stderr << "Error while sending a packet to the decoder: %s\n"; - return ret; - } - while (ret >= 0) - { - ret = avcodec_receive_frame(decoderContext, avFrame); - if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) - { - outFrame = makeFrameWithPinId(0, motionVectorPinId); - break; - } - else if (ret < 0) - { - LOG_ERROR << stderr << "Error while receiving a frame from the decoder: %s\n"; - return ret; - } - if (sendDecodedFrame) - { - SwsContext* sws_context = sws_getContext( - decoderContext->width, decoderContext->height, decoderContext->pix_fmt, - decoderContext->width, decoderContext->height, AV_PIX_FMT_BGR24, - SWS_BICUBIC | SWS_FULL_CHR_H_INT, NULL, NULL, NULL); - if (!sws_context) { - // Handle error - } - decodedFrame = makeFrameWithPinId(mWidth * mHeight * 3, rawFramePinId); - int dstStrides[AV_NUM_DATA_POINTERS]; - dstStrides[0] = decoderContext->width * 3; // Assuming BGR format - uint8_t* dstData[AV_NUM_DATA_POINTERS]; - dstData[0] = 
static_cast(decodedFrame->data()); - sws_scale(sws_context, avFrame->data, avFrame->linesize, 0, decoderContext->height, dstData, dstStrides); - frames.insert(make_pair(rawFramePinId, decodedFrame)); - } - if (ret >= 0) - { - AVFrameSideData* sideData; - sideData = av_frame_get_side_data(avFrame, AV_FRAME_DATA_MOTION_VECTORS); - if (sideData) - { - std::vector lineOverlays; - CompositeOverlay compositeOverlay; +int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket *pkt, + frame_container &frames, + frame_sp &outFrame, + frame_sp &decodedFrame) { + auto inFrame = frames.begin()->second; + pkt->data = (uint8_t *)inFrame->data(); + pkt->size = (int)inFrame->size(); + int ret = avcodec_send_packet(decoderContext, pkt); + if (ret < 0) { + LOG_ERROR << stderr << "Error while sending a packet to the decoder: %s\n"; + return ret; + } + while (ret >= 0) { + ret = avcodec_receive_frame(decoderContext, avFrame); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { + outFrame = makeFrameWithPinId(0, motionVectorPinId); + break; + } else if (ret < 0) { + LOG_ERROR << stderr + << "Error while receiving a frame from the decoder: %s\n"; + return ret; + } + if (sendDecodedFrame) { + SwsContext *sws_context = + sws_getContext(decoderContext->width, decoderContext->height, + decoderContext->pix_fmt, decoderContext->width, + decoderContext->height, AV_PIX_FMT_BGR24, + SWS_BICUBIC | SWS_FULL_CHR_H_INT, NULL, NULL, NULL); + if (!sws_context) { + // Handle error + } + decodedFrame = makeFrameWithPinId(mWidth * mHeight * 3, rawFramePinId); + int dstStrides[AV_NUM_DATA_POINTERS]; + dstStrides[0] = decoderContext->width * 3; // Assuming BGR format + uint8_t *dstData[AV_NUM_DATA_POINTERS]; + dstData[0] = static_cast(decodedFrame->data()); + sws_scale(sws_context, avFrame->data, avFrame->linesize, 0, + decoderContext->height, dstData, dstStrides); + frames.insert(make_pair(rawFramePinId, decodedFrame)); + } + if (ret >= 0) { + AVFrameSideData *sideData; + sideData = 
av_frame_get_side_data(avFrame, AV_FRAME_DATA_MOTION_VECTORS); + if (sideData) { + std::vector lineOverlays; + CompositeOverlay compositeOverlay; - const AVMotionVector* mvs = (const AVMotionVector*)sideData->data; - for (int i = 0; i < sideData->size / sizeof(*mvs); i++) - { - const AVMotionVector* mv = &mvs[i]; + const AVMotionVector *mvs = (const AVMotionVector *)sideData->data; + for (int i = 0; i < sideData->size / sizeof(*mvs); i++) { + const AVMotionVector *mv = &mvs[i]; - if (sendOverlayFrame == true && std::abs(mv->motion_x) > threshold || std::abs(mv->motion_y) > threshold) - { - LineOverlay lineOverlay; - lineOverlay.x1 = mv->src_x; - lineOverlay.y1 = mv->src_y; - lineOverlay.x2 = mv->dst_x; - lineOverlay.y2 = mv->dst_y; + if (sendOverlayFrame == true && std::abs(mv->motion_x) > threshold || + std::abs(mv->motion_y) > threshold) { + LineOverlay lineOverlay; + lineOverlay.x1 = mv->src_x; + lineOverlay.y1 = mv->src_y; + lineOverlay.x2 = mv->dst_x; + lineOverlay.y2 = mv->dst_y; - lineOverlays.push_back(lineOverlay); + lineOverlays.push_back(lineOverlay); - motionFound = true; - } - } + motionFound = true; + } + } - for (auto& lineOverlay : lineOverlays) { - compositeOverlay.add(&lineOverlay); - } + for (auto &lineOverlay : lineOverlays) { + compositeOverlay.add(&lineOverlay); + } - if (lineOverlays.size()) - { - DrawingOverlay drawingOverlay; - drawingOverlay.add(&compositeOverlay); - auto serializedSize = drawingOverlay.mGetSerializeSize(); - outFrame = makeFrameWithPinId(serializedSize, motionVectorPinId); - memcpy(outFrame->data(), sideData->data, serializedSize); - drawingOverlay.serialize(outFrame); - frames.insert(make_pair(motionVectorPinId, outFrame)); - } - } - else - { - outFrame = makeFrameWithPinId(0, motionVectorPinId); - } - if (sendOverlayFrame == false) { - const AVMotionVector* mvs = (const AVMotionVector*)sideData->data; - for (int i = 0; i < sideData->size / sizeof(*mvs); i++) - { - const AVMotionVector* mv = &mvs[i]; + if 
(lineOverlays.size()) { + DrawingOverlay drawingOverlay; + drawingOverlay.add(&compositeOverlay); + auto serializedSize = drawingOverlay.mGetSerializeSize(); + outFrame = makeFrameWithPinId(serializedSize, motionVectorPinId); + memcpy(outFrame->data(), sideData->data, serializedSize); + drawingOverlay.serialize(outFrame); + frames.insert(make_pair(motionVectorPinId, outFrame)); + } + } else { + outFrame = makeFrameWithPinId(0, motionVectorPinId); + } + if (sendOverlayFrame == false) { + const AVMotionVector *mvs = (const AVMotionVector *)sideData->data; + for (int i = 0; i < sideData->size / sizeof(*mvs); i++) { + const AVMotionVector *mv = &mvs[i]; - if (std::abs(mv->motion_x) > threshold || std::abs(mv->motion_y) > threshold) - { - motionFound = true; - break; - } - } - } - av_packet_unref(pkt); - av_frame_unref(avFrame); - return 0; - } - } - return 0; + if (std::abs(mv->motion_x) > threshold || + std::abs(mv->motion_y) > threshold) { + motionFound = true; + break; + } + } + } + av_packet_unref(pkt); + av_frame_unref(avFrame); + return 0; + } + } + return 0; } -void DetailOpenH264::initDecoder() -{ - sDecParam = { 0 }; - if (!sendDecodedFrame) { - sDecParam.bParseOnly = true; - } - else { - sDecParam.bParseOnly = false; - } - memset(&pDstInfo, 0, sizeof(SBufferInfo)); - pDstInfo.uiInBsTimeStamp = 0; +void DetailOpenH264::initDecoder() { + sDecParam = {0}; + if (!sendDecodedFrame) { + sDecParam.bParseOnly = true; + } else { + sDecParam.bParseOnly = false; + } + memset(&pDstInfo, 0, sizeof(SBufferInfo)); + pDstInfo.uiInBsTimeStamp = 0; - if (WelsCreateDecoder(&pDecoder) || (NULL == pDecoder)) { - LOG_ERROR << "Create Decoder failed.\n"; - } - if (pDecoder->Initialize(&sDecParam)) { - LOG_ERROR << "Decoder initialization failed.\n"; - } + if (WelsCreateDecoder(&pDecoder) || (NULL == pDecoder)) { + LOG_ERROR << "Create Decoder failed.\n"; + } + if (pDecoder->Initialize(&sDecParam)) { + LOG_ERROR << "Decoder initialization failed.\n"; + } } -void 
DetailOpenH264::getMotionVectors(frame_container& frames, frame_sp& outFrame, frame_sp& decodedFrame) -{ - motionFound = false; - uint8_t* pData[3] = { NULL }; - pData[0] = NULL; - pData[1] = NULL; - pData[2] = NULL; - auto h264Frame = frames.begin()->second; +void DetailOpenH264::getMotionVectors(frame_container &frames, + frame_sp &outFrame, + frame_sp &decodedFrame) { + motionFound = false; + uint8_t *pData[3] = {NULL}; + pData[0] = NULL; + pData[1] = NULL; + pData[2] = NULL; + auto h264Frame = frames.begin()->second; - unsigned char* pSrc = static_cast(h264Frame->data()); - int iSrcLen = h264Frame->size(); - unsigned char** ppDst = pData; - int32_t mMotionVectorSize = mWidth * mHeight * 8; - int16_t* mMotionVectorData = nullptr; - memset(&pDstInfo, 0, sizeof(SBufferInfo)); - if (sendOverlayFrame) - { - outFrame = makeFrameWithPinId(mMotionVectorSize,motionVectorPinId); - mMotionVectorData = static_cast(outFrame->data()); - } - else - { - mMotionVectorData = static_cast(malloc(mMotionVectorSize * sizeof(int16_t))); - } + unsigned char *pSrc = static_cast(h264Frame->data()); + int iSrcLen = h264Frame->size(); + unsigned char **ppDst = pData; + int32_t mMotionVectorSize = mWidth * mHeight * 8; + int16_t *mMotionVectorData = nullptr; + memset(&pDstInfo, 0, sizeof(SBufferInfo)); + if (sendOverlayFrame) { + outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); + mMotionVectorData = static_cast(outFrame->data()); + } else { + mMotionVectorData = + static_cast(malloc(mMotionVectorSize * sizeof(int16_t))); + } - if (sDecParam.bParseOnly) - { - pDecoder->ParseBitstreamGetMotionVectors(pSrc, iSrcLen, ppDst, &parseInfo, &pDstInfo, &mMotionVectorSize, &mMotionVectorData); - } - else - { - pDecoder->DecodeFrameGetMotionVectorsNoDelay(pSrc, iSrcLen, ppDst, &pDstInfo, &mMotionVectorSize, &mMotionVectorData); - } + if (sDecParam.bParseOnly) { + pDecoder->ParseBitstreamGetMotionVectors(pSrc, iSrcLen, ppDst, &parseInfo, + &pDstInfo, &mMotionVectorSize, + 
&mMotionVectorData); + } else { + pDecoder->DecodeFrameGetMotionVectorsNoDelay(pSrc, iSrcLen, ppDst, + &pDstInfo, &mMotionVectorSize, + &mMotionVectorData); + } - if (mMotionVectorSize != mWidth * mHeight * 8 && sendOverlayFrame == true) - { - std::vector circleOverlays; - CompositeOverlay compositeOverlay; + if (mMotionVectorSize != mWidth * mHeight * 8 && sendOverlayFrame == true) { + std::vector circleOverlays; + CompositeOverlay compositeOverlay; - for (int i = 0; i < mMotionVectorSize; i += 4) - { - auto motionX = mMotionVectorData[i]; - auto motionY = mMotionVectorData[i + 1]; - if (abs(motionX) > threshold || abs(motionY) > threshold) - { - CircleOverlay circleOverlay; - circleOverlay.x1 = mMotionVectorData[i + 2]; - circleOverlay.y1 = mMotionVectorData[i + 3]; - circleOverlay.radius = 1; + for (int i = 0; i < mMotionVectorSize; i += 4) { + auto motionX = mMotionVectorData[i]; + auto motionY = mMotionVectorData[i + 1]; + if (abs(motionX) > threshold || abs(motionY) > threshold) { + CircleOverlay circleOverlay; + circleOverlay.x1 = mMotionVectorData[i + 2]; + circleOverlay.y1 = mMotionVectorData[i + 3]; + circleOverlay.radius = 1; - circleOverlays.push_back(circleOverlay); - motionFound = true; - } - } + circleOverlays.push_back(circleOverlay); + motionFound = true; + } + } - for (auto& circleOverlay : circleOverlays) { - compositeOverlay.add(&circleOverlay); - } + for (auto &circleOverlay : circleOverlays) { + compositeOverlay.add(&circleOverlay); + } - if (circleOverlays.size()) - { - DrawingOverlay drawingOverlay; - drawingOverlay.add(&compositeOverlay); - auto mvSize = drawingOverlay.mGetSerializeSize(); - outFrame = makeframe(outFrame, mvSize, motionVectorPinId); - drawingOverlay.serialize(outFrame); - frames.insert(make_pair(motionVectorPinId, outFrame)); - } - } - if(sendOverlayFrame == false){ - for (int i = 0; i < mMotionVectorSize; i += 4) - { - auto motionX = mMotionVectorData[i]; - auto motionY = mMotionVectorData[i + 1]; - if (abs(motionX) > 
threshold || abs(motionY) > threshold) - { - motionFound = true; - break; - } - } - } - if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && (mMotionVectorSize != mWidth * mHeight * 8) && motionFound == true) - { - int rowIndex; - unsigned char *pPtr = NULL; - decodedFrame = makeFrameWithPinId(mHeight * 3 * mWidth, rawFramePinId); - uint8_t* yuvImagePtr = (uint8_t*)malloc(mHeight * 1.5 * mWidth); - auto yuvStartPointer = yuvImagePtr; - pPtr = pData[0]; - for (rowIndex = 0; rowIndex < mHeight; rowIndex++) { - memcpy(yuvImagePtr, pPtr, mWidth); - pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[0]; - yuvImagePtr += mWidth; - } - int halfHeight = mHeight / 2; - int halfWidth = mWidth / 2; - pPtr = pData[1]; - for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { - memcpy(yuvImagePtr, pPtr, halfWidth); - pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; - yuvImagePtr += halfWidth; - } - pPtr = pData[2]; - for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { - memcpy(yuvImagePtr, pPtr, halfWidth); - pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; - yuvImagePtr += halfWidth; - } + if (circleOverlays.size()) { + DrawingOverlay drawingOverlay; + drawingOverlay.add(&compositeOverlay); + auto mvSize = drawingOverlay.mGetSerializeSize(); + outFrame = makeframe(outFrame, mvSize, motionVectorPinId); + drawingOverlay.serialize(outFrame); + frames.insert(make_pair(motionVectorPinId, outFrame)); + } + } + if (sendOverlayFrame == false) { + for (int i = 0; i < mMotionVectorSize; i += 4) { + auto motionX = mMotionVectorData[i]; + auto motionY = mMotionVectorData[i + 1]; + if (abs(motionX) > threshold || abs(motionY) > threshold) { + motionFound = true; + break; + } + } + } + if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && + (mMotionVectorSize != mWidth * mHeight * 8) && motionFound == true) { + int rowIndex; + unsigned char *pPtr = NULL; + decodedFrame = makeFrameWithPinId(mHeight * 3 * mWidth, rawFramePinId); + uint8_t *yuvImagePtr = (uint8_t 
*)malloc(mHeight * 1.5 * mWidth); + auto yuvStartPointer = yuvImagePtr; + pPtr = pData[0]; + for (rowIndex = 0; rowIndex < mHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, mWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[0]; + yuvImagePtr += mWidth; + } + int halfHeight = mHeight / 2; + int halfWidth = mWidth / 2; + pPtr = pData[1]; + for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, halfWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; + yuvImagePtr += halfWidth; + } + pPtr = pData[2]; + for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, halfWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; + yuvImagePtr += halfWidth; + } - cv::Mat yuvImgCV = cv::Mat(mHeight + mHeight / 2, mWidth, CV_8UC1, yuvStartPointer, mWidth); - bgrImg.data = static_cast(decodedFrame->data()); + cv::Mat yuvImgCV = cv::Mat(mHeight + mHeight / 2, mWidth, CV_8UC1, + yuvStartPointer, mWidth); + bgrImg.data = static_cast(decodedFrame->data()); - cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); - frames.insert(make_pair(rawFramePinId, decodedFrame)); - } + cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); + frames.insert(make_pair(rawFramePinId, decodedFrame)); + } } -MotionVectorExtractor::MotionVectorExtractor(MotionVectorExtractorProps props) : Module(TRANSFORM, "MotionVectorExtractor", props) -{ - if (props.MVExtract == MotionVectorExtractorProps::MVExtractMethod::FFMPEG) - { - mDetail.reset(new DetailFfmpeg(props, [&](size_t size, string& pinId) -> frame_sp { return makeFrame(size, pinId); }, [&](frame_sp& frame, size_t& size, string& pinId) -> frame_sp { return makeFrame(frame, size, pinId); })); - } - else if (props.MVExtract == MotionVectorExtractorProps::MVExtractMethod::OPENH264) - { - mDetail.reset(new DetailOpenH264(props, [&](size_t size, string& pinId) -> frame_sp { return makeFrame(size, pinId); }, [&](frame_sp& frame, size_t& size, string& pinId) -> frame_sp { return 
makeFrame(frame, size, pinId); })); - } - if (props.sendOverlayFrame) - { - auto motionVectorOutputMetadata = framemetadata_sp(new FrameMetadata(FrameMetadata::OVERLAY_INFO_IMAGE)); - mDetail->motionVectorPinId = addOutputPin(motionVectorOutputMetadata); - } - rawOutputMetadata = framemetadata_sp(new RawImageMetadata()); - mDetail->rawFramePinId = addOutputPin(rawOutputMetadata); -} -bool MotionVectorExtractor::init() -{ - mDetail->initDecoder(); - return Module::init(); -} -bool MotionVectorExtractor::term() -{ - return Module::term(); +MotionVectorExtractor::MotionVectorExtractor(MotionVectorExtractorProps props) + : Module(TRANSFORM, "MotionVectorExtractor", props) { + if (props.MVExtract == MotionVectorExtractorProps::MVExtractMethod::FFMPEG) { + mDetail.reset(new DetailFfmpeg( + props, + [&](size_t size, string &pinId) -> frame_sp { + return makeFrame(size, pinId); + }, + [&](frame_sp &frame, size_t &size, string &pinId) -> frame_sp { + return makeFrame(frame, size, pinId); + })); + } else if (props.MVExtract == + MotionVectorExtractorProps::MVExtractMethod::OPENH264) { + mDetail.reset(new DetailOpenH264( + props, + [&](size_t size, string &pinId) -> frame_sp { + return makeFrame(size, pinId); + }, + [&](frame_sp &frame, size_t &size, string &pinId) -> frame_sp { + return makeFrame(frame, size, pinId); + })); + } + if (props.sendOverlayFrame) { + auto motionVectorOutputMetadata = + framemetadata_sp(new FrameMetadata(FrameMetadata::OVERLAY_INFO_IMAGE)); + mDetail->motionVectorPinId = addOutputPin(motionVectorOutputMetadata); + } + rawOutputMetadata = framemetadata_sp(new RawImageMetadata()); + mDetail->rawFramePinId = addOutputPin(rawOutputMetadata); } -bool MotionVectorExtractor::validateInputPins() -{ - if (getNumberOfInputPins() != 1) - { - LOG_ERROR << "<" << getId() << ">::validateInputPins size is expected to be 1. 
Actual<" << getNumberOfInputPins() << ">"; - return false; - } - framemetadata_sp metadata = getFirstInputMetadata(); - FrameMetadata::FrameType frameType = metadata->getFrameType(); - if (frameType != FrameMetadata::H264_DATA) - { - LOG_ERROR << "<" << getId() << ">::validateInputPins input frameType is expected to be H264_DATA. Actual<" << frameType << ">"; - return false; - } - return true; +bool MotionVectorExtractor::init() { + mDetail->initDecoder(); + return Module::init(); } -bool MotionVectorExtractor::validateOutputPins() -{ - auto size = getNumberOfOutputPins(); - if (getNumberOfOutputPins() > 2) - { - LOG_ERROR << "<" << getId() << ">::validateOutputPins size is expected to be 2. Actual<" << getNumberOfOutputPins() << ">"; - return false; - } - pair me; // map element - auto framefactoryByPin = getOutputFrameFactory(); - BOOST_FOREACH(me, framefactoryByPin) - { - FrameMetadata::FrameType frameType = me.second->getFrameMetadata()->getFrameType(); - if (frameType != FrameMetadata::OVERLAY_INFO_IMAGE && frameType != FrameMetadata::RAW_IMAGE) - { - LOG_ERROR << "<" << getId() << ">::validateOutputPins input frameType is expected to be MOTION_VECTOR_DATA or RAW_IMAGE. Actual<" << frameType << ">"; - return false; - } - } - return true; +bool MotionVectorExtractor::term() { return Module::term(); } +bool MotionVectorExtractor::validateInputPins() { + if (getNumberOfInputPins() != 1) { + LOG_ERROR << "<" << getId() + << ">::validateInputPins size is expected to be 1. Actual<" + << getNumberOfInputPins() << ">"; + return false; + } + framemetadata_sp metadata = getFirstInputMetadata(); + FrameMetadata::FrameType frameType = metadata->getFrameType(); + if (frameType != FrameMetadata::H264_DATA) { + LOG_ERROR << "<" << getId() + << ">::validateInputPins input frameType is expected to be " + "H264_DATA. 
Actual<" + << frameType << ">"; + return false; + } + return true; } -bool MotionVectorExtractor::shouldTriggerSOS() -{ - return mShouldTriggerSOS; +bool MotionVectorExtractor::validateOutputPins() { + auto size = getNumberOfOutputPins(); + if (getNumberOfOutputPins() > 2) { + LOG_ERROR << "<" << getId() + << ">::validateOutputPins size is expected to be 2. Actual<" + << getNumberOfOutputPins() << ">"; + return false; + } + pair me; // map element + auto framefactoryByPin = getOutputFrameFactory(); + BOOST_FOREACH (me, framefactoryByPin) { + FrameMetadata::FrameType frameType = + me.second->getFrameMetadata()->getFrameType(); + if (frameType != FrameMetadata::OVERLAY_INFO_IMAGE && + frameType != FrameMetadata::RAW_IMAGE) { + LOG_ERROR << "<" << getId() + << ">::validateOutputPins input frameType is expected to be " + "MOTION_VECTOR_DATA or RAW_IMAGE. Actual<" + << frameType << ">"; + return false; + } + } + return true; } -bool MotionVectorExtractor::process(frame_container& frames) -{ - frame_sp motionVectorFrame; - frame_sp decodedFrame; - mDetail->getMotionVectors(frames, motionVectorFrame, decodedFrame); - send(frames); - return true; +bool MotionVectorExtractor::shouldTriggerSOS() { return mShouldTriggerSOS; } +bool MotionVectorExtractor::process(frame_container &frames) { + frame_sp motionVectorFrame; + frame_sp decodedFrame; + mDetail->getMotionVectors(frames, motionVectorFrame, decodedFrame); + send(frames); + return true; } -void MotionVectorExtractor::setMetadata(frame_sp frame) -{ - auto metadata = frame->getMetadata(); - if (!metadata->isSet()) - { - return; - } - sps_pps_properties p; - H264ParserUtils::parse_sps(((const char*)frame->data()) + 5, frame->size() > 5 ? 
frame->size() - 5 : frame->size(), &p); - mDetail->mWidth = p.width; - mDetail->mHeight = p.height; - RawImageMetadata outputMetadata(mDetail->mWidth, mDetail->mHeight, ImageMetadata::BGR, CV_8UC3, 0, CV_8U, FrameMetadata::HOST, true); - auto rawOutMetadata = FrameMetadataFactory::downcast(rawOutputMetadata); - rawOutMetadata->setData(outputMetadata); - mDetail->bgrImg = Utils::getMatHeader(rawOutMetadata); +void MotionVectorExtractor::setMetadata(frame_sp frame) { + auto metadata = frame->getMetadata(); + if (!metadata->isSet()) { + return; + } + sps_pps_properties p; + H264ParserUtils::parse_sps( + ((const char *)frame->data()) + 5, + frame->size() > 5 ? frame->size() - 5 : frame->size(), &p); + mDetail->mWidth = p.width; + mDetail->mHeight = p.height; + RawImageMetadata outputMetadata(mDetail->mWidth, mDetail->mHeight, + ImageMetadata::BGR, CV_8UC3, 0, CV_8U, + FrameMetadata::HOST, true); + auto rawOutMetadata = + FrameMetadataFactory::downcast(rawOutputMetadata); + rawOutMetadata->setData(outputMetadata); + mDetail->bgrImg = Utils::getMatHeader(rawOutMetadata); } -bool MotionVectorExtractor::processSOS(frame_sp& frame) -{ - setMetadata(frame); - mShouldTriggerSOS = false; - return true; +bool MotionVectorExtractor::processSOS(frame_sp &frame) { + setMetadata(frame); + mShouldTriggerSOS = false; + return true; } -bool MotionVectorExtractor::handlePropsChange(frame_sp& frame) -{ - MotionVectorExtractorProps props; - auto ret = Module::handlePropsChange(frame, props); - mDetail->setProps(props); - return ret; +bool MotionVectorExtractor::handlePropsChange(frame_sp &frame) { + MotionVectorExtractorProps props; + auto ret = Module::handlePropsChange(frame, props); + mDetail->setProps(props); + return ret; } -void MotionVectorExtractor::setProps(MotionVectorExtractorProps& props) -{ - Module::addPropsToQueue(props); +void MotionVectorExtractor::setProps(MotionVectorExtractorProps &props) { + Module::addPropsToQueue(props); } \ No newline at end of file diff --git 
a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index bc9f0836e..0598e996d 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -12,49 +12,74 @@ #include "ColorConversionXForm.h" #include "FileWriterModule.h" #include "timelapse_summary.h" +#include +#include "ExternalSinkModule.h" TimelapsePipeline::TimelapsePipeline() : pipeline("test") {} +cudastream_sp cudaStream_ = + boost::shared_ptr(new ApraCudaStream()); +std::string outFolderPath = "C:\\Workspace\\ApraPipesFork\\ApraPipes\\data\\Mp4_videos\\timelapse_videos"; +auto cuContext = apracucontext_sp(new ApraCUcontext()); +uint32_t gopLength = 25; +uint32_t bitRateKbps = 1000; +uint32_t frameRate = 30; +H264EncoderNVCodecProps::H264CodecProfile profile = H264EncoderNVCodecProps::MAIN; +bool enableBFrames = false; +bool sendDecodedFrames = true; +bool sendOverlayFrames = false; +std::string videoPath = "C:\\Workspace\\ApraPipesFork\\ApraPipes\\data\\Mp4_videos\\h264_video_metadata\\20230514\\0011\\1707478361303.mp4"; +auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); +auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); +auto mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); +auto motionExtractorProps = MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, 10, sendOverlayFrames); +auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(motionExtractorProps)); +auto colorchange1 = boost::shared_ptr(new ColorConversion( + ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); +auto colorchange2 = boost::shared_ptr(new ColorConversion( + ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); +auto sync = boost::shared_ptr( + new CudaStreamSynchronize(CudaStreamSynchronizeProps(cudaStream_))); +auto encoder = + boost::shared_ptr(new 
H264EncoderNVCodec(H264EncoderNVCodecProps( + bitRateKbps, cuContext, gopLength, frameRate, profile, enableBFrames))); +auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, true); +auto mp4WriterSink = + boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); + +void TimelapsePipeline::test() { + auto sink = boost::shared_ptr(new ExternalSinkModule()); + BOOST_TEST(mp4Reader->init()); + BOOST_TEST(motionExtractor->init()); + BOOST_TEST(colorchange1->init()); + BOOST_TEST(colorchange2->init()); + BOOST_TEST(sync->init()); + BOOST_TEST(encoder->init()); + BOOST_TEST(mp4WriterSink->init()); + mp4Reader->step(); + motionExtractor->step(); + colorchange1->step(); + colorchange2->step(); + sync->step(); + encoder->step(); + encoder->setNext(sink); + auto frames = sink->pop(); + BOOST_CHECK_EQUAL(frames.size(), 1); +} + bool TimelapsePipeline::setupPipeline() { - std::string outFolderPath = "../.././data/timeplase_videos"; - auto cuContext = apracucontext_sp(new ApraCUcontext()); - uint32_t gopLength = 25; - uint32_t bitRateKbps = 1000; - uint32_t frameRate = 30; - H264EncoderNVCodecProps::H264CodecProfile profile = H264EncoderNVCodecProps::MAIN; - bool enableBFrames = false; - bool sendDecodedFrames = true; - bool sendOverlayFrames = false; - std::string videoPath = "../.././data/Mp4_videos/h264_video_metadata/20230514/0011/1715748702000.mp4"; - auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); - auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); - mp4ReaderProps.fps = 24; - auto mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); mp4Reader -> addOutPutPin(h264ImageMetadata); auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); mp4Reader->addOutPutPin(mp4Metadata); std::vector mImagePin; mImagePin = mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); - auto motionExtractorProps = 
MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264,sendDecodedFrames, 10, sendOverlayFrames); - auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(motionExtractorProps)); mp4Reader->setNext(motionExtractor, mImagePin); - - auto colorchange1 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); motionExtractor -> setNext(colorchange1); - - auto colorchange2 = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); colorchange1 -> setNext(colorchange2); - - cudastream_sp cudaStream_ = boost::shared_ptr(new ApraCudaStream()); auto copy = boost::shared_ptr(new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); colorchange2 -> setNext(copy); - - auto sync = boost::shared_ptr(new CudaStreamSynchronize(CudaStreamSynchronizeProps(cudaStream_))); copy -> setNext(sync); - auto encoder = boost::shared_ptr(new H264EncoderNVCodec(H264EncoderNVCodecProps(bitRateKbps, cuContext, gopLength, frameRate, profile, enableBFrames))); sync -> setNext(encoder); - auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, false); - auto mp4WriterSink = boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); encoder -> setNext(mp4WriterSink); pipeline.appendModule(mp4Reader); diff --git a/samples/timelapse-sample/timelapse_summary.h b/samples/timelapse-sample/timelapse_summary.h index d06d0aeb8..0cbca67e5 100644 --- a/samples/timelapse-sample/timelapse_summary.h +++ b/samples/timelapse-sample/timelapse_summary.h @@ -7,6 +7,7 @@ class TimelapsePipeline { bool setupPipeline(); bool startPipeline(); bool stopPipeline(); + void test(); private: PipeLine pipeline; diff --git a/samples/timelapse-sample/timelapse_summary_test.cpp b/samples/timelapse-sample/timelapse_summary_test.cpp index 3c579e511..a84a88dd6 100644 --- a/samples/timelapse-sample/timelapse_summary_test.cpp +++ b/samples/timelapse-sample/timelapse_summary_test.cpp 
@@ -7,11 +7,7 @@ BOOST_AUTO_TEST_SUITE(generate_timelapse) BOOST_AUTO_TEST_CASE(generateTimeLapse) { auto timelapsePipeline = boost::shared_ptr(new TimelapsePipeline()); - timelapsePipeline -> setupPipeline(); - - timelapsePipeline -> startPipeline(); - - timelapsePipeline -> stopPipeline(); + timelapsePipeline->test(); } BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From a5df32a367eaef8553432eac3ec1ed14d357049c Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 15:45:25 +0530 Subject: [PATCH 18/49] taking video paths from terminal and modified cmake. --- base/CMakeLists.txt | 1 + samples/CMakeLists.txt | 5 +- samples/timelapse-sample/CMakeLists.txt | 34 ++-- .../timelapse-sample/timelapse_summary.cpp | 168 ++++++++++-------- samples/timelapse-sample/timelapse_summary.h | 18 +- 5 files changed, 125 insertions(+), 101 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index f895b1da4..70a0c89e3 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -456,6 +456,7 @@ ${LIBMP4_INC_DIR} ${BARESIP_INC_DIR} ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} +./../samples/timelapse-sample ) diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index 080530fec..a24ea02f7 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -1,7 +1,4 @@ -#dependencies - -find_package(Threads REQUIRED) - +cmake_minimum_required(VERSION 3.22) include_directories( ../base ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/samples/timelapse-sample/CMakeLists.txt b/samples/timelapse-sample/CMakeLists.txt index 31ffe93ca..323ed850a 100644 --- a/samples/timelapse-sample/CMakeLists.txt +++ b/samples/timelapse-sample/CMakeLists.txt @@ -1,3 +1,4 @@ +cmake_minimum_required(VERSION 3.22) set(TARGET timelapse-sample) SET(CORE_FILES timelapse_summary.cpp @@ -9,23 +10,11 @@ SET(SOURCE ${CORE_FILES} ${CORE_FILES_H} ) -set(UT_FILE - timelapse_summary_test.cpp +SET(UT_FILE + timelapse_summary_test.cpp ) -find_library(APRA_PIPES_LIB NAMES aprapipes) 
-add_library(example_aprapipes STATIC ${SOURCE}) -add_executable(runner_test ${UT_FILE}) -target_include_directories ( example_aprapipes PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} - ${OpenCV_INCLUDE_DIRS} - ${Boost_INCLUDE_DIRS} - ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} -) -target_include_directories ( runner_test PUBLIC +add_executable(timelapsesample ${SOURCE} ${UT_FILE}) +target_include_directories (timelapsesample PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} @@ -36,7 +25,7 @@ target_include_directories ( runner_test PUBLIC ${NVCODEC_INCLUDE_DIR} ) target_link_libraries( - runner_test + timelapsesample aprapipes ${JPEG_LIBRARIES} ${LIBMP4_LIB} @@ -49,5 +38,12 @@ target_link_libraries( ${NVCODEC_LIB} ${NVJPEGLIB_L4T} ${CURSES_LIBRARIES} - example_aprapipes - ) \ No newline at end of file + ZXing::Core + ZXing::ZXing + BZip2::BZip2 + ZLIB::ZLIB + liblzma::liblzma + bigint::bigint + sfml-audio + whisper::whisper +) \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index 0598e996d..3215f662d 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -15,91 +15,105 @@ #include #include "ExternalSinkModule.h" -TimelapsePipeline::TimelapsePipeline() : pipeline("test") {} - -cudastream_sp cudaStream_ = - boost::shared_ptr(new ApraCudaStream()); -std::string outFolderPath = "C:\\Workspace\\ApraPipesFork\\ApraPipes\\data\\Mp4_videos\\timelapse_videos"; -auto cuContext = apracucontext_sp(new ApraCUcontext()); -uint32_t gopLength = 25; -uint32_t bitRateKbps = 1000; -uint32_t frameRate = 30; -H264EncoderNVCodecProps::H264CodecProfile profile = H264EncoderNVCodecProps::MAIN; -bool enableBFrames = false; -bool sendDecodedFrames = true; -bool sendOverlayFrames = false; -std::string videoPath = 
"C:\\Workspace\\ApraPipesFork\\ApraPipes\\data\\Mp4_videos\\h264_video_metadata\\20230514\\0011\\1707478361303.mp4"; -auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); -auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); -auto mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); -auto motionExtractorProps = MotionVectorExtractorProps(MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, 10, sendOverlayFrames); -auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(motionExtractorProps)); -auto colorchange1 = boost::shared_ptr(new ColorConversion( - ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); -auto colorchange2 = boost::shared_ptr(new ColorConversion( - ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); -auto sync = boost::shared_ptr( - new CudaStreamSynchronize(CudaStreamSynchronizeProps(cudaStream_))); -auto encoder = - boost::shared_ptr(new H264EncoderNVCodec(H264EncoderNVCodecProps( - bitRateKbps, cuContext, gopLength, frameRate, profile, enableBFrames))); -auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, true); -auto mp4WriterSink = - boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); +TimelapsePipeline::TimelapsePipeline() + : pipeline("test"), cudaStream_(new ApraCudaStream()), + cuContext(new ApraCUcontext()), + h264ImageMetadata(new H264Metadata(0, 0)) {} -void TimelapsePipeline::test() { - auto sink = boost::shared_ptr(new ExternalSinkModule()); - BOOST_TEST(mp4Reader->init()); - BOOST_TEST(motionExtractor->init()); - BOOST_TEST(colorchange1->init()); - BOOST_TEST(colorchange2->init()); - BOOST_TEST(sync->init()); - BOOST_TEST(encoder->init()); - BOOST_TEST(mp4WriterSink->init()); - mp4Reader->step(); - motionExtractor->step(); - colorchange1->step(); - colorchange2->step(); - sync->step(); - encoder->step(); - encoder->setNext(sink); - auto frames = sink->pop(); - BOOST_CHECK_EQUAL(frames.size(), 1); 
-} +bool TimelapsePipeline::setupPipeline(const std::string &videoPath, + const std::string &outFolderPath) { + uint32_t gopLength = 25; + uint32_t bitRateKbps = 1000; + uint32_t frameRate = 30; + H264EncoderNVCodecProps::H264CodecProfile profile = + H264EncoderNVCodecProps::MAIN; + bool enableBFrames = false; + bool sendDecodedFrames = true; + bool sendOverlayFrames = false; + + auto mp4ReaderProps = + Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); + mp4Reader = + boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); + auto motionExtractorProps = MotionVectorExtractorProps( + MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, + 10, sendOverlayFrames); + motionExtractor = boost::shared_ptr( + new MotionVectorExtractor(motionExtractorProps)); + colorchange1 = boost::shared_ptr(new ColorConversion( + ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); + colorchange2 = boost::shared_ptr(new ColorConversion( + ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); + sync = boost::shared_ptr( + new CudaStreamSynchronize(CudaStreamSynchronizeProps(cudaStream_))); + encoder = boost::shared_ptr(new H264EncoderNVCodec( + H264EncoderNVCodecProps(bitRateKbps, cuContext, gopLength, frameRate, + profile, enableBFrames))); + auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, true); + mp4WriterSink = + boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); -bool TimelapsePipeline::setupPipeline() { - mp4Reader -> addOutPutPin(h264ImageMetadata); - auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); - mp4Reader->addOutPutPin(mp4Metadata); - std::vector mImagePin; - mImagePin = mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); - mp4Reader->setNext(motionExtractor, mImagePin); - motionExtractor -> setNext(colorchange1); - colorchange1 -> setNext(colorchange2); - auto copy = boost::shared_ptr(new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); - colorchange2 
-> setNext(copy); - copy -> setNext(sync); - sync -> setNext(encoder); - encoder -> setNext(mp4WriterSink); - - pipeline.appendModule(mp4Reader); - pipeline.init(); - return true; + mp4Reader->addOutPutPin(h264ImageMetadata); + auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); + mp4Reader->addOutPutPin(mp4Metadata); + std::vector mImagePin = + mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); + mp4Reader->setNext(motionExtractor, mImagePin); + motionExtractor->setNext(colorchange1); + colorchange1->setNext(colorchange2); + auto copy = boost::shared_ptr( + new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); + colorchange2->setNext(copy); + copy->setNext(sync); + sync->setNext(encoder); + encoder->setNext(mp4WriterSink); + + pipeline.appendModule(mp4Reader); + pipeline.init(); + return true; } bool TimelapsePipeline::startPipeline() { - pipeline.run_all_threaded(); - return true; + pipeline.run_all_threaded(); + return true; } bool TimelapsePipeline::stopPipeline() { - pipeline.stop(); - pipeline.term(); - pipeline.wait_for_all(); - return true; + pipeline.stop(); + pipeline.term(); + pipeline.wait_for_all(); + return true; } -int main() { +void TimelapsePipeline::test() { + auto sink = boost::shared_ptr(new ExternalSinkModule()); + BOOST_TEST(mp4Reader->init()); + BOOST_TEST(motionExtractor->init()); + BOOST_TEST(colorchange1->init()); + BOOST_TEST(colorchange2->init()); + BOOST_TEST(sync->init()); + BOOST_TEST(encoder->init()); + BOOST_TEST(mp4WriterSink->init()); + mp4Reader->step(); + motionExtractor->step(); + colorchange1->step(); + colorchange2->step(); + sync->step(); + encoder->step(); + encoder->setNext(sink); + auto frames = sink->pop(); + BOOST_CHECK_EQUAL(frames.size(), 1); +} + +int main(int argc, char *argv[]) { + if (argc < 3) { + std::cerr << "Usage: " << argv[0] << " " << std::endl; + return 1; + } + + std::string videoPath = argv[argc - 2]; + std::string outFolderPath = argv[argc - 1]; + LoggerProps 
loggerProps; loggerProps.logLevel = boost::log::trivial::severity_level::info; Logger::setLogLevel(boost::log::trivial::severity_level::info); @@ -108,7 +122,7 @@ int main() { TimelapsePipeline pipelineInstance; // Setup the pipeline - if (!pipelineInstance.setupPipeline()) { + if (!pipelineInstance.setupPipeline(videoPath, outFolderPath)) { std::cerr << "Failed to setup pipeline." << std::endl; return 1; // Or any error code indicating failure } @@ -129,4 +143,4 @@ int main() { } return 0; // Or any success code -} +} \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary.h b/samples/timelapse-sample/timelapse_summary.h index 0cbca67e5..27c043020 100644 --- a/samples/timelapse-sample/timelapse_summary.h +++ b/samples/timelapse-sample/timelapse_summary.h @@ -1,14 +1,30 @@ #include +#include +#include "CudaStreamSynchronize.h" +#include +#include +#include class TimelapsePipeline { public: TimelapsePipeline(); - bool setupPipeline(); + bool setupPipeline(const std::string &videoPath, + const std::string &outFolderPath); bool startPipeline(); bool stopPipeline(); void test(); private: PipeLine pipeline; + cudastream_sp cudaStream_; + apracucontext_sp cuContext; + framemetadata_sp h264ImageMetadata; + boost::shared_ptr mp4Reader; + boost::shared_ptr motionExtractor; + boost::shared_ptr colorchange1; + boost::shared_ptr colorchange2; + boost::shared_ptr sync; + boost::shared_ptr encoder; + boost::shared_ptr mp4WriterSink; }; \ No newline at end of file From cce5ba27f97f58fea7eb85685874b86e9c9dd79d Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 16:09:24 +0530 Subject: [PATCH 19/49] vcpkg baseline revert. 
--- base/vcpkg.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/vcpkg.json b/base/vcpkg.json index a8342901f..4df4664c0 100644 --- a/base/vcpkg.json +++ b/base/vcpkg.json @@ -2,7 +2,7 @@ "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg/master/scripts/vcpkg.schema.json", "name": "apra-pipes-cuda", "version": "0.0.1", - "builtin-baseline": "7754d62d19501a3bb4e2d4f2eab80e8de9703e41", + "builtin-baseline": "eac79fc7bda260819c646d10c97dec825305aecd", "dependencies": [ { "name": "whisper", From 87c26d2923d41e6b64a30455fa2398a2a4d7d334 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 16:15:42 +0530 Subject: [PATCH 20/49] reset vcpkg head. --- vcpkg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcpkg b/vcpkg index 3860aea73..2e37ddfc2 160000 --- a/vcpkg +++ b/vcpkg @@ -1 +1 @@ -Subproject commit 3860aea7304f2b9eac61f37a3095fd5fd57428d8 +Subproject commit 2e37ddfc28f7379cbbc11495ca285514a3cac316 From 4627b3611b04c98687dbfe4f97fb22cf8ea01247 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 16:20:06 +0530 Subject: [PATCH 21/49] vcpkg revert --- vcpkg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcpkg b/vcpkg index 2e37ddfc2..29a017687 160000 --- a/vcpkg +++ b/vcpkg @@ -1 +1 @@ -Subproject commit 2e37ddfc28f7379cbbc11495ca285514a3cac316 +Subproject commit 29a017687d56121cb9d200a7dc519c0de2c78a4a From 31d8bf5c4c754af5c5105350875794614d2e2c06 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 16:44:25 +0530 Subject: [PATCH 22/49] vcpkg revert. 
--- vcpkg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcpkg b/vcpkg index 29a017687..2979bf2d7 160000 --- a/vcpkg +++ b/vcpkg @@ -1 +1 @@ -Subproject commit 29a017687d56121cb9d200a7dc519c0de2c78a4a +Subproject commit 2979bf2d70b7e65aa3c3a0d9f779bbffd7d0ff62 From 518a75e3ca4578f580ac2213ec97e9b5d8497a95 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 17:00:24 +0530 Subject: [PATCH 23/49] update submodule --- vcpkg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcpkg b/vcpkg index 2979bf2d7..1fff108c2 160000 --- a/vcpkg +++ b/vcpkg @@ -1 +1 @@ -Subproject commit 2979bf2d70b7e65aa3c3a0d9f779bbffd7d0ff62 +Subproject commit 1fff108c2754e53cee9ba205400e11ab2ae4c94a From b45c423911a923064c2256927b83c8e30e9e0da3 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 17:02:59 +0530 Subject: [PATCH 24/49] update vcpkg final --- vcpkg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcpkg b/vcpkg index 1fff108c2..2979bf2d7 160000 --- a/vcpkg +++ b/vcpkg @@ -1 +1 @@ -Subproject commit 1fff108c2754e53cee9ba205400e11ab2ae4c94a +Subproject commit 2979bf2d70b7e65aa3c3a0d9f779bbffd7d0ff62 From e0f0e9165e0fd62c6325ff783ef3a643f671797b Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 18:25:11 +0530 Subject: [PATCH 25/49] Corrected cmake for samples. 
--- base/CMakeLists.txt | 29 +++++++++++++++++++++++++ samples/timelapse-sample/CMakeLists.txt | 5 +---- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index b2171ade0..1725e7ec5 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -567,6 +567,10 @@ SET(UT_FILES ${CUDA_UT_FILES} ) +SET(SAMPLE_UT_FILES + .././samples/timelapse-sample/timelapse_summary_test.cpp +) + IF(ENABLE_LINUX) list(APPEND UT_FILES test/virtualcamerasink_tests.cpp @@ -577,6 +581,8 @@ ENDIF(ENABLE_LINUX) add_executable(aprapipesut ${UT_FILES}) +add_executable(aprapipessampleut ${SAMPLE_UT_FILES}) + IF(ENABLE_ARM64) target_include_directories ( aprapipesut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_ROOT} ${JPEG_INCLUDE_DIR}) ENDIF(ENABLE_ARM64) @@ -589,6 +595,29 @@ ENDIF (ENABLE_CUDA) find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) +target_link_libraries(aprapipessampleut + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + ZXing::Core + ZXing::ZXing + BZip2::BZip2 + ZLIB::ZLIB + liblzma::liblzma + bigint::bigint + sfml-audio + whisper::whisper +) + target_link_libraries(aprapipesut aprapipes ${JPEG_LIBRARIES} diff --git a/samples/timelapse-sample/CMakeLists.txt b/samples/timelapse-sample/CMakeLists.txt index 323ed850a..957916ea2 100644 --- a/samples/timelapse-sample/CMakeLists.txt +++ b/samples/timelapse-sample/CMakeLists.txt @@ -10,10 +10,7 @@ SET(SOURCE ${CORE_FILES} ${CORE_FILES_H} ) -SET(UT_FILE - timelapse_summary_test.cpp -) -add_executable(timelapsesample ${SOURCE} ${UT_FILE}) +add_executable(timelapsesample ${SOURCE}) target_include_directories (timelapsesample PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} From 
dc1c0a6f3739c80f498cd0378596652496a5b961 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 23 May 2024 18:58:10 +0530 Subject: [PATCH 26/49] Corrected base cmake for samples. --- base/CMakeLists.txt | 49 +++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 1725e7ec5..d6dadadde 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -163,6 +163,7 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp + .././samples/timelapse-sample/timelapse_summary.h ) SET(CORE_FILES_H @@ -224,6 +225,7 @@ SET(CORE_FILES_H include/OverlayModule.h include/OrderedCacheOfFiles.h include/TestSignalGeneratorSrc.h + .././samples/timelapse-sample/timelapse_summary.cpp ) IF(ENABLE_WINDOWS) @@ -568,9 +570,8 @@ SET(UT_FILES ) SET(SAMPLE_UT_FILES - .././samples/timelapse-sample/timelapse_summary_test.cpp +.././samples/timelapse-sample/timelapse_summary_test.cpp ) - IF(ENABLE_LINUX) list(APPEND UT_FILES test/virtualcamerasink_tests.cpp @@ -595,28 +596,28 @@ ENDIF (ENABLE_CUDA) find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) -target_link_libraries(aprapipessampleut - aprapipes - ${JPEG_LIBRARIES} - ${LIBMP4_LIB} - ${OPENH264_LIB} - ${Boost_LIBRARIES} - ${FFMPEG_LIBRARIES} - ${OpenCV_LIBRARIES} - ${JETSON_LIBS} - ${NVCUDAToolkit_LIBS} - ${NVCODEC_LIB} - ${NVJPEGLIB_L4T} - ${CURSES_LIBRARIES} - ZXing::Core - ZXing::ZXing - BZip2::BZip2 - ZLIB::ZLIB - liblzma::liblzma - bigint::bigint - sfml-audio - whisper::whisper -) +target_link_libraries(aprapipessampleut + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + ZXing::Core + ZXing::ZXing + BZip2::BZip2 + ZLIB::ZLIB + 
liblzma::liblzma + bigint::bigint + sfml-audio + whisper::whisper + ) target_link_libraries(aprapipesut aprapipes From 29287b1fdc4d283ba6c0f42c0449f8920570a140 Mon Sep 17 00:00:00 2001 From: nsabale7 Date: Mon, 27 May 2024 14:28:13 +0530 Subject: [PATCH 27/49] Running pipeline for face detection --- base/CMakeLists.txt | 2 + base/include/FaceDetectorXform.h | 5 +- base/include/FacialLandmarksCV.h | 1 + base/src/FaceDetectorXform.cpp | 46 ++++++++--- base/src/FacialLandmarksCV.cpp | 78 +++++++++++++++---- base/src/OverlayModule.cpp | 9 ++- base/test/facedetectorXform_tests.cpp | 36 +++++++++ samples/CMakeLists.txt | 10 +++ samples/face_detection_cpu/CMakeLists.txt | 52 +++++++++++++ .../face_detection_cpu/face_detection_cpu.cpp | 78 +++++++++++++++++++ .../face_detection_cpu/face_detection_cpu.h | 11 +++ .../test_face_detection_cpu.cpp | 11 +++ samples/timelapse_sample/CMakeLists.txt | 1 - .../timelapse_sample/timelapse_summary.cpp | 48 ------------ samples/timelapse_sample/timelapse_summary.h | 12 --- .../timelapse_summary_test.cpp | 18 ----- 16 files changed, 311 insertions(+), 107 deletions(-) create mode 100644 samples/CMakeLists.txt create mode 100644 samples/face_detection_cpu/CMakeLists.txt create mode 100644 samples/face_detection_cpu/face_detection_cpu.cpp create mode 100644 samples/face_detection_cpu/face_detection_cpu.h create mode 100644 samples/face_detection_cpu/test_face_detection_cpu.cpp delete mode 100644 samples/timelapse_sample/CMakeLists.txt delete mode 100644 samples/timelapse_sample/timelapse_summary.cpp delete mode 100644 samples/timelapse_sample/timelapse_summary.h delete mode 100644 samples/timelapse_sample/timelapse_summary_test.cpp diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 9f2cd1470..ba5a36776 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -588,6 +588,8 @@ ENDIF (ENABLE_CUDA) find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) 
+add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../samples ${CMAKE_CURRENT_BINARY_DIR}/samples) + target_link_libraries(aprapipesut aprapipes ${JPEG_LIBRARIES} diff --git a/base/include/FaceDetectorXform.h b/base/include/FaceDetectorXform.h index bbbca3ebc..9ccf07e46 100644 --- a/base/include/FaceDetectorXform.h +++ b/base/include/FaceDetectorXform.h @@ -6,11 +6,14 @@ class FaceDetectorXformProps : public ModuleProps { public: - FaceDetectorXformProps(double _scaleFactor = 1.0, float _confidenceThreshold = 0.5) : scaleFactor(_scaleFactor), confidenceThreshold(_confidenceThreshold) + FaceDetectorXformProps(double _scaleFactor = 1.0, float _confidenceThreshold = 0.5, std::string _Face_Detection_Configuration= "./data/assets/deploy.prototxt", std::string _Face_Detection_Weights= "./data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel") + : scaleFactor(_scaleFactor), confidenceThreshold(_confidenceThreshold), FACE_DETECTION_CONFIGURATION(_Face_Detection_Configuration), FACE_DETECTION_WEIGHTS(_Face_Detection_Weights) { } double scaleFactor; float confidenceThreshold; + std::string FACE_DETECTION_CONFIGURATION; + std::string FACE_DETECTION_WEIGHTS; size_t getSerializeSize() { diff --git a/base/include/FacialLandmarksCV.h b/base/include/FacialLandmarksCV.h index ef52acbaa..3f94b1a0c 100644 --- a/base/include/FacialLandmarksCV.h +++ b/base/include/FacialLandmarksCV.h @@ -81,4 +81,5 @@ class FacialLandmarkCV : public Module FacialLandmarkCVProps mProp; bool handlePropsChange(frame_sp& frame); std::string mOutputPinId1; + std::string rawFramePinId; }; \ No newline at end of file diff --git a/base/src/FaceDetectorXform.cpp b/base/src/FaceDetectorXform.cpp index 9fc912cfb..84f2836b2 100644 --- a/base/src/FaceDetectorXform.cpp +++ b/base/src/FaceDetectorXform.cpp @@ -11,6 +11,7 @@ #include "AIPExceptions.h" #include "ApraFaceInfo.h" #include "FaceDetectsInfo.h" +#include "Overlay.h" class FaceDetectorXform::Detail { @@ -34,13 +35,12 @@ class FaceDetectorXform::Detail 
framemetadata_sp mOutputMetadata; FaceDetectorXformProps mProps; std::string mOutputPinId; + std::string rawFramePinId; cv::Mat mInputImg; int mFrameType; cv::dnn::Net network; cv::Mat inputBlob; cv::Mat detection; - const std::string FACE_DETECTION_CONFIGURATION = "./data/assets/deploy.prototxt"; - const std::string FACE_DETECTION_WEIGHTS = "./data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel"; // scalar with mean values which are subtracted from channels. // Values are intended to be in (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true. const cv::Scalar meanValuesRGB = cv::Scalar({104., 177.0, 123.0}); @@ -75,7 +75,7 @@ bool FaceDetectorXform::validateInputPins() bool FaceDetectorXform::validateOutputPins() { - if (getNumberOfOutputPins() != 1) + if (getNumberOfOutputPins() > 2) { LOG_ERROR << "<" << getId() << ">::validateOutputPins size is expected to be 1. Actual<" << getNumberOfOutputPins() << ">"; return false; @@ -88,11 +88,12 @@ void FaceDetectorXform::addInputPin(framemetadata_sp &metadata, string &pinId) Module::addInputPin(metadata, pinId); mDetail->mOutputMetadata = framemetadata_sp(new FrameMetadata(FrameMetadata::FACEDETECTS_INFO)); mDetail->mOutputPinId = addOutputPin(mDetail->mOutputMetadata); + mDetail->rawFramePinId = addOutputPin(metadata); } bool FaceDetectorXform::init() { - mDetail->network = cv::dnn::readNetFromCaffe(mDetail->FACE_DETECTION_CONFIGURATION, mDetail->FACE_DETECTION_WEIGHTS); + mDetail->network = cv::dnn::readNetFromCaffe(mDetail->mProps.FACE_DETECTION_CONFIGURATION, mDetail->mProps.FACE_DETECTION_WEIGHTS); if (mDetail->network.empty()) { LOG_ERROR << "Failed to load network with the given settings. 
Please check the loaded parameters."; @@ -121,11 +122,14 @@ bool FaceDetectorXform::process(frame_container &frames) mDetail->inputBlob = cv::dnn::blobFromImage(mDetail->mInputImg, mDetail->mProps.scaleFactor, cv::Size(mDetail->mInputImg.cols, mDetail->mInputImg.rows), mDetail->meanValuesRGB, false, false); mDetail->network.setInput(mDetail->inputBlob, "data"); - + mDetail->detection = mDetail->network.forward("detection_out"); cv::Mat detectionMatrix(mDetail->detection.size[2], mDetail->detection.size[3], CV_32F, mDetail->detection.ptr()); + std::vector rectangleOverlays; + CompositeOverlay compositeOverlay; + for (int i = 0; i < detectionMatrix.rows; i++) { float confidence = detectionMatrix.at(i, 2); @@ -136,20 +140,42 @@ bool FaceDetectorXform::process(frame_container &frames) } mDetail->faceInfo.x1 = detectionMatrix.at(i, 3) * mDetail->mInputImg.cols; - mDetail->faceInfo.y2 = detectionMatrix.at(i, 4) * mDetail->mInputImg.rows; + mDetail->faceInfo.y1 = detectionMatrix.at(i, 4) * mDetail->mInputImg.rows; mDetail->faceInfo.x2 = detectionMatrix.at(i, 5) * mDetail->mInputImg.cols; - mDetail->faceInfo.y1 = detectionMatrix.at(i, 6) * mDetail->mInputImg.rows; + mDetail->faceInfo.y2 = detectionMatrix.at(i, 6) * mDetail->mInputImg.rows; mDetail->faceInfo.score = confidence; mDetail->faces.emplace_back(mDetail->faceInfo); + + RectangleOverlay rectangleOverlay; + rectangleOverlay.x1 = mDetail->faceInfo.x1; + rectangleOverlay.y1 = mDetail->faceInfo.y1; + rectangleOverlay.x2 = mDetail->faceInfo.x2; + rectangleOverlay.y2 = mDetail->faceInfo.y2; + rectangleOverlays.push_back(rectangleOverlay); } + for (auto &rectangleOverlay : rectangleOverlays) { + compositeOverlay.add(&rectangleOverlay); + } + + auto rawFrame = frames.cbegin()->second; + frames.insert(make_pair(mDetail->rawFramePinId, rawFrame)); + mDetail->faceDetectsInfo.faces = mDetail->faces; - auto outFrame = makeFrame(mDetail->faceDetectsInfo.getSerializeSize()); - mDetail->faceDetectsInfo.serialize(outFrame->data(), 
mDetail->faceDetectsInfo.getSerializeSize()); - frames.insert(make_pair(mDetail->mOutputPinId, outFrame)); + + if (rectangleOverlays.size() > 0) { + DrawingOverlay drawingOverlay; + drawingOverlay.add(&compositeOverlay); + auto mvSize = drawingOverlay.mGetSerializeSize(); + auto outFrame = makeFrame(mvSize, mDetail->mOutputPinId); + drawingOverlay.serialize(outFrame); + frames.insert(make_pair(mDetail->mOutputPinId, outFrame)); + } + mDetail->faces.clear(); mDetail->faceDetectsInfo.faces.clear(); + send(frames); return true; } diff --git a/base/src/FacialLandmarksCV.cpp b/base/src/FacialLandmarksCV.cpp index a9ef0ce5a..02b55b1cd 100644 --- a/base/src/FacialLandmarksCV.cpp +++ b/base/src/FacialLandmarksCV.cpp @@ -12,6 +12,7 @@ #include "Logger.h" #include "Utils.h" #include "AIPExceptions.h" +#include "Overlay.h" class Detail { @@ -87,6 +88,7 @@ class Detail FacialLandmarkCVProps props; cv::Mat iImg; vector>landmarks; + vector faces; protected: framemetadata_sp mInputMetadata; @@ -113,6 +115,7 @@ class DetailSSD : public Detail { //input must be 3 channel image(RGB) // Create a 4-dimensional blob from the image. Optionally resizes and crops image from center, subtract mean values, scales values by scalefactor, swap Blue and Red channels. 
+ iImg.data = static_cast(buffer->data()); cv::Mat inputBlob = cv::dnn::blobFromImage(iImg, 1.0, cv::Size(300, 300), cv::Scalar(104, 177, 123), false, false); // Set the input blob as input to the face detector network @@ -123,8 +126,6 @@ class DetailSSD : public Detail cv::Mat detectionMatrix(detection.size[2], detection.size[3], CV_32F, detection.ptr()); - vector faces; - for (int i = 0; i < detectionMatrix.rows; i++) { float confidence = detectionMatrix.at(i, 2); @@ -139,13 +140,16 @@ class DetailSSD : public Detail cv::Rect faceRect(x1, y1, x2 - x1, y2 - y1); faces.push_back(faceRect); - cv::rectangle(iImg, faceRect, cv::Scalar(0, 255, 0), 2); } } + if (faces.size() == 0) { + return false; + } + bool success = facemark->fit(iImg, faces, landmarks); - return true; + return success; } private: @@ -164,17 +168,15 @@ class DetailHCASCADE : public Detail bool compute(frame_sp buffer) { - vector faces; faceDetector.detectMultiScale(iImg, faces); - for (int i = 0; i < faces.size(); i++) - { - rectangle(iImg, faces[i], cv::Scalar(0, 255, 0), 2); + if (faces.size() == 0) { + return false; } - bool success = facemark->fit(iImg, faces, landmarks); - - return true; + bool success = facemark->fit(iImg, faces, landmarks); + + return success; } private: @@ -243,6 +245,7 @@ void FacialLandmarkCV::addInputPin(framemetadata_sp &metadata, string &pinId) Module::addInputPin(metadata, pinId); auto landmarksOutputMetadata = framemetadata_sp(new FrameMetadata(FrameMetadata::FACE_LANDMARKS_INFO)); mOutputPinId1 = addOutputPin(landmarksOutputMetadata); + rawFramePinId = addOutputPin(metadata); } bool FacialLandmarkCV::init() @@ -278,8 +281,26 @@ bool FacialLandmarkCV::term() bool FacialLandmarkCV::process(frame_container& frames) { auto frame = frames.cbegin()->second; + bool computeValue = mDetail->compute(frame); + + if (computeValue == false) { + send(frames); + return true; + } + + std::vector rectangleOverlays; + + for (const auto& face :mDetail->faces) { + RectangleOverlay 
rectangleOverlay; + rectangleOverlay.x1 = face.x; + rectangleOverlay.y1 = face.y; + rectangleOverlay.x2 = face.x + face.width; + rectangleOverlay.y2 = face.y + face.height; - mDetail->compute(frame); + rectangleOverlays.push_back(rectangleOverlay); + } + + std::vector circleOverlays; // Convert the landmarks from cv::Point2f to ApraPoint2f vector> apralandmarks; @@ -287,6 +308,13 @@ bool FacialLandmarkCV::process(frame_container& frames) vector apralandmark; for (const auto& point : landmark) { apralandmark.emplace_back(ApraPoint2f(point)); + + CircleOverlay circleOverlay; + circleOverlay.x1 = point.x; + circleOverlay.y1 = point.y; + circleOverlay.radius = 1; + + circleOverlays.push_back(circleOverlay); } apralandmarks.emplace_back(std::move(apralandmark)); } @@ -297,11 +325,31 @@ bool FacialLandmarkCV::process(frame_container& frames) bufferSize += sizeof(apralandmarks[i]) + (sizeof(ApraPoint2f) + 2 * sizeof(int)) * apralandmarks[i].size(); } - auto landmarksFrame = makeFrame(bufferSize); + CompositeOverlay compositeOverlay; - Utils::serialize>>(apralandmarks, landmarksFrame->data(), bufferSize); + for (auto &rectangleOverlay : rectangleOverlays) { + compositeOverlay.add(&rectangleOverlay); + } + + for (auto &circleOverlay : circleOverlays) { + compositeOverlay.add(&circleOverlay); + } + + auto rawFrame = frames.cbegin()->second; + + frames.insert(make_pair(rawFramePinId, rawFrame)); + + if (rectangleOverlays.size() > 0 || circleOverlays.size() > 0) { + DrawingOverlay drawingOverlay; + drawingOverlay.add(&compositeOverlay); + auto mvSize = drawingOverlay.mGetSerializeSize(); + auto landmarksFrame = makeFrame(mvSize, mOutputPinId1); + drawingOverlay.serialize(landmarksFrame); + frames.insert(make_pair(mOutputPinId1, landmarksFrame)); + } - frames.insert(make_pair(mOutputPinId1, landmarksFrame)); + mDetail->faces.clear(); + mDetail->landmarks.clear(); send(frames); diff --git a/base/src/OverlayModule.cpp b/base/src/OverlayModule.cpp index c0ea6a0fa..a76ba4776 100644 
--- a/base/src/OverlayModule.cpp +++ b/base/src/OverlayModule.cpp @@ -32,7 +32,7 @@ bool OverlayModule::validateInputPins() BOOST_FOREACH(me, inputMetadataByPin) { FrameMetadata::FrameType frameType = me.second->getFrameType(); - if (frameType != FrameMetadata::RAW_IMAGE && frameType != FrameMetadata::OVERLAY_INFO_IMAGE) + if (frameType != FrameMetadata::RAW_IMAGE && frameType != FrameMetadata::OVERLAY_INFO_IMAGE && frameType != FrameMetadata::FACE_LANDMARKS_INFO && frameType != FrameMetadata::FACEDETECTS_INFO) { LOG_ERROR << "<" << getId() << ">::validateInputPins input frameType is expected to be RAW_IMAGE OR OVERLAY_INFO_IMAGE. Actual<" << frameType << ">"; return false; @@ -70,8 +70,13 @@ bool OverlayModule::process(frame_container& frames) if (frameType == FrameMetadata::OVERLAY_INFO_IMAGE) { drawOverlay.deserialize(frame); + } + else if (frameType == FrameMetadata::FACE_LANDMARKS_INFO) { + drawOverlay.deserialize(frame); + } + else if (frameType == FrameMetadata::FACEDETECTS_INFO) { + drawOverlay.deserialize(frame); } - else if (frameType == FrameMetadata::RAW_IMAGE) { drawOverlay.draw(frame); diff --git a/base/test/facedetectorXform_tests.cpp b/base/test/facedetectorXform_tests.cpp index 3b3ae4528..cc04c2903 100644 --- a/base/test/facedetectorXform_tests.cpp +++ b/base/test/facedetectorXform_tests.cpp @@ -13,6 +13,8 @@ #include "ApraFaceInfo.h" #include "FaceDetectsInfo.h" #include "ImageDecoderCV.h" +#include "OverlayModule.h" +#include "ImageViewerModule.h" BOOST_AUTO_TEST_SUITE(facedetector_tests) #ifdef ARM64 @@ -59,6 +61,40 @@ BOOST_AUTO_TEST_CASE(basic) Test_Utils::saveOrCompare("./data/testOutput/facesResult.raw", const_cast(static_cast(frame.data)), frame.step[0] * frame.rows, 0); } +BOOST_AUTO_TEST_CASE(basic_faces) +{ + auto fileReader = boost::shared_ptr(new FileReaderModule(FileReaderModuleProps("./data/faces.jpg"))); + auto metadata = framemetadata_sp(new FrameMetadata(FrameMetadata::ENCODED_IMAGE)); + fileReader->addOutputPin(metadata); + + 
auto decoder = boost::shared_ptr(new ImageDecoderCV(ImageDecoderCVProps())); + auto metadata2 = framemetadata_sp(new RawImageMetadata()); + decoder->addOutputPin(metadata2); + fileReader->setNext(decoder); + + FaceDetectorXformProps faceDetectorProps; + auto faceDetector = boost::shared_ptr(new FaceDetectorXform(faceDetectorProps)); + decoder->setNext(faceDetector); + + auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); + faceDetector->setNext(overlay); + + auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("imageview"))); + overlay->setNext(sink); + + PipeLine p("test"); + p.appendModule(fileReader); + p.init(); + + p.run_all_threaded(); + boost::this_thread::sleep_for(boost::chrono::seconds(10)); + + LOG_INFO << "profiling done - stopping the pipeline"; + p.stop(); + p.term(); + p.wait_for_all(); +} + BOOST_AUTO_TEST_CASE(serialization_test) { diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt new file mode 100644 index 000000000..a85b7d3a8 --- /dev/null +++ b/samples/CMakeLists.txt @@ -0,0 +1,10 @@ +#dependencies + +find_package(Threads REQUIRED) + +include_directories( +../base +${CMAKE_CURRENT_SOURCE_DIR} +) + +add_subdirectory(face_detection_cpu) diff --git a/samples/face_detection_cpu/CMakeLists.txt b/samples/face_detection_cpu/CMakeLists.txt new file mode 100644 index 000000000..09135dfbe --- /dev/null +++ b/samples/face_detection_cpu/CMakeLists.txt @@ -0,0 +1,52 @@ +set(TARGET face_detection_cpu) +SET(CORE_FILES + face_detection_cpu.cpp +) +SET(CORE_FILES_H + face_detection_cpu.h +) +SET(SOURCE + ${CORE_FILES} + ${CORE_FILES_H} +) +set(UT_FILE + test_face_detection_cpu.cpp +) +add_library(face_detection_cpu_source STATIC ${SOURCE}) +add_executable(${TARGET} ${UT_FILE}) +target_include_directories ( face_detection_cpu_source PRIVATE + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + 
${NVCODEC_INCLUDE_DIR} +) +target_include_directories ( ${TARGET} PUBLIC + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) +target_link_libraries( + ${TARGET} + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + face_detection_cpu_source + ) \ No newline at end of file diff --git a/samples/face_detection_cpu/face_detection_cpu.cpp b/samples/face_detection_cpu/face_detection_cpu.cpp new file mode 100644 index 000000000..dcee16a44 --- /dev/null +++ b/samples/face_detection_cpu/face_detection_cpu.cpp @@ -0,0 +1,78 @@ +#include "WebCamSource.h" +#include "ImageViewerModule.h" +#include "face_detection_cpu.h" +#include "OverlayModule.h" +#include "FacialLandmarksCV.h" +#include "ImageDecoderCV.h" +#include "FaceDetectorXform.h" +#include "ColorConversionXForm.h" + + + +FaceDetectionCPU::FaceDetectionCPU() : pipeline("test") {} + +bool FaceDetectionCPU::setupPipeline() { + WebCamSourceProps webCamSourceprops(0, 640, 480); + auto source = boost::shared_ptr(new WebCamSource(webCamSourceprops)); + + FaceDetectorXformProps faceDetectorProps(1.0, 0.8, "../../data/assets/deploy.prototxt", "../../data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel"); + auto faceDetector = boost::shared_ptr(new FaceDetectorXform(faceDetectorProps)); + source->setNext(faceDetector); + + auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); + faceDetector->setNext(overlay); + + auto colorConversion = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_BGR))); + overlay->setNext(colorConversion); + + auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("imageview"))); + 
colorConversion->setNext(sink); + + pipeline.appendModule(source); + pipeline.init(); + return true; +} + +bool FaceDetectionCPU::startPipeline() { + pipeline.run_all_threaded(); + return true; +} + +bool FaceDetectionCPU::stopPipeline() { + pipeline.stop(); + pipeline.term(); + pipeline.wait_for_all(); + return true; +} + +int main() { + LoggerProps loggerProps; + loggerProps.logLevel = boost::log::trivial::severity_level::info; + Logger::setLogLevel(boost::log::trivial::severity_level::info); + Logger::initLogger(loggerProps); + + FaceDetectionCPU pipelineInstance; + + // Setup the pipeline + if (!pipelineInstance.setupPipeline()) { + std::cerr << "Failed to setup pipeline." << std::endl; + return 1; // Or any error code indicating failure + } + + // Start the pipeline + if (!pipelineInstance.startPipeline()) { + std::cerr << "Failed to start pipeline." << std::endl; + return 1; // Or any error code indicating failure + } + + // Wait for the pipeline to run for 10 seconds + boost::this_thread::sleep_for(boost::chrono::seconds(50)); + + // Stop the pipeline + if (!pipelineInstance.stopPipeline()) { + std::cerr << "Failed to stop pipeline." 
<< std::endl; + return 1; // Or any error code indicating failure + } + + return 0; // Or any success code +} \ No newline at end of file diff --git a/samples/face_detection_cpu/face_detection_cpu.h b/samples/face_detection_cpu/face_detection_cpu.h new file mode 100644 index 000000000..c03195454 --- /dev/null +++ b/samples/face_detection_cpu/face_detection_cpu.h @@ -0,0 +1,11 @@ +#include + +class FaceDetectionCPU { +public: + FaceDetectionCPU(); + bool setupPipeline(); + bool startPipeline(); + bool stopPipeline(); +private: + PipeLine pipeline; +}; \ No newline at end of file diff --git a/samples/face_detection_cpu/test_face_detection_cpu.cpp b/samples/face_detection_cpu/test_face_detection_cpu.cpp new file mode 100644 index 000000000..6228b293e --- /dev/null +++ b/samples/face_detection_cpu/test_face_detection_cpu.cpp @@ -0,0 +1,11 @@ +#include + +#include "face_detection_cpu.h" + +BOOST_AUTO_TEST_SUITE(start_face_tracking) + +BOOST_AUTO_TEST_CASE(face_tracking_simple) +{ +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file diff --git a/samples/timelapse_sample/CMakeLists.txt b/samples/timelapse_sample/CMakeLists.txt deleted file mode 100644 index e6c221c6e..000000000 --- a/samples/timelapse_sample/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -cmake_minimum_required(VERSION 3.22) diff --git a/samples/timelapse_sample/timelapse_summary.cpp b/samples/timelapse_sample/timelapse_summary.cpp deleted file mode 100644 index 4fac1d661..000000000 --- a/samples/timelapse_sample/timelapse_summary.cpp +++ /dev/null @@ -1,48 +0,0 @@ -#include "Mp4ReaderSource.h" -#include "TimelapsePipeline.h" -#include "H264Metadata.h" - - -TimelapsePipeline::TimelapsePipeline():pipeline("test") { -} - - -TimelapsePipeline::~TimelapsePipeline() { -} - - -bool TimelapsePipeline::setupPipeline() { - bool overlayFrames = true; - - FileReaderModuleProps fileReaderProps("./data/h264_data/FVDO_Freeway_4cif_???.H264"); - fileReaderProps.fps = 30; - fileReaderProps.readLoop = true; - auto 
fileReader = boost::shared_ptr(new FileReaderModule(fileReaderProps)); - auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); - fileReader->addOutputPin(h264ImageMetadata); - - auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MotionVectorExtractorProps::OPENH264, overlayFrames))); - fileReader->setNext(motionExtractor); - - auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); - motionExtractor->setNext(overlay); - - auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("MotionVectorsOverlay"))); - overlay->setNext(sink); - - pipeline.appendModule(fileReader); - pipeline.init(); - return true -} - -bool TimelapsePipeline::startPipeline() { - pipeline.run_all_threaded(); - return true; -} - -bool TimelapsePipeline::stopPipeline() { - m_cameraPipeline.stop(); - m_cameraPipeline.term(); - m_cameraPipeline.wait_for_all(); - return true; -} \ No newline at end of file diff --git a/samples/timelapse_sample/timelapse_summary.h b/samples/timelapse_sample/timelapse_summary.h deleted file mode 100644 index f595d3cd1..000000000 --- a/samples/timelapse_sample/timelapse_summary.h +++ /dev/null @@ -1,12 +0,0 @@ -#include - -class TimelapsePipeline : -{ -public: - bool setupPipeline(): - bool startPipeline(): - bool stopPipeline(): - -private: - PipeLine pipeline; -} \ No newline at end of file diff --git a/samples/timelapse_sample/timelapse_summary_test.cpp b/samples/timelapse_sample/timelapse_summary_test.cpp deleted file mode 100644 index 2332320f1..000000000 --- a/samples/timelapse_sample/timelapse_summary_test.cpp +++ /dev/null @@ -1,18 +0,0 @@ -#include - -#include "TimelapsePipeline.h" - -BOOST_AUTO_TEST_SUITE(timeplase_summary) - -BOOST_AUTO_TEST_CASE(generateTimeLapse) -{ - auto timelapsePipeline = boost::shared_ptr(new TimelapsePipeline()); - timelapsePipeline.setupPipeline(); - - timelapsePipeline.startPipeline(); - 
boost::this_thread::sleep_for(boost::chrono::seconds(10)); - - timelapsePipeline.stopPipeline(); -} - -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From 79355331ddb071ce39a3ce10876c44454ced1d60 Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 27 May 2024 18:25:22 +0530 Subject: [PATCH 28/49] added test thumbnail sample --- .../CMakeLists.txt | 22 +-- .../GenerateThumbnailsPipeline.h | 18 ++- .../generate_thumbnail_from_mp4_video.cpp | 142 ++++++++++-------- ...test_generate_thumbnail_from_mp4_video.cpp | 14 +- 4 files changed, 115 insertions(+), 81 deletions(-) diff --git a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt index 23aee9cfa..73e3a6910 100644 --- a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt +++ b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt @@ -1,30 +1,19 @@ +cmake_minimum_required(VERSION 3.22) set(TARGET create_thumbnail_from_mp4_video) SET(CORE_FILES generate_thumbnail_from_mp4_video.cpp + ../../base/test/test_utils.cpp ) SET(CORE_FILES_H GenerateThumbnailsPipeline.h + ../../base/test/test_utils.h ) SET(SOURCE ${CORE_FILES} ${CORE_FILES_H} ) -set(UT_FILE - test_generate_thumbnail_from_mp4_video.cpp -) -add_library(create_thumbnail_from_mp4_video_source STATIC ${SOURCE}) -add_executable(${TARGET} ${UT_FILE}) -target_include_directories ( create_thumbnail_from_mp4_video_source PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} - ${OpenCV_INCLUDE_DIRS} - ${Boost_INCLUDE_DIRS} - ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} -) -target_include_directories ( ${TARGET} PUBLIC +add_executable(${TARGET} ${SOURCE}) +target_include_directories ( ${TARGET} PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} @@ -48,5 +37,4 @@ target_link_libraries( ${NVCODEC_LIB} ${NVJPEGLIB_L4T} ${CURSES_LIBRARIES} - create_thumbnail_from_mp4_video_source ) \ No newline at end of file 
diff --git a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h index dbf4a6fe8..2137e6d62 100644 --- a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h +++ b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h @@ -1,5 +1,13 @@ #include #include +#include "Mp4ReaderSource.h" +#include "H264Decoder.h" +#include "ColorConversionXForm.h" +#include "CudaMemCopy.h" +#include "FileWriterModule.h" +#include "JPEGEncoderNVJPEG.h" + + class GenerateThumbnailsPipeline { @@ -8,8 +16,16 @@ class GenerateThumbnailsPipeline bool setUpPipeLine(); bool startPipeLine(); bool stopPipeLine(); + bool testPipeLine(); private: PipeLine pipeLine; boost::shared_ptr valve; -}; \ No newline at end of file + boost::shared_ptr mp4Reader; + boost::shared_ptr decoder; + boost::shared_ptr colorchange; + boost::shared_ptr cudaCopy; + boost::shared_ptr jpegEncoder; + boost::shared_ptr fileWriter; +}; + diff --git a/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp b/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp index 393f38cb5..73ab50b04 100644 --- a/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp +++ b/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp @@ -1,90 +1,108 @@ -#include "GenerateThumbnailsPipeline.h" +#include "ColorConversionXForm.h" +#include "CudaMemCopy.h" +#include "ExternalSinkModule.h" +#include "FileReaderModule.h" +#include "FileWriterModule.h" #include "FrameMetadata.h" +#include "GenerateThumbnailsPipeline.h" +#include "H264Decoder.h" #include "H264Metadata.h" +#include "JPEGEncoderNVJPEG.h" #include "Mp4ReaderSource.h" #include "Mp4VideoMetadata.h" -#include "H264Decoder.h" #include "ValveModule.h" -#include "ColorConversionXForm.h" -#include "CudaMemCopy.h" -#include "FileWriterModule.h" -#include "JPEGEncoderNVJPEG.h" 
-GenerateThumbnailsPipeline::GenerateThumbnailsPipeline() : pipeLine("pipeline") { -} + +GenerateThumbnailsPipeline::GenerateThumbnailsPipeline() + : pipeLine("pipeline") {} + + bool GenerateThumbnailsPipeline::setUpPipeLine() { - // Implementation - std::string videoPath = "../.././data/Mp4_videos/h264_video_metadata/20230514/0011/1686723796848.mp4"; - std::string outPath = "../.././data/mp4Reader_saveOrCompare/h264/frame_000???.jpg"; - - bool parseFS = false; - auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); - auto frameType = FrameMetadata::FrameType::H264_DATA; - auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, parseFS, 0, true, false, false); - auto mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); - mp4Reader->addOutPutPin(h264ImageMetadata); - - auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); - mp4Reader->addOutPutPin(mp4Metadata); - - std::vector mImagePin; - mImagePin = mp4Reader->getAllOutputPinsByType(frameType); - - auto decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); - mp4Reader->setNext(decoder, mImagePin); - - valve = boost::shared_ptr(new ValveModule(ValveModuleProps(2))); - decoder->setNext(valve); - - auto conversionType = ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; - auto metadata = framemetadata_sp(new RawImagePlanarMetadata(1280, 720, ImageMetadata::ImageType::YUV420, size_t(0), CV_8U)); - auto colorchange = boost::shared_ptr(new ColorConversion(ColorConversionProps(conversionType))); - valve->setNext(colorchange); - - auto stream = cudastream_sp(new ApraCudaStream); - auto copy = boost::shared_ptr(new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, stream))); - colorchange->setNext(copy); - - auto m2 = boost::shared_ptr(new JPEGEncoderNVJPEG(JPEGEncoderNVJPEGProps(stream))); - copy->setNext(m2); - - /*auto valve = boost::shared_ptr(new ValveModule(ValveModuleProps(10))); - m2->setNext(valve);*/ - - auto fileWriter = boost::shared_ptr(new 
FileWriterModule(FileWriterModuleProps(outPath))); - m2->setNext(fileWriter); - - pipeLine.appendModule(mp4Reader); - pipeLine.init(); - return true; + // Implementation + std::string videoPath = "./data/Mp4_videos/h264_video_metadata/" + "20230514/0011/1686723796848.mp4"; + std::string outPath = + "./data/mp4Reader_saveOrCompare/h264/thumbnail.jpg"; + + bool parseFS = false; + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + auto frameType = FrameMetadata::FrameType::H264_DATA; + auto mp4ReaderProps = + Mp4ReaderSourceProps(videoPath, parseFS, 0, true, false, false); + mp4Reader = + boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); + mp4Reader->addOutPutPin(h264ImageMetadata); + + auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); + mp4Reader->addOutPutPin(mp4Metadata); + + std::vector mImagePin; + mImagePin = mp4Reader->getAllOutputPinsByType(frameType); + + decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); + mp4Reader->setNext(decoder, mImagePin); + + valve = boost::shared_ptr(new ValveModule(ValveModuleProps(0))); + decoder->setNext(valve); + + /* auto conversionType = + ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; auto metadata = + framemetadata_sp(new RawImagePlanarMetadata(1280, 720, + ImageMetadata::ImageType::YUV420,size_t(0), + CV_8U,FrameMetadata::MemType::CUDA_DEVICE));*/ + /* colorchange = boost::shared_ptr(new + ColorConversion(ColorConversionProps(conversionType))); + valve->setNext(colorchange);*/ + + auto stream = cudastream_sp(new ApraCudaStream); + cudaCopy = boost::shared_ptr( + new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, stream))); + valve->setNext(cudaCopy); + + jpegEncoder = boost::shared_ptr( + new JPEGEncoderNVJPEG(JPEGEncoderNVJPEGProps(stream))); + cudaCopy->setNext(jpegEncoder); + + fileWriter = boost::shared_ptr( + new FileWriterModule(FileWriterModuleProps(outPath))); + jpegEncoder->setNext(fileWriter); + + return true; } bool 
GenerateThumbnailsPipeline::startPipeLine() { - pipeLine.run_all_threaded(); - valve->allowFrames(1); - return true; + pipeLine.appendModule(mp4Reader); + pipeLine.init(); + pipeLine.run_all_threaded(); + valve->allowFrames(1); + + return true; } bool GenerateThumbnailsPipeline::stopPipeLine() { - pipeLine.stop(); - pipeLine.term(); - pipeLine.wait_for_all(); - return true; + pipeLine.stop(); + pipeLine.term(); + pipeLine.wait_for_all(); + return true; } -int main() { - GenerateThumbnailsPipeline pipelineInstance; +int main() { + GenerateThumbnailsPipeline pipelineInstance; if (!pipelineInstance.setUpPipeLine()) { - std::cerr << "Failed to setup pipeline." << std::endl; + std::cerr << "Failed to setup pipeline." << std::endl; return 1; } + + if (!pipelineInstance.startPipeLine()) { std::cerr << "Failed to start pipeline." << std::endl; return 1; // Or any error code indicating failure } + // Wait for the pipeline to run for 10 seconds boost::this_thread::sleep_for(boost::chrono::seconds(5)); + // Stop the pipeline if (!pipelineInstance.stopPipeLine()) { std::cerr << "Failed to stop pipeline." 
<< std::endl; diff --git a/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp b/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp index 74022d2a3..60b8830fe 100644 --- a/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp +++ b/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp @@ -1,5 +1,6 @@ #include - +#include +#include "../../base/test/test_utils.h" #include "GenerateThumbnailsPipeline.h" BOOST_AUTO_TEST_SUITE(generateThumbnails) @@ -12,6 +13,17 @@ BOOST_AUTO_TEST_CASE(generateThumbnails_from_mp4) boost::this_thread::sleep_for(boost::chrono::seconds(5)); + const uint8_t *pReadDataTest = nullptr; + unsigned int readDataSizeTest = 0U; + + BOOST_TEST( + Test_Utils::readFile("./data/thumbnail/sample_thumbnail.jpg", + pReadDataTest, readDataSizeTest)); + Test_Utils::saveOrCompare( + "./data/mp4Reader_saveOrCompare/h264/test_thumbnail.jpg", + pReadDataTest, readDataSizeTest, 0); + + generateThumbnailPipeline->stopPipeLine(); } From cc202695eac4183261fb4c5e40acd7c57c64bb40 Mon Sep 17 00:00:00 2001 From: nsabale7 Date: Mon, 27 May 2024 18:48:56 +0530 Subject: [PATCH 29/49] wip as per the structure --- base/CMakeLists.txt | 34 +++++++++++++- samples/CMakeLists.txt | 5 +- samples/face_detection_cpu/CMakeLists.txt | 22 ++------- .../face_detection_cpu/face_detection_cpu.cpp | 46 ++++++++++++++----- .../face_detection_cpu/face_detection_cpu.h | 13 ++++++ 5 files changed, 85 insertions(+), 35 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index ba5a36776..7965833af 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -163,6 +163,7 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp + .././samples/face_detection_cpu/face_detection_cpu.cpp ) SET(CORE_FILES_H @@ -224,6 +225,7 @@ SET(CORE_FILES_H include/OverlayModule.h include/OrderedCacheOfFiles.h 
include/TestSignalGeneratorSrc.h + .././samples/face_detection_cpu/face_detection_cpu.h ) IF(ENABLE_WINDOWS) @@ -456,6 +458,7 @@ ${LIBMP4_INC_DIR} ${BARESIP_INC_DIR} ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} +.././samples/face_detection_cpu ) @@ -573,8 +576,12 @@ IF(ENABLE_LINUX) ) ENDIF(ENABLE_LINUX) +SET(SAMPLE_UT_FILES +.././samples/face_detection_cpu/test_face_detection_cpu.cpp +) add_executable(aprapipesut ${UT_FILES}) +add_executable(aprapipessampleut ${SAMPLE_UT_FILES}) IF(ENABLE_ARM64) target_include_directories ( aprapipesut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_ROOT} ${JPEG_INCLUDE_DIR}) @@ -590,7 +597,30 @@ find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../samples ${CMAKE_CURRENT_BINARY_DIR}/samples) -target_link_libraries(aprapipesut +target_link_libraries(aprapipessampleut + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + ZXing::Core + ZXing::ZXing + BZip2::BZip2 + ZLIB::ZLIB + liblzma::liblzma + bigint::bigint + sfml-audio + whisper::whisper + ) + + target_link_libraries(aprapipesut aprapipes ${JPEG_LIBRARIES} ${LIBMP4_LIB} @@ -619,4 +649,4 @@ IF(ENABLE_WINDOWS) IF(GHA) file(COPY ${RUNTIME_DLLS} DESTINATION RelWithDebInfo/) ENDIF(GHA) -ENDIF(ENABLE_WINDOWS) +ENDIF(ENABLE_WINDOWS) \ No newline at end of file diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index a85b7d3a8..4739336d8 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -1,7 +1,4 @@ -#dependencies - -find_package(Threads REQUIRED) - +cmake_minimum_required(VERSION 3.22) include_directories( ../base ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/samples/face_detection_cpu/CMakeLists.txt b/samples/face_detection_cpu/CMakeLists.txt index 09135dfbe..19b385c4a 100644 --- a/samples/face_detection_cpu/CMakeLists.txt +++ 
b/samples/face_detection_cpu/CMakeLists.txt @@ -9,22 +9,10 @@ SET(SOURCE ${CORE_FILES} ${CORE_FILES_H} ) -set(UT_FILE - test_face_detection_cpu.cpp -) -add_library(face_detection_cpu_source STATIC ${SOURCE}) -add_executable(${TARGET} ${UT_FILE}) -target_include_directories ( face_detection_cpu_source PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} - ${OpenCV_INCLUDE_DIRS} - ${Boost_INCLUDE_DIRS} - ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} -) -target_include_directories ( ${TARGET} PUBLIC + +add_executable(${TARGET} ${SOURCE}) + +target_include_directories ( ${TARGET} PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} @@ -34,6 +22,7 @@ target_include_directories ( ${TARGET} PUBLIC ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} ) + target_link_libraries( ${TARGET} aprapipes @@ -48,5 +37,4 @@ target_link_libraries( ${NVCODEC_LIB} ${NVJPEGLIB_L4T} ${CURSES_LIBRARIES} - face_detection_cpu_source ) \ No newline at end of file diff --git a/samples/face_detection_cpu/face_detection_cpu.cpp b/samples/face_detection_cpu/face_detection_cpu.cpp index dcee16a44..e904f3473 100644 --- a/samples/face_detection_cpu/face_detection_cpu.cpp +++ b/samples/face_detection_cpu/face_detection_cpu.cpp @@ -6,34 +6,56 @@ #include "ImageDecoderCV.h" #include "FaceDetectorXform.h" #include "ColorConversionXForm.h" +#include +#include "ExternalSinkModule.h" FaceDetectionCPU::FaceDetectionCPU() : pipeline("test") {} +bool FaceDetectionCPU::testPipeline(){ + auto sink = boost::shared_ptr(new ExternalSinkModule()); + colorConversion->setNext(sink); + + BOOST_TEST(source->init()); + BOOST_TEST(faceDetector->init()); + BOOST_TEST(overlay->init()); + BOOST_TEST(colorConversion->init()); + BOOST_TEST(sink->init()); + + source->step(); + faceDetector->step(); + overlay->step(); + colorConversion->step(); + + auto frames = sink->pop(); + BOOST_TEST(frames.size() == 1); + return true; +} + bool 
FaceDetectionCPU::setupPipeline() { WebCamSourceProps webCamSourceprops(0, 640, 480); - auto source = boost::shared_ptr(new WebCamSource(webCamSourceprops)); + source = boost::shared_ptr(new WebCamSource(webCamSourceprops)); - FaceDetectorXformProps faceDetectorProps(1.0, 0.8, "../../data/assets/deploy.prototxt", "../../data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel"); - auto faceDetector = boost::shared_ptr(new FaceDetectorXform(faceDetectorProps)); + FaceDetectorXformProps faceDetectorProps(1.0, 0.8, "./data/assets/deploy.prototxt", "./data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel"); + faceDetector = boost::shared_ptr(new FaceDetectorXform(faceDetectorProps)); source->setNext(faceDetector); - auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); + overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); faceDetector->setNext(overlay); - auto colorConversion = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_BGR))); + colorConversion = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_BGR))); overlay->setNext(colorConversion); - auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("imageview"))); - colorConversion->setNext(sink); - - pipeline.appendModule(source); - pipeline.init(); + imageViewerSink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("imageview"))); + colorConversion->setNext(imageViewerSink); + return true; } bool FaceDetectionCPU::startPipeline() { + pipeline.appendModule(source); + pipeline.init(); pipeline.run_all_threaded(); return true; } @@ -60,13 +82,13 @@ int main() { } // Start the pipeline - if (!pipelineInstance.startPipeline()) { + if (!pipelineInstance.testPipeline()) { std::cerr << "Failed to start pipeline." 
<< std::endl; return 1; // Or any error code indicating failure } // Wait for the pipeline to run for 10 seconds - boost::this_thread::sleep_for(boost::chrono::seconds(50)); + boost::this_thread::sleep_for(boost::chrono::seconds(5)); // Stop the pipeline if (!pipelineInstance.stopPipeline()) { diff --git a/samples/face_detection_cpu/face_detection_cpu.h b/samples/face_detection_cpu/face_detection_cpu.h index c03195454..46016e76f 100644 --- a/samples/face_detection_cpu/face_detection_cpu.h +++ b/samples/face_detection_cpu/face_detection_cpu.h @@ -1,4 +1,11 @@ #include +#include "WebCamSource.h" +#include "ImageViewerModule.h" +#include "OverlayModule.h" +#include "FacialLandmarksCV.h" +#include "ImageDecoderCV.h" +#include "FaceDetectorXform.h" +#include "ColorConversionXForm.h" class FaceDetectionCPU { public: @@ -6,6 +13,12 @@ class FaceDetectionCPU { bool setupPipeline(); bool startPipeline(); bool stopPipeline(); + bool testPipeline(); private: PipeLine pipeline; + boost::shared_ptr source; + boost::shared_ptr faceDetector; + boost::shared_ptr overlay; + boost::shared_ptr colorConversion; + boost::shared_ptr imageViewerSink; }; \ No newline at end of file From c38f63f6d7a49bf0a8a95eefa76b3668f0b0fbf0 Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 27 May 2024 19:06:22 +0530 Subject: [PATCH 30/49] test for play mp4 from beginning --- .../play_mp4_from_beginning/CMakeLists.txt | 21 +---- .../PlayMp4VideoFromBeginning.h | 9 +- .../play_mp4_video_from_beginning.cpp | 93 +++++++++++++++++-- .../test_play_mp4_video_from_beginning.cpp | 2 +- 4 files changed, 100 insertions(+), 25 deletions(-) diff --git a/samples/play_mp4_from_beginning/CMakeLists.txt b/samples/play_mp4_from_beginning/CMakeLists.txt index 43e1b3063..e5fb1bd53 100644 --- a/samples/play_mp4_from_beginning/CMakeLists.txt +++ b/samples/play_mp4_from_beginning/CMakeLists.txt @@ -1,30 +1,18 @@ set(TARGET play_mp4_from_beginning) SET(CORE_FILES play_mp4_video_from_beginning.cpp + 
../../base/test/test_utils.cpp ) SET(CORE_FILES_H PlayMp4VideoFromBeginning.h + ../../base/test/test_utils.h ) SET(SOURCE ${CORE_FILES} ${CORE_FILES_H} ) -set(UT_FILE - test_play_mp4_video_from_beginning.cpp -) -add_library(play_mp4_from_beginning_source STATIC ${SOURCE}) -add_executable(${TARGET} ${UT_FILE}) -target_include_directories ( play_mp4_from_beginning_source PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} - ${OpenCV_INCLUDE_DIRS} - ${Boost_INCLUDE_DIRS} - ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} -) -target_include_directories ( ${TARGET} PUBLIC +add_executable(${TARGET} ${SOURCE}) +target_include_directories ( ${TARGET} PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} @@ -48,5 +36,4 @@ target_link_libraries( ${NVCODEC_LIB} ${NVJPEGLIB_L4T} ${CURSES_LIBRARIES} - play_mp4_from_beginning_source ) \ No newline at end of file diff --git a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h index f831aa0cc..6fa0c45bc 100644 --- a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h +++ b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h @@ -1,6 +1,9 @@ #include #include "Mp4ReaderSource.h" #include "ImageViewerModule.h" +#include "ColorConversionXForm.h" +#include "H264Decoder.h" + class PlayMp4VideoFromBeginning { public: @@ -9,9 +12,13 @@ class PlayMp4VideoFromBeginning { bool startPipeLine(); bool stopPipeLine(); bool flushQueuesAndSeek(); + bool testPipeLineForFlushQue(); + bool testPipeLineForSeek(); private: PipeLine pipeLine; boost::shared_ptr mp4Reader; - boost::shared_ptr sink; + boost::shared_ptr imageViewerSink; + boost::shared_ptr decoder; + boost::shared_ptr colorchange; }; \ No newline at end of file diff --git a/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp b/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp index 
844b20ed2..0a78e2364 100644 --- a/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp +++ b/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp @@ -10,12 +10,85 @@ #include "CudaMemCopy.h" #include "FileWriterModule.h" #include "JPEGEncoderNVJPEG.h" +#include "ExternalSinkModule.h" #include "ImageViewerModule.h" +#include +#include +#include "Frame.h" +#include "FrameContainerQueue.h" +#include "../../base/test/test_utils.h" +class SinkModuleProps : public ModuleProps { +public: + SinkModuleProps() : ModuleProps(){}; +}; + +class SinkModule : public Module { +public: + SinkModule(SinkModuleProps props) : Module(SINK, "sinkModule", props){}; + boost::shared_ptr getQue() { return Module::getQue(); } + frame_container pop() { return Module::pop(); } + +protected: + bool process() { return false; } + bool validateOutputPins() { return true; } + bool validateInputPins() { return true; } +}; PlayMp4VideoFromBeginning::PlayMp4VideoFromBeginning() : pipeLine("pipeline") { } + +bool PlayMp4VideoFromBeginning::testPipeLineForFlushQue() { + auto sink = boost::shared_ptr(new SinkModule(SinkModuleProps())); + colorchange->setNext(sink); + + BOOST_CHECK(mp4Reader->init()); + BOOST_CHECK(decoder->init()); + BOOST_CHECK(colorchange->init()); + BOOST_CHECK(sink->init()); + + for (int i = 0; i <= 20; i++) { + mp4Reader->step(); + decoder->step(); + } + colorchange->step(); + + auto frames = sink->pop(); + auto sinkQue = sink->getQue(); + BOOST_TEST(sinkQue->size() == 1); + sinkQue->flush(); + BOOST_TEST(sinkQue->size() == 0); + frame_sp outputFrame = frames.cbegin()->second; + + + + Test_Utils::saveOrCompare("../.././data/mp4Reader_saveOrCompare/h264/testplay.raw", const_cast(static_cast(outputFrame->data())), outputFrame->size(), 0); + return true; +} +bool PlayMp4VideoFromBeginning::testPipeLineForSeek() { + auto sink = boost::shared_ptr(new SinkModule(SinkModuleProps())); + mp4Reader->setNext(sink); + + BOOST_CHECK(mp4Reader->init()); + 
BOOST_CHECK(decoder->init()); + BOOST_CHECK(colorchange->init()); + BOOST_CHECK(sink->init()); + + mp4Reader->step(); + + auto frames = sink->pop(); + auto imgFrame = frames.begin()->second; + + uint64_t skipTS = 1686723797848; + mp4Reader->randomSeek(skipTS, false); + mp4Reader->step(); + BOOST_TEST(imgFrame->timestamp == 1686723797856); + LOG_INFO << "Found next available frame " << imgFrame->timestamp - skipTS + << " msecs later from skipTS"; + + return true; +} bool PlayMp4VideoFromBeginning::setUpPipeLine() { // Implementation std::string videoPath = "../.././data/Mp4_videos/h264_video_metadata/20230514/0011/1686723796848.mp4"; @@ -35,24 +108,24 @@ bool PlayMp4VideoFromBeginning::setUpPipeLine() { std::vector mImagePin; mImagePin = mp4Reader->getAllOutputPinsByType(frameType); - auto decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); + decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); mp4Reader->setNext(decoder, mImagePin); auto conversionType = ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; auto metadata = framemetadata_sp(new RawImagePlanarMetadata(1280, 720, ImageMetadata::ImageType::YUV420, size_t(0), CV_8U)); - auto colorchange = boost::shared_ptr(new ColorConversion(ColorConversionProps(conversionType))); + colorchange = boost::shared_ptr(new ColorConversion(ColorConversionProps(conversionType))); decoder->setNext(colorchange); - sink = boost::shared_ptr( + imageViewerSink = boost::shared_ptr( new ImageViewerModule(ImageViewerModuleProps("imageview"))); - colorchange->setNext(sink); + colorchange->setNext(imageViewerSink); - pipeLine.appendModule(mp4Reader); - pipeLine.init(); return true; } bool PlayMp4VideoFromBeginning::startPipeLine() { + pipeLine.appendModule(mp4Reader); + pipeLine.init(); pipeLine.run_all_threaded(); return true; } @@ -76,6 +149,14 @@ int main() { std::cerr << "Failed to setup pipeline." 
<< std::endl; return 1; } + if (!pipelineInstance.testPipeLineForFlushQue()) { + std::cerr << "Failed to test pipeline." << std::endl; + return 1; + } + if (!pipelineInstance.testPipeLineForSeek()) { + std::cerr << "Failed to test pipeline." << std::endl; + return 1; + } if (!pipelineInstance.startPipeLine()) { std::cerr << "Failed to start pipeline." << std::endl; return 1; // Or any error code indicating failure diff --git a/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp b/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp index 5dc3d8c5e..8586303be 100644 --- a/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp +++ b/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp @@ -4,7 +4,7 @@ BOOST_AUTO_TEST_SUITE(start_video_from_beginning) -BOOST_AUTO_TEST_CASE(start_from_beginning) +BOOST_AUTO_TEST_CASE(play_mp4_from_beginning_flush_queue_test) { auto playMp4VideoFromBeginning = boost::shared_ptr(new PlayMp4VideoFromBeginning()); playMp4VideoFromBeginning->setUpPipeLine(); From 41ff3ae470da5c41932801489f8d0b218cf21fd6 Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 27 May 2024 19:07:26 +0530 Subject: [PATCH 31/49] updated main CMake --- base/CMakeLists.txt | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index d6dadadde..74a575e41 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -163,6 +163,7 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp + .././samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp .././samples/timelapse-sample/timelapse_summary.h ) @@ -225,6 +226,7 @@ SET(CORE_FILES_H include/OverlayModule.h include/OrderedCacheOfFiles.h include/TestSignalGeneratorSrc.h + .././samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h .././samples/timelapse-sample/timelapse_summary.cpp ) 
@@ -458,6 +460,7 @@ ${LIBMP4_INC_DIR} ${BARESIP_INC_DIR} ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} +.././samples/create_thumbnail_from_mp4_video ./../samples/timelapse-sample ) @@ -570,7 +573,10 @@ SET(UT_FILES ) SET(SAMPLE_UT_FILES -.././samples/timelapse-sample/timelapse_summary_test.cpp + test/test_utils.cpp + test/test_utils.h + .././samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp + .././samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp ) IF(ENABLE_LINUX) list(APPEND UT_FILES @@ -581,7 +587,6 @@ ENDIF(ENABLE_LINUX) add_executable(aprapipesut ${UT_FILES}) - add_executable(aprapipessampleut ${SAMPLE_UT_FILES}) IF(ENABLE_ARM64) @@ -596,6 +601,29 @@ ENDIF (ENABLE_CUDA) find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) +target_link_libraries(aprapipessampleut + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + ZXing::Core + ZXing::ZXing + BZip2::BZip2 + ZLIB::ZLIB + liblzma::liblzma + bigint::bigint + sfml-audio + whisper::whisper + ) + target_link_libraries(aprapipesut aprapipes ${JPEG_LIBRARIES} From 02fc58832b02851c6b1f043ae154946def95eb13 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Mon, 27 May 2024 19:23:29 +0530 Subject: [PATCH 32/49] 1. Fixed the props for Mp4ReaderSource and Mp4WriterSink 2. 
Fixed the memory leak issue in MotionVectorExtractor --- base/src/MotionVectorExtractor.cpp | 5 +++++ .../timelapse-sample/timelapse_summary.cpp | 21 +++++++++++-------- samples/timelapse-sample/timelapse_summary.h | 1 + 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index 236a33a16..04b70747a 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -261,6 +261,7 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, int32_t mMotionVectorSize = mWidth * mHeight * 8; int16_t *mMotionVectorData = nullptr; memset(&pDstInfo, 0, sizeof(SBufferInfo)); + if (sendOverlayFrame) { outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); mMotionVectorData = static_cast(outFrame->data()); @@ -294,6 +295,7 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, circleOverlays.push_back(circleOverlay); motionFound = true; + free(mMotionVectorData); } } @@ -310,6 +312,7 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, frames.insert(make_pair(motionVectorPinId, outFrame)); } } + if (sendOverlayFrame == false) { for (int i = 0; i < mMotionVectorSize; i += 4) { auto motionX = mMotionVectorData[i]; @@ -319,6 +322,7 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, break; } } + free(mMotionVectorData); } if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && (mMotionVectorSize != mWidth * mHeight * 8) && motionFound == true) { @@ -354,6 +358,7 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); frames.insert(make_pair(rawFramePinId, decodedFrame)); + free(yuvStartPointer); } } diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index 3215f662d..536341f68 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp 
@@ -33,11 +33,13 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); + mp4ReaderProps.parseFS = true; + mp4ReaderProps.readLoop = false; mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); auto motionExtractorProps = MotionVectorExtractorProps( MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, - 10, sendOverlayFrames); + 50, sendOverlayFrames); motionExtractor = boost::shared_ptr( new MotionVectorExtractor(motionExtractorProps)); colorchange1 = boost::shared_ptr(new ColorConversion( @@ -49,7 +51,8 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, encoder = boost::shared_ptr(new H264EncoderNVCodec( H264EncoderNVCodecProps(bitRateKbps, cuContext, gopLength, frameRate, profile, enableBFrames))); - auto mp4WriterSinkProps = Mp4WriterSinkProps(1, 10, 24, outFolderPath, true); + auto mp4WriterSinkProps = Mp4WriterSinkProps(UINT32_MAX, 10, 24, outFolderPath, true); + mp4WriterSinkProps.recordedTSBasedDTS = false; mp4WriterSink = boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); @@ -61,7 +64,7 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, mp4Reader->setNext(motionExtractor, mImagePin); motionExtractor->setNext(colorchange1); colorchange1->setNext(colorchange2); - auto copy = boost::shared_ptr( + copy = boost::shared_ptr( new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); colorchange2->setNext(copy); copy->setNext(sync); @@ -87,20 +90,21 @@ bool TimelapsePipeline::stopPipeline() { void TimelapsePipeline::test() { auto sink = boost::shared_ptr(new ExternalSinkModule()); + encoder->setNext(sink); BOOST_TEST(mp4Reader->init()); BOOST_TEST(motionExtractor->init()); BOOST_TEST(colorchange1->init()); BOOST_TEST(colorchange2->init()); + BOOST_TEST(copy->init()); BOOST_TEST(sync->init()); BOOST_TEST(encoder->init()); - BOOST_TEST(mp4WriterSink->init()); 
mp4Reader->step(); motionExtractor->step(); colorchange1->step(); colorchange2->step(); + copy->step(); sync->step(); encoder->step(); - encoder->setNext(sink); auto frames = sink->pop(); BOOST_CHECK_EQUAL(frames.size(), 1); } @@ -120,21 +124,20 @@ int main(int argc, char *argv[]) { Logger::initLogger(loggerProps); TimelapsePipeline pipelineInstance; - + // Setup the pipeline if (!pipelineInstance.setupPipeline(videoPath, outFolderPath)) { std::cerr << "Failed to setup pipeline." << std::endl; return 1; // Or any error code indicating failure } - + // Start the pipeline if (!pipelineInstance.startPipeline()) { std::cerr << "Failed to start pipeline." << std::endl; return 1; // Or any error code indicating failure } - // Wait for the pipeline to run for 10 seconds - boost::this_thread::sleep_for(boost::chrono::seconds(10)); + boost::this_thread::sleep_for(boost::chrono::seconds(600)); // Stop the pipeline if (!pipelineInstance.stopPipeline()) { diff --git a/samples/timelapse-sample/timelapse_summary.h b/samples/timelapse-sample/timelapse_summary.h index 27c043020..e5caa1a6a 100644 --- a/samples/timelapse-sample/timelapse_summary.h +++ b/samples/timelapse-sample/timelapse_summary.h @@ -27,4 +27,5 @@ class TimelapsePipeline { boost::shared_ptr sync; boost::shared_ptr encoder; boost::shared_ptr mp4WriterSink; + boost::shared_ptr copy; }; \ No newline at end of file From 744d40d6700d05370df80d98a71347b5eb8e5d82 Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 27 May 2024 19:24:51 +0530 Subject: [PATCH 33/49] refactored test and CMake file --- .../play_mp4_from_beginning/CMakeLists.txt | 5 ++-- .../play_mp4_video_from_beginning.cpp | 26 +++++++++---------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/samples/play_mp4_from_beginning/CMakeLists.txt b/samples/play_mp4_from_beginning/CMakeLists.txt index e5fb1bd53..69878b0b5 100644 --- a/samples/play_mp4_from_beginning/CMakeLists.txt +++ b/samples/play_mp4_from_beginning/CMakeLists.txt @@ -1,3 
+1,4 @@ +cmake_minimum_required(VERSION 3.22) set(TARGET play_mp4_from_beginning) SET(CORE_FILES play_mp4_video_from_beginning.cpp @@ -11,8 +12,8 @@ SET(SOURCE ${CORE_FILES} ${CORE_FILES_H} ) -add_executable(${TARGET} ${SOURCE}) -target_include_directories ( ${TARGET} PRIVATE +add_executable(${TARGET} ${SOURCE}) +target_include_directories ( ${TARGET} PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} diff --git a/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp b/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp index 0a78e2364..70eda7a5a 100644 --- a/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp +++ b/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp @@ -55,14 +55,12 @@ bool PlayMp4VideoFromBeginning::testPipeLineForFlushQue() { colorchange->step(); auto frames = sink->pop(); + auto frame = frames.size(); auto sinkQue = sink->getQue(); - BOOST_TEST(sinkQue->size() == 1); + BOOST_CHECK_EQUAL(frames.size(), 1); sinkQue->flush(); - BOOST_TEST(sinkQue->size() == 0); - frame_sp outputFrame = frames.cbegin()->second; - - - + BOOST_CHECK_EQUAL(sinkQue->size(), 0); + frame_sp outputFrame = frames.cbegin()->second; Test_Utils::saveOrCompare("../.././data/mp4Reader_saveOrCompare/h264/testplay.raw", const_cast(static_cast(outputFrame->data())), outputFrame->size(), 0); return true; } @@ -153,14 +151,14 @@ int main() { std::cerr << "Failed to test pipeline." << std::endl; return 1; } - if (!pipelineInstance.testPipeLineForSeek()) { - std::cerr << "Failed to test pipeline." << std::endl; - return 1; - } - if (!pipelineInstance.startPipeLine()) { - std::cerr << "Failed to start pipeline." << std::endl; - return 1; // Or any error code indicating failure - } + //if (!pipelineInstance.testPipeLineForSeek()) { + // std::cerr << "Failed to test pipeline." 
<< std::endl; + // return 1; + //} + //if (!pipelineInstance.startPipeLine()) { + // std::cerr << "Failed to start pipeline." << std::endl; + // return 1; // Or any error code indicating failure + //} // Wait for the pipeline to run for 10 seconds boost::this_thread::sleep_for(boost::chrono::seconds(3)); if (!pipelineInstance.flushQueuesAndSeek()) { From 574fc11191987302a1c931db48c5cc93b57d9486 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 28 May 2024 18:25:13 +0530 Subject: [PATCH 34/49] Corrected cmake and test for timelapse sample. --- base/CMakeLists.txt | 4 ++-- samples/timelapse-sample/CMakeLists.txt | 1 - samples/timelapse-sample/timelapse_summary.cpp | 13 ++++++++++--- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index d6dadadde..ee115b9c7 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -163,7 +163,7 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp - .././samples/timelapse-sample/timelapse_summary.h + .././samples/timelapse-sample/timelapse_summary.cpp ) SET(CORE_FILES_H @@ -225,7 +225,7 @@ SET(CORE_FILES_H include/OverlayModule.h include/OrderedCacheOfFiles.h include/TestSignalGeneratorSrc.h - .././samples/timelapse-sample/timelapse_summary.cpp + .././samples/timelapse-sample/timelapse_summary.h ) IF(ENABLE_WINDOWS) diff --git a/samples/timelapse-sample/CMakeLists.txt b/samples/timelapse-sample/CMakeLists.txt index 957916ea2..13fa38e88 100644 --- a/samples/timelapse-sample/CMakeLists.txt +++ b/samples/timelapse-sample/CMakeLists.txt @@ -1,5 +1,4 @@ cmake_minimum_required(VERSION 3.22) -set(TARGET timelapse-sample) SET(CORE_FILES timelapse_summary.cpp ) diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index 536341f68..c98d1f2df 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -91,6 +91,7 @@ bool 
TimelapsePipeline::stopPipeline() { void TimelapsePipeline::test() { auto sink = boost::shared_ptr(new ExternalSinkModule()); encoder->setNext(sink); + BOOST_TEST(mp4Reader->init()); BOOST_TEST(motionExtractor->init()); BOOST_TEST(colorchange1->init()); @@ -98,13 +99,18 @@ void TimelapsePipeline::test() { BOOST_TEST(copy->init()); BOOST_TEST(sync->init()); BOOST_TEST(encoder->init()); - mp4Reader->step(); - motionExtractor->step(); + BOOST_TEST(sink->init()); + + for (int i = 0; i <= 20; i++) { + mp4Reader->step(); + motionExtractor->step(); + } colorchange1->step(); colorchange2->step(); copy->step(); sync->step(); encoder->step(); + auto frames = sink->pop(); BOOST_CHECK_EQUAL(frames.size(), 1); } @@ -131,13 +137,14 @@ int main(int argc, char *argv[]) { return 1; // Or any error code indicating failure } + //pipelineInstance.test(); // Start the pipeline if (!pipelineInstance.startPipeline()) { std::cerr << "Failed to start pipeline." << std::endl; return 1; // Or any error code indicating failure } - boost::this_thread::sleep_for(boost::chrono::seconds(600)); + boost::this_thread::sleep_for(boost::chrono::minutes(10)); // Stop the pipeline if (!pipelineInstance.stopPipeline()) { From 773a376e0394e438656f54608d457bf766ed42ac Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Thu, 30 May 2024 17:48:58 +0530 Subject: [PATCH 35/49] 1. Removed the sendOverlayFrames prop to remove overlay frames, instead setting the RAW_IMAGE output pin to MotionVectorExtractor. 2. Enhanced the threshold logic to get the frames based on average of the vectors. 3. Removed not required tests. 
--- base/include/MotionVectorExtractor.h | 92 +++++++++---------- base/src/MotionVectorExtractor.cpp | 74 +++++---------- ...tionvector_extractor_and_overlay_tests.cpp | 87 ------------------ .../timelapse-sample/timelapse_summary.cpp | 9 +- 4 files changed, 71 insertions(+), 191 deletions(-) diff --git a/base/include/MotionVectorExtractor.h b/base/include/MotionVectorExtractor.h index 77744f553..d37cac5d2 100644 --- a/base/include/MotionVectorExtractor.h +++ b/base/include/MotionVectorExtractor.h @@ -6,58 +6,54 @@ class MvExtractDetailAbs; class DetailFfmpeg; class DetailOpenH264; -class MotionVectorExtractorProps : public ModuleProps -{ +class MotionVectorExtractorProps : public ModuleProps { public: - enum MVExtractMethod - { - FFMPEG, - OPENH264 - }; - - MotionVectorExtractorProps(MVExtractMethod _MVExtractMethod = MVExtractMethod::FFMPEG, bool _sendDecodedFrame = false, int _motionVectorThreshold = 2, bool _sendOverlayFrame = true) : MVExtract(_MVExtractMethod), sendDecodedFrame(_sendDecodedFrame), motionVectorThreshold(_motionVectorThreshold), sendOverlayFrame(_sendOverlayFrame) - { - } - - size_t getSerializeSize() - { - return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + sizeof(motionVectorThreshold) + sizeof(sendOverlayFrame); - } - bool sendDecodedFrame = false; - bool sendOverlayFrame = false; - int motionVectorThreshold; - MVExtractMethod MVExtract = MVExtractMethod::FFMPEG; + enum MVExtractMethod { FFMPEG, OPENH264 }; + + MotionVectorExtractorProps( + MVExtractMethod _MVExtractMethod = MVExtractMethod::FFMPEG, + bool _sendDecodedFrame = false, int _motionVectorThreshold = 2) + : MVExtract(_MVExtractMethod), sendDecodedFrame(_sendDecodedFrame), + motionVectorThreshold(_motionVectorThreshold) {} + + size_t getSerializeSize() { + return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + + sizeof(motionVectorThreshold); + } + bool sendDecodedFrame = false; + int motionVectorThreshold; + MVExtractMethod MVExtract = 
MVExtractMethod::FFMPEG; + private: - friend class boost::serialization::access; - - template - void serialize(Archive& ar, const unsigned int version) - { - ar& boost::serialization::base_object(*this); - ar& sendDecodedFrame; - ar& motionVectorThreshold; - ar& sendOverlayFrame; - } + friend class boost::serialization::access; + + template + void serialize(Archive &ar, const unsigned int version) { + ar &boost::serialization::base_object(*this); + ar & sendDecodedFrame; + ar & motionVectorThreshold; + } }; -class MotionVectorExtractor : public Module -{ +class MotionVectorExtractor : public Module { public: - MotionVectorExtractor(MotionVectorExtractorProps _props); - virtual ~MotionVectorExtractor() {}; - bool init(); - bool term(); - void setProps(MotionVectorExtractorProps& props); + MotionVectorExtractor(MotionVectorExtractorProps _props); + virtual ~MotionVectorExtractor(){}; + bool init(); + bool term(); + void setProps(MotionVectorExtractorProps &props); + protected: - bool process(frame_container& frame); - bool validateInputPins(); - bool validateOutputPins(); - bool shouldTriggerSOS(); - bool processSOS(frame_sp& frame); - void setMetadata(frame_sp metadata); - bool handlePropsChange(frame_sp& frame); + bool process(frame_container &frame); + bool validateInputPins(); + bool validateOutputPins(); + bool shouldTriggerSOS(); + bool processSOS(frame_sp &frame); + void setMetadata(frame_sp metadata); + bool handlePropsChange(frame_sp &frame); + private: - boost::shared_ptr mDetail; - framemetadata_sp rawOutputMetadata; - bool mShouldTriggerSOS = true; -}; + boost::shared_ptr mDetail; + framemetadata_sp rawOutputMetadata; + bool mShouldTriggerSOS = true; +}; \ No newline at end of file diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index 04b70747a..64e7888a6 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -24,13 +24,11 @@ class MvExtractDetailAbs { makeFrameWithPinId = 
_makeFrameWithPinId; makeframe = _makeframe; sendDecodedFrame = props.sendDecodedFrame; - sendOverlayFrame = props.sendOverlayFrame; threshold = props.motionVectorThreshold; }; ~MvExtractDetailAbs() {} virtual void setProps(MotionVectorExtractorProps props) { sendDecodedFrame = props.sendDecodedFrame; - sendOverlayFrame = props.sendOverlayFrame; } virtual void getMotionVectors(frame_container &frames, frame_sp &outFrame, frame_sp &decodedFrame) = 0; @@ -45,7 +43,6 @@ class MvExtractDetailAbs { makeframe; std::function makeFrameWithPinId; bool sendDecodedFrame = false; - bool sendOverlayFrame = true; int threshold; cv::Mat bgrImg; bool motionFound = false; @@ -126,6 +123,7 @@ void DetailFfmpeg::getMotionVectors(frame_container &frames, frame_sp &outFrame, av_packet_free(&pkt); av_frame_free(&avFrame); } + int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket *pkt, frame_container &frames, frame_sp &outFrame, @@ -177,7 +175,7 @@ int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket *pkt, for (int i = 0; i < sideData->size / sizeof(*mvs); i++) { const AVMotionVector *mv = &mvs[i]; - if (sendOverlayFrame == true && std::abs(mv->motion_x) > threshold || + if (std::abs(mv->motion_x) > threshold || std::abs(mv->motion_y) > threshold) { LineOverlay lineOverlay; lineOverlay.x1 = mv->src_x; @@ -186,8 +184,6 @@ int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket *pkt, lineOverlay.y2 = mv->dst_y; lineOverlays.push_back(lineOverlay); - - motionFound = true; } } @@ -207,18 +203,6 @@ int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket *pkt, } else { outFrame = makeFrameWithPinId(0, motionVectorPinId); } - if (sendOverlayFrame == false) { - const AVMotionVector *mvs = (const AVMotionVector *)sideData->data; - for (int i = 0; i < sideData->size / sizeof(*mvs); i++) { - const AVMotionVector *mv = &mvs[i]; - - if (std::abs(mv->motion_x) > threshold || - std::abs(mv->motion_y) > threshold) { - motionFound = true; - break; - } - } - } av_packet_unref(pkt); av_frame_unref(avFrame); 
return 0; @@ -248,7 +232,6 @@ void DetailOpenH264::initDecoder() { void DetailOpenH264::getMotionVectors(frame_container &frames, frame_sp &outFrame, frame_sp &decodedFrame) { - motionFound = false; uint8_t *pData[3] = {NULL}; pData[0] = NULL; pData[1] = NULL; @@ -261,14 +244,8 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, int32_t mMotionVectorSize = mWidth * mHeight * 8; int16_t *mMotionVectorData = nullptr; memset(&pDstInfo, 0, sizeof(SBufferInfo)); - - if (sendOverlayFrame) { - outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); - mMotionVectorData = static_cast(outFrame->data()); - } else { - mMotionVectorData = - static_cast(malloc(mMotionVectorSize * sizeof(int16_t))); - } + outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); + mMotionVectorData = static_cast(malloc(mMotionVectorSize * sizeof(int16_t))); if (sDecParam.bParseOnly) { pDecoder->ParseBitstreamGetMotionVectors(pSrc, iSrcLen, ppDst, &parseInfo, @@ -279,14 +256,20 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, &pDstInfo, &mMotionVectorSize, &mMotionVectorData); } - - if (mMotionVectorSize != mWidth * mHeight * 8 && sendOverlayFrame == true) { + int avgX = 0; + int avgY = 0; + if (mMotionVectorSize != mWidth * mHeight * 8) { std::vector circleOverlays; CompositeOverlay compositeOverlay; - + int totalX = 0; + int totalY = 0; + avgX = 0; + avgY = 0; for (int i = 0; i < mMotionVectorSize; i += 4) { auto motionX = mMotionVectorData[i]; auto motionY = mMotionVectorData[i + 1]; + totalX = totalX + motionX; + totalY = totalY + motionY; if (abs(motionX) > threshold || abs(motionY) > threshold) { CircleOverlay circleOverlay; circleOverlay.x1 = mMotionVectorData[i + 2]; @@ -294,11 +277,10 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, circleOverlay.radius = 1; circleOverlays.push_back(circleOverlay); - motionFound = true; - free(mMotionVectorData); } } - + avgX = totalX / (mMotionVectorSize / 4); + avgY = totalY / 
(mMotionVectorSize / 4); for (auto &circleOverlay : circleOverlays) { compositeOverlay.add(&circleOverlay); } @@ -312,20 +294,8 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, frames.insert(make_pair(motionVectorPinId, outFrame)); } } - - if (sendOverlayFrame == false) { - for (int i = 0; i < mMotionVectorSize; i += 4) { - auto motionX = mMotionVectorData[i]; - auto motionY = mMotionVectorData[i + 1]; - if (abs(motionX) > threshold || abs(motionY) > threshold) { - motionFound = true; - break; - } - } - free(mMotionVectorData); - } - if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && - (mMotionVectorSize != mWidth * mHeight * 8) && motionFound == true) { + if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && (mMotionVectorSize != mWidth * mHeight * 8) + && (abs(avgX) > threshold || abs(avgY) > threshold)) { int rowIndex; unsigned char *pPtr = NULL; decodedFrame = makeFrameWithPinId(mHeight * 3 * mWidth, rawFramePinId); @@ -360,6 +330,7 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, frames.insert(make_pair(rawFramePinId, decodedFrame)); free(yuvStartPointer); } + free(mMotionVectorData); } MotionVectorExtractor::MotionVectorExtractor(MotionVectorExtractorProps props) @@ -384,14 +355,13 @@ MotionVectorExtractor::MotionVectorExtractor(MotionVectorExtractorProps props) return makeFrame(frame, size, pinId); })); } - if (props.sendOverlayFrame) { - auto motionVectorOutputMetadata = - framemetadata_sp(new FrameMetadata(FrameMetadata::OVERLAY_INFO_IMAGE)); - mDetail->motionVectorPinId = addOutputPin(motionVectorOutputMetadata); - } + auto motionVectorOutputMetadata = + framemetadata_sp(new FrameMetadata(FrameMetadata::OVERLAY_INFO_IMAGE)); rawOutputMetadata = framemetadata_sp(new RawImageMetadata()); + mDetail->motionVectorPinId = addOutputPin(motionVectorOutputMetadata); mDetail->rawFramePinId = addOutputPin(rawOutputMetadata); } + bool MotionVectorExtractor::init() { mDetail->initDecoder(); return 
Module::init(); diff --git a/base/test/motionvector_extractor_and_overlay_tests.cpp b/base/test/motionvector_extractor_and_overlay_tests.cpp index 30816d5f4..f08cae573 100644 --- a/base/test/motionvector_extractor_and_overlay_tests.cpp +++ b/base/test/motionvector_extractor_and_overlay_tests.cpp @@ -185,83 +185,6 @@ void motionVectorExtractAndOverlay_Render(MotionVectorExtractorProps::MVExtractM p.wait_for_all(); } -void motionVectorExtractAndOverlay_sendOverlayFrames_Render(MotionVectorExtractorProps::MVExtractMethod MvExtract) { - LoggerProps loggerProps; - loggerProps.logLevel = boost::log::trivial::severity_level::info; - Logger::setLogLevel(boost::log::trivial::severity_level::info); - Logger::initLogger(loggerProps); - - bool sendDecodedFrames = true; - bool sendOverlayFrames = true; - - FileReaderModuleProps fileReaderProps("./data/h264_data/FVDO_Freeway_4cif_???.H264"); - fileReaderProps.fps = 30; - fileReaderProps.readLoop = true; - auto fileReader = boost::shared_ptr(new FileReaderModule(fileReaderProps)); - auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); - fileReader->addOutputPin(h264ImageMetadata); - - auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MvExtract, sendDecodedFrames, 2, sendOverlayFrames))); - fileReader->setNext(motionExtractor); - - auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); - motionExtractor->setNext(overlay); - - auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("MotionVectorsOverlay"))); - overlay->setNext(sink); - - PipeLine p("test"); - p.appendModule(fileReader); - p.init(); - - p.run_all_threaded(); - boost::this_thread::sleep_for(boost::chrono::seconds(10)); - - LOG_INFO << "profiling done - stopping the pipeline"; - p.stop(); - p.term(); - p.wait_for_all(); -} - -void motionVectorExtractAndOverlay_dontSendOverlayFrames_Render(MotionVectorExtractorProps::MVExtractMethod MvExtract) -{ - LoggerProps loggerProps; 
- loggerProps.logLevel = boost::log::trivial::severity_level::info; - Logger::setLogLevel(boost::log::trivial::severity_level::info); - Logger::initLogger(loggerProps); - - bool sendDecodedFrames = true; - bool sendOverlayFrames = false; - - FileReaderModuleProps fileReaderProps("./data/h264_data/FVDO_Freeway_4cif_???.H264"); - fileReaderProps.fps = 30; - fileReaderProps.readLoop = true; - auto fileReader = boost::shared_ptr(new FileReaderModule(fileReaderProps)); - auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); - fileReader->addOutputPin(h264ImageMetadata); - - auto motionExtractor = boost::shared_ptr(new MotionVectorExtractor(MotionVectorExtractorProps(MvExtract, sendDecodedFrames, 2, sendOverlayFrames))); - fileReader->setNext(motionExtractor); - - auto overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); - motionExtractor->setNext(overlay); - - auto sink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("MotionVectorsOverlay"))); - overlay->setNext(sink); - - PipeLine p("test"); - p.appendModule(fileReader); - p.init(); - - p.run_all_threaded(); - boost::this_thread::sleep_for(boost::chrono::seconds(10)); - - LOG_INFO << "profiling done - stopping the pipeline"; - p.stop(); - p.term(); - p.wait_for_all(); -} - void rtspCamMotionVectorExtractAndOverlay_Render(MotionVectorExtractorProps::MVExtractMethod MvExtract) { LoggerProps loggerProps; @@ -337,16 +260,6 @@ BOOST_AUTO_TEST_CASE(extract_motion_vectors_and_overlay_openh264) motionVectorExtractAndOverlay(MotionVectorExtractorProps::OPENH264); } -BOOST_AUTO_TEST_CASE(extract_motion_vectors_no_overlay_frames_and_overlay_render_openh264, *boost::unit_test::disabled()) -{ - motionVectorExtractAndOverlay_dontSendOverlayFrames_Render(MotionVectorExtractorProps::OPENH264); -} - -BOOST_AUTO_TEST_CASE(extract_motion_vectors_send_overlay_frames_and_overlay_render_openh264, *boost::unit_test::disabled()) -{ - 
motionVectorExtractAndOverlay_sendOverlayFrames_Render(MotionVectorExtractorProps::OPENH264); -} - BOOST_AUTO_TEST_CASE(rtspcam_extract_motion_vectors_and_overlay_render_openh264, *boost::unit_test::disabled()) { rtspCamMotionVectorExtractAndOverlay_Render(MotionVectorExtractorProps::OPENH264); diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index c98d1f2df..e9d23aa90 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -29,7 +29,6 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, H264EncoderNVCodecProps::MAIN; bool enableBFrames = false; bool sendDecodedFrames = true; - bool sendOverlayFrames = false; auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); @@ -39,7 +38,7 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); auto motionExtractorProps = MotionVectorExtractorProps( MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, - 50, sendOverlayFrames); + 1); motionExtractor = boost::shared_ptr( new MotionVectorExtractor(motionExtractorProps)); colorchange1 = boost::shared_ptr(new ColorConversion( @@ -61,8 +60,10 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, mp4Reader->addOutPutPin(mp4Metadata); std::vector mImagePin = mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); + std::vector mDecodedPin = + motionExtractor->getAllOutputPinsByType(FrameMetadata::RAW_IMAGE); mp4Reader->setNext(motionExtractor, mImagePin); - motionExtractor->setNext(colorchange1); + motionExtractor->setNext(colorchange1, mDecodedPin); colorchange1->setNext(colorchange2); copy = boost::shared_ptr( new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); @@ -144,7 +145,7 @@ int main(int argc, char *argv[]) { return 1; // Or any error code indicating failure } - 
boost::this_thread::sleep_for(boost::chrono::minutes(10)); + boost::this_thread::sleep_for(boost::chrono::minutes(120)); // Stop the pipeline if (!pipelineInstance.stopPipeline()) { From 3c26a39e082447c4376df2b818264d1e5d656243 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Mon, 3 Jun 2024 18:55:28 +0530 Subject: [PATCH 36/49] Algo to remove noisy black frames. --- base/src/MotionVectorExtractor.cpp | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index 64e7888a6..4a3a09a72 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -107,6 +107,24 @@ void DetailFfmpeg::initDecoder() { throw AIPException(AIP_FATAL, "failed open decoder"); } } + +bool discardNoisyFrames(int black_pixel_percent, cv::Mat image) { + cv::Mat gray; + cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY); + + cv::Mat binary; + cv::threshold(gray, binary, 128, 255, cv::THRESH_BINARY); + int totalPixels = binary.total(); + int whitePixels = cv::countNonZero(binary); + int blackPixels = totalPixels - whitePixels; + + double blackPixelPercentage = (static_cast(blackPixels) / static_cast(totalPixels)) * 100.0; + if (blackPixelPercentage > black_pixel_percent) + return true; + else + return false; +} + void DetailFfmpeg::getMotionVectors(frame_container &frames, frame_sp &outFrame, frame_sp &decodedFrame) { int ret = 0; @@ -325,9 +343,10 @@ void DetailOpenH264::getMotionVectors(frame_container &frames, cv::Mat yuvImgCV = cv::Mat(mHeight + mHeight / 2, mWidth, CV_8UC1, yuvStartPointer, mWidth); bgrImg.data = static_cast(decodedFrame->data()); - - cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); - frames.insert(make_pair(rawFramePinId, decodedFrame)); + if (!discardNoisyFrames(90, bgrImg)) { + cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); + frames.insert(make_pair(rawFramePinId, decodedFrame)); + } free(yuvStartPointer); } 
free(mMotionVectorData); From 6dae9cda426b27c51fc14bb4d579361ed6d08db2 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Mon, 3 Jun 2024 19:14:45 +0530 Subject: [PATCH 37/49] files format. --- base/include/MotionVectorExtractor.h | 76 +-- base/src/MotionVectorExtractor.cpp | 799 ++++++++++++++------------- 2 files changed, 442 insertions(+), 433 deletions(-) diff --git a/base/include/MotionVectorExtractor.h b/base/include/MotionVectorExtractor.h index d37cac5d2..124d85af6 100644 --- a/base/include/MotionVectorExtractor.h +++ b/base/include/MotionVectorExtractor.h @@ -8,52 +8,52 @@ class DetailOpenH264; class MotionVectorExtractorProps : public ModuleProps { public: - enum MVExtractMethod { FFMPEG, OPENH264 }; - - MotionVectorExtractorProps( - MVExtractMethod _MVExtractMethod = MVExtractMethod::FFMPEG, - bool _sendDecodedFrame = false, int _motionVectorThreshold = 2) - : MVExtract(_MVExtractMethod), sendDecodedFrame(_sendDecodedFrame), - motionVectorThreshold(_motionVectorThreshold) {} - - size_t getSerializeSize() { - return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + - sizeof(motionVectorThreshold); - } - bool sendDecodedFrame = false; - int motionVectorThreshold; - MVExtractMethod MVExtract = MVExtractMethod::FFMPEG; + enum MVExtractMethod { FFMPEG, OPENH264 }; + + MotionVectorExtractorProps( + MVExtractMethod _MVExtractMethod = MVExtractMethod::FFMPEG, + bool _sendDecodedFrame = false, int _motionVectorThreshold = 2) + : MVExtract(_MVExtractMethod), sendDecodedFrame(_sendDecodedFrame), + motionVectorThreshold(_motionVectorThreshold) {} + + size_t getSerializeSize() { + return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + + sizeof(motionVectorThreshold); + } + bool sendDecodedFrame = false; + int motionVectorThreshold; + MVExtractMethod MVExtract = MVExtractMethod::FFMPEG; private: - friend class boost::serialization::access; - - template - void serialize(Archive &ar, const unsigned int version) { - ar 
&boost::serialization::base_object(*this); - ar & sendDecodedFrame; - ar & motionVectorThreshold; - } + friend class boost::serialization::access; + + template + void serialize(Archive& ar, const unsigned int version) { + ar& boost::serialization::base_object(*this); + ar& sendDecodedFrame; + ar& motionVectorThreshold; + } }; class MotionVectorExtractor : public Module { public: - MotionVectorExtractor(MotionVectorExtractorProps _props); - virtual ~MotionVectorExtractor(){}; - bool init(); - bool term(); - void setProps(MotionVectorExtractorProps &props); + MotionVectorExtractor(MotionVectorExtractorProps _props); + virtual ~MotionVectorExtractor() {}; + bool init(); + bool term(); + void setProps(MotionVectorExtractorProps& props); protected: - bool process(frame_container &frame); - bool validateInputPins(); - bool validateOutputPins(); - bool shouldTriggerSOS(); - bool processSOS(frame_sp &frame); - void setMetadata(frame_sp metadata); - bool handlePropsChange(frame_sp &frame); + bool process(frame_container& frame); + bool validateInputPins(); + bool validateOutputPins(); + bool shouldTriggerSOS(); + bool processSOS(frame_sp& frame); + void setMetadata(frame_sp metadata); + bool handlePropsChange(frame_sp& frame); private: - boost::shared_ptr mDetail; - framemetadata_sp rawOutputMetadata; - bool mShouldTriggerSOS = true; + boost::shared_ptr mDetail; + framemetadata_sp rawOutputMetadata; + bool mShouldTriggerSOS = true; }; \ No newline at end of file diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index 4a3a09a72..a0a7467b9 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -16,456 +16,465 @@ extern "C" { class MvExtractDetailAbs { public: - MvExtractDetailAbs( - MotionVectorExtractorProps props, - std::function _makeFrameWithPinId, - std::function - _makeframe) { - makeFrameWithPinId = _makeFrameWithPinId; - makeframe = _makeframe; - sendDecodedFrame = props.sendDecodedFrame; - 
threshold = props.motionVectorThreshold; - }; - ~MvExtractDetailAbs() {} - virtual void setProps(MotionVectorExtractorProps props) { - sendDecodedFrame = props.sendDecodedFrame; - } - virtual void getMotionVectors(frame_container &frames, frame_sp &outFrame, - frame_sp &decodedFrame) = 0; - virtual void initDecoder() = 0; + MvExtractDetailAbs( + MotionVectorExtractorProps props, + std::function _makeFrameWithPinId, + std::function + _makeframe) { + makeFrameWithPinId = _makeFrameWithPinId; + makeframe = _makeframe; + sendDecodedFrame = props.sendDecodedFrame; + threshold = props.motionVectorThreshold; + }; + ~MvExtractDetailAbs() {} + virtual void setProps(MotionVectorExtractorProps props) { + sendDecodedFrame = props.sendDecodedFrame; + } + virtual void getMotionVectors(frame_container& frames, frame_sp& outFrame, + frame_sp& decodedFrame) = 0; + virtual void initDecoder() = 0; public: - int mWidth = 0; - int mHeight = 0; - std::string rawFramePinId; - std::string motionVectorPinId; - std::function - makeframe; - std::function makeFrameWithPinId; - bool sendDecodedFrame = false; - int threshold; - cv::Mat bgrImg; - bool motionFound = false; + int mWidth = 0; + int mHeight = 0; + std::string rawFramePinId; + std::string motionVectorPinId; + std::function + makeframe; + std::function makeFrameWithPinId; + bool sendDecodedFrame = false; + int threshold; + cv::Mat bgrImg; + bool motionFound = false; }; class DetailFfmpeg : public MvExtractDetailAbs { public: - DetailFfmpeg( - MotionVectorExtractorProps props, - std::function _makeFrameWithPinId, - std::function - _makeframe) - : MvExtractDetailAbs(props, _makeFrameWithPinId, _makeframe) {} - ~DetailFfmpeg() { avcodec_free_context(&decoderContext); } - void getMotionVectors(frame_container &frames, frame_sp &outFrame, - frame_sp &decodedFrame); - void initDecoder(); - int decodeAndGetMotionVectors(AVPacket *pkt, frame_container &frames, - frame_sp &outFrame, frame_sp &decodedFrame); + DetailFfmpeg( + 
MotionVectorExtractorProps props, + std::function _makeFrameWithPinId, + std::function + _makeframe) + : MvExtractDetailAbs(props, _makeFrameWithPinId, _makeframe) {} + ~DetailFfmpeg() { avcodec_free_context(&decoderContext); } + void getMotionVectors(frame_container& frames, frame_sp& outFrame, + frame_sp& decodedFrame); + void initDecoder(); + int decodeAndGetMotionVectors(AVPacket* pkt, frame_container& frames, + frame_sp& outFrame, frame_sp& decodedFrame); private: - AVFrame *avFrame = NULL; - AVCodecContext *decoderContext = NULL; + AVFrame* avFrame = NULL; + AVCodecContext* decoderContext = NULL; }; class DetailOpenH264 : public MvExtractDetailAbs { public: - DetailOpenH264( - MotionVectorExtractorProps props, - std::function _makeFrameWithPinId, - std::function - _makeframe) - : MvExtractDetailAbs(props, _makeFrameWithPinId, _makeframe) {} - ~DetailOpenH264() { - if (pDecoder) { - pDecoder->Uninitialize(); - WelsDestroyDecoder(pDecoder); - } - } - void getMotionVectors(frame_container &frames, frame_sp &outFrame, - frame_sp &decodedFrame); - void initDecoder(); + DetailOpenH264( + MotionVectorExtractorProps props, + std::function _makeFrameWithPinId, + std::function + _makeframe) + : MvExtractDetailAbs(props, _makeFrameWithPinId, _makeframe) {} + ~DetailOpenH264() { + if (pDecoder) { + pDecoder->Uninitialize(); + WelsDestroyDecoder(pDecoder); + } + } + void getMotionVectors(frame_container& frames, frame_sp& outFrame, + frame_sp& decodedFrame); + void initDecoder(); private: - ISVCDecoder *pDecoder; - SBufferInfo pDstInfo; - SDecodingParam sDecParam; - SParserBsInfo parseInfo; + ISVCDecoder* pDecoder; + SBufferInfo pDstInfo; + SDecodingParam sDecParam; + SParserBsInfo parseInfo; }; void DetailFfmpeg::initDecoder() { - int ret; - AVCodec *dec = NULL; - AVDictionary *opts = NULL; - dec = avcodec_find_decoder(AV_CODEC_ID_H264); - decoderContext = avcodec_alloc_context3(dec); - if (!decoderContext) { - throw AIPException(AIP_FATAL, "Failed to allocate codec"); - 
} - /* Init the decoder */ - av_dict_set(&opts, "flags2", "+export_mvs", 0); - ret = avcodec_open2(decoderContext, dec, &opts); - av_dict_free(&opts); - if (ret < 0) { - throw AIPException(AIP_FATAL, "failed open decoder"); - } + int ret; + AVCodec* dec = NULL; + AVDictionary* opts = NULL; + dec = avcodec_find_decoder(AV_CODEC_ID_H264); + decoderContext = avcodec_alloc_context3(dec); + if (!decoderContext) { + throw AIPException(AIP_FATAL, "Failed to allocate codec"); + } + /* Init the decoder */ + av_dict_set(&opts, "flags2", "+export_mvs", 0); + ret = avcodec_open2(decoderContext, dec, &opts); + av_dict_free(&opts); + if (ret < 0) { + throw AIPException(AIP_FATAL, "failed open decoder"); + } } bool discardNoisyFrames(int black_pixel_percent, cv::Mat image) { - cv::Mat gray; - cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY); + cv::Mat gray; + cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY); - cv::Mat binary; - cv::threshold(gray, binary, 128, 255, cv::THRESH_BINARY); - int totalPixels = binary.total(); - int whitePixels = cv::countNonZero(binary); - int blackPixels = totalPixels - whitePixels; + cv::Mat binary; + cv::threshold(gray, binary, 128, 255, cv::THRESH_BINARY); + int totalPixels = binary.total(); + int whitePixels = cv::countNonZero(binary); + int blackPixels = totalPixels - whitePixels; - double blackPixelPercentage = (static_cast(blackPixels) / static_cast(totalPixels)) * 100.0; - if (blackPixelPercentage > black_pixel_percent) - return true; - else - return false; + double blackPixelPercentage = + (static_cast(blackPixels) / static_cast(totalPixels)) * + 100.0; + if (blackPixelPercentage > black_pixel_percent) + return true; + else + return false; } -void DetailFfmpeg::getMotionVectors(frame_container &frames, frame_sp &outFrame, - frame_sp &decodedFrame) { - int ret = 0; - AVPacket *pkt = NULL; - avFrame = av_frame_alloc(); - if (!avFrame) { - LOG_ERROR << "Could not allocate frame\n"; - } - pkt = av_packet_alloc(); - if (!pkt) { - LOG_ERROR << "Could not 
allocate AVPacket\n"; - } - ret = decodeAndGetMotionVectors(pkt, frames, outFrame, decodedFrame); - av_packet_free(&pkt); - av_frame_free(&avFrame); +void DetailFfmpeg::getMotionVectors(frame_container& frames, frame_sp& outFrame, + frame_sp& decodedFrame) { + int ret = 0; + AVPacket* pkt = NULL; + avFrame = av_frame_alloc(); + if (!avFrame) { + LOG_ERROR << "Could not allocate frame\n"; + } + pkt = av_packet_alloc(); + if (!pkt) { + LOG_ERROR << "Could not allocate AVPacket\n"; + } + ret = decodeAndGetMotionVectors(pkt, frames, outFrame, decodedFrame); + av_packet_free(&pkt); + av_frame_free(&avFrame); } -int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket *pkt, - frame_container &frames, - frame_sp &outFrame, - frame_sp &decodedFrame) { - auto inFrame = frames.begin()->second; - pkt->data = (uint8_t *)inFrame->data(); - pkt->size = (int)inFrame->size(); - int ret = avcodec_send_packet(decoderContext, pkt); - if (ret < 0) { - LOG_ERROR << stderr << "Error while sending a packet to the decoder: %s\n"; - return ret; - } - while (ret >= 0) { - ret = avcodec_receive_frame(decoderContext, avFrame); - if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { - outFrame = makeFrameWithPinId(0, motionVectorPinId); - break; - } else if (ret < 0) { - LOG_ERROR << stderr - << "Error while receiving a frame from the decoder: %s\n"; - return ret; - } - if (sendDecodedFrame) { - SwsContext *sws_context = - sws_getContext(decoderContext->width, decoderContext->height, - decoderContext->pix_fmt, decoderContext->width, - decoderContext->height, AV_PIX_FMT_BGR24, - SWS_BICUBIC | SWS_FULL_CHR_H_INT, NULL, NULL, NULL); - if (!sws_context) { - // Handle error - } - decodedFrame = makeFrameWithPinId(mWidth * mHeight * 3, rawFramePinId); - int dstStrides[AV_NUM_DATA_POINTERS]; - dstStrides[0] = decoderContext->width * 3; // Assuming BGR format - uint8_t *dstData[AV_NUM_DATA_POINTERS]; - dstData[0] = static_cast(decodedFrame->data()); - sws_scale(sws_context, avFrame->data, avFrame->linesize, 
0, - decoderContext->height, dstData, dstStrides); - frames.insert(make_pair(rawFramePinId, decodedFrame)); - } - if (ret >= 0) { - AVFrameSideData *sideData; - sideData = av_frame_get_side_data(avFrame, AV_FRAME_DATA_MOTION_VECTORS); - if (sideData) { - std::vector lineOverlays; - CompositeOverlay compositeOverlay; +int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket* pkt, + frame_container& frames, + frame_sp& outFrame, + frame_sp& decodedFrame) { + auto inFrame = frames.begin()->second; + pkt->data = (uint8_t*)inFrame->data(); + pkt->size = (int)inFrame->size(); + int ret = avcodec_send_packet(decoderContext, pkt); + if (ret < 0) { + LOG_ERROR << stderr << "Error while sending a packet to the decoder: %s\n"; + return ret; + } + while (ret >= 0) { + ret = avcodec_receive_frame(decoderContext, avFrame); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { + outFrame = makeFrameWithPinId(0, motionVectorPinId); + break; + } + else if (ret < 0) { + LOG_ERROR << stderr + << "Error while receiving a frame from the decoder: %s\n"; + return ret; + } + if (sendDecodedFrame) { + SwsContext* sws_context = + sws_getContext(decoderContext->width, decoderContext->height, + decoderContext->pix_fmt, decoderContext->width, + decoderContext->height, AV_PIX_FMT_BGR24, + SWS_BICUBIC | SWS_FULL_CHR_H_INT, NULL, NULL, NULL); + if (!sws_context) { + // Handle error + } + decodedFrame = makeFrameWithPinId(mWidth * mHeight * 3, rawFramePinId); + int dstStrides[AV_NUM_DATA_POINTERS]; + dstStrides[0] = decoderContext->width * 3; // Assuming BGR format + uint8_t* dstData[AV_NUM_DATA_POINTERS]; + dstData[0] = static_cast(decodedFrame->data()); + sws_scale(sws_context, avFrame->data, avFrame->linesize, 0, + decoderContext->height, dstData, dstStrides); + frames.insert(make_pair(rawFramePinId, decodedFrame)); + } + if (ret >= 0) { + AVFrameSideData* sideData; + sideData = av_frame_get_side_data(avFrame, AV_FRAME_DATA_MOTION_VECTORS); + if (sideData) { + std::vector lineOverlays; + 
CompositeOverlay compositeOverlay; - const AVMotionVector *mvs = (const AVMotionVector *)sideData->data; - for (int i = 0; i < sideData->size / sizeof(*mvs); i++) { - const AVMotionVector *mv = &mvs[i]; + const AVMotionVector* mvs = (const AVMotionVector*)sideData->data; + for (int i = 0; i < sideData->size / sizeof(*mvs); i++) { + const AVMotionVector* mv = &mvs[i]; - if (std::abs(mv->motion_x) > threshold || - std::abs(mv->motion_y) > threshold) { - LineOverlay lineOverlay; - lineOverlay.x1 = mv->src_x; - lineOverlay.y1 = mv->src_y; - lineOverlay.x2 = mv->dst_x; - lineOverlay.y2 = mv->dst_y; + if (std::abs(mv->motion_x) > threshold || + std::abs(mv->motion_y) > threshold) { + LineOverlay lineOverlay; + lineOverlay.x1 = mv->src_x; + lineOverlay.y1 = mv->src_y; + lineOverlay.x2 = mv->dst_x; + lineOverlay.y2 = mv->dst_y; - lineOverlays.push_back(lineOverlay); - } - } + lineOverlays.push_back(lineOverlay); + } + } - for (auto &lineOverlay : lineOverlays) { - compositeOverlay.add(&lineOverlay); - } + for (auto& lineOverlay : lineOverlays) { + compositeOverlay.add(&lineOverlay); + } - if (lineOverlays.size()) { - DrawingOverlay drawingOverlay; - drawingOverlay.add(&compositeOverlay); - auto serializedSize = drawingOverlay.mGetSerializeSize(); - outFrame = makeFrameWithPinId(serializedSize, motionVectorPinId); - memcpy(outFrame->data(), sideData->data, serializedSize); - drawingOverlay.serialize(outFrame); - frames.insert(make_pair(motionVectorPinId, outFrame)); - } - } else { - outFrame = makeFrameWithPinId(0, motionVectorPinId); - } - av_packet_unref(pkt); - av_frame_unref(avFrame); - return 0; - } - } - return 0; + if (lineOverlays.size()) { + DrawingOverlay drawingOverlay; + drawingOverlay.add(&compositeOverlay); + auto serializedSize = drawingOverlay.mGetSerializeSize(); + outFrame = makeFrameWithPinId(serializedSize, motionVectorPinId); + memcpy(outFrame->data(), sideData->data, serializedSize); + drawingOverlay.serialize(outFrame); + 
frames.insert(make_pair(motionVectorPinId, outFrame)); + } + } + else { + outFrame = makeFrameWithPinId(0, motionVectorPinId); + } + av_packet_unref(pkt); + av_frame_unref(avFrame); + return 0; + } + } + return 0; } void DetailOpenH264::initDecoder() { - sDecParam = {0}; - if (!sendDecodedFrame) { - sDecParam.bParseOnly = true; - } else { - sDecParam.bParseOnly = false; - } - memset(&pDstInfo, 0, sizeof(SBufferInfo)); - pDstInfo.uiInBsTimeStamp = 0; + sDecParam = { 0 }; + if (!sendDecodedFrame) { + sDecParam.bParseOnly = true; + } + else { + sDecParam.bParseOnly = false; + } + memset(&pDstInfo, 0, sizeof(SBufferInfo)); + pDstInfo.uiInBsTimeStamp = 0; - if (WelsCreateDecoder(&pDecoder) || (NULL == pDecoder)) { - LOG_ERROR << "Create Decoder failed.\n"; - } - if (pDecoder->Initialize(&sDecParam)) { - LOG_ERROR << "Decoder initialization failed.\n"; - } + if (WelsCreateDecoder(&pDecoder) || (NULL == pDecoder)) { + LOG_ERROR << "Create Decoder failed.\n"; + } + if (pDecoder->Initialize(&sDecParam)) { + LOG_ERROR << "Decoder initialization failed.\n"; + } } -void DetailOpenH264::getMotionVectors(frame_container &frames, - frame_sp &outFrame, - frame_sp &decodedFrame) { - uint8_t *pData[3] = {NULL}; - pData[0] = NULL; - pData[1] = NULL; - pData[2] = NULL; - auto h264Frame = frames.begin()->second; +void DetailOpenH264::getMotionVectors(frame_container& frames, + frame_sp& outFrame, + frame_sp& decodedFrame) { + uint8_t* pData[3] = { NULL }; + pData[0] = NULL; + pData[1] = NULL; + pData[2] = NULL; + auto h264Frame = frames.begin()->second; - unsigned char *pSrc = static_cast(h264Frame->data()); - int iSrcLen = h264Frame->size(); - unsigned char **ppDst = pData; - int32_t mMotionVectorSize = mWidth * mHeight * 8; - int16_t *mMotionVectorData = nullptr; - memset(&pDstInfo, 0, sizeof(SBufferInfo)); - outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); - mMotionVectorData = static_cast(malloc(mMotionVectorSize * sizeof(int16_t))); + unsigned char* pSrc = 
static_cast(h264Frame->data()); + int iSrcLen = h264Frame->size(); + unsigned char** ppDst = pData; + int32_t mMotionVectorSize = mWidth * mHeight * 8; + int16_t* mMotionVectorData = nullptr; + memset(&pDstInfo, 0, sizeof(SBufferInfo)); + outFrame = makeFrameWithPinId(mMotionVectorSize, motionVectorPinId); + mMotionVectorData = + static_cast(malloc(mMotionVectorSize * sizeof(int16_t))); - if (sDecParam.bParseOnly) { - pDecoder->ParseBitstreamGetMotionVectors(pSrc, iSrcLen, ppDst, &parseInfo, - &pDstInfo, &mMotionVectorSize, - &mMotionVectorData); - } else { - pDecoder->DecodeFrameGetMotionVectorsNoDelay(pSrc, iSrcLen, ppDst, - &pDstInfo, &mMotionVectorSize, - &mMotionVectorData); - } - int avgX = 0; - int avgY = 0; - if (mMotionVectorSize != mWidth * mHeight * 8) { - std::vector circleOverlays; - CompositeOverlay compositeOverlay; - int totalX = 0; - int totalY = 0; - avgX = 0; - avgY = 0; - for (int i = 0; i < mMotionVectorSize; i += 4) { - auto motionX = mMotionVectorData[i]; - auto motionY = mMotionVectorData[i + 1]; - totalX = totalX + motionX; - totalY = totalY + motionY; - if (abs(motionX) > threshold || abs(motionY) > threshold) { - CircleOverlay circleOverlay; - circleOverlay.x1 = mMotionVectorData[i + 2]; - circleOverlay.y1 = mMotionVectorData[i + 3]; - circleOverlay.radius = 1; + if (sDecParam.bParseOnly) { + pDecoder->ParseBitstreamGetMotionVectors(pSrc, iSrcLen, ppDst, &parseInfo, + &pDstInfo, &mMotionVectorSize, + &mMotionVectorData); + } + else { + pDecoder->DecodeFrameGetMotionVectorsNoDelay(pSrc, iSrcLen, ppDst, + &pDstInfo, &mMotionVectorSize, + &mMotionVectorData); + } + int avgX = 0; + int avgY = 0; + if (mMotionVectorSize != mWidth * mHeight * 8) { + std::vector circleOverlays; + CompositeOverlay compositeOverlay; + int totalX = 0; + int totalY = 0; + avgX = 0; + avgY = 0; + for (int i = 0; i < mMotionVectorSize; i += 4) { + auto motionX = mMotionVectorData[i]; + auto motionY = mMotionVectorData[i + 1]; + totalX = totalX + motionX; + totalY = 
totalY + motionY; + if (abs(motionX) > threshold || abs(motionY) > threshold) { + CircleOverlay circleOverlay; + circleOverlay.x1 = mMotionVectorData[i + 2]; + circleOverlay.y1 = mMotionVectorData[i + 3]; + circleOverlay.radius = 1; - circleOverlays.push_back(circleOverlay); - } - } - avgX = totalX / (mMotionVectorSize / 4); - avgY = totalY / (mMotionVectorSize / 4); - for (auto &circleOverlay : circleOverlays) { - compositeOverlay.add(&circleOverlay); - } + circleOverlays.push_back(circleOverlay); + } + } + avgX = totalX / (mMotionVectorSize / 4); + avgY = totalY / (mMotionVectorSize / 4); + for (auto& circleOverlay : circleOverlays) { + compositeOverlay.add(&circleOverlay); + } - if (circleOverlays.size()) { - DrawingOverlay drawingOverlay; - drawingOverlay.add(&compositeOverlay); - auto mvSize = drawingOverlay.mGetSerializeSize(); - outFrame = makeframe(outFrame, mvSize, motionVectorPinId); - drawingOverlay.serialize(outFrame); - frames.insert(make_pair(motionVectorPinId, outFrame)); - } - } - if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && (mMotionVectorSize != mWidth * mHeight * 8) - && (abs(avgX) > threshold || abs(avgY) > threshold)) { - int rowIndex; - unsigned char *pPtr = NULL; - decodedFrame = makeFrameWithPinId(mHeight * 3 * mWidth, rawFramePinId); - uint8_t *yuvImagePtr = (uint8_t *)malloc(mHeight * 1.5 * mWidth); - auto yuvStartPointer = yuvImagePtr; - pPtr = pData[0]; - for (rowIndex = 0; rowIndex < mHeight; rowIndex++) { - memcpy(yuvImagePtr, pPtr, mWidth); - pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[0]; - yuvImagePtr += mWidth; - } - int halfHeight = mHeight / 2; - int halfWidth = mWidth / 2; - pPtr = pData[1]; - for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { - memcpy(yuvImagePtr, pPtr, halfWidth); - pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; - yuvImagePtr += halfWidth; - } - pPtr = pData[2]; - for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { - memcpy(yuvImagePtr, pPtr, halfWidth); - pPtr += 
pDstInfo.UsrData.sSystemBuffer.iStride[1]; - yuvImagePtr += halfWidth; - } + if (circleOverlays.size()) { + DrawingOverlay drawingOverlay; + drawingOverlay.add(&compositeOverlay); + auto mvSize = drawingOverlay.mGetSerializeSize(); + outFrame = makeframe(outFrame, mvSize, motionVectorPinId); + drawingOverlay.serialize(outFrame); + frames.insert(make_pair(motionVectorPinId, outFrame)); + } + } + if ((!sDecParam.bParseOnly) && (pDstInfo.pDst[0] != nullptr) && + (mMotionVectorSize != mWidth * mHeight * 8) && + (abs(avgX) > threshold || abs(avgY) > threshold)) { + int rowIndex; + unsigned char* pPtr = NULL; + decodedFrame = makeFrameWithPinId(mHeight * 3 * mWidth, rawFramePinId); + uint8_t* yuvImagePtr = (uint8_t*)malloc(mHeight * 1.5 * mWidth); + auto yuvStartPointer = yuvImagePtr; + pPtr = pData[0]; + for (rowIndex = 0; rowIndex < mHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, mWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[0]; + yuvImagePtr += mWidth; + } + int halfHeight = mHeight / 2; + int halfWidth = mWidth / 2; + pPtr = pData[1]; + for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, halfWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; + yuvImagePtr += halfWidth; + } + pPtr = pData[2]; + for (rowIndex = 0; rowIndex < halfHeight; rowIndex++) { + memcpy(yuvImagePtr, pPtr, halfWidth); + pPtr += pDstInfo.UsrData.sSystemBuffer.iStride[1]; + yuvImagePtr += halfWidth; + } - cv::Mat yuvImgCV = cv::Mat(mHeight + mHeight / 2, mWidth, CV_8UC1, - yuvStartPointer, mWidth); - bgrImg.data = static_cast(decodedFrame->data()); - if (!discardNoisyFrames(90, bgrImg)) { - cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); - frames.insert(make_pair(rawFramePinId, decodedFrame)); - } - free(yuvStartPointer); - } - free(mMotionVectorData); + cv::Mat yuvImgCV = cv::Mat(mHeight + mHeight / 2, mWidth, CV_8UC1, + yuvStartPointer, mWidth); + bgrImg.data = static_cast(decodedFrame->data()); + if (!discardNoisyFrames(97, bgrImg)) 
{ + cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); + frames.insert(make_pair(rawFramePinId, decodedFrame)); + } + free(yuvStartPointer); + } + free(mMotionVectorData); } MotionVectorExtractor::MotionVectorExtractor(MotionVectorExtractorProps props) - : Module(TRANSFORM, "MotionVectorExtractor", props) { - if (props.MVExtract == MotionVectorExtractorProps::MVExtractMethod::FFMPEG) { - mDetail.reset(new DetailFfmpeg( - props, - [&](size_t size, string &pinId) -> frame_sp { - return makeFrame(size, pinId); - }, - [&](frame_sp &frame, size_t &size, string &pinId) -> frame_sp { - return makeFrame(frame, size, pinId); - })); - } else if (props.MVExtract == - MotionVectorExtractorProps::MVExtractMethod::OPENH264) { - mDetail.reset(new DetailOpenH264( - props, - [&](size_t size, string &pinId) -> frame_sp { - return makeFrame(size, pinId); - }, - [&](frame_sp &frame, size_t &size, string &pinId) -> frame_sp { - return makeFrame(frame, size, pinId); - })); - } - auto motionVectorOutputMetadata = - framemetadata_sp(new FrameMetadata(FrameMetadata::OVERLAY_INFO_IMAGE)); - rawOutputMetadata = framemetadata_sp(new RawImageMetadata()); - mDetail->motionVectorPinId = addOutputPin(motionVectorOutputMetadata); - mDetail->rawFramePinId = addOutputPin(rawOutputMetadata); + : Module(TRANSFORM, "MotionVectorExtractor", props) { + if (props.MVExtract == MotionVectorExtractorProps::MVExtractMethod::FFMPEG) { + mDetail.reset(new DetailFfmpeg( + props, + [&](size_t size, string& pinId) -> frame_sp { + return makeFrame(size, pinId); + }, + [&](frame_sp& frame, size_t& size, string& pinId) -> frame_sp { + return makeFrame(frame, size, pinId); + })); + } + else if (props.MVExtract == + MotionVectorExtractorProps::MVExtractMethod::OPENH264) { + mDetail.reset(new DetailOpenH264( + props, + [&](size_t size, string& pinId) -> frame_sp { + return makeFrame(size, pinId); + }, + [&](frame_sp& frame, size_t& size, string& pinId) -> frame_sp { + return makeFrame(frame, size, pinId); + })); + 
} + auto motionVectorOutputMetadata = + framemetadata_sp(new FrameMetadata(FrameMetadata::OVERLAY_INFO_IMAGE)); + rawOutputMetadata = framemetadata_sp(new RawImageMetadata()); + mDetail->motionVectorPinId = addOutputPin(motionVectorOutputMetadata); + mDetail->rawFramePinId = addOutputPin(rawOutputMetadata); } bool MotionVectorExtractor::init() { - mDetail->initDecoder(); - return Module::init(); + mDetail->initDecoder(); + return Module::init(); } bool MotionVectorExtractor::term() { return Module::term(); } bool MotionVectorExtractor::validateInputPins() { - if (getNumberOfInputPins() != 1) { - LOG_ERROR << "<" << getId() - << ">::validateInputPins size is expected to be 1. Actual<" - << getNumberOfInputPins() << ">"; - return false; - } - framemetadata_sp metadata = getFirstInputMetadata(); - FrameMetadata::FrameType frameType = metadata->getFrameType(); - if (frameType != FrameMetadata::H264_DATA) { - LOG_ERROR << "<" << getId() - << ">::validateInputPins input frameType is expected to be " - "H264_DATA. Actual<" - << frameType << ">"; - return false; - } - return true; + if (getNumberOfInputPins() != 1) { + LOG_ERROR << "<" << getId() + << ">::validateInputPins size is expected to be 1. Actual<" + << getNumberOfInputPins() << ">"; + return false; + } + framemetadata_sp metadata = getFirstInputMetadata(); + FrameMetadata::FrameType frameType = metadata->getFrameType(); + if (frameType != FrameMetadata::H264_DATA) { + LOG_ERROR << "<" << getId() + << ">::validateInputPins input frameType is expected to be " + "H264_DATA. Actual<" + << frameType << ">"; + return false; + } + return true; } bool MotionVectorExtractor::validateOutputPins() { - auto size = getNumberOfOutputPins(); - if (getNumberOfOutputPins() > 2) { - LOG_ERROR << "<" << getId() - << ">::validateOutputPins size is expected to be 2. 
Actual<" - << getNumberOfOutputPins() << ">"; - return false; - } - pair me; // map element - auto framefactoryByPin = getOutputFrameFactory(); - BOOST_FOREACH (me, framefactoryByPin) { - FrameMetadata::FrameType frameType = - me.second->getFrameMetadata()->getFrameType(); - if (frameType != FrameMetadata::OVERLAY_INFO_IMAGE && - frameType != FrameMetadata::RAW_IMAGE) { - LOG_ERROR << "<" << getId() - << ">::validateOutputPins input frameType is expected to be " - "MOTION_VECTOR_DATA or RAW_IMAGE. Actual<" - << frameType << ">"; - return false; - } - } - return true; + auto size = getNumberOfOutputPins(); + if (getNumberOfOutputPins() > 2) { + LOG_ERROR << "<" << getId() + << ">::validateOutputPins size is expected to be 2. Actual<" + << getNumberOfOutputPins() << ">"; + return false; + } + pair me; // map element + auto framefactoryByPin = getOutputFrameFactory(); + BOOST_FOREACH(me, framefactoryByPin) { + FrameMetadata::FrameType frameType = + me.second->getFrameMetadata()->getFrameType(); + if (frameType != FrameMetadata::OVERLAY_INFO_IMAGE && + frameType != FrameMetadata::RAW_IMAGE) { + LOG_ERROR << "<" << getId() + << ">::validateOutputPins input frameType is expected to be " + "MOTION_VECTOR_DATA or RAW_IMAGE. 
Actual<" + << frameType << ">"; + return false; + } + } + return true; } bool MotionVectorExtractor::shouldTriggerSOS() { return mShouldTriggerSOS; } -bool MotionVectorExtractor::process(frame_container &frames) { - frame_sp motionVectorFrame; - frame_sp decodedFrame; - mDetail->getMotionVectors(frames, motionVectorFrame, decodedFrame); - send(frames); - return true; +bool MotionVectorExtractor::process(frame_container& frames) { + frame_sp motionVectorFrame; + frame_sp decodedFrame; + mDetail->getMotionVectors(frames, motionVectorFrame, decodedFrame); + send(frames); + return true; } void MotionVectorExtractor::setMetadata(frame_sp frame) { - auto metadata = frame->getMetadata(); - if (!metadata->isSet()) { - return; - } - sps_pps_properties p; - H264ParserUtils::parse_sps( - ((const char *)frame->data()) + 5, - frame->size() > 5 ? frame->size() - 5 : frame->size(), &p); - mDetail->mWidth = p.width; - mDetail->mHeight = p.height; - RawImageMetadata outputMetadata(mDetail->mWidth, mDetail->mHeight, - ImageMetadata::BGR, CV_8UC3, 0, CV_8U, - FrameMetadata::HOST, true); - auto rawOutMetadata = - FrameMetadataFactory::downcast(rawOutputMetadata); - rawOutMetadata->setData(outputMetadata); - mDetail->bgrImg = Utils::getMatHeader(rawOutMetadata); + auto metadata = frame->getMetadata(); + if (!metadata->isSet()) { + return; + } + sps_pps_properties p; + H264ParserUtils::parse_sps( + ((const char*)frame->data()) + 5, + frame->size() > 5 ? 
frame->size() - 5 : frame->size(), &p); + mDetail->mWidth = p.width; + mDetail->mHeight = p.height; + RawImageMetadata outputMetadata(mDetail->mWidth, mDetail->mHeight, + ImageMetadata::BGR, CV_8UC3, 0, CV_8U, + FrameMetadata::HOST, true); + auto rawOutMetadata = + FrameMetadataFactory::downcast(rawOutputMetadata); + rawOutMetadata->setData(outputMetadata); + mDetail->bgrImg = Utils::getMatHeader(rawOutMetadata); } -bool MotionVectorExtractor::processSOS(frame_sp &frame) { - setMetadata(frame); - mShouldTriggerSOS = false; - return true; +bool MotionVectorExtractor::processSOS(frame_sp& frame) { + setMetadata(frame); + mShouldTriggerSOS = false; + return true; } -bool MotionVectorExtractor::handlePropsChange(frame_sp &frame) { - MotionVectorExtractorProps props; - auto ret = Module::handlePropsChange(frame, props); - mDetail->setProps(props); - return ret; +bool MotionVectorExtractor::handlePropsChange(frame_sp& frame) { + MotionVectorExtractorProps props; + auto ret = Module::handlePropsChange(frame, props); + mDetail->setProps(props); + return ret; } -void MotionVectorExtractor::setProps(MotionVectorExtractorProps &props) { - Module::addPropsToQueue(props); +void MotionVectorExtractor::setProps(MotionVectorExtractorProps& props) { + Module::addPropsToQueue(props); } \ No newline at end of file From f499d5f493cabc6d56817a49beda579c0b1355cf Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 3 Jun 2024 19:19:03 +0530 Subject: [PATCH 38/49] refactored CMake and file structure for thumbnail samples --- .../CMakeLists.txt | 7 ++- ...deo.cpp => GenerateThumbnailsPipeline.cpp} | 48 ++----------------- .../GenerateThumbnailsPipeline.h | 3 +- .../pipelineMain.cpp | 30 ++++++++++++ ...test_generate_thumbnail_from_mp4_video.cpp | 37 +++++++------- 5 files changed, 58 insertions(+), 67 deletions(-) rename samples/create_thumbnail_from_mp4_video/{generate_thumbnail_from_mp4_video.cpp => GenerateThumbnailsPipeline.cpp} (58%) create mode 100644 
samples/create_thumbnail_from_mp4_video/pipelineMain.cpp diff --git a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt index 73e3a6910..0a210d28f 100644 --- a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt +++ b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt @@ -1,12 +1,11 @@ cmake_minimum_required(VERSION 3.22) -set(TARGET create_thumbnail_from_mp4_video) +set(TARGET generateThumbnailFromMp4Video) SET(CORE_FILES - generate_thumbnail_from_mp4_video.cpp - ../../base/test/test_utils.cpp + GenerateThumbnailsPipeline.cpp + pipelineMain.cpp ) SET(CORE_FILES_H GenerateThumbnailsPipeline.h - ../../base/test/test_utils.h ) SET(SOURCE ${CORE_FILES} diff --git a/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp similarity index 58% rename from samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp rename to samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp index 73ab50b04..a0bba2abd 100644 --- a/samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp +++ b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp @@ -1,30 +1,24 @@ -#include "ColorConversionXForm.h" +#include "GenerateThumbnailsPipeline.h" #include "CudaMemCopy.h" #include "ExternalSinkModule.h" #include "FileReaderModule.h" #include "FileWriterModule.h" #include "FrameMetadata.h" -#include "GenerateThumbnailsPipeline.h" #include "H264Decoder.h" #include "H264Metadata.h" #include "JPEGEncoderNVJPEG.h" #include "Mp4ReaderSource.h" #include "Mp4VideoMetadata.h" #include "ValveModule.h" - - +#include GenerateThumbnailsPipeline::GenerateThumbnailsPipeline() : pipeLine("pipeline") {} - -bool GenerateThumbnailsPipeline::setUpPipeLine() { +bool GenerateThumbnailsPipeline::setUpPipeLine( + const std::string &videoPath, const std::string &outFolderPath) { // 
Implementation - std::string videoPath = "./data/Mp4_videos/h264_video_metadata/" - "20230514/0011/1686723796848.mp4"; - std::string outPath = - "./data/mp4Reader_saveOrCompare/h264/thumbnail.jpg"; bool parseFS = false; auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); @@ -47,15 +41,6 @@ bool GenerateThumbnailsPipeline::setUpPipeLine() { valve = boost::shared_ptr(new ValveModule(ValveModuleProps(0))); decoder->setNext(valve); - /* auto conversionType = - ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; auto metadata = - framemetadata_sp(new RawImagePlanarMetadata(1280, 720, - ImageMetadata::ImageType::YUV420,size_t(0), - CV_8U,FrameMetadata::MemType::CUDA_DEVICE));*/ - /* colorchange = boost::shared_ptr(new - ColorConversion(ColorConversionProps(conversionType))); - valve->setNext(colorchange);*/ - auto stream = cudastream_sp(new ApraCudaStream); cudaCopy = boost::shared_ptr( new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, stream))); @@ -66,7 +51,7 @@ bool GenerateThumbnailsPipeline::setUpPipeLine() { cudaCopy->setNext(jpegEncoder); fileWriter = boost::shared_ptr( - new FileWriterModule(FileWriterModuleProps(outPath))); + new FileWriterModule(FileWriterModuleProps(outFolderPath))); jpegEncoder->setNext(fileWriter); return true; @@ -87,26 +72,3 @@ bool GenerateThumbnailsPipeline::stopPipeLine() { pipeLine.wait_for_all(); return true; } -int main() { - GenerateThumbnailsPipeline pipelineInstance; - if (!pipelineInstance.setUpPipeLine()) { - std::cerr << "Failed to setup pipeline." << std::endl; - return 1; - } - - - if (!pipelineInstance.startPipeLine()) { - std::cerr << "Failed to start pipeline." << std::endl; - return 1; // Or any error code indicating failure - } - - // Wait for the pipeline to run for 10 seconds - boost::this_thread::sleep_for(boost::chrono::seconds(5)); - - // Stop the pipeline - if (!pipelineInstance.stopPipeLine()) { - std::cerr << "Failed to stop pipeline." 
<< std::endl; - return 1; // Or any error code indicating failure - } - return 0; // Or any success code -} diff --git a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h index 2137e6d62..cabeeff16 100644 --- a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h +++ b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h @@ -13,10 +13,9 @@ class GenerateThumbnailsPipeline { public: GenerateThumbnailsPipeline(); - bool setUpPipeLine(); + bool setUpPipeLine(const std::string &videoPath,const std::string &outFolderPath); bool startPipeLine(); bool stopPipeLine(); - bool testPipeLine(); private: PipeLine pipeLine; diff --git a/samples/create_thumbnail_from_mp4_video/pipelineMain.cpp b/samples/create_thumbnail_from_mp4_video/pipelineMain.cpp new file mode 100644 index 000000000..759d47475 --- /dev/null +++ b/samples/create_thumbnail_from_mp4_video/pipelineMain.cpp @@ -0,0 +1,30 @@ +#include "GenerateThumbnailsPipeline.h" +#include +#include + +void main(int argc, char *argv[]) { + if (argc < 3) { + std::cerr << "Usage: " << argv[0] << " " + << std::endl; + } + + std::string videoPath = argv[argc - 2]; + std::string outFolderPath = argv[argc - 1]; + + GenerateThumbnailsPipeline pipelineInstance; + if (!pipelineInstance.setUpPipeLine(videoPath, outFolderPath)) { + std::cerr << "Failed to setup pipeline." << std::endl; + } + + if (!pipelineInstance.startPipeLine()) { + std::cerr << "Failed to start pipeline." << std::endl; + } + + // Wait for the pipeline to run for 10 seconds + boost::this_thread::sleep_for(boost::chrono::seconds(5)); + + // Stop the pipeline + if (!pipelineInstance.stopPipeLine()) { + std::cerr << "Failed to stop pipeline." 
<< std::endl; + } +} \ No newline at end of file diff --git a/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp b/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp index 60b8830fe..9246eb4b8 100644 --- a/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp +++ b/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp @@ -1,30 +1,31 @@ +#include "GenerateThumbnailsPipeline.h" +#include "test_utils.h" #include #include -#include "../../base/test/test_utils.h" -#include "GenerateThumbnailsPipeline.h" BOOST_AUTO_TEST_SUITE(generateThumbnails) -BOOST_AUTO_TEST_CASE(generateThumbnails_from_mp4) -{ - auto generateThumbnailPipeline = boost::shared_ptr(new GenerateThumbnailsPipeline()); - generateThumbnailPipeline->setUpPipeLine(); - generateThumbnailPipeline->startPipeLine(); - - boost::this_thread::sleep_for(boost::chrono::seconds(5)); +BOOST_AUTO_TEST_CASE(generateThumbnails_from_mp4) { + auto generateThumbnailPipeline = + boost::shared_ptr( + new GenerateThumbnailsPipeline()); + std::string videoPath = "C:/APRA/fork/ApraPipes/data/Mp4_videos/h264_video_metadata/20230514/0011/1686723796848.mp4"; + std::string outFolderPath = "C:/APRA/fork/ApraPipes/samples/create_thumbnail_from_mp4_video/data/generated_thumbnail/thumbnail_????.jpg"; + generateThumbnailPipeline->setUpPipeLine(videoPath, outFolderPath); + generateThumbnailPipeline->startPipeLine(); - const uint8_t *pReadDataTest = nullptr; - unsigned int readDataSizeTest = 0U; + boost::this_thread::sleep_for(boost::chrono::seconds(5)); - BOOST_TEST( - Test_Utils::readFile("./data/thumbnail/sample_thumbnail.jpg", - pReadDataTest, readDataSizeTest)); - Test_Utils::saveOrCompare( - "./data/mp4Reader_saveOrCompare/h264/test_thumbnail.jpg", - pReadDataTest, readDataSizeTest, 0); + const uint8_t *pReadDataTest = nullptr; + unsigned int readDataSizeTest = 0U; + 
BOOST_TEST(Test_Utils::readFile("C:/APRA/fork/ApraPipes/samples/create_thumbnail_from_mp4_video/data/test_thumbnail/sample_thumbnail.jpg", + pReadDataTest, readDataSizeTest)); + Test_Utils::saveOrCompare( + "C:/APRA/fork/ApraPipes/samples/create_thumbnail_from_mp4_video/data/generated_thumbnail/thumbnail_0000.jpg", pReadDataTest, + readDataSizeTest, 0); - generateThumbnailPipeline->stopPipeLine(); + generateThumbnailPipeline->stopPipeLine(); } BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From 53a3c59c2c7f671ca85a4212eb81352464661ad5 Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 3 Jun 2024 19:21:17 +0530 Subject: [PATCH 39/49] refactored CMake and file structure for play mp4 from beginning sample --- .../play_mp4_from_beginning/CMakeLists.txt | 5 +- .../PlayMp4VideoFromBeginning.cpp | 74 ++++++++ .../PlayMp4VideoFromBeginning.h | 30 +-- .../play_mp4_from_beginning/pipelineMain.cpp | 31 +++ .../play_mp4_video_from_beginning.cpp | 176 ------------------ .../test_play_mp4_video_from_beginning.cpp | 87 ++++++++- 6 files changed, 200 insertions(+), 203 deletions(-) create mode 100644 samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp create mode 100644 samples/play_mp4_from_beginning/pipelineMain.cpp delete mode 100644 samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp diff --git a/samples/play_mp4_from_beginning/CMakeLists.txt b/samples/play_mp4_from_beginning/CMakeLists.txt index 69878b0b5..8d14c5e00 100644 --- a/samples/play_mp4_from_beginning/CMakeLists.txt +++ b/samples/play_mp4_from_beginning/CMakeLists.txt @@ -1,12 +1,11 @@ cmake_minimum_required(VERSION 3.22) set(TARGET play_mp4_from_beginning) SET(CORE_FILES - play_mp4_video_from_beginning.cpp - ../../base/test/test_utils.cpp + PlayMp4VideoFromBeginning.cpp + pipelineMain.cpp ) SET(CORE_FILES_H PlayMp4VideoFromBeginning.h - ../../base/test/test_utils.h ) SET(SOURCE ${CORE_FILES} diff --git a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp 
b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp new file mode 100644 index 000000000..7b6a011a1 --- /dev/null +++ b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp @@ -0,0 +1,74 @@ + +#include "PlayMp4VideoFromBeginning.h" +#include "ColorConversionXForm.h" +#include "CudaMemCopy.h" +#include "ExternalSinkModule.h" +#include "FileWriterModule.h" +#include "Frame.h" +#include "FrameContainerQueue.h" +#include "FrameMetadata.h" +#include "H264Decoder.h" +#include "H264Metadata.h" +#include "ImageViewerModule.h" +#include "JPEGEncoderNVJPEG.h" +#include "Mp4ReaderSource.h" +#include "Mp4VideoMetadata.h" +#include + +PlayMp4VideoFromBeginning::PlayMp4VideoFromBeginning() : pipeLine("pipeline") {} + +bool PlayMp4VideoFromBeginning::setUpPipeLine(const std::string &videoPath) { + // Implementation + bool parseFS = false; + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); + auto frameType = FrameMetadata::FrameType::H264_DATA; + auto mp4ReaderProps = + Mp4ReaderSourceProps(videoPath, parseFS, 0, true, true, false); + mp4ReaderProps.fps = 24; + mp4Reader = + boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); + mp4Reader->addOutPutPin(h264ImageMetadata); + + auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); + mp4Reader->addOutPutPin(mp4Metadata); + + std::vector mImagePin; + mImagePin = mp4Reader->getAllOutputPinsByType(frameType); + + decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); + mp4Reader->setNext(decoder, mImagePin); + + auto conversionType = + ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; + auto metadata = framemetadata_sp(new RawImagePlanarMetadata( + 1280, 720, ImageMetadata::ImageType::YUV420, size_t(0), CV_8U)); + colorchange = boost::shared_ptr( + new ColorConversion(ColorConversionProps(conversionType))); + decoder->setNext(colorchange); + + imageViewerSink = boost::shared_ptr( + new ImageViewerModule(ImageViewerModuleProps("imageview"))); + 
colorchange->setNext(imageViewerSink); + + return true; +} + +bool PlayMp4VideoFromBeginning::startPipeLine() { + pipeLine.appendModule(mp4Reader); + pipeLine.init(); + pipeLine.run_all_threaded(); + return true; +} + +bool PlayMp4VideoFromBeginning::stopPipeLine() { + pipeLine.stop(); + pipeLine.term(); + pipeLine.wait_for_all(); + return true; +} + +bool PlayMp4VideoFromBeginning::flushQueuesAndSeek() { + pipeLine.flushAllQueues(); + mp4Reader->randomSeek(1686723796848, false); + return true; +} diff --git a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h index 6fa0c45bc..6f4458452 100644 --- a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h +++ b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h @@ -1,24 +1,24 @@ -#include -#include "Mp4ReaderSource.h" -#include "ImageViewerModule.h" #include "ColorConversionXForm.h" #include "H264Decoder.h" +#include "ImageViewerModule.h" +#include "Mp4ReaderSource.h" +#include class PlayMp4VideoFromBeginning { public: - PlayMp4VideoFromBeginning(); - bool setUpPipeLine(); - bool startPipeLine(); - bool stopPipeLine(); - bool flushQueuesAndSeek(); - bool testPipeLineForFlushQue(); - bool testPipeLineForSeek(); + PlayMp4VideoFromBeginning(); + bool setUpPipeLine(const std::string &videoPath); + bool startPipeLine(); + bool stopPipeLine(); + bool flushQueuesAndSeek(); + + + boost::shared_ptr mp4Reader; + boost::shared_ptr imageViewerSink; + boost::shared_ptr decoder; + boost::shared_ptr colorchange; private: - PipeLine pipeLine; - boost::shared_ptr mp4Reader; - boost::shared_ptr imageViewerSink; - boost::shared_ptr decoder; - boost::shared_ptr colorchange; + PipeLine pipeLine; }; \ No newline at end of file diff --git a/samples/play_mp4_from_beginning/pipelineMain.cpp b/samples/play_mp4_from_beginning/pipelineMain.cpp new file mode 100644 index 000000000..6eba9548b --- /dev/null +++ b/samples/play_mp4_from_beginning/pipelineMain.cpp @@ 
-0,0 +1,31 @@ +#include "PlayMp4VideoFromBeginning.h" +#include +#include + +void main(int argc, char *argv[]) { + if (argc < 2) { + std::cerr << "Usage: " << argv[0] << " " + << std::endl; + } + std::string videoPath = argv[argc - 1]; + + PlayMp4VideoFromBeginning pipelineInstance; + + + if (!pipelineInstance.setUpPipeLine(videoPath)){ + std::cerr << "Failed to setup pipeline." << std::endl; + } + if (!pipelineInstance.startPipeLine()) { + std::cerr << "Failed to start pipeline." << std::endl; + } + // Wait for the pipeline to run for 10 seconds + boost::this_thread::sleep_for(boost::chrono::seconds(3)); + if (!pipelineInstance.flushQueuesAndSeek()) { + std::cerr << "Failed to flush Queues." << std::endl; + } + boost::this_thread::sleep_for(boost::chrono::seconds(5)); + // Stop the pipeline + if (!pipelineInstance.stopPipeLine()) { + std::cerr << "Failed to stop pipeline." << std::endl; + } +} \ No newline at end of file diff --git a/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp b/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp deleted file mode 100644 index 70eda7a5a..000000000 --- a/samples/play_mp4_from_beginning/play_mp4_video_from_beginning.cpp +++ /dev/null @@ -1,176 +0,0 @@ - -#include "PlayMp4VideoFromBeginning.h" -#include "FrameMetadata.h" -#include "H264Metadata.h" -#include "Mp4ReaderSource.h" -#include "Mp4VideoMetadata.h" -#include "H264Decoder.h" -#include "ValveModule.h" -#include "ColorConversionXForm.h" -#include "CudaMemCopy.h" -#include "FileWriterModule.h" -#include "JPEGEncoderNVJPEG.h" -#include "ExternalSinkModule.h" -#include "ImageViewerModule.h" -#include -#include -#include "Frame.h" -#include "FrameContainerQueue.h" -#include "../../base/test/test_utils.h" - -class SinkModuleProps : public ModuleProps { -public: - SinkModuleProps() : ModuleProps(){}; -}; - -class SinkModule : public Module { -public: - SinkModule(SinkModuleProps props) : Module(SINK, "sinkModule", props){}; - boost::shared_ptr 
getQue() { return Module::getQue(); } - frame_container pop() { return Module::pop(); } - -protected: - bool process() { return false; } - bool validateOutputPins() { return true; } - bool validateInputPins() { return true; } -}; - -PlayMp4VideoFromBeginning::PlayMp4VideoFromBeginning() : pipeLine("pipeline") { - -} - -bool PlayMp4VideoFromBeginning::testPipeLineForFlushQue() { - auto sink = boost::shared_ptr(new SinkModule(SinkModuleProps())); - colorchange->setNext(sink); - - BOOST_CHECK(mp4Reader->init()); - BOOST_CHECK(decoder->init()); - BOOST_CHECK(colorchange->init()); - BOOST_CHECK(sink->init()); - - for (int i = 0; i <= 20; i++) { - mp4Reader->step(); - decoder->step(); - } - colorchange->step(); - - auto frames = sink->pop(); - auto frame = frames.size(); - auto sinkQue = sink->getQue(); - BOOST_CHECK_EQUAL(frames.size(), 1); - sinkQue->flush(); - BOOST_CHECK_EQUAL(sinkQue->size(), 0); - frame_sp outputFrame = frames.cbegin()->second; - Test_Utils::saveOrCompare("../.././data/mp4Reader_saveOrCompare/h264/testplay.raw", const_cast(static_cast(outputFrame->data())), outputFrame->size(), 0); - return true; -} -bool PlayMp4VideoFromBeginning::testPipeLineForSeek() { - auto sink = boost::shared_ptr(new SinkModule(SinkModuleProps())); - mp4Reader->setNext(sink); - - BOOST_CHECK(mp4Reader->init()); - BOOST_CHECK(decoder->init()); - BOOST_CHECK(colorchange->init()); - BOOST_CHECK(sink->init()); - - mp4Reader->step(); - - auto frames = sink->pop(); - auto imgFrame = frames.begin()->second; - - uint64_t skipTS = 1686723797848; - mp4Reader->randomSeek(skipTS, false); - mp4Reader->step(); - BOOST_TEST(imgFrame->timestamp == 1686723797856); - LOG_INFO << "Found next available frame " << imgFrame->timestamp - skipTS - << " msecs later from skipTS"; - - return true; -} -bool PlayMp4VideoFromBeginning::setUpPipeLine() { - // Implementation - std::string videoPath = "../.././data/Mp4_videos/h264_video_metadata/20230514/0011/1686723796848.mp4"; - std::string outPath = 
"../.././data/mp4Reader_saveOrCompare/h264/frame_000???.jpg"; - - bool parseFS = false; - auto h264ImageMetadata = framemetadata_sp(new H264Metadata(0, 0)); - auto frameType = FrameMetadata::FrameType::H264_DATA; - auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, parseFS, 0, true, true, false); - mp4ReaderProps.fps = 24; - mp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); - mp4Reader->addOutPutPin(h264ImageMetadata); - - auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); - mp4Reader->addOutPutPin(mp4Metadata); - - std::vector mImagePin; - mImagePin = mp4Reader->getAllOutputPinsByType(frameType); - - decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); - mp4Reader->setNext(decoder, mImagePin); - - auto conversionType = ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; - auto metadata = framemetadata_sp(new RawImagePlanarMetadata(1280, 720, ImageMetadata::ImageType::YUV420, size_t(0), CV_8U)); - colorchange = boost::shared_ptr(new ColorConversion(ColorConversionProps(conversionType))); - decoder->setNext(colorchange); - - imageViewerSink = boost::shared_ptr( - new ImageViewerModule(ImageViewerModuleProps("imageview"))); - colorchange->setNext(imageViewerSink); - - return true; -} - -bool PlayMp4VideoFromBeginning::startPipeLine() { - pipeLine.appendModule(mp4Reader); - pipeLine.init(); - pipeLine.run_all_threaded(); - return true; -} - -bool PlayMp4VideoFromBeginning::stopPipeLine() { - pipeLine.stop(); - pipeLine.term(); - pipeLine.wait_for_all(); - return true; -} - -bool PlayMp4VideoFromBeginning::flushQueuesAndSeek() { - pipeLine.flushAllQueues(); - mp4Reader->randomSeek(1686723796848, false); - return true; -} - -int main() { - PlayMp4VideoFromBeginning pipelineInstance; - if (!pipelineInstance.setUpPipeLine()) { - std::cerr << "Failed to setup pipeline." << std::endl; - return 1; - } - if (!pipelineInstance.testPipeLineForFlushQue()) { - std::cerr << "Failed to test pipeline." 
<< std::endl; - return 1; - } - //if (!pipelineInstance.testPipeLineForSeek()) { - // std::cerr << "Failed to test pipeline." << std::endl; - // return 1; - //} - //if (!pipelineInstance.startPipeLine()) { - // std::cerr << "Failed to start pipeline." << std::endl; - // return 1; // Or any error code indicating failure - //} - // Wait for the pipeline to run for 10 seconds - boost::this_thread::sleep_for(boost::chrono::seconds(3)); - if (!pipelineInstance.flushQueuesAndSeek()) { - std::cerr << "Failed to flush Queues." << std::endl; - return 1; - } - boost::this_thread::sleep_for(boost::chrono::seconds(5)); - // Stop the pipeline - if (!pipelineInstance.stopPipeLine()) { - std::cerr << "Failed to stop pipeline." << std::endl; - return 1; // Or any error code indicating failure - } - - return 0; // Or any success code -} diff --git a/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp b/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp index 8586303be..e99a02cc4 100644 --- a/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp +++ b/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp @@ -1,18 +1,87 @@ -#include - +#include "Frame.h" +#include "FrameContainerQueue.h" #include "PlayMp4VideoFromBeginning.h" +#include "test_utils.h" +#include BOOST_AUTO_TEST_SUITE(start_video_from_beginning) +class SinkModuleProps : public ModuleProps { +public: + SinkModuleProps() : ModuleProps(){}; +}; + +class SinkModule : public Module { +public: + SinkModule(SinkModuleProps props) : Module(SINK, "sinkModule", props){}; + boost::shared_ptr getQue() { return Module::getQue(); } + frame_container pop() { return Module::pop(); } + +protected: + bool process() { return false; } + bool validateOutputPins() { return true; } + bool validateInputPins() { return true; } +}; + +BOOST_AUTO_TEST_CASE(play_mp4_from_beginning_flush_queue_test) { + auto playMp4VideoFromBeginning = boost::shared_ptr( + new 
PlayMp4VideoFromBeginning()); + std::string videoPath = "C:/APRA/fork/ApraPipes/data/Mp4_videos/" + "h264_video_metadata/20230514/0011/1686723796848.mp4"; + playMp4VideoFromBeginning->setUpPipeLine(videoPath); + + auto sink = boost::shared_ptr(new SinkModule(SinkModuleProps())); + playMp4VideoFromBeginning->colorchange->setNext(sink); + BOOST_CHECK(playMp4VideoFromBeginning->mp4Reader->init()); + BOOST_CHECK(playMp4VideoFromBeginning->decoder->init()); + BOOST_CHECK(playMp4VideoFromBeginning->colorchange->init()); + BOOST_CHECK(sink->init()); + + for (int i = 0; i <= 20; i++) { + playMp4VideoFromBeginning->mp4Reader->step(); + playMp4VideoFromBeginning->decoder->step(); + } + playMp4VideoFromBeginning->colorchange->step(); + + auto frames = sink->pop(); + auto frame = frames.size(); + auto sinkQue = sink->getQue(); + BOOST_CHECK_EQUAL(frames.size(), 1); + sinkQue->flush(); + BOOST_CHECK_EQUAL(sinkQue->size(), 0); + frame_sp outputFrame = frames.cbegin()->second; + Test_Utils::saveOrCompare( + "./data/mp4Reader_saveOrCompare/h264/testplay.raw", + const_cast(static_cast(outputFrame->data())), + outputFrame->size(), 0); +} + +BOOST_AUTO_TEST_CASE(play_mp4_from_beginning_seek_test) { + auto playMp4VideoFromBeginning = boost::shared_ptr( + new PlayMp4VideoFromBeginning()); + std::string videoPath = "C:/APRA/fork/ApraPipes/data/Mp4_videos/" + "h264_video_metadata/20230514/0011/1686723796848.mp4"; + playMp4VideoFromBeginning->setUpPipeLine(videoPath); + auto sink = boost::shared_ptr(new SinkModule(SinkModuleProps())); + playMp4VideoFromBeginning->mp4Reader->setNext(sink); + + BOOST_CHECK(playMp4VideoFromBeginning->mp4Reader->init()); + BOOST_CHECK(playMp4VideoFromBeginning->decoder->init()); + BOOST_CHECK(playMp4VideoFromBeginning->colorchange->init()); + BOOST_CHECK(sink->init()); -BOOST_AUTO_TEST_CASE(play_mp4_from_beginning_flush_queue_test) -{ - auto playMp4VideoFromBeginning = boost::shared_ptr(new PlayMp4VideoFromBeginning()); - 
playMp4VideoFromBeginning->setUpPipeLine(); - playMp4VideoFromBeginning->startPipeLine(); + playMp4VideoFromBeginning->mp4Reader->step(); - boost::this_thread::sleep_for(boost::chrono::seconds(10)); + auto frames = sink->pop(); + auto imgFrame = frames.begin()->second; - playMp4VideoFromBeginning->stopPipeLine(); + uint64_t skipTS = 1686723796848; + playMp4VideoFromBeginning->mp4Reader->randomSeek(skipTS, false); + playMp4VideoFromBeginning->mp4Reader->step(); + frames = sink->pop(); + imgFrame = frames.begin()->second; + BOOST_TEST(imgFrame->timestamp == 1686723796848); + LOG_INFO << "Found next available frame " << imgFrame->timestamp - skipTS + << " msecs later from skipTS"; } BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From b26a87197e1ea237d21799821dc39745917b7ea0 Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 3 Jun 2024 19:25:04 +0530 Subject: [PATCH 40/49] base CMake changes --- base/CMakeLists.txt | 61 +++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 74a575e41..e41315ed6 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -163,8 +163,6 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp - .././samples/create_thumbnail_from_mp4_video/generate_thumbnail_from_mp4_video.cpp - .././samples/timelapse-sample/timelapse_summary.h ) SET(CORE_FILES_H @@ -226,8 +224,6 @@ SET(CORE_FILES_H include/OverlayModule.h include/OrderedCacheOfFiles.h include/TestSignalGeneratorSrc.h - .././samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h - .././samples/timelapse-sample/timelapse_summary.cpp ) IF(ENABLE_WINDOWS) @@ -460,8 +456,6 @@ ${LIBMP4_INC_DIR} ${BARESIP_INC_DIR} ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} -.././samples/create_thumbnail_from_mp4_video -./../samples/timelapse-sample ) @@ -573,11 +567,25 @@ SET(UT_FILES ) SET(SAMPLE_UT_FILES + test/utmain.cpp test/test_utils.cpp 
test/test_utils.h - .././samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp - .././samples/play_mp4_from_beginningtest_play_mp4_video_from_beginning.cpp + ../samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp + ../samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp +) + +SET(SAMPLE_CORE_FILES + ../samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h + ../samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp + ../samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h + ../samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp +) + +SET(SAMPLE_SOURCE + ${SAMPLE_CORE_FILES} + ${SAMPLE_UT_FILES} ) + IF(ENABLE_LINUX) list(APPEND UT_FILES test/virtualcamerasink_tests.cpp @@ -585,9 +593,20 @@ IF(ENABLE_LINUX) ) ENDIF(ENABLE_LINUX) - add_executable(aprapipesut ${UT_FILES}) -add_executable(aprapipessampleut ${SAMPLE_UT_FILES}) +add_executable(aprapipessampleut ${SAMPLE_SOURCE}) + +target_include_directories ( aprapipessampleut PRIVATE +${JETSON_MULTIMEDIA_LIB_INCLUDE} +${FFMPEG_INCLUDE_DIRS} +${OpenCV_INCLUDE_DIRS} +${Boost_INCLUDE_DIRS} +${LIBMP4_INC_DIR} +${BARESIP_INC_DIR} +${LIBRE_INC_DIR} +${NVCODEC_INCLUDE_DIR} +test +) IF(ENABLE_ARM64) target_include_directories ( aprapipesut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_ROOT} ${JPEG_INCLUDE_DIR}) @@ -601,28 +620,6 @@ ENDIF (ENABLE_CUDA) find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) -target_link_libraries(aprapipessampleut - aprapipes - ${JPEG_LIBRARIES} - ${LIBMP4_LIB} - ${OPENH264_LIB} - ${Boost_LIBRARIES} - ${FFMPEG_LIBRARIES} - ${OpenCV_LIBRARIES} - ${JETSON_LIBS} - ${NVCUDAToolkit_LIBS} - ${NVCODEC_LIB} - ${NVJPEGLIB_L4T} - ${CURSES_LIBRARIES} - ZXing::Core - ZXing::ZXing - BZip2::BZip2 - ZLIB::ZLIB - liblzma::liblzma - bigint::bigint - sfml-audio - whisper::whisper - ) 
target_link_libraries(aprapipessampleut aprapipes From f07bc4e74127589a1119ffd6112c4dd7e8c4f5c9 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Wed, 5 Jun 2024 14:27:25 +0530 Subject: [PATCH 41/49] Addressed comments. --- base/src/MotionVectorExtractor.cpp | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index a0a7467b9..dd3101d7b 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -118,10 +118,8 @@ bool discardNoisyFrames(int black_pixel_percent, cv::Mat image) { int whitePixels = cv::countNonZero(binary); int blackPixels = totalPixels - whitePixels; - double blackPixelPercentage = - (static_cast(blackPixels) / static_cast(totalPixels)) * - 100.0; - if (blackPixelPercentage > black_pixel_percent) + double blackPixelPercentage = (static_cast(blackPixels) / static_cast(totalPixels)) * 100.0; + if (blackPixelPercentage > black_pixel_percent || whitePixels == totalPixels) return true; else return false; @@ -139,9 +137,12 @@ void DetailFfmpeg::getMotionVectors(frame_container& frames, frame_sp& outFrame, if (!pkt) { LOG_ERROR << "Could not allocate AVPacket\n"; } - ret = decodeAndGetMotionVectors(pkt, frames, outFrame, decodedFrame); - av_packet_free(&pkt); - av_frame_free(&avFrame); + bgrImg.data = static_cast(decodedFrame->data()); + if (!discardNoisyFrames(99, bgrImg)) { + ret = decodeAndGetMotionVectors(pkt, frames, outFrame, decodedFrame); + av_packet_free(&pkt); + av_frame_free(&avFrame); + } } int DetailFfmpeg::decodeAndGetMotionVectors(AVPacket* pkt, @@ -351,7 +352,7 @@ void DetailOpenH264::getMotionVectors(frame_container& frames, cv::Mat yuvImgCV = cv::Mat(mHeight + mHeight / 2, mWidth, CV_8UC1, yuvStartPointer, mWidth); bgrImg.data = static_cast(decodedFrame->data()); - if (!discardNoisyFrames(97, bgrImg)) { + if (!discardNoisyFrames(99, bgrImg)) { cv::cvtColor(yuvImgCV, bgrImg, cv::COLOR_YUV2BGR_I420); 
frames.insert(make_pair(rawFramePinId, decodedFrame)); } From a2102f1861919a7e6b8f3dad69d9211797c0ee2c Mon Sep 17 00:00:00 2001 From: nsabale7 Date: Fri, 14 Jun 2024 14:27:28 +0530 Subject: [PATCH 42/49] changes in cmake --- base/CMakeLists.txt | 38 ++++++-- samples/face_detection_cpu/CMakeLists.txt | 10 ++- .../face_detection_cpu/face_detection_cpu.cpp | 87 ++++--------------- .../face_detection_cpu/face_detection_cpu.h | 13 ++- samples/face_detection_cpu/pipelineMain.cpp | 22 +++++ .../test_face_detection_cpu.cpp | 11 ++- 6 files changed, 89 insertions(+), 92 deletions(-) create mode 100644 samples/face_detection_cpu/pipelineMain.cpp diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 7965833af..6d3f983fd 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -163,7 +163,6 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp - .././samples/face_detection_cpu/face_detection_cpu.cpp ) SET(CORE_FILES_H @@ -225,7 +224,6 @@ SET(CORE_FILES_H include/OverlayModule.h include/OrderedCacheOfFiles.h include/TestSignalGeneratorSrc.h - .././samples/face_detection_cpu/face_detection_cpu.h ) IF(ENABLE_WINDOWS) @@ -458,7 +456,6 @@ ${LIBMP4_INC_DIR} ${BARESIP_INC_DIR} ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} -.././samples/face_detection_cpu ) @@ -569,6 +566,23 @@ SET(UT_FILES ${CUDA_UT_FILES} ) +SET(SAMPLE_UT_FILES + test/utmain.cpp + test/test_utils.cpp + test/test_utils.h + ../samples/face_detection_cpu/test_face_detection_cpu.cpp +) + +SET(SAMPLE_CORE_FILES + ../samples/face_detection_cpu/face_detection_cpu.h + ../samples/face_detection_cpu/face_detection_cpu.cpp +) + +SET(SAMPLE_SOURCE + ${SAMPLE_CORE_FILES} + ${SAMPLE_UT_FILES} +) + IF(ENABLE_LINUX) list(APPEND UT_FILES test/virtualcamerasink_tests.cpp @@ -576,12 +590,20 @@ IF(ENABLE_LINUX) ) ENDIF(ENABLE_LINUX) -SET(SAMPLE_UT_FILES -.././samples/face_detection_cpu/test_face_detection_cpu.cpp -) - add_executable(aprapipesut ${UT_FILES}) 
-add_executable(aprapipessampleut ${SAMPLE_UT_FILES}) +add_executable(aprapipessampleut ${SAMPLE_SOURCE}) + +target_include_directories ( aprapipessampleut PRIVATE + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} + test +) IF(ENABLE_ARM64) target_include_directories ( aprapipesut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_ROOT} ${JPEG_INCLUDE_DIR}) diff --git a/samples/face_detection_cpu/CMakeLists.txt b/samples/face_detection_cpu/CMakeLists.txt index 19b385c4a..14a0db98e 100644 --- a/samples/face_detection_cpu/CMakeLists.txt +++ b/samples/face_detection_cpu/CMakeLists.txt @@ -1,13 +1,15 @@ +cmake_minimum_required(VERSION 3.22) set(TARGET face_detection_cpu) -SET(CORE_FILES +SET(SAMPLE_FILES face_detection_cpu.cpp + pipelineMain.cpp ) -SET(CORE_FILES_H +SET(SAMPLE_FILES_H face_detection_cpu.h ) SET(SOURCE - ${CORE_FILES} - ${CORE_FILES_H} + ${SAMPLE_FILES} + ${SAMPLE_FILES_H} ) add_executable(${TARGET} ${SOURCE}) diff --git a/samples/face_detection_cpu/face_detection_cpu.cpp b/samples/face_detection_cpu/face_detection_cpu.cpp index e904f3473..c115d3d68 100644 --- a/samples/face_detection_cpu/face_detection_cpu.cpp +++ b/samples/face_detection_cpu/face_detection_cpu.cpp @@ -7,94 +7,39 @@ #include "FaceDetectorXform.h" #include "ColorConversionXForm.h" #include -#include "ExternalSinkModule.h" - - -FaceDetectionCPU::FaceDetectionCPU() : pipeline("test") {} - -bool FaceDetectionCPU::testPipeline(){ - auto sink = boost::shared_ptr(new ExternalSinkModule()); - colorConversion->setNext(sink); - - BOOST_TEST(source->init()); - BOOST_TEST(faceDetector->init()); - BOOST_TEST(overlay->init()); - BOOST_TEST(colorConversion->init()); - BOOST_TEST(sink->init()); - - source->step(); - faceDetector->step(); - overlay->step(); - colorConversion->step(); - - auto frames = sink->pop(); - BOOST_TEST(frames.size() == 1); - return true; 
-} +FaceDetectionCPU::FaceDetectionCPU() : faceDetectionCPUSamplePipeline("faceDetectionCPUSamplePipeline") {} bool FaceDetectionCPU::setupPipeline() { WebCamSourceProps webCamSourceprops(0, 640, 480); - source = boost::shared_ptr(new WebCamSource(webCamSourceprops)); + mSource = boost::shared_ptr(new WebCamSource(webCamSourceprops)); FaceDetectorXformProps faceDetectorProps(1.0, 0.8, "./data/assets/deploy.prototxt", "./data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel"); - faceDetector = boost::shared_ptr(new FaceDetectorXform(faceDetectorProps)); - source->setNext(faceDetector); + mFaceDetector = boost::shared_ptr(new FaceDetectorXform(faceDetectorProps)); + mSource->setNext(mFaceDetector); - overlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); - faceDetector->setNext(overlay); + mOverlay = boost::shared_ptr(new OverlayModule(OverlayModuleProps())); + mFaceDetector->setNext(mOverlay); - colorConversion = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_BGR))); - overlay->setNext(colorConversion); + mColorConversion = boost::shared_ptr(new ColorConversion(ColorConversionProps(ColorConversionProps::RGB_TO_BGR))); + mOverlay->setNext(mColorConversion); - imageViewerSink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("imageview"))); - colorConversion->setNext(imageViewerSink); + mImageViewerSink = boost::shared_ptr(new ImageViewerModule(ImageViewerModuleProps("imageview"))); + mColorConversion->setNext(mImageViewerSink); return true; } bool FaceDetectionCPU::startPipeline() { - pipeline.appendModule(source); - pipeline.init(); - pipeline.run_all_threaded(); + faceDetectionCPUSamplePipeline.appendModule(mSource); + faceDetectionCPUSamplePipeline.init(); + faceDetectionCPUSamplePipeline.run_all_threaded(); return true; } bool FaceDetectionCPU::stopPipeline() { - pipeline.stop(); - pipeline.term(); - pipeline.wait_for_all(); + faceDetectionCPUSamplePipeline.stop(); + 
faceDetectionCPUSamplePipeline.term(); + faceDetectionCPUSamplePipeline.wait_for_all(); return true; -} - -int main() { - LoggerProps loggerProps; - loggerProps.logLevel = boost::log::trivial::severity_level::info; - Logger::setLogLevel(boost::log::trivial::severity_level::info); - Logger::initLogger(loggerProps); - - FaceDetectionCPU pipelineInstance; - - // Setup the pipeline - if (!pipelineInstance.setupPipeline()) { - std::cerr << "Failed to setup pipeline." << std::endl; - return 1; // Or any error code indicating failure - } - - // Start the pipeline - if (!pipelineInstance.testPipeline()) { - std::cerr << "Failed to start pipeline." << std::endl; - return 1; // Or any error code indicating failure - } - - // Wait for the pipeline to run for 10 seconds - boost::this_thread::sleep_for(boost::chrono::seconds(5)); - - // Stop the pipeline - if (!pipelineInstance.stopPipeline()) { - std::cerr << "Failed to stop pipeline." << std::endl; - return 1; // Or any error code indicating failure - } - - return 0; // Or any success code } \ No newline at end of file diff --git a/samples/face_detection_cpu/face_detection_cpu.h b/samples/face_detection_cpu/face_detection_cpu.h index 46016e76f..8bf01b4c1 100644 --- a/samples/face_detection_cpu/face_detection_cpu.h +++ b/samples/face_detection_cpu/face_detection_cpu.h @@ -13,12 +13,11 @@ class FaceDetectionCPU { bool setupPipeline(); bool startPipeline(); bool stopPipeline(); - bool testPipeline(); private: - PipeLine pipeline; - boost::shared_ptr source; - boost::shared_ptr faceDetector; - boost::shared_ptr overlay; - boost::shared_ptr colorConversion; - boost::shared_ptr imageViewerSink; + PipeLine faceDetectionCPUSamplePipeline; + boost::shared_ptr mSource; + boost::shared_ptr mFaceDetector; + boost::shared_ptr mOverlay; + boost::shared_ptr mColorConversion; + boost::shared_ptr mImageViewerSink; }; \ No newline at end of file diff --git a/samples/face_detection_cpu/pipelineMain.cpp 
b/samples/face_detection_cpu/pipelineMain.cpp new file mode 100644 index 000000000..08d4f22c7 --- /dev/null +++ b/samples/face_detection_cpu/pipelineMain.cpp @@ -0,0 +1,22 @@ +#include "face_detection_cpu.h" +#include +#include + +void main() { + FaceDetectionCPU faceDetectionCPUSamplePipeline; + if (!faceDetectionCPUSamplePipeline.setupPipeline()) { + std::cerr << "Failed to setup pipeline." << std::endl; + } + + if (!faceDetectionCPUSamplePipeline.startPipeline()) { + std::cerr << "Failed to start pipeline." << std::endl; + } + + // Wait for the pipeline to run for 10 seconds + boost::this_thread::sleep_for(boost::chrono::seconds(50)); + + // Stop the pipeline + if (!faceDetectionCPUSamplePipeline.stopPipeline()) { + std::cerr << "Failed to stop pipeline." << std::endl; + } +} \ No newline at end of file diff --git a/samples/face_detection_cpu/test_face_detection_cpu.cpp b/samples/face_detection_cpu/test_face_detection_cpu.cpp index 6228b293e..9f0817d76 100644 --- a/samples/face_detection_cpu/test_face_detection_cpu.cpp +++ b/samples/face_detection_cpu/test_face_detection_cpu.cpp @@ -2,10 +2,17 @@ #include "face_detection_cpu.h" -BOOST_AUTO_TEST_SUITE(start_face_tracking) +BOOST_AUTO_TEST_SUITE(test_face_detection_cpu) -BOOST_AUTO_TEST_CASE(face_tracking_simple) +BOOST_AUTO_TEST_CASE(face_detection_cpu) { + auto faceDetectionCPUPipeline = boost::shared_ptr(new FaceDetectionCPU()); + faceDetectionCPUPipeline->setupPipeline(); + faceDetectionCPUPipeline->startPipeline(); + + boost::this_thread::sleep_for(boost::chrono::seconds(50)); + + faceDetectionCPUPipeline->stopPipeline(); } BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From 06f679eaae942792030a2acee2c055891c308ec0 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Mon, 17 Jun 2024 15:48:23 +0530 Subject: [PATCH 43/49] Added readme file for samples. code refactoring and renaming. Addressed comments. 
--- base/CMakeLists.txt | 37 ++++- samples/README.md | 28 ++++ samples/timelapse-sample/CMakeLists.txt | 36 ++--- samples/timelapse-sample/PipelineMain.cpp | 31 ++++ .../timelapse-sample/timelapse_summary.cpp | 143 +++++------------- samples/timelapse-sample/timelapse_summary.h | 25 ++- .../timelapse_summary_test.cpp | 11 +- 7 files changed, 165 insertions(+), 146 deletions(-) create mode 100644 samples/README.md create mode 100644 samples/timelapse-sample/PipelineMain.cpp diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index ee115b9c7..d66e17f64 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -163,7 +163,6 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp - .././samples/timelapse-sample/timelapse_summary.cpp ) SET(CORE_FILES_H @@ -225,7 +224,6 @@ SET(CORE_FILES_H include/OverlayModule.h include/OrderedCacheOfFiles.h include/TestSignalGeneratorSrc.h - .././samples/timelapse-sample/timelapse_summary.h ) IF(ENABLE_WINDOWS) @@ -458,10 +456,8 @@ ${LIBMP4_INC_DIR} ${BARESIP_INC_DIR} ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} -./../samples/timelapse-sample ) - # aprapipes Unit Tests IF (ENABLE_ARM64) @@ -570,8 +566,22 @@ SET(UT_FILES ) SET(SAMPLE_UT_FILES -.././samples/timelapse-sample/timelapse_summary_test.cpp + test/utmain.cpp + test/test_utils.cpp + test/test_utils.h + .././samples/timelapse-sample/timelapse_summary_test.cpp +) + +SET(SAMPLE_CORE_FILES + ../samples/timelapse-sample/timelapse_summary.h + ../samples/timelapse-sample/timelapse_summary.cpp +) + +SET(SAMPLE_SOURCE + ${SAMPLE_CORE_FILES} + ${SAMPLE_UT_FILES} ) + IF(ENABLE_LINUX) list(APPEND UT_FILES test/virtualcamerasink_tests.cpp @@ -582,7 +592,7 @@ ENDIF(ENABLE_LINUX) add_executable(aprapipesut ${UT_FILES}) -add_executable(aprapipessampleut ${SAMPLE_UT_FILES}) +add_executable(aprapipessampleut ${SAMPLE_SOURCE}) IF(ENABLE_ARM64) target_include_directories ( aprapipesut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_ROOT} 
${JPEG_INCLUDE_DIR}) @@ -596,6 +606,19 @@ ENDIF (ENABLE_CUDA) find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) +target_include_directories ( aprapipessampleut PRIVATE +${JETSON_MULTIMEDIA_LIB_INCLUDE} +${FFMPEG_INCLUDE_DIRS} +${OpenCV_INCLUDE_DIRS} +${Boost_INCLUDE_DIRS} +${LIBMP4_INC_DIR} +${BARESIP_INC_DIR} +${LIBRE_INC_DIR} +${NVCODEC_INCLUDE_DIR} +test +) + + target_link_libraries(aprapipessampleut aprapipes ${JPEG_LIBRARIES} @@ -618,7 +641,7 @@ target_link_libraries(aprapipessampleut sfml-audio whisper::whisper ) - + target_link_libraries(aprapipesut aprapipes ${JPEG_LIBRARIES} diff --git a/samples/README.md b/samples/README.md new file mode 100644 index 000000000..846187ef3 --- /dev/null +++ b/samples/README.md @@ -0,0 +1,28 @@ +# ApraPipes Samples + +This Samples demonstrate the capability and usage of ApraPipes Modules into different applications. + +## Running the samples: +- The samples will be build along with the ApraPipes build. +- Each sample has their own executable. +- The executables can be found in the location: + - _build/samples/sample/Debug. Note that here sample in the path is the particular sample. +- To run the samples run this command: +``` +./filename.exe arg1 arg2 ... +``` +- Note that samples may have the arguments. The details are given in the below section. + +## Samples +### 1. Timelapse: +This samples demonstrates the usage of ApraPipes modules which can be used to generate a timelapse or summary of the video. +#### - Modules Used: +- Mp4Reader: To Read the input video. +- MotionVectorExtractor: To extract the frames from the input video for configured thershold. +- ColorChange: To convert frames given by MotionVectorExtractor from BGR to RGB and then RGB to YUV420PLANAR. +- H264Encoder +- Mp4Writer: To write the generated timelapse video to the output path. 
+- arguments: + - arg1: input video path + - arg2: output path +- The Unit test "timelapse_summary_test.cpp" is located inside the aprapipessampleut executable. Make sure to replace the "inputVideoPath" and "outputPath" with suitable paths. diff --git a/samples/timelapse-sample/CMakeLists.txt b/samples/timelapse-sample/CMakeLists.txt index 13fa38e88..4aa9c58ea 100644 --- a/samples/timelapse-sample/CMakeLists.txt +++ b/samples/timelapse-sample/CMakeLists.txt @@ -1,29 +1,26 @@ cmake_minimum_required(VERSION 3.22) -SET(CORE_FILES +set(TARGET generateTimeLapse) +SET(SAMPLE_FILES timelapse_summary.cpp + pipelineMain.cpp ) -SET(CORE_FILES_H - timelapse_summary.h +SET(SAMPLE_FILES_H +timelapse_summary.h ) SET(SOURCE - ${CORE_FILES} - ${CORE_FILES_H} + ${SAMPLE_FILES} + ${SAMPLE_FILES_H} ) -add_executable(timelapsesample ${SOURCE}) -target_include_directories (timelapsesample PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} +add_executable(${TARGET} ${SOURCE}) +target_include_directories ( ${TARGET} PRIVATE +${JETSON_MULTIMEDIA_LIB_INCLUDE} ${OpenCV_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS} ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} ) target_link_libraries( - timelapsesample + ${TARGET} aprapipes - ${JPEG_LIBRARIES} ${LIBMP4_LIB} ${OPENH264_LIB} ${Boost_LIBRARIES} @@ -33,13 +30,4 @@ target_link_libraries( ${NVCUDAToolkit_LIBS} ${NVCODEC_LIB} ${NVJPEGLIB_L4T} - ${CURSES_LIBRARIES} - ZXing::Core - ZXing::ZXing - BZip2::BZip2 - ZLIB::ZLIB - liblzma::liblzma - bigint::bigint - sfml-audio - whisper::whisper -) \ No newline at end of file + ) \ No newline at end of file diff --git a/samples/timelapse-sample/PipelineMain.cpp b/samples/timelapse-sample/PipelineMain.cpp new file mode 100644 index 000000000..42c0d54f9 --- /dev/null +++ b/samples/timelapse-sample/PipelineMain.cpp @@ -0,0 +1,31 @@ +#include "timelapse_summary.h" +#include +#include + +void main(int argc, char *argv[]) { + if (argc < 3) { + std::cerr << "Usage: " << 
argv[0] << " " + << std::endl; + } + + std::string videoPath = argv[argc - 2]; + std::string outFolderPath = argv[argc - 1]; + + TimelapsePipeline timelapsePipeline; + if (!timelapsePipeline.setupPipeline(videoPath, outFolderPath)) { + std::cerr << "Failed to setup pipeline." << std::endl; + } + + if (!timelapsePipeline.startPipeline()) { + std::cerr << "Failed to start pipeline." << std::endl; + } + + // Wait for the pipeline to run for 10 seconds + boost::this_thread::sleep_for(boost::chrono::minutes(2)); + + // Stop the pipeline + if (!timelapsePipeline.stopPipeline()) { + std::cerr << "Failed to stop pipeline." << std::endl; + } + std::cerr << "Saved Generated TimeLapse in" << outFolderPath << std::endl; +} \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index e9d23aa90..dd6a2bb6e 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -16,9 +16,9 @@ #include "ExternalSinkModule.h" TimelapsePipeline::TimelapsePipeline() - : pipeline("test"), cudaStream_(new ApraCudaStream()), - cuContext(new ApraCUcontext()), - h264ImageMetadata(new H264Metadata(0, 0)) {} + : timelapseSamplePipeline("test"), mCudaStream_(new ApraCudaStream()), + mCuContext(new ApraCUcontext()), + mH264ImageMetadata(new H264Metadata(0, 0)) {} bool TimelapsePipeline::setupPipeline(const std::string &videoPath, const std::string &outFolderPath) { @@ -34,124 +34,65 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); mp4ReaderProps.parseFS = true; mp4ReaderProps.readLoop = false; - mp4Reader = + //mp4Reader module is being used here to read the .mp4 videos + mMp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); auto motionExtractorProps = MotionVectorExtractorProps( MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, - 1); - 
motionExtractor = boost::shared_ptr( + 2); + //motionVectorExtractor module is being used to get the frames for the defined thershold + mMotionExtractor = boost::shared_ptr( new MotionVectorExtractor(motionExtractorProps)); - colorchange1 = boost::shared_ptr(new ColorConversion( + //convert frames from BGR to RGB + mColorchange1 = boost::shared_ptr(new ColorConversion( ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); - colorchange2 = boost::shared_ptr(new ColorConversion( + //convert frames from RGB to YUV420PLANAR + //the two step color change is done because H264Encoder takes YUV data and we don't have direct BGR to YUV. + mColorchange2 = boost::shared_ptr(new ColorConversion( ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); - sync = boost::shared_ptr( - new CudaStreamSynchronize(CudaStreamSynchronizeProps(cudaStream_))); - encoder = boost::shared_ptr(new H264EncoderNVCodec( - H264EncoderNVCodecProps(bitRateKbps, cuContext, gopLength, frameRate, + mSync = boost::shared_ptr( + new CudaStreamSynchronize(CudaStreamSynchronizeProps(mCudaStream_))); + mEncoder = boost::shared_ptr(new H264EncoderNVCodec( + H264EncoderNVCodecProps(bitRateKbps, mCuContext, gopLength, frameRate, profile, enableBFrames))); + //write the output video auto mp4WriterSinkProps = Mp4WriterSinkProps(UINT32_MAX, 10, 24, outFolderPath, true); mp4WriterSinkProps.recordedTSBasedDTS = false; - mp4WriterSink = + mMp4WriterSink = boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); - mp4Reader->addOutPutPin(h264ImageMetadata); + mMp4Reader->addOutPutPin(mH264ImageMetadata); auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); - mp4Reader->addOutPutPin(mp4Metadata); + mMp4Reader->addOutPutPin(mp4Metadata); std::vector mImagePin = - mp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); + mMp4Reader->getAllOutputPinsByType(FrameMetadata::H264_DATA); std::vector mDecodedPin = - motionExtractor->getAllOutputPinsByType(FrameMetadata::RAW_IMAGE); - 
mp4Reader->setNext(motionExtractor, mImagePin); - motionExtractor->setNext(colorchange1, mDecodedPin); - colorchange1->setNext(colorchange2); - copy = boost::shared_ptr( - new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, cudaStream_))); - colorchange2->setNext(copy); - copy->setNext(sync); - sync->setNext(encoder); - encoder->setNext(mp4WriterSink); - - pipeline.appendModule(mp4Reader); - pipeline.init(); - return true; + mMotionExtractor->getAllOutputPinsByType(FrameMetadata::RAW_IMAGE); + mMp4Reader->setNext(mMotionExtractor, mImagePin); + mMotionExtractor->setNext(mColorchange1, mDecodedPin); + mColorchange1->setNext(mColorchange2); + mCopy = boost::shared_ptr( + new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, mCudaStream_))); + mColorchange2->setNext(mCopy); + mCopy->setNext(mSync); + mSync->setNext(mEncoder); + mEncoder->setNext(mMp4WriterSink); + + timelapseSamplePipeline.appendModule(mMp4Reader); + if (timelapseSamplePipeline.init()) { + return true; + } + return false; } bool TimelapsePipeline::startPipeline() { - pipeline.run_all_threaded(); + timelapseSamplePipeline.run_all_threaded(); return true; } bool TimelapsePipeline::stopPipeline() { - pipeline.stop(); - pipeline.term(); - pipeline.wait_for_all(); + timelapseSamplePipeline.stop(); + timelapseSamplePipeline.term(); + timelapseSamplePipeline.wait_for_all(); return true; -} - -void TimelapsePipeline::test() { - auto sink = boost::shared_ptr(new ExternalSinkModule()); - encoder->setNext(sink); - - BOOST_TEST(mp4Reader->init()); - BOOST_TEST(motionExtractor->init()); - BOOST_TEST(colorchange1->init()); - BOOST_TEST(colorchange2->init()); - BOOST_TEST(copy->init()); - BOOST_TEST(sync->init()); - BOOST_TEST(encoder->init()); - BOOST_TEST(sink->init()); - - for (int i = 0; i <= 20; i++) { - mp4Reader->step(); - motionExtractor->step(); - } - colorchange1->step(); - colorchange2->step(); - copy->step(); - sync->step(); - encoder->step(); - - auto frames = sink->pop(); - 
BOOST_CHECK_EQUAL(frames.size(), 1); -} - -int main(int argc, char *argv[]) { - if (argc < 3) { - std::cerr << "Usage: " << argv[0] << " " << std::endl; - return 1; - } - - std::string videoPath = argv[argc - 2]; - std::string outFolderPath = argv[argc - 1]; - - LoggerProps loggerProps; - loggerProps.logLevel = boost::log::trivial::severity_level::info; - Logger::setLogLevel(boost::log::trivial::severity_level::info); - Logger::initLogger(loggerProps); - - TimelapsePipeline pipelineInstance; - - // Setup the pipeline - if (!pipelineInstance.setupPipeline(videoPath, outFolderPath)) { - std::cerr << "Failed to setup pipeline." << std::endl; - return 1; // Or any error code indicating failure - } - - //pipelineInstance.test(); - // Start the pipeline - if (!pipelineInstance.startPipeline()) { - std::cerr << "Failed to start pipeline." << std::endl; - return 1; // Or any error code indicating failure - } - - boost::this_thread::sleep_for(boost::chrono::minutes(120)); - - // Stop the pipeline - if (!pipelineInstance.stopPipeline()) { - std::cerr << "Failed to stop pipeline." 
<< std::endl; - return 1; // Or any error code indicating failure - } - - return 0; // Or any success code } \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary.h b/samples/timelapse-sample/timelapse_summary.h index e5caa1a6a..d1a90efac 100644 --- a/samples/timelapse-sample/timelapse_summary.h +++ b/samples/timelapse-sample/timelapse_summary.h @@ -13,19 +13,18 @@ class TimelapsePipeline { const std::string &outFolderPath); bool startPipeline(); bool stopPipeline(); - void test(); private: - PipeLine pipeline; - cudastream_sp cudaStream_; - apracucontext_sp cuContext; - framemetadata_sp h264ImageMetadata; - boost::shared_ptr mp4Reader; - boost::shared_ptr motionExtractor; - boost::shared_ptr colorchange1; - boost::shared_ptr colorchange2; - boost::shared_ptr sync; - boost::shared_ptr encoder; - boost::shared_ptr mp4WriterSink; - boost::shared_ptr copy; + PipeLine timelapseSamplePipeline; + cudastream_sp mCudaStream_; + apracucontext_sp mCuContext; + framemetadata_sp mH264ImageMetadata; + boost::shared_ptr mMp4Reader; + boost::shared_ptr mMotionExtractor; + boost::shared_ptr mColorchange1; + boost::shared_ptr mColorchange2; + boost::shared_ptr mSync; + boost::shared_ptr mEncoder; + boost::shared_ptr mMp4WriterSink; + boost::shared_ptr mCopy; }; \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary_test.cpp b/samples/timelapse-sample/timelapse_summary_test.cpp index a84a88dd6..e7bbce6b8 100644 --- a/samples/timelapse-sample/timelapse_summary_test.cpp +++ b/samples/timelapse-sample/timelapse_summary_test.cpp @@ -1,5 +1,6 @@ #include #include +#include "test_utils.h" #include "timelapse_summary.h" BOOST_AUTO_TEST_SUITE(generate_timelapse) @@ -7,7 +8,15 @@ BOOST_AUTO_TEST_SUITE(generate_timelapse) BOOST_AUTO_TEST_CASE(generateTimeLapse) { auto timelapsePipeline = boost::shared_ptr(new TimelapsePipeline()); - timelapsePipeline->test(); + + std::string inputVideoPath = "./data/timelapse_sample_videos"; + 
std::string outputPath = "./data/timelapse_videos"; + + BOOST_ASSERT(timelapsePipeline->setupPipeline(inputVideoPath,outputPath)); + BOOST_ASSERT(timelapsePipeline->startPipeline()); + boost::this_thread::sleep(boost::chrono::seconds(10)); + + BOOST_ASSERT(timelapsePipeline->stopPipeline()); } BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file From 133d93b13c9e36109004b4a363b944a5e918cb15 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 18 Jun 2024 16:15:04 +0530 Subject: [PATCH 44/49] removed not required libs and file formatting. --- base/CMakeLists.txt | 14 +------ samples/timelapse-sample/CMakeLists.txt | 2 - .../timelapse-sample/timelapse_summary.cpp | 40 ++++++++++--------- 3 files changed, 22 insertions(+), 34 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index d66e17f64..1c8de0105 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -621,27 +621,15 @@ test target_link_libraries(aprapipessampleut aprapipes - ${JPEG_LIBRARIES} ${LIBMP4_LIB} ${OPENH264_LIB} ${Boost_LIBRARIES} ${FFMPEG_LIBRARIES} ${OpenCV_LIBRARIES} - ${JETSON_LIBS} ${NVCUDAToolkit_LIBS} ${NVCODEC_LIB} - ${NVJPEGLIB_L4T} - ${CURSES_LIBRARIES} - ZXing::Core - ZXing::ZXing - BZip2::BZip2 - ZLIB::ZLIB - liblzma::liblzma - bigint::bigint - sfml-audio - whisper::whisper ) - + target_link_libraries(aprapipesut aprapipes ${JPEG_LIBRARIES} diff --git a/samples/timelapse-sample/CMakeLists.txt b/samples/timelapse-sample/CMakeLists.txt index 4aa9c58ea..877797d1d 100644 --- a/samples/timelapse-sample/CMakeLists.txt +++ b/samples/timelapse-sample/CMakeLists.txt @@ -26,8 +26,6 @@ target_link_libraries( ${Boost_LIBRARIES} ${FFMPEG_LIBRARIES} ${OpenCV_LIBRARIES} - ${JETSON_LIBS} ${NVCUDAToolkit_LIBS} ${NVCODEC_LIB} - ${NVJPEGLIB_L4T} ) \ No newline at end of file diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index dd6a2bb6e..df5ae9efc 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ 
b/samples/timelapse-sample/timelapse_summary.cpp @@ -1,19 +1,19 @@ -#include "Mp4ReaderSource.h" -#include "MotionVectorExtractor.h" -#include "Mp4WriterSink.h" -#include "H264Metadata.h" -#include "PipeLine.h" -#include "OverlayModule.h" -#include "ImageViewerModule.h" -#include "Mp4VideoMetadata.h" -#include "H264EncoderNVCodec.h" +#include "timelapse_summary.h" +#include "ColorConversionXForm.h" #include "CudaMemCopy.h" #include "CudaStreamSynchronize.h" -#include "ColorConversionXForm.h" +#include "ExternalSinkModule.h" #include "FileWriterModule.h" -#include "timelapse_summary.h" +#include "H264EncoderNVCodec.h" +#include "H264Metadata.h" +#include "ImageViewerModule.h" +#include "MotionVectorExtractor.h" +#include "Mp4ReaderSource.h" +#include "Mp4VideoMetadata.h" +#include "Mp4WriterSink.h" +#include "OverlayModule.h" +#include "PipeLine.h" #include -#include "ExternalSinkModule.h" TimelapsePipeline::TimelapsePipeline() : timelapseSamplePipeline("test"), mCudaStream_(new ApraCudaStream()), @@ -34,20 +34,21 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, Mp4ReaderSourceProps(videoPath, false, 0, true, false, false); mp4ReaderProps.parseFS = true; mp4ReaderProps.readLoop = false; - //mp4Reader module is being used here to read the .mp4 videos + // mp4Reader module is being used here to read the .mp4 videos mMp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); auto motionExtractorProps = MotionVectorExtractorProps( MotionVectorExtractorProps::MVExtractMethod::OPENH264, sendDecodedFrames, 2); - //motionVectorExtractor module is being used to get the frames for the defined thershold + // motionVectorExtractor module is being used to get the frames for the + // defined thershold mMotionExtractor = boost::shared_ptr( new MotionVectorExtractor(motionExtractorProps)); - //convert frames from BGR to RGB + // convert frames from BGR to RGB mColorchange1 = boost::shared_ptr(new ColorConversion( 
ColorConversionProps(ColorConversionProps::BGR_TO_RGB))); - //convert frames from RGB to YUV420PLANAR - //the two step color change is done because H264Encoder takes YUV data and we don't have direct BGR to YUV. + // convert frames from RGB to YUV420PLANAR + // the two step color change is done because H264Encoder takes YUV data mColorchange2 = boost::shared_ptr(new ColorConversion( ColorConversionProps(ColorConversionProps::RGB_TO_YUV420PLANAR))); mSync = boost::shared_ptr( @@ -55,8 +56,9 @@ bool TimelapsePipeline::setupPipeline(const std::string &videoPath, mEncoder = boost::shared_ptr(new H264EncoderNVCodec( H264EncoderNVCodecProps(bitRateKbps, mCuContext, gopLength, frameRate, profile, enableBFrames))); - //write the output video - auto mp4WriterSinkProps = Mp4WriterSinkProps(UINT32_MAX, 10, 24, outFolderPath, true); + // write the output video + auto mp4WriterSinkProps = + Mp4WriterSinkProps(UINT32_MAX, 10, 24, outFolderPath, true); mp4WriterSinkProps.recordedTSBasedDTS = false; mMp4WriterSink = boost::shared_ptr(new Mp4WriterSink(mp4WriterSinkProps)); From fab05b3b2683c8b170ab3720533d0674e701b89f Mon Sep 17 00:00:00 2001 From: SatwikSShanbhag Date: Mon, 24 Jun 2024 20:10:45 +0530 Subject: [PATCH 45/49] restored files --- .../CMakeLists.txt | 13 ++--- .../GenerateThumbnailsPipeline.cpp | 51 ++++++++++++------- .../GenerateThumbnailsPipeline.h | 14 ++--- .../pipelineMain.cpp | 10 ++-- ...test_generate_thumbnail_from_mp4_video.cpp | 12 ++--- .../play_mp4_from_beginning/CMakeLists.txt | 22 +++----- .../PlayMp4VideoFromBeginning.cpp | 41 ++++++++++----- .../PlayMp4VideoFromBeginning.h | 8 +-- .../play_mp4_from_beginning/pipelineMain.cpp | 3 +- .../test_play_mp4_video_from_beginning.cpp | 28 +++++----- 10 files changed, 110 insertions(+), 92 deletions(-) diff --git a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt index 0a210d28f..f80673668 100644 --- 
a/samples/create_thumbnail_from_mp4_video/CMakeLists.txt +++ b/samples/create_thumbnail_from_mp4_video/CMakeLists.txt @@ -1,25 +1,22 @@ cmake_minimum_required(VERSION 3.22) set(TARGET generateThumbnailFromMp4Video) -SET(CORE_FILES +SET(SAMPLE_FILES GenerateThumbnailsPipeline.cpp pipelineMain.cpp ) -SET(CORE_FILES_H +SET(SAMPLE_FILES_H GenerateThumbnailsPipeline.h ) SET(SOURCE - ${CORE_FILES} - ${CORE_FILES_H} + ${SAMPLE_FILES} + ${SAMPLE_FILES_H} ) add_executable(${TARGET} ${SOURCE}) target_include_directories ( ${TARGET} PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS} ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} ) target_link_libraries( @@ -31,9 +28,7 @@ target_link_libraries( ${Boost_LIBRARIES} ${FFMPEG_LIBRARIES} ${OpenCV_LIBRARIES} - ${JETSON_LIBS} ${NVCUDAToolkit_LIBS} ${NVCODEC_LIB} ${NVJPEGLIB_L4T} - ${CURSES_LIBRARIES} ) \ No newline at end of file diff --git a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp index a0bba2abd..16ffe45cb 100644 --- a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp +++ b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp @@ -8,13 +8,14 @@ #include "H264Decoder.h" #include "H264Metadata.h" #include "JPEGEncoderNVJPEG.h" +#include "Logger.h" #include "Mp4ReaderSource.h" #include "Mp4VideoMetadata.h" #include "ValveModule.h" #include GenerateThumbnailsPipeline::GenerateThumbnailsPipeline() - : pipeLine("pipeline") {} + : pipeLine("thumnailSamplePipeline") {} bool GenerateThumbnailsPipeline::setUpPipeLine( const std::string &videoPath, const std::string &outFolderPath) { @@ -25,43 +26,57 @@ bool GenerateThumbnailsPipeline::setUpPipeLine( auto frameType = FrameMetadata::FrameType::H264_DATA; auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, parseFS, 0, true, false, false); - mp4Reader = + 
//initializing source Mp4 reader to read Mp4 video + mMp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); - mp4Reader->addOutPutPin(h264ImageMetadata); + mMp4Reader->addOutPutPin(h264ImageMetadata); auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); - mp4Reader->addOutPutPin(mp4Metadata); + mMp4Reader->addOutPutPin(mp4Metadata); std::vector mImagePin; - mImagePin = mp4Reader->getAllOutputPinsByType(frameType); + mImagePin = mMp4Reader->getAllOutputPinsByType(frameType); - decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); - mp4Reader->setNext(decoder, mImagePin); + //initializing H264 decoder to decode frame in H264 format + mDecoder = + boost::shared_ptr(new H264Decoder(H264DecoderProps())); + //Selecting an image pin of H264 data frame type is necessary because the decoder processes H264 frames for decoding. + mMp4Reader->setNext(mDecoder, mImagePin); - valve = boost::shared_ptr(new ValveModule(ValveModuleProps(0))); - decoder->setNext(valve); + //Initializing the valve to send only one frame. It is currently set to 0, meaning no frames are captured. 
+ mValve = boost::shared_ptr(new ValveModule(ValveModuleProps(0))); + mDecoder->setNext(mValve); + //initialize cuda memory auto stream = cudastream_sp(new ApraCudaStream); - cudaCopy = boost::shared_ptr( + mCudaCopy = boost::shared_ptr( new CudaMemCopy(CudaMemCopyProps(cudaMemcpyHostToDevice, stream))); - valve->setNext(cudaCopy); + mValve->setNext(mCudaCopy); - jpegEncoder = boost::shared_ptr( + //initializing Jpeg encoder to encode the frame in jpeg format + mJpegEncoder = boost::shared_ptr( new JPEGEncoderNVJPEG(JPEGEncoderNVJPEGProps(stream))); - cudaCopy->setNext(jpegEncoder); + mCudaCopy->setNext(mJpegEncoder); - fileWriter = boost::shared_ptr( + //initilizing file writer as sink to write frame at given path + mFileWriter = boost::shared_ptr( new FileWriterModule(FileWriterModuleProps(outFolderPath))); - jpegEncoder->setNext(fileWriter); + mJpegEncoder->setNext(mFileWriter); return true; } bool GenerateThumbnailsPipeline::startPipeLine() { - pipeLine.appendModule(mp4Reader); - pipeLine.init(); + pipeLine.appendModule(mMp4Reader); + if (!pipeLine.init()) { + throw AIPException( + AIP_FATAL, + "Engine Pipeline init failed. Check IPEngine Logs for more details."); + return false; + } pipeLine.run_all_threaded(); - valve->allowFrames(1); + //allowing only one frame to get captured. 
+ mValve->allowFrames(1); return true; } diff --git a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h index cabeeff16..b421c74f0 100644 --- a/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h +++ b/samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h @@ -19,12 +19,12 @@ class GenerateThumbnailsPipeline private: PipeLine pipeLine; - boost::shared_ptr valve; - boost::shared_ptr mp4Reader; - boost::shared_ptr decoder; - boost::shared_ptr colorchange; - boost::shared_ptr cudaCopy; - boost::shared_ptr jpegEncoder; - boost::shared_ptr fileWriter; + boost::shared_ptr mValve; + boost::shared_ptr mMp4Reader; + boost::shared_ptr mDecoder; + boost::shared_ptr mColorchange; + boost::shared_ptr mCudaCopy; + boost::shared_ptr mJpegEncoder; + boost::shared_ptr mFileWriter; }; diff --git a/samples/create_thumbnail_from_mp4_video/pipelineMain.cpp b/samples/create_thumbnail_from_mp4_video/pipelineMain.cpp index 759d47475..0b0d4f11b 100644 --- a/samples/create_thumbnail_from_mp4_video/pipelineMain.cpp +++ b/samples/create_thumbnail_from_mp4_video/pipelineMain.cpp @@ -11,12 +11,12 @@ void main(int argc, char *argv[]) { std::string videoPath = argv[argc - 2]; std::string outFolderPath = argv[argc - 1]; - GenerateThumbnailsPipeline pipelineInstance; - if (!pipelineInstance.setUpPipeLine(videoPath, outFolderPath)) { + GenerateThumbnailsPipeline thumbnailPipeline; + if (!thumbnailPipeline.setUpPipeLine(videoPath, outFolderPath)) { std::cerr << "Failed to setup pipeline." << std::endl; } - if (!pipelineInstance.startPipeLine()) { + if (!thumbnailPipeline.startPipeLine()) { std::cerr << "Failed to start pipeline." 
<< std::endl; } @@ -24,7 +24,9 @@ void main(int argc, char *argv[]) { boost::this_thread::sleep_for(boost::chrono::seconds(5)); // Stop the pipeline - if (!pipelineInstance.stopPipeLine()) { + if (!thumbnailPipeline.stopPipeLine()) { std::cerr << "Failed to stop pipeline." << std::endl; + } else { + std::cerr << "Saved Generated Thumbnail in <" << outFolderPath << ">"; } } \ No newline at end of file diff --git a/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp b/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp index 9246eb4b8..a4232aa65 100644 --- a/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp +++ b/samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp @@ -9,20 +9,20 @@ BOOST_AUTO_TEST_CASE(generateThumbnails_from_mp4) { auto generateThumbnailPipeline = boost::shared_ptr( new GenerateThumbnailsPipeline()); - std::string videoPath = "C:/APRA/fork/ApraPipes/data/Mp4_videos/h264_video_metadata/20230514/0011/1686723796848.mp4"; - std::string outFolderPath = "C:/APRA/fork/ApraPipes/samples/create_thumbnail_from_mp4_video/data/generated_thumbnail/thumbnail_????.jpg"; - generateThumbnailPipeline->setUpPipeLine(videoPath, outFolderPath); - generateThumbnailPipeline->startPipeLine(); + std::string videoPath = "../../data/Mp4_videos/h264_video_metadata/20230514/0011/1686723796848.mp4"; + std::string outFolderPath = "data/generated_thumbnail/thumbnail_????.jpg"; + BOOST_CHECK_NO_THROW(generateThumbnailPipeline->setUpPipeLine(videoPath, outFolderPath)); + BOOST_CHECK_NO_THROW(generateThumbnailPipeline->startPipeLine()); boost::this_thread::sleep_for(boost::chrono::seconds(5)); const uint8_t *pReadDataTest = nullptr; unsigned int readDataSizeTest = 0U; - BOOST_TEST(Test_Utils::readFile("C:/APRA/fork/ApraPipes/samples/create_thumbnail_from_mp4_video/data/test_thumbnail/sample_thumbnail.jpg", + 
BOOST_TEST(Test_Utils::readFile("data/test_thumbnail/sample_thumbnail.jpg", pReadDataTest, readDataSizeTest)); Test_Utils::saveOrCompare( - "C:/APRA/fork/ApraPipes/samples/create_thumbnail_from_mp4_video/data/generated_thumbnail/thumbnail_0000.jpg", pReadDataTest, + "data/generated_thumbnail/thumbnail_0000.jpg", pReadDataTest, readDataSizeTest, 0); generateThumbnailPipeline->stopPipeLine(); diff --git a/samples/play_mp4_from_beginning/CMakeLists.txt b/samples/play_mp4_from_beginning/CMakeLists.txt index 8d14c5e00..7d46995bc 100644 --- a/samples/play_mp4_from_beginning/CMakeLists.txt +++ b/samples/play_mp4_from_beginning/CMakeLists.txt @@ -1,26 +1,21 @@ cmake_minimum_required(VERSION 3.22) set(TARGET play_mp4_from_beginning) -SET(CORE_FILES +SET(SAMPLE_FILES PlayMp4VideoFromBeginning.cpp pipelineMain.cpp ) -SET(CORE_FILES_H +SET(SAMPLE_FILES_H PlayMp4VideoFromBeginning.h ) SET(SOURCE - ${CORE_FILES} - ${CORE_FILES_H} + ${SAMPLE_FILES} + ${SAMPLE_FILES_H} ) add_executable(${TARGET} ${SOURCE}) target_include_directories ( ${TARGET} PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS} ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} ) target_link_libraries( ${TARGET} @@ -29,11 +24,10 @@ target_link_libraries( ${LIBMP4_LIB} ${OPENH264_LIB} ${Boost_LIBRARIES} - ${FFMPEG_LIBRARIES} ${OpenCV_LIBRARIES} - ${JETSON_LIBS} ${NVCUDAToolkit_LIBS} - ${NVCODEC_LIB} ${NVJPEGLIB_L4T} - ${CURSES_LIBRARIES} - ) \ No newline at end of file + ${NVCODEC_LIB} + ${FFMPEG_LIBRARIES} + ) + \ No newline at end of file diff --git a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp index 7b6a011a1..a3d9a0892 100644 --- a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp +++ b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp @@ -15,7 +15,8 @@ #include "Mp4VideoMetadata.h" #include 
-PlayMp4VideoFromBeginning::PlayMp4VideoFromBeginning() : pipeLine("pipeline") {} +PlayMp4VideoFromBeginning::PlayMp4VideoFromBeginning() + : pipeLine("PlayMp4videoFromBeginingSamplePipline") {} bool PlayMp4VideoFromBeginning::setUpPipeLine(const std::string &videoPath) { // Implementation @@ -25,37 +26,49 @@ bool PlayMp4VideoFromBeginning::setUpPipeLine(const std::string &videoPath) { auto mp4ReaderProps = Mp4ReaderSourceProps(videoPath, parseFS, 0, true, true, false); mp4ReaderProps.fps = 24; - mp4Reader = + // initializing source Mp4 reader to read Mp4 video + mMp4Reader = boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); - mp4Reader->addOutPutPin(h264ImageMetadata); + mMp4Reader->addOutPutPin(h264ImageMetadata); auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); - mp4Reader->addOutPutPin(mp4Metadata); + mMp4Reader->addOutPutPin(mp4Metadata); std::vector mImagePin; - mImagePin = mp4Reader->getAllOutputPinsByType(frameType); + mImagePin = mMp4Reader->getAllOutputPinsByType(frameType); - decoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); - mp4Reader->setNext(decoder, mImagePin); + // initializing H264 decoder to decode frame in H264 format + mDecoder = boost::shared_ptr(new H264Decoder(H264DecoderProps())); + // Selecting an image pin of H264 data frame type is necessary because the + // decoder processes H264 frames for decoding. 
+ mMp4Reader->setNext(mDecoder, mImagePin); + + //initializing conversion type module to convert YUV420 frame to RGB auto conversionType = ColorConversionProps::ConversionType::YUV420PLANAR_TO_RGB; auto metadata = framemetadata_sp(new RawImagePlanarMetadata( 1280, 720, ImageMetadata::ImageType::YUV420, size_t(0), CV_8U)); - colorchange = boost::shared_ptr( + mColorchange = boost::shared_ptr( new ColorConversion(ColorConversionProps(conversionType))); - decoder->setNext(colorchange); + mDecoder->setNext(mColorchange); - imageViewerSink = boost::shared_ptr( + //initializing imageViewer module as sink to show video on screen + mImageViewerSink = boost::shared_ptr( new ImageViewerModule(ImageViewerModuleProps("imageview"))); - colorchange->setNext(imageViewerSink); + mColorchange->setNext(mImageViewerSink); return true; } bool PlayMp4VideoFromBeginning::startPipeLine() { - pipeLine.appendModule(mp4Reader); - pipeLine.init(); + pipeLine.appendModule(mMp4Reader); + if (!pipeLine.init()) { + throw AIPException( + AIP_FATAL, + "Engine Pipeline init failed. 
Check IPEngine Logs for more details."); + return false; + } pipeLine.run_all_threaded(); return true; } @@ -69,6 +82,6 @@ bool PlayMp4VideoFromBeginning::stopPipeLine() { bool PlayMp4VideoFromBeginning::flushQueuesAndSeek() { pipeLine.flushAllQueues(); - mp4Reader->randomSeek(1686723796848, false); + mMp4Reader->randomSeek(1686723796848, false); return true; } diff --git a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h index 6f4458452..d489e752c 100644 --- a/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h +++ b/samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h @@ -14,10 +14,10 @@ class PlayMp4VideoFromBeginning { bool flushQueuesAndSeek(); - boost::shared_ptr mp4Reader; - boost::shared_ptr imageViewerSink; - boost::shared_ptr decoder; - boost::shared_ptr colorchange; + boost::shared_ptr mMp4Reader; + boost::shared_ptr mImageViewerSink; + boost::shared_ptr mDecoder; + boost::shared_ptr mColorchange; private: PipeLine pipeLine; diff --git a/samples/play_mp4_from_beginning/pipelineMain.cpp b/samples/play_mp4_from_beginning/pipelineMain.cpp index 6eba9548b..3274cf83e 100644 --- a/samples/play_mp4_from_beginning/pipelineMain.cpp +++ b/samples/play_mp4_from_beginning/pipelineMain.cpp @@ -11,8 +11,7 @@ void main(int argc, char *argv[]) { PlayMp4VideoFromBeginning pipelineInstance; - - if (!pipelineInstance.setUpPipeLine(videoPath)){ + if (!pipelineInstance.setUpPipeLine(videoPath)) { std::cerr << "Failed to setup pipeline." 
<< std::endl; } if (!pipelineInstance.startPipeLine()) { diff --git a/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp b/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp index e99a02cc4..e3e170a23 100644 --- a/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp +++ b/samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp @@ -30,17 +30,17 @@ BOOST_AUTO_TEST_CASE(play_mp4_from_beginning_flush_queue_test) { playMp4VideoFromBeginning->setUpPipeLine(videoPath); auto sink = boost::shared_ptr(new SinkModule(SinkModuleProps())); - playMp4VideoFromBeginning->colorchange->setNext(sink); - BOOST_CHECK(playMp4VideoFromBeginning->mp4Reader->init()); - BOOST_CHECK(playMp4VideoFromBeginning->decoder->init()); - BOOST_CHECK(playMp4VideoFromBeginning->colorchange->init()); + playMp4VideoFromBeginning->mColorchange->setNext(sink); + BOOST_CHECK(playMp4VideoFromBeginning->mMp4Reader->init()); + BOOST_CHECK(playMp4VideoFromBeginning->mDecoder->init()); + BOOST_CHECK(playMp4VideoFromBeginning->mColorchange->init()); BOOST_CHECK(sink->init()); for (int i = 0; i <= 20; i++) { - playMp4VideoFromBeginning->mp4Reader->step(); - playMp4VideoFromBeginning->decoder->step(); + playMp4VideoFromBeginning->mMp4Reader->step(); + playMp4VideoFromBeginning->mDecoder->step(); } - playMp4VideoFromBeginning->colorchange->step(); + playMp4VideoFromBeginning->mColorchange->step(); auto frames = sink->pop(); auto frame = frames.size(); @@ -62,21 +62,21 @@ BOOST_AUTO_TEST_CASE(play_mp4_from_beginning_seek_test) { "h264_video_metadata/20230514/0011/1686723796848.mp4"; playMp4VideoFromBeginning->setUpPipeLine(videoPath); auto sink = boost::shared_ptr(new SinkModule(SinkModuleProps())); - playMp4VideoFromBeginning->mp4Reader->setNext(sink); + playMp4VideoFromBeginning->mMp4Reader->setNext(sink); - BOOST_CHECK(playMp4VideoFromBeginning->mp4Reader->init()); - BOOST_CHECK(playMp4VideoFromBeginning->decoder->init()); - 
BOOST_CHECK(playMp4VideoFromBeginning->colorchange->init()); + BOOST_CHECK(playMp4VideoFromBeginning->mMp4Reader->init()); + BOOST_CHECK(playMp4VideoFromBeginning->mDecoder->init()); + BOOST_CHECK(playMp4VideoFromBeginning->mColorchange->init()); BOOST_CHECK(sink->init()); - playMp4VideoFromBeginning->mp4Reader->step(); + playMp4VideoFromBeginning->mMp4Reader->step(); auto frames = sink->pop(); auto imgFrame = frames.begin()->second; uint64_t skipTS = 1686723796848; - playMp4VideoFromBeginning->mp4Reader->randomSeek(skipTS, false); - playMp4VideoFromBeginning->mp4Reader->step(); + playMp4VideoFromBeginning->mMp4Reader->randomSeek(skipTS, false); + playMp4VideoFromBeginning->mMp4Reader->step(); frames = sink->pop(); imgFrame = frames.begin()->second; BOOST_TEST(imgFrame->timestamp == 1686723796848); From 064cce51b5b0baa478169a139540935235c60fe0 Mon Sep 17 00:00:00 2001 From: nsabale7 Date: Tue, 9 Jul 2024 12:20:01 +0530 Subject: [PATCH 46/49] changes in constructor --- samples/face_detection_cpu/CMakeLists.txt | 14 -------------- samples/face_detection_cpu/face_detection_cpu.cpp | 7 +++---- samples/face_detection_cpu/face_detection_cpu.h | 2 +- samples/face_detection_cpu/pipelineMain.cpp | 2 +- .../face_detection_cpu/test_face_detection_cpu.cpp | 4 ++-- 5 files changed, 7 insertions(+), 22 deletions(-) diff --git a/samples/face_detection_cpu/CMakeLists.txt b/samples/face_detection_cpu/CMakeLists.txt index 14a0db98e..97cdd62de 100644 --- a/samples/face_detection_cpu/CMakeLists.txt +++ b/samples/face_detection_cpu/CMakeLists.txt @@ -15,28 +15,14 @@ SET(SOURCE add_executable(${TARGET} ${SOURCE}) target_include_directories ( ${TARGET} PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS} - ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} ) target_link_libraries( ${TARGET} aprapipes ${JPEG_LIBRARIES} - ${LIBMP4_LIB} - ${OPENH264_LIB} ${Boost_LIBRARIES} - ${FFMPEG_LIBRARIES} 
${OpenCV_LIBRARIES} - ${JETSON_LIBS} - ${NVCUDAToolkit_LIBS} - ${NVCODEC_LIB} - ${NVJPEGLIB_L4T} - ${CURSES_LIBRARIES} ) \ No newline at end of file diff --git a/samples/face_detection_cpu/face_detection_cpu.cpp b/samples/face_detection_cpu/face_detection_cpu.cpp index c115d3d68..a24f5a2ba 100644 --- a/samples/face_detection_cpu/face_detection_cpu.cpp +++ b/samples/face_detection_cpu/face_detection_cpu.cpp @@ -2,7 +2,6 @@ #include "ImageViewerModule.h" #include "face_detection_cpu.h" #include "OverlayModule.h" -#include "FacialLandmarksCV.h" #include "ImageDecoderCV.h" #include "FaceDetectorXform.h" #include "ColorConversionXForm.h" @@ -10,11 +9,11 @@ FaceDetectionCPU::FaceDetectionCPU() : faceDetectionCPUSamplePipeline("faceDetectionCPUSamplePipeline") {} -bool FaceDetectionCPU::setupPipeline() { - WebCamSourceProps webCamSourceprops(0, 640, 480); +bool FaceDetectionCPU::setupPipeline(const int &cameraId, const double &scaleFactor, const double &threshold, const std::string &faceDetectionConfiguration, const std::string &faceDetectionWeight) { + WebCamSourceProps webCamSourceprops(cameraId); mSource = boost::shared_ptr(new WebCamSource(webCamSourceprops)); - FaceDetectorXformProps faceDetectorProps(1.0, 0.8, "./data/assets/deploy.prototxt", "./data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel"); + FaceDetectorXformProps faceDetectorProps(scaleFactor, threshold, faceDetectionConfiguration, faceDetectionWeight); mFaceDetector = boost::shared_ptr(new FaceDetectorXform(faceDetectorProps)); mSource->setNext(mFaceDetector); diff --git a/samples/face_detection_cpu/face_detection_cpu.h b/samples/face_detection_cpu/face_detection_cpu.h index 8bf01b4c1..3d6360962 100644 --- a/samples/face_detection_cpu/face_detection_cpu.h +++ b/samples/face_detection_cpu/face_detection_cpu.h @@ -10,7 +10,7 @@ class FaceDetectionCPU { public: FaceDetectionCPU(); - bool setupPipeline(); + bool setupPipeline(const int &cameraId, const double &scaleFactor, const double &threshold, 
const std::string &faceDetectionConfiguration, const std::string &faceDetectionWeight); bool startPipeline(); bool stopPipeline(); private: diff --git a/samples/face_detection_cpu/pipelineMain.cpp b/samples/face_detection_cpu/pipelineMain.cpp index 08d4f22c7..3f35b8687 100644 --- a/samples/face_detection_cpu/pipelineMain.cpp +++ b/samples/face_detection_cpu/pipelineMain.cpp @@ -4,7 +4,7 @@ void main() { FaceDetectionCPU faceDetectionCPUSamplePipeline; - if (!faceDetectionCPUSamplePipeline.setupPipeline()) { + if (!faceDetectionCPUSamplePipeline.setupPipeline(0, 1.0, 0.7, "../../data/assets/deploy.prototxt", "../../data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel")) { std::cerr << "Failed to setup pipeline." << std::endl; } diff --git a/samples/face_detection_cpu/test_face_detection_cpu.cpp b/samples/face_detection_cpu/test_face_detection_cpu.cpp index 9f0817d76..6440070d2 100644 --- a/samples/face_detection_cpu/test_face_detection_cpu.cpp +++ b/samples/face_detection_cpu/test_face_detection_cpu.cpp @@ -7,10 +7,10 @@ BOOST_AUTO_TEST_SUITE(test_face_detection_cpu) BOOST_AUTO_TEST_CASE(face_detection_cpu) { auto faceDetectionCPUPipeline = boost::shared_ptr(new FaceDetectionCPU()); - faceDetectionCPUPipeline->setupPipeline(); + faceDetectionCPUPipeline->setupPipeline(0, 1.0, 0.7, "./data/assets/deploy.prototxt", "./data/assets/res10_300x300_ssd_iter_140000_fp16.caffemodel"); faceDetectionCPUPipeline->startPipeline(); - boost::this_thread::sleep_for(boost::chrono::seconds(50)); + boost::this_thread::sleep_for(boost::chrono::seconds(25)); faceDetectionCPUPipeline->stopPipeline(); } From 8cbb00bbb873d8d797987648d0f56b61c42e31ce Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 9 Jul 2024 16:35:57 +0530 Subject: [PATCH 47/49] relay sample. 
--- base/CMakeLists.txt | 45 +++++++ base/include/MotionVectorExtractor.h | 6 +- base/src/MotionVectorExtractor.cpp | 26 ++-- base/src/RTSPClientSrc.cpp | 111 +++++++--------- samples/README.md | 16 ++- samples/cMakeLists.txt | 10 ++ samples/relay-sample/PipelineMain.cpp | 50 ++++++++ samples/relay-sample/cMakeLists.txt | 42 ++++++ samples/relay-sample/relay_sample.cpp | 120 ++++++++++++++++++ samples/relay-sample/relay_sample.h | 31 +++++ samples/relay-sample/relay_sample_test.cpp | 52 ++++++++ samples/timelapse-sample/CMakeLists.txt | 15 ++- .../timelapse-sample/timelapse_summary.cpp | 2 +- samples/timelapse-sample/timelapse_summary.h | 8 +- 14 files changed, 445 insertions(+), 89 deletions(-) create mode 100644 samples/cMakeLists.txt create mode 100644 samples/relay-sample/PipelineMain.cpp create mode 100644 samples/relay-sample/cMakeLists.txt create mode 100644 samples/relay-sample/relay_sample.cpp create mode 100644 samples/relay-sample/relay_sample.h create mode 100644 samples/relay-sample/relay_sample_test.cpp diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index f9ba91892..515ab41c7 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -195,6 +195,9 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp + .././samples/relay-sample/relay_sample.cpp + .././samples/relay-sample/relay_sample.cpp + .././samples/timelapse-sample/timelapse_summary.h ) SET(CORE_FILES_H @@ -257,6 +260,9 @@ SET(CORE_FILES_H include/OrderedCacheOfFiles.h include/TestSignalGeneratorSrc.h include/AbsControlModule.h + .././samples/relay-sample/relay_sample.h + .././samples/relay-sample/relay_sample.h + .././samples/timelapse-sample/timelapse_summary.cpp ) IF(ENABLE_WINDOWS) @@ -515,6 +521,9 @@ target_include_directories ( aprapipes PRIVATE ${BARESIP_INC_DIR} ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} +./../samples/relay-sample +./../samples/relay-sample +./../samples/timelapse-sample ) # aprapipes Unit Tests @@ -628,9 +637,13 @@ 
SET(SAMPLE_UT_FILES test/test_utils.cpp test/test_utils.h ../samples/face_detection_cpu/test_face_detection_cpu.cpp + ../samples/relay-sample/relay_sample_test.cpp + .././samples/timelapse-sample/timelapse_summary_test.cpp ) SET(SAMPLE_CORE_FILES + ../samples/relay-sample/relay_sample.cpp + ../samples/relay-sample/relay_sample.h ../samples/face_detection_cpu/face_detection_cpu.h ../samples/face_detection_cpu/face_detection_cpu.cpp test/test_utils.h @@ -640,6 +653,8 @@ SET(SAMPLE_CORE_FILES ) SET(SAMPLE_CORE_FILES + ../samples/relay-sample/relay_sample.cpp + ../samples/relay-sample/relay_sample.h ../samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h ../samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp ../samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h @@ -668,6 +683,8 @@ ENDIF(ENABLE_LINUX) add_executable(aprapipesut ${UT_FILES}) add_executable(aprapipessampleut ${SAMPLE_SOURCE}) +add_executable(aprapipessampleut ${SAMPLE_SOURCE}) + target_include_directories ( aprapipessampleut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} @@ -784,6 +801,33 @@ target_link_libraries(aprapipesut whisper::whisper ) + target_include_directories ( aprapipessampleut PRIVATE + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} + test + ) + + target_link_libraries(aprapipessampleut + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} + ) + IF(ENABLE_WINDOWS) file(COPY ${RUNTIME_DLLS} DESTINATION Debug/) file(COPY ${RUNTIME_DLLS} DESTINATION Release/) @@ -793,6 +837,7 @@ IF(ENABLE_WINDOWS) ENDIF(ENABLE_WINDOWS) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../samples ${CMAKE_CURRENT_BINARY_DIR}/samples) 
include(GNUInstallDirs) diff --git a/base/include/MotionVectorExtractor.h b/base/include/MotionVectorExtractor.h index 124d85af6..6aa39f3d7 100644 --- a/base/include/MotionVectorExtractor.h +++ b/base/include/MotionVectorExtractor.h @@ -16,9 +16,9 @@ class MotionVectorExtractorProps : public ModuleProps { : MVExtract(_MVExtractMethod), sendDecodedFrame(_sendDecodedFrame), motionVectorThreshold(_motionVectorThreshold) {} - size_t getSerializeSize() { - return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + - sizeof(motionVectorThreshold); + size_t getSerializeSize() + { + return ModuleProps::getSerializeSize() + sizeof(sendDecodedFrame) + sizeof(motionVectorThreshold); } bool sendDecodedFrame = false; int motionVectorThreshold; diff --git a/base/src/MotionVectorExtractor.cpp b/base/src/MotionVectorExtractor.cpp index dd3101d7b..fdfdf29ac 100644 --- a/base/src/MotionVectorExtractor.cpp +++ b/base/src/MotionVectorExtractor.cpp @@ -1,20 +1,22 @@ -#include #include -extern "C" { +#include +extern "C" +{ +#include #include #include #include -#include -#include #include +#include } +#include "MotionVectorExtractor.h" #include "H264Metadata.h" #include "H264ParserUtils.h" -#include "MotionVectorExtractor.h" -#include "Overlay.h" #include "Utils.h" +#include "Overlay.h" -class MvExtractDetailAbs { +class MvExtractDetailAbs +{ public: MvExtractDetailAbs( MotionVectorExtractorProps props, @@ -47,7 +49,8 @@ class MvExtractDetailAbs { cv::Mat bgrImg; bool motionFound = false; }; -class DetailFfmpeg : public MvExtractDetailAbs { +class DetailFfmpeg : public MvExtractDetailAbs +{ public: DetailFfmpeg( MotionVectorExtractorProps props, @@ -66,7 +69,8 @@ class DetailFfmpeg : public MvExtractDetailAbs { AVFrame* avFrame = NULL; AVCodecContext* decoderContext = NULL; }; -class DetailOpenH264 : public MvExtractDetailAbs { +class DetailOpenH264 : public MvExtractDetailAbs +{ public: DetailOpenH264( MotionVectorExtractorProps props, @@ -310,6 +314,10 @@ void 
DetailOpenH264::getMotionVectors(frame_container& frames, for (auto& circleOverlay : circleOverlays) { compositeOverlay.add(&circleOverlay); } + if (mMotionVectorSize != mWidth * mHeight * 8 && sendOverlayFrame == true) + { + std::vector circleOverlays; + CompositeOverlay compositeOverlay; if (circleOverlays.size()) { DrawingOverlay drawingOverlay; diff --git a/base/src/RTSPClientSrc.cpp b/base/src/RTSPClientSrc.cpp index c5fca7213..656d67a58 100644 --- a/base/src/RTSPClientSrc.cpp +++ b/base/src/RTSPClientSrc.cpp @@ -5,7 +5,6 @@ using namespace std; - #include #include @@ -38,24 +37,23 @@ FrameMetadata::FrameType getFrameType_fromFFMPEG(AVMediaType avMediaType, AVCode case AV_CODEC_ID_BMP: return FrameMetadata::BMP_IMAGE; default: - return FrameMetadata::GENERAL; + return FrameMetadata::GENERAL; } } - return FrameMetadata::GENERAL; //for everything else we may not have a match + return FrameMetadata::GENERAL; // for everything else we may not have a match } - class RTSPClientSrc::Detail { public: - Detail(RTSPClientSrc* m,std::string path, bool useTCP) :myModule(m), path(path), bConnected(false), bUseTCP(useTCP){} + Detail(RTSPClientSrc* m, std::string path, bool useTCP) : myModule(m), path(path), bConnected(false), bUseTCP(useTCP) {} ~Detail() { destroy(); } void destroy() { - if(nullptr!= pFormatCtx) + if (nullptr != pFormatCtx) avformat_close_input(&pFormatCtx); } - + bool connect() { avformat_network_init(); @@ -65,12 +63,12 @@ class RTSPClientSrc::Detail AVDictionary* avdic = NULL; - av_dict_set(&avdic, "rtsp_transport", (bUseTCP)?"tcp":"udp", 0); + av_dict_set(&avdic, "rtsp_transport", (bUseTCP) ? "tcp" : "udp", 0); av_dict_set(&avdic, "max_delay", "100", 0); if (avformat_open_input(&pFormatCtx, path.c_str(), NULL, &avdic) != 0) { - LOG_ERROR << "can't open the URL." 
<< path <nb_streams; i++) { auto pCodecCtx = pFormatCtx->streams[i]->codec; - LOG_INFO << av_get_media_type_string (pCodecCtx->codec_type)<<" Codec: " << avcodec_get_name(pCodecCtx->codec_id); - auto fType=getFrameType_fromFFMPEG(pCodecCtx->codec_type,pCodecCtx->codec_id); - string outPin=myModule->getOutputPinIdByType(fType); + LOG_INFO << av_get_media_type_string(pCodecCtx->codec_type) << " Codec: " << avcodec_get_name(pCodecCtx->codec_id); + auto fType = getFrameType_fromFFMPEG(pCodecCtx->codec_type, pCodecCtx->codec_id); + string outPin = myModule->getOutputPinIdByType(fType); if (!outPin.empty()) { streamsMap[i] = outPin; if (pCodecCtx->codec_type == AVMEDIA_TYPE_VIDEO) { videoStream = i; - auto meta= FrameMetadataFactory::downcast(myModule->getOutputMetadata(outPin)); + auto meta = FrameMetadataFactory::downcast(myModule->getOutputMetadata(outPin)); H264Metadata tmp(pCodecCtx->width, pCodecCtx->height, pCodecCtx->gop_size, pCodecCtx->max_b_frames); meta->setData(tmp); } } } - //by this time all the output pins should be satisfied if not we have a problem + // by this time all the output pins should be satisfied if not we have a problem if (streamsMap.size() < myModule->getNumberOfOutputPins()) { LOG_ERROR << "some output pins can not be satsified" << std::endl; @@ -128,31 +126,26 @@ class RTSPClientSrc::Detail bool readBuffer() { - if(!initDone) - { - std::chrono::time_point t = std::chrono::system_clock::now(); - beginTs = std::chrono::duration_cast(t.time_since_epoch()); - initDone = true; - } frame_container outFrames; bool got_something = false; - while(!got_something) + while (!got_something) { if (av_read_frame(pFormatCtx, &packet) >= 0) { - if (videoStream >= 0) //source has video + if (videoStream >= 0) // source has video { - if (packet.stream_index == videoStream) //got video + if (packet.stream_index == videoStream) // got video { got_something = true; } } else { - got_something = true; //does not need video and got something + got_something = 
true; // does not need video and got something } auto it = streamsMap.find(packet.stream_index); - if (it != streamsMap.end()) { // so we have an interest in sending this + if (it != streamsMap.end()) + { // so we have an interest in sending this frame_sp frm; auto naluType = H264Utils::getNALUType((const char*)packet.data); if (naluType == H264Utils::H264_NAL_TYPE_SEI) @@ -176,92 +169,80 @@ class RTSPClientSrc::Detail } std::chrono::time_point t = std::chrono::system_clock::now(); - auto dur = std::chrono::duration_cast(t.time_since_epoch()); + auto dur = std::chrono::duration_cast(t.time_since_epoch()); frm->timestamp = dur.count(); if (!outFrames.insert(make_pair(it->second, frm)).second) { LOG_WARNING << "oops! there is already another packet for pin " << it->second; } - auto diff = dur - beginTs; - if(diff.count() > 1000) - { - currentCameraFps = frameCount; - frameCount = 0; - beginTs = dur; - } - frameCount++; } av_packet_unref(&packet); } } - - if(outFrames.size()>0) - myModule->send(outFrames); + if (outFrames.size() > 0) + myModule->send(outFrames); return true; } bool isConncected() const { return bConnected; } - int frameCount = 0; - int currentCameraFps = 0; + private: AVPacket packet; AVFormatContext* pFormatCtx = nullptr; std::string path; bool bConnected; - int videoStream=-1; + int videoStream = -1; bool bUseTCP; std::map streamsMap; RTSPClientSrc* myModule; - std::chrono::milliseconds beginTs; - bool initDone = false; }; RTSPClientSrc::RTSPClientSrc(RTSPClientSrcProps _props) : Module(SOURCE, "RTSPClientSrc", _props), mProps(_props) { - mDetail.reset(new Detail(this,mProps.rtspURL, mProps.useTCP)); + mDetail.reset(new Detail(this, mProps.rtspURL, mProps.useTCP)); } -RTSPClientSrc::~RTSPClientSrc() { +RTSPClientSrc::~RTSPClientSrc() +{ } -bool RTSPClientSrc::init() { +bool RTSPClientSrc::init() +{ if (mDetail->connect()) { return Module::init(); } return false; } -bool RTSPClientSrc::term() { +bool RTSPClientSrc::term() +{ mDetail.reset(); return 
true; } void RTSPClientSrc::setProps(RTSPClientSrcProps& props) { mProps = props; - //TBD need to also reset the whole connection + // TBD need to also reset the whole connection } -RTSPClientSrcProps RTSPClientSrc::getProps() { + +RTSPClientSrcProps RTSPClientSrc::getProps() +{ return mProps; } -bool RTSPClientSrc::produce() { +bool RTSPClientSrc::produce() +{ return mDetail->readBuffer(); } -bool RTSPClientSrc::validateOutputPins() { - //smallest check at least one output pin should be there +bool RTSPClientSrc::validateOutputPins() +{ + // smallest check at least one output pin should be there return this->getNumberOfOutputPins() > 0; } void RTSPClientSrc::notifyPlay(bool play) {} -bool RTSPClientSrc::handleCommand(Command::CommandType type, frame_sp& frame) -{ - if (type == Command::CommandType::Relay) - { - return Module::handleCommand(type, frame); - } - return true; +//bool RTSPClientSrc::handleCommand(Command::CommandType type, frame_sp& frame) { return true; } +bool RTSPClientSrc::handleCommand(Command::CommandType type, frame_sp &frame) { + if (type == Command::CommandType::Relay) { + return Module::handleCommand(type, frame); + } + return true; } - -int RTSPClientSrc::getCurrentFps() -{ - return mDetail->currentCameraFps; -} - bool RTSPClientSrc::handlePropsChange(frame_sp& frame) { return true; } diff --git a/samples/README.md b/samples/README.md index 846187ef3..64a63f76c 100644 --- a/samples/README.md +++ b/samples/README.md @@ -1,4 +1,5 @@ -# ApraPipes Samples + + This Samples demonstrate the capability and usage of ApraPipes Modules into different applications. @@ -26,3 +27,16 @@ This samples demonstrates the usage of ApraPipes modules which can be used to ge - arg1: input video path - arg2: output path - The Unit test "timelapse_summary_test.cpp" is located inside the aprapipessampleut executable. Make sure to replace the "inputVideoPath" and "outputPath" with suitable paths. + +### 2. 
Relay sample: +This sample demonstrate the use of relay command in the ApraPipes which can be used to swith the stream from live to recorder and vice versa. +#### - Modules Used: +- RtspClientSrc: To read the live stream from RTSP camera. +- Mp4Reader: To Read recorded stream from the Mp4Video. +- H264Decoder: To decode the h264 frames from RTSP and MP4 sources. +- colorConversion: To transform YUV420 frame to RAW RGB format. +- ImageViewer: To render RGB frames on the screen. +- arguments: + - arg1: RTSP camera URL + - arg2: MP4 video file path +- The Unit test "relay_sample_test.cpp" is located inside the aprapipessampleut executable. Make sure to replace the `rtspUrl` and `mp4VideoPath` with suitable paths. diff --git a/samples/cMakeLists.txt b/samples/cMakeLists.txt new file mode 100644 index 000000000..c2f9918a3 --- /dev/null +++ b/samples/cMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.22) +include_directories( +../base +${CMAKE_CURRENT_SOURCE_DIR} +) + +add_subdirectory(face_detection_cpu) +add_subdirectory(create_thumbnail_from_mp4_video) +add_subdirectory(play_mp4_from_beginning) +add_subdirectory(timelapse-sample) \ No newline at end of file diff --git a/samples/relay-sample/PipelineMain.cpp b/samples/relay-sample/PipelineMain.cpp new file mode 100644 index 000000000..0cf06427b --- /dev/null +++ b/samples/relay-sample/PipelineMain.cpp @@ -0,0 +1,50 @@ +#include "relay_sample.h" +#include +#include +#include + +int main(int argc, char *argv[]) { + + if (argc < 3) { + std::cerr << "Usage: " << argv[0] << " " << std::endl; + return 1; + } + + std::string rtspUrl = argv[argc - 2]; + std::string mp4VideoPath = argv[argc - 1]; + + LoggerProps loggerProps; + loggerProps.logLevel = boost::log::trivial::severity_level::info; + Logger::setLogLevel(boost::log::trivial::severity_level::info); + Logger::initLogger(loggerProps); + + RelayPipeline pipelineInstance; + + if (!pipelineInstance.setupPipeline(rtspUrl, mp4VideoPath)) { + std::cerr << "Failed 
to setup pipeline." << std::endl; + return 1; + } + + if (!pipelineInstance.startPipeline()) { + std::cerr << "Failed to start pipeline." << std::endl; + return 1; + } + + while (true) { + int k = getchar(); + if (k == 114) { + pipelineInstance.addRelayToRtsp(false); + pipelineInstance.addRelayToMp4(true); + } + if (k == 108) { + pipelineInstance.addRelayToMp4(false); + pipelineInstance.addRelayToRtsp(true); + } + if (k == 115) { + pipelineInstance.stopPipeline(); + break; + } + } + + return 0; +} \ No newline at end of file diff --git a/samples/relay-sample/cMakeLists.txt b/samples/relay-sample/cMakeLists.txt new file mode 100644 index 000000000..3443a9a98 --- /dev/null +++ b/samples/relay-sample/cMakeLists.txt @@ -0,0 +1,42 @@ +cmake_minimum_required(VERSION 3.22) +set(TARGET relaysample) +SET(CORE_FILES + relay_sample.cpp + PipelineMain.cpp +) +SET(CORE_FILES_H + relay_sample.h +) +SET(SOURCE + ${CORE_FILES} + ${CORE_FILES_H} +) + +add_executable(${TARGET} ${SOURCE}) + +target_include_directories (${TARGET} PRIVATE + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) + +target_link_libraries( + ${TARGET} + aprapipes + ${JPEG_LIBRARIES} + ${LIBMP4_LIB} + ${OPENH264_LIB} + ${Boost_LIBRARIES} + ${FFMPEG_LIBRARIES} + ${OpenCV_LIBRARIES} + ${JETSON_LIBS} + ${NVCUDAToolkit_LIBS} + ${NVCODEC_LIB} + ${NVJPEGLIB_L4T} + ${CURSES_LIBRARIES} +) \ No newline at end of file diff --git a/samples/relay-sample/relay_sample.cpp b/samples/relay-sample/relay_sample.cpp new file mode 100644 index 000000000..f656dbbb8 --- /dev/null +++ b/samples/relay-sample/relay_sample.cpp @@ -0,0 +1,120 @@ +#include "relay_sample.h" +#include "ExternalSinkModule.h" +#include "FrameMetadata.h" +#include "H264Metadata.h" +#include "ImageViewerModule.h" +#include "KeyboardListener.h" +#include "Logger.h" +#include "Mp4VideoMetadata.h" +#include "PipeLine.h" 
+#include "RTSPClientSrc.h" +#include "stdafx.h" +#include +#include +#include +#include + +RelayPipeline::RelayPipeline() : pipeline("RelaySample") {} + +void RelayPipeline::addRelayToRtsp(bool open) { + rtspSource->relay(h264Decoder, open); +} + +void RelayPipeline::addRelayToMp4(bool open) { + mp4ReaderSource->relay(h264Decoder, open); +} + +//bool RelayPipeline::testPipeline() { +// +// auto sink = boost::shared_ptr(new ExternalSinkModule()); +// colorConversion->setNext(sink); +// +// BOOST_TEST(rtspSource->init()); +// BOOST_TEST(mp4ReaderSource->init()); +// BOOST_TEST(h264Decoder->init()); +// BOOST_TEST(colorConversion->init()); +// BOOST_TEST(sink->init()); +// +// for (int i = 0; i <= 10; i++) { +// mp4ReaderSource->step(); +// h264Decoder->step(); +// } +// colorConversion->step(); +// +// auto frames = sink->pop(); +// frame_sp outputFrame = frames.cbegin()->second; +// Test_Utils::saveOrCompare( +// "../.././data/frame_from_mp4.raw", +// const_cast(static_cast(outputFrame->data())), +// outputFrame->size(), 0); +// +// for (int i = 0; i <= 10; i++) { +// rtspSource->step(); +// h264Decoder->step(); +// } +// colorConversion->step(); +// +// frames = sink->pop(); +// outputFrame = frames.cbegin()->second; +// Test_Utils::saveOrCompare( +// "../.././data/frame_from_rtsp.raw", +// const_cast(static_cast(outputFrame->data())), +// outputFrame->size(), 0); +// +// return true; +//} + +bool RelayPipeline::setupPipeline(const std::string &rtspUrl, const std::string &mp4VideoPath) { + // RTSP + auto url = std::string(rtspUrl); + rtspSource = boost::shared_ptr( + new RTSPClientSrc(RTSPClientSrcProps(url, "", ""))); + auto rtspMetaData = framemetadata_sp(new H264Metadata(1280, 720)); + rtspSource->addOutputPin(rtspMetaData); + + // MP4 + bool parseFS = false; + auto h264ImageMetadata = framemetadata_sp(new H264Metadata(1280, 720)); + auto frameType = FrameMetadata::FrameType::H264_DATA; + auto mp4ReaderProps = + Mp4ReaderSourceProps(mp4VideoPath, parseFS, 0, 
true, true, false); + mp4ReaderProps.fps = 9; + mp4ReaderSource = + boost::shared_ptr(new Mp4ReaderSource(mp4ReaderProps)); + mp4ReaderSource->addOutPutPin(h264ImageMetadata); + auto mp4Metadata = framemetadata_sp(new Mp4VideoMetadata("v_1")); + mp4ReaderSource->addOutPutPin(mp4Metadata); + std::vector mImagePin; + mImagePin = mp4ReaderSource->getAllOutputPinsByType(frameType); + + h264Decoder = + boost::shared_ptr(new H264Decoder(H264DecoderProps())); + rtspSource->setNext(h264Decoder); + mp4ReaderSource->setNext(h264Decoder, mImagePin); + + colorConversion = boost::shared_ptr(new ColorConversion( + ColorConversionProps(ColorConversionProps::YUV420PLANAR_TO_RGB))); + h264Decoder->setNext(colorConversion); + + imageViewer = boost::shared_ptr( + new ImageViewerModule(ImageViewerModuleProps("Relay Sample"))); + colorConversion->setNext(imageViewer); + return true; +} + +bool RelayPipeline::startPipeline() { + pipeline.appendModule(rtspSource); + pipeline.appendModule(mp4ReaderSource); + pipeline.init(); + pipeline.run_all_threaded(); + addRelayToMp4(false); + return true; +} + +bool RelayPipeline::stopPipeline() { + pipeline.stop(); + pipeline.term(); + pipeline.wait_for_all(); + return true; +} + diff --git a/samples/relay-sample/relay_sample.h b/samples/relay-sample/relay_sample.h new file mode 100644 index 000000000..d12fd41f5 --- /dev/null +++ b/samples/relay-sample/relay_sample.h @@ -0,0 +1,31 @@ +#include "ImageViewerModule.h" +#include "KeyboardListener.h" +#include "Mp4VideoMetadata.h" +#include +#include + +#include "ImageViewerModule.h" +#include "RTSPClientSrc.h" +#include +#include +#include + +class RelayPipeline { +public: + RelayPipeline(); + + boost::shared_ptr rtspSource; + boost::shared_ptr mp4ReaderSource; + boost::shared_ptr h264Decoder; + boost::shared_ptr colorConversion; + boost::shared_ptr imageViewer; + + bool setupPipeline(const std::string &rtspUrl, const std::string &mp4VideoPath); + bool startPipeline(); + bool stopPipeline(); + void 
addRelayToRtsp(bool open); + void addRelayToMp4(bool open); + +private: + PipeLine pipeline; +}; \ No newline at end of file diff --git a/samples/relay-sample/relay_sample_test.cpp b/samples/relay-sample/relay_sample_test.cpp new file mode 100644 index 000000000..699e59557 --- /dev/null +++ b/samples/relay-sample/relay_sample_test.cpp @@ -0,0 +1,52 @@ +#include "relay_sample.h" +#include "test_utils.h" +#include +#include +#include "ExternalSinkModule.h" + +BOOST_AUTO_TEST_SUITE(relay_sample) + +BOOST_AUTO_TEST_CASE(relaySample) { + RelayPipeline relayPipeline; + std::string rtspUrl = "rtsp://root:m4m1g0@10.102.10.75/axis-media/media.amp?resolution=1280x720"; + std::string mp4VideoPath = "data/1714992199120.mp4"; + relayPipeline.setupPipeline(rtspUrl, mp4VideoPath); + + + auto sink = boost::shared_ptr(new ExternalSinkModule()); + relayPipeline.colorConversion->setNext(sink); + + BOOST_TEST(relayPipeline.rtspSource->init()); + BOOST_TEST(relayPipeline.mp4ReaderSource->init()); + BOOST_TEST(relayPipeline.h264Decoder->init()); + BOOST_TEST(relayPipeline.colorConversion->init()); + BOOST_TEST(sink->init()); + + for (int i = 0; i <= 10; i++) { + relayPipeline.mp4ReaderSource->step(); + relayPipeline.h264Decoder->step(); + } + relayPipeline.colorConversion->step(); + + auto frames = sink->pop(); + frame_sp outputFrame = frames.cbegin()->second; + Test_Utils::saveOrCompare( + "../.././data/frame_from_mp4.raw", + const_cast(static_cast(outputFrame->data())), + outputFrame->size(), 0); + + for (int i = 0; i <= 10; i++) { + relayPipeline.rtspSource->step(); + relayPipeline.h264Decoder->step(); + } + relayPipeline.colorConversion->step(); + + frames = sink->pop(); + outputFrame = frames.cbegin()->second; + Test_Utils::saveOrCompare( + "../.././data/frame_from_rtsp.raw", + const_cast(static_cast(outputFrame->data())), + outputFrame->size(), 0); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/samples/timelapse-sample/CMakeLists.txt b/samples/timelapse-sample/CMakeLists.txt index 
877797d1d..5faa1d5e0 100644 --- a/samples/timelapse-sample/CMakeLists.txt +++ b/samples/timelapse-sample/CMakeLists.txt @@ -1,4 +1,3 @@ -cmake_minimum_required(VERSION 3.22) set(TARGET generateTimeLapse) SET(SAMPLE_FILES timelapse_summary.cpp @@ -18,8 +17,18 @@ ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${Boost_INCLUDE_DIRS} ${LIBMP4_INC_DIR} ) +target_include_directories ( runner_test PUBLIC + ${JETSON_MULTIMEDIA_LIB_INCLUDE} + ${FFMPEG_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${LIBMP4_INC_DIR} + ${BARESIP_INC_DIR} + ${LIBRE_INC_DIR} + ${NVCODEC_INCLUDE_DIR} +) target_link_libraries( - ${TARGET} + runner_test aprapipes ${LIBMP4_LIB} ${OPENH264_LIB} @@ -28,4 +37,4 @@ target_link_libraries( ${OpenCV_LIBRARIES} ${NVCUDAToolkit_LIBS} ${NVCODEC_LIB} - ) \ No newline at end of file + ) diff --git a/samples/timelapse-sample/timelapse_summary.cpp b/samples/timelapse-sample/timelapse_summary.cpp index df5ae9efc..2bc18052a 100644 --- a/samples/timelapse-sample/timelapse_summary.cpp +++ b/samples/timelapse-sample/timelapse_summary.cpp @@ -97,4 +97,4 @@ bool TimelapsePipeline::stopPipeline() { timelapseSamplePipeline.term(); timelapseSamplePipeline.wait_for_all(); return true; -} \ No newline at end of file +} diff --git a/samples/timelapse-sample/timelapse_summary.h b/samples/timelapse-sample/timelapse_summary.h index d1a90efac..0102add50 100644 --- a/samples/timelapse-sample/timelapse_summary.h +++ b/samples/timelapse-sample/timelapse_summary.h @@ -1,16 +1,10 @@ #include -#include -#include "CudaStreamSynchronize.h" -#include -#include -#include class TimelapsePipeline { public: TimelapsePipeline(); - bool setupPipeline(const std::string &videoPath, - const std::string &outFolderPath); + bool setupPipeline(); bool startPipeline(); bool stopPipeline(); From 61690e5291ef5ed217764cb32898f14d067df5f8 Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 9 Jul 2024 19:30:02 +0530 Subject: [PATCH 48/49] base cmake changes. 
--- base/CMakeLists.txt | 170 ++++++++++++-------------------------------- 1 file changed, 45 insertions(+), 125 deletions(-) diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt index 515ab41c7..ef126279e 100755 --- a/base/CMakeLists.txt +++ b/base/CMakeLists.txt @@ -195,9 +195,6 @@ SET(CORE_FILES src/MotionVectorExtractor.cpp src/OverlayModule.cpp src/OrderedCacheOfFiles.cpp - .././samples/relay-sample/relay_sample.cpp - .././samples/relay-sample/relay_sample.cpp - .././samples/timelapse-sample/timelapse_summary.h ) SET(CORE_FILES_H @@ -260,9 +257,6 @@ SET(CORE_FILES_H include/OrderedCacheOfFiles.h include/TestSignalGeneratorSrc.h include/AbsControlModule.h - .././samples/relay-sample/relay_sample.h - .././samples/relay-sample/relay_sample.h - .././samples/timelapse-sample/timelapse_summary.cpp ) IF(ENABLE_WINDOWS) @@ -521,9 +515,6 @@ target_include_directories ( aprapipes PRIVATE ${BARESIP_INC_DIR} ${LIBRE_INC_DIR} ${NVCODEC_INCLUDE_DIR} -./../samples/relay-sample -./../samples/relay-sample -./../samples/timelapse-sample ) # aprapipes Unit Tests @@ -573,6 +564,35 @@ IF (ENABLE_CUDA) ENDIF(NOT ENABLE_ARM64) ENDIF(ENABLE_CUDA) +SET(SAMPLE_UT_FILES + test/utmain.cpp + test/test_utils.cpp + test/test_utils.h + .././samples/timelapse-sample/timelapse_summary_test.cpp + .././samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp + .././samples/face_detection_cpu/test_face_detection_cpu.cpp + .././samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp + .././samples/relay-sample/relay_sample_test.cpp +) + +SET(SAMPLE_CORE_FILES + .././samples/timelapse-sample/timelapse_summary.h + .././samples/timelapse-sample/timelapse_summary.cpp + .././samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp + .././samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h + .././samples/face_detection_cpu/face_detection_cpu.h + .././samples/face_detection_cpu/face_detection_cpu.cpp + 
.././samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h + .././samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp + .././samples/relay-sample/relay_sample.h + .././samples/relay-sample/relay_sample.cpp +) + +SET(SAMPLE_SOURCE + ${SAMPLE_CORE_FILES} + ${SAMPLE_UT_FILES} +) + SET(UT_FILES test/utmain.cpp test/unit_tests.cpp @@ -632,42 +652,6 @@ SET(UT_FILES ${CUDA_UT_FILES} ) -SET(SAMPLE_UT_FILES - test/utmain.cpp - test/test_utils.cpp - test/test_utils.h - ../samples/face_detection_cpu/test_face_detection_cpu.cpp - ../samples/relay-sample/relay_sample_test.cpp - .././samples/timelapse-sample/timelapse_summary_test.cpp -) - -SET(SAMPLE_CORE_FILES - ../samples/relay-sample/relay_sample.cpp - ../samples/relay-sample/relay_sample.h - ../samples/face_detection_cpu/face_detection_cpu.h - ../samples/face_detection_cpu/face_detection_cpu.cpp - test/test_utils.h - ../samples/create_thumbnail_from_mp4_video/test_generate_thumbnail_from_mp4_video.cpp - ../samples/play_mp4_from_beginning/test_play_mp4_video_from_beginning.cpp - .././samples/timelapse-sample/timelapse_summary_test.cpp -) - -SET(SAMPLE_CORE_FILES - ../samples/relay-sample/relay_sample.cpp - ../samples/relay-sample/relay_sample.h - ../samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.h - ../samples/create_thumbnail_from_mp4_video/GenerateThumbnailsPipeline.cpp - ../samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.h - ../samples/play_mp4_from_beginning/PlayMp4VideoFromBeginning.cpp - ../samples/timelapse-sample/timelapse_summary.h - ../samples/timelapse-sample/timelapse_summary.cpp -) - -SET(SAMPLE_SOURCE - ${SAMPLE_CORE_FILES} - ${SAMPLE_UT_FILES} -) - IF(ENABLE_LINUX) list(APPEND UT_FILES test/gtkglrenderer_tests.cpp @@ -681,21 +665,9 @@ IF(ENABLE_LINUX) ENDIF(ENABLE_LINUX) add_executable(aprapipesut ${UT_FILES}) -add_executable(aprapipessampleut ${SAMPLE_SOURCE}) add_executable(aprapipessampleut ${SAMPLE_SOURCE}) -target_include_directories ( aprapipessampleut 
PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} - ${OpenCV_INCLUDE_DIRS} - ${Boost_INCLUDE_DIRS} - ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} - test - IF(ENABLE_ARM64) target_include_directories ( aprapipesut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_ROOT} ${JPEG_INCLUDE_DIR}) ENDIF(ENABLE_ARM64) @@ -707,6 +679,18 @@ ENDIF (ENABLE_CUDA) find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) +IF(ENABLE_ARM64) + target_include_directories(aprapipesut PRIVATE ${VCPKG_GTK_INCLUDE_DIRS}) +ENDIF(ENABLE_ARM64) + +IF(ENABLE_LINUX) + target_include_directories(aprapipesut PRIVATE ${GTK3_INCLUDE_DIRS}) + target_link_libraries(aprapipesut + ${GDK3_LIBRARIES} + ${GTK3_LIBRARIES} + ) +ENDIF(ENABLE_LINUX) + target_include_directories ( aprapipessampleut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} @@ -719,27 +703,9 @@ ${NVCODEC_INCLUDE_DIR} test ) -IF(ENABLE_ARM64) - target_include_directories ( aprapipesut PRIVATE ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_ROOT} ${JPEG_INCLUDE_DIR}) -ENDIF(ENABLE_ARM64) - -IF (ENABLE_CUDA) - target_include_directories ( aprapipesut PRIVATE ${NVCODEC_INCLUDE_DIR}) -ENDIF (ENABLE_CUDA) - - -find_library(OPENH264_LIB NAMES openh264.lib libopenh264.a REQUIRED) -find_library(LIBMP4_LIB NAMES mp4lib.lib libmp4lib.a REQUIRED) - - -add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../samples ${CMAKE_CURRENT_BINARY_DIR}/samples) - target_link_libraries(aprapipessampleut aprapipes ${JPEG_LIBRARIES} - -target_link_libraries(aprapipessampleut - aprapipes ${LIBMP4_LIB} ${OPENH264_LIB} ${Boost_LIBRARIES} @@ -760,24 +726,7 @@ target_link_libraries(aprapipessampleut whisper::whisper ) - target_link_libraries(aprapipesut - ${NVCUDAToolkit_LIBS} - ${NVCODEC_LIB} - ) - -IF(ENABLE_ARM64) - target_include_directories(aprapipesut PRIVATE ${VCPKG_GTK_INCLUDE_DIRS}) -ENDIF(ENABLE_ARM64) - -IF(ENABLE_LINUX) - 
target_include_directories(aprapipesut PRIVATE ${GTK3_INCLUDE_DIRS}) - target_link_libraries(aprapipesut - ${GDK3_LIBRARIES} - ${GTK3_LIBRARIES} - ) -ENDIF(ENABLE_LINUX) - -target_link_libraries(aprapipesut +target_link_libraries(aprapipesut aprapipes ${GLEW_LIBRARIES} ${JPEG_LIBRARIES} @@ -801,45 +750,14 @@ target_link_libraries(aprapipesut whisper::whisper ) - target_include_directories ( aprapipessampleut PRIVATE - ${JETSON_MULTIMEDIA_LIB_INCLUDE} - ${FFMPEG_INCLUDE_DIRS} - ${OpenCV_INCLUDE_DIRS} - ${Boost_INCLUDE_DIRS} - ${LIBMP4_INC_DIR} - ${BARESIP_INC_DIR} - ${LIBRE_INC_DIR} - ${NVCODEC_INCLUDE_DIR} - test - ) - - target_link_libraries(aprapipessampleut - aprapipes - ${JPEG_LIBRARIES} - ${LIBMP4_LIB} - ${OPENH264_LIB} - ${Boost_LIBRARIES} - ${FFMPEG_LIBRARIES} - ${OpenCV_LIBRARIES} - ${JETSON_LIBS} - ${NVCUDAToolkit_LIBS} - ${NVCODEC_LIB} - ${NVJPEGLIB_L4T} - ${CURSES_LIBRARIES} - ) - IF(ENABLE_WINDOWS) file(COPY ${RUNTIME_DLLS} DESTINATION Debug/) file(COPY ${RUNTIME_DLLS} DESTINATION Release/) IF(GHA) file(COPY ${RUNTIME_DLLS} DESTINATION RelWithDebInfo/) ENDIF(GHA) - ENDIF(ENABLE_WINDOWS) - -add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../samples ${CMAKE_CURRENT_BINARY_DIR}/samples) - include(GNUInstallDirs) # BUILD_INTERFACE specifies where to find includes during build time @@ -873,4 +791,6 @@ install( DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/aprapipes) install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/include - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/aprapipes) \ No newline at end of file + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/aprapipes) + +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../samples ${CMAKE_CURRENT_BINARY_DIR}/samples) \ No newline at end of file From 59031176710de32eebda4c4fced4a429dd17f5ae Mon Sep 17 00:00:00 2001 From: AdityaKBhadragond14 Date: Tue, 16 Jul 2024 13:09:47 +0530 Subject: [PATCH 49/49] Corrected linking libraries in timelapse sample cmake. 
--- samples/timelapse-sample/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/timelapse-sample/CMakeLists.txt b/samples/timelapse-sample/CMakeLists.txt index 5faa1d5e0..97b852ba8 100644 --- a/samples/timelapse-sample/CMakeLists.txt +++ b/samples/timelapse-sample/CMakeLists.txt @@ -17,7 +17,7 @@ ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${Boost_INCLUDE_DIRS} ${LIBMP4_INC_DIR} ) -target_include_directories ( runner_test PUBLIC +target_include_directories (${TARGET} PUBLIC ${JETSON_MULTIMEDIA_LIB_INCLUDE} ${FFMPEG_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} @@ -28,7 +28,7 @@ target_include_directories ( runner_test PUBLIC ${NVCODEC_INCLUDE_DIR} ) target_link_libraries( - runner_test + ${TARGET} aprapipes ${LIBMP4_LIB} ${OPENH264_LIB}