diff --git a/data/settings_yolov11.ini b/data/settings_yolov11.ini index c82412cd7..fb008fcfc 100644 --- a/data/settings_yolov11.ini +++ b/data/settings_yolov11.ini @@ -4,7 +4,7 @@ # opencv_dnn = 12 # darknet_cudnn = 10 # tensorrt = 11 -detector_backend = 12 +detector_backend = 11 #----------------------------- # Target and backend for opencv_dnn detector @@ -32,7 +32,7 @@ nn_config = C:/work/home/mtracker/Multitarget-tracker/data/coco/yolo11s.onnx class_names = C:/work/home/mtracker/Multitarget-tracker/data/coco/coco.names #----------------------------- -confidence_threshold = 0.3 +confidence_threshold = 0.5 max_crop_ratio = 0 max_batch = 1 diff --git a/data/yolov3-tiny.cfg b/data/yolov3-tiny.cfg deleted file mode 100644 index 42c0fcf91..000000000 --- a/data/yolov3-tiny.cfg +++ /dev/null @@ -1,182 +0,0 @@ -[net] -# Testing -batch=1 -subdivisions=1 -# Training -# batch=64 -# subdivisions=2 -width=416 -height=416 -channels=3 -momentum=0.9 -decay=0.0005 -angle=0 -saturation = 1.5 -exposure = 1.5 -hue=.1 - -learning_rate=0.001 -burn_in=1000 -max_batches = 500200 -policy=steps -steps=400000,450000 -scales=.1,.1 - -[convolutional] -batch_normalize=1 -filters=16 -size=3 -stride=1 -pad=1 -activation=leaky - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=1 -pad=1 -activation=leaky - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=leaky - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[maxpool] -size=2 -stride=2 - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[maxpool] -size=2 -stride=1 - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=1 -pad=1 -activation=leaky - -########### - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - - -[yolo] -mask = 3,4,5 -anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 -classes=80 -num=6 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -random=1 - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = -1, 8 - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - -[yolo] -mask = 1,2,3 -anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 -classes=80 -num=6 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -random=1 diff --git a/data/yolov3-tiny.weights b/data/yolov3-tiny.weights deleted file mode 100644 index aad7e6c80..000000000 Binary files a/data/yolov3-tiny.weights and /dev/null differ diff --git a/data/yolov3.cfg b/data/yolov3.cfg deleted file mode 100644 index 938ffff23..000000000 --- a/data/yolov3.cfg +++ /dev/null @@ -1,789 +0,0 @@ -[net] -# Testing -# batch=1 -# subdivisions=1 -# Training -batch=64 -subdivisions=16 -width=608 -height=608 -channels=3 -momentum=0.9 -decay=0.0005 -angle=0 -saturation = 1.5 -exposure = 1.5 -hue=.1 - -learning_rate=0.001 -burn_in=1000 -max_batches = 500200 -policy=steps -steps=400000,450000 -scales=.1,.1 - 
-[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=1 -pad=1 -activation=leaky - -# Downsample - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=32 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -# Downsample - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -# Downsample - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -# Downsample - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - - -[convolutional] 
-batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -# Downsample - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=2 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=1 -pad=1 -activation=leaky - -[shortcut] -from=-3 -activation=linear - -###################### - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 6,7,8 -anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -random=1 - - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = -1, 61 - - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] 
-batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 3,4,5 -anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -random=1 - - - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = -1, 36 - - - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 0,1,2 -anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -random=1 - diff --git a/data/yolov4.cfg b/data/yolov4.cfg deleted file mode 100644 index 47b9db61a..000000000 --- a/data/yolov4.cfg +++ /dev/null @@ -1,1155 +0,0 @@ -[net] -batch=64 -subdivisions=8 -# Training -#width=512 -#height=512 -width=608 -height=608 -channels=3 -momentum=0.949 -decay=0.0005 -angle=0 -saturation = 1.5 -exposure = 1.5 -hue=.1 - -learning_rate=0.00261 -burn_in=1000 -max_batches = 500500 -policy=steps -steps=400000,450000 -scales=.1,.1 - -#cutmix=1 -mosaic=1 - -#:104x104 54:52x52 85:26x26 104:13x13 for 416 - -[convolutional] -batch_normalize=1 -filters=32 -size=3 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=32 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-7 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 
-stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=64 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=64 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-10 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=128 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-28 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 
-filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=256 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-28 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -# Downsample - -[convolutional] -batch_normalize=1 -filters=1024 -size=3 -stride=2 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -2 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[convolutional] -batch_normalize=1 -filters=512 -size=3 -stride=1 -pad=1 -activation=mish - -[shortcut] -from=-3 -activation=linear - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=mish - -[route] -layers = -1,-16 - -[convolutional] -batch_normalize=1 -filters=1024 -size=1 -stride=1 -pad=1 -activation=mish - -########################## - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -### SPP ### -[maxpool] -stride=1 -size=5 - -[route] -layers=-2 - -[maxpool] -stride=1 -size=9 - 
-[route] -layers=-4 - -[maxpool] -stride=1 -size=13 - -[route] -layers=-1,-3,-5,-6 -### End SPP ### - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = 85 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1, -3 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[upsample] -stride=2 - -[route] -layers = 54 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[route] -layers = -1, -3 - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=128 -size=1 -stride=1 -pad=1 -activation=leaky - -########################## - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=256 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 0,1,2 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -scale_x_y = 1.2 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 - - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -size=3 -stride=2 -pad=1 -filters=256 -activation=leaky - -[route] -layers = -1, -16 - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=256 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=512 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 3,4,5 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -scale_x_y = 1.1 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 - - -[route] -layers = -4 - -[convolutional] -batch_normalize=1 -size=3 -stride=2 -pad=1 
-filters=512 -activation=leaky - -[route] -layers = -1, -37 - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -batch_normalize=1 -filters=512 -size=1 -stride=1 -pad=1 -activation=leaky - -[convolutional] -batch_normalize=1 -size=3 -stride=1 -pad=1 -filters=1024 -activation=leaky - -[convolutional] -size=1 -stride=1 -pad=1 -filters=255 -activation=linear - - -[yolo] -mask = 6,7,8 -anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 -classes=80 -num=9 -jitter=.3 -ignore_thresh = .7 -truth_thresh = 1 -random=1 -scale_x_y = 1.05 -iou_thresh=0.213 -cls_normalizer=1.0 -iou_normalizer=0.07 -iou_loss=ciou -nms_kind=greedynms -beta_nms=0.6 - diff --git a/src/Detector/OCVDNNDetector.cpp b/src/Detector/OCVDNNDetector.cpp index 3da659679..66c7a1e59 100644 --- a/src/Detector/OCVDNNDetector.cpp +++ b/src/Detector/OCVDNNDetector.cpp @@ -351,7 +351,9 @@ void OCVDNNDetector::DetectInCrop(const cv::UMat& colorFrame, const cv::Rect& cr } else { - if (m_netType == ModelType::YOLOV8 || m_netType == ModelType::YOLOV5 || m_netType == ModelType::YOLOV9 || m_netType == ModelType::YOLOV10 || m_netType == ModelType::YOLOV11) + if (m_netType == ModelType::YOLOV8 || m_netType == ModelType::YOLOV5 || + m_netType == ModelType::YOLOV9 || m_netType == ModelType::YOLOV10 || + m_netType == ModelType::YOLOV11) { int rows = detections[0].size[1]; int dimensions = detections[0].size[2]; diff --git a/src/Detector/tensorrt_yolo/CMakeLists.txt b/src/Detector/tensorrt_yolo/CMakeLists.txt index d09a22431..127724752 100644 --- a/src/Detector/tensorrt_yolo/CMakeLists.txt +++ b/src/Detector/tensorrt_yolo/CMakeLists.txt @@ -81,7 +81,7 @@ set(TENSORRT_LIBS ${TensorRT_LIBRARIES}) if (CMAKE_COMPILER_IS_GNUCXX) - set(TENSORRT_LIBS ${TENSORRT_LIBS} stdc++fs) + set(TENSORRT_LIBS ${TENSORRT_LIBS} stdc++fs nvinfer_plugin nvonnxparser) endif(CMAKE_COMPILER_IS_GNUCXX) message("TENSORRT_LIBS: ${TENSORRT_LIBS}") diff --git a/src/Detector/tensorrt_yolo/YoloONNX.cpp b/src/Detector/tensorrt_yolo/YoloONNX.cpp index 3ea99ec46..08bd2bc34 100644 --- a/src/Detector/tensorrt_yolo/YoloONNX.cpp +++ b/src/Detector/tensorrt_yolo/YoloONNX.cpp @@ -146,6 +146,8 @@ bool YoloONNX::ConstructNetwork(YoloONNXUniquePtr& builder, YoloONNXUniquePtr& network, YoloONNXUniquePtr& config, YoloONNXUniquePtr& parser) { + bool res = false; + // Parse ONNX model file to populate TensorRT INetwork //int verbosity = (int) nvinfer1::ILogger::Severity::kERROR; int verbosity = (int)nvinfer1::ILogger::Severity::kVERBOSE; @@ -155,7 +157,7 @@ bool YoloONNX::ConstructNetwork(YoloONNXUniquePtr& builder, if (!parser->parseFromFile(m_params.onnxFileName.c_str(), verbosity)) { sample::gLogError << "Unable to parse ONNX model file: " << m_params.onnxFileName << std::endl; - return false; + return res; } #if (NV_TENSORRT_MAJOR < 8) @@ -207,12 +209,15 @@ bool YoloONNX::ConstructNetwork(YoloONNXUniquePtr& builder, if (m_params.dlaCore >= 0) infer->setDLACore(m_params.dlaCore); nvinfer1::IHostMemory* mem = builder->buildSerializedNetwork(*network, *config); - m_engine = std::shared_ptr(infer->deserializeCudaEngine(mem->data(), mem->size()), samplesCommon::InferDeleter()); + if (mem) + m_engine = 
std::shared_ptr(infer->deserializeCudaEngine(mem->data(), mem->size()), samplesCommon::InferDeleter()); + else + sample::gLogError << "Unable to buildSerializedNetwork" << std::endl; delete infer; #endif if (!m_engine) - return false; + return res; if (m_params.engineFileName.size() > 0) { @@ -231,8 +236,9 @@ bool YoloONNX::ConstructNetwork(YoloONNXUniquePtr& builder, p.close(); sample::gLogInfo << "TRT Engine file saved to: " << m_params.engineFileName << std::endl; } + res = true; - return true; + return res; } //! diff --git a/src/Detector/tensorrt_yolo/cmake/FindTensorRT.cmake b/src/Detector/tensorrt_yolo/cmake/FindTensorRT.cmake index f4f9f42c0..fc9e92d72 100644 --- a/src/Detector/tensorrt_yolo/cmake/FindTensorRT.cmake +++ b/src/Detector/tensorrt_yolo/cmake/FindTensorRT.cmake @@ -1,124 +1,75 @@ -# ~~~ -# Copyright 2021 Olivier Le Doeuff -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # This module defines the following variables: # -# - TensorRT_FOUND: A boolean specifying whether or not TensorRT was found. -# - TensorRT_VERSION: The exact version of TensorRT found -# - TensorRT_VERSION_MAJOR: The major version of TensorRT. -# - TensorRT_VERSION_MINOR: The minor version of TensorRT. -# - TensorRT_VERSION_PATCH: The patch version of TensorRT. -# - TensorRT_VERSION_TWEAK: The tweak version of TensorRT. -# - TensorRT_INCLUDE_DIRS: The path to TensorRT ``include`` folder containing the header files required to compile a project linking against TensorRT. -# - TensorRT_LIBRARY_DIRS: The path to TensorRT library directory that contains libraries. +# :: # -# This module create following targets: -# - trt::nvinfer -# - trt::nvinfer_plugin -# - trt::nvonnxparser -# - trt::nvparsers -# This script was inspired from https://github.com/NicolasIRAGNE/CMakeScripts -# This script was inspired from https://github.com/NVIDIA/tensorrt-laboratory/blob/master/cmake/FindTensorRT.cmake +# TensorRT_INCLUDE_DIRS +# TensorRT_LIBRARIES +# TensorRT_FOUND +# +# :: +# +# TensorRT_VERSION_STRING - version (x.y.z) +# TensorRT_VERSION_MAJOR - major version (x) +# TensorRT_VERSION_MINOR - minor version (y) +# TensorRT_VERSION_PATCH - patch version (z) # # Hints # ^^^^^ # A user may set ``TensorRT_ROOT`` to an installation root to tell this module where to look. 
-# ~~~ +# +set(_TensorRT_SEARCHES) -if(NOT TensorRT_FIND_COMPONENTS) - set(TensorRT_FIND_COMPONENTS nvinfer nvinfer_plugin nvonnxparser) +if(TensorRT_ROOT) + set(_TensorRT_SEARCH_ROOT PATHS ${TensorRT_ROOT} NO_DEFAULT_PATH) + list(APPEND _TensorRT_SEARCHES _TensorRT_SEARCH_ROOT) endif() -set(TensorRT_LIBRARIES) -# find the include directory of TensorRT -find_path( - TensorRT_INCLUDE_DIR - NAMES NvInfer.h - PATHS ${TensorRT_ROOT} ENV TensorRT_ROOT - PATH_SUFFIXES include +# appends some common paths +set(_TensorRT_SEARCH_NORMAL + PATHS "/usr" ) +list(APPEND _TensorRT_SEARCHES _TensorRT_SEARCH_NORMAL) -string(FIND ${TensorRT_INCLUDE_DIR} "NOTFOUND" _include_dir_notfound) -if(NOT _include_dir_notfound EQUAL -1) - if(TensorRT_FIND_REQUIRED) - message(FATAL_ERROR "Fail to find TensorRT, please set TensorRT_ROOT. Include path not found.") - endif() - return() +# Include dir +foreach(search ${_TensorRT_SEARCHES}) + find_path(TensorRT_INCLUDE_DIR NAMES NvInfer.h ${${search}} PATH_SUFFIXES include) +endforeach() if(NOT TensorRT_LIBRARY) foreach(search ${_TensorRT_SEARCHES}) - find_library(TRT_NVONNX_PARSER NAMES nvonnxparser ${${search}} PATH_SUFFIXES lib lib64 lib/x64) - find_library(TRT_NVINFER NAMES nvinfer ${${search}} PATH_SUFFIXES lib lib64 lib/x64) - find_library(TRT_NVINFER_PLUGIN NAMES nvinfer_plugin ${${search}} PATH_SUFFIXES lib lib64 lib/x64) + find_library(TensorRT_LIBRARY NAMES nvinfer ${${search}} PATH_SUFFIXES lib) endforeach() - list(APPEND TensorRT_LIBRARY ${TRT_NVINFER} ${TRT_NVINFER_PLUGIN} ${TRT_NVONNX_PARSER}) - endif() -set(TensorRT_INCLUDE_DIRS ${TensorRT_INCLUDE_DIR}) - -# Extract version of tensorrt -if(EXISTS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h") - file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_MAJOR REGEX "^#define NV_TENSORRT_MAJOR [0-9]+.*$") - file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_MINOR REGEX "^#define NV_TENSORRT_MINOR [0-9]+.*$") - file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_PATCH REGEX "^#define NV_TENSORRT_PATCH [0-9]+.*$") - file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_TWEAK REGEX "^#define NV_TENSORRT_BUILD [0-9]+.*$") - string(REGEX REPLACE "^#define NV_TENSORRT_MAJOR ([0-9]+).*$" "\\1" TensorRT_VERSION_MAJOR "${TensorRT_MAJOR}") - string(REGEX REPLACE "^#define NV_TENSORRT_MINOR ([0-9]+).*$" "\\1" TensorRT_VERSION_MINOR "${TensorRT_MINOR}") - string(REGEX REPLACE "^#define NV_TENSORRT_PATCH ([0-9]+).*$" "\\1" TensorRT_VERSION_PATCH "${TensorRT_PATCH}") - string(REGEX REPLACE "^#define NV_TENSORRT_BUILD ([0-9]+).*$" "\\1" TensorRT_VERSION_TWEAK "${TensorRT_TWEAK}") - set(TensorRT_VERSION "${TensorRT_VERSION_MAJOR}.${TensorRT_VERSION_MINOR}.${TensorRT_VERSION_PATCH}.${TensorRT_VERSION_TWEAK}") -endif() +mark_as_advanced(TensorRT_INCLUDE_DIR) -function(_find_trt_component component) +if(TensorRT_INCLUDE_DIR AND EXISTS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h") + file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_MAJOR REGEX "^#define NV_TENSORRT_MAJOR [0-9]+.*$") + file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_MINOR REGEX "^#define NV_TENSORRT_MINOR [0-9]+.*$") + file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_PATCH REGEX "^#define NV_TENSORRT_PATCH [0-9]+.*$") - # Find library for component (ie nvinfer, nvparsers, etc...) 
- find_library( - TensorRT_${component}_LIBRARY - NAMES ${component} - PATHS ${TensorRT_ROOT} ${TENSORRT_LIBRARY_DIR} ENV TensorRT_ROOT - ) + string(REGEX REPLACE "^#define NV_TENSORRT_MAJOR ([0-9]+).*$" "\\1" TensorRT_VERSION_MAJOR "${TensorRT_MAJOR}") + string(REGEX REPLACE "^#define NV_TENSORRT_MINOR ([0-9]+).*$" "\\1" TensorRT_VERSION_MINOR "${TensorRT_MINOR}") + string(REGEX REPLACE "^#define NV_TENSORRT_PATCH ([0-9]+).*$" "\\1" TensorRT_VERSION_PATCH "${TensorRT_PATCH}") + set(TensorRT_VERSION_STRING "${TensorRT_VERSION_MAJOR}.${TensorRT_VERSION_MINOR}.${TensorRT_VERSION_PATCH}") + message("TensorRT_VERSION_STRING from ${TensorRT_INCLUDE_DIR}/NvInferVersion.h: ${TensorRT_VERSION_STRING}") +else() + message("${TensorRT_INCLUDE_DIR}/NvInferVersion.h not found") +endif() - string(FIND ${TensorRT_${component}_LIBRARY} "NOTFOUND" _library_not_found) +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(TensorRT REQUIRED_VARS TensorRT_LIBRARY TensorRT_INCLUDE_DIR VERSION_VAR TensorRT_VERSION_STRING) - if(NOT TensorRT_LIBRARY_DIR) - get_filename_component(_path ${TensorRT_${component}_LIBRARY} DIRECTORY) - set(TensorRT_LIBRARY_DIR - "${_path}" - CACHE INTERNAL "TensorRT_LIBRARY_DIR" - ) - endif() +if(TensorRT_FOUND) + set(TensorRT_INCLUDE_DIRS ${TensorRT_INCLUDE_DIR}) - if(NOT TensorRT_LIBRARY_DIRS) - get_filename_component(_path ${TensorRT_${component}_LIBRARY} DIRECTORY) - set(TensorRT_LIBRARY_DIRS - "${_path}" - CACHE INTERNAL "TensorRT_LIBRARY_DIRS" - ) + if(NOT TensorRT_LIBRARIES) + set(TensorRT_LIBRARIES ${TensorRT_LIBRARY}) endif() - # Library found, and doesn't already exists - if(_library_not_found EQUAL -1 AND NOT TARGET trt::${component}) - set(TensorRT_${component}_FOUND - TRUE - CACHE INTERNAL "Found ${component}" - ) - - # Create a target - add_library(trt::${component} IMPORTED INTERFACE) - target_include_directories(trt::${component} SYSTEM INTERFACE "${TensorRT_INCLUDE_DIRS}") - target_link_libraries(trt::${component} INTERFACE "${TensorRT_${component}_LIBRARY}") - set(TensorRT_LIBRARIES ${TensorRT_LIBRARIES} ${TensorRT_${component}_LIBRARY}) + if(NOT TARGET TensorRT::TensorRT) + add_library(TensorRT::TensorRT UNKNOWN IMPORTED) + set_target_properties(TensorRT::TensorRT PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${TensorRT_INCLUDE_DIRS}") + set_property(TARGET TensorRT::TensorRT APPEND PROPERTY IMPORTED_LOCATION "${TensorRT_LIBRARY}") endif() - -endfunction() - -# Find each components -foreach(component IN LISTS TensorRT_FIND_COMPONENTS) - _find_trt_component(${component}) -endforeach() - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(TensorRT HANDLE_COMPONENTS VERSION_VAR TensorRT_VERSION REQUIRED_VARS TensorRT_INCLUDE_DIR) +endif() diff --git a/src/Detector/tensorrt_yolo/cmake/FindTensorRT.cmake_ b/src/Detector/tensorrt_yolo/cmake/FindTensorRT.cmake_ new file mode 100644 index 000000000..f4f9f42c0 --- /dev/null +++ b/src/Detector/tensorrt_yolo/cmake/FindTensorRT.cmake_ @@ -0,0 +1,124 @@ +# ~~~ +# Copyright 2021 Olivier Le Doeuff +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be 
included in all copies or substantial portions of the Software. +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# This module defines the following variables: +# +# - TensorRT_FOUND: A boolean specifying whether or not TensorRT was found. +# - TensorRT_VERSION: The exact version of TensorRT found +# - TensorRT_VERSION_MAJOR: The major version of TensorRT. +# - TensorRT_VERSION_MINOR: The minor version of TensorRT. +# - TensorRT_VERSION_PATCH: The patch version of TensorRT. +# - TensorRT_VERSION_TWEAK: The tweak version of TensorRT. +# - TensorRT_INCLUDE_DIRS: The path to TensorRT ``include`` folder containing the header files required to compile a project linking against TensorRT. +# - TensorRT_LIBRARY_DIRS: The path to TensorRT library directory that contains libraries. +# +# This module create following targets: +# - trt::nvinfer +# - trt::nvinfer_plugin +# - trt::nvonnxparser +# - trt::nvparsers +# This script was inspired from https://github.com/NicolasIRAGNE/CMakeScripts +# This script was inspired from https://github.com/NVIDIA/tensorrt-laboratory/blob/master/cmake/FindTensorRT.cmake +# +# Hints +# ^^^^^ +# A user may set ``TensorRT_ROOT`` to an installation root to tell this module where to look. +# ~~~ + +if(NOT TensorRT_FIND_COMPONENTS) + set(TensorRT_FIND_COMPONENTS nvinfer nvinfer_plugin nvonnxparser) +endif() +set(TensorRT_LIBRARIES) + +# find the include directory of TensorRT +find_path( + TensorRT_INCLUDE_DIR + NAMES NvInfer.h + PATHS ${TensorRT_ROOT} ENV TensorRT_ROOT + PATH_SUFFIXES include +) + +string(FIND ${TensorRT_INCLUDE_DIR} "NOTFOUND" _include_dir_notfound) +if(NOT _include_dir_notfound EQUAL -1) + if(TensorRT_FIND_REQUIRED) + message(FATAL_ERROR "Fail to find TensorRT, please set TensorRT_ROOT. 
Include path not found.") + endif() + return() + +if(NOT TensorRT_LIBRARY) + foreach(search ${_TensorRT_SEARCHES}) + find_library(TRT_NVONNX_PARSER NAMES nvonnxparser ${${search}} PATH_SUFFIXES lib lib64 lib/x64) + find_library(TRT_NVINFER NAMES nvinfer ${${search}} PATH_SUFFIXES lib lib64 lib/x64) + find_library(TRT_NVINFER_PLUGIN NAMES nvinfer_plugin ${${search}} PATH_SUFFIXES lib lib64 lib/x64) + endforeach() + list(APPEND TensorRT_LIBRARY ${TRT_NVINFER} ${TRT_NVINFER_PLUGIN} ${TRT_NVONNX_PARSER}) + +endif() +set(TensorRT_INCLUDE_DIRS ${TensorRT_INCLUDE_DIR}) + +# Extract version of tensorrt +if(EXISTS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h") + file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_MAJOR REGEX "^#define NV_TENSORRT_MAJOR [0-9]+.*$") + file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_MINOR REGEX "^#define NV_TENSORRT_MINOR [0-9]+.*$") + file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_PATCH REGEX "^#define NV_TENSORRT_PATCH [0-9]+.*$") + file(STRINGS "${TensorRT_INCLUDE_DIR}/NvInferVersion.h" TensorRT_TWEAK REGEX "^#define NV_TENSORRT_BUILD [0-9]+.*$") + + string(REGEX REPLACE "^#define NV_TENSORRT_MAJOR ([0-9]+).*$" "\\1" TensorRT_VERSION_MAJOR "${TensorRT_MAJOR}") + string(REGEX REPLACE "^#define NV_TENSORRT_MINOR ([0-9]+).*$" "\\1" TensorRT_VERSION_MINOR "${TensorRT_MINOR}") + string(REGEX REPLACE "^#define NV_TENSORRT_PATCH ([0-9]+).*$" "\\1" TensorRT_VERSION_PATCH "${TensorRT_PATCH}") + string(REGEX REPLACE "^#define NV_TENSORRT_BUILD ([0-9]+).*$" "\\1" TensorRT_VERSION_TWEAK "${TensorRT_TWEAK}") + set(TensorRT_VERSION "${TensorRT_VERSION_MAJOR}.${TensorRT_VERSION_MINOR}.${TensorRT_VERSION_PATCH}.${TensorRT_VERSION_TWEAK}") +endif() + +function(_find_trt_component component) + + # Find library for component (ie nvinfer, nvparsers, etc...) 
+ find_library( + TensorRT_${component}_LIBRARY + NAMES ${component} + PATHS ${TensorRT_ROOT} ${TENSORRT_LIBRARY_DIR} ENV TensorRT_ROOT + ) + + string(FIND ${TensorRT_${component}_LIBRARY} "NOTFOUND" _library_not_found) + + if(NOT TensorRT_LIBRARY_DIR) + get_filename_component(_path ${TensorRT_${component}_LIBRARY} DIRECTORY) + set(TensorRT_LIBRARY_DIR + "${_path}" + CACHE INTERNAL "TensorRT_LIBRARY_DIR" + ) + endif() + + if(NOT TensorRT_LIBRARY_DIRS) + get_filename_component(_path ${TensorRT_${component}_LIBRARY} DIRECTORY) + set(TensorRT_LIBRARY_DIRS + "${_path}" + CACHE INTERNAL "TensorRT_LIBRARY_DIRS" + ) + endif() + + # Library found, and doesn't already exists + if(_library_not_found EQUAL -1 AND NOT TARGET trt::${component}) + set(TensorRT_${component}_FOUND + TRUE + CACHE INTERNAL "Found ${component}" + ) + + # Create a target + add_library(trt::${component} IMPORTED INTERFACE) + target_include_directories(trt::${component} SYSTEM INTERFACE "${TensorRT_INCLUDE_DIRS}") + target_link_libraries(trt::${component} INTERFACE "${TensorRT_${component}_LIBRARY}") + set(TensorRT_LIBRARIES ${TensorRT_LIBRARIES} ${TensorRT_${component}_LIBRARY}) + endif() + +endfunction() + +# Find each components +foreach(component IN LISTS TensorRT_FIND_COMPONENTS) + _find_trt_component(${component}) +endforeach() + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(TensorRT HANDLE_COMPONENTS VERSION_VAR TensorRT_VERSION REQUIRED_VARS TensorRT_INCLUDE_DIR) diff --git a/src/Detector/tensorrt_yolo/yolo.cpp b/src/Detector/tensorrt_yolo/yolo.cpp index 4ee202b6f..4d7f1a71a 100644 --- a/src/Detector/tensorrt_yolo/yolo.cpp +++ b/src/Detector/tensorrt_yolo/yolo.cpp @@ -104,7 +104,9 @@ Yolo::Yolo(const NetworkInfo& networkInfo, const InferParams& inferParams) m_InputBindingIndex = m_Engine->getBindingIndex(m_InputBlobName.c_str()); #endif assert(m_InputBindingIndex != -1); +#if (NV_TENSORRT_MAJOR < 9) assert(m_BatchSize <= static_cast(m_Engine->getMaxBatchSize())); +#endif allocateBuffers(); NV_CUDA_CHECK(cudaStreamCreate(&m_CudaStream)); assert(verifyYoloEngine()); @@ -1327,6 +1329,7 @@ void Yolo::allocateBuffers() bool Yolo::verifyYoloEngine() { +#if (NV_TENSORRT_MAJOR < 9) assert((m_Engine->getNbBindings() == (1 + m_OutputTensors.size()) && "Binding info doesn't match between cfg and engine file \n")); @@ -1343,6 +1346,7 @@ bool Yolo::verifyYoloEngine() assert(m_Engine->getBindingName(m_InputBindingIndex) == m_InputBlobName && "Input blob name doesn't match between config and engine file"); assert(get3DTensorVolume(m_Engine->getBindingDimensions(m_InputBindingIndex)) == m_InputSize); +#endif return true; }
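Note on the OCVDNNDetector.cpp hunk above: for the YOLOv5/8/9/10/11 branch the output blob dimensions are read from `detections[0].size[1]` and `detections[0].size[2]`. The sketch below shows, under stated assumptions, how a YOLOv8/11-style blob (e.g. 1x84x8400 for COCO) can be decoded with a confidence filter and NMS. It is illustrative only: the tensor layout, the helper name `DecodeYolo`, and the thresholds are assumptions rather than the project's actual post-processing, and rescaling boxes back to the source frame is omitted.

```cpp
// Hedged sketch: decoding a YOLOv8/11-style cv::dnn output blob.
// Assumes the exported ONNX head produces [1, 4 + num_classes, num_boxes]
// (e.g. 1x84x8400 for COCO). Not the project's code; YOLOv10 exports differ.
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <vector>

std::vector<int> DecodeYolo(cv::Mat output, float confThreshold, float nmsThreshold,
                            std::vector<cv::Rect>& boxes, std::vector<float>& scores,
                            std::vector<int>& classIds)
{
    // [1, 84, 8400] -> [8400, 84]: one row per candidate box after transpose
    int dimensions = output.size[1];
    int rows = output.size[2];
    output = output.reshape(1, dimensions); // 84 x 8400
    cv::transpose(output, output);          // 8400 x 84

    for (int i = 0; i < rows; ++i)
    {
        float* row = output.ptr<float>(i);
        cv::Mat classScores(1, dimensions - 4, CV_32F, row + 4);
        cv::Point classId;
        double maxScore = 0.;
        cv::minMaxLoc(classScores, nullptr, &maxScore, nullptr, &classId);
        if (maxScore < confThreshold)   // e.g. 0.5 as set in settings_yolov11.ini
            continue;

        float cx = row[0], cy = row[1], w = row[2], h = row[3];
        boxes.emplace_back(cvRound(cx - w / 2), cvRound(cy - h / 2), cvRound(w), cvRound(h));
        scores.push_back(static_cast<float>(maxScore));
        classIds.push_back(classId.x);
    }

    std::vector<int> keep;
    cv::dnn::NMSBoxes(boxes, scores, confThreshold, nmsThreshold, keep);
    return keep; // indices of boxes surviving NMS
}
```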
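Note on the YoloONNX.cpp hunk: the change guards engine creation so that a null result from `buildSerializedNetwork()` is logged instead of dereferenced, and the function now returns through a single `res` flag. A minimal sketch of that guarded TensorRT 8+ build path follows; the function signature and logger handling are placeholders, not the exact ones used in YoloONNX.cpp.

```cpp
// Hedged sketch of a guarded TensorRT 8+ engine build: buildSerializedNetwork()
// may return nullptr, so the engine is only deserialized when serialization
// succeeded. Mirrors the pattern in the hunk above, with placeholder names.
#include <NvInfer.h>
#include <iostream>
#include <memory>

std::shared_ptr<nvinfer1::ICudaEngine> BuildEngine(nvinfer1::IBuilder& builder,
                                                   nvinfer1::INetworkDefinition& network,
                                                   nvinfer1::IBuilderConfig& config,
                                                   nvinfer1::ILogger& logger)
{
    std::shared_ptr<nvinfer1::ICudaEngine> engine;

    nvinfer1::IHostMemory* mem = builder.buildSerializedNetwork(network, config);
    if (!mem)
    {
        std::cerr << "Unable to buildSerializedNetwork" << std::endl;
        return engine; // empty pointer -> caller can report failure
    }

    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(logger);
    if (runtime)
        engine.reset(runtime->deserializeCudaEngine(mem->data(), mem->size()));

    // Same lifetime handling as the patched code: the runtime and the
    // serialized blob are released once the engine has been deserialized.
    delete runtime;
    delete mem;
    return engine;
}
```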
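Note on the yolo.cpp hunk: the `getMaxBatchSize()` / `getNbBindings()` / `getBindingDimensions()` assertions are now compiled only for `NV_TENSORRT_MAJOR < 9`, since the binding-index API was deprecated and later removed in newer TensorRT releases. If equivalent checks are reintroduced for those versions, the name-based I/O tensor API (available since TensorRT 8.5) is the usual replacement. The sketch below is illustrative only and is not the project's code.

```cpp
// Hedged sketch: enumerating engine I/O tensors with the name-based API
// that replaces getNbBindings()/getBindingDimensions() in newer TensorRT.
#include <NvInfer.h>
#include <string>
#include <vector>

struct TensorInfo
{
    std::string name;
    nvinfer1::Dims dims;
    bool isInput = false;
};

std::vector<TensorInfo> EnumerateTensors(const nvinfer1::ICudaEngine& engine)
{
    std::vector<TensorInfo> tensors;
    for (int i = 0; i < engine.getNbIOTensors(); ++i)
    {
        const char* name = engine.getIOTensorName(i);
        TensorInfo info;
        info.name = name;
        info.dims = engine.getTensorShape(name);
        info.isInput = (engine.getTensorIOMode(name) == nvinfer1::TensorIOMode::kINPUT);
        tensors.push_back(info);
    }
    return tensors; // e.g. verify the input tensor name and volume against the config
}
```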