From d7c7058b809e98d64f4704541af3635f873e6155 Mon Sep 17 00:00:00 2001 From: Burhan <62214284+Burhan-Q@users.noreply.github.com> Date: Sat, 3 Aug 2024 11:36:38 -0400 Subject: [PATCH 1/7] minor fixes train and predict kwords examples, add track examples #11 --- snippets/examples.json | 93 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 88 insertions(+), 5 deletions(-) diff --git a/snippets/examples.json b/snippets/examples.json index 4fa1514..0973d39 100644 --- a/snippets/examples.json +++ b/snippets/examples.json @@ -187,8 +187,8 @@ " save_txt=${19:False}, # (bool) save results as .txt file", " save_conf=${20:False}, # (bool) save results with confidence scores", " save_crop=${21:False}, # (bool) save cropped images with results", - " stream=${22:False} # (bool) for processing long videos or numerous images with reduced memory usage by returning a generator", - " verbose=${23:True} # (bool) enable/disable verbose inference logging in the terminal", + " stream=${22:False}, # (bool) for processing long videos or numerous images with reduced memory usage by returning a generator", + " verbose=${23:True}, # (bool) enable/disable verbose inference logging in the terminal", ")", "# reference https://docs.ultralytics.com/modes/predict/" ], @@ -206,7 +206,7 @@ "'''", "model = YOLO(\"yolov${1|8,5,9,10|}${2|n,s,m,l,x,c,e|}${3|.,-cls.,-seg.,-obb.,-pose.,-world.,-worldv2.|}pt\")", "results: list = model.train(", - " data=${4:\"coco8.yaml\"}, # (str, optional) path to data file, i.e. coco8.yaml", + " data=\"${4:coco8.yaml}\", # (str, optional) path to data file, i.e. coco8.yaml", " epochs=${5:100}, # (int) number of epochs to train for", " time=${6:None}, # (float, optional) number of hours to train for, overrides epochs if supplied", " patience=${7:100}, # (int) epochs to wait for no observable improvement for early stopping of training", @@ -272,8 +272,8 @@ " mixup=${64:0.0}, # (float) image mixup (probability)", " copy_paste=${65:0.0}, # (float) segment copy-paste (probability)", " auto_augment=\"${66|randaugment,autoaugment,augmix|}\", # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)", - " erasing=${67:0.4}, # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0.", - " crop_fraction=${68:1.0}, # (float) image crop fraction for classification (0.1-1), 1.0 means no crop, must be greater than 0.", + " erasing=${67:0.4}, # (float) probability of random erasing during classification training [0-0.9], 0 is no erasing, must be < 1.0.", + " crop_fraction=${68:1.0}, # (float) image crop fraction for classify [0.1-1], 1.0 is no cropping, must be > 0.", ")", "# reference https://docs.ultralytics.com/modes/train/" ], @@ -315,5 +315,88 @@ "# See docs page about SAM2 https://docs.ultralytics.com/models/sam-2 for more information" ], "description": "Example showing use of SAM2 with bounding box and point prompts." 
+ }, + + "Ultralytics Track Looping Frames with Persistence":{ + "prefix":"ultra.example-tracking-loop-persist", + "body":[ + "import cv2", + "", + "from ultralytics import YOLO", + "", + "# Load the YOLOv8 model", + "model = YOLO(\"yolov8${1|n,s,m,l,x|}.pt\", task=\"detect\")", + "", + "# Open the video file", + "video_path = \"${2:path/to/video.mp4}\"", + "cap = cv2.VideoCapture(video_path)", + "", + "# Loop through the video frames", + "while cap.isOpened():", + " # Read a frame from the video", + " success, frame = cap.read()", + "", + " if success:", + " # Run YOLOv8 tracking on the frame, persisting tracks between frames", + " results = model.track(frame, persist=True)", + "", + " # Visualize the results on the frame", + " annotated_frame = results[0].plot()", + "", + " # Display the annotated frame", + " cv2.imshow(\"YOLOv8 Tracking\", annotated_frame)", + "", + " # Break the loop if 'q' is pressed", + " if cv2.waitKey(1) & 0xFF == ord(\"q\"):", + " break", + " else:", + " # Break the loop if the end of the video is reached", + " break", + "", + "# Release the video capture object and close the display window", + "cap.release()", + "cv2.destroyAllWindows()", + "# reference https://docs.ultralytics.com/modes/track/", + "$0" + ], + "description": "Example of how to open video, loop frames, and maintain tracked object IDs." + }, + + "Ultralytics Track with all Keywords":{ + "prefix":"ultra.example-track-kwords", + "body":[ + "from ultralytics import YOLO", + "", + "src=\"${1:https://youtu.be/LNwODJXcvt4}\"", + "model = YOLO(\"yolov8${2|n,s,m,l,x|}${3|.,-seg.,-obb.,-pose.|}pt\")", + "results = model.track(", + " source=src, # (str, optional) source directory for images or videos", + " imgsz=${5:640}, # (int | list) input images size as int or list[w,h] for predict", + " conf=${6:0.25}, # (float) minimum confidence threshold", + " iou=${7:0.7}, # (float) intersection over union (IoU) threshold for NMS", + " persist=${8:False}, # (bool) persist track-ids across frames", + " tracker=\"${9|botsort,bytetrack|}\", # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml]", + " vid_stride=${10:1}, # (int) video frame-rate stride", + " stream_buffer=${11:False}, # (bool) buffer all streaming frames (True) or return the most recent frame (False)", + " visualize=${12:False}, # (bool) visualize model features", + " augment=${13:False}, # (bool) apply image augmentation to prediction sources", + " agnostic_nms=${14:False}, # (bool) class-agnostic NMS", + " classes=${15:None}, # (int | list[int], optional) filter results by class, i.e. 
classes=0, or classes=[0,2,3]", + " retina_masks=${16:False}, # (bool) use high-resolution segmentation masks", + " embed=${17:None}, # (list[int], optional) return feature vectors/embeddings from given layers", + " show=${18:False}, # (bool) show predicted images and videos if environment allows", + " save=${19:True}, # (bool) save prediction results", + " save_frames=${20:False}, # (bool) save predicted individual video frames", + " save_txt=${21:False}, # (bool) save results as .txt file", + " save_conf=${20:False}, # (bool) save results with confidence scores", + " save_crop=${21:False}, # (bool) save cropped images with results", + " stream=${22:False}, # (bool) for processing long videos or numerous images with reduced memory usage by returning a generator", + " verbose=${23:True}, # (bool) enable/disable verbose inference logging in the terminal", + ")", + "# reference https://docs.ultralytics.com/modes/track/", + "# reference https://docs.ultralytics.com/modes/predict/ (tracking accepts same keyword arguments as predict)", + "$0" + ], + "description": "Example showing all keyword arguments available for track mode." } } \ No newline at end of file From 13e21a0629a1e04b4bd8a6a7fa1c8d3ad95d4889 Mon Sep 17 00:00:00 2001 From: Burhan <62214284+Burhan-Q@users.noreply.github.com> Date: Sat, 3 Aug 2024 11:37:19 -0400 Subject: [PATCH 2/7] add new kwargs snippets for quick insert of model methods with all keyword arguments --- snippets/kwargs.json | 140 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 snippets/kwargs.json diff --git a/snippets/kwargs.json b/snippets/kwargs.json new file mode 100644 index 0000000..2ca3cc0 --- /dev/null +++ b/snippets/kwargs.json @@ -0,0 +1,140 @@ +// Ultralytics YOLO 🚀, AGPL-3.0 license + +{ + "Ultralytics YOLO Predict Keyword Arguments":{ + "prefix": "ultra.kwargs-predict", + "body":[ + "model.predict(", + " source=${1:src}, # (str, optional) source directory for images or videos", + " imgsz=${5:640}, # (int | list) input images size as int or list[w,h] for predict", + " conf=${6:0.25}, # (float) minimum confidence threshold", + " iou=${7:0.7}, # (float) intersection over union (IoU) threshold for NMS", + " vid_stride=${8:1}, # (int) video frame-rate stride", + " stream_buffer=${9:False}, # (bool) buffer all streaming frames (True) or return the most recent frame (False)", + " visualize=${10:False}, # (bool) visualize model features", + " augment=${11:False}, # (bool) apply image augmentation to prediction sources", + " agnostic_nms=${12:False}, # (bool) class-agnostic NMS", + " classes=${13:None}, # (int | list[int], optional) filter results by class, i.e. 
classes=0, or classes=[0,2,3]", + " retina_masks=${14:False}, # (bool) use high-resolution segmentation masks", + " embed=${15:None}, # (list[int], optional) return feature vectors/embeddings from given layers", + " show=${16:False}, # (bool) show predicted images and videos if environment allows", + " save=${17:True}, # (bool) save prediction results", + " save_frames=${18:False}, # (bool) save predicted individual video frames", + " save_txt=${19:False}, # (bool) save results as .txt file", + " save_conf=${20:False}, # (bool) save results with confidence scores", + " save_crop=${21:False}, # (bool) save cropped images with results", + " stream=${22:False}, # (bool) for processing long videos or numerous images with reduced memory usage by returning a generator", + " verbose=${23:True}, # (bool) enable/disable verbose inference logging in the terminal", + ")$0" + ], + "description": "Snippet using model predict method, including all keyword arguments and defaults." + }, + + "Ultralytics Track Keyword Arguments":{ + "prefix":"ultra.kwargs-track", + "body":[ + "model.track(", + " source=${1:src}, # (str, optional) source directory for images or videos", + " imgsz=${5:640}, # (int | list) input images size as int or list[w,h] for predict", + " conf=${6:0.25}, # (float) minimum confidence threshold", + " iou=${7:0.7}, # (float) intersection over union (IoU) threshold for NMS", + " persist=${8:False}, # (bool) persist track-ids across frames", + " tracker=\"${9|botsort,bytetrack|}\", # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml]", + " vid_stride=${10:1}, # (int) video frame-rate stride", + " stream_buffer=${11:False}, # (bool) buffer all streaming frames (True) or return the most recent frame (False)", + " visualize=${12:False}, # (bool) visualize model features", + " augment=${13:False}, # (bool) apply image augmentation to prediction sources", + " agnostic_nms=${14:False}, # (bool) class-agnostic NMS", + " classes=${15:None}, # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3]", + " retina_masks=${16:False}, # (bool) use high-resolution segmentation masks", + " embed=${17:None}, # (list[int], optional) return feature vectors/embeddings from given layers", + " show=${18:False}, # (bool) show predicted images and videos if environment allows", + " save=${19:True}, # (bool) save prediction results", + " save_frames=${20:False}, # (bool) save predicted individual video frames", + " save_txt=${21:False}, # (bool) save results as .txt file", + " save_conf=${20:False}, # (bool) save results with confidence scores", + " save_crop=${21:False}, # (bool) save cropped images with results", + " stream=${22:False}, # (bool) for processing long videos or numerous images with reduced memory usage by returning a generator", + " verbose=${23:True}, # (bool) enable/disable verbose inference logging in the terminal", + ")$0" + ], + "description": "Snippet using model track method, including all keyword arguments and defaults." + }, + + "Ultralytics YOLO Train Keyword Arguments":{ + "prefix": "ultra.kwargs-train", + "body":[ + "${3:model}.train(", + " data=\"${4:coco8.yaml}\", # (str, optional) path to data file, i.e. 
coco8.yaml", + " epochs=${5:100}, # (int) number of epochs to train for", + " time=${6:None}, # (float, optional) number of hours to train for, overrides epochs if supplied", + " patience=${7:100}, # (int) epochs to wait for no observable improvement for early stopping of training", + " batch=${8:16}, # (int) number of images per batch (-1 for AutoBatch)", + " imgsz=${9:640}, # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes", + " save=${10:True}, # (bool) save train checkpoints and predict results", + " save_period=${11:-1}, # (int) Save checkpoint every x epochs (disabled if < 1)", + " cache=${12:False}, # (bool) True/ram, disk or False. Use cache for data loading", + " device=${13:None}, # (int | str | list, optional) device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu", + " workers=${14:8}, # (int) number of worker threads for data loading (per RANK if DDP)", + " project=${15:None}, # (str, optional) project name", + " name=${16:None}, # (str, optional) experiment name, results saved to 'project/name' directory", + " exist_ok=${17:False}, # (bool) whether to overwrite existing experiment", + " val=${18:True}, # (bool) validate/test during training", + " pretrained=${19:True}, # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str)", + " optimizer=\"${20|SGD,Adam,Adamax,AdamW,NAdam,RAdam,RMSProp,auto|}\", # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto]", + " verbose=${21:True}, # (bool) whether to print verbose output", + " seed=${22:0}, # (int) random seed for reproducibility", + " deterministic=${23:True}, # (bool) whether to enable deterministic mode", + " single_cls=${24:False}, # (bool) train multi-class data as single-class", + " rect=${25:False}, # (bool) rectangular training if mode='train' or rectangular validation if mode='val'", + " cos_lr=${26:False}, # (bool) use cosine learning rate scheduler", + " close_mosaic=${27:10}, # (int) disable mosaic augmentation for final epochs (0 to disable)", + " resume=${28:False}, # (bool) resume training from last checkpoint", + " amp=${29:True}, # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check", + " fraction=${30:1.0}, # (float) dataset fraction to train on (default is 1.0, all images in train set)", + " profile=${31:False}, # (bool) profile ONNX and TensorRT speeds during training for loggers", + " freeze=${32:None}, # (int | list, optional) freeze first n layers, or freeze list of layer indices during training", + " multi_scale=${33:False}, # (bool) Whether to use multiscale during training", + " plots=${34:True}, # (bool) save plots and images during train/val", + " # Segmentation", + " overlap_mask=${35:True}, # (bool) masks should overlap during training (segment train only)", + " mask_ratio=${36:4}, # (int) mask downsample ratio (segment train only)", + " # Classification", + " dropout=${37:0.0}, # (float) use dropout regularization (classify train only)", + " # Hyperparameters", + " lr0=${38:0.01}, # (float) initial learning rate (i.e. 
SGD=1E-2, Adam=1E-3)", + " lrf=${39:0.01}, # (float) final learning rate (lr0 * lrf)", + " momentum=${40:0.937}, # (float) SGD momentum/Adam beta1", + " weight_decay=${41:0.0005}, # (float) optimizer weight decay 5e-4", + " warmup_epochs=${42:3.0}, # (float) warmup epochs (fractions ok)", + " warmup_momentum=${43:0.8}, # (float) warmup initial momentum", + " warmup_bias_lr=${44:0.1}, # (float) warmup initial bias lr", + " box=${45:7.5}, # (float) box loss gain", + " cls=${46:0.5}, # (float) cls loss gain (scale with pixels)", + " dfl=${47:1.5}, # (float) dfl loss gain", + " pose=${48:12.0}, # (float) pose loss gain", + " kobj=${49:1.0}, # (float) keypoint obj loss gain", + " label_smoothing=${50:0.0}, # (float) label smoothing (fraction)", + " nbs=${51:64}, # (int) nominal batch size", + " hsv_h=${52:0.015}, # (float) image HSV-Hue augmentation (fraction)", + " hsv_s=${53:0.7}, # (float) image HSV-Saturation augmentation (fraction)", + " hsv_v=${54:0.4}, # (float) image HSV-Value augmentation (fraction)", + " degrees=${55:0.0}, # (float) image rotation (+/- deg)", + " translate=${56:0.1}, # (float) image translation (+/- fraction)", + " scale=${57:0.5}, # (float) image scale (+/- gain)", + " shear=${58:0.0}, # (float) image shear (+/- deg)", + " perspective=${59:0.0}, # (float) image perspective (+/- fraction), range 0-0.001", + " flipud=${60:0.0}, # (float) image flip up-down (probability)", + " fliplr=${61:0.5}, # (float) image flip left-right (probability)", + " bgr=${62:0.0}, # (float) image channel BGR (probability)", + " mosaic=${63:1.0}, # (float) image mosaic (probability)", + " mixup=${64:0.0}, # (float) image mixup (probability)", + " copy_paste=${65:0.0}, # (float) segment copy-paste (probability)", + " auto_augment=\"${66|randaugment,autoaugment,augmix|}\", # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)", + " erasing=${67:0.4}, # (float) probability of random erasing during classify training [0-0.9], 0 is no erasing, must be < 1.0.", + " crop_fraction=${68:1.0}, # (float) image crop fraction for classification [0.1-1], 1.0 is no crop, must be > 0.", + ")$0" + ], + "description": "Snippet using model train method, including all keyword arguments and defaults." 
+ } +} \ No newline at end of file From 8bea621d8c1e9d8755316c4f924257372f16f139 Mon Sep 17 00:00:00 2001 From: Burhan <62214284+Burhan-Q@users.noreply.github.com> Date: Sat, 3 Aug 2024 11:38:18 -0400 Subject: [PATCH 3/7] add model name as placeholder so user can change if desired --- snippets/kwargs.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snippets/kwargs.json b/snippets/kwargs.json index 2ca3cc0..c60cb2d 100644 --- a/snippets/kwargs.json +++ b/snippets/kwargs.json @@ -4,8 +4,8 @@ "Ultralytics YOLO Predict Keyword Arguments":{ "prefix": "ultra.kwargs-predict", "body":[ - "model.predict(", - " source=${1:src}, # (str, optional) source directory for images or videos", + "${1:model}.predict(", + " source=${4:src}, # (str, optional) source directory for images or videos", " imgsz=${5:640}, # (int | list) input images size as int or list[w,h] for predict", " conf=${6:0.25}, # (float) minimum confidence threshold", " iou=${7:0.7}, # (float) intersection over union (IoU) threshold for NMS", @@ -33,8 +33,8 @@ "Ultralytics Track Keyword Arguments":{ "prefix":"ultra.kwargs-track", "body":[ - "model.track(", - " source=${1:src}, # (str, optional) source directory for images or videos", + "${1:model}.track(", + " source=${4:src}, # (str, optional) source directory for images or videos", " imgsz=${5:640}, # (int | list) input images size as int or list[w,h] for predict", " conf=${6:0.25}, # (float) minimum confidence threshold", " iou=${7:0.7}, # (float) intersection over union (IoU) threshold for NMS", @@ -64,7 +64,7 @@ "Ultralytics YOLO Train Keyword Arguments":{ "prefix": "ultra.kwargs-train", "body":[ - "${3:model}.train(", + "${1:model}.train(", " data=\"${4:coco8.yaml}\", # (str, optional) path to data file, i.e. coco8.yaml", " epochs=${5:100}, # (int) number of epochs to train for", " time=${6:None}, # (float, optional) number of hours to train for, overrides epochs if supplied", From b52a1a69f4f8a90942118e7c30cfe5d3fd2b2a91 Mon Sep 17 00:00:00 2001 From: Burhan <62214284+Burhan-Q@users.noreply.github.com> Date: Sat, 3 Aug 2024 11:41:03 -0400 Subject: [PATCH 4/7] shorten prefix to use "track" instead of "tracking" --- snippets/examples.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snippets/examples.json b/snippets/examples.json index 0973d39..65e8639 100644 --- a/snippets/examples.json +++ b/snippets/examples.json @@ -318,7 +318,7 @@ }, "Ultralytics Track Looping Frames with Persistence":{ - "prefix":"ultra.example-tracking-loop-persist", + "prefix":"ultra.example-track-loop-persist", "body":[ "import cv2", "", From 6894b008d6c007b7f31c4c95b7669d86ef20e003 Mon Sep 17 00:00:00 2001 From: Burhan <62214284+Burhan-Q@users.noreply.github.com> Date: Sat, 3 Aug 2024 12:16:45 -0400 Subject: [PATCH 5/7] add ultra.kwargs-val snippet --- snippets/kwargs.json | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/snippets/kwargs.json b/snippets/kwargs.json index c60cb2d..c1eb117 100644 --- a/snippets/kwargs.json +++ b/snippets/kwargs.json @@ -136,5 +136,28 @@ ")$0" ], "description": "Snippet using model train method, including all keyword arguments and defaults." + }, + + "Ultralytics YOLO Validation Keyword Arguments":{ + "prefix": "ultra.kwargs-val", + "body":[ + "${1:model}.val(", + " data=${2:None}, # (str) Specifies the path to the dataset configuration file (e.g., coco8.yaml). 
This file includes paths to validation data, class names, and number of classes.", + " imgsz=${3:640}, # (int) Defines the size of input images. All images are resized to this dimension before processing.", + " batch=${4:16}, # (int) Sets the number of images per batch. Use -1 for AutoBatch, which automatically adjusts based on GPU memory availability.", + " save_json=${5|False,True|}, # (bool) If True, saves the results to a JSON file for further analysis or integration with other tools.", + " save_hybrid=${6|False,True|}, # (bool) If True, saves a hybrid version of labels that combines original annotations with additional model predictions.", + " conf=${7:0.001}, # (float) Sets the minimum confidence threshold for detections. Detections with confidence below this threshold are discarded.", + " iou=${8:0.6}, # (float) Sets the Intersection Over Union (IoU) threshold for Non-Maximum Suppression (NMS). Helps in reducing duplicate detections.", + " max_det=${9:300}, # (int) Limits the maximum number of detections per image. Useful in dense scenes to prevent excessive detections.", + " half=${10|True,False|}, # (bool) Enables half-precision (FP16) computation, reducing memory usage and potentially increasing speed with minimal impact on accuracy.", + " device=${11:None}, # (str) Specifies the device for validation (cpu, cuda:0, etc.). Allows flexibility in utilizing CPU or GPU resources.", + " dnn=${12|False,True|}, # (bool) If True, uses the OpenCV DNN module for ONNX model inference, offering an alternative to PyTorch inference methods.", + " plots=${13|False,True|}, # (bool) When set to True, generates and saves plots of predictions versus ground truth for visual evaluation of the model's performance.", + " rect=${14|False,True|}, # (bool) If True, uses rectangular inference for batching, reducing padding and potentially increasing speed and efficiency.", + " split=\"${15|val,test,train|}\", # (str) Determines the dataset split to use for validation (val, test, or train). Allows flexibility in choosing the data segment for performance evaluation.", + ")$0" + ], + "description": "Snippet using model val method, including all keyword arguments and defaults." 
} } \ No newline at end of file From fca50911201f1f79b631672281c0ecdd9bbda33c Mon Sep 17 00:00:00 2001 From: Burhan <62214284+Burhan-Q@users.noreply.github.com> Date: Sat, 3 Aug 2024 12:17:55 -0400 Subject: [PATCH 6/7] add kwargs snippets and increment version --- package.json | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/package.json b/package.json index 5a5f5aa..88de576 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,7 @@ "name": "ultralytics-snippets", "displayName": "Ultralytics Snippets", "description": "Snippets to use with the Ultralytics Python library.", - "version": "0.1.6", + "version": "0.1.7", "publisher": "Ultralytics", "repository": { "type": "git", @@ -47,6 +47,10 @@ { "language": "python", "path": "./snippets/results.json" + }, + { + "language": "python", + "path": "./snippets/kwargs.json" } ], "qna": false From 5e512157100bd39c6acc1e4e45f37e0cb4870df6 Mon Sep 17 00:00:00 2001 From: Burhan <62214284+Burhan-Q@users.noreply.github.com> Date: Sat, 3 Aug 2024 12:18:34 -0400 Subject: [PATCH 7/7] update Readme with new snippet entries --- README.md | 53 +++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index f615238..34df4d9 100644 --- a/README.md +++ b/README.md @@ -210,6 +210,8 @@ The Example snippets are more "complete" blocks of code that can be used for boi | `ultra.example-nas-predict` | Setup Ultralytics NAS to perform inference (simple). | | `ultra.example-rtdetr-predict` | Setup Ultralytics RT-DETR to perform inference (simple). | | `ultra.example-callback` | Example showing how to add a custom callback function. | +| `ultra.example-track-loop-persist` | Example of how to open video, loop frames, and maintain tracked object IDs. | +| `ultra.example-track-kwords` | Example showing all keyword arguments available for track mode. | ### Snippet Example @@ -231,6 +233,48 @@ for result in results:

+## KWARGS
+
+Use these to quickly insert calls to the model methods defined in [modes], complete with all keyword arguments, default values, and commented descriptions. Each snippet includes `model` as the default variable name, which is an editable field you can reach through the tab stops.
+
+| Prefix                 | Description                                                                          | Reference |
+| ---------------------- | ------------------------------------------------------------------------------------ | --------- |
+| `ultra.kwargs-predict` | Snippet using model `predict` method, including all keyword arguments and defaults.   | [predict] |
+| `ultra.kwargs-train`   | Snippet using model `train` method, including all keyword arguments and defaults.     | [train]   |
+| `ultra.kwargs-track`   | Snippet using model `track` method, including all keyword arguments and defaults.     | [track]   |
+| `ultra.kwargs-val`     | Snippet using model `val` method, including all keyword arguments and defaults.       | [val]     |
+
+### Snippet Example
+
+`ultra.kwargs-predict`

+ +```py +model.predict( + source=src, # (str, optional) source directory for images or videos + imgsz=640, # (int | list) input images size as int or list[w,h] for predict + conf=0.25, # (float) minimum confidence threshold + iou=0.7, # (float) intersection over union (IoU) threshold for NMS + vid_stride=1, # (int) video frame-rate stride + stream_buffer=False, # (bool) buffer all streaming frames (True) or return the most recent frame (False) + visualize=False, # (bool) visualize model features + augment=False, # (bool) apply image augmentation to prediction sources + agnostic_nms=False, # (bool) class-agnostic NMS + classes=None, # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3] + retina_masks=False, # (bool) use high-resolution segmentation masks + embed=None, # (list[int], optional) return feature vectors/embeddings from given layers + show=False, # (bool) show predicted images and videos if environment allows + save=True, # (bool) save prediction results + save_frames=False, # (bool) save predicted individual video frames + save_txt=False, # (bool) save results as .txt file + save_conf=False, # (bool) save results with confidence scores + save_crop=False, # (bool) save cropped images with results + stream=False, # (bool) for processing long videos or numerous images with reduced memory usage by returning a generator + verbose=True, # (bool) enable/disable verbose inference logging in the terminal +) +``` + +
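
One comment above is worth expanding on: with `stream=True`, `predict` returns a generator rather than a list, so long videos can be processed frame by frame without accumulating every result in memory. A minimal sketch (the weights file and video path are placeholder values):

```py
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# stream=True yields one Results object per frame instead of building a list
for result in model.predict(source="path/to/video.mp4", stream=True):
    print(len(result.boxes))  # number of detections in this frame
```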

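Likewise, `ultra.kwargs-val` expands to a `model.val(...)` call. A trimmed sketch of a filled-in expansion (the values shown are the snippet defaults, and the `box.map` attribute assumes a detection model):

```py
metrics = model.val(
    data="coco8.yaml",  # (str) dataset configuration file
    imgsz=640,  # (int) size input images are resized to
    batch=16,  # (int) images per batch, -1 for AutoBatch
    conf=0.001,  # (float) minimum detection confidence threshold
    iou=0.6,  # (float) IoU threshold for NMS
)
print(metrics.box.map)  # mAP50-95 across IoU thresholds
```
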
## Use with `neovim` @@ -248,10 +292,11 @@ Make sure that the path `"./ultralytics-snippets/"` is valid for your install lo [ann]: https://docs.ultralytics.com/usage/simple-utilities/#drawing-annotations [models]: https://docs.ultralytics.com/models -[_modes]: https://docs.ultralytics.com/modes -[_predict]: https://docs.ultralytics.com/modes/predict -[_train]: https://docs.ultralytics.com/modes/train -[_val]: https://docs.ultralytics.com/modes/val +[modes]: https://docs.ultralytics.com/modes +[predict]: https://docs.ultralytics.com/modes/predict +[train]: https://docs.ultralytics.com/modes/train +[track]: https://docs.ultralytics.com/modes/track +[val]: https://docs.ultralytics.com/modes/val [YOLOv8]: https://docs.ultralytics.com/models/yolov8 [YOLOv5]: https://docs.ultralytics.com/models/yolov5 [YOLOv9]: https://docs.ultralytics.com/models/yolov9