diff --git a/docs/source/conf.py b/docs/source/conf.py
index 837d369..1ff9002 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -16,11 +16,11 @@
 import os
 import re
 import sys
-sys.path.append(os.path.abspath('../../plugin/'))
+sys.path.append(os.path.abspath('../../src/'))
 
 # -- Project information -----------------------------------------------------
 
-metadata_file_path = os.path.join('..', '..', 'plugin', 'deepness', 'metadata.txt')
+metadata_file_path = os.path.join('..', '..', 'src', 'deepness', 'metadata.txt')
 metadata_file_path = os.path.abspath(metadata_file_path)
 with open(metadata_file_path, 'rt') as file:
     file_content = file.read()
diff --git a/docs/source/creators/creators_tutorial.rst b/docs/source/creators/creators_tutorial.rst
new file mode 100644
index 0000000..3f469cc
--- /dev/null
+++ b/docs/source/creators/creators_tutorial.rst
@@ -0,0 +1,24 @@
+Model creation tutorial
+=======================
+
+
+=========
+Detection
+=========
+
+For one of the models in our zoo - specifically for car detection on aerial images - a complete tutorial is provided as a Jupyter notebook:
+
+    .. code-block::
+
+        ./tutorials/detection/cars_yolov7/car_detection__prepare_and_train.ipynb
+
+
+The notebook covers:
+ * downloading the yolov7 repository
+ * downloading the training dataset
+ * preparing the training data and labels in yolov7 format
+ * running the training and testing
+ * conversion to an ONNX model
+ * adding default parameters for the Deepness plugin
+
+An example inference with this model can be found in the :code:`Examples` section.
diff --git a/docs/source/example/example_detection_cars_yolov7.rst b/docs/source/example/example_detection_cars_yolov7.rst
new file mode 100644
index 0000000..639dcd1
--- /dev/null
+++ b/docs/source/example/example_detection_cars_yolov7.rst
@@ -0,0 +1,46 @@
+YOLOv7 cars detection
+===================================
+
+The following example shows how to use a YOLOv7 model for car (and other vehicle) detection in aerial or satellite images.
+
+=======
+Dataset
+=======
+
+The example is based on the `ITCVD cars detection dataset `_. It provides aerial images with 10 cm/px resolution. Annotation bounding boxes for the cars are provided.
+
+=========================
+Training tutorial
+=========================
+
+The entire training process is covered in a Jupyter notebook tutorial:
+
+
+    .. code-block::
+
+        ./tutorials/detection/cars_yolov7/car_detection__prepare_and_train.ipynb
+
+
+==================
+Example inference
+==================
+
+Run QGIS, then add the "Poznan 2022 aerial" map using the :code:`QuickMapServices` plugin.
+
+Alternatively, you can use any other aerial or satellite map with a resolution of at least 10 cm/pixel.
+
+.. image:: ../images/cars_near_poznan_university_of_technology_on_ortophoto__zoom_in.webp
+
+Then run our plugin and set the parameters as in the screenshot below. You can find the pre-trained ONNX model at :code:`https://chmura.put.poznan.pl/s/vgOeUN4H4tGsrGm`. Press the Run button to start processing.
+
+.. image:: ../images/cars_near_poznan_university_of_technology_on_ortophoto.webp
+
+
+Another inference on a random street in Poznan:
+
+    .. image:: ../images/cars_on_ransom_street_in_poznan.webp
+
+
+And the output for the Grunwald district in Poznan:
+
+    .. image:: ../images/cars_in_poznan_grunwald_district.webp
diff --git a/docs/source/images/cars_in_poznan_grunwald_district.webp b/docs/source/images/cars_in_poznan_grunwald_district.webp
new file mode 100644
index 0000000..d7795f5
Binary files /dev/null and b/docs/source/images/cars_in_poznan_grunwald_district.webp differ
diff --git a/docs/source/images/cars_near_poznan_university_of_technology_on_ortophoto.webp b/docs/source/images/cars_near_poznan_university_of_technology_on_ortophoto.webp
new file mode 100644
index 0000000..fc544c4
Binary files /dev/null and b/docs/source/images/cars_near_poznan_university_of_technology_on_ortophoto.webp differ
diff --git a/docs/source/images/cars_near_poznan_university_of_technology_on_ortophoto__zoom_in.webp b/docs/source/images/cars_near_poznan_university_of_technology_on_ortophoto__zoom_in.webp
new file mode 100644
index 0000000..43fddcd
Binary files /dev/null and b/docs/source/images/cars_near_poznan_university_of_technology_on_ortophoto__zoom_in.webp differ
diff --git a/docs/source/images/cars_on_ransom_street_in_poznan.webp b/docs/source/images/cars_on_ransom_street_in_poznan.webp
new file mode 100644
index 0000000..e0c062c
Binary files /dev/null and b/docs/source/images/cars_on_ransom_street_in_poznan.webp differ
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 264db3f..b07a0c0 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -38,6 +38,7 @@ Home
    :maxdepth: 1
    :caption: Examples
 
+   example/example_detection_cars_yolov7
    example/example_segmentation_landcover
    example/example_detection_planes_yolov7
    example/example_detection_oils_yolov5
@@ -48,6 +49,7 @@ Home
    :caption: For Model Creators
 
    creators/creators_description_classes
+   creators/creators_tutorial
    creators/creators_export_training_data_tool
    creators/creators_example_onnx_model
    creators/creators_add_metadata_to_model
diff --git a/docs/source/main/model_zoo/MODEL_ZOO.md b/docs/source/main/model_zoo/MODEL_ZOO.md
index 4346bdf..f710db5 100644
--- a/docs/source/main/model_zoo/MODEL_ZOO.md
+++ b/docs/source/main/model_zoo/MODEL_ZOO.md
@@ -4,25 +4,26 @@ The [Model ZOO](https://chmura.put.poznan.pl/s/2pJk4izRurzQwu3) is a collection
 
 ## Segmentation models
 
-| Model name | Input size | CM/PX | Description | Example image |
-|------------------------------------------------------------------------------------|---|---|---|---------------------------------------------------------|
-| [Corn Field Damage Segmentation](https://chmura.put.poznan.pl/s/abWFTVYSDIcncWs) | 512 | 3 | [PUT Vision](https://putvision.github.io/) model for Corn Field Damage Segmentation created on own dataset labeled by experts. We used the classical UNet++ model. It generates 3 outputs: healthy crop, damaged crop, and out-of-field area. | [Image](https://chmura.put.poznan.pl/s/i5WVmcfqPNdBTAQ) |
-| [Land Cover Segmentation](https://chmura.put.poznan.pl/s/PnAFJw27uneROkV) | 512 | 40 | The model is trained on the [LandCover.ai dataset](https://landcover.ai.linuxpolska.com/). It provides satellite images with 25 cm/px and 50 cm/px resolution. Annotation masks for the following classes are provided for the images: building (1), woodland (2), water(3), road(4). We use `DeepLabV3+` model with `tu-semnasnet_100` backend and `FocalDice` as a loss function. | [Image](https://chmura.put.poznan.pl/s/Xa29vnieNQTvSt5) |
-| [Roads Segmentation](https://chmura.put.poznan.pl/s/y6S3CmodPy1fYYz) | 512 | 21 | The model segments the Google Earth satellite images into 'road' and 'not-road' classes. 
Model works best on wide car roads, crossroads and roundabouts. | [Image](https://chmura.put.poznan.pl/s/rln6mpbjpsXWpKg) | +| Model | Input size | CM/PX | Description | Example image | +|----------------------------------------------------------------------------------|------------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| [Corn Field Damage Segmentation](https://chmura.put.poznan.pl/s/abWFTVYSDIcncWs) | 512 | 3 | [PUT Vision](https://putvision.github.io/) model for Corn Field Damage Segmentation created on own dataset labeled by experts. We used the classical UNet++ model. It generates 3 outputs: healthy crop, damaged crop, and out-of-field area. | [Image](https://chmura.put.poznan.pl/s/i5WVmcfqPNdBTAQ) | +| [Land Cover Segmentation](https://chmura.put.poznan.pl/s/PnAFJw27uneROkV) | 512 | 40 | The model is trained on the [LandCover.ai dataset](https://landcover.ai.linuxpolska.com/). It provides satellite images with 25 cm/px and 50 cm/px resolution. Annotation masks for the following classes are provided for the images: building (1), woodland (2), water(3), road(4). We use `DeepLabV3+` model with `tu-semnasnet_100` backend and `FocalDice` as a loss function. | [Image](https://chmura.put.poznan.pl/s/Xa29vnieNQTvSt5) | +| [Roads Segmentation](https://chmura.put.poznan.pl/s/y6S3CmodPy1fYYz) | 512 | 21 | The model segments the Google Earth satellite images into 'road' and 'not-road' classes. Model works best on wide car roads, crossroads and roundabouts. | [Image](https://chmura.put.poznan.pl/s/rln6mpbjpsXWpKg) | ## Regression models -| Model name | Input size | CM/PX | Description | Example image | -|---|---|---|---|---| -| | | | | | -| | | | | | +| Model | Input size | CM/PX | Description | Example image | +|---------|---|---|---|---| +| | | | | | +| | | | | | ## Object detection models -| Model name | Input size | CM/PX | Description | Example image | -|---|---|---|---|---| -| [Airbus Planes Detection](https://chmura.put.poznan.pl/s/bBIJ5FDPgyQvJ49) | 256 | 70 | YOLOv7 tiny model for object detection on satellite images. Based on the [Airbus Aircraft Detection dataset](https://www.kaggle.com/datasets/airbusgeo/airbus-aircrafts-sample-dataset). | [Image](https://chmura.put.poznan.pl/s/VfLmcWhvWf0UJfI) | -| [Airbus Oil Storage Detection](https://chmura.put.poznan.pl/s/gMundpKsYUC7sNb) | 512 | 150 | YOLOv5-m model for object detection on satellite images. Based on the [Airbus Oil Storage Detection dataset](https://www.kaggle.com/datasets/airbusgeo/airbus-oil-storage-detection-dataset). 
| [Image](https://chmura.put.poznan.pl/s/T3pwaKlbFDBB2C3) | +| Model | Input size | CM/PX | Description | Example image | +|--------------------------------------------------------------------------------|------------|-------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| [Airbus Planes Detection](https://chmura.put.poznan.pl/s/bBIJ5FDPgyQvJ49) | 256 | 70 | YOLOv7 tiny model for object detection on satellite images. Based on the [Airbus Aircraft Detection dataset](https://www.kaggle.com/datasets/airbusgeo/airbus-aircrafts-sample-dataset). | [Image](https://chmura.put.poznan.pl/s/VfLmcWhvWf0UJfI) | +| [Airbus Oil Storage Detection](https://chmura.put.poznan.pl/s/gMundpKsYUC7sNb) | 512 | 150 | YOLOv5-m model for object detection on satellite images. Based on the [Airbus Oil Storage Detection dataset](https://www.kaggle.com/datasets/airbusgeo/airbus-oil-storage-detection-dataset). | [Image](https://chmura.put.poznan.pl/s/T3pwaKlbFDBB2C3) | +| [Aerial Cars Detection](https://chmura.put.poznan.pl/s/vgOeUN4H4tGsrGm) | 640 | 10 | YOLOv7-m model for cars detection on aerial images. Based on the [ITCVD](https://arxiv.org/pdf/1801.07339.pdf). | [Image](https://chmura.put.poznan.pl/s/cPzw1mkXlprSUIJ) | ## Contributing diff --git a/src/deepness/metadata.txt b/src/deepness/metadata.txt index ae802c8..bab2bfb 100644 --- a/src/deepness/metadata.txt +++ b/src/deepness/metadata.txt @@ -6,7 +6,7 @@ name=Deepness: Deep Neural Remote Sensing qgisMinimumVersion=3.22 description=Inference of deep neural network models (ONNX) for segmentation, detection and regression -version=0.4.1 +version=0.5.0 author=PUT Vision email=przemyslaw.aszkowski@gmail.com @@ -17,7 +17,7 @@ about= - limiting processing range to predefined area (visible part or area defined by vector layer polygons) - common types of models are supported: segmentation, regression, detection - integration with layers (both for input data and model output layers). Once an output layer is created, it can be saved as a file manually - - model ZOO under development (planes detection on Bing Aerial, Corn field damage, Oil Storage tanks detection, ...) + - model ZOO under development (planes detection on Bing Aerial, Corn field damage, Oil Storage tanks detection, cars detection, ...) - training data Export Tool - exporting raster and mask as small tiles - parametrization of the processing for advanced users (spatial resolution, overlap, postprocessing) Plugin requires external python packages to be installed. After the first plugin startup, a Dialog will show, to assist in this process. Please visit plugin the documentation for details. 
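The Deepness-specific parameters shipped with the Model ZOO files (model type, class names, resolution, detection thresholds) are stored as ONNX metadata, exactly as done in the tutorial notebook below, and act as default values for the plugin parameters. A minimal sketch of how to inspect them for a downloaded model - assuming the `onnx` package is installed; the local file name used here is only an example:

```python
# Minimal sketch: inspect the Deepness metadata embedded in a downloaded Model ZOO file.
# Assumes `pip install onnx` and a locally saved copy of the model (the file name is an example).
import json

import onnx

model = onnx.load('car_aerial_detection_yolo7_ITCVD_deepness.onnx')

for entry in model.metadata_props:
    # each entry is a key/value pair of strings; Deepness stores JSON-encoded values
    print(entry.key, '=', json.loads(entry.value))
```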
diff --git a/tutorials/detection/cars_yolov7/car_detection__prepare_and_train.ipynb b/tutorials/detection/cars_yolov7/car_detection__prepare_and_train.ipynb
new file mode 100644
index 0000000..5e0f4e0
--- /dev/null
+++ b/tutorials/detection/cars_yolov7/car_detection__prepare_and_train.ipynb
@@ -0,0 +1,464 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Car detection from aerial orthophoto with YOLOv7\n",
+    "\n",
+    "This notebook is a code supplement for the `Deepness` QGIS plugin documentation.\n",
+    "Please visit the documentation to see how easy it is to use the model in QGIS: https://qgis-plugin-deepness.readthedocs.io/\n",
+    "\n",
+    "\n",
+    "The model is trained on the ITCVD dataset ( https://arxiv.org/pdf/1801.07339.pdf ).\n",
+    "Spatial resolution of the model: 10 cm/pixel.\n",
+    "\n",
+    "In this notebook we go through the training process, including data preprocessing and model export."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Download yolov7\n",
+    "Download the yolov7 repository, install its requirements, and put this notebook in the root directory of the repository:\n",
+    "\n",
+    "```\n",
+    "git clone https://github.com/WongKinYiu/yolov7\n",
+    "pip install -r yolov7/requirements.txt\n",
+    "pip install scipy onnx\n",
+    "cp $THIS_NOTEBOOK ./yolov7\n",
+    "\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import numpy as np\n",
+    "import scipy.io\n",
+    "from PIL import Image"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Dataset manual download\n",
+    "\n",
+    "Manually download the ITCVD dataset.\n",
+    "\n",
+    "At the moment of writing, it is available at https://easy.dans.knaw.nl/ui/datasets/id/easy-dataset:157920/tab/2\n",
+    "\n",
+    "Create a subdirectory for our data:\n",
+    "```\n",
+    "mkdir -p car/itcvd\n",
+    "```\n",
+    "\n",
+    "Put the content of the downloaded `ITCVD/ITC_VD_Training_Testing_set/Training` directory in `./car/itcvd` (so that we end up with the directories `./car/itcvd/GT` and `./car/itcvd/Images`)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "BASE_INPUT_DATA_DIR = 'car/itcvd/'\n",
+    "BASE_OUTPUT_DATA_DIR = 'car/'\n",
+    "\n",
+    "img_output_dir = os.path.join(BASE_OUTPUT_DATA_DIR, 'images', 'all')\n",
+    "label_output_dir = os.path.join(BASE_OUTPUT_DATA_DIR, 'labels', 'all')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Data preprocessing and preparation\n",
+    "\n",
+    "The dataset images and labels are not in the format expected by yolov7. The images need to be cut into smaller tiles."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "subimg_number = 0\n",
+    "\n",
+    "img_overlap = 0.05\n",
+    "target_img_size = 640  # both dimensions equal\n",
+    "stride = int(target_img_size * (1 - img_overlap))\n",
+    "\n",
+    "\n",
+    "os.makedirs(img_output_dir, exist_ok=True)\n",
+    "os.makedirs(label_output_dir, exist_ok=True)\n",
+    "\n",
+    "input_img_dir = os.path.join(BASE_INPUT_DATA_DIR, 'Image/')\n",
+    "all_output_img_names = []\n",
+    "\n",
+    "for file_name in sorted(os.listdir(input_img_dir)):\n",
+    "    if not file_name.endswith('.jpg'):\n",
+    "        continue\n",
+    "\n",
+    "    input_image_number = os.path.splitext(file_name)[0]  # e.g. '00001' - used as a key into the .mat labels\n",
+    "    mat_file_name = input_image_number + '.mat'\n",
+    "    labels_mat_path = os.path.join(BASE_INPUT_DATA_DIR, f'GT/{mat_file_name}')\n",
+    "    img_path = os.path.join(input_img_dir, f'{file_name}')\n",
+    "\n",
+    "    labels_mat = scipy.io.loadmat(labels_mat_path)\n",
+    "    full_img = Image.open(img_path)\n",
+    "    full_img_numpy = np.array(full_img)\n",
+    "\n",
+    "    car_labels = labels_mat[f'x{input_image_number}']\n",
+    "\n",
+    "    input_img_size = full_img_numpy.shape\n",
+    "    bins_x = (input_img_size[1] - target_img_size) // stride + 1\n",
+    "    bins_y = (input_img_size[0] - target_img_size) // stride + 1\n",
+    "\n",
+    "    for i in range(bins_x):\n",
+    "        for j in range(bins_y):\n",
+    "            x_start = i * stride\n",
+    "            y_start = j * stride\n",
+    "            x_end = x_start + target_img_size\n",
+    "            y_end = y_start + target_img_size\n",
+    "\n",
+    "            # collect YOLO-format labels for all cars whose centre falls inside this tile\n",
+    "            # (centre and size are expressed relative to the tile size)\n",
+    "\n",
+    "            yolo_labels = []\n",
+    "            for car_label in car_labels:\n",
+    "                x_left, y_upper, x_right, y_bottom, _, _ = car_label\n",
+    "                x_centre, y_centre = (x_left + x_right) / 2, (y_upper + y_bottom) / 2\n",
+    "                if x_centre > x_start and x_centre < x_end and y_centre < y_end and y_centre > y_start:\n",
+    "                    x_relative_centre = (x_centre - x_start) / target_img_size\n",
+    "                    y_relative_centre = (y_centre - y_start) / target_img_size\n",
+    "                    x_relative_width = (x_right - x_left) / target_img_size\n",
+    "                    y_relative_width = (y_bottom - y_upper) / target_img_size\n",
+    "                    class_id = 0  # 'car' is the only class in this dataset\n",
+    "                    # assemble one label row in the YOLO txt format: class cx cy w h\n",
+    "                    yolo_label = [class_id, x_relative_centre, y_relative_centre, x_relative_width, y_relative_width]\n",
+    "                    yolo_labels.append(list(map(str, yolo_label)))\n",
+    "\n",
+    "            img_slice = full_img_numpy[y_start:y_end, x_start:x_end]\n",
+    "\n",
+    "            file_base_name = 's' + str(subimg_number).zfill(5)\n",
+    "            img_file_path = os.path.join(img_output_dir, f'{file_base_name}.jpg')\n",
+    "            subimg_number += 1\n",
+    "            im = Image.fromarray(img_slice)\n",
+    "            im.save(img_file_path)\n",
+    "            all_output_img_names.append(f'{file_base_name}.jpg')\n",
+    "\n",
+    "            label_file_path = os.path.join(label_output_dir, f'{file_base_name}.txt')\n",
+    "            txt = '\\n'.join([' '.join(label) for label in yolo_labels])\n",
+    "            with open(label_file_path, 'wt') as file:\n",
+    "                file.write(txt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Create configuration files for yolo, with the lists of training/testing files"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# split the generated tiles into training, test and validation sets\n",
+    "\n",
+    "x = np.array(all_output_img_names)\n",
+    "N = len(x)\n",
+    "np.random.seed(44)\n",
+    "indices = np.random.permutation(N)\n",
+    "training_idx, test_idx, val_idx = indices[:int(0.8*N)], indices[int(0.8*N):int(0.9*N)], indices[int(0.9*N):]\n",
+    "training, test, val = x[training_idx], x[test_idx], x[val_idx]\n",
+    "\n",
+    "\n",
+    "def create_as_images_listing(set_name, file_names):\n",
+    "    txts = []\n",
+    "    for file_name in file_names:\n",
+    "        img_file_name = f'{file_name}'\n",
+    "        txts.append(f'./images/all/{img_file_name}')\n",
+    "\n",
+    "    txt = '\\n'.join(txts)\n",
+    "    txt_file_path = os.path.join(BASE_OUTPUT_DATA_DIR, set_name + '.txt')\n",
+    "    with open(txt_file_path, 'wt') as file:\n",
+    "        file.write(txt)\n",
+    "\n",
+    "\n",
+    "create_as_images_listing('train', sorted(training))\n",
+    "create_as_images_listing('test', sorted(test))\n",
+    "create_as_images_listing('val', sorted(val))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "CAR_YAML_CONTENT = \"\"\"\n",
+    "# CAR\n",
+    "\n",
+    "# download command/URL (optional)\n",
+    "download: echo \"no download\"\n",
+    "\n",
+    "# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]\n",
+    "train: ./car/train.txt\n",
+    "val: ./car/val.txt\n",
+    "test: ./car/test.txt\n",
+    "\n",
+    "# number of classes\n",
+    "nc: 1\n",
+    "\n",
+    "# class names\n",
+    "names: [\"car\"]\n",
+    "\"\"\"\n",
+    "\n",
+    "with open('./car/car.yaml', 'wt') as file:\n",
+    "    file.write(CAR_YAML_CONTENT)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Run the training"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python train.py \\\n",
+    "    --workers 8 \\\n",
+    "    --device 0 \\\n",
+    "    --batch-size 2 \\\n",
+    "    --data car/car.yaml \\\n",
+    "    --img 640 640 \\\n",
+    "    --cfg cfg/training/yolov7-tiny.yaml \\\n",
+    "    --weights yolov7-tiny.pt \\\n",
+    "    --name yolov7-car-detector \\\n",
+    "    --hyp data/hyp.scratch.custom.yaml \\\n",
+    "    --epochs 15"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Run the testing"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python test.py \\\n",
+    "    --device 0 \\\n",
+    "    --batch-size 2 \\\n",
+    "    --data car/car.yaml \\\n",
+    "    --img 640 \\\n",
+    "    --weights runs/train/yolov7-car-detector/weights/best.pt \\\n",
+    "    --name yolov7-car-detector"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Export the model to ONNX"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python export.py \\\n",
+    "    --weights runs/train/yolov7-car-detector/weights/best.pt \\\n",
+    "    --grid \\\n",
+    "    --simplify \\\n",
+    "    --img-size 640 640"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Add metadata for the `Deepness` plugin to run the model smoothly"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "import onnx\n",
+    "\n",
+    "model = onnx.load('runs/train/yolov7-car-detector/weights/best.onnx')\n",
+    "\n",
+    "class_names = {\n",
+    "    0: 'car',\n",
+    "}\n",
+    "\n",
+    "m1 = model.metadata_props.add()\n",
+    "m1.key = 'model_type'\n",
+    "m1.value = json.dumps('Detector')\n",
+    "\n",
+    "m2 = model.metadata_props.add()\n",
+    "m2.key = 'class_names'\n",
+    "m2.value = json.dumps(class_names)\n",
+    "\n",
+    "m3 = model.metadata_props.add()\n",
+    "m3.key = 'resolution'\n",
+    "m3.value = json.dumps(10)\n",
+    "\n",
+    "m4 = model.metadata_props.add()\n",
+    "m4.key = 'tiles_overlap'\n",
+    "m4.value = json.dumps(10)\n",
+    "\n",
+    "m5 = model.metadata_props.add()\n",
+    "m5.key = 'det_conf'\n",
+    "m5.value = json.dumps(0.3)\n",
+    "\n",
+    "m6 = model.metadata_props.add()\n",
+    "m6.key = 'det_iou_thresh'\n",
+    "m6.value = json.dumps(0.7)\n",
+    "\n",
+    "\n",
+    "FINAL_MODEL_FILE_PATH = os.path.abspath('runs/train/yolov7-car-detector/weights/car_aerial_detection_yolo7_ITCVD_deepness.onnx')\n",
+    "onnx.save(model, FINAL_MODEL_FILE_PATH)\n",
+    "\n",
+    "print(f'Your ONNX model with metadata is at: {FINAL_MODEL_FILE_PATH}')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Now you can load the model in Deepness!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.6"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "57069042d30620f0a3856c40b208c9eb0a4618b3f277ee0b3ee7d5bf24466b9e"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
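Before loading the exported model into QGIS, it can be worth sanity-checking it outside of Deepness. A minimal sketch, assuming `onnxruntime` and `numpy` are installed (neither is required by the notebook itself) and using the output path printed by the metadata cell above - adjust it to your own run directory:

```python
# Minimal sketch: run the exported detector on a dummy tile to verify input/output shapes.
# Assumes `pip install onnxruntime numpy`; the model path below matches the notebook's output path.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('runs/train/yolov7-car-detector/weights/car_aerial_detection_yolo7_ITCVD_deepness.onnx')

input_info = session.get_inputs()[0]
print('input:', input_info.name, input_info.shape)  # expected roughly [1, 3, 640, 640] for this export

# dummy RGB tile, NCHW float32 in [0, 1] - the layout consumed by the yolov7 ONNX export
dummy_tile = np.random.rand(1, 3, 640, 640).astype(np.float32)
outputs = session.run(None, {input_info.name: dummy_tile})
print('output shapes:', [o.shape for o in outputs])
```

The exact output layout depends on the export flags (`--grid`, `--simplify`), so treat this only as a quick shape check before importing the model into the plugin.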