Skip to content

Commit

Permalink
Merge pull request #134 from karolmajek/devel
Browse files Browse the repository at this point in the history
Place Recognition support
  • Loading branch information
przemyslaw-aszkowski authored Feb 6, 2024
2 parents 468417e + 48713a3 commit 4579af0
Show file tree
Hide file tree
Showing 9 changed files with 501 additions and 4 deletions.
7 changes: 7 additions & 0 deletions docs/source/main/model_zoo/MODEL_ZOO.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,13 @@ The [Model ZOO](https://chmura.put.poznan.pl/s/2pJk4izRurzQwu3) is a collection
| | | | | |
| | | | | |

## Recognition models

| Model | Input size | CM/PX | Description | Example image |
|---------|---|---|---|---|
| NAIP Place recognition | 224 | 100 | ConvNeXt nano trained using SimSiam on NAIP imagery | |
| | | | | |

## Object detection models

| Model | Input size | CM/PX | Description | Example image |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ class ModelOutputFormat(enum.Enum):
ALL_CLASSES_AS_SEPARATE_LAYERS = 'All classes as separate layers'
CLASSES_AS_SEPARATE_LAYERS_WITHOUT_ZERO_CLASS = 'Classes as separate layers (without 0 class)'
ONLY_SINGLE_CLASS_AS_LAYER = 'Single class as a vector layer'
RECOGNITION_RESULT = 'Cosine distance between query image and map'

@classmethod
def get_all_names(cls):
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import enum
from dataclasses import dataclass
from typing import Optional

from deepness.common.processing_parameters.map_processing_parameters import \
MapProcessingParameters
from deepness.processing.models.model_base import ModelBase


@dataclass
class RecognitionParameters(MapProcessingParameters):
    """
    Parameters for inference of a (place) recognition model, including
    pre/post-processing options, as obtained from the UI.

    Extends the shared ``MapProcessingParameters`` with the two fields
    that are specific to recognition processing.
    """

    query_image_path: str  # filesystem path to the query image selected by the user in the UI
    model: ModelBase  # wrapper of the loaded model used to compute embeddings for matching
31 changes: 29 additions & 2 deletions src/deepness/deepness_dockwidget.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
from deepness.common.processing_parameters.detection_parameters import DetectionParameters, DetectorType
from deepness.common.processing_parameters.map_processing_parameters import (MapProcessingParameters, ModelOutputFormat,
ProcessedAreaType)
from deepness.common.processing_parameters.recognition_parameters import RecognitionParameters
from deepness.common.processing_parameters.regression_parameters import RegressionParameters
from deepness.common.processing_parameters.segmentation_parameters import SegmentationParameters
from deepness.common.processing_parameters.superresolution_parameters import SuperresolutionParameters
Expand Down Expand Up @@ -196,6 +197,7 @@ def get_selected_processed_area_type(self) -> ProcessedAreaType:
def _create_connections(self):
self.pushButton_runInference.clicked.connect(self._run_inference)
self.pushButton_runTrainingDataExport.clicked.connect(self._run_training_data_export)
self.pushButton_browseQueryImagePath.clicked.connect(self._browse_query_image_path)
self.pushButton_browseModelPath.clicked.connect(self._browse_model_path)
self.comboBox_processedAreaSelection.currentIndexChanged.connect(self._set_processed_area_mask_options)
self.comboBox_modelType.currentIndexChanged.connect(self._model_type_changed)
Expand All @@ -216,6 +218,7 @@ def _model_type_changed(self):
detection_enabled = False
regression_enabled = False
superresolution_enabled = False
recognition_enabled = False

if model_type == ModelType.SEGMENTATION:
segmentation_enabled = True
Expand All @@ -225,15 +228,19 @@ def _model_type_changed(self):
regression_enabled = True
elif model_type == ModelType.SUPERRESOLUTION:
superresolution_enabled = True
elif model_type == ModelType.RECOGNITION:
recognition_enabled = True
else:
raise Exception(f"Unsupported model type ({model_type})!")

self.mGroupBox_segmentationParameters.setVisible(segmentation_enabled)
self.mGroupBox_detectionParameters.setVisible(detection_enabled)
self.mGroupBox_regressionParameters.setVisible(regression_enabled)
self.mGroupBox_superresolutionParameters.setVisible(superresolution_enabled)
# Disable output format options for super-resolution models.
self.mGroupBox_6.setEnabled(not superresolution_enabled)
self.mGroupBox_recognitionParameters.setVisible(recognition_enabled)
# Disable output format options for super-resolution or recognition models.
if recognition_enabled or superresolution_enabled:
self.mGroupBox_6.setEnabled(False)

def _detector_type_changed(self):
detector_type = DetectorType(self.comboBox_detectorType.currentText())
Expand Down Expand Up @@ -268,6 +275,16 @@ def _browse_model_path(self):
self.lineEdit_modelPath.setText(file_path)
self._load_model_and_display_info()

def _browse_query_image_path(self):
file_path, _ = QFileDialog.getOpenFileName(
self,
"Select image file...",
os.path.expanduser("~"),
"All files (*.*)",
)
if file_path:
self.lineEdit_recognitionPath.setText(file_path)

def _load_default_model_parameters(self):
"""
Load the default parameters from model metadata
Expand Down Expand Up @@ -478,6 +495,8 @@ def get_inference_parameters(self) -> MapProcessingParameters:
params = self.get_regression_parameters(map_processing_parameters)
elif model_type == ModelType.SUPERRESOLUTION:
params = self.get_superresolution_parameters(map_processing_parameters)
elif model_type == ModelType.RECOGNITION:
params = self.get_recognition_parameters(map_processing_parameters)
elif model_type == ModelType.DETECTION:
params = self.get_detection_parameters(map_processing_parameters)

Expand Down Expand Up @@ -515,6 +534,14 @@ def get_superresolution_parameters(self, map_processing_parameters: MapProcessin
)
return params

def get_recognition_parameters(self, map_processing_parameters: MapProcessingParameters) -> RecognitionParameters:
params = RecognitionParameters(
**map_processing_parameters.__dict__,
model=self._model,
query_image_path=self.lineEdit_recognitionPath.text(),
)
return params

def get_detection_parameters(self, map_processing_parameters: MapProcessingParameters) -> DetectionParameters:

params = DetectionParameters(
Expand Down
49 changes: 47 additions & 2 deletions src/deepness/deepness_dockwidget.ui
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,9 @@
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<y>-325</y>
<width>452</width>
<height>1621</height>
<height>1742</height>
</rect>
</property>
<layout class="QVBoxLayout" name="verticalLayout_3">
Expand Down Expand Up @@ -626,6 +626,51 @@
</layout>
</widget>
</item>
<item>
<widget class="QgsCollapsibleGroupBox" name="mGroupBox_recognitionParameters">
<property name="sizePolicy">
<sizepolicy hsizetype="Preferred" vsizetype="Maximum">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="title">
<string>Recognition parameters</string>
</property>
<layout class="QGridLayout" name="gridLayout_12">
<item row="1" column="1" colspan="2">
<widget class="QLabel" name="label_23">
<property name="font">
<font>
<weight>75</weight>
<bold>true</bold>
</font>
</property>
<property name="text">
<string>NOTE: Applicable only if a recognition model is used</string>
</property>
</widget>
</item>
<item row="3" column="1" colspan="2">
<widget class="QLabel" name="label_24">
<property name="text">
<string>Image to localize path:</string>
</property>
</widget>
</item>
<item row="4" column="1">
<widget class="QLineEdit" name="lineEdit_recognitionPath"/>
</item>
<item row="4" column="2">
<widget class="QPushButton" name="pushButton_browseQueryImagePath">
<property name="text">
<string>Browse</string>
</property>
</widget>
</item>
</layout>
</widget>
</item>
<item>
<widget class="QgsCollapsibleGroupBox" name="mGroupBox_detectionParameters">
<property name="title">
Expand Down
Loading

0 comments on commit 4579af0

Please sign in to comment.