diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py
index 58d49e175e..0fa6d5c2b9 100644
--- a/inference/core/workflows/core_steps/loader.py
+++ b/inference/core/workflows/core_steps/loader.py
@@ -79,6 +79,9 @@
 from inference.core.workflows.core_steps.transformations.relative_static_crop import (
     RelativeStaticCropBlock,
 )
+from inference.core.workflows.core_steps.visualizations.background_color import (
+    BackgroundColorVisualizationBlock,
+)
 
 # Visualizers
 from inference.core.workflows.core_steps.visualizations.blur import (
@@ -207,6 +210,7 @@ def load_blocks() -> List[
         PropertyDefinitionBlock,
         DimensionCollapseBlock,
         FirstNonEmptyOrDefaultBlock,
+        BackgroundColorVisualizationBlock,
         BlurVisualizationBlock,
         BoundingBoxVisualizationBlock,
         CircleVisualizationBlock,
diff --git a/inference/core/workflows/core_steps/visualizations/annotators/__init__.py b/inference/core/workflows/core_steps/visualizations/annotators/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/inference/core/workflows/core_steps/visualizations/annotators/background_color.py b/inference/core/workflows/core_steps/visualizations/annotators/background_color.py
new file mode 100644
index 0000000000..3af4391605
--- /dev/null
+++ b/inference/core/workflows/core_steps/visualizations/annotators/background_color.py
@@ -0,0 +1,72 @@
+import cv2
+import numpy as np
+from supervision.annotators.base import BaseAnnotator, ImageType
+from supervision.detection.core import Detections
+from supervision.draw.color import Color
+from supervision.utils.conversion import ensure_cv2_image_for_annotation
+
+
+class BackgroundColorAnnotator(BaseAnnotator):
+    """
+    A class for drawing background colors outside of detected box or mask regions.
+    !!! warning
+        This annotator uses `sv.Detections.mask`.
+    """
+
+    def __init__(
+        self,
+        color: Color = Color.BLACK,
+        opacity: float = 0.5,
+        force_box: bool = False,
+    ):
+        """
+        Args:
+            color (Color): The color to use for annotating detections.
+            opacity (float): Opacity of the overlay mask. Must be between `0` and `1`.
+        """
+        self.color: Color = color
+        self.opacity = opacity
+        self.force_box = force_box
+
+    @ensure_cv2_image_for_annotation
+    def annotate(self, scene: ImageType, detections: Detections) -> ImageType:
+        """
+        Annotates the given scene with masks based on the provided detections.
+        Args:
+            scene (ImageType): The image where masks will be drawn.
+                `ImageType` is a flexible type, accepting either `numpy.ndarray`
+                or `PIL.Image.Image`.
+            detections (Detections): Object detections to annotate.
+        Returns:
+            The annotated image, matching the type of `scene` (`numpy.ndarray`
+                or `PIL.Image.Image`)
+        Example:
+            ```python
+            import supervision as sv
+            image = ...
+            detections = sv.Detections(...)
+            background_color_annotator = sv.BackgroundColorAnnotator()
+            annotated_frame = background_color_annotator.annotate(
+                scene=image.copy(),
+                detections=detections
+            )
+            ```
+        ![background-color-annotator-example](https://media.roboflow.com/
+            supervision-annotator-examples/background-color-annotator-example-purple.png)
+        """
+
+        colored_mask = np.full_like(scene, self.color.as_bgr(), dtype=np.uint8)
+
+        cv2.addWeighted(
+            scene, 1 - self.opacity, colored_mask, self.opacity, 0, dst=colored_mask
+        )
+
+        if detections.mask is None or self.force_box:
+            for detection_idx in range(len(detections)):
+                x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int)
+                colored_mask[y1:y2, x1:x2] = scene[y1:y2, x1:x2]
+        else:
+            for mask in detections.mask:
+                colored_mask[mask] = scene[mask]
+
+        return colored_mask
diff --git a/inference/core/workflows/core_steps/visualizations/background_color.py b/inference/core/workflows/core_steps/visualizations/background_color.py
new file mode 100644
index 0000000000..83bd2aabc6
--- /dev/null
+++ b/inference/core/workflows/core_steps/visualizations/background_color.py
@@ -0,0 +1,115 @@
+from typing import List, Literal, Optional, Type, Union
+
+import supervision as sv
+from pydantic import ConfigDict, Field
+
+from inference.core.workflows.core_steps.visualizations.annotators.background_color import (
+    BackgroundColorAnnotator,
+)
+from inference.core.workflows.core_steps.visualizations.base import (
+    OUTPUT_IMAGE_KEY,
+    VisualizationBlock,
+    VisualizationManifest,
+)
+from inference.core.workflows.core_steps.visualizations.utils import str_to_color
+from inference.core.workflows.entities.base import WorkflowImageData
+from inference.core.workflows.entities.types import (
+    BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND,
+    FLOAT_ZERO_TO_ONE_KIND,
+    STRING_KIND,
+    FloatZeroToOne,
+    StepOutputSelector,
+    WorkflowParameterSelector,
+)
+from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest
+
+TYPE: str = "BackgroundColorVisualization"
+SHORT_DESCRIPTION = (
+    "Paints a mask over all areas outside of detected regions in an image."
+)
+LONG_DESCRIPTION = """
+The `BackgroundColorVisualization` block draws all areas
+outside of detected regions in an image with a specified
+color.
+""" + + +class BackgroundColorManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + description="Color of the background.", + default="BLACK", + examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], + ) + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + description="Transparency of the Mask overlay.", + default=0.5, + examples=[0.5, "$inputs.opacity"], + ) + + +class BackgroundColorVisualizationBlock(VisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BackgroundColorManifest + + def getAnnotator( + self, + color: str, + opacity: float, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color, + opacity, + ], + ) + ) + + if key not in self.annotatorCache: + background_color = str_to_color(color) + self.annotatorCache[key] = BackgroundColorAnnotator( + color=background_color, + opacity=opacity, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color: str, + opacity: Optional[float], + ) -> BlockResult: + annotator = self.getAnnotator(color, opacity) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/usage_tracking/collector.py b/inference/usage_tracking/collector.py index 1a73f1fe5b..df12c3d956 100644 --- a/inference/usage_tracking/collector.py +++ b/inference/usage_tracking/collector.py @@ -298,7 +298,20 @@ def system_info( if ip_address: ip_address_hash_hex = UsageCollector._hash(ip_address) else: - ip_address: str = socket.gethostbyname(socket.gethostname()) + try: + ip_address: str = socket.gethostbyname(socket.gethostname()) + except: + s = None + try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.connect(("8.8.8.8", 80)) + ip_address = s.getsockname()[0] + except: + ip_address: str = socket.gethostbyname("localhost") + + if s: + s.close() + ip_address_hash_hex = UsageCollector._hash(ip_address) if not time_ns: