diff --git a/.env-devel b/.env-devel
index 17f909698c2..b13d55b97a9 100644
--- a/.env-devel
+++ b/.env-devel
@@ -126,6 +126,7 @@ DIRECTOR_V2_TRACING={}
# DYNAMIC_SCHEDULER ----
DYNAMIC_SCHEDULER_LOGLEVEL=DEBUG
DYNAMIC_SCHEDULER_PROFILING=1
+DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER=0
DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT=01:00:00
DYNAMIC_SCHEDULER_TRACING={}
DYNAMIC_SCHEDULER_UI_STORAGE_SECRET=adminadmin
diff --git a/packages/aws-library/requirements/_base.txt b/packages/aws-library/requirements/_base.txt
index a3a10ea494a..f4ed9a597c8 100644
--- a/packages/aws-library/requirements/_base.txt
+++ b/packages/aws-library/requirements/_base.txt
@@ -97,7 +97,7 @@ email-validator==2.2.0
# via pydantic
fast-depends==2.4.12
# via faststream
-faststream==0.5.28
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.4.1
# via
@@ -218,7 +218,7 @@ protobuf==4.25.5
# opentelemetry-proto
psutil==6.0.0
# via -r requirements/../../../packages/service-library/requirements/_base.in
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -247,7 +247,7 @@ pydantic==2.9.2
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -307,7 +307,6 @@ redis==5.0.4
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -393,6 +392,7 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.12.1
# via
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/packages/aws-library/requirements/_test.txt b/packages/aws-library/requirements/_test.txt
index eef079aef0f..02fdd02fb32 100644
--- a/packages/aws-library/requirements/_test.txt
+++ b/packages/aws-library/requirements/_test.txt
@@ -158,12 +158,12 @@ py-partiql-parser==0.5.6
# via moto
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
diff --git a/packages/common-library/requirements/_base.txt b/packages/common-library/requirements/_base.txt
index a0162daa052..6b03deb40ed 100644
--- a/packages/common-library/requirements/_base.txt
+++ b/packages/common-library/requirements/_base.txt
@@ -4,12 +4,12 @@ orjson==3.10.10
# via
# -c requirements/../../../requirements/constraints.txt
# -r requirements/_base.in
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../requirements/constraints.txt
# -r requirements/_base.in
# pydantic-extra-types
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via -r requirements/_base.in
diff --git a/packages/common-library/requirements/_test.txt b/packages/common-library/requirements/_test.txt
index abf7c70b23c..fa3c44bbc85 100644
--- a/packages/common-library/requirements/_test.txt
+++ b/packages/common-library/requirements/_test.txt
@@ -20,12 +20,12 @@ pluggy==1.5.0
# via pytest
pprintpp==0.4.0
# via pytest-icdiff
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
diff --git a/packages/common-library/src/common_library/basic_types.py b/packages/common-library/src/common_library/basic_types.py
index ab5278cd4f4..c01d76ac328 100644
--- a/packages/common-library/src/common_library/basic_types.py
+++ b/packages/common-library/src/common_library/basic_types.py
@@ -1,4 +1,13 @@
from enum import StrEnum
+from typing import Any
+
+from pydantic_core import PydanticUndefined
+
+# SEE https://github.com/fastapi/fastapi/blob/master/fastapi/_compat.py#L75-L78
+Undefined = PydanticUndefined
+DEFAULT_FACTORY: Any = Undefined
+# Assign `DEFAULT_FACTORY` as a field's default when the actual default is provided via `default_factory`
+# SEE https://github.com/ITISFoundation/osparc-simcore/pull/6882
class LogLevel(StrEnum):
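The comments above introduce `DEFAULT_FACTORY`: it is meant to be assigned as a field's "default" so that type checkers and linters see a defaulted field, while the actual default is produced by `Field(default_factory=...)` inside `Annotated` (pydantic treats an assigned `PydanticUndefined` as "no default given"). The `app_diagnostics.py` changes later in this diff use exactly this pattern. A minimal, hypothetical sketch (the model and field names are made up for illustration; only `DEFAULT_FACTORY` and its import path come from this patch):

```python
from typing import Annotated, Any

from common_library.basic_types import DEFAULT_FACTORY
from pydantic import BaseModel, Field


class ExampleStatus(BaseModel):  # hypothetical model, for illustration only
    # Static checkers see an assigned default; pydantic ignores the
    # PydanticUndefined sentinel and builds the value via default_factory.
    services: Annotated[dict[str, Any], Field(default_factory=dict)] = DEFAULT_FACTORY


assert ExampleStatus().services == {}  # the factory supplies the empty dict
```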
diff --git a/packages/dask-task-models-library/requirements/_base.txt b/packages/dask-task-models-library/requirements/_base.txt
index 82af72057cd..7e813c8de2e 100644
--- a/packages/dask-task-models-library/requirements/_base.txt
+++ b/packages/dask-task-models-library/requirements/_base.txt
@@ -76,7 +76,7 @@ partd==1.4.2
# via dask
psutil==6.0.0
# via distributed
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -92,7 +92,7 @@ pydantic==2.9.2
# -r requirements/_base.in
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
diff --git a/packages/models-library/requirements/_base.txt b/packages/models-library/requirements/_base.txt
index b4bda5f971d..3728bf08162 100644
--- a/packages/models-library/requirements/_base.txt
+++ b/packages/models-library/requirements/_base.txt
@@ -22,7 +22,7 @@ orjson==3.10.7
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/common-library/requirements/_base.in
# -r requirements/_base.in
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
@@ -30,7 +30,7 @@ pydantic==2.9.2
# -r requirements/_base.in
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py b/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py
index 41951a1d06d..26b7d10d0be 100644
--- a/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py
+++ b/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py
@@ -1,4 +1,4 @@
-from typing import Any, TypeAlias
+from typing import Annotated, Any, TypeAlias
from pydantic import (
BaseModel,
@@ -55,7 +55,7 @@ class Worker(BaseModel):
class Scheduler(BaseModel):
status: str = Field(..., description="The running status of the scheduler")
- workers: WorkersDict | None = Field(default_factory=dict)
+ workers: Annotated[WorkersDict | None, Field(default_factory=dict)]
@field_validator("workers", mode="before")
@classmethod
diff --git a/packages/models-library/src/models_library/api_schemas_webserver/activity.py b/packages/models-library/src/models_library/api_schemas_webserver/activity.py
index 85761df0303..ce1683bed78 100644
--- a/packages/models-library/src/models_library/api_schemas_webserver/activity.py
+++ b/packages/models-library/src/models_library/api_schemas_webserver/activity.py
@@ -1,6 +1,6 @@
from typing import TypeAlias
-from pydantic import BaseModel, Field, PositiveFloat
+from pydantic import BaseModel, PositiveFloat
from ..projects_nodes_io import NodeID
@@ -18,7 +18,7 @@ class Limits(BaseModel):
class Activity(BaseModel):
stats: Stats
limits: Limits
- queued: bool = Field(None) # TODO: review since it in NOT filled
+    queued: bool | None = None  # TODO: review since it is NOT filled
ActivityStatusDict: TypeAlias = dict[NodeID, Activity]
diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects.py b/packages/models-library/src/models_library/api_schemas_webserver/projects.py
index a918ece3b92..a595245e331 100644
--- a/packages/models-library/src/models_library/api_schemas_webserver/projects.py
+++ b/packages/models-library/src/models_library/api_schemas_webserver/projects.py
@@ -88,9 +88,9 @@ class ProjectGet(OutputSchema):
)
state: ProjectState | None = None
ui: EmptyModel | StudyUI | None = None
- quality: dict[str, Any] = Field(
- default_factory=dict, json_schema_extra={"default": {}}
- )
+ quality: Annotated[
+ dict[str, Any], Field(default_factory=dict, json_schema_extra={"default": {}})
+ ]
dev: dict | None
permalink: ProjectPermalink | None = None
workspace_id: WorkspaceID | None
@@ -122,16 +122,20 @@ class ProjectReplace(InputSchema):
last_change_date: DateTimeStr
workbench: NodesDict
access_rights: dict[GroupIDStr, AccessRights]
- tags: list[int] | None = Field(
- default_factory=list, json_schema_extra={"default": []}
- )
- classifiers: list[ClassifierID] | None = Field(
- default_factory=list, json_schema_extra={"default": []}
- )
+ tags: Annotated[
+ list[int] | None, Field(default_factory=list, json_schema_extra={"default": []})
+ ]
+
+ classifiers: Annotated[
+ list[ClassifierID] | None,
+ Field(default_factory=list, json_schema_extra={"default": []}),
+ ]
+
ui: StudyUI | None = None
- quality: dict[str, Any] = Field(
- default_factory=dict, json_schema_extra={"default": {}}
- )
+
+ quality: Annotated[
+ dict[str, Any], Field(default_factory=dict, json_schema_extra={"default": {}})
+ ]
class ProjectPatch(InputSchema):
@@ -141,7 +145,7 @@ class ProjectPatch(InputSchema):
HttpUrl | None,
BeforeValidator(empty_str_to_none_pre_validator),
PlainSerializer(lambda x: str(x) if x is not None else None),
- ] = Field(default=None)
+ ] = None
access_rights: dict[GroupIDStr, AccessRights] | None = Field(default=None)
classifiers: list[ClassifierID] | None = Field(default=None)
dev: dict | None = Field(default=None)
diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes.py b/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes.py
index 81eaa893d60..45e3c87643b 100644
--- a/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes.py
+++ b/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes.py
@@ -1,5 +1,5 @@
# mypy: disable-error-code=truthy-function
-from typing import Any, Literal, TypeAlias
+from typing import Annotated, Any, Literal, TypeAlias
from pydantic import ConfigDict, Field
@@ -29,16 +29,23 @@ class NodePatch(InputSchemaWithoutCamelCase):
service_key: ServiceKey | None = Field(default=None, alias="key")
service_version: ServiceVersion | None = Field(default=None, alias="version")
label: str | None = Field(default=None)
- inputs: InputsDict = Field(default=None)
+ inputs: Annotated[
+ InputsDict, Field(default_factory=dict, json_schema_extra={"default": {}})
+ ]
inputs_required: list[InputID] | None = Field(default=None, alias="inputsRequired")
input_nodes: list[NodeID] | None = Field(default=None, alias="inputNodes")
- progress: float | None = Field(
- default=None, ge=0, le=100
- ) # NOTE: it is used by frontend for File Picker progress
- boot_options: BootOptions | None = Field(default=None, alias="bootOptions")
- outputs: dict[str, Any] | None = Field(
- default=None
- ) # NOTE: it is used by frontend for File Picker
+ progress: Annotated[
+ float | None,
+ Field(
+ # NOTE: it is used by frontend for File Picker progress
+ ge=0,
+ le=100,
+ ),
+ ] = None
+ boot_options: Annotated[BootOptions | None, Field(alias="bootOptions")] = None
+ outputs: dict[
+ str, Any
+ ] | None = None # NOTE: it is used by frontend for File Picker
class NodeCreated(OutputSchema):
diff --git a/packages/models-library/src/models_library/app_diagnostics.py b/packages/models-library/src/models_library/app_diagnostics.py
index a8652e84db2..dee4dc726cc 100644
--- a/packages/models-library/src/models_library/app_diagnostics.py
+++ b/packages/models-library/src/models_library/app_diagnostics.py
@@ -1,28 +1,38 @@
-from typing import Any
+from typing import Annotated, Any
+from common_library.basic_types import DEFAULT_FACTORY
from pydantic import AnyUrl, BaseModel, Field
class AppStatusCheck(BaseModel):
app_name: str = Field(..., description="Application name")
version: str = Field(..., description="Application's version")
- services: dict[str, Any] = Field(
- default_factory=dict,
- description="Other backend services connected from this service",
- json_schema_extra={"default": {}},
- )
+ services: Annotated[
+ dict[str, Any],
+ Field(
+ default_factory=dict,
+ description="Other backend services connected from this service",
+ json_schema_extra={"default": {}},
+ ),
+ ] = DEFAULT_FACTORY
- sessions: dict[str, Any] | None = Field(
- default_factory=dict,
- description="Client sessions info. If single session per app, then is denoted as main",
- json_schema_extra={"default": {}},
- )
+ sessions: Annotated[
+ dict[str, Any] | None,
+ Field(
+ default_factory=dict,
+ description="Client sessions info. If single session per app, then is denoted as main",
+ json_schema_extra={"default": {}},
+ ),
+ ] = DEFAULT_FACTORY
url: AnyUrl | None = Field(
default=None,
description="Link to current resource",
)
- diagnostics_url: AnyUrl | None = Field(
- default=None,
- description="Link to diagnostics report sub-resource. This MIGHT take some time to compute",
- )
+
+ diagnostics_url: Annotated[
+ AnyUrl | None,
+ Field(
+ description="Link to diagnostics report sub-resource. This MIGHT take some time to compute",
+ ),
+ ] = None
diff --git a/packages/models-library/src/models_library/callbacks_mapping.py b/packages/models-library/src/models_library/callbacks_mapping.py
index 498766ed750..475ff3c823a 100644
--- a/packages/models-library/src/models_library/callbacks_mapping.py
+++ b/packages/models-library/src/models_library/callbacks_mapping.py
@@ -1,5 +1,5 @@
from collections.abc import Sequence
-from typing import Final
+from typing import Annotated, Final
from pydantic import BaseModel, ConfigDict, Field, NonNegativeFloat, field_validator
@@ -31,14 +31,17 @@ class CallbacksMapping(BaseModel):
None,
description="command to recover prometheus metrics from a specific user service",
)
- before_shutdown: list[UserServiceCommand] = Field(
- default_factory=list,
- description=(
- "commands to run before shutting down the user services"
- "commands get executed first to last, multiple commands for the same"
- "user services are allowed"
+ before_shutdown: Annotated[
+ list[UserServiceCommand],
+ Field(
+ default_factory=list,
+ description=(
+ "commands to run before shutting down the user services"
+ "commands get executed first to last, multiple commands for the same"
+ "user services are allowed"
+ ),
),
- )
+ ]
inactivity: UserServiceCommand | None = Field(
None,
description=(
diff --git a/packages/models-library/src/models_library/generated_models/docker_rest_api.py b/packages/models-library/src/models_library/generated_models/docker_rest_api.py
index 83f88080da5..961628fffc5 100644
--- a/packages/models-library/src/models_library/generated_models/docker_rest_api.py
+++ b/packages/models-library/src/models_library/generated_models/docker_rest_api.py
@@ -1,12 +1,12 @@
# generated by datamodel-codegen:
# filename: https://docs.docker.com/reference/api/engine/version/v1.41.yaml
-# timestamp: 2024-11-08T08:47:46+00:00
+# timestamp: 2024-12-03T18:55:58+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
-from typing import Any
+from typing import Annotated, Any
from pydantic import BaseModel, ConfigDict, Field, RootModel
@@ -32,18 +32,20 @@ class Port(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- ip: str | None = Field(
- default=None,
- alias="IP",
- description="Host IP address that the container's port is mapped to",
- )
- private_port: int = Field(
- ..., alias="PrivatePort", description="Port on the container"
- )
- public_port: int | None = Field(
- default=None, alias="PublicPort", description="Port exposed on the host"
- )
- type: Type = Field(..., alias="Type")
+ ip: Annotated[
+ str | None,
+ Field(
+ alias="IP",
+ description="Host IP address that the container's port is mapped to",
+ ),
+ ] = None
+ private_port: Annotated[
+ int, Field(alias="PrivatePort", description="Port on the container")
+ ]
+ public_port: Annotated[
+ int | None, Field(alias="PublicPort", description="Port exposed on the host")
+ ] = None
+ type: Annotated[Type, Field(alias="Type")]
class Type1(str, Enum):
@@ -73,54 +75,70 @@ class MountPoint(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- type: Type1 | None = Field(
- default=None,
- alias="Type",
- description="The mount type:\n\n- `bind` a mount of a file or directory from the host into the container.\n- `volume` a docker volume with the given `Name`.\n- `tmpfs` a `tmpfs`.\n- `npipe` a named pipe from the host into the container.\n",
- examples=["volume"],
- )
- name: str | None = Field(
- default=None,
- alias="Name",
- description="Name is the name reference to the underlying data defined by `Source`\ne.g., the volume name.\n",
- examples=["myvolume"],
- )
- source: str | None = Field(
- default=None,
- alias="Source",
- description="Source location of the mount.\n\nFor volumes, this contains the storage location of the volume (within\n`/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains\nthe source (host) part of the bind-mount. For `tmpfs` mount points, this\nfield is empty.\n",
- examples=["/var/lib/docker/volumes/myvolume/_data"],
- )
- destination: str | None = Field(
- default=None,
- alias="Destination",
- description="Destination is the path relative to the container root (`/`) where\nthe `Source` is mounted inside the container.\n",
- examples=["/usr/share/nginx/html/"],
- )
- driver: str | None = Field(
- default=None,
- alias="Driver",
- description="Driver is the volume driver used to create the volume (if it is a volume).\n",
- examples=["local"],
- )
- mode: str | None = Field(
- default=None,
- alias="Mode",
- description='Mode is a comma separated list of options supplied by the user when\ncreating the bind/volume mount.\n\nThe default is platform-specific (`"z"` on Linux, empty on Windows).\n',
- examples=["z"],
- )
- rw: bool | None = Field(
- default=None,
- alias="RW",
- description="Whether the mount is mounted writable (read-write).\n",
- examples=[True],
- )
- propagation: str | None = Field(
- default=None,
- alias="Propagation",
- description="Propagation describes how mounts are propagated from the host into the\nmount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt)\nfor details. This field is not used on Windows.\n",
- examples=[""],
- )
+ type: Annotated[
+ Type1 | None,
+ Field(
+ alias="Type",
+ description="The mount type:\n\n- `bind` a mount of a file or directory from the host into the container.\n- `volume` a docker volume with the given `Name`.\n- `tmpfs` a `tmpfs`.\n- `npipe` a named pipe from the host into the container.\n",
+ examples=["volume"],
+ ),
+ ] = None
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name",
+ description="Name is the name reference to the underlying data defined by `Source`\ne.g., the volume name.\n",
+ examples=["myvolume"],
+ ),
+ ] = None
+ source: Annotated[
+ str | None,
+ Field(
+ alias="Source",
+ description="Source location of the mount.\n\nFor volumes, this contains the storage location of the volume (within\n`/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains\nthe source (host) part of the bind-mount. For `tmpfs` mount points, this\nfield is empty.\n",
+ examples=["/var/lib/docker/volumes/myvolume/_data"],
+ ),
+ ] = None
+ destination: Annotated[
+ str | None,
+ Field(
+ alias="Destination",
+ description="Destination is the path relative to the container root (`/`) where\nthe `Source` is mounted inside the container.\n",
+ examples=["/usr/share/nginx/html/"],
+ ),
+ ] = None
+ driver: Annotated[
+ str | None,
+ Field(
+ alias="Driver",
+ description="Driver is the volume driver used to create the volume (if it is a volume).\n",
+ examples=["local"],
+ ),
+ ] = None
+ mode: Annotated[
+ str | None,
+ Field(
+ alias="Mode",
+ description='Mode is a comma separated list of options supplied by the user when\ncreating the bind/volume mount.\n\nThe default is platform-specific (`"z"` on Linux, empty on Windows).\n',
+ examples=["z"],
+ ),
+ ] = None
+ rw: Annotated[
+ bool | None,
+ Field(
+ alias="RW",
+ description="Whether the mount is mounted writable (read-write).\n",
+ examples=[True],
+ ),
+ ] = None
+ propagation: Annotated[
+ str | None,
+ Field(
+ alias="Propagation",
+ description="Propagation describes how mounts are propagated from the host into the\nmount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt)\nfor details. This field is not used on Windows.\n",
+ examples=[""],
+ ),
+ ] = None
class DeviceMapping(BaseModel):
@@ -131,9 +149,9 @@ class DeviceMapping(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- path_on_host: str | None = Field(default=None, alias="PathOnHost")
- path_in_container: str | None = Field(default=None, alias="PathInContainer")
- cgroup_permissions: str | None = Field(default=None, alias="CgroupPermissions")
+ path_on_host: Annotated[str | None, Field(alias="PathOnHost")] = None
+ path_in_container: Annotated[str | None, Field(alias="PathInContainer")] = None
+ cgroup_permissions: Annotated[str | None, Field(alias="CgroupPermissions")] = None
class DeviceRequest(BaseModel):
@@ -144,32 +162,38 @@ class DeviceRequest(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- driver: str | None = Field(default=None, alias="Driver", examples=["nvidia"])
- count: int | None = Field(default=None, alias="Count", examples=[-1])
- device_i_ds: list[str] | None = Field(
- default=None,
- alias="DeviceIDs",
- examples=[["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]],
- )
- capabilities: list[list[str]] | None = Field(
- default=None,
- alias="Capabilities",
- description="A list of capabilities; an OR list of AND lists of capabilities.\n",
- examples=[[["gpu", "nvidia", "compute"]]],
- )
- options: dict[str, str] | None = Field(
- default=None,
- alias="Options",
- description="Driver-specific options, specified as a key/value pairs. These options\nare passed directly to the driver.\n",
- )
+ driver: Annotated[str | None, Field(alias="Driver", examples=["nvidia"])] = None
+ count: Annotated[int | None, Field(alias="Count", examples=[-1])] = None
+ device_i_ds: Annotated[
+ list[str] | None,
+ Field(
+ alias="DeviceIDs",
+ examples=[["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]],
+ ),
+ ] = None
+ capabilities: Annotated[
+ list[list[str]] | None,
+ Field(
+ alias="Capabilities",
+ description="A list of capabilities; an OR list of AND lists of capabilities.\n",
+ examples=[[["gpu", "nvidia", "compute"]]],
+ ),
+ ] = None
+ options: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Options",
+ description="Driver-specific options, specified as a key/value pairs. These options\nare passed directly to the driver.\n",
+ ),
+ ] = None
class ThrottleDevice(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- path: str | None = Field(default=None, alias="Path", description="Device path")
- rate: int | None = Field(default=None, alias="Rate", description="Rate", ge=0)
+ path: Annotated[str | None, Field(alias="Path", description="Device path")] = None
+ rate: Annotated[int | None, Field(alias="Rate", description="Rate", ge=0)] = None
class Type2(str, Enum):
@@ -210,14 +234,17 @@ class BindOptions(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- propagation: Propagation | None = Field(
- default=None,
- alias="Propagation",
- description="A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.",
- )
- non_recursive: bool | None = Field(
- default=False, alias="NonRecursive", description="Disable recursive bind mount."
- )
+ propagation: Annotated[
+ Propagation | None,
+ Field(
+ alias="Propagation",
+ description="A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.",
+ ),
+ ] = None
+ non_recursive: Annotated[
+ bool | None,
+ Field(alias="NonRecursive", description="Disable recursive bind mount."),
+ ] = False
class DriverConfig(BaseModel):
@@ -228,16 +255,16 @@ class DriverConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="Name of the driver to use to create the volume.",
- )
- options: dict[str, str] | None = Field(
- default=None,
- alias="Options",
- description="key/value map of driver specific options.",
- )
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name", description="Name of the driver to use to create the volume."
+ ),
+ ] = None
+ options: Annotated[
+ dict[str, str] | None,
+ Field(alias="Options", description="key/value map of driver specific options."),
+ ] = None
class VolumeOptions(BaseModel):
@@ -248,17 +275,18 @@ class VolumeOptions(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- no_copy: bool | None = Field(
- default=False,
- alias="NoCopy",
- description="Populate volume with data from the target.",
- )
- labels: dict[str, str] | None = Field(
- default=None, alias="Labels", description="User-defined key/value metadata."
- )
- driver_config: DriverConfig | None = Field(
- default=None, alias="DriverConfig", description="Map of driver specific options"
- )
+ no_copy: Annotated[
+ bool | None,
+ Field(alias="NoCopy", description="Populate volume with data from the target."),
+ ] = False
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(alias="Labels", description="User-defined key/value metadata."),
+ ] = None
+ driver_config: Annotated[
+ DriverConfig | None,
+ Field(alias="DriverConfig", description="Map of driver specific options"),
+ ] = None
class TmpfsOptions(BaseModel):
@@ -269,60 +297,72 @@ class TmpfsOptions(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- size_bytes: int | None = Field(
- default=None,
- alias="SizeBytes",
- description="The size for the tmpfs mount in bytes.",
- )
- mode: int | None = Field(
- default=None,
- alias="Mode",
- description="The permission mode for the tmpfs mount in an integer.",
- )
+ size_bytes: Annotated[
+ int | None,
+ Field(alias="SizeBytes", description="The size for the tmpfs mount in bytes."),
+ ] = None
+ mode: Annotated[
+ int | None,
+ Field(
+ alias="Mode",
+ description="The permission mode for the tmpfs mount in an integer.",
+ ),
+ ] = None
class Mount(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- target: str | None = Field(
- default=None, alias="Target", description="Container path."
- )
- source: str | None = Field(
- default=None,
- alias="Source",
- description="Mount source (e.g. a volume name, a host path).",
- )
- type: Type2 | None = Field(
- default=None,
- alias="Type",
- description="The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n- `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.\n",
- )
- read_only: bool | None = Field(
- default=None,
- alias="ReadOnly",
- description="Whether the mount should be read-only.",
- )
- consistency: str | None = Field(
- default=None,
- alias="Consistency",
- description="The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`.",
- )
- bind_options: BindOptions | None = Field(
- default=None,
- alias="BindOptions",
- description="Optional configuration for the `bind` type.",
- )
- volume_options: VolumeOptions | None = Field(
- default=None,
- alias="VolumeOptions",
- description="Optional configuration for the `volume` type.",
- )
- tmpfs_options: TmpfsOptions | None = Field(
- default=None,
- alias="TmpfsOptions",
- description="Optional configuration for the `tmpfs` type.",
- )
+ target: Annotated[
+ str | None, Field(alias="Target", description="Container path.")
+ ] = None
+ source: Annotated[
+ str | None,
+ Field(
+ alias="Source",
+ description="Mount source (e.g. a volume name, a host path).",
+ ),
+ ] = None
+ type: Annotated[
+ Type2 | None,
+ Field(
+ alias="Type",
+ description="The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n- `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.\n",
+ ),
+ ] = None
+ read_only: Annotated[
+ bool | None,
+ Field(alias="ReadOnly", description="Whether the mount should be read-only."),
+ ] = None
+ consistency: Annotated[
+ str | None,
+ Field(
+ alias="Consistency",
+ description="The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`.",
+ ),
+ ] = None
+ bind_options: Annotated[
+ BindOptions | None,
+ Field(
+ alias="BindOptions",
+ description="Optional configuration for the `bind` type.",
+ ),
+ ] = None
+ volume_options: Annotated[
+ VolumeOptions | None,
+ Field(
+ alias="VolumeOptions",
+ description="Optional configuration for the `volume` type.",
+ ),
+ ] = None
+ tmpfs_options: Annotated[
+ TmpfsOptions | None,
+ Field(
+ alias="TmpfsOptions",
+ description="Optional configuration for the `tmpfs` type.",
+ ),
+ ] = None
class Name(str, Enum):
@@ -355,33 +395,39 @@ class RestartPolicy(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: Name | None = Field(
- default=None,
- alias="Name",
- description="- Empty string means not to restart\n- `no` Do not automatically restart\n- `always` Always restart\n- `unless-stopped` Restart always except when the user has manually stopped the container\n- `on-failure` Restart only when the container exit code is non-zero\n",
- )
- maximum_retry_count: int | None = Field(
- default=None,
- alias="MaximumRetryCount",
- description="If `on-failure` is used, the number of times to retry before giving up.\n",
- )
+ name: Annotated[
+ Name | None,
+ Field(
+ alias="Name",
+ description="- Empty string means not to restart\n- `no` Do not automatically restart\n- `always` Always restart\n- `unless-stopped` Restart always except when the user has manually stopped the container\n- `on-failure` Restart only when the container exit code is non-zero\n",
+ ),
+ ] = None
+ maximum_retry_count: Annotated[
+ int | None,
+ Field(
+ alias="MaximumRetryCount",
+ description="If `on-failure` is used, the number of times to retry before giving up.\n",
+ ),
+ ] = None
class BlkioWeightDeviceItem(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- path: str | None = Field(default=None, alias="Path")
- weight: int | None = Field(default=None, alias="Weight", ge=0)
+ path: Annotated[str | None, Field(alias="Path")] = None
+ weight: Annotated[int | None, Field(alias="Weight", ge=0)] = None
class Ulimit(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(default=None, alias="Name", description="Name of ulimit")
- soft: int | None = Field(default=None, alias="Soft", description="Soft limit")
- hard: int | None = Field(default=None, alias="Hard", description="Hard limit")
+ name: Annotated[
+ str | None, Field(alias="Name", description="Name of ulimit")
+ ] = None
+ soft: Annotated[int | None, Field(alias="Soft", description="Soft limit")] = None
+ hard: Annotated[int | None, Field(alias="Hard", description="Hard limit")] = None
class Resources(BaseModel):
@@ -392,170 +438,225 @@ class Resources(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- cpu_shares: int | None = Field(
- default=None,
- alias="CpuShares",
- description="An integer value representing this container's relative CPU weight\nversus other containers.\n",
- )
- memory: int | None = Field(
- default=0, alias="Memory", description="Memory limit in bytes."
- )
- cgroup_parent: str | None = Field(
- default=None,
- alias="CgroupParent",
- description="Path to `cgroups` under which the container's `cgroup` is created. If\nthe path is not absolute, the path is considered to be relative to the\n`cgroups` path of the init process. Cgroups are created if they do not\nalready exist.\n",
- )
- blkio_weight: int | None = Field(
- default=None,
- alias="BlkioWeight",
- description="Block IO weight (relative weight).",
- ge=0,
- le=1000,
- )
- blkio_weight_device: list[BlkioWeightDeviceItem] | None = Field(
- default=None,
- alias="BlkioWeightDevice",
- description='Block IO weight (relative device weight) in the form:\n\n```\n[{"Path": "device_path", "Weight": weight}]\n```\n',
- )
- blkio_device_read_bps: list[ThrottleDevice] | None = Field(
- default=None,
- alias="BlkioDeviceReadBps",
- description='Limit read rate (bytes per second) from a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n',
- )
- blkio_device_write_bps: list[ThrottleDevice] | None = Field(
- default=None,
- alias="BlkioDeviceWriteBps",
- description='Limit write rate (bytes per second) to a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n',
- )
- blkio_device_read_i_ops: list[ThrottleDevice] | None = Field(
- default=None,
- alias="BlkioDeviceReadIOps",
- description='Limit read rate (IO per second) from a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n',
- )
- blkio_device_write_i_ops: list[ThrottleDevice] | None = Field(
- default=None,
- alias="BlkioDeviceWriteIOps",
- description='Limit write rate (IO per second) to a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n',
- )
- cpu_period: int | None = Field(
- default=None,
- alias="CpuPeriod",
- description="The length of a CPU period in microseconds.",
- )
- cpu_quota: int | None = Field(
- default=None,
- alias="CpuQuota",
- description="Microseconds of CPU time that the container can get in a CPU period.\n",
- )
- cpu_realtime_period: int | None = Field(
- default=None,
- alias="CpuRealtimePeriod",
- description="The length of a CPU real-time period in microseconds. Set to 0 to\nallocate no time allocated to real-time tasks.\n",
- )
- cpu_realtime_runtime: int | None = Field(
- default=None,
- alias="CpuRealtimeRuntime",
- description="The length of a CPU real-time runtime in microseconds. Set to 0 to\nallocate no time allocated to real-time tasks.\n",
- )
- cpuset_cpus: str | None = Field(
- default=None,
- alias="CpusetCpus",
- description="CPUs in which to allow execution (e.g., `0-3`, `0,1`).\n",
- examples=["0-3"],
- )
- cpuset_mems: str | None = Field(
- default=None,
- alias="CpusetMems",
- description="Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only\neffective on NUMA systems.\n",
- )
- devices: list[DeviceMapping] | None = Field(
- default=None,
- alias="Devices",
- description="A list of devices to add to the container.",
- )
- device_cgroup_rules: list[str] | None = Field(
- default=None,
- alias="DeviceCgroupRules",
- description="a list of cgroup rules to apply to the container",
- )
- device_requests: list[DeviceRequest] | None = Field(
- default=None,
- alias="DeviceRequests",
- description="A list of requests for devices to be sent to device drivers.\n",
- )
- kernel_memory: int | None = Field(
- default=None,
- alias="KernelMemory",
-        description="Kernel memory limit in bytes.\n\n\n\n> **Deprecated**: This field is deprecated as the kernel 5.4 deprecated\n> `kmem.limit_in_bytes`.\n",
- examples=[209715200],
- )
- kernel_memory_tcp: int | None = Field(
- default=None,
- alias="KernelMemoryTCP",
- description="Hard limit for kernel TCP buffer memory (in bytes).",
- )
- memory_reservation: int | None = Field(
- default=None,
- alias="MemoryReservation",
- description="Memory soft limit in bytes.",
- )
- memory_swap: int | None = Field(
- default=None,
- alias="MemorySwap",
- description="Total memory limit (memory + swap). Set as `-1` to enable unlimited\nswap.\n",
- )
- memory_swappiness: int | None = Field(
- default=None,
- alias="MemorySwappiness",
- description="Tune a container's memory swappiness behavior. Accepts an integer\nbetween 0 and 100.\n",
- ge=0,
- le=100,
- )
- nano_cpus: int | None = Field(
- default=None,
- alias="NanoCpus",
- description="CPU quota in units of 10-9 CPUs.",
- )
- oom_kill_disable: bool | None = Field(
- default=None,
- alias="OomKillDisable",
- description="Disable OOM Killer for the container.",
- )
- init: bool | None = Field(
- default=None,
- alias="Init",
- description="Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n",
- )
- pids_limit: int | None = Field(
- default=None,
- alias="PidsLimit",
- description="Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`\nto not change.\n",
- )
- ulimits: list[Ulimit] | None = Field(
- default=None,
- alias="Ulimits",
- description='A list of resource limits to set in the container. For example:\n\n```\n{"Name": "nofile", "Soft": 1024, "Hard": 2048}\n```\n',
- )
- cpu_count: int | None = Field(
- default=None,
- alias="CpuCount",
- description="The number of usable CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are\nmutually exclusive. The order of precedence is `CPUCount` first, then\n`CPUShares`, and `CPUPercent` last.\n",
- )
- cpu_percent: int | None = Field(
- default=None,
- alias="CpuPercent",
- description="The usable percentage of the available CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are\nmutually exclusive. The order of precedence is `CPUCount` first, then\n`CPUShares`, and `CPUPercent` last.\n",
- )
- io_maximum_i_ops: int | None = Field(
- default=None,
- alias="IOMaximumIOps",
- description="Maximum IOps for the container system drive (Windows only)",
- )
- io_maximum_bandwidth: int | None = Field(
- default=None,
- alias="IOMaximumBandwidth",
- description="Maximum IO in bytes per second for the container system drive\n(Windows only).\n",
- )
+ cpu_shares: Annotated[
+ int | None,
+ Field(
+ alias="CpuShares",
+ description="An integer value representing this container's relative CPU weight\nversus other containers.\n",
+ ),
+ ] = None
+ memory: Annotated[
+ int | None, Field(alias="Memory", description="Memory limit in bytes.")
+ ] = 0
+ cgroup_parent: Annotated[
+ str | None,
+ Field(
+ alias="CgroupParent",
+ description="Path to `cgroups` under which the container's `cgroup` is created. If\nthe path is not absolute, the path is considered to be relative to the\n`cgroups` path of the init process. Cgroups are created if they do not\nalready exist.\n",
+ ),
+ ] = None
+ blkio_weight: Annotated[
+ int | None,
+ Field(
+ alias="BlkioWeight",
+ description="Block IO weight (relative weight).",
+ ge=0,
+ le=1000,
+ ),
+ ] = None
+ blkio_weight_device: Annotated[
+ list[BlkioWeightDeviceItem] | None,
+ Field(
+ alias="BlkioWeightDevice",
+ description='Block IO weight (relative device weight) in the form:\n\n```\n[{"Path": "device_path", "Weight": weight}]\n```\n',
+ ),
+ ] = None
+ blkio_device_read_bps: Annotated[
+ list[ThrottleDevice] | None,
+ Field(
+ alias="BlkioDeviceReadBps",
+ description='Limit read rate (bytes per second) from a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n',
+ ),
+ ] = None
+ blkio_device_write_bps: Annotated[
+ list[ThrottleDevice] | None,
+ Field(
+ alias="BlkioDeviceWriteBps",
+ description='Limit write rate (bytes per second) to a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n',
+ ),
+ ] = None
+ blkio_device_read_i_ops: Annotated[
+ list[ThrottleDevice] | None,
+ Field(
+ alias="BlkioDeviceReadIOps",
+ description='Limit read rate (IO per second) from a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n',
+ ),
+ ] = None
+ blkio_device_write_i_ops: Annotated[
+ list[ThrottleDevice] | None,
+ Field(
+ alias="BlkioDeviceWriteIOps",
+ description='Limit write rate (IO per second) to a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n',
+ ),
+ ] = None
+ cpu_period: Annotated[
+ int | None,
+ Field(
+ alias="CpuPeriod", description="The length of a CPU period in microseconds."
+ ),
+ ] = None
+ cpu_quota: Annotated[
+ int | None,
+ Field(
+ alias="CpuQuota",
+ description="Microseconds of CPU time that the container can get in a CPU period.\n",
+ ),
+ ] = None
+ cpu_realtime_period: Annotated[
+ int | None,
+ Field(
+ alias="CpuRealtimePeriod",
+ description="The length of a CPU real-time period in microseconds. Set to 0 to\nallocate no time allocated to real-time tasks.\n",
+ ),
+ ] = None
+ cpu_realtime_runtime: Annotated[
+ int | None,
+ Field(
+ alias="CpuRealtimeRuntime",
+ description="The length of a CPU real-time runtime in microseconds. Set to 0 to\nallocate no time allocated to real-time tasks.\n",
+ ),
+ ] = None
+ cpuset_cpus: Annotated[
+ str | None,
+ Field(
+ alias="CpusetCpus",
+ description="CPUs in which to allow execution (e.g., `0-3`, `0,1`).\n",
+ examples=["0-3"],
+ ),
+ ] = None
+ cpuset_mems: Annotated[
+ str | None,
+ Field(
+ alias="CpusetMems",
+ description="Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only\neffective on NUMA systems.\n",
+ ),
+ ] = None
+ devices: Annotated[
+ list[DeviceMapping] | None,
+ Field(
+ alias="Devices", description="A list of devices to add to the container."
+ ),
+ ] = None
+ device_cgroup_rules: Annotated[
+ list[str] | None,
+ Field(
+ alias="DeviceCgroupRules",
+ description="a list of cgroup rules to apply to the container",
+ ),
+ ] = None
+ device_requests: Annotated[
+ list[DeviceRequest] | None,
+ Field(
+ alias="DeviceRequests",
+ description="A list of requests for devices to be sent to device drivers.\n",
+ ),
+ ] = None
+ kernel_memory: Annotated[
+ int | None,
+ Field(
+ alias="KernelMemory",
+            description="Kernel memory limit in bytes.\n\n\n\n> **Deprecated**: This field is deprecated as the kernel 5.4 deprecated\n> `kmem.limit_in_bytes`.\n",
+ examples=[209715200],
+ ),
+ ] = None
+ kernel_memory_tcp: Annotated[
+ int | None,
+ Field(
+ alias="KernelMemoryTCP",
+ description="Hard limit for kernel TCP buffer memory (in bytes).",
+ ),
+ ] = None
+ memory_reservation: Annotated[
+ int | None,
+ Field(alias="MemoryReservation", description="Memory soft limit in bytes."),
+ ] = None
+ memory_swap: Annotated[
+ int | None,
+ Field(
+ alias="MemorySwap",
+ description="Total memory limit (memory + swap). Set as `-1` to enable unlimited\nswap.\n",
+ ),
+ ] = None
+ memory_swappiness: Annotated[
+ int | None,
+ Field(
+ alias="MemorySwappiness",
+ description="Tune a container's memory swappiness behavior. Accepts an integer\nbetween 0 and 100.\n",
+ ge=0,
+ le=100,
+ ),
+ ] = None
+ nano_cpus: Annotated[
+ int | None,
+ Field(
+ alias="NanoCpus", description="CPU quota in units of 10-9 CPUs."
+ ),
+ ] = None
+ oom_kill_disable: Annotated[
+ bool | None,
+ Field(
+ alias="OomKillDisable", description="Disable OOM Killer for the container."
+ ),
+ ] = None
+ init: Annotated[
+ bool | None,
+ Field(
+ alias="Init",
+ description="Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n",
+ ),
+ ] = None
+ pids_limit: Annotated[
+ int | None,
+ Field(
+ alias="PidsLimit",
+ description="Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`\nto not change.\n",
+ ),
+ ] = None
+ ulimits: Annotated[
+ list[Ulimit] | None,
+ Field(
+ alias="Ulimits",
+ description='A list of resource limits to set in the container. For example:\n\n```\n{"Name": "nofile", "Soft": 1024, "Hard": 2048}\n```\n',
+ ),
+ ] = None
+ cpu_count: Annotated[
+ int | None,
+ Field(
+ alias="CpuCount",
+ description="The number of usable CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are\nmutually exclusive. The order of precedence is `CPUCount` first, then\n`CPUShares`, and `CPUPercent` last.\n",
+ ),
+ ] = None
+ cpu_percent: Annotated[
+ int | None,
+ Field(
+ alias="CpuPercent",
+ description="The usable percentage of the available CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are\nmutually exclusive. The order of precedence is `CPUCount` first, then\n`CPUShares`, and `CPUPercent` last.\n",
+ ),
+ ] = None
+ io_maximum_i_ops: Annotated[
+ int | None,
+ Field(
+ alias="IOMaximumIOps",
+ description="Maximum IOps for the container system drive (Windows only)",
+ ),
+ ] = None
+ io_maximum_bandwidth: Annotated[
+ int | None,
+ Field(
+ alias="IOMaximumBandwidth",
+ description="Maximum IO in bytes per second for the container system drive\n(Windows only).\n",
+ ),
+ ] = None
class Limit(BaseModel):
@@ -567,46 +668,48 @@ class Limit(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- nano_cp_us: int | None = Field(
- default=None, alias="NanoCPUs", examples=[4000000000]
- )
- memory_bytes: int | None = Field(
- default=None, alias="MemoryBytes", examples=[8272408576]
- )
- pids: int | None = Field(
- default=0,
- alias="Pids",
- description="Limits the maximum number of PIDs in the container. Set `0` for unlimited.\n",
- examples=[100],
- )
+ nano_cp_us: Annotated[
+ int | None, Field(alias="NanoCPUs", examples=[4000000000])
+ ] = None
+ memory_bytes: Annotated[
+ int | None, Field(alias="MemoryBytes", examples=[8272408576])
+ ] = None
+ pids: Annotated[
+ int | None,
+ Field(
+ alias="Pids",
+ description="Limits the maximum number of PIDs in the container. Set `0` for unlimited.\n",
+ examples=[100],
+ ),
+ ] = 0
class NamedResourceSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- kind: str | None = Field(default=None, alias="Kind")
- value: str | None = Field(default=None, alias="Value")
+ kind: Annotated[str | None, Field(alias="Kind")] = None
+ value: Annotated[str | None, Field(alias="Value")] = None
class DiscreteResourceSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- kind: str | None = Field(default=None, alias="Kind")
- value: int | None = Field(default=None, alias="Value")
+ kind: Annotated[str | None, Field(alias="Kind")] = None
+ value: Annotated[int | None, Field(alias="Value")] = None
class GenericResource(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- named_resource_spec: NamedResourceSpec | None = Field(
- default=None, alias="NamedResourceSpec"
- )
- discrete_resource_spec: DiscreteResourceSpec | None = Field(
- default=None, alias="DiscreteResourceSpec"
- )
+ named_resource_spec: Annotated[
+ NamedResourceSpec | None, Field(alias="NamedResourceSpec")
+ ] = None
+ discrete_resource_spec: Annotated[
+ DiscreteResourceSpec | None, Field(alias="DiscreteResourceSpec")
+ ] = None
class GenericResources(RootModel[list[GenericResource]]):
@@ -619,17 +722,19 @@ class GenericResources(RootModel[list[GenericResource]]):
model_config = ConfigDict(
populate_by_name=True,
)
- root: list[GenericResource] = Field(
- ...,
- description="User-defined resources can be either Integer resources (e.g, `SSD=3`) or\nString resources (e.g, `GPU=UUID1`).\n",
- examples=[
- [
- {"DiscreteResourceSpec": {"Kind": "SSD", "Value": 3}},
- {"NamedResourceSpec": {"Kind": "GPU", "Value": "UUID1"}},
- {"NamedResourceSpec": {"Kind": "GPU", "Value": "UUID2"}},
- ]
- ],
- )
+ root: Annotated[
+ list[GenericResource],
+ Field(
+ description="User-defined resources can be either Integer resources (e.g, `SSD=3`) or\nString resources (e.g, `GPU=UUID1`).\n",
+ examples=[
+ [
+ {"DiscreteResourceSpec": {"Kind": "SSD", "Value": 3}},
+ {"NamedResourceSpec": {"Kind": "GPU", "Value": "UUID1"}},
+ {"NamedResourceSpec": {"Kind": "GPU", "Value": "UUID2"}},
+ ]
+ ],
+ ),
+ ]
class HealthConfig(BaseModel):
@@ -640,31 +745,41 @@ class HealthConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- test: list[str] | None = Field(
- default=None,
- alias="Test",
- description='The test to perform. Possible values are:\n\n- `[]` inherit healthcheck from image or parent image\n- `["NONE"]` disable healthcheck\n- `["CMD", args...]` exec arguments directly\n- `["CMD-SHELL", command]` run command with system\'s default shell\n',
- )
- interval: int | None = Field(
- default=None,
- alias="Interval",
- description="The time to wait between checks in nanoseconds. It should be 0 or at\nleast 1000000 (1 ms). 0 means inherit.\n",
- )
- timeout: int | None = Field(
- default=None,
- alias="Timeout",
- description="The time to wait before considering the check to have hung. It should\nbe 0 or at least 1000000 (1 ms). 0 means inherit.\n",
- )
- retries: int | None = Field(
- default=None,
- alias="Retries",
- description="The number of consecutive failures needed to consider a container as\nunhealthy. 0 means inherit.\n",
- )
- start_period: int | None = Field(
- default=None,
- alias="StartPeriod",
- description="Start period for the container to initialize before starting\nhealth-retries countdown in nanoseconds. It should be 0 or at least\n1000000 (1 ms). 0 means inherit.\n",
- )
+ test: Annotated[
+ list[str] | None,
+ Field(
+ alias="Test",
+ description='The test to perform. Possible values are:\n\n- `[]` inherit healthcheck from image or parent image\n- `["NONE"]` disable healthcheck\n- `["CMD", args...]` exec arguments directly\n- `["CMD-SHELL", command]` run command with system\'s default shell\n',
+ ),
+ ] = None
+ interval: Annotated[
+ int | None,
+ Field(
+ alias="Interval",
+ description="The time to wait between checks in nanoseconds. It should be 0 or at\nleast 1000000 (1 ms). 0 means inherit.\n",
+ ),
+ ] = None
+ timeout: Annotated[
+ int | None,
+ Field(
+ alias="Timeout",
+ description="The time to wait before considering the check to have hung. It should\nbe 0 or at least 1000000 (1 ms). 0 means inherit.\n",
+ ),
+ ] = None
+ retries: Annotated[
+ int | None,
+ Field(
+ alias="Retries",
+ description="The number of consecutive failures needed to consider a container as\nunhealthy. 0 means inherit.\n",
+ ),
+ ] = None
+ start_period: Annotated[
+ int | None,
+ Field(
+ alias="StartPeriod",
+ description="Start period for the container to initialize before starting\nhealth-retries countdown in nanoseconds. It should be 0 or at least\n1000000 (1 ms). 0 means inherit.\n",
+ ),
+ ] = None
class Status(str, Enum):
@@ -693,27 +808,33 @@ class HealthcheckResult(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- start: datetime | None = Field(
- default=None,
- alias="Start",
- description="Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2020-01-04T10:44:24.496525531Z"],
- )
- end: str | None = Field(
- default=None,
- alias="End",
- description="Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2020-01-04T10:45:21.364524523Z"],
- )
- exit_code: int | None = Field(
- default=None,
- alias="ExitCode",
- description="ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n",
- examples=[0],
- )
- output: str | None = Field(
- default=None, alias="Output", description="Output from last check"
- )
+ start: Annotated[
+ datetime | None,
+ Field(
+ alias="Start",
+ description="Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2020-01-04T10:44:24.496525531Z"],
+ ),
+ ] = None
+ end: Annotated[
+ str | None,
+ Field(
+ alias="End",
+ description="Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2020-01-04T10:45:21.364524523Z"],
+ ),
+ ] = None
+ exit_code: Annotated[
+ int | None,
+ Field(
+ alias="ExitCode",
+ description="ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n",
+ examples=[0],
+ ),
+ ] = None
+ output: Annotated[
+ str | None, Field(alias="Output", description="Output from last check")
+ ] = None
class Type3(str, Enum):
@@ -736,8 +857,8 @@ class LogConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- type: Type3 | None = Field(default=None, alias="Type")
- config: dict[str, str] | None = Field(default=None, alias="Config")
+ type: Annotated[Type3 | None, Field(alias="Type")] = None
+ config: Annotated[dict[str, str] | None, Field(alias="Config")] = None
class CgroupnsMode(str, Enum):
@@ -760,7 +881,7 @@ class ConsoleSizeItem(RootModel[int]):
model_config = ConfigDict(
populate_by_name=True,
)
- root: int = Field(..., ge=0)
+ root: Annotated[int, Field(ge=0)]
class Isolation(str, Enum):
@@ -790,136 +911,176 @@ class ContainerConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- hostname: str | None = Field(
- default=None,
- alias="Hostname",
- description="The hostname to use for the container, as a valid RFC 1123 hostname.\n",
- examples=["439f4e91bd1d"],
- )
- domainname: str | None = Field(
- default=None,
- alias="Domainname",
- description="The domain name to use for the container.\n",
- )
- user: str | None = Field(
- default=None,
- alias="User",
- description="The user that commands are run as inside the container.",
- )
- attach_stdin: bool | None = Field(
- default=False, alias="AttachStdin", description="Whether to attach to `stdin`."
- )
- attach_stdout: bool | None = Field(
- default=True, alias="AttachStdout", description="Whether to attach to `stdout`."
- )
- attach_stderr: bool | None = Field(
- default=True, alias="AttachStderr", description="Whether to attach to `stderr`."
- )
- exposed_ports: dict[str, dict[str, Any]] | None = Field(
- default=None,
- alias="ExposedPorts",
- description='An object mapping ports to an empty object in the form:\n\n`{"/": {}}`\n',
- examples=[{"80/tcp": {}, "443/tcp": {}}],
- )
- tty: bool | None = Field(
- default=False,
- alias="Tty",
- description="Attach standard streams to a TTY, including `stdin` if it is not closed.\n",
- )
- open_stdin: bool | None = Field(
- default=False, alias="OpenStdin", description="Open `stdin`"
- )
- stdin_once: bool | None = Field(
- default=False,
- alias="StdinOnce",
- description="Close `stdin` after one attached client disconnects",
- )
- env: list[str] | None = Field(
- default=None,
- alias="Env",
- description='A list of environment variables to set inside the container in the\nform `["VAR=value", ...]`. A variable without `=` is removed from the\nenvironment, rather than to have an empty value.\n',
- examples=[
- ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
- ],
- )
- cmd: list[str] | None = Field(
- default=None,
- alias="Cmd",
- description="Command to run specified as a string or an array of strings.\n",
- examples=[["/bin/sh"]],
- )
- healthcheck: HealthConfig | None = Field(default=None, alias="Healthcheck")
- args_escaped: bool | None = Field(
- default=False,
- alias="ArgsEscaped",
- description="Command is already escaped (Windows only)",
- examples=[False],
- )
- image: str | None = Field(
- default=None,
- alias="Image",
- description="The name (or reference) of the image to use when creating the container,\nor which was used when the container was created.\n",
- examples=["example-image:1.0"],
- )
- volumes: dict[str, dict[str, Any]] | None = Field(
- default=None,
- alias="Volumes",
- description="An object mapping mount point paths inside the container to empty\nobjects.\n",
- )
- working_dir: str | None = Field(
- default=None,
- alias="WorkingDir",
- description="The working directory for commands to run in.",
- examples=["/public/"],
- )
- entrypoint: list[str] | None = Field(
- default=None,
- alias="Entrypoint",
- description='The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[""]`) then the\nentry point is reset to system default (i.e., the entry point used by\ndocker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n',
- examples=[[]],
- )
- network_disabled: bool | None = Field(
- default=None,
- alias="NetworkDisabled",
- description="Disable networking for the container.",
- )
- mac_address: str | None = Field(
- default=None, alias="MacAddress", description="MAC address of the container."
- )
- on_build: list[str] | None = Field(
- default=None,
- alias="OnBuild",
- description="`ONBUILD` metadata that were defined in the image's `Dockerfile`.\n",
- examples=[[]],
- )
- labels: dict[str, str] | None = Field(
- default=None,
- alias="Labels",
- description="User-defined key/value metadata.",
- examples=[
- {
- "com.example.some-label": "some-value",
- "com.example.some-other-label": "some-other-value",
- }
- ],
- )
- stop_signal: str | None = Field(
- default=None,
- alias="StopSignal",
- description="Signal to stop a container as a string or unsigned integer.\n",
- examples=["SIGTERM"],
- )
- stop_timeout: int | None = Field(
- default=10,
- alias="StopTimeout",
- description="Timeout to stop a container in seconds.",
- )
- shell: list[str] | None = Field(
- default=None,
- alias="Shell",
- description="Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.\n",
- examples=[["/bin/sh", "-c"]],
- )
+ hostname: Annotated[
+ str | None,
+ Field(
+ alias="Hostname",
+ description="The hostname to use for the container, as a valid RFC 1123 hostname.\n",
+ examples=["439f4e91bd1d"],
+ ),
+ ] = None
+ domainname: Annotated[
+ str | None,
+ Field(
+ alias="Domainname",
+ description="The domain name to use for the container.\n",
+ ),
+ ] = None
+ user: Annotated[
+ str | None,
+ Field(
+ alias="User",
+ description="The user that commands are run as inside the container.",
+ ),
+ ] = None
+ attach_stdin: Annotated[
+ bool | None,
+ Field(alias="AttachStdin", description="Whether to attach to `stdin`."),
+ ] = False
+ attach_stdout: Annotated[
+ bool | None,
+ Field(alias="AttachStdout", description="Whether to attach to `stdout`."),
+ ] = True
+ attach_stderr: Annotated[
+ bool | None,
+ Field(alias="AttachStderr", description="Whether to attach to `stderr`."),
+ ] = True
+ exposed_ports: Annotated[
+ dict[str, dict[str, Any]] | None,
+ Field(
+ alias="ExposedPorts",
+ description='An object mapping ports to an empty object in the form:\n\n`{"/": {}}`\n',
+ examples=[{"80/tcp": {}, "443/tcp": {}}],
+ ),
+ ] = None
+ tty: Annotated[
+ bool | None,
+ Field(
+ alias="Tty",
+ description="Attach standard streams to a TTY, including `stdin` if it is not closed.\n",
+ ),
+ ] = False
+ open_stdin: Annotated[
+ bool | None, Field(alias="OpenStdin", description="Open `stdin`")
+ ] = False
+ stdin_once: Annotated[
+ bool | None,
+ Field(
+ alias="StdinOnce",
+ description="Close `stdin` after one attached client disconnects",
+ ),
+ ] = False
+ env: Annotated[
+ list[str] | None,
+ Field(
+ alias="Env",
+ description='A list of environment variables to set inside the container in the\nform `["VAR=value", ...]`. A variable without `=` is removed from the\nenvironment, rather than to have an empty value.\n',
+ examples=[
+ ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
+ ],
+ ),
+ ] = None
+ cmd: Annotated[
+ list[str] | None,
+ Field(
+ alias="Cmd",
+ description="Command to run specified as a string or an array of strings.\n",
+ examples=[["/bin/sh"]],
+ ),
+ ] = None
+ healthcheck: Annotated[HealthConfig | None, Field(alias="Healthcheck")] = None
+ args_escaped: Annotated[
+ bool | None,
+ Field(
+ alias="ArgsEscaped",
+ description="Command is already escaped (Windows only)",
+ examples=[False],
+ ),
+ ] = False
+ image: Annotated[
+ str | None,
+ Field(
+ alias="Image",
+ description="The name (or reference) of the image to use when creating the container,\nor which was used when the container was created.\n",
+ examples=["example-image:1.0"],
+ ),
+ ] = None
+ volumes: Annotated[
+ dict[str, dict[str, Any]] | None,
+ Field(
+ alias="Volumes",
+ description="An object mapping mount point paths inside the container to empty\nobjects.\n",
+ ),
+ ] = None
+ working_dir: Annotated[
+ str | None,
+ Field(
+ alias="WorkingDir",
+ description="The working directory for commands to run in.",
+ examples=["/public/"],
+ ),
+ ] = None
+ entrypoint: Annotated[
+ list[str] | None,
+ Field(
+ alias="Entrypoint",
+ description='The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[""]`) then the\nentry point is reset to system default (i.e., the entry point used by\ndocker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n',
+ examples=[[]],
+ ),
+ ] = None
+ network_disabled: Annotated[
+ bool | None,
+ Field(
+ alias="NetworkDisabled", description="Disable networking for the container."
+ ),
+ ] = None
+ mac_address: Annotated[
+ str | None,
+ Field(alias="MacAddress", description="MAC address of the container."),
+ ] = None
+ on_build: Annotated[
+ list[str] | None,
+ Field(
+ alias="OnBuild",
+ description="`ONBUILD` metadata that were defined in the image's `Dockerfile`.\n",
+ examples=[[]],
+ ),
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Labels",
+ description="User-defined key/value metadata.",
+ examples=[
+ {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value",
+ }
+ ],
+ ),
+ ] = None
+ stop_signal: Annotated[
+ str | None,
+ Field(
+ alias="StopSignal",
+ description="Signal to stop a container as a string or unsigned integer.\n",
+ examples=["SIGTERM"],
+ ),
+ ] = None
+ stop_timeout: Annotated[
+ int | None,
+ Field(
+ alias="StopTimeout", description="Timeout to stop a container in seconds."
+ ),
+ ] = 10
+ shell: Annotated[
+ list[str] | None,
+ Field(
+ alias="Shell",
+ description="Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.\n",
+ examples=[["/bin/sh", "-c"]],
+ ),
+ ] = None
class ImageConfig(BaseModel):
@@ -932,157 +1093,205 @@ class ImageConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- hostname: str | None = Field(
- default=None,
- alias="Hostname",
- description="The hostname to use for the container, as a valid RFC 1123 hostname.\n\n
\n\n> **Note**: this field is always empty and must not be used.\n",
- examples=[""],
- )
- domainname: str | None = Field(
- default=None,
- alias="Domainname",
- description="The domain name to use for the container.\n\n
\n\n> **Note**: this field is always empty and must not be used.\n",
- examples=[""],
- )
- user: str | None = Field(
- default=None,
- alias="User",
- description="The user that commands are run as inside the container.",
- examples=["web:web"],
- )
- attach_stdin: bool | None = Field(
- default=False,
- alias="AttachStdin",
- description="Whether to attach to `stdin`.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
- examples=[False],
- )
- attach_stdout: bool | None = Field(
- default=False,
- alias="AttachStdout",
- description="Whether to attach to `stdout`.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
- examples=[False],
- )
- attach_stderr: bool | None = Field(
- default=False,
- alias="AttachStderr",
- description="Whether to attach to `stderr`.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
- examples=[False],
- )
- exposed_ports: dict[str, dict[str, Any]] | None = Field(
- default=None,
- alias="ExposedPorts",
- description='An object mapping ports to an empty object in the form:\n\n`{"/": {}}`\n',
- examples=[{"80/tcp": {}, "443/tcp": {}}],
- )
- tty: bool | None = Field(
- default=False,
- alias="Tty",
- description="Attach standard streams to a TTY, including `stdin` if it is not closed.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
- examples=[False],
- )
- open_stdin: bool | None = Field(
- default=False,
- alias="OpenStdin",
- description="Open `stdin`\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
- examples=[False],
- )
- stdin_once: bool | None = Field(
- default=False,
- alias="StdinOnce",
- description="Close `stdin` after one attached client disconnects.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
- examples=[False],
- )
- env: list[str] | None = Field(
- default=None,
- alias="Env",
- description='A list of environment variables to set inside the container in the\nform `["VAR=value", ...]`. A variable without `=` is removed from the\nenvironment, rather than to have an empty value.\n',
- examples=[
- ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
- ],
- )
- cmd: list[str] | None = Field(
- default=None,
- alias="Cmd",
- description="Command to run specified as a string or an array of strings.\n",
- examples=[["/bin/sh"]],
- )
- healthcheck: HealthConfig | None = Field(default=None, alias="Healthcheck")
- args_escaped: bool | None = Field(
- default=False,
- alias="ArgsEscaped",
- description="Command is already escaped (Windows only)",
- examples=[False],
- )
- image: str | None = Field(
- default="",
- alias="Image",
- description="The name (or reference) of the image to use when creating the container,\nor which was used when the container was created.\n\n
\n\n> **Note**: this field is always empty and must not be used.\n",
- examples=[""],
- )
- volumes: dict[str, dict[str, Any]] | None = Field(
- default=None,
- alias="Volumes",
- description="An object mapping mount point paths inside the container to empty\nobjects.\n",
- examples=[{"/app/data": {}, "/app/config": {}}],
- )
- working_dir: str | None = Field(
- default=None,
- alias="WorkingDir",
- description="The working directory for commands to run in.",
- examples=["/public/"],
- )
- entrypoint: list[str] | None = Field(
- default=None,
- alias="Entrypoint",
- description='The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[""]`) then the\nentry point is reset to system default (i.e., the entry point used by\ndocker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n',
- examples=[[]],
- )
- network_disabled: bool | None = Field(
- default=False,
- alias="NetworkDisabled",
- description="Disable networking for the container.\n\n
\n\n> **Note**: this field is always omitted and must not be used.\n",
- examples=[False],
- )
- mac_address: str | None = Field(
- default="",
- alias="MacAddress",
- description="MAC address of the container.\n\n
\n\n> **Note**: this field is always omitted and must not be used.\n",
- examples=[""],
- )
- on_build: list[str] | None = Field(
- default=None,
- alias="OnBuild",
- description="`ONBUILD` metadata that were defined in the image's `Dockerfile`.\n",
- examples=[[]],
- )
- labels: dict[str, str] | None = Field(
- default=None,
- alias="Labels",
- description="User-defined key/value metadata.",
- examples=[
- {
- "com.example.some-label": "some-value",
- "com.example.some-other-label": "some-other-value",
- }
- ],
- )
- stop_signal: str | None = Field(
- default=None,
- alias="StopSignal",
- description="Signal to stop a container as a string or unsigned integer.\n",
- examples=["SIGTERM"],
- )
- stop_timeout: int | None = Field(
- default=10,
- alias="StopTimeout",
- description="Timeout to stop a container in seconds.\n\n
\n\n> **Note**: this field is always omitted and must not be used.\n",
- )
- shell: list[str] | None = Field(
- default=None,
- alias="Shell",
- description="Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.\n",
- examples=[["/bin/sh", "-c"]],
- )
+ hostname: Annotated[
+ str | None,
+ Field(
+ alias="Hostname",
+ description="The hostname to use for the container, as a valid RFC 1123 hostname.\n\n
\n\n> **Note**: this field is always empty and must not be used.\n",
+ examples=[""],
+ ),
+ ] = None
+ domainname: Annotated[
+ str | None,
+ Field(
+ alias="Domainname",
+ description="The domain name to use for the container.\n\n
\n\n> **Note**: this field is always empty and must not be used.\n",
+ examples=[""],
+ ),
+ ] = None
+ user: Annotated[
+ str | None,
+ Field(
+ alias="User",
+ description="The user that commands are run as inside the container.",
+ examples=["web:web"],
+ ),
+ ] = None
+ attach_stdin: Annotated[
+ bool | None,
+ Field(
+ alias="AttachStdin",
+ description="Whether to attach to `stdin`.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
+ examples=[False],
+ ),
+ ] = False
+ attach_stdout: Annotated[
+ bool | None,
+ Field(
+ alias="AttachStdout",
+ description="Whether to attach to `stdout`.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
+ examples=[False],
+ ),
+ ] = False
+ attach_stderr: Annotated[
+ bool | None,
+ Field(
+ alias="AttachStderr",
+ description="Whether to attach to `stderr`.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
+ examples=[False],
+ ),
+ ] = False
+ exposed_ports: Annotated[
+ dict[str, dict[str, Any]] | None,
+ Field(
+ alias="ExposedPorts",
+ description='An object mapping ports to an empty object in the form:\n\n`{"/": {}}`\n',
+ examples=[{"80/tcp": {}, "443/tcp": {}}],
+ ),
+ ] = None
+ tty: Annotated[
+ bool | None,
+ Field(
+ alias="Tty",
+ description="Attach standard streams to a TTY, including `stdin` if it is not closed.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
+ examples=[False],
+ ),
+ ] = False
+ open_stdin: Annotated[
+ bool | None,
+ Field(
+ alias="OpenStdin",
+ description="Open `stdin`\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
+ examples=[False],
+ ),
+ ] = False
+ stdin_once: Annotated[
+ bool | None,
+ Field(
+ alias="StdinOnce",
+ description="Close `stdin` after one attached client disconnects.\n\n
\n\n> **Note**: this field is always false and must not be used.\n",
+ examples=[False],
+ ),
+ ] = False
+ env: Annotated[
+ list[str] | None,
+ Field(
+ alias="Env",
+ description='A list of environment variables to set inside the container in the\nform `["VAR=value", ...]`. A variable without `=` is removed from the\nenvironment, rather than to have an empty value.\n',
+ examples=[
+ ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
+ ],
+ ),
+ ] = None
+ cmd: Annotated[
+ list[str] | None,
+ Field(
+ alias="Cmd",
+ description="Command to run specified as a string or an array of strings.\n",
+ examples=[["/bin/sh"]],
+ ),
+ ] = None
+ healthcheck: Annotated[HealthConfig | None, Field(alias="Healthcheck")] = None
+ args_escaped: Annotated[
+ bool | None,
+ Field(
+ alias="ArgsEscaped",
+ description="Command is already escaped (Windows only)",
+ examples=[False],
+ ),
+ ] = False
+ image: Annotated[
+ str | None,
+ Field(
+ alias="Image",
+ description="The name (or reference) of the image to use when creating the container,\nor which was used when the container was created.\n\n
\n\n> **Note**: this field is always empty and must not be used.\n",
+ examples=[""],
+ ),
+ ] = ""
+ volumes: Annotated[
+ dict[str, dict[str, Any]] | None,
+ Field(
+ alias="Volumes",
+ description="An object mapping mount point paths inside the container to empty\nobjects.\n",
+ examples=[{"/app/data": {}, "/app/config": {}}],
+ ),
+ ] = None
+ working_dir: Annotated[
+ str | None,
+ Field(
+ alias="WorkingDir",
+ description="The working directory for commands to run in.",
+ examples=["/public/"],
+ ),
+ ] = None
+ entrypoint: Annotated[
+ list[str] | None,
+ Field(
+ alias="Entrypoint",
+ description='The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[""]`) then the\nentry point is reset to system default (i.e., the entry point used by\ndocker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n',
+ examples=[[]],
+ ),
+ ] = None
+ network_disabled: Annotated[
+ bool | None,
+ Field(
+ alias="NetworkDisabled",
+ description="Disable networking for the container.\n\n
\n\n> **Note**: this field is always omitted and must not be used.\n",
+ examples=[False],
+ ),
+ ] = False
+ mac_address: Annotated[
+ str | None,
+ Field(
+ alias="MacAddress",
+ description="MAC address of the container.\n\n
\n\n> **Note**: this field is always omitted and must not be used.\n",
+ examples=[""],
+ ),
+ ] = ""
+ on_build: Annotated[
+ list[str] | None,
+ Field(
+ alias="OnBuild",
+ description="`ONBUILD` metadata that were defined in the image's `Dockerfile`.\n",
+ examples=[[]],
+ ),
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Labels",
+ description="User-defined key/value metadata.",
+ examples=[
+ {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value",
+ }
+ ],
+ ),
+ ] = None
+ stop_signal: Annotated[
+ str | None,
+ Field(
+ alias="StopSignal",
+ description="Signal to stop a container as a string or unsigned integer.\n",
+ examples=["SIGTERM"],
+ ),
+ ] = None
+ stop_timeout: Annotated[
+ int | None,
+ Field(
+ alias="StopTimeout",
+ description="Timeout to stop a container in seconds.\n\n
\n\n> **Note**: this field is always omitted and must not be used.\n",
+ ),
+ ] = 10
+ shell: Annotated[
+ list[str] | None,
+ Field(
+ alias="Shell",
+ description="Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.\n",
+ examples=[["/bin/sh", "-c"]],
+ ),
+ ] = None
class Address(BaseModel):
@@ -1093,10 +1302,11 @@ class Address(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- addr: str | None = Field(default=None, alias="Addr", description="IP address.")
- prefix_len: int | None = Field(
- default=None, alias="PrefixLen", description="Mask length of the IP address."
- )
+ addr: Annotated[str | None, Field(alias="Addr", description="IP address.")] = None
+ prefix_len: Annotated[
+ int | None,
+ Field(alias="PrefixLen", description="Mask length of the IP address."),
+ ] = None
class PortBinding(BaseModel):
@@ -1109,18 +1319,22 @@ class PortBinding(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- host_ip: str | None = Field(
- default=None,
- alias="HostIp",
- description="Host IP address that the container's port is mapped to.",
- examples=["127.0.0.1"],
- )
- host_port: str | None = Field(
- default=None,
- alias="HostPort",
- description="Host port number that the container's port is mapped to.",
- examples=["4443"],
- )
+ host_ip: Annotated[
+ str | None,
+ Field(
+ alias="HostIp",
+ description="Host IP address that the container's port is mapped to.",
+ examples=["127.0.0.1"],
+ ),
+ ] = None
+ host_port: Annotated[
+ str | None,
+ Field(
+ alias="HostPort",
+ description="Host port number that the container's port is mapped to.",
+ examples=["4443"],
+ ),
+ ] = None
class GraphDriverData(BaseModel):
@@ -1133,24 +1347,28 @@ class GraphDriverData(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(
- ...,
- alias="Name",
- description="Name of the storage driver.",
- examples=["overlay2"],
- )
- data: dict[str, str] = Field(
- ...,
- alias="Data",
- description="Low-level storage metadata, provided as key/value pairs.\n\nThis information is driver-specific, and depends on the storage-driver\nin use, and should be used for informational purposes only.\n",
- examples=[
- {
- "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged",
- "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff",
- "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work",
- }
- ],
- )
+ name: Annotated[
+ str,
+ Field(
+ alias="Name",
+ description="Name of the storage driver.",
+ examples=["overlay2"],
+ ),
+ ]
+ data: Annotated[
+ dict[str, str],
+ Field(
+ alias="Data",
+ description="Low-level storage metadata, provided as key/value pairs.\n\nThis information is driver-specific, and depends on the storage-driver\nin use, and should be used for informational purposes only.\n",
+ examples=[
+ {
+ "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged",
+ "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff",
+ "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work",
+ }
+ ],
+ ),
+ ]
class RootFs(BaseModel):
@@ -1162,17 +1380,19 @@ class RootFs(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- type: str = Field(..., alias="Type", examples=["layers"])
- layers: list[str] | None = Field(
- default=None,
- alias="Layers",
- examples=[
- [
- "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6",
- "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
- ]
- ],
- )
+ type: Annotated[str, Field(alias="Type", examples=["layers"])]
+ layers: Annotated[
+ list[str] | None,
+ Field(
+ alias="Layers",
+ examples=[
+ [
+ "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6",
+ "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+ ]
+ ],
+ ),
+ ] = None
class Metadata(BaseModel):
@@ -1185,12 +1405,14 @@ class Metadata(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- last_tag_time: str | None = Field(
- default=None,
- alias="LastTagTime",
- description="Date and time at which the image was last tagged in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n\nThis information is only available if the image was tagged locally,\nand omitted otherwise.\n",
- examples=["2022-02-28T14:40:02.623929178Z"],
- )
+ last_tag_time: Annotated[
+ str | None,
+ Field(
+ alias="LastTagTime",
+ description="Date and time at which the image was last tagged in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n\nThis information is only available if the image was tagged locally,\nand omitted otherwise.\n",
+ examples=["2022-02-28T14:40:02.623929178Z"],
+ ),
+ ] = None
class ImageInspect(BaseModel):
@@ -1202,210 +1424,266 @@ class ImageInspect(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None,
- alias="Id",
- description="ID is the content-addressable ID of an image.\n\nThis identifier is a content-addressable digest calculated from the\nimage's configuration (which includes the digests of layers used by\nthe image).\n\nNote that this digest differs from the `RepoDigests` below, which\nholds digests of image manifests that reference the image.\n",
- examples=[
- "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
- ],
- )
- repo_tags: list[str] | None = Field(
- default=None,
- alias="RepoTags",
- description='List of image names/tags in the local image cache that reference this\nimage.\n\nMultiple image tags can refer to the same image, and this list may be\nempty if no tags reference the image, in which case the image is\n"untagged", in which case it can still be referenced by its ID.\n',
- examples=[
- [
- "example:1.0",
- "example:latest",
- "example:stable",
- "internal.registry.example.com:5000/example:1.0",
- ]
- ],
- )
- repo_digests: list[str] | None = Field(
- default=None,
- alias="RepoDigests",
- description="List of content-addressable digests of locally available image manifests\nthat the image is referenced from. Multiple manifests can refer to the\nsame image.\n\nThese digests are usually only available if the image was either pulled\nfrom a registry, or if the image was pushed to a registry, which is when\nthe manifest is generated and its digest calculated.\n",
- examples=[
- [
- "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
- "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578",
- ]
- ],
- )
- parent: str | None = Field(
- default=None,
- alias="Parent",
- description="ID of the parent image.\n\nDepending on how the image was created, this field may be empty and\nis only set for images that were built/created locally. This field\nis empty if the image was pulled from an image registry.\n",
- examples=[""],
- )
- comment: str | None = Field(
- default=None,
- alias="Comment",
- description="Optional message that was set when committing or importing the image.\n",
- examples=[""],
- )
- created: str | None = Field(
- default=None,
- alias="Created",
- description="Date and time at which the image was created, formatted in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2022-02-04T21:20:12.497794809Z"],
- )
- container: str | None = Field(
- default=None,
- alias="Container",
- description="The ID of the container that was used to create the image.\n\nDepending on how the image was created, this field may be empty.\n",
- examples=["65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735"],
- )
- container_config: ContainerConfig | None = Field(
- default=None, alias="ContainerConfig"
- )
- docker_version: str | None = Field(
- default=None,
- alias="DockerVersion",
- description="The version of Docker that was used to build the image.\n\nDepending on how the image was created, this field may be empty.\n",
- examples=["20.10.7"],
- )
- author: str | None = Field(
- default=None,
- alias="Author",
- description="Name of the author that was specified when committing the image, or as\nspecified through MAINTAINER (deprecated) in the Dockerfile.\n",
- examples=[""],
- )
- config: ImageConfig | None = Field(default=None, alias="Config")
- architecture: str | None = Field(
- default=None,
- alias="Architecture",
- description="Hardware CPU architecture that the image runs on.\n",
- examples=["arm"],
- )
- variant: str | None = Field(
- default=None,
- alias="Variant",
- description="CPU architecture variant (presently ARM-only).\n",
- examples=["v7"],
- )
- os: str | None = Field(
- default=None,
- alias="Os",
- description="Operating System the image is built to run on.\n",
- examples=["linux"],
- )
- os_version: str | None = Field(
- default=None,
- alias="OsVersion",
- description="Operating System version the image is built to run on (especially\nfor Windows).\n",
- examples=[""],
- )
- size: int | None = Field(
- default=None,
- alias="Size",
- description="Total size of the image including all layers it is composed of.\n",
- examples=[1239828],
- )
- virtual_size: int | None = Field(
- default=None,
- alias="VirtualSize",
- description="Total size of the image including all layers it is composed of.\n\nIn versions of Docker before v1.10, this field was calculated from\nthe image itself and all of its parent images. Docker v1.10 and up\nstore images self-contained, and no longer use a parent-chain, making\nthis field an equivalent of the Size field.\n\nThis field is kept for backward compatibility, but may be removed in\na future version of the API.\n",
- examples=[1239828],
- )
- graph_driver: GraphDriverData | None = Field(default=None, alias="GraphDriver")
- root_fs: RootFs | None = Field(
- default=None,
- alias="RootFS",
- description="Information about the image's RootFS, including the layer IDs.\n",
- )
- metadata: Metadata | None = Field(
- default=None,
- alias="Metadata",
- description="Additional metadata of the image in the local cache. This information\nis local to the daemon, and not part of the image itself.\n",
- )
+ id: Annotated[
+ str | None,
+ Field(
+ alias="Id",
+ description="ID is the content-addressable ID of an image.\n\nThis identifier is a content-addressable digest calculated from the\nimage's configuration (which includes the digests of layers used by\nthe image).\n\nNote that this digest differs from the `RepoDigests` below, which\nholds digests of image manifests that reference the image.\n",
+ examples=[
+ "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+ ],
+ ),
+ ] = None
+ repo_tags: Annotated[
+ list[str] | None,
+ Field(
+ alias="RepoTags",
+ description='List of image names/tags in the local image cache that reference this\nimage.\n\nMultiple image tags can refer to the same image, and this list may be\nempty if no tags reference the image, in which case the image is\n"untagged", in which case it can still be referenced by its ID.\n',
+ examples=[
+ [
+ "example:1.0",
+ "example:latest",
+ "example:stable",
+ "internal.registry.example.com:5000/example:1.0",
+ ]
+ ],
+ ),
+ ] = None
+ repo_digests: Annotated[
+ list[str] | None,
+ Field(
+ alias="RepoDigests",
+ description="List of content-addressable digests of locally available image manifests\nthat the image is referenced from. Multiple manifests can refer to the\nsame image.\n\nThese digests are usually only available if the image was either pulled\nfrom a registry, or if the image was pushed to a registry, which is when\nthe manifest is generated and its digest calculated.\n",
+ examples=[
+ [
+ "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578",
+ ]
+ ],
+ ),
+ ] = None
+ parent: Annotated[
+ str | None,
+ Field(
+ alias="Parent",
+ description="ID of the parent image.\n\nDepending on how the image was created, this field may be empty and\nis only set for images that were built/created locally. This field\nis empty if the image was pulled from an image registry.\n",
+ examples=[""],
+ ),
+ ] = None
+ comment: Annotated[
+ str | None,
+ Field(
+ alias="Comment",
+ description="Optional message that was set when committing or importing the image.\n",
+ examples=[""],
+ ),
+ ] = None
+ created: Annotated[
+ str | None,
+ Field(
+ alias="Created",
+ description="Date and time at which the image was created, formatted in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2022-02-04T21:20:12.497794809Z"],
+ ),
+ ] = None
+ container: Annotated[
+ str | None,
+ Field(
+ alias="Container",
+ description="The ID of the container that was used to create the image.\n\nDepending on how the image was created, this field may be empty.\n",
+ examples=[
+ "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735"
+ ],
+ ),
+ ] = None
+ container_config: Annotated[
+ ContainerConfig | None, Field(alias="ContainerConfig")
+ ] = None
+ docker_version: Annotated[
+ str | None,
+ Field(
+ alias="DockerVersion",
+ description="The version of Docker that was used to build the image.\n\nDepending on how the image was created, this field may be empty.\n",
+ examples=["20.10.7"],
+ ),
+ ] = None
+ author: Annotated[
+ str | None,
+ Field(
+ alias="Author",
+ description="Name of the author that was specified when committing the image, or as\nspecified through MAINTAINER (deprecated) in the Dockerfile.\n",
+ examples=[""],
+ ),
+ ] = None
+ config: Annotated[ImageConfig | None, Field(alias="Config")] = None
+ architecture: Annotated[
+ str | None,
+ Field(
+ alias="Architecture",
+ description="Hardware CPU architecture that the image runs on.\n",
+ examples=["arm"],
+ ),
+ ] = None
+ variant: Annotated[
+ str | None,
+ Field(
+ alias="Variant",
+ description="CPU architecture variant (presently ARM-only).\n",
+ examples=["v7"],
+ ),
+ ] = None
+ os: Annotated[
+ str | None,
+ Field(
+ alias="Os",
+ description="Operating System the image is built to run on.\n",
+ examples=["linux"],
+ ),
+ ] = None
+ os_version: Annotated[
+ str | None,
+ Field(
+ alias="OsVersion",
+ description="Operating System version the image is built to run on (especially\nfor Windows).\n",
+ examples=[""],
+ ),
+ ] = None
+ size: Annotated[
+ int | None,
+ Field(
+ alias="Size",
+ description="Total size of the image including all layers it is composed of.\n",
+ examples=[1239828],
+ ),
+ ] = None
+ virtual_size: Annotated[
+ int | None,
+ Field(
+ alias="VirtualSize",
+ description="Total size of the image including all layers it is composed of.\n\nIn versions of Docker before v1.10, this field was calculated from\nthe image itself and all of its parent images. Docker v1.10 and up\nstore images self-contained, and no longer use a parent-chain, making\nthis field an equivalent of the Size field.\n\nThis field is kept for backward compatibility, but may be removed in\na future version of the API.\n",
+ examples=[1239828],
+ ),
+ ] = None
+ graph_driver: Annotated[GraphDriverData | None, Field(alias="GraphDriver")] = None
+ root_fs: Annotated[
+ RootFs | None,
+ Field(
+ alias="RootFS",
+ description="Information about the image's RootFS, including the layer IDs.\n",
+ ),
+ ] = None
+ metadata: Annotated[
+ Metadata | None,
+ Field(
+ alias="Metadata",
+ description="Additional metadata of the image in the local cache. This information\nis local to the daemon, and not part of the image itself.\n",
+ ),
+ ] = None
class ImageSummary(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str = Field(
- ...,
- alias="Id",
- description="ID is the content-addressable ID of an image.\n\nThis identifier is a content-addressable digest calculated from the\nimage's configuration (which includes the digests of layers used by\nthe image).\n\nNote that this digest differs from the `RepoDigests` below, which\nholds digests of image manifests that reference the image.\n",
- examples=[
- "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
- ],
- )
- parent_id: str = Field(
- ...,
- alias="ParentId",
- description="ID of the parent image.\n\nDepending on how the image was created, this field may be empty and\nis only set for images that were built/created locally. This field\nis empty if the image was pulled from an image registry.\n",
- examples=[""],
- )
- repo_tags: list[str] = Field(
- ...,
- alias="RepoTags",
- description='List of image names/tags in the local image cache that reference this\nimage.\n\nMultiple image tags can refer to the same image, and this list may be\nempty if no tags reference the image, in which case the image is\n"untagged", in which case it can still be referenced by its ID.\n',
- examples=[
- [
- "example:1.0",
- "example:latest",
- "example:stable",
- "internal.registry.example.com:5000/example:1.0",
- ]
- ],
- )
- repo_digests: list[str] = Field(
- ...,
- alias="RepoDigests",
- description="List of content-addressable digests of locally available image manifests\nthat the image is referenced from. Multiple manifests can refer to the\nsame image.\n\nThese digests are usually only available if the image was either pulled\nfrom a registry, or if the image was pushed to a registry, which is when\nthe manifest is generated and its digest calculated.\n",
- examples=[
- [
- "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
- "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578",
- ]
- ],
- )
- created: int = Field(
- ...,
- alias="Created",
- description="Date and time at which the image was created as a Unix timestamp\n(number of seconds since EPOCH).\n",
- examples=["1644009612"],
- )
- size: int = Field(
- ...,
- alias="Size",
- description="Total size of the image including all layers it is composed of.\n",
- examples=[172064416],
- )
- shared_size: int = Field(
- ...,
- alias="SharedSize",
- description="Total size of image layers that are shared between this image and other\nimages.\n\nThis size is not calculated by default. `-1` indicates that the value\nhas not been set / calculated.\n",
- examples=[1239828],
- )
- virtual_size: int = Field(
- ...,
- alias="VirtualSize",
- description="Total size of the image including all layers it is composed of.\n\nIn versions of Docker before v1.10, this field was calculated from\nthe image itself and all of its parent images. Docker v1.10 and up\nstore images self-contained, and no longer use a parent-chain, making\nthis field an equivalent of the Size field.\n\nThis field is kept for backward compatibility, but may be removed in\na future version of the API.\n",
- examples=[172064416],
- )
- labels: dict[str, str] = Field(
- ...,
- alias="Labels",
- description="User-defined key/value metadata.",
- examples=[
- {
- "com.example.some-label": "some-value",
- "com.example.some-other-label": "some-other-value",
- }
- ],
- )
- containers: int = Field(
- ...,
- alias="Containers",
- description="Number of containers using this image. Includes both stopped and running\ncontainers.\n\nThis size is not calculated by default, and depends on which API endpoint\nis used. `-1` indicates that the value has not been set / calculated.\n",
- examples=[2],
- )
+ id: Annotated[
+ str,
+ Field(
+ alias="Id",
+ description="ID is the content-addressable ID of an image.\n\nThis identifier is a content-addressable digest calculated from the\nimage's configuration (which includes the digests of layers used by\nthe image).\n\nNote that this digest differs from the `RepoDigests` below, which\nholds digests of image manifests that reference the image.\n",
+ examples=[
+ "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+ ],
+ ),
+ ]
+ parent_id: Annotated[
+ str,
+ Field(
+ alias="ParentId",
+ description="ID of the parent image.\n\nDepending on how the image was created, this field may be empty and\nis only set for images that were built/created locally. This field\nis empty if the image was pulled from an image registry.\n",
+ examples=[""],
+ ),
+ ]
+ repo_tags: Annotated[
+ list[str],
+ Field(
+ alias="RepoTags",
+ description='List of image names/tags in the local image cache that reference this\nimage.\n\nMultiple image tags can refer to the same image, and this list may be\nempty if no tags reference the image, in which case the image is\n"untagged", in which case it can still be referenced by its ID.\n',
+ examples=[
+ [
+ "example:1.0",
+ "example:latest",
+ "example:stable",
+ "internal.registry.example.com:5000/example:1.0",
+ ]
+ ],
+ ),
+ ]
+ repo_digests: Annotated[
+ list[str],
+ Field(
+ alias="RepoDigests",
+ description="List of content-addressable digests of locally available image manifests\nthat the image is referenced from. Multiple manifests can refer to the\nsame image.\n\nThese digests are usually only available if the image was either pulled\nfrom a registry, or if the image was pushed to a registry, which is when\nthe manifest is generated and its digest calculated.\n",
+ examples=[
+ [
+ "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578",
+ ]
+ ],
+ ),
+ ]
+ created: Annotated[
+ int,
+ Field(
+ alias="Created",
+ description="Date and time at which the image was created as a Unix timestamp\n(number of seconds since EPOCH).\n",
+ examples=["1644009612"],
+ ),
+ ]
+ size: Annotated[
+ int,
+ Field(
+ alias="Size",
+ description="Total size of the image including all layers it is composed of.\n",
+ examples=[172064416],
+ ),
+ ]
+ shared_size: Annotated[
+ int,
+ Field(
+ alias="SharedSize",
+ description="Total size of image layers that are shared between this image and other\nimages.\n\nThis size is not calculated by default. `-1` indicates that the value\nhas not been set / calculated.\n",
+ examples=[1239828],
+ ),
+ ]
+ virtual_size: Annotated[
+ int,
+ Field(
+ alias="VirtualSize",
+ description="Total size of the image including all layers it is composed of.\n\nIn versions of Docker before v1.10, this field was calculated from\nthe image itself and all of its parent images. Docker v1.10 and up\nstore images self-contained, and no longer use a parent-chain, making\nthis field an equivalent of the Size field.\n\nThis field is kept for backward compatibility, but may be removed in\na future version of the API.\n",
+ examples=[172064416],
+ ),
+ ]
+ labels: Annotated[
+ dict[str, str],
+ Field(
+ alias="Labels",
+ description="User-defined key/value metadata.",
+ examples=[
+ {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value",
+ }
+ ],
+ ),
+ ]
+ containers: Annotated[
+ int,
+ Field(
+ alias="Containers",
+ description="Number of containers using this image. Includes both stopped and running\ncontainers.\n\nThis size is not calculated by default, and depends on which API endpoint\nis used. `-1` indicates that the value has not been set / calculated.\n",
+ examples=[2],
+ ),
+ ]
class AuthConfig(BaseModel):
@@ -1450,77 +1728,97 @@ class UsageData(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- size: int = Field(
- ...,
- alias="Size",
- description='Amount of disk space used by the volume (in bytes). This information\nis only available for volumes created with the `"local"` volume\ndriver. For volumes created with other volume drivers, this field\nis set to `-1` ("not available")\n',
- )
- ref_count: int = Field(
- ...,
- alias="RefCount",
- description="The number of containers referencing this volume. This field\nis set to `-1` if the reference-count is not available.\n",
- )
+ size: Annotated[
+ int,
+ Field(
+ alias="Size",
+ description='Amount of disk space used by the volume (in bytes). This information\nis only available for volumes created with the `"local"` volume\ndriver. For volumes created with other volume drivers, this field\nis set to `-1` ("not available")\n',
+ ),
+ ]
+ ref_count: Annotated[
+ int,
+ Field(
+ alias="RefCount",
+ description="The number of containers referencing this volume. This field\nis set to `-1` if the reference-count is not available.\n",
+ ),
+ ]
class Volume(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(
- ..., alias="Name", description="Name of the volume.", examples=["tardis"]
- )
- driver: str = Field(
- ...,
- alias="Driver",
- description="Name of the volume driver used by the volume.",
- examples=["custom"],
- )
- mountpoint: str = Field(
- ...,
- alias="Mountpoint",
- description="Mount path of the volume on the host.",
- examples=["/var/lib/docker/volumes/tardis"],
- )
- created_at: str | None = Field(
- default=None,
- alias="CreatedAt",
- description="Date/Time the volume was created.",
- examples=["2016-06-07T20:31:11.853781916Z"],
- )
- status: dict[str, dict[str, Any]] | None = Field(
- default=None,
- alias="Status",
- description='Low-level details about the volume, provided by the volume driver.\nDetails are returned as a map with key/value pairs:\n`{"key":"value","key2":"value2"}`.\n\nThe `Status` field is optional, and is omitted if the volume driver\ndoes not support this feature.\n',
- examples=[{"hello": "world"}],
- )
- labels: dict[str, str] = Field(
- ...,
- alias="Labels",
- description="User-defined key/value metadata.",
- examples=[
- {
- "com.example.some-label": "some-value",
- "com.example.some-other-label": "some-other-value",
- }
- ],
- )
- scope: Scope = Field(
- ...,
- alias="Scope",
- description="The level at which the volume exists. Either `global` for cluster-wide,\nor `local` for machine level.\n",
- examples=["local"],
- )
- options: dict[str, str] = Field(
- ...,
- alias="Options",
- description="The driver specific options used when creating the volume.\n",
- examples=[{"device": "tmpfs", "o": "size=100m,uid=1000", "type": "tmpfs"}],
- )
- usage_data: UsageData | None = Field(
- default=None,
- alias="UsageData",
- description="Usage details about the volume. This information is used by the\n`GET /system/df` endpoint, and omitted in other endpoints.\n",
- )
+ name: Annotated[
+ str, Field(alias="Name", description="Name of the volume.", examples=["tardis"])
+ ]
+ driver: Annotated[
+ str,
+ Field(
+ alias="Driver",
+ description="Name of the volume driver used by the volume.",
+ examples=["custom"],
+ ),
+ ]
+ mountpoint: Annotated[
+ str,
+ Field(
+ alias="Mountpoint",
+ description="Mount path of the volume on the host.",
+ examples=["/var/lib/docker/volumes/tardis"],
+ ),
+ ]
+ created_at: Annotated[
+ str | None,
+ Field(
+ alias="CreatedAt",
+ description="Date/Time the volume was created.",
+ examples=["2016-06-07T20:31:11.853781916Z"],
+ ),
+ ] = None
+ status: Annotated[
+ dict[str, dict[str, Any]] | None,
+ Field(
+ alias="Status",
+ description='Low-level details about the volume, provided by the volume driver.\nDetails are returned as a map with key/value pairs:\n`{"key":"value","key2":"value2"}`.\n\nThe `Status` field is optional, and is omitted if the volume driver\ndoes not support this feature.\n',
+ examples=[{"hello": "world"}],
+ ),
+ ] = None
+ labels: Annotated[
+ dict[str, str],
+ Field(
+ alias="Labels",
+ description="User-defined key/value metadata.",
+ examples=[
+ {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value",
+ }
+ ],
+ ),
+ ]
+ scope: Annotated[
+ Scope,
+ Field(
+ alias="Scope",
+ description="The level at which the volume exists. Either `global` for cluster-wide,\nor `local` for machine level.\n",
+ examples=["local"],
+ ),
+ ]
+ options: Annotated[
+ dict[str, str],
+ Field(
+ alias="Options",
+ description="The driver specific options used when creating the volume.\n",
+ examples=[{"device": "tmpfs", "o": "size=100m,uid=1000", "type": "tmpfs"}],
+ ),
+ ]
+ usage_data: Annotated[
+ UsageData | None,
+ Field(
+ alias="UsageData",
+ description="Usage details about the volume. This information is used by the\n`GET /system/df` endpoint, and omitted in other endpoints.\n",
+ ),
+ ] = None
class VolumeConfig(BaseModel):
@@ -1531,35 +1829,43 @@ class VolumeConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="The new volume's name. If not specified, Docker generates a name.\n",
- examples=["tardis"],
- )
- driver: str | None = Field(
- default="local",
- alias="Driver",
- description="Name of the volume driver to use.",
- examples=["custom"],
- )
- driver_opts: dict[str, str] | None = Field(
- default=None,
- alias="DriverOpts",
- description="A mapping of driver options and values. These options are\npassed directly to the driver and are driver specific.\n",
- examples=[{"device": "tmpfs", "o": "size=100m,uid=1000", "type": "tmpfs"}],
- )
- labels: dict[str, str] | None = Field(
- default=None,
- alias="Labels",
- description="User-defined key/value metadata.",
- examples=[
- {
- "com.example.some-label": "some-value",
- "com.example.some-other-label": "some-other-value",
- }
- ],
- )
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name",
+ description="The new volume's name. If not specified, Docker generates a name.\n",
+ examples=["tardis"],
+ ),
+ ] = None
+ driver: Annotated[
+ str | None,
+ Field(
+ alias="Driver",
+ description="Name of the volume driver to use.",
+ examples=["custom"],
+ ),
+ ] = "local"
+ driver_opts: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="DriverOpts",
+ description="A mapping of driver options and values. These options are\npassed directly to the driver and are driver specific.\n",
+ examples=[{"device": "tmpfs", "o": "size=100m,uid=1000", "type": "tmpfs"}],
+ ),
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Labels",
+ description="User-defined key/value metadata.",
+ examples=[
+ {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value",
+ }
+ ],
+ ),
+ ] = None
class VolumeListResponse(BaseModel):
@@ -1570,15 +1876,17 @@ class VolumeListResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- volumes: list[Volume] | None = Field(
- default=None, alias="Volumes", description="List of volumes"
- )
- warnings: list[str] | None = Field(
- default=None,
- alias="Warnings",
- description="Warnings that occurred when fetching the list of volumes.\n",
- examples=[[]],
- )
+ volumes: Annotated[
+ list[Volume] | None, Field(alias="Volumes", description="List of volumes")
+ ] = None
+ warnings: Annotated[
+ list[str] | None,
+ Field(
+ alias="Warnings",
+ description="Warnings that occurred when fetching the list of volumes.\n",
+ examples=[[]],
+ ),
+ ] = None
class ConfigReference(BaseModel):
@@ -1591,47 +1899,57 @@ class ConfigReference(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- network: str | None = Field(
- default=None,
- alias="Network",
- description="The name of the config-only network that provides the network's\nconfiguration. The specified network must be an existing config-only\nnetwork. Only network names are allowed, not network IDs.\n",
- examples=["config_only_network_01"],
- )
+ network: Annotated[
+ str | None,
+ Field(
+ alias="Network",
+ description="The name of the config-only network that provides the network's\nconfiguration. The specified network must be an existing config-only\nnetwork. Only network names are allowed, not network IDs.\n",
+ examples=["config_only_network_01"],
+ ),
+ ] = None
class IPAMConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- subnet: str | None = Field(default=None, alias="Subnet", examples=["172.20.0.0/16"])
- ip_range: str | None = Field(
- default=None, alias="IPRange", examples=["172.20.10.0/24"]
- )
- gateway: str | None = Field(
- default=None, alias="Gateway", examples=["172.20.10.11"]
- )
- auxiliary_addresses: dict[str, str] | None = Field(
- default=None, alias="AuxiliaryAddresses"
- )
+ subnet: Annotated[
+ str | None, Field(alias="Subnet", examples=["172.20.0.0/16"])
+ ] = None
+ ip_range: Annotated[
+ str | None, Field(alias="IPRange", examples=["172.20.10.0/24"])
+ ] = None
+ gateway: Annotated[
+ str | None, Field(alias="Gateway", examples=["172.20.10.11"])
+ ] = None
+ auxiliary_addresses: Annotated[
+ dict[str, str] | None, Field(alias="AuxiliaryAddresses")
+ ] = None
class NetworkContainer(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(default=None, alias="Name", examples=["container_1"])
- endpoint_id: str | None = Field(
- default=None,
- alias="EndpointID",
- examples=["628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"],
- )
- mac_address: str | None = Field(
- default=None, alias="MacAddress", examples=["02:42:ac:13:00:02"]
- )
- i_pv4_address: str | None = Field(
- default=None, alias="IPv4Address", examples=["172.19.0.2/16"]
- )
- i_pv6_address: str | None = Field(default=None, alias="IPv6Address", examples=[""])
+ name: Annotated[str | None, Field(alias="Name", examples=["container_1"])] = None
+ endpoint_id: Annotated[
+ str | None,
+ Field(
+ alias="EndpointID",
+ examples=[
+ "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+ ],
+ ),
+ ] = None
+ mac_address: Annotated[
+ str | None, Field(alias="MacAddress", examples=["02:42:ac:13:00:02"])
+ ] = None
+ i_pv4_address: Annotated[
+ str | None, Field(alias="IPv4Address", examples=["172.19.0.2/16"])
+ ] = None
+ i_pv6_address: Annotated[
+ str | None, Field(alias="IPv6Address", examples=[""])
+ ] = None
class PeerInfo(BaseModel):
@@ -1643,18 +1961,22 @@ class PeerInfo(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="ID of the peer-node in the Swarm cluster.",
- examples=["6869d7c1732b"],
- )
- ip: str | None = Field(
- default=None,
- alias="IP",
- description="IP-address of the peer-node in the Swarm cluster.",
- examples=["10.133.77.91"],
- )
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name",
+ description="ID of the peer-node in the Swarm cluster.",
+ examples=["6869d7c1732b"],
+ ),
+ ] = None
+ ip: Annotated[
+ str | None,
+ Field(
+ alias="IP",
+ description="IP-address of the peer-node in the Swarm cluster.",
+ examples=["10.133.77.91"],
+ ),
+ ] = None
class Type4(str, Enum):
@@ -1680,63 +2002,77 @@ class BuildCache(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None,
- alias="ID",
- description="Unique ID of the build cache record.\n",
- examples=["ndlpt0hhvkqcdfkputsk4cq9c"],
- )
- parent: str | None = Field(
- default=None,
- alias="Parent",
- description="ID of the parent build cache record.\n",
- examples=["hw53o5aio51xtltp5xjp8v7fx"],
- )
- type: Type4 | None = Field(
- default=None,
- alias="Type",
- description="Cache record type.\n",
- examples=["regular"],
- )
- description: str | None = Field(
- default=None,
- alias="Description",
- description="Description of the build-step that produced the build cache.\n",
- examples=[
- "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
- ],
- )
- in_use: bool | None = Field(
- default=None,
- alias="InUse",
- description="Indicates if the build cache is in use.\n",
- examples=[False],
- )
- shared: bool | None = Field(
- default=None,
- alias="Shared",
- description="Indicates if the build cache is shared.\n",
- examples=[True],
- )
- size: int | None = Field(
- default=None,
- alias="Size",
- description="Amount of disk space used by the build cache (in bytes).\n",
- examples=[51],
- )
- created_at: str | None = Field(
- default=None,
- alias="CreatedAt",
- description="Date and time at which the build cache was created in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2016-08-18T10:44:24.496525531Z"],
- )
- last_used_at: str | None = Field(
- default=None,
- alias="LastUsedAt",
- description="Date and time at which the build cache was last used in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2017-08-09T07:09:37.632105588Z"],
- )
- usage_count: int | None = Field(default=None, alias="UsageCount", examples=[26])
+ id: Annotated[
+ str | None,
+ Field(
+ alias="ID",
+ description="Unique ID of the build cache record.\n",
+ examples=["ndlpt0hhvkqcdfkputsk4cq9c"],
+ ),
+ ] = None
+ parent: Annotated[
+ str | None,
+ Field(
+ alias="Parent",
+ description="ID of the parent build cache record.\n",
+ examples=["hw53o5aio51xtltp5xjp8v7fx"],
+ ),
+ ] = None
+ type: Annotated[
+ Type4 | None,
+ Field(alias="Type", description="Cache record type.\n", examples=["regular"]),
+ ] = None
+ description: Annotated[
+ str | None,
+ Field(
+ alias="Description",
+ description="Description of the build-step that produced the build cache.\n",
+ examples=[
+ "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
+ ],
+ ),
+ ] = None
+ in_use: Annotated[
+ bool | None,
+ Field(
+ alias="InUse",
+ description="Indicates if the build cache is in use.\n",
+ examples=[False],
+ ),
+ ] = None
+ shared: Annotated[
+ bool | None,
+ Field(
+ alias="Shared",
+ description="Indicates if the build cache is shared.\n",
+ examples=[True],
+ ),
+ ] = None
+ size: Annotated[
+ int | None,
+ Field(
+ alias="Size",
+ description="Amount of disk space used by the build cache (in bytes).\n",
+ examples=[51],
+ ),
+ ] = None
+ created_at: Annotated[
+ str | None,
+ Field(
+ alias="CreatedAt",
+ description="Date and time at which the build cache was created in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2016-08-18T10:44:24.496525531Z"],
+ ),
+ ] = None
+ last_used_at: Annotated[
+ str | None,
+ Field(
+ alias="LastUsedAt",
+ description="Date and time at which the build cache was last used in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2017-08-09T07:09:37.632105588Z"],
+ ),
+ ] = None
+ usage_count: Annotated[int | None, Field(alias="UsageCount", examples=[26])] = None
class ImageID(BaseModel):
@@ -1747,7 +2083,7 @@ class ImageID(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(default=None, alias="ID")
+ id: Annotated[str | None, Field(alias="ID")] = None
class ErrorDetail(BaseModel):
@@ -1774,7 +2110,7 @@ class ErrorResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- message: str = Field(..., description="The error message.")
+ message: Annotated[str, Field(description="The error message.")]
class IdResponse(BaseModel):
@@ -1785,7 +2121,9 @@ class IdResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str = Field(..., alias="Id", description="The id of the newly created object.")
+ id: Annotated[
+ str, Field(alias="Id", description="The id of the newly created object.")
+ ]
class EndpointIPAMConfig(BaseModel):
@@ -1797,61 +2135,63 @@ class EndpointIPAMConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- i_pv4_address: str | None = Field(
- default=None, alias="IPv4Address", examples=["172.20.30.33"]
- )
- i_pv6_address: str | None = Field(
- default=None, alias="IPv6Address", examples=["2001:db8:abcd::3033"]
- )
- link_local_i_ps: list[str] | None = Field(
- default=None, alias="LinkLocalIPs", examples=[["169.254.34.68", "fe80::3468"]]
- )
+ i_pv4_address: Annotated[
+ str | None, Field(alias="IPv4Address", examples=["172.20.30.33"])
+ ] = None
+ i_pv6_address: Annotated[
+ str | None, Field(alias="IPv6Address", examples=["2001:db8:abcd::3033"])
+ ] = None
+ link_local_i_ps: Annotated[
+ list[str] | None,
+ Field(alias="LinkLocalIPs", examples=[["169.254.34.68", "fe80::3468"]]),
+ ] = None
class PluginMount(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(..., alias="Name", examples=["some-mount"])
- description: str = Field(
- ...,
- alias="Description",
- examples=["This is a mount that's used by the plugin."],
- )
- settable: list[str] = Field(..., alias="Settable")
- source: str = Field(..., alias="Source", examples=["/var/lib/docker/plugins/"])
- destination: str = Field(..., alias="Destination", examples=["/mnt/state"])
- type: str = Field(..., alias="Type", examples=["bind"])
- options: list[str] = Field(..., alias="Options", examples=[["rbind", "rw"]])
+ name: Annotated[str, Field(alias="Name", examples=["some-mount"])]
+ description: Annotated[
+ str,
+ Field(
+ alias="Description", examples=["This is a mount that's used by the plugin."]
+ ),
+ ]
+ settable: Annotated[list[str], Field(alias="Settable")]
+ source: Annotated[str, Field(alias="Source", examples=["/var/lib/docker/plugins/"])]
+ destination: Annotated[str, Field(alias="Destination", examples=["/mnt/state"])]
+ type: Annotated[str, Field(alias="Type", examples=["bind"])]
+ options: Annotated[list[str], Field(alias="Options", examples=[["rbind", "rw"]])]
class PluginDevice(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(..., alias="Name")
- description: str = Field(..., alias="Description")
- settable: list[str] = Field(..., alias="Settable")
- path: str = Field(..., alias="Path", examples=["/dev/fuse"])
+ name: Annotated[str, Field(alias="Name")]
+ description: Annotated[str, Field(alias="Description")]
+ settable: Annotated[list[str], Field(alias="Settable")]
+ path: Annotated[str, Field(alias="Path", examples=["/dev/fuse"])]
class PluginEnv(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(..., alias="Name")
- description: str = Field(..., alias="Description")
- settable: list[str] = Field(..., alias="Settable")
- value: str = Field(..., alias="Value")
+ name: Annotated[str, Field(alias="Name")]
+ description: Annotated[str, Field(alias="Description")]
+ settable: Annotated[list[str], Field(alias="Settable")]
+ value: Annotated[str, Field(alias="Value")]
class PluginInterfaceType(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- prefix: str = Field(..., alias="Prefix")
- capability: str = Field(..., alias="Capability")
- version: str = Field(..., alias="Version")
+ prefix: Annotated[str, Field(alias="Prefix")]
+ capability: Annotated[str, Field(alias="Capability")]
+ version: Annotated[str, Field(alias="Version")]
class PluginPrivilege(BaseModel):
@@ -1864,9 +2204,9 @@ class PluginPrivilege(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(default=None, alias="Name", examples=["network"])
- description: str | None = Field(default=None, alias="Description")
- value: list[str] | None = Field(default=None, alias="Value", examples=[["host"]])
+ name: Annotated[str | None, Field(alias="Name", examples=["network"])] = None
+ description: Annotated[str | None, Field(alias="Description")] = None
+ value: Annotated[list[str] | None, Field(alias="Value", examples=[["host"]])] = None
class Settings(BaseModel):
@@ -1877,10 +2217,10 @@ class Settings(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- mounts: list[PluginMount] = Field(..., alias="Mounts")
- env: list[str] = Field(..., alias="Env", examples=[["DEBUG=0"]])
- args: list[str] = Field(..., alias="Args")
- devices: list[PluginDevice] = Field(..., alias="Devices")
+ mounts: Annotated[list[PluginMount], Field(alias="Mounts")]
+ env: Annotated[list[str], Field(alias="Env", examples=[["DEBUG=0"]])]
+ args: Annotated[list[str], Field(alias="Args")]
+ devices: Annotated[list[PluginDevice], Field(alias="Devices")]
class ProtocolScheme(str, Enum):
@@ -1900,70 +2240,76 @@ class Interface(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- types: list[PluginInterfaceType] = Field(
- ..., alias="Types", examples=[["docker.volumedriver/1.0"]]
- )
- socket: str = Field(..., alias="Socket", examples=["plugins.sock"])
- protocol_scheme: ProtocolScheme | None = Field(
- default=None,
- alias="ProtocolScheme",
- description="Protocol to use for clients connecting to the plugin.",
- examples=["some.protocol/v1.0"],
- )
+ types: Annotated[
+ list[PluginInterfaceType],
+ Field(alias="Types", examples=[["docker.volumedriver/1.0"]]),
+ ]
+ socket: Annotated[str, Field(alias="Socket", examples=["plugins.sock"])]
+ protocol_scheme: Annotated[
+ ProtocolScheme | None,
+ Field(
+ alias="ProtocolScheme",
+ description="Protocol to use for clients connecting to the plugin.",
+ examples=["some.protocol/v1.0"],
+ ),
+ ] = None
class User(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- uid: int | None = Field(default=None, alias="UID", examples=[1000])
- gid: int | None = Field(default=None, alias="GID", examples=[1000])
+ uid: Annotated[int | None, Field(alias="UID", examples=[1000])] = None
+ gid: Annotated[int | None, Field(alias="GID", examples=[1000])] = None
class Network1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- type: str = Field(..., alias="Type", examples=["host"])
+ type: Annotated[str, Field(alias="Type", examples=["host"])]
class Linux(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- capabilities: list[str] = Field(
- ..., alias="Capabilities", examples=[["CAP_SYS_ADMIN", "CAP_SYSLOG"]]
- )
- allow_all_devices: bool = Field(..., alias="AllowAllDevices", examples=[False])
- devices: list[PluginDevice] = Field(..., alias="Devices")
+ capabilities: Annotated[
+ list[str],
+ Field(alias="Capabilities", examples=[["CAP_SYS_ADMIN", "CAP_SYSLOG"]]),
+ ]
+ allow_all_devices: Annotated[bool, Field(alias="AllowAllDevices", examples=[False])]
+ devices: Annotated[list[PluginDevice], Field(alias="Devices")]
class Args(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(..., alias="Name", examples=["args"])
- description: str = Field(
- ..., alias="Description", examples=["command line arguments"]
- )
- settable: list[str] = Field(..., alias="Settable")
- value: list[str] = Field(..., alias="Value")
+ name: Annotated[str, Field(alias="Name", examples=["args"])]
+ description: Annotated[
+ str, Field(alias="Description", examples=["command line arguments"])
+ ]
+ settable: Annotated[list[str], Field(alias="Settable")]
+ value: Annotated[list[str], Field(alias="Value")]
class Rootfs(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- type: str | None = Field(default=None, examples=["layers"])
- diff_ids: list[str] | None = Field(
- default=None,
- examples=[
- [
- "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887",
- "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8",
+ type: Annotated[str | None, Field(examples=["layers"])] = None
+ diff_ids: Annotated[
+ list[str] | None,
+ Field(
+ examples=[
+ [
+ "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887",
+ "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8",
+ ]
]
- ],
- )
+ ),
+ ] = None
class Config(BaseModel):
@@ -1974,53 +2320,63 @@ class Config(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- docker_version: str | None = Field(
- default=None,
- alias="DockerVersion",
- description="Docker Version used to create the plugin",
- examples=["17.06.0-ce"],
- )
- description: str = Field(
- ..., alias="Description", examples=["A sample volume plugin for Docker"]
- )
- documentation: str = Field(
- ...,
- alias="Documentation",
- examples=["https://docs.docker.com/engine/extend/plugins/"],
- )
- interface: Interface = Field(
- ...,
- alias="Interface",
- description="The interface between Docker and the plugin",
- )
- entrypoint: list[str] = Field(
- ..., alias="Entrypoint", examples=[["/usr/bin/sample-volume-plugin", "/data"]]
- )
- work_dir: str = Field(..., alias="WorkDir", examples=["/bin/"])
- user: User | None = Field(default=None, alias="User")
- network: Network1 = Field(..., alias="Network")
- linux: Linux = Field(..., alias="Linux")
- propagated_mount: str = Field(
- ..., alias="PropagatedMount", examples=["/mnt/volumes"]
- )
- ipc_host: bool = Field(..., alias="IpcHost", examples=[False])
- pid_host: bool = Field(..., alias="PidHost", examples=[False])
- mounts: list[PluginMount] = Field(..., alias="Mounts")
- env: list[PluginEnv] = Field(
- ...,
- alias="Env",
- examples=[
- [
- {
- "Name": "DEBUG",
- "Description": "If set, prints debug messages",
- "Settable": None,
- "Value": "0",
- }
- ]
- ],
- )
- args: Args = Field(..., alias="Args")
+ docker_version: Annotated[
+ str | None,
+ Field(
+ alias="DockerVersion",
+ description="Docker Version used to create the plugin",
+ examples=["17.06.0-ce"],
+ ),
+ ] = None
+ description: Annotated[
+ str, Field(alias="Description", examples=["A sample volume plugin for Docker"])
+ ]
+ documentation: Annotated[
+ str,
+ Field(
+ alias="Documentation",
+ examples=["https://docs.docker.com/engine/extend/plugins/"],
+ ),
+ ]
+ interface: Annotated[
+ Interface,
+ Field(
+ alias="Interface", description="The interface between Docker and the plugin"
+ ),
+ ]
+ entrypoint: Annotated[
+ list[str],
+ Field(
+ alias="Entrypoint", examples=[["/usr/bin/sample-volume-plugin", "/data"]]
+ ),
+ ]
+ work_dir: Annotated[str, Field(alias="WorkDir", examples=["/bin/"])]
+ user: Annotated[User | None, Field(alias="User")] = None
+ network: Annotated[Network1, Field(alias="Network")]
+ linux: Annotated[Linux, Field(alias="Linux")]
+ propagated_mount: Annotated[
+ str, Field(alias="PropagatedMount", examples=["/mnt/volumes"])
+ ]
+ ipc_host: Annotated[bool, Field(alias="IpcHost", examples=[False])]
+ pid_host: Annotated[bool, Field(alias="PidHost", examples=[False])]
+ mounts: Annotated[list[PluginMount], Field(alias="Mounts")]
+ env: Annotated[
+ list[PluginEnv],
+ Field(
+ alias="Env",
+ examples=[
+ [
+ {
+ "Name": "DEBUG",
+ "Description": "If set, prints debug messages",
+ "Settable": None,
+ "Value": "0",
+ }
+ ]
+ ],
+ ),
+ ]
+ args: Annotated[Args, Field(alias="Args")]
rootfs: Rootfs | None = None
@@ -2032,28 +2388,41 @@ class Plugin(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None,
- alias="Id",
- examples=["5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"],
- )
- name: str = Field(..., alias="Name", examples=["tiborvass/sample-volume-plugin"])
- enabled: bool = Field(
- ...,
- alias="Enabled",
- description="True if the plugin is running. False if the plugin is not running, only installed.",
- examples=[True],
- )
- settings: Settings = Field(
- ..., alias="Settings", description="Settings that can be modified by users."
- )
- plugin_reference: str | None = Field(
- default=None,
- alias="PluginReference",
- description="plugin remote reference used to push/pull the plugin",
- examples=["localhost:5000/tiborvass/sample-volume-plugin:latest"],
- )
- config: Config = Field(..., alias="Config", description="The config of a plugin.")
+ id: Annotated[
+ str | None,
+ Field(
+ alias="Id",
+ examples=[
+ "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
+ ],
+ ),
+ ] = None
+ name: Annotated[
+ str, Field(alias="Name", examples=["tiborvass/sample-volume-plugin"])
+ ]
+ enabled: Annotated[
+ bool,
+ Field(
+ alias="Enabled",
+ description="True if the plugin is running. False if the plugin is not running, only installed.",
+ examples=[True],
+ ),
+ ]
+ settings: Annotated[
+ Settings,
+ Field(alias="Settings", description="Settings that can be modified by users."),
+ ]
+ plugin_reference: Annotated[
+ str | None,
+ Field(
+ alias="PluginReference",
+ description="plugin remote reference used to push/pull the plugin",
+ examples=["localhost:5000/tiborvass/sample-volume-plugin:latest"],
+ ),
+ ] = None
+ config: Annotated[
+ Config, Field(alias="Config", description="The config of a plugin.")
+ ]
class ObjectVersion(BaseModel):
@@ -2074,7 +2443,7 @@ class ObjectVersion(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- index: int | None = Field(default=None, alias="Index", examples=[373531])
+ index: Annotated[int | None, Field(alias="Index", examples=[373531])] = None
class Role(str, Enum):
@@ -2100,27 +2469,26 @@ class NodeSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="Name for the node.",
- examples=["my-node"],
- )
- labels: dict[str, str] | None = Field(
- default=None, alias="Labels", description="User-defined key/value metadata."
- )
- role: Role | None = Field(
- default=None,
- alias="Role",
- description="Role of the node.",
- examples=["manager"],
- )
- availability: Availability | None = Field(
- default=None,
- alias="Availability",
- description="Availability of the node.",
- examples=["active"],
- )
+ name: Annotated[
+ str | None,
+ Field(alias="Name", description="Name for the node.", examples=["my-node"]),
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(alias="Labels", description="User-defined key/value metadata."),
+ ] = None
+ role: Annotated[
+ Role | None,
+ Field(alias="Role", description="Role of the node.", examples=["manager"]),
+ ] = None
+ availability: Annotated[
+ Availability | None,
+ Field(
+ alias="Availability",
+ description="Availability of the node.",
+ examples=["active"],
+ ),
+ ] = None
class Platform(BaseModel):
@@ -2132,26 +2500,30 @@ class Platform(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- architecture: str | None = Field(
- default=None,
- alias="Architecture",
- description="Architecture represents the hardware architecture (for example,\n`x86_64`).\n",
- examples=["x86_64"],
- )
- os: str | None = Field(
- default=None,
- alias="OS",
- description="OS represents the Operating System (for example, `linux` or `windows`).\n",
- examples=["linux"],
- )
+ architecture: Annotated[
+ str | None,
+ Field(
+ alias="Architecture",
+ description="Architecture represents the hardware architecture (for example,\n`x86_64`).\n",
+ examples=["x86_64"],
+ ),
+ ] = None
+ os: Annotated[
+ str | None,
+ Field(
+ alias="OS",
+ description="OS represents the Operating System (for example, `linux` or `windows`).\n",
+ examples=["linux"],
+ ),
+ ] = None
class Plugin1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- type: str | None = Field(default=None, alias="Type")
- name: str | None = Field(default=None, alias="Name")
+ type: Annotated[str | None, Field(alias="Type")] = None
+ name: Annotated[str | None, Field(alias="Name")] = None
class EngineDescription(BaseModel):
@@ -2162,37 +2534,39 @@ class EngineDescription(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- engine_version: str | None = Field(
- default=None, alias="EngineVersion", examples=["17.06.0"]
- )
- labels: dict[str, str] | None = Field(
- default=None, alias="Labels", examples=[{"foo": "bar"}]
- )
- plugins: list[Plugin1] | None = Field(
- default=None,
- alias="Plugins",
- examples=[
- [
- {"Type": "Log", "Name": "awslogs"},
- {"Type": "Log", "Name": "fluentd"},
- {"Type": "Log", "Name": "gcplogs"},
- {"Type": "Log", "Name": "gelf"},
- {"Type": "Log", "Name": "journald"},
- {"Type": "Log", "Name": "json-file"},
- {"Type": "Log", "Name": "splunk"},
- {"Type": "Log", "Name": "syslog"},
- {"Type": "Network", "Name": "bridge"},
- {"Type": "Network", "Name": "host"},
- {"Type": "Network", "Name": "ipvlan"},
- {"Type": "Network", "Name": "macvlan"},
- {"Type": "Network", "Name": "null"},
- {"Type": "Network", "Name": "overlay"},
- {"Type": "Volume", "Name": "local"},
- {"Type": "Volume", "Name": "localhost:5000/vieux/sshfs:latest"},
- {"Type": "Volume", "Name": "vieux/sshfs:latest"},
- ]
- ],
- )
+ engine_version: Annotated[
+ str | None, Field(alias="EngineVersion", examples=["17.06.0"])
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None, Field(alias="Labels", examples=[{"foo": "bar"}])
+ ] = None
+ plugins: Annotated[
+ list[Plugin1] | None,
+ Field(
+ alias="Plugins",
+ examples=[
+ [
+ {"Type": "Log", "Name": "awslogs"},
+ {"Type": "Log", "Name": "fluentd"},
+ {"Type": "Log", "Name": "gcplogs"},
+ {"Type": "Log", "Name": "gelf"},
+ {"Type": "Log", "Name": "journald"},
+ {"Type": "Log", "Name": "json-file"},
+ {"Type": "Log", "Name": "splunk"},
+ {"Type": "Log", "Name": "syslog"},
+ {"Type": "Network", "Name": "bridge"},
+ {"Type": "Network", "Name": "host"},
+ {"Type": "Network", "Name": "ipvlan"},
+ {"Type": "Network", "Name": "macvlan"},
+ {"Type": "Network", "Name": "null"},
+ {"Type": "Network", "Name": "overlay"},
+ {"Type": "Volume", "Name": "local"},
+ {"Type": "Volume", "Name": "localhost:5000/vieux/sshfs:latest"},
+ {"Type": "Volume", "Name": "vieux/sshfs:latest"},
+ ]
+ ],
+ ),
+ ] = None
class TLSInfo(BaseModel):
@@ -2205,21 +2579,27 @@ class TLSInfo(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- trust_root: str | None = Field(
- default=None,
- alias="TrustRoot",
- description="The root CA certificate(s) that are used to validate leaf TLS\ncertificates.\n",
- )
- cert_issuer_subject: str | None = Field(
- default=None,
- alias="CertIssuerSubject",
- description="The base64-url-safe-encoded raw subject bytes of the issuer.",
- )
- cert_issuer_public_key: str | None = Field(
- default=None,
- alias="CertIssuerPublicKey",
- description="The base64-url-safe-encoded raw public key bytes of the issuer.\n",
- )
+ trust_root: Annotated[
+ str | None,
+ Field(
+ alias="TrustRoot",
+ description="The root CA certificate(s) that are used to validate leaf TLS\ncertificates.\n",
+ ),
+ ] = None
+ cert_issuer_subject: Annotated[
+ str | None,
+ Field(
+ alias="CertIssuerSubject",
+ description="The base64-url-safe-encoded raw subject bytes of the issuer.",
+ ),
+ ] = None
+ cert_issuer_public_key: Annotated[
+ str | None,
+ Field(
+ alias="CertIssuerPublicKey",
+ description="The base64-url-safe-encoded raw public key bytes of the issuer.\n",
+ ),
+ ] = None
class NodeState(str, Enum):
@@ -2251,12 +2631,14 @@ class Orchestration(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- task_history_retention_limit: int | None = Field(
- default=None,
- alias="TaskHistoryRetentionLimit",
- description="The number of historic tasks to keep per instance or node. If\nnegative, never remove completed or failed tasks.\n",
- examples=[10],
- )
+ task_history_retention_limit: Annotated[
+ int | None,
+ Field(
+ alias="TaskHistoryRetentionLimit",
+ description="The number of historic tasks to keep per instance or node. If\nnegative, never remove completed or failed tasks.\n",
+ examples=[10],
+ ),
+ ] = None
class Raft(BaseModel):
@@ -2267,35 +2649,45 @@ class Raft(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- snapshot_interval: int | None = Field(
- default=None,
- alias="SnapshotInterval",
- description="The number of log entries between snapshots.",
- examples=[10000],
- )
- keep_old_snapshots: int | None = Field(
- default=None,
- alias="KeepOldSnapshots",
- description="The number of snapshots to keep beyond the current snapshot.\n",
- )
- log_entries_for_slow_followers: int | None = Field(
- default=None,
- alias="LogEntriesForSlowFollowers",
- description="The number of log entries to keep around to sync up slow followers\nafter a snapshot is created.\n",
- examples=[500],
- )
- election_tick: int | None = Field(
- default=None,
- alias="ElectionTick",
- description="The number of ticks that a follower will wait for a message from\nthe leader before becoming a candidate and starting an election.\n`ElectionTick` must be greater than `HeartbeatTick`.\n\nA tick currently defaults to one second, so these translate\ndirectly to seconds currently, but this is NOT guaranteed.\n",
- examples=[3],
- )
- heartbeat_tick: int | None = Field(
- default=None,
- alias="HeartbeatTick",
- description="The number of ticks between heartbeats. Every HeartbeatTick ticks,\nthe leader will send a heartbeat to the followers.\n\nA tick currently defaults to one second, so these translate\ndirectly to seconds currently, but this is NOT guaranteed.\n",
- examples=[1],
- )
+ snapshot_interval: Annotated[
+ int | None,
+ Field(
+ alias="SnapshotInterval",
+ description="The number of log entries between snapshots.",
+ examples=[10000],
+ ),
+ ] = None
+ keep_old_snapshots: Annotated[
+ int | None,
+ Field(
+ alias="KeepOldSnapshots",
+ description="The number of snapshots to keep beyond the current snapshot.\n",
+ ),
+ ] = None
+ log_entries_for_slow_followers: Annotated[
+ int | None,
+ Field(
+ alias="LogEntriesForSlowFollowers",
+ description="The number of log entries to keep around to sync up slow followers\nafter a snapshot is created.\n",
+ examples=[500],
+ ),
+ ] = None
+ election_tick: Annotated[
+ int | None,
+ Field(
+ alias="ElectionTick",
+ description="The number of ticks that a follower will wait for a message from\nthe leader before becoming a candidate and starting an election.\n`ElectionTick` must be greater than `HeartbeatTick`.\n\nA tick currently defaults to one second, so these translate\ndirectly to seconds currently, but this is NOT guaranteed.\n",
+ examples=[3],
+ ),
+ ] = None
+ heartbeat_tick: Annotated[
+ int | None,
+ Field(
+ alias="HeartbeatTick",
+ description="The number of ticks between heartbeats. Every HeartbeatTick ticks,\nthe leader will send a heartbeat to the followers.\n\nA tick currently defaults to one second, so these translate\ndirectly to seconds currently, but this is NOT guaranteed.\n",
+ examples=[1],
+ ),
+ ] = None
class Dispatcher(BaseModel):
@@ -2306,12 +2698,14 @@ class Dispatcher(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- heartbeat_period: int | None = Field(
- default=None,
- alias="HeartbeatPeriod",
- description="The delay for an agent to send a heartbeat to the dispatcher.\n",
- examples=[5000000000],
- )
+ heartbeat_period: Annotated[
+ int | None,
+ Field(
+ alias="HeartbeatPeriod",
+ description="The delay for an agent to send a heartbeat to the dispatcher.\n",
+ examples=[5000000000],
+ ),
+ ] = None
class Protocol(str, Enum):
@@ -2328,26 +2722,34 @@ class ExternalCA(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- protocol: Protocol | None = Field(
- default=Protocol.cfssl,
- alias="Protocol",
- description="Protocol for communication with the external CA (currently\nonly `cfssl` is supported).\n",
- )
- url: str | None = Field(
- default=None,
- alias="URL",
- description="URL where certificate signing requests should be sent.\n",
- )
- options: dict[str, str] | None = Field(
- default=None,
- alias="Options",
- description="An object with key/value pairs that are interpreted as\nprotocol-specific options for the external CA driver.\n",
- )
- ca_cert: str | None = Field(
- default=None,
- alias="CACert",
- description="The root CA certificate (in PEM format) this external CA uses\nto issue TLS certificates (assumed to be to the current swarm\nroot CA certificate if not provided).\n",
- )
+ protocol: Annotated[
+ Protocol | None,
+ Field(
+ alias="Protocol",
+ description="Protocol for communication with the external CA (currently\nonly `cfssl` is supported).\n",
+ ),
+ ] = Protocol.cfssl
+ url: Annotated[
+ str | None,
+ Field(
+ alias="URL",
+ description="URL where certificate signing requests should be sent.\n",
+ ),
+ ] = None
+ options: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Options",
+ description="An object with key/value pairs that are interpreted as\nprotocol-specific options for the external CA driver.\n",
+ ),
+ ] = None
+ ca_cert: Annotated[
+ str | None,
+ Field(
+ alias="CACert",
+ description="The root CA certificate (in PEM format) this external CA uses\nto issue TLS certificates (assumed to be to the current swarm\nroot CA certificate if not provided).\n",
+ ),
+ ] = None
class CaConfig(BaseModel):
@@ -2358,32 +2760,42 @@ class CaConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- node_cert_expiry: int | None = Field(
- default=None,
- alias="NodeCertExpiry",
- description="The duration node certificates are issued for.",
- examples=[7776000000000000],
- )
- external_c_as: list[ExternalCA] | None = Field(
- default=None,
- alias="ExternalCAs",
- description="Configuration for forwarding signing requests to an external\ncertificate authority.\n",
- )
- signing_ca_cert: str | None = Field(
- default=None,
- alias="SigningCACert",
- description="The desired signing CA certificate for all swarm node TLS leaf\ncertificates, in PEM format.\n",
- )
- signing_ca_key: str | None = Field(
- default=None,
- alias="SigningCAKey",
- description="The desired signing CA key for all swarm node TLS leaf certificates,\nin PEM format.\n",
- )
- force_rotate: int | None = Field(
- default=None,
- alias="ForceRotate",
- description="An integer whose purpose is to force swarm to generate a new\nsigning CA certificate and key, if none have been specified in\n`SigningCACert` and `SigningCAKey`\n",
- )
+ node_cert_expiry: Annotated[
+ int | None,
+ Field(
+ alias="NodeCertExpiry",
+ description="The duration node certificates are issued for.",
+ examples=[7776000000000000],
+ ),
+ ] = None
+ external_c_as: Annotated[
+ list[ExternalCA] | None,
+ Field(
+ alias="ExternalCAs",
+ description="Configuration for forwarding signing requests to an external\ncertificate authority.\n",
+ ),
+ ] = None
+ signing_ca_cert: Annotated[
+ str | None,
+ Field(
+ alias="SigningCACert",
+ description="The desired signing CA certificate for all swarm node TLS leaf\ncertificates, in PEM format.\n",
+ ),
+ ] = None
+ signing_ca_key: Annotated[
+ str | None,
+ Field(
+ alias="SigningCAKey",
+ description="The desired signing CA key for all swarm node TLS leaf certificates,\nin PEM format.\n",
+ ),
+ ] = None
+ force_rotate: Annotated[
+ int | None,
+ Field(
+ alias="ForceRotate",
+ description="An integer whose purpose is to force swarm to generate a new\nsigning CA certificate and key, if none have been specified in\n`SigningCACert` and `SigningCAKey`\n",
+ ),
+ ] = None
class EncryptionConfig(BaseModel):
@@ -2394,12 +2806,14 @@ class EncryptionConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- auto_lock_managers: bool | None = Field(
- default=None,
- alias="AutoLockManagers",
- description="If set, generate a key and use it to lock data stored on the\nmanagers.\n",
- examples=[False],
- )
+ auto_lock_managers: Annotated[
+ bool | None,
+ Field(
+ alias="AutoLockManagers",
+ description="If set, generate a key and use it to lock data stored on the\nmanagers.\n",
+ examples=[False],
+ ),
+ ] = None
class LogDriver(BaseModel):
@@ -2415,18 +2829,22 @@ class LogDriver(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="The log driver to use as a default for new tasks.\n",
- examples=["json-file"],
- )
- options: dict[str, str] | None = Field(
- default=None,
- alias="Options",
- description="Driver-specific options for the selected log driver, specified\nas key/value pairs.\n",
- examples=[{"max-file": "10", "max-size": "100m"}],
- )
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name",
+ description="The log driver to use as a default for new tasks.\n",
+ examples=["json-file"],
+ ),
+ ] = None
+ options: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Options",
+ description="Driver-specific options for the selected log driver, specified\nas key/value pairs.\n",
+ examples=[{"max-file": "10", "max-size": "100m"}],
+ ),
+ ] = None
class TaskDefaults(BaseModel):
@@ -2437,11 +2855,13 @@ class TaskDefaults(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- log_driver: LogDriver | None = Field(
- default=None,
- alias="LogDriver",
- description="The log driver to use for tasks created in the orchestrator if\nunspecified by a service.\n\nUpdating this value only affects new tasks. Existing tasks continue\nto use their previously configured log driver until recreated.\n",
- )
+ log_driver: Annotated[
+ LogDriver | None,
+ Field(
+ alias="LogDriver",
+ description="The log driver to use for tasks created in the orchestrator if\nunspecified by a service.\n\nUpdating this value only affects new tasks. Existing tasks continue\nto use their previously configured log driver until recreated.\n",
+ ),
+ ] = None
class SwarmSpec(BaseModel):
@@ -2452,45 +2872,51 @@ class SwarmSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="Name of the swarm.",
- examples=["default"],
- )
- labels: dict[str, str] | None = Field(
- default=None,
- alias="Labels",
- description="User-defined key/value metadata.",
- examples=[
- {
- "com.example.corp.type": "production",
- "com.example.corp.department": "engineering",
- }
- ],
- )
- orchestration: Orchestration | None = Field(
- default=None, alias="Orchestration", description="Orchestration configuration."
- )
- raft: Raft | None = Field(
- default=None, alias="Raft", description="Raft configuration."
- )
- dispatcher: Dispatcher | None = Field(
- default=None, alias="Dispatcher", description="Dispatcher configuration."
- )
- ca_config: CaConfig | None = Field(
- default=None, alias="CAConfig", description="CA configuration."
- )
- encryption_config: EncryptionConfig | None = Field(
- default=None,
- alias="EncryptionConfig",
- description="Parameters related to encryption-at-rest.",
- )
- task_defaults: TaskDefaults | None = Field(
- default=None,
- alias="TaskDefaults",
- description="Defaults for creating tasks in this cluster.",
- )
+ name: Annotated[
+ str | None,
+ Field(alias="Name", description="Name of the swarm.", examples=["default"]),
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Labels",
+ description="User-defined key/value metadata.",
+ examples=[
+ {
+ "com.example.corp.type": "production",
+ "com.example.corp.department": "engineering",
+ }
+ ],
+ ),
+ ] = None
+ orchestration: Annotated[
+ Orchestration | None,
+ Field(alias="Orchestration", description="Orchestration configuration."),
+ ] = None
+ raft: Annotated[
+ Raft | None, Field(alias="Raft", description="Raft configuration.")
+ ] = None
+ dispatcher: Annotated[
+ Dispatcher | None,
+ Field(alias="Dispatcher", description="Dispatcher configuration."),
+ ] = None
+ ca_config: Annotated[
+ CaConfig | None, Field(alias="CAConfig", description="CA configuration.")
+ ] = None
+ encryption_config: Annotated[
+ EncryptionConfig | None,
+ Field(
+ alias="EncryptionConfig",
+ description="Parameters related to encryption-at-rest.",
+ ),
+ ] = None
+ task_defaults: Annotated[
+ TaskDefaults | None,
+ Field(
+ alias="TaskDefaults",
+ description="Defaults for creating tasks in this cluster.",
+ ),
+ ] = None
class ClusterInfo(BaseModel):
@@ -2503,51 +2929,65 @@ class ClusterInfo(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None,
- alias="ID",
- description="The ID of the swarm.",
- examples=["abajmipo7b4xz5ip2nrla6b11"],
- )
- version: ObjectVersion | None = Field(default=None, alias="Version")
- created_at: str | None = Field(
- default=None,
- alias="CreatedAt",
- description="Date and time at which the swarm was initialised in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2016-08-18T10:44:24.496525531Z"],
- )
- updated_at: str | None = Field(
- default=None,
- alias="UpdatedAt",
- description="Date and time at which the swarm was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2017-08-09T07:09:37.632105588Z"],
- )
- spec: SwarmSpec | None = Field(default=None, alias="Spec")
- tls_info: TLSInfo | None = Field(default=None, alias="TLSInfo")
- root_rotation_in_progress: bool | None = Field(
- default=None,
- alias="RootRotationInProgress",
- description="Whether there is currently a root CA rotation in progress for the swarm\n",
- examples=[False],
- )
- data_path_port: int | None = Field(
- default=4789,
- alias="DataPathPort",
- description="DataPathPort specifies the data path port number for data traffic.\nAcceptable port range is 1024 to 49151.\nIf no port is set or is set to 0, the default port (4789) is used.\n",
- examples=[4789],
- )
- default_addr_pool: list[str] | None = Field(
- default=None,
- alias="DefaultAddrPool",
- description="Default Address Pool specifies default subnet pools for global scope\nnetworks.\n",
- )
- subnet_size: int | None = Field(
- default=24,
- alias="SubnetSize",
- description="SubnetSize specifies the subnet size of the networks created from the\ndefault subnet pool.\n",
- examples=[24],
- le=29,
- )
+ id: Annotated[
+ str | None,
+ Field(
+ alias="ID",
+ description="The ID of the swarm.",
+ examples=["abajmipo7b4xz5ip2nrla6b11"],
+ ),
+ ] = None
+ version: Annotated[ObjectVersion | None, Field(alias="Version")] = None
+ created_at: Annotated[
+ str | None,
+ Field(
+ alias="CreatedAt",
+ description="Date and time at which the swarm was initialised in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2016-08-18T10:44:24.496525531Z"],
+ ),
+ ] = None
+ updated_at: Annotated[
+ str | None,
+ Field(
+ alias="UpdatedAt",
+ description="Date and time at which the swarm was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2017-08-09T07:09:37.632105588Z"],
+ ),
+ ] = None
+ spec: Annotated[SwarmSpec | None, Field(alias="Spec")] = None
+ tls_info: Annotated[TLSInfo | None, Field(alias="TLSInfo")] = None
+ root_rotation_in_progress: Annotated[
+ bool | None,
+ Field(
+ alias="RootRotationInProgress",
+ description="Whether there is currently a root CA rotation in progress for the swarm\n",
+ examples=[False],
+ ),
+ ] = None
+ data_path_port: Annotated[
+ int | None,
+ Field(
+ alias="DataPathPort",
+ description="DataPathPort specifies the data path port number for data traffic.\nAcceptable port range is 1024 to 49151.\nIf no port is set or is set to 0, the default port (4789) is used.\n",
+ examples=[4789],
+ ),
+ ] = 4789
+ default_addr_pool: Annotated[
+ list[str] | None,
+ Field(
+ alias="DefaultAddrPool",
+ description="Default Address Pool specifies default subnet pools for global scope\nnetworks.\n",
+ ),
+ ] = None
+ subnet_size: Annotated[
+ int | None,
+ Field(
+ alias="SubnetSize",
+ description="SubnetSize specifies the subnet size of the networks created from the\ndefault subnet pool.\n",
+ examples=[24],
+ le=29,
+ ),
+ ] = 24
class JoinTokens(BaseModel):
@@ -2559,29 +2999,33 @@ class JoinTokens(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- worker: str | None = Field(
- default=None,
- alias="Worker",
- description="The token workers can use to join the swarm.\n",
- examples=[
- "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
- ],
- )
- manager: str | None = Field(
- default=None,
- alias="Manager",
- description="The token managers can use to join the swarm.\n",
- examples=[
- "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
- ],
- )
+ worker: Annotated[
+ str | None,
+ Field(
+ alias="Worker",
+ description="The token workers can use to join the swarm.\n",
+ examples=[
+ "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
+ ],
+ ),
+ ] = None
+ manager: Annotated[
+ str | None,
+ Field(
+ alias="Manager",
+ description="The token managers can use to join the swarm.\n",
+ examples=[
+ "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+ ],
+ ),
+ ] = None
class Swarm(ClusterInfo):
model_config = ConfigDict(
populate_by_name=True,
)
- join_tokens: JoinTokens | None = Field(default=None, alias="JoinTokens")
+ join_tokens: Annotated[JoinTokens | None, Field(alias="JoinTokens")] = None
class PluginSpec(BaseModel):
@@ -2600,20 +3044,21 @@ class PluginSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="The name or 'alias' to use for the plugin.",
- )
- remote: str | None = Field(
- default=None, alias="Remote", description="The plugin image reference to use."
- )
- disabled: bool | None = Field(
- default=None, alias="Disabled", description="Disable the plugin once scheduled."
- )
- plugin_privilege: list[PluginPrivilege] | None = Field(
- default=None, alias="PluginPrivilege"
- )
+ name: Annotated[
+ str | None,
+ Field(alias="Name", description="The name or 'alias' to use for the plugin."),
+ ] = None
+ remote: Annotated[
+ str | None,
+ Field(alias="Remote", description="The plugin image reference to use."),
+ ] = None
+ disabled: Annotated[
+ bool | None,
+ Field(alias="Disabled", description="Disable the plugin once scheduled."),
+ ] = None
+ plugin_privilege: Annotated[
+ list[PluginPrivilege] | None, Field(alias="PluginPrivilege")
+ ] = None
class CredentialSpec(BaseModel):
@@ -2624,23 +3069,29 @@ class CredentialSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- config: str | None = Field(
- default=None,
- alias="Config",
- description="Load credential spec from a Swarm Config with the given ID.\nThe specified config must also be present in the Configs\nfield with the Runtime property set.\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n",
- examples=["0bt9dmxjvjiqermk6xrop3ekq"],
- )
- file: str | None = Field(
- default=None,
- alias="File",
- description="Load credential spec from this file. The file is read by\nthe daemon, and must be present in the `CredentialSpecs`\nsubdirectory in the docker data directory, which defaults\nto `C:\\ProgramData\\Docker\\` on Windows.\n\nFor example, specifying `spec.json` loads\n`C:\\ProgramData\\Docker\\CredentialSpecs\\spec.json`.\n\n
\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n",
- examples=["spec.json"],
- )
- registry: str | None = Field(
- default=None,
- alias="Registry",
- description="Load credential spec from this value in the Windows\nregistry. The specified registry value must be located in:\n\n`HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Virtualization\\Containers\\CredentialSpecs`\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n",
- )
+ config: Annotated[
+ str | None,
+ Field(
+ alias="Config",
+ description="Load credential spec from a Swarm Config with the given ID.\nThe specified config must also be present in the Configs\nfield with the Runtime property set.\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n",
+ examples=["0bt9dmxjvjiqermk6xrop3ekq"],
+ ),
+ ] = None
+ file: Annotated[
+ str | None,
+ Field(
+ alias="File",
+ description="Load credential spec from this file. The file is read by\nthe daemon, and must be present in the `CredentialSpecs`\nsubdirectory in the docker data directory, which defaults\nto `C:\\ProgramData\\Docker\\` on Windows.\n\nFor example, specifying `spec.json` loads\n`C:\\ProgramData\\Docker\\CredentialSpecs\\spec.json`.\n\n
\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n",
+ examples=["spec.json"],
+ ),
+ ] = None
+ registry: Annotated[
+ str | None,
+ Field(
+ alias="Registry",
+ description="Load credential spec from this value in the Windows\nregistry. The specified registry value must be located in:\n\n`HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Virtualization\\Containers\\CredentialSpecs`\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n",
+ ),
+ ] = None
class SeLinuxContext(BaseModel):
@@ -2651,21 +3102,21 @@ class SeLinuxContext(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- disable: bool | None = Field(
- default=None, alias="Disable", description="Disable SELinux"
- )
- user: str | None = Field(
- default=None, alias="User", description="SELinux user label"
- )
- role: str | None = Field(
- default=None, alias="Role", description="SELinux role label"
- )
- type: str | None = Field(
- default=None, alias="Type", description="SELinux type label"
- )
- level: str | None = Field(
- default=None, alias="Level", description="SELinux level label"
- )
+ disable: Annotated[
+ bool | None, Field(alias="Disable", description="Disable SELinux")
+ ] = None
+ user: Annotated[
+ str | None, Field(alias="User", description="SELinux user label")
+ ] = None
+ role: Annotated[
+ str | None, Field(alias="Role", description="SELinux role label")
+ ] = None
+ type: Annotated[
+ str | None, Field(alias="Type", description="SELinux type label")
+ ] = None
+ level: Annotated[
+ str | None, Field(alias="Level", description="SELinux level label")
+ ] = None
class Privileges(BaseModel):
@@ -2676,16 +3127,17 @@ class Privileges(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- credential_spec: CredentialSpec | None = Field(
- default=None,
- alias="CredentialSpec",
- description="CredentialSpec for managed service account (Windows only)",
- )
- se_linux_context: SeLinuxContext | None = Field(
- default=None,
- alias="SELinuxContext",
- description="SELinux labels of the container",
- )
+ credential_spec: Annotated[
+ CredentialSpec | None,
+ Field(
+ alias="CredentialSpec",
+ description="CredentialSpec for managed service account (Windows only)",
+ ),
+ ] = None
+ se_linux_context: Annotated[
+ SeLinuxContext | None,
+ Field(alias="SELinuxContext", description="SELinux labels of the container"),
+ ] = None
class DnsConfig(BaseModel):
@@ -2698,19 +3150,21 @@ class DnsConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- nameservers: list[str] | None = Field(
- default=None,
- alias="Nameservers",
- description="The IP addresses of the name servers.",
- )
- search: list[str] | None = Field(
- default=None, alias="Search", description="A search list for host-name lookup."
- )
- options: list[str] | None = Field(
- default=None,
- alias="Options",
- description="A list of internal resolver variables to be modified (e.g.,\n`debug`, `ndots:3`, etc.).\n",
- )
+ nameservers: Annotated[
+ list[str] | None,
+ Field(alias="Nameservers", description="The IP addresses of the name servers."),
+ ] = None
+ search: Annotated[
+ list[str] | None,
+ Field(alias="Search", description="A search list for host-name lookup."),
+ ] = None
+ options: Annotated[
+ list[str] | None,
+ Field(
+ alias="Options",
+ description="A list of internal resolver variables to be modified (e.g.,\n`debug`, `ndots:3`, etc.).\n",
+ ),
+ ] = None
class File(BaseModel):
@@ -2722,43 +3176,50 @@ class File(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="Name represents the final filename in the filesystem.\n",
- )
- uid: str | None = Field(
- default=None, alias="UID", description="UID represents the file UID."
- )
- gid: str | None = Field(
- default=None, alias="GID", description="GID represents the file GID."
- )
- mode: int | None = Field(
- default=None,
- alias="Mode",
- description="Mode represents the FileMode of the file.",
- )
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name",
+ description="Name represents the final filename in the filesystem.\n",
+ ),
+ ] = None
+ uid: Annotated[
+ str | None, Field(alias="UID", description="UID represents the file UID.")
+ ] = None
+ gid: Annotated[
+ str | None, Field(alias="GID", description="GID represents the file GID.")
+ ] = None
+ mode: Annotated[
+ int | None,
+ Field(alias="Mode", description="Mode represents the FileMode of the file."),
+ ] = None
class Secret(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- file: File | None = Field(
- default=None,
- alias="File",
- description="File represents a specific target that is backed by a file.\n",
- )
- secret_id: str | None = Field(
- default=None,
- alias="SecretID",
- description="SecretID represents the ID of the specific secret that we're\nreferencing.\n",
- )
- secret_name: str | None = Field(
- default=None,
- alias="SecretName",
- description="SecretName is the name of the secret that this references,\nbut this is just provided for lookup/display purposes. The\nsecret in the reference will be identified by its ID.\n",
- )
+ file: Annotated[
+ File | None,
+ Field(
+ alias="File",
+ description="File represents a specific target that is backed by a file.\n",
+ ),
+ ] = None
+ secret_id: Annotated[
+ str | None,
+ Field(
+ alias="SecretID",
+ description="SecretID represents the ID of the specific secret that we're\nreferencing.\n",
+ ),
+ ] = None
+ secret_name: Annotated[
+ str | None,
+ Field(
+ alias="SecretName",
+ description="SecretName is the name of the secret that this references,\nbut this is just provided for lookup/display purposes. The\nsecret in the reference will be identified by its ID.\n",
+ ),
+ ] = None
class File1(BaseModel):
@@ -2774,48 +3235,57 @@ class File1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="Name represents the final filename in the filesystem.\n",
- )
- uid: str | None = Field(
- default=None, alias="UID", description="UID represents the file UID."
- )
- gid: str | None = Field(
- default=None, alias="GID", description="GID represents the file GID."
- )
- mode: int | None = Field(
- default=None,
- alias="Mode",
- description="Mode represents the FileMode of the file.",
- )
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name",
+ description="Name represents the final filename in the filesystem.\n",
+ ),
+ ] = None
+ uid: Annotated[
+ str | None, Field(alias="UID", description="UID represents the file UID.")
+ ] = None
+ gid: Annotated[
+ str | None, Field(alias="GID", description="GID represents the file GID.")
+ ] = None
+ mode: Annotated[
+ int | None,
+ Field(alias="Mode", description="Mode represents the FileMode of the file."),
+ ] = None
class Config1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- file: File1 | None = Field(
- default=None,
- alias="File",
- description="File represents a specific target that is backed by a file.\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive\n",
- )
- runtime: dict[str, Any] | None = Field(
- default=None,
- alias="Runtime",
- description="Runtime represents a target that is not mounted into the\ncontainer but is used by the task\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually\n> exclusive\n",
- )
- config_id: str | None = Field(
- default=None,
- alias="ConfigID",
- description="ConfigID represents the ID of the specific config that we're\nreferencing.\n",
- )
- config_name: str | None = Field(
- default=None,
- alias="ConfigName",
- description="ConfigName is the name of the config that this references,\nbut this is just provided for lookup/display purposes. The\nconfig in the reference will be identified by its ID.\n",
- )
+ file: Annotated[
+ File1 | None,
+ Field(
+ alias="File",
+ description="File represents a specific target that is backed by a file.\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive\n",
+ ),
+ ] = None
+ runtime: Annotated[
+ dict[str, Any] | None,
+ Field(
+ alias="Runtime",
+ description="Runtime represents a target that is not mounted into the\ncontainer but is used by the task\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually\n> exclusive\n",
+ ),
+ ] = None
+ config_id: Annotated[
+ str | None,
+ Field(
+ alias="ConfigID",
+ description="ConfigID represents the ID of the specific config that we're\nreferencing.\n",
+ ),
+ ] = None
+ config_name: Annotated[
+ str | None,
+ Field(
+ alias="ConfigName",
+ description="ConfigName is the name of the config that this references,\nbut this is just provided for lookup/display purposes. The\nconfig in the reference will be identified by its ID.\n",
+ ),
+ ] = None
class Isolation1(str, Enum):
@@ -2846,127 +3316,158 @@ class ContainerSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- image: str | None = Field(
- default=None,
- alias="Image",
- description="The image name to use for the container",
- )
- labels: dict[str, str] | None = Field(
- default=None, alias="Labels", description="User-defined key/value data."
- )
- command: list[str] | None = Field(
- default=None, alias="Command", description="The command to be run in the image."
- )
- args: list[str] | None = Field(
- default=None, alias="Args", description="Arguments to the command."
- )
- hostname: str | None = Field(
- default=None,
- alias="Hostname",
- description="The hostname to use for the container, as a valid\n[RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.\n",
- )
- env: list[str] | None = Field(
- default=None,
- alias="Env",
- description="A list of environment variables in the form `VAR=value`.\n",
- )
- dir: str | None = Field(
- default=None,
- alias="Dir",
- description="The working directory for commands to run in.",
- )
- user: str | None = Field(
- default=None, alias="User", description="The user inside the container."
- )
- groups: list[str] | None = Field(
- default=None,
- alias="Groups",
- description="A list of additional groups that the container process will run as.\n",
- )
- privileges: Privileges | None = Field(
- default=None,
- alias="Privileges",
- description="Security options for the container",
- )
- tty: bool | None = Field(
- default=None,
- alias="TTY",
- description="Whether a pseudo-TTY should be allocated.",
- )
- open_stdin: bool | None = Field(
- default=None, alias="OpenStdin", description="Open `stdin`"
- )
- read_only: bool | None = Field(
- default=None,
- alias="ReadOnly",
- description="Mount the container's root filesystem as read only.",
- )
- mounts: list[Mount] | None = Field(
- default=None,
- alias="Mounts",
- description="Specification for mounts to be added to containers created as part\nof the service.\n",
- )
- stop_signal: str | None = Field(
- default=None, alias="StopSignal", description="Signal to stop the container."
- )
- stop_grace_period: int | None = Field(
- default=None,
- alias="StopGracePeriod",
- description="Amount of time to wait for the container to terminate before\nforcefully killing it.\n",
- )
- health_check: HealthConfig | None = Field(default=None, alias="HealthCheck")
- hosts: list[str] | None = Field(
- default=None,
- alias="Hosts",
- description="A list of hostname/IP mappings to add to the container's `hosts`\nfile. The format of extra hosts is specified in the\n[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)\nman page:\n\n IP_address canonical_hostname [aliases...]\n",
- )
- dns_config: DnsConfig | None = Field(
- default=None,
- alias="DNSConfig",
- description="Specification for DNS related configurations in resolver configuration\nfile (`resolv.conf`).\n",
- )
- secrets: list[Secret] | None = Field(
- default=None,
- alias="Secrets",
- description="Secrets contains references to zero or more secrets that will be\nexposed to the service.\n",
- )
- configs: list[Config1] | None = Field(
- default=None,
- alias="Configs",
- description="Configs contains references to zero or more configs that will be\nexposed to the service.\n",
- )
- isolation: Isolation1 | None = Field(
- default=None,
- alias="Isolation",
- description="Isolation technology of the containers running the service.\n(Windows only)\n",
- )
- init: bool | None = Field(
- default=None,
- alias="Init",
- description="Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n",
- )
- sysctls: dict[str, str] | None = Field(
- default=None,
- alias="Sysctls",
- description="Set kernel namedspaced parameters (sysctls) in the container.\nThe Sysctls option on services accepts the same sysctls as the\nare supported on containers. Note that while the same sysctls are\nsupported, no guarantees or checks are made about their\nsuitability for a clustered environment, and it's up to the user\nto determine whether a given sysctl will work properly in a\nService.\n",
- )
- capability_add: list[str] | None = Field(
- default=None,
- alias="CapabilityAdd",
- description="A list of kernel capabilities to add to the default set\nfor the container.\n",
- examples=[["CAP_NET_RAW", "CAP_SYS_ADMIN", "CAP_SYS_CHROOT", "CAP_SYSLOG"]],
- )
- capability_drop: list[str] | None = Field(
- default=None,
- alias="CapabilityDrop",
- description="A list of kernel capabilities to drop from the default set\nfor the container.\n",
- examples=[["CAP_NET_RAW"]],
- )
- ulimits: list[Ulimit] | None = Field(
- default=None,
- alias="Ulimits",
- description='A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`"\n',
- )
+ image: Annotated[
+ str | None,
+ Field(alias="Image", description="The image name to use for the container"),
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(alias="Labels", description="User-defined key/value data."),
+ ] = None
+ command: Annotated[
+ list[str] | None,
+ Field(alias="Command", description="The command to be run in the image."),
+ ] = None
+ args: Annotated[
+ list[str] | None, Field(alias="Args", description="Arguments to the command.")
+ ] = None
+ hostname: Annotated[
+ str | None,
+ Field(
+ alias="Hostname",
+ description="The hostname to use for the container, as a valid\n[RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.\n",
+ ),
+ ] = None
+ env: Annotated[
+ list[str] | None,
+ Field(
+ alias="Env",
+ description="A list of environment variables in the form `VAR=value`.\n",
+ ),
+ ] = None
+ dir: Annotated[
+ str | None,
+ Field(alias="Dir", description="The working directory for commands to run in."),
+ ] = None
+ user: Annotated[
+ str | None, Field(alias="User", description="The user inside the container.")
+ ] = None
+ groups: Annotated[
+ list[str] | None,
+ Field(
+ alias="Groups",
+ description="A list of additional groups that the container process will run as.\n",
+ ),
+ ] = None
+ privileges: Annotated[
+ Privileges | None,
+ Field(alias="Privileges", description="Security options for the container"),
+ ] = None
+ tty: Annotated[
+ bool | None,
+ Field(alias="TTY", description="Whether a pseudo-TTY should be allocated."),
+ ] = None
+ open_stdin: Annotated[
+ bool | None, Field(alias="OpenStdin", description="Open `stdin`")
+ ] = None
+ read_only: Annotated[
+ bool | None,
+ Field(
+ alias="ReadOnly",
+ description="Mount the container's root filesystem as read only.",
+ ),
+ ] = None
+ mounts: Annotated[
+ list[Mount] | None,
+ Field(
+ alias="Mounts",
+ description="Specification for mounts to be added to containers created as part\nof the service.\n",
+ ),
+ ] = None
+ stop_signal: Annotated[
+ str | None,
+ Field(alias="StopSignal", description="Signal to stop the container."),
+ ] = None
+ stop_grace_period: Annotated[
+ int | None,
+ Field(
+ alias="StopGracePeriod",
+ description="Amount of time to wait for the container to terminate before\nforcefully killing it.\n",
+ ),
+ ] = None
+ health_check: Annotated[HealthConfig | None, Field(alias="HealthCheck")] = None
+ hosts: Annotated[
+ list[str] | None,
+ Field(
+ alias="Hosts",
+ description="A list of hostname/IP mappings to add to the container's `hosts`\nfile. The format of extra hosts is specified in the\n[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)\nman page:\n\n IP_address canonical_hostname [aliases...]\n",
+ ),
+ ] = None
+ dns_config: Annotated[
+ DnsConfig | None,
+ Field(
+ alias="DNSConfig",
+ description="Specification for DNS related configurations in resolver configuration\nfile (`resolv.conf`).\n",
+ ),
+ ] = None
+ secrets: Annotated[
+ list[Secret] | None,
+ Field(
+ alias="Secrets",
+ description="Secrets contains references to zero or more secrets that will be\nexposed to the service.\n",
+ ),
+ ] = None
+ configs: Annotated[
+ list[Config1] | None,
+ Field(
+ alias="Configs",
+ description="Configs contains references to zero or more configs that will be\nexposed to the service.\n",
+ ),
+ ] = None
+ isolation: Annotated[
+ Isolation1 | None,
+ Field(
+ alias="Isolation",
+ description="Isolation technology of the containers running the service.\n(Windows only)\n",
+ ),
+ ] = None
+ init: Annotated[
+ bool | None,
+ Field(
+ alias="Init",
+ description="Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n",
+ ),
+ ] = None
+ sysctls: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Sysctls",
+ description="Set kernel namedspaced parameters (sysctls) in the container.\nThe Sysctls option on services accepts the same sysctls as the\nare supported on containers. Note that while the same sysctls are\nsupported, no guarantees or checks are made about their\nsuitability for a clustered environment, and it's up to the user\nto determine whether a given sysctl will work properly in a\nService.\n",
+ ),
+ ] = None
+ capability_add: Annotated[
+ list[str] | None,
+ Field(
+ alias="CapabilityAdd",
+ description="A list of kernel capabilities to add to the default set\nfor the container.\n",
+ examples=[["CAP_NET_RAW", "CAP_SYS_ADMIN", "CAP_SYS_CHROOT", "CAP_SYSLOG"]],
+ ),
+ ] = None
+ capability_drop: Annotated[
+ list[str] | None,
+ Field(
+ alias="CapabilityDrop",
+ description="A list of kernel capabilities to drop from the default set\nfor the container.\n",
+ examples=[["CAP_NET_RAW"]],
+ ),
+ ] = None
+ ulimits: Annotated[
+ list[Ulimit] | None,
+ Field(
+ alias="Ulimits",
+ description='A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`"\n',
+ ),
+ ] = None
class NetworkAttachmentSpec(BaseModel):
@@ -2986,11 +3487,13 @@ class NetworkAttachmentSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- container_id: str | None = Field(
- default=None,
- alias="ContainerID",
- description="ID of the container represented by this task",
- )
+ container_id: Annotated[
+ str | None,
+ Field(
+ alias="ContainerID",
+ description="ID of the container represented by this task",
+ ),
+ ] = None
class Condition(str, Enum):
@@ -3013,81 +3516,95 @@ class RestartPolicy1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- condition: Condition | None = Field(
- default=None, alias="Condition", description="Condition for restart."
- )
- delay: int | None = Field(
- default=None, alias="Delay", description="Delay between restart attempts."
- )
- max_attempts: int | None = Field(
- default=0,
- alias="MaxAttempts",
- description="Maximum attempts to restart a given container before giving up\n(default value is 0, which is ignored).\n",
- )
- window: int | None = Field(
- default=0,
- alias="Window",
- description="Windows is the time window used to evaluate the restart policy\n(default value is 0, which is unbounded).\n",
- )
+ condition: Annotated[
+ Condition | None, Field(alias="Condition", description="Condition for restart.")
+ ] = None
+ delay: Annotated[
+ int | None, Field(alias="Delay", description="Delay between restart attempts.")
+ ] = None
+ max_attempts: Annotated[
+ int | None,
+ Field(
+ alias="MaxAttempts",
+ description="Maximum attempts to restart a given container before giving up\n(default value is 0, which is ignored).\n",
+ ),
+ ] = 0
+ window: Annotated[
+ int | None,
+ Field(
+ alias="Window",
+ description="Windows is the time window used to evaluate the restart policy\n(default value is 0, which is unbounded).\n",
+ ),
+ ] = 0
class Spread(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- spread_descriptor: str | None = Field(
- default=None,
- alias="SpreadDescriptor",
- description="label descriptor, such as `engine.labels.az`.\n",
- )
+ spread_descriptor: Annotated[
+ str | None,
+ Field(
+ alias="SpreadDescriptor",
+ description="label descriptor, such as `engine.labels.az`.\n",
+ ),
+ ] = None
class Preference(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- spread: Spread | None = Field(default=None, alias="Spread")
+ spread: Annotated[Spread | None, Field(alias="Spread")] = None
class Placement(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- constraints: list[str] | None = Field(
- default=None,
- alias="Constraints",
- description="An array of constraint expressions to limit the set of nodes where\na task can be scheduled. Constraint expressions can either use a\n_match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find\nnodes that satisfy every expression (AND match). Constraints can\nmatch node or Docker Engine labels as follows:\n\nnode attribute | matches | example\n---------------------|--------------------------------|-----------------------------------------------\n`node.id` | Node ID | `node.id==2ivku8v2gvtg4`\n`node.hostname` | Node hostname | `node.hostname!=node-2`\n`node.role` | Node role (`manager`/`worker`) | `node.role==manager`\n`node.platform.os` | Node operating system | `node.platform.os==windows`\n`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`\n`node.labels` | User-defined node labels | `node.labels.security==high`\n`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`\n\n`engine.labels` apply to Docker Engine labels like operating system,\ndrivers, etc. Swarm administrators add `node.labels` for operational\npurposes by using the [`node update endpoint`](#operation/NodeUpdate).\n",
- examples=[
- [
- "node.hostname!=node3.corp.example.com",
- "node.role!=manager",
- "node.labels.type==production",
- "node.platform.os==linux",
- "node.platform.arch==x86_64",
- ]
- ],
- )
- preferences: list[Preference] | None = Field(
- default=None,
- alias="Preferences",
- description="Preferences provide a way to make the scheduler aware of factors\nsuch as topology. They are provided in order from highest to\nlowest precedence.\n",
- examples=[
- [
- {"Spread": {"SpreadDescriptor": "node.labels.datacenter"}},
- {"Spread": {"SpreadDescriptor": "node.labels.rack"}},
- ]
- ],
- )
- max_replicas: int | None = Field(
- default=0,
- alias="MaxReplicas",
- description="Maximum number of replicas for per node (default value is 0, which\nis unlimited)\n",
- )
- platforms: list[Platform] | None = Field(
- default=None,
- alias="Platforms",
- description="Platforms stores all the platforms that the service's image can\nrun on. This field is used in the platform filter for scheduling.\nIf empty, then the platform filter is off, meaning there are no\nscheduling restrictions.\n",
- )
+ constraints: Annotated[
+ list[str] | None,
+ Field(
+ alias="Constraints",
+ description="An array of constraint expressions to limit the set of nodes where\na task can be scheduled. Constraint expressions can either use a\n_match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find\nnodes that satisfy every expression (AND match). Constraints can\nmatch node or Docker Engine labels as follows:\n\nnode attribute | matches | example\n---------------------|--------------------------------|-----------------------------------------------\n`node.id` | Node ID | `node.id==2ivku8v2gvtg4`\n`node.hostname` | Node hostname | `node.hostname!=node-2`\n`node.role` | Node role (`manager`/`worker`) | `node.role==manager`\n`node.platform.os` | Node operating system | `node.platform.os==windows`\n`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`\n`node.labels` | User-defined node labels | `node.labels.security==high`\n`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`\n\n`engine.labels` apply to Docker Engine labels like operating system,\ndrivers, etc. Swarm administrators add `node.labels` for operational\npurposes by using the [`node update endpoint`](#operation/NodeUpdate).\n",
+ examples=[
+ [
+ "node.hostname!=node3.corp.example.com",
+ "node.role!=manager",
+ "node.labels.type==production",
+ "node.platform.os==linux",
+ "node.platform.arch==x86_64",
+ ]
+ ],
+ ),
+ ] = None
+ preferences: Annotated[
+ list[Preference] | None,
+ Field(
+ alias="Preferences",
+ description="Preferences provide a way to make the scheduler aware of factors\nsuch as topology. They are provided in order from highest to\nlowest precedence.\n",
+ examples=[
+ [
+ {"Spread": {"SpreadDescriptor": "node.labels.datacenter"}},
+ {"Spread": {"SpreadDescriptor": "node.labels.rack"}},
+ ]
+ ],
+ ),
+ ] = None
+ max_replicas: Annotated[
+ int | None,
+ Field(
+ alias="MaxReplicas",
+ description="Maximum number of replicas for per node (default value is 0, which\nis unlimited)\n",
+ ),
+ ] = 0
+ platforms: Annotated[
+ list[Platform] | None,
+ Field(
+ alias="Platforms",
+ description="Platforms stores all the platforms that the service's image can\nrun on. This field is used in the platform filter for scheduling.\nIf empty, then the platform filter is off, meaning there are no\nscheduling restrictions.\n",
+ ),
+ ] = None
class LogDriver1(BaseModel):
@@ -3101,8 +3618,8 @@ class LogDriver1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(default=None, alias="Name")
- options: dict[str, str] | None = Field(default=None, alias="Options")
+ name: Annotated[str | None, Field(alias="Name")] = None
+ options: Annotated[dict[str, str] | None, Field(alias="Options")] = None
class TaskState(str, Enum):
@@ -3127,29 +3644,29 @@ class ContainerStatus(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- container_id: str | None = Field(default=None, alias="ContainerID")
- pid: int | None = Field(default=None, alias="PID")
- exit_code: int | None = Field(default=None, alias="ExitCode")
+ container_id: Annotated[str | None, Field(alias="ContainerID")] = None
+ pid: Annotated[int | None, Field(alias="PID")] = None
+ exit_code: Annotated[int | None, Field(alias="ExitCode")] = None
class Status1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- timestamp: str | None = Field(default=None, alias="Timestamp")
- state: TaskState | None = Field(default=None, alias="State")
- message: str | None = Field(default=None, alias="Message")
- err: str | None = Field(default=None, alias="Err")
- container_status: ContainerStatus | None = Field(
- default=None, alias="ContainerStatus"
- )
+ timestamp: Annotated[str | None, Field(alias="Timestamp")] = None
+ state: Annotated[TaskState | None, Field(alias="State")] = None
+ message: Annotated[str | None, Field(alias="Message")] = None
+ err: Annotated[str | None, Field(alias="Err")] = None
+ container_status: Annotated[
+ ContainerStatus | None, Field(alias="ContainerStatus")
+ ] = None
class Replicated(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- replicas: int | None = Field(default=None, alias="Replicas")
+ replicas: Annotated[int | None, Field(alias="Replicas")] = None
class ReplicatedJob(BaseModel):
@@ -3162,16 +3679,20 @@ class ReplicatedJob(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- max_concurrent: int | None = Field(
- default=1,
- alias="MaxConcurrent",
- description="The maximum number of replicas to run simultaneously.\n",
- )
- total_completions: int | None = Field(
- default=None,
- alias="TotalCompletions",
- description="The total number of replicas desired to reach the Completed\nstate. If unset, will default to the value of `MaxConcurrent`\n",
- )
+ max_concurrent: Annotated[
+ int | None,
+ Field(
+ alias="MaxConcurrent",
+ description="The maximum number of replicas to run simultaneously.\n",
+ ),
+ ] = 1
+ total_completions: Annotated[
+ int | None,
+ Field(
+ alias="TotalCompletions",
+ description="The total number of replicas desired to reach the Completed\nstate. If unset, will default to the value of `MaxConcurrent`\n",
+ ),
+ ] = None
class Mode(BaseModel):
@@ -3182,18 +3703,22 @@ class Mode(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- replicated: Replicated | None = Field(default=None, alias="Replicated")
- global_: dict[str, Any] | None = Field(default=None, alias="Global")
- replicated_job: ReplicatedJob | None = Field(
- default=None,
- alias="ReplicatedJob",
- description="The mode used for services with a finite number of tasks that run\nto a completed state.\n",
- )
- global_job: dict[str, Any] | None = Field(
- default=None,
- alias="GlobalJob",
- description="The mode used for services which run a task to the completed state\non each valid node.\n",
- )
+ replicated: Annotated[Replicated | None, Field(alias="Replicated")] = None
+ global_: Annotated[dict[str, Any] | None, Field(alias="Global")] = None
+ replicated_job: Annotated[
+ ReplicatedJob | None,
+ Field(
+ alias="ReplicatedJob",
+ description="The mode used for services with a finite number of tasks that run\nto a completed state.\n",
+ ),
+ ] = None
+ global_job: Annotated[
+ dict[str, Any] | None,
+ Field(
+ alias="GlobalJob",
+ description="The mode used for services which run a task to the completed state\non each valid node.\n",
+ ),
+ ] = None
class FailureAction(str, Enum):
@@ -3228,36 +3753,47 @@ class UpdateConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- parallelism: int | None = Field(
- default=None,
- alias="Parallelism",
- description="Maximum number of tasks to be updated in one iteration (0 means\nunlimited parallelism).\n",
- )
- delay: int | None = Field(
- default=None,
- alias="Delay",
- description="Amount of time between updates, in nanoseconds.",
- )
- failure_action: FailureAction | None = Field(
- default=None,
- alias="FailureAction",
- description="Action to take if an updated task fails to run, or stops running\nduring the update.\n",
- )
- monitor: int | None = Field(
- default=None,
- alias="Monitor",
- description="Amount of time to monitor each updated task for failures, in\nnanoseconds.\n",
- )
- max_failure_ratio: float | None = Field(
- default=0,
- alias="MaxFailureRatio",
- description="The fraction of tasks that may fail during an update before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n",
- )
- order: Order | None = Field(
- default=None,
- alias="Order",
- description="The order of operations when rolling out an updated task. Either\nthe old task is shut down before the new task is started, or the\nnew task is started before the old task is shut down.\n",
- )
+ parallelism: Annotated[
+ int | None,
+ Field(
+ alias="Parallelism",
+ description="Maximum number of tasks to be updated in one iteration (0 means\nunlimited parallelism).\n",
+ ),
+ ] = None
+ delay: Annotated[
+ int | None,
+ Field(
+ alias="Delay", description="Amount of time between updates, in nanoseconds."
+ ),
+ ] = None
+ failure_action: Annotated[
+ FailureAction | None,
+ Field(
+ alias="FailureAction",
+ description="Action to take if an updated task fails to run, or stops running\nduring the update.\n",
+ ),
+ ] = None
+ monitor: Annotated[
+ int | None,
+ Field(
+ alias="Monitor",
+ description="Amount of time to monitor each updated task for failures, in\nnanoseconds.\n",
+ ),
+ ] = None
+ max_failure_ratio: Annotated[
+ float | None,
+ Field(
+ alias="MaxFailureRatio",
+ description="The fraction of tasks that may fail during an update before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n",
+ ),
+ ] = 0
+ order: Annotated[
+ Order | None,
+ Field(
+ alias="Order",
+ description="The order of operations when rolling out an updated task. Either\nthe old task is shut down before the new task is started, or the\nnew task is started before the old task is shut down.\n",
+ ),
+ ] = None
class FailureAction1(str, Enum):
@@ -3291,36 +3827,48 @@ class RollbackConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- parallelism: int | None = Field(
- default=None,
- alias="Parallelism",
- description="Maximum number of tasks to be rolled back in one iteration (0 means\nunlimited parallelism).\n",
- )
- delay: int | None = Field(
- default=None,
- alias="Delay",
- description="Amount of time between rollback iterations, in nanoseconds.\n",
- )
- failure_action: FailureAction1 | None = Field(
- default=None,
- alias="FailureAction",
- description="Action to take if an rolled back task fails to run, or stops\nrunning during the rollback.\n",
- )
- monitor: int | None = Field(
- default=None,
- alias="Monitor",
- description="Amount of time to monitor each rolled back task for failures, in\nnanoseconds.\n",
- )
- max_failure_ratio: float | None = Field(
- default=0,
- alias="MaxFailureRatio",
- description="The fraction of tasks that may fail during a rollback before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n",
- )
- order: Order1 | None = Field(
- default=None,
- alias="Order",
- description="The order of operations when rolling back a task. Either the old\ntask is shut down before the new task is started, or the new task\nis started before the old task is shut down.\n",
- )
+ parallelism: Annotated[
+ int | None,
+ Field(
+ alias="Parallelism",
+ description="Maximum number of tasks to be rolled back in one iteration (0 means\nunlimited parallelism).\n",
+ ),
+ ] = None
+ delay: Annotated[
+ int | None,
+ Field(
+ alias="Delay",
+ description="Amount of time between rollback iterations, in nanoseconds.\n",
+ ),
+ ] = None
+ failure_action: Annotated[
+ FailureAction1 | None,
+ Field(
+ alias="FailureAction",
+ description="Action to take if an rolled back task fails to run, or stops\nrunning during the rollback.\n",
+ ),
+ ] = None
+ monitor: Annotated[
+ int | None,
+ Field(
+ alias="Monitor",
+ description="Amount of time to monitor each rolled back task for failures, in\nnanoseconds.\n",
+ ),
+ ] = None
+ max_failure_ratio: Annotated[
+ float | None,
+ Field(
+ alias="MaxFailureRatio",
+ description="The fraction of tasks that may fail during a rollback before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n",
+ ),
+ ] = 0
+ order: Annotated[
+ Order1 | None,
+ Field(
+ alias="Order",
+ description="The order of operations when rolling back a task. Either the old\ntask is shut down before the new task is started, or the new task\nis started before the old task is shut down.\n",
+ ),
+ ] = None
class PublishMode(str, Enum):
@@ -3345,20 +3893,24 @@ class EndpointPortConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(default=None, alias="Name")
- protocol: Type | None = Field(default=None, alias="Protocol")
- target_port: int | None = Field(
- default=None, alias="TargetPort", description="The port inside the container."
- )
- published_port: int | None = Field(
- default=None, alias="PublishedPort", description="The port on the swarm hosts."
- )
- publish_mode: PublishMode | None = Field(
- default=PublishMode.ingress,
- alias="PublishMode",
-        description='The mode in which port is published.\n\n- "ingress" makes the target port accessible on every node,\n  regardless of whether there is a task for the service running on\n  that node or not.\n- "host" bypasses the routing mesh and publish the port directly on\n  the swarm node where that service is running.\n',
- examples=["ingress"],
- )
+ name: Annotated[str | None, Field(alias="Name")] = None
+ protocol: Annotated[Type | None, Field(alias="Protocol")] = None
+ target_port: Annotated[
+ int | None,
+ Field(alias="TargetPort", description="The port inside the container."),
+ ] = None
+ published_port: Annotated[
+ int | None,
+ Field(alias="PublishedPort", description="The port on the swarm hosts."),
+ ] = None
+ publish_mode: Annotated[
+ PublishMode | None,
+ Field(
+ alias="PublishMode",
+            description='The mode in which port is published.\n\n- "ingress" makes the target port accessible on every node,\n  regardless of whether there is a task for the service running on\n  that node or not.\n- "host" bypasses the routing mesh and publish the port directly on\n  the swarm node where that service is running.\n',
+ examples=["ingress"],
+ ),
+ ] = PublishMode.ingress
class Mode1(str, Enum):
@@ -3379,33 +3931,37 @@ class EndpointSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- mode: Mode1 | None = Field(
- default=Mode1.vip,
- alias="Mode",
- description="The mode of resolution to use for internal load balancing between tasks.\n",
- )
- ports: list[EndpointPortConfig] | None = Field(
- default=None,
- alias="Ports",
- description="List of exposed ports that this service is accessible on from the\noutside. Ports can only be provided if `vip` resolution mode is used.\n",
- )
+ mode: Annotated[
+ Mode1 | None,
+ Field(
+ alias="Mode",
+ description="The mode of resolution to use for internal load balancing between tasks.\n",
+ ),
+ ] = Mode1.vip
+ ports: Annotated[
+ list[EndpointPortConfig] | None,
+ Field(
+ alias="Ports",
+ description="List of exposed ports that this service is accessible on from the\noutside. Ports can only be provided if `vip` resolution mode is used.\n",
+ ),
+ ] = None
class VirtualIP(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- network_id: str | None = Field(default=None, alias="NetworkID")
- addr: str | None = Field(default=None, alias="Addr")
+ network_id: Annotated[str | None, Field(alias="NetworkID")] = None
+ addr: Annotated[str | None, Field(alias="Addr")] = None
class Endpoint(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- spec: EndpointSpec | None = Field(default=None, alias="Spec")
- ports: list[EndpointPortConfig] | None = Field(default=None, alias="Ports")
- virtual_i_ps: list[VirtualIP] | None = Field(default=None, alias="VirtualIPs")
+ spec: Annotated[EndpointSpec | None, Field(alias="Spec")] = None
+ ports: Annotated[list[EndpointPortConfig] | None, Field(alias="Ports")] = None
+ virtual_i_ps: Annotated[list[VirtualIP] | None, Field(alias="VirtualIPs")] = None
class State(str, Enum):
@@ -3422,10 +3978,10 @@ class UpdateStatus(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- state: State | None = Field(default=None, alias="State")
- started_at: str | None = Field(default=None, alias="StartedAt")
- completed_at: str | None = Field(default=None, alias="CompletedAt")
- message: str | None = Field(default=None, alias="Message")
+ state: Annotated[State | None, Field(alias="State")] = None
+ started_at: Annotated[str | None, Field(alias="StartedAt")] = None
+ completed_at: Annotated[str | None, Field(alias="CompletedAt")] = None
+ message: Annotated[str | None, Field(alias="Message")] = None
class ServiceStatus(BaseModel):
@@ -3438,23 +3994,29 @@ class ServiceStatus(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- running_tasks: int | None = Field(
- default=None,
- alias="RunningTasks",
- description="The number of tasks for the service currently in the Running state.\n",
- examples=[7],
- )
- desired_tasks: int | None = Field(
- default=None,
- alias="DesiredTasks",
- description="The number of tasks for the service desired to be running.\nFor replicated services, this is the replica count from the\nservice spec. For global services, this is computed by taking\ncount of all tasks for the service with a Desired State other\nthan Shutdown.\n",
- examples=[10],
- )
- completed_tasks: int | None = Field(
- default=None,
- alias="CompletedTasks",
- description="The number of tasks for a job that are in the Completed state.\nThis field must be cross-referenced with the service type, as the\nvalue of 0 may mean the service is not in a job mode, or it may\nmean the job-mode service has no tasks yet Completed.\n",
- )
+ running_tasks: Annotated[
+ int | None,
+ Field(
+ alias="RunningTasks",
+ description="The number of tasks for the service currently in the Running state.\n",
+ examples=[7],
+ ),
+ ] = None
+ desired_tasks: Annotated[
+ int | None,
+ Field(
+ alias="DesiredTasks",
+ description="The number of tasks for the service desired to be running.\nFor replicated services, this is the replica count from the\nservice spec. For global services, this is computed by taking\ncount of all tasks for the service with a Desired State other\nthan Shutdown.\n",
+ examples=[10],
+ ),
+ ] = None
+ completed_tasks: Annotated[
+ int | None,
+ Field(
+ alias="CompletedTasks",
+ description="The number of tasks for a job that are in the Completed state.\nThis field must be cross-referenced with the service type, as the\nvalue of 0 may mean the service is not in a job mode, or it may\nmean the job-mode service has no tasks yet Completed.\n",
+ ),
+ ] = None
class JobStatus(BaseModel):
@@ -3469,48 +4031,53 @@ class JobStatus(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- job_iteration: ObjectVersion | None = Field(
- default=None,
- alias="JobIteration",
- description='JobIteration is a value increased each time a Job is executed,\nsuccessfully or otherwise. "Executed", in this case, means the\njob as a whole has been started, not that an individual Task has\nbeen launched. A job is "Executed" when its ServiceSpec is\nupdated. JobIteration can be used to disambiguate Tasks belonging\nto different executions of a job. Though JobIteration will\nincrease with each subsequent execution, it may not necessarily\nincrease by 1, and so JobIteration should not be used to\n',
- )
- last_execution: str | None = Field(
- default=None,
- alias="LastExecution",
- description="The last time, as observed by the server, that this job was\nstarted.\n",
- )
+ job_iteration: Annotated[
+ ObjectVersion | None,
+ Field(
+ alias="JobIteration",
+ description='JobIteration is a value increased each time a Job is executed,\nsuccessfully or otherwise. "Executed", in this case, means the\njob as a whole has been started, not that an individual Task has\nbeen launched. A job is "Executed" when its ServiceSpec is\nupdated. JobIteration can be used to disambiguate Tasks belonging\nto different executions of a job. Though JobIteration will\nincrease with each subsequent execution, it may not necessarily\nincrease by 1, and so JobIteration should not be used to\n',
+ ),
+ ] = None
+ last_execution: Annotated[
+ str | None,
+ Field(
+ alias="LastExecution",
+ description="The last time, as observed by the server, that this job was\nstarted.\n",
+ ),
+ ] = None
class ImageDeleteResponseItem(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- untagged: str | None = Field(
- default=None,
- alias="Untagged",
- description="The image ID of an image that was untagged",
- )
- deleted: str | None = Field(
- default=None,
- alias="Deleted",
- description="The image ID of an image that was deleted",
- )
+ untagged: Annotated[
+ str | None,
+ Field(
+ alias="Untagged", description="The image ID of an image that was untagged"
+ ),
+ ] = None
+ deleted: Annotated[
+ str | None,
+ Field(alias="Deleted", description="The image ID of an image that was deleted"),
+ ] = None
class ServiceUpdateResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- warnings: list[str] | None = Field(
- default=None, alias="Warnings", description="Optional warning messages"
- )
+ warnings: Annotated[
+ list[str] | None,
+ Field(alias="Warnings", description="Optional warning messages"),
+ ] = None
class HostConfig1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- network_mode: str | None = Field(default=None, alias="NetworkMode")
+ network_mode: Annotated[str | None, Field(alias="NetworkMode")] = None
class Driver(BaseModel):
@@ -3521,106 +4088,126 @@ class Driver(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(
- ..., alias="Name", description="Name of the driver.", examples=["some-driver"]
- )
- options: dict[str, str] | None = Field(
- default=None,
- alias="Options",
- description="Key/value map of driver-specific options.",
- examples=[
- {
- "OptionA": "value for driver-specific option A",
- "OptionB": "value for driver-specific option B",
- }
- ],
- )
+ name: Annotated[
+ str,
+ Field(
+ alias="Name", description="Name of the driver.", examples=["some-driver"]
+ ),
+ ]
+ options: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Options",
+ description="Key/value map of driver-specific options.",
+ examples=[
+ {
+ "OptionA": "value for driver-specific option A",
+ "OptionB": "value for driver-specific option B",
+ }
+ ],
+ ),
+ ] = None
class SecretSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None, alias="Name", description="User-defined name of the secret."
- )
- labels: dict[str, str] | None = Field(
- default=None,
- alias="Labels",
- description="User-defined key/value metadata.",
- examples=[
- {
- "com.example.some-label": "some-value",
- "com.example.some-other-label": "some-other-value",
- }
- ],
- )
- data: str | None = Field(
- default=None,
- alias="Data",
- description="Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))\ndata to store as secret.\n\nThis field is only used to _create_ a secret, and is not returned by\nother endpoints.\n",
- examples=[""],
- )
- driver: Driver | None = Field(
- default=None,
- alias="Driver",
- description="Name of the secrets driver used to fetch the secret's value from an\nexternal secret store.\n",
- )
- templating: Driver | None = Field(
- default=None,
- alias="Templating",
- description="Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. If no driver is set, no templating is used.\n",
- )
+ name: Annotated[
+ str | None, Field(alias="Name", description="User-defined name of the secret.")
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Labels",
+ description="User-defined key/value metadata.",
+ examples=[
+ {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value",
+ }
+ ],
+ ),
+ ] = None
+ data: Annotated[
+ str | None,
+ Field(
+ alias="Data",
+ description="Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))\ndata to store as secret.\n\nThis field is only used to _create_ a secret, and is not returned by\nother endpoints.\n",
+ examples=[""],
+ ),
+ ] = None
+ driver: Annotated[
+ Driver | None,
+ Field(
+ alias="Driver",
+ description="Name of the secrets driver used to fetch the secret's value from an\nexternal secret store.\n",
+ ),
+ ] = None
+ templating: Annotated[
+ Driver | None,
+ Field(
+ alias="Templating",
+ description="Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. If no driver is set, no templating is used.\n",
+ ),
+ ] = None
class Secret1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None, alias="ID", examples=["blt1owaxmitz71s9v5zh81zun"]
- )
- version: ObjectVersion | None = Field(default=None, alias="Version")
- created_at: str | None = Field(
- default=None, alias="CreatedAt", examples=["2017-07-20T13:55:28.678958722Z"]
- )
- updated_at: str | None = Field(
- default=None, alias="UpdatedAt", examples=["2017-07-20T13:55:28.678958722Z"]
- )
- spec: SecretSpec | None = Field(default=None, alias="Spec")
+ id: Annotated[
+ str | None, Field(alias="ID", examples=["blt1owaxmitz71s9v5zh81zun"])
+ ] = None
+ version: Annotated[ObjectVersion | None, Field(alias="Version")] = None
+ created_at: Annotated[
+ str | None,
+ Field(alias="CreatedAt", examples=["2017-07-20T13:55:28.678958722Z"]),
+ ] = None
+ updated_at: Annotated[
+ str | None,
+ Field(alias="UpdatedAt", examples=["2017-07-20T13:55:28.678958722Z"]),
+ ] = None
+ spec: Annotated[SecretSpec | None, Field(alias="Spec")] = None
class ConfigSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None, alias="Name", description="User-defined name of the config."
- )
- labels: dict[str, str] | None = Field(
- default=None, alias="Labels", description="User-defined key/value metadata."
- )
- data: str | None = Field(
- default=None,
- alias="Data",
- description="Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))\nconfig data.\n",
- )
- templating: Driver | None = Field(
- default=None,
- alias="Templating",
- description="Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. If no driver is set, no templating is used.\n",
- )
+ name: Annotated[
+ str | None, Field(alias="Name", description="User-defined name of the config.")
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(alias="Labels", description="User-defined key/value metadata."),
+ ] = None
+ data: Annotated[
+ str | None,
+ Field(
+ alias="Data",
+ description="Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))\nconfig data.\n",
+ ),
+ ] = None
+ templating: Annotated[
+ Driver | None,
+ Field(
+ alias="Templating",
+ description="Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. If no driver is set, no templating is used.\n",
+ ),
+ ] = None
class Config2(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(default=None, alias="ID")
- version: ObjectVersion | None = Field(default=None, alias="Version")
- created_at: str | None = Field(default=None, alias="CreatedAt")
- updated_at: str | None = Field(default=None, alias="UpdatedAt")
- spec: ConfigSpec | None = Field(default=None, alias="Spec")
+ id: Annotated[str | None, Field(alias="ID")] = None
+ version: Annotated[ObjectVersion | None, Field(alias="Version")] = None
+ created_at: Annotated[str | None, Field(alias="CreatedAt")] = None
+ updated_at: Annotated[str | None, Field(alias="UpdatedAt")] = None
+ spec: Annotated[ConfigSpec | None, Field(alias="Spec")] = None
class Status2(str, Enum):
@@ -3647,36 +4234,41 @@ class ContainerWaitExitError(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- message: str | None = Field(
- default=None, alias="Message", description="Details of an error"
- )
+ message: Annotated[
+ str | None, Field(alias="Message", description="Details of an error")
+ ] = None
class Platform1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(..., alias="Name")
+ name: Annotated[str, Field(alias="Name")]
class Component(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str = Field(
- ..., alias="Name", description="Name of the component\n", examples=["Engine"]
- )
- version: str = Field(
- ...,
- alias="Version",
- description="Version of the component\n",
- examples=["19.03.12"],
- )
- details: dict[str, Any] | None = Field(
- default=None,
- alias="Details",
- description="Key/value pairs of strings with additional information about the\ncomponent. These values are intended for informational purposes\nonly, and their content is not defined, and not part of the API\nspecification.\n\nThese messages can be printed by the client as information to the user.\n",
- )
+ name: Annotated[
+ str,
+ Field(alias="Name", description="Name of the component\n", examples=["Engine"]),
+ ]
+ version: Annotated[
+ str,
+ Field(
+ alias="Version",
+ description="Version of the component\n",
+ examples=["19.03.12"],
+ ),
+ ]
+ details: Annotated[
+ dict[str, Any] | None,
+ Field(
+ alias="Details",
+ description="Key/value pairs of strings with additional information about the\ncomponent. These values are intended for informational purposes\nonly, and their content is not defined, and not part of the API\nspecification.\n\nThese messages can be printed by the client as information to the user.\n",
+ ),
+ ] = None
class SystemVersion(BaseModel):
@@ -3688,72 +4280,91 @@ class SystemVersion(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- platform: Platform1 | None = Field(default=None, alias="Platform")
- components: list[Component] | None = Field(
- default=None,
- alias="Components",
- description="Information about system components\n",
- )
- version: str | None = Field(
- default=None,
- alias="Version",
- description="The version of the daemon",
- examples=["19.03.12"],
- )
- api_version: str | None = Field(
- default=None,
- alias="ApiVersion",
- description="The default (and highest) API version that is supported by the daemon\n",
- examples=["1.40"],
- )
- min_api_version: str | None = Field(
- default=None,
- alias="MinAPIVersion",
- description="The minimum API version that is supported by the daemon\n",
- examples=["1.12"],
- )
- git_commit: str | None = Field(
- default=None,
- alias="GitCommit",
- description="The Git commit of the source code that was used to build the daemon\n",
- examples=["48a66213fe"],
- )
- go_version: str | None = Field(
- default=None,
- alias="GoVersion",
- description="The version Go used to compile the daemon, and the version of the Go\nruntime in use.\n",
- examples=["go1.13.14"],
- )
- os: str | None = Field(
- default=None,
- alias="Os",
- description='The operating system that the daemon is running on ("linux" or "windows")\n',
- examples=["linux"],
- )
- arch: str | None = Field(
- default=None,
- alias="Arch",
- description="The architecture that the daemon is running on\n",
- examples=["amd64"],
- )
- kernel_version: str | None = Field(
- default=None,
- alias="KernelVersion",
- description="The kernel version (`uname -r`) that the daemon is running on.\n\nThis field is omitted when empty.\n",
- examples=["4.19.76-linuxkit"],
- )
- experimental: bool | None = Field(
- default=None,
- alias="Experimental",
- description="Indicates if the daemon is started with experimental features enabled.\n\nThis field is omitted when empty / false.\n",
- examples=[True],
- )
- build_time: str | None = Field(
- default=None,
- alias="BuildTime",
- description="The date and time that the daemon was compiled.\n",
- examples=["2020-06-22T15:49:27.000000000+00:00"],
- )
+ platform: Annotated[Platform1 | None, Field(alias="Platform")] = None
+ components: Annotated[
+ list[Component] | None,
+ Field(alias="Components", description="Information about system components\n"),
+ ] = None
+ version: Annotated[
+ str | None,
+ Field(
+ alias="Version",
+ description="The version of the daemon",
+ examples=["19.03.12"],
+ ),
+ ] = None
+ api_version: Annotated[
+ str | None,
+ Field(
+ alias="ApiVersion",
+ description="The default (and highest) API version that is supported by the daemon\n",
+ examples=["1.40"],
+ ),
+ ] = None
+ min_api_version: Annotated[
+ str | None,
+ Field(
+ alias="MinAPIVersion",
+ description="The minimum API version that is supported by the daemon\n",
+ examples=["1.12"],
+ ),
+ ] = None
+ git_commit: Annotated[
+ str | None,
+ Field(
+ alias="GitCommit",
+ description="The Git commit of the source code that was used to build the daemon\n",
+ examples=["48a66213fe"],
+ ),
+ ] = None
+ go_version: Annotated[
+ str | None,
+ Field(
+ alias="GoVersion",
+ description="The version Go used to compile the daemon, and the version of the Go\nruntime in use.\n",
+ examples=["go1.13.14"],
+ ),
+ ] = None
+ os: Annotated[
+ str | None,
+ Field(
+ alias="Os",
+ description='The operating system that the daemon is running on ("linux" or "windows")\n',
+ examples=["linux"],
+ ),
+ ] = None
+ arch: Annotated[
+ str | None,
+ Field(
+ alias="Arch",
+ description="The architecture that the daemon is running on\n",
+ examples=["amd64"],
+ ),
+ ] = None
+ kernel_version: Annotated[
+ str | None,
+ Field(
+ alias="KernelVersion",
+ description="The kernel version (`uname -r`) that the daemon is running on.\n\nThis field is omitted when empty.\n",
+ examples=["4.19.76-linuxkit"],
+ ),
+ ] = None
+ experimental: Annotated[
+ bool | None,
+ Field(
+ alias="Experimental",
+ description="Indicates if the daemon is started with experimental features enabled.\n\nThis field is omitted when empty / false.\n",
+ examples=[True],
+ ),
+ ] = None
+ build_time: Annotated[
+ str | None,
+ Field(
+ alias="BuildTime",
+ description="The date and time that the daemon was compiled.\n",
+ examples=["2020-06-22T15:49:27.000000000+00:00"],
+ ),
+ ] = None
class CgroupDriver(str, Enum):
@@ -3798,15 +4409,18 @@ class DefaultAddressPool(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- base: str | None = Field(
- default=None,
- alias="Base",
- description="The network address in CIDR format",
- examples=["10.10.0.0/16"],
- )
- size: int | None = Field(
- default=None, alias="Size", description="The network pool size", examples=["24"]
- )
+ base: Annotated[
+ str | None,
+ Field(
+ alias="Base",
+ description="The network address in CIDR format",
+ examples=["10.10.0.0/16"],
+ ),
+ ] = None
+ size: Annotated[
+ int | None,
+ Field(alias="Size", description="The network pool size", examples=["24"]),
+ ] = None
class PluginsInfo(BaseModel):
@@ -3824,41 +4438,49 @@ class PluginsInfo(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- volume: list[str] | None = Field(
- default=None,
- alias="Volume",
- description="Names of available volume-drivers, and network-driver plugins.",
- examples=[["local"]],
- )
- network: list[str] | None = Field(
- default=None,
- alias="Network",
- description="Names of available network-drivers, and network-driver plugins.",
- examples=[["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]],
- )
- authorization: list[str] | None = Field(
- default=None,
- alias="Authorization",
- description="Names of available authorization plugins.",
- examples=[["img-authz-plugin", "hbm"]],
- )
- log: list[str] | None = Field(
- default=None,
- alias="Log",
- description="Names of available logging-drivers, and logging-driver plugins.",
- examples=[
- [
- "awslogs",
- "fluentd",
- "gcplogs",
- "gelf",
- "journald",
- "json-file",
- "splunk",
- "syslog",
- ]
- ],
- )
+ volume: Annotated[
+ list[str] | None,
+ Field(
+ alias="Volume",
+ description="Names of available volume-drivers, and network-driver plugins.",
+ examples=[["local"]],
+ ),
+ ] = None
+ network: Annotated[
+ list[str] | None,
+ Field(
+ alias="Network",
+ description="Names of available network-drivers, and network-driver plugins.",
+ examples=[["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]],
+ ),
+ ] = None
+ authorization: Annotated[
+ list[str] | None,
+ Field(
+ alias="Authorization",
+ description="Names of available authorization plugins.",
+ examples=[["img-authz-plugin", "hbm"]],
+ ),
+ ] = None
+ log: Annotated[
+ list[str] | None,
+ Field(
+ alias="Log",
+ description="Names of available logging-drivers, and logging-driver plugins.",
+ examples=[
+ [
+ "awslogs",
+ "fluentd",
+ "gcplogs",
+ "gelf",
+ "journald",
+ "json-file",
+ "splunk",
+ "syslog",
+ ]
+ ],
+ ),
+ ] = None
class IndexInfo(BaseModel):
@@ -3869,36 +4491,44 @@ class IndexInfo(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description='Name of the registry, such as "docker.io".\n',
- examples=["docker.io"],
- )
- mirrors: list[str] | None = Field(
- default=None,
- alias="Mirrors",
- description="List of mirrors, expressed as URIs.\n",
- examples=[
- [
- "https://hub-mirror.corp.example.com:5000/",
- "https://registry-2.docker.io/",
- "https://registry-3.docker.io/",
- ]
- ],
- )
- secure: bool | None = Field(
- default=None,
- alias="Secure",
- description="Indicates if the registry is part of the list of insecure\nregistries.\n\nIf `false`, the registry is insecure. Insecure registries accept\nun-encrypted (HTTP) and/or untrusted (HTTPS with certificates from\nunknown CAs) communication.\n\n> **Warning**: Insecure registries can be useful when running a local\n> registry. However, because its use creates security vulnerabilities\n> it should ONLY be enabled for testing purposes. For increased\n> security, users should add their CA to their system's list of\n> trusted CAs instead of enabling this option.\n",
- examples=[True],
- )
- official: bool | None = Field(
- default=None,
- alias="Official",
- description="Indicates whether this is an official registry (i.e., Docker Hub / docker.io)\n",
- examples=[True],
- )
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name",
+ description='Name of the registry, such as "docker.io".\n',
+ examples=["docker.io"],
+ ),
+ ] = None
+ mirrors: Annotated[
+ list[str] | None,
+ Field(
+ alias="Mirrors",
+ description="List of mirrors, expressed as URIs.\n",
+ examples=[
+ [
+ "https://hub-mirror.corp.example.com:5000/",
+ "https://registry-2.docker.io/",
+ "https://registry-3.docker.io/",
+ ]
+ ],
+ ),
+ ] = None
+ secure: Annotated[
+ bool | None,
+ Field(
+ alias="Secure",
+ description="Indicates if the registry is part of the list of insecure\nregistries.\n\nIf `false`, the registry is insecure. Insecure registries accept\nun-encrypted (HTTP) and/or untrusted (HTTPS with certificates from\nunknown CAs) communication.\n\n> **Warning**: Insecure registries can be useful when running a local\n> registry. However, because its use creates security vulnerabilities\n> it should ONLY be enabled for testing purposes. For increased\n> security, users should add their CA to their system's list of\n> trusted CAs instead of enabling this option.\n",
+ examples=[True],
+ ),
+ ] = None
+ official: Annotated[
+ bool | None,
+ Field(
+ alias="Official",
+ description="Indicates whether this is an official registry (i.e., Docker Hub / docker.io)\n",
+ examples=[True],
+ ),
+ ] = None
class Runtime(BaseModel):
@@ -3915,17 +4545,21 @@ class Runtime(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- path: str | None = Field(
- default=None,
- description="Name and, optional, path, of the OCI executable binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n",
- examples=["/usr/local/bin/my-oci-runtime"],
- )
- runtime_args: list[str] | None = Field(
- default=None,
- alias="runtimeArgs",
- description="List of command-line arguments to pass to the runtime when invoked.\n",
- examples=[["--debug", "--systemd-cgroup=false"]],
- )
+ path: Annotated[
+ str | None,
+ Field(
+ description="Name and, optional, path, of the OCI executable binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n",
+ examples=["/usr/local/bin/my-oci-runtime"],
+ ),
+ ] = None
+ runtime_args: Annotated[
+ list[str] | None,
+ Field(
+ alias="runtimeArgs",
+ description="List of command-line arguments to pass to the runtime when invoked.\n",
+ examples=[["--debug", "--systemd-cgroup=false"]],
+ ),
+ ] = None
class Commit(BaseModel):
@@ -3939,18 +4573,22 @@ class Commit(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None,
- alias="ID",
- description="Actual commit ID of external tool.",
- examples=["cfb82a876ecc11b5ca0977d1733adbe58599088a"],
- )
- expected: str | None = Field(
- default=None,
- alias="Expected",
- description="Commit ID of external tool expected by dockerd as set at build time.\n",
- examples=["2d41c047c83e09a6d61d464906feb2a2f3c52aa4"],
- )
+ id: Annotated[
+ str | None,
+ Field(
+ alias="ID",
+ description="Actual commit ID of external tool.",
+ examples=["cfb82a876ecc11b5ca0977d1733adbe58599088a"],
+ ),
+ ] = None
+ expected: Annotated[
+ str | None,
+ Field(
+ alias="Expected",
+ description="Commit ID of external tool expected by dockerd as set at build time.\n",
+ examples=["2d41c047c83e09a6d61d464906feb2a2f3c52aa4"],
+ ),
+ ] = None
class LocalNodeState(str, Enum):
@@ -3974,16 +4612,20 @@ class PeerNode(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- node_id: str | None = Field(
- default=None,
- alias="NodeID",
- description="Unique identifier of for this node in the swarm.",
- )
- addr: str | None = Field(
- default=None,
- alias="Addr",
- description="IP address and ports at which this node can be reached.\n",
- )
+ node_id: Annotated[
+ str | None,
+ Field(
+ alias="NodeID",
+ description="Unique identifier of for this node in the swarm.",
+ ),
+ ] = None
+ addr: Annotated[
+ str | None,
+ Field(
+ alias="Addr",
+ description="IP address and ports at which this node can be reached.\n",
+ ),
+ ] = None
class NetworkAttachmentConfig(BaseModel):
@@ -3995,21 +4637,27 @@ class NetworkAttachmentConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- target: str | None = Field(
- default=None,
- alias="Target",
- description="The target network for attachment. Must be a network name or ID.\n",
- )
- aliases: list[str] | None = Field(
- default=None,
- alias="Aliases",
- description="Discoverable alternate names for the service on this network.\n",
- )
- driver_opts: dict[str, str] | None = Field(
- default=None,
- alias="DriverOpts",
- description="Driver attachment options for the network target.\n",
- )
+ target: Annotated[
+ str | None,
+ Field(
+ alias="Target",
+ description="The target network for attachment. Must be a network name or ID.\n",
+ ),
+ ] = None
+ aliases: Annotated[
+ list[str] | None,
+ Field(
+ alias="Aliases",
+ description="Discoverable alternate names for the service on this network.\n",
+ ),
+ ] = None
+ driver_opts: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="DriverOpts",
+ description="Driver attachment options for the network target.\n",
+ ),
+ ] = None
class EventActor(BaseModel):
@@ -4022,24 +4670,30 @@ class EventActor(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None,
- alias="ID",
- description="The ID of the object emitting the event",
- examples=["ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"],
- )
- attributes: dict[str, str] | None = Field(
- default=None,
- alias="Attributes",
- description="Various key/value attributes of the object, depending on its type.\n",
- examples=[
- {
- "com.example.some-label": "some-label-value",
- "image": "alpine:latest",
- "name": "my-container",
- }
- ],
- )
+ id: Annotated[
+ str | None,
+ Field(
+ alias="ID",
+ description="The ID of the object emitting the event",
+ examples=[
+ "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
+ ],
+ ),
+ ] = None
+ attributes: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Attributes",
+ description="Various key/value attributes of the object, depending on its type.\n",
+ examples=[
+ {
+ "com.example.some-label": "some-label-value",
+ "image": "alpine:latest",
+ "name": "my-container",
+ }
+ ],
+ ),
+ ] = None
class Type5(str, Enum):
@@ -4080,32 +4734,36 @@ class SystemEventsResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- type: Type5 | None = Field(
- default=None,
- alias="Type",
- description="The type of object emitting the event",
- examples=["container"],
- )
- action: str | None = Field(
- default=None,
- alias="Action",
- description="The type of event",
- examples=["create"],
- )
- actor: EventActor | None = Field(default=None, alias="Actor")
- scope: Scope1 | None = Field(
- default=None,
- description="Scope of the event. Engine events are `local` scope. Cluster (Swarm)\nevents are `swarm` scope.\n",
- )
- time: int | None = Field(
- default=None, description="Timestamp of event", examples=[1629574695]
- )
- time_nano: int | None = Field(
- default=None,
- alias="timeNano",
- description="Timestamp of event, with nanosecond accuracy",
- examples=[1629574695515050031],
- )
+ type: Annotated[
+ Type5 | None,
+ Field(
+ alias="Type",
+ description="The type of object emitting the event",
+ examples=["container"],
+ ),
+ ] = None
+ action: Annotated[
+ str | None,
+ Field(alias="Action", description="The type of event", examples=["create"]),
+ ] = None
+ actor: Annotated[EventActor | None, Field(alias="Actor")] = None
+ scope: Annotated[
+ Scope1 | None,
+ Field(
+ description="Scope of the event. Engine events are `local` scope. Cluster (Swarm)\nevents are `swarm` scope.\n"
+ ),
+ ] = None
+ time: Annotated[
+ int | None, Field(description="Timestamp of event", examples=[1629574695])
+ ] = None
+ time_nano: Annotated[
+ int | None,
+ Field(
+ alias="timeNano",
+ description="Timestamp of event, with nanosecond accuracy",
+ examples=[1629574695515050031],
+ ),
+ ] = None
class OCIDescriptor(BaseModel):
@@ -4118,22 +4776,27 @@ class OCIDescriptor(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- media_type: str | None = Field(
- default=None,
- alias="mediaType",
- description="The media type of the object this schema refers to.\n",
- examples=["application/vnd.docker.distribution.manifest.v2+json"],
- )
- digest: str | None = Field(
- default=None,
- description="The digest of the targeted content.\n",
- examples=[
- "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
- ],
- )
- size: int | None = Field(
- default=None, description="The size in bytes of the blob.\n", examples=[3987495]
- )
+ media_type: Annotated[
+ str | None,
+ Field(
+ alias="mediaType",
+ description="The media type of the object this schema refers to.\n",
+ examples=["application/vnd.docker.distribution.manifest.v2+json"],
+ ),
+ ] = None
+ digest: Annotated[
+ str | None,
+ Field(
+ description="The digest of the targeted content.\n",
+ examples=[
+ "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
+ ],
+ ),
+ ] = None
+ size: Annotated[
+ int | None,
+ Field(description="The size in bytes of the blob.\n", examples=[3987495]),
+ ] = None
class OCIPlatform(BaseModel):
@@ -4146,33 +4809,43 @@ class OCIPlatform(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- architecture: str | None = Field(
- default=None,
- description="The CPU architecture, for example `amd64` or `ppc64`.\n",
- examples=["arm"],
- )
- os: str | None = Field(
- default=None,
- description="The operating system, for example `linux` or `windows`.\n",
- examples=["windows"],
- )
- os_version: str | None = Field(
- default=None,
- alias="os.version",
- description="Optional field specifying the operating system version, for example on\nWindows `10.0.19041.1165`.\n",
- examples=["10.0.19041.1165"],
- )
- os_features: list[str] | None = Field(
- default=None,
- alias="os.features",
- description="Optional field specifying an array of strings, each listing a required\nOS feature (for example on Windows `win32k`).\n",
- examples=[["win32k"]],
- )
- variant: str | None = Field(
- default=None,
- description="Optional field specifying a variant of the CPU, for example `v7` to\nspecify ARMv7 when architecture is `arm`.\n",
- examples=["v7"],
- )
+ architecture: Annotated[
+ str | None,
+ Field(
+ description="The CPU architecture, for example `amd64` or `ppc64`.\n",
+ examples=["arm"],
+ ),
+ ] = None
+ os: Annotated[
+ str | None,
+ Field(
+ description="The operating system, for example `linux` or `windows`.\n",
+ examples=["windows"],
+ ),
+ ] = None
+ os_version: Annotated[
+ str | None,
+ Field(
+ alias="os.version",
+ description="Optional field specifying the operating system version, for example on\nWindows `10.0.19041.1165`.\n",
+ examples=["10.0.19041.1165"],
+ ),
+ ] = None
+ os_features: Annotated[
+ list[str] | None,
+ Field(
+ alias="os.features",
+ description="Optional field specifying an array of strings, each listing a required\nOS feature (for example on Windows `win32k`).\n",
+ examples=[["win32k"]],
+ ),
+ ] = None
+ variant: Annotated[
+ str | None,
+ Field(
+ description="Optional field specifying a variant of the CPU, for example `v7` to\nspecify ARMv7 when architecture is `arm`.\n",
+ examples=["v7"],
+ ),
+ ] = None
class DistributionInspectResponse(BaseModel):
@@ -4185,12 +4858,14 @@ class DistributionInspectResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- descriptor: OCIDescriptor = Field(..., alias="Descriptor")
- platforms: list[OCIPlatform] = Field(
- ...,
- alias="Platforms",
- description="An array containing all platforms supported by the image.\n",
- )
+ descriptor: Annotated[OCIDescriptor, Field(alias="Descriptor")]
+ platforms: Annotated[
+ list[OCIPlatform],
+ Field(
+ alias="Platforms",
+ description="An array containing all platforms supported by the image.\n",
+ ),
+ ]
class ResourceObject(BaseModel):
@@ -4203,15 +4878,15 @@ class ResourceObject(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- nano_cp_us: int | None = Field(
- default=None, alias="NanoCPUs", examples=[4000000000]
- )
- memory_bytes: int | None = Field(
- default=None, alias="MemoryBytes", examples=[8272408576]
- )
- generic_resources: GenericResources | None = Field(
- default=None, alias="GenericResources"
- )
+ nano_cp_us: Annotated[
+ int | None, Field(alias="NanoCPUs", examples=[4000000000])
+ ] = None
+ memory_bytes: Annotated[
+ int | None, Field(alias="MemoryBytes", examples=[8272408576])
+ ] = None
+ generic_resources: Annotated[
+ GenericResources | None, Field(alias="GenericResources")
+ ] = None
class Health(BaseModel):
@@ -4223,23 +4898,29 @@ class Health(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- status: Status | None = Field(
- default=None,
- alias="Status",
- description='Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- "none" Indicates there is no healthcheck\n- "starting" Starting indicates that the container is not yet ready\n- "healthy" Healthy indicates that the container is running correctly\n- "unhealthy" Unhealthy indicates that the container has a problem\n',
- examples=["healthy"],
- )
- failing_streak: int | None = Field(
- default=None,
- alias="FailingStreak",
- description="FailingStreak is the number of consecutive failures",
- examples=[0],
- )
- log: list[HealthcheckResult] | None = Field(
- default=None,
- alias="Log",
- description="Log contains the last few results (oldest first)\n",
- )
+ status: Annotated[
+ Status | None,
+ Field(
+ alias="Status",
+ description='Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- "none" Indicates there is no healthcheck\n- "starting" Starting indicates that the container is not yet ready\n- "healthy" Healthy indicates that the container is running correctly\n- "unhealthy" Unhealthy indicates that the container has a problem\n',
+ examples=["healthy"],
+ ),
+ ] = None
+ failing_streak: Annotated[
+ int | None,
+ Field(
+ alias="FailingStreak",
+ description="FailingStreak is the number of consecutive failures",
+ examples=[0],
+ ),
+ ] = None
+ log: Annotated[
+ list[HealthcheckResult] | None,
+ Field(
+ alias="Log",
+ description="Log contains the last few results (oldest first)\n",
+ ),
+ ] = None
class PortMap(RootModel[dict[str, list[PortBinding]] | None]):
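
Note: `PortMap` is one of the few entries that is not a plain `BaseModel`; it wraps `dict[str, list[PortBinding]] | None` in a `RootModel`, so the whole object validates directly from the Docker-style mapping (or from `null`). A small self-contained sketch of the same pattern (`PortBindingSketch`/`PortMapSketch` are stand-ins, assuming the `HostIp`/`HostPort` aliases of the Docker API):

from typing import Annotated

from pydantic import BaseModel, Field, RootModel


class PortBindingSketch(BaseModel):
    host_ip: Annotated[str | None, Field(alias="HostIp")] = None
    host_port: Annotated[str | None, Field(alias="HostPort")] = None


class PortMapSketch(RootModel[dict[str, list[PortBindingSketch]] | None]):
    """Maps 'port/protocol' keys to their host bindings, or is null."""


ports = PortMapSketch.model_validate(
    {"80/tcp": [{"HostIp": "0.0.0.0", "HostPort": "8080"}]}
)
assert ports.root["80/tcp"][0].host_port == "8080"

# The `| None` in the root type also makes an explicit null acceptable.
assert PortMapSketch.model_validate(None).root is None
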
@@ -4263,23 +4944,29 @@ class IPAM(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- driver: str | None = Field(
- default="default",
- alias="Driver",
- description="Name of the IPAM driver to use.",
- examples=["default"],
- )
- config: list[IPAMConfig] | None = Field(
- default=None,
- alias="Config",
- description='List of IPAM configuration options, specified as a map:\n\n```\n{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }\n```\n',
- )
- options: dict[str, str] | None = Field(
- default=None,
- alias="Options",
- description="Driver-specific options, specified as a map.",
- examples=[{"foo": "bar"}],
- )
+ driver: Annotated[
+ str | None,
+ Field(
+ alias="Driver",
+ description="Name of the IPAM driver to use.",
+ examples=["default"],
+ ),
+ ] = "default"
+ config: Annotated[
+ list[IPAMConfig] | None,
+ Field(
+ alias="Config",
+ description='List of IPAM configuration options, specified as a map:\n\n```\n{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }\n```\n',
+ ),
+ ] = None
+ options: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Options",
+ description="Driver-specific options, specified as a map.",
+ examples=[{"foo": "bar"}],
+ ),
+ ] = None
class BuildInfo(BaseModel):
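
Note: defaults other than `None` follow the same rule — in `IPAM` the driver keeps its `"default"` value, and in `ManagerStatus` further down `leader` keeps `False`; the value simply moves from `Field(default=...)` to the right-hand side of the assignment. A quick sketch of the behaviour (illustrative `IPAMSketch` only, not from the patch):

from typing import Annotated

from pydantic import BaseModel, ConfigDict, Field


class IPAMSketch(BaseModel):
    model_config = ConfigDict(populate_by_name=True)

    driver: Annotated[
        str | None,
        Field(alias="Driver", description="Name of the IPAM driver to use."),
    ] = "default"


# The default applies when the key is absent ...
assert IPAMSketch().driver == "default"
# ... and serializing by alias restores the Docker-cased key.
assert IPAMSketch().model_dump(by_alias=True) == {"Driver": "default"}
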
@@ -4289,10 +4976,12 @@ class BuildInfo(BaseModel):
id: str | None = None
stream: str | None = None
error: str | None = None
- error_detail: ErrorDetail | None = Field(default=None, alias="errorDetail")
+ error_detail: Annotated[ErrorDetail | None, Field(alias="errorDetail")] = None
status: str | None = None
progress: str | None = None
- progress_detail: ProgressDetail | None = Field(default=None, alias="progressDetail")
+ progress_detail: Annotated[
+ ProgressDetail | None, Field(alias="progressDetail")
+ ] = None
aux: ImageID | None = None
@@ -4302,10 +4991,12 @@ class CreateImageInfo(BaseModel):
)
id: str | None = None
error: str | None = None
- error_detail: ErrorDetail | None = Field(default=None, alias="errorDetail")
+ error_detail: Annotated[ErrorDetail | None, Field(alias="errorDetail")] = None
status: str | None = None
progress: str | None = None
- progress_detail: ProgressDetail | None = Field(default=None, alias="progressDetail")
+ progress_detail: Annotated[
+ ProgressDetail | None, Field(alias="progressDetail")
+ ] = None
class PushImageInfo(BaseModel):
@@ -4315,7 +5006,9 @@ class PushImageInfo(BaseModel):
error: str | None = None
status: str | None = None
progress: str | None = None
- progress_detail: ProgressDetail | None = Field(default=None, alias="progressDetail")
+ progress_detail: Annotated[
+ ProgressDetail | None, Field(alias="progressDetail")
+ ] = None
class EndpointSettings(BaseModel):
@@ -4326,78 +5019,101 @@ class EndpointSettings(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- ipam_config: EndpointIPAMConfig | None = Field(default=None, alias="IPAMConfig")
- links: list[str] | None = Field(
- default=None, alias="Links", examples=[["container_1", "container_2"]]
- )
- aliases: list[str] | None = Field(
- default=None, alias="Aliases", examples=[["server_x", "server_y"]]
- )
- network_id: str | None = Field(
- default=None,
- alias="NetworkID",
- description="Unique ID of the network.\n",
- examples=["08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"],
- )
- endpoint_id: str | None = Field(
- default=None,
- alias="EndpointID",
- description="Unique ID for the service endpoint in a Sandbox.\n",
- examples=["b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"],
- )
- gateway: str | None = Field(
- default=None,
- alias="Gateway",
- description="Gateway address for this network.\n",
- examples=["172.17.0.1"],
- )
- ip_address: str | None = Field(
- default=None,
- alias="IPAddress",
- description="IPv4 address.\n",
- examples=["172.17.0.4"],
- )
- ip_prefix_len: int | None = Field(
- default=None,
- alias="IPPrefixLen",
- description="Mask length of the IPv4 address.\n",
- examples=[16],
- )
- i_pv6_gateway: str | None = Field(
- default=None,
- alias="IPv6Gateway",
- description="IPv6 gateway address.\n",
- examples=["2001:db8:2::100"],
- )
- global_i_pv6_address: str | None = Field(
- default=None,
- alias="GlobalIPv6Address",
- description="Global IPv6 address.\n",
- examples=["2001:db8::5689"],
- )
- global_i_pv6_prefix_len: int | None = Field(
- default=None,
- alias="GlobalIPv6PrefixLen",
- description="Mask length of the global IPv6 address.\n",
- examples=[64],
- )
- mac_address: str | None = Field(
- default=None,
- alias="MacAddress",
- description="MAC address for the endpoint on this network.\n",
- examples=["02:42:ac:11:00:04"],
- )
- driver_opts: dict[str, str] | None = Field(
- default=None,
- alias="DriverOpts",
- description="DriverOpts is a mapping of driver options and values. These options\nare passed directly to the driver and are driver specific.\n",
- examples=[
- {
- "com.example.some-label": "some-value",
- "com.example.some-other-label": "some-other-value",
- }
- ],
- )
+ ipam_config: Annotated[EndpointIPAMConfig | None, Field(alias="IPAMConfig")] = None
+ links: Annotated[
+ list[str] | None,
+ Field(alias="Links", examples=[["container_1", "container_2"]]),
+ ] = None
+ aliases: Annotated[
+ list[str] | None, Field(alias="Aliases", examples=[["server_x", "server_y"]])
+ ] = None
+ network_id: Annotated[
+ str | None,
+ Field(
+ alias="NetworkID",
+ description="Unique ID of the network.\n",
+ examples=[
+ "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
+ ],
+ ),
+ ] = None
+ endpoint_id: Annotated[
+ str | None,
+ Field(
+ alias="EndpointID",
+ description="Unique ID for the service endpoint in a Sandbox.\n",
+ examples=[
+ "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ ],
+ ),
+ ] = None
+ gateway: Annotated[
+ str | None,
+ Field(
+ alias="Gateway",
+ description="Gateway address for this network.\n",
+ examples=["172.17.0.1"],
+ ),
+ ] = None
+ ip_address: Annotated[
+ str | None,
+ Field(
+ alias="IPAddress", description="IPv4 address.\n", examples=["172.17.0.4"]
+ ),
+ ] = None
+ ip_prefix_len: Annotated[
+ int | None,
+ Field(
+ alias="IPPrefixLen",
+ description="Mask length of the IPv4 address.\n",
+ examples=[16],
+ ),
+ ] = None
+ i_pv6_gateway: Annotated[
+ str | None,
+ Field(
+ alias="IPv6Gateway",
+ description="IPv6 gateway address.\n",
+ examples=["2001:db8:2::100"],
+ ),
+ ] = None
+ global_i_pv6_address: Annotated[
+ str | None,
+ Field(
+ alias="GlobalIPv6Address",
+ description="Global IPv6 address.\n",
+ examples=["2001:db8::5689"],
+ ),
+ ] = None
+ global_i_pv6_prefix_len: Annotated[
+ int | None,
+ Field(
+ alias="GlobalIPv6PrefixLen",
+ description="Mask length of the global IPv6 address.\n",
+ examples=[64],
+ ),
+ ] = None
+ mac_address: Annotated[
+ str | None,
+ Field(
+ alias="MacAddress",
+ description="MAC address for the endpoint on this network.\n",
+ examples=["02:42:ac:11:00:04"],
+ ),
+ ] = None
+ driver_opts: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="DriverOpts",
+ description="DriverOpts is a mapping of driver options and values. These options\nare passed directly to the driver and are driver specific.\n",
+ examples=[
+ {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value",
+ }
+ ],
+ ),
+ ] = None
class NodeDescription(BaseModel):
@@ -4410,13 +5126,13 @@ class NodeDescription(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- hostname: str | None = Field(
- default=None, alias="Hostname", examples=["bf3067039e47"]
- )
- platform: Platform | None = Field(default=None, alias="Platform")
- resources: ResourceObject | None = Field(default=None, alias="Resources")
- engine: EngineDescription | None = Field(default=None, alias="Engine")
- tls_info: TLSInfo | None = Field(default=None, alias="TLSInfo")
+ hostname: Annotated[
+ str | None, Field(alias="Hostname", examples=["bf3067039e47"])
+ ] = None
+ platform: Annotated[Platform | None, Field(alias="Platform")] = None
+ resources: Annotated[ResourceObject | None, Field(alias="Resources")] = None
+ engine: Annotated[EngineDescription | None, Field(alias="Engine")] = None
+ tls_info: Annotated[TLSInfo | None, Field(alias="TLSInfo")] = None
class NodeStatus(BaseModel):
@@ -4430,14 +5146,14 @@ class NodeStatus(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- state: NodeState | None = Field(default=None, alias="State")
- message: str | None = Field(default=None, alias="Message", examples=[""])
- addr: str | None = Field(
- default=None,
- alias="Addr",
- description="IP address of the node.",
- examples=["172.17.0.2"],
- )
+ state: Annotated[NodeState | None, Field(alias="State")] = None
+ message: Annotated[str | None, Field(alias="Message", examples=[""])] = None
+ addr: Annotated[
+ str | None,
+ Field(
+ alias="Addr", description="IP address of the node.", examples=["172.17.0.2"]
+ ),
+ ] = None
class ManagerStatus(BaseModel):
@@ -4452,14 +5168,16 @@ class ManagerStatus(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- leader: bool | None = Field(default=False, alias="Leader", examples=[True])
- reachability: Reachability | None = Field(default=None, alias="Reachability")
- addr: str | None = Field(
- default=None,
- alias="Addr",
- description="The IP address and port at which the manager is reachable.\n",
- examples=["10.0.0.46:2377"],
- )
+ leader: Annotated[bool | None, Field(alias="Leader", examples=[True])] = False
+ reachability: Annotated[Reachability | None, Field(alias="Reachability")] = None
+ addr: Annotated[
+ str | None,
+ Field(
+ alias="Addr",
+ description="The IP address and port at which the manager is reachable.\n",
+ examples=["10.0.0.46:2377"],
+ ),
+ ] = None
class Resources1(BaseModel):
@@ -4472,12 +5190,13 @@ class Resources1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- limits: Limit | None = Field(
- default=None, alias="Limits", description="Define resources limits."
- )
- reservations: ResourceObject | None = Field(
- default=None, alias="Reservations", description="Define resources reservation."
- )
+ limits: Annotated[
+ Limit | None, Field(alias="Limits", description="Define resources limits.")
+ ] = None
+ reservations: Annotated[
+ ResourceObject | None,
+ Field(alias="Reservations", description="Define resources reservation."),
+ ] = None
class TaskSpec(BaseModel):
@@ -4488,90 +5207,113 @@ class TaskSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- plugin_spec: PluginSpec | None = Field(
- default=None,
- alias="PluginSpec",
-        description="Plugin spec for the service. *(Experimental release only.)*\n\n\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n",
- )
- container_spec: ContainerSpec | None = Field(
- default=None,
- alias="ContainerSpec",
-        description="Container spec for the service.\n\n\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n",
- )
- network_attachment_spec: NetworkAttachmentSpec | None = Field(
- default=None,
- alias="NetworkAttachmentSpec",
-        description="Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n",
- )
- resources: Resources1 | None = Field(
- default=None,
- alias="Resources",
- description="Resource requirements which apply to each individual container created\nas part of the service.\n",
- )
- restart_policy: RestartPolicy1 | None = Field(
- default=None,
- alias="RestartPolicy",
- description="Specification for the restart policy which applies to containers\ncreated as part of this service.\n",
- )
- placement: Placement | None = Field(default=None, alias="Placement")
- force_update: int | None = Field(
- default=None,
- alias="ForceUpdate",
- description="A counter that triggers an update even if no relevant parameters have\nbeen changed.\n",
- )
- runtime: str | None = Field(
- default=None,
- alias="Runtime",
- description="Runtime is the type of runtime specified for the task executor.\n",
- )
- networks: list[NetworkAttachmentConfig] | None = Field(
- default=None,
- alias="Networks",
- description="Specifies which networks the service should attach to.",
- )
- log_driver: LogDriver1 | None = Field(
- default=None,
- alias="LogDriver",
- description="Specifies the log driver to use for tasks created from this spec. If\nnot present, the default one for the swarm will be used, finally\nfalling back to the engine default if not specified.\n",
- )
+ plugin_spec: Annotated[
+ PluginSpec | None,
+ Field(
+ alias="PluginSpec",
+            description="Plugin spec for the service. *(Experimental release only.)*\n\n\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n",
+ ),
+ ] = None
+ container_spec: Annotated[
+ ContainerSpec | None,
+ Field(
+ alias="ContainerSpec",
+            description="Container spec for the service.\n\n\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n",
+ ),
+ ] = None
+ network_attachment_spec: Annotated[
+ NetworkAttachmentSpec | None,
+ Field(
+ alias="NetworkAttachmentSpec",
+            description="Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n",
+ ),
+ ] = None
+ resources: Annotated[
+ Resources1 | None,
+ Field(
+ alias="Resources",
+ description="Resource requirements which apply to each individual container created\nas part of the service.\n",
+ ),
+ ] = None
+ restart_policy: Annotated[
+ RestartPolicy1 | None,
+ Field(
+ alias="RestartPolicy",
+ description="Specification for the restart policy which applies to containers\ncreated as part of this service.\n",
+ ),
+ ] = None
+ placement: Annotated[Placement | None, Field(alias="Placement")] = None
+ force_update: Annotated[
+ int | None,
+ Field(
+ alias="ForceUpdate",
+ description="A counter that triggers an update even if no relevant parameters have\nbeen changed.\n",
+ ),
+ ] = None
+ runtime: Annotated[
+ str | None,
+ Field(
+ alias="Runtime",
+ description="Runtime is the type of runtime specified for the task executor.\n",
+ ),
+ ] = None
+ networks: Annotated[
+ list[NetworkAttachmentConfig] | None,
+ Field(
+ alias="Networks",
+ description="Specifies which networks the service should attach to.",
+ ),
+ ] = None
+ log_driver: Annotated[
+ LogDriver1 | None,
+ Field(
+ alias="LogDriver",
+ description="Specifies the log driver to use for tasks created from this spec. If\nnot present, the default one for the swarm will be used, finally\nfalling back to the engine default if not specified.\n",
+ ),
+ ] = None
class Task(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(default=None, alias="ID", description="The ID of the task.")
- version: ObjectVersion | None = Field(default=None, alias="Version")
- created_at: str | None = Field(default=None, alias="CreatedAt")
- updated_at: str | None = Field(default=None, alias="UpdatedAt")
- name: str | None = Field(
- default=None, alias="Name", description="Name of the task."
- )
- labels: dict[str, str] | None = Field(
- default=None, alias="Labels", description="User-defined key/value metadata."
- )
- spec: TaskSpec | None = Field(default=None, alias="Spec")
- service_id: str | None = Field(
- default=None,
- alias="ServiceID",
- description="The ID of the service this task is part of.",
- )
- slot: int | None = Field(default=None, alias="Slot")
- node_id: str | None = Field(
- default=None,
- alias="NodeID",
- description="The ID of the node that this task is on.",
- )
- assigned_generic_resources: GenericResources | None = Field(
- default=None, alias="AssignedGenericResources"
- )
- status: Status1 | None = Field(default=None, alias="Status")
- desired_state: TaskState | None = Field(default=None, alias="DesiredState")
- job_iteration: ObjectVersion | None = Field(
- default=None,
- alias="JobIteration",
- description="If the Service this Task belongs to is a job-mode service, contains\nthe JobIteration of the Service this Task was created for. Absent if\nthe Task was created for a Replicated or Global Service.\n",
- )
+ id: Annotated[
+ str | None, Field(alias="ID", description="The ID of the task.")
+ ] = None
+ version: Annotated[ObjectVersion | None, Field(alias="Version")] = None
+ created_at: Annotated[str | None, Field(alias="CreatedAt")] = None
+ updated_at: Annotated[str | None, Field(alias="UpdatedAt")] = None
+ name: Annotated[
+ str | None, Field(alias="Name", description="Name of the task.")
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(alias="Labels", description="User-defined key/value metadata."),
+ ] = None
+ spec: Annotated[TaskSpec | None, Field(alias="Spec")] = None
+ service_id: Annotated[
+ str | None,
+ Field(
+ alias="ServiceID", description="The ID of the service this task is part of."
+ ),
+ ] = None
+ slot: Annotated[int | None, Field(alias="Slot")] = None
+ node_id: Annotated[
+ str | None,
+ Field(alias="NodeID", description="The ID of the node that this task is on."),
+ ] = None
+ assigned_generic_resources: Annotated[
+ GenericResources | None, Field(alias="AssignedGenericResources")
+ ] = None
+ status: Annotated[Status1 | None, Field(alias="Status")] = None
+ desired_state: Annotated[TaskState | None, Field(alias="DesiredState")] = None
+ job_iteration: Annotated[
+ ObjectVersion | None,
+ Field(
+ alias="JobIteration",
+ description="If the Service this Task belongs to is a job-mode service, contains\nthe JobIteration of the Service this Task was created for. Absent if\nthe Task was created for a Replicated or Global Service.\n",
+ ),
+ ] = None
class ServiceSpec(BaseModel):
@@ -4582,59 +5324,69 @@ class ServiceSpec(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None, alias="Name", description="Name of the service."
- )
- labels: dict[str, str] | None = Field(
- default=None, alias="Labels", description="User-defined key/value metadata."
- )
- task_template: TaskSpec | None = Field(default=None, alias="TaskTemplate")
- mode: Mode | None = Field(
- default=None, alias="Mode", description="Scheduling mode for the service."
- )
- update_config: UpdateConfig | None = Field(
- default=None,
- alias="UpdateConfig",
- description="Specification for the update strategy of the service.",
- )
- rollback_config: RollbackConfig | None = Field(
- default=None,
- alias="RollbackConfig",
- description="Specification for the rollback strategy of the service.",
- )
- networks: list[NetworkAttachmentConfig] | None = Field(
- default=None,
- alias="Networks",
- description="Specifies which networks the service should attach to.",
- )
- endpoint_spec: EndpointSpec | None = Field(default=None, alias="EndpointSpec")
+ name: Annotated[
+ str | None, Field(alias="Name", description="Name of the service.")
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(alias="Labels", description="User-defined key/value metadata."),
+ ] = None
+ task_template: Annotated[TaskSpec | None, Field(alias="TaskTemplate")] = None
+ mode: Annotated[
+ Mode | None, Field(alias="Mode", description="Scheduling mode for the service.")
+ ] = None
+ update_config: Annotated[
+ UpdateConfig | None,
+ Field(
+ alias="UpdateConfig",
+ description="Specification for the update strategy of the service.",
+ ),
+ ] = None
+ rollback_config: Annotated[
+ RollbackConfig | None,
+ Field(
+ alias="RollbackConfig",
+ description="Specification for the rollback strategy of the service.",
+ ),
+ ] = None
+ networks: Annotated[
+ list[NetworkAttachmentConfig] | None,
+ Field(
+ alias="Networks",
+ description="Specifies which networks the service should attach to.",
+ ),
+ ] = None
+ endpoint_spec: Annotated[EndpointSpec | None, Field(alias="EndpointSpec")] = None
class Service(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(default=None, alias="ID")
- version: ObjectVersion | None = Field(default=None, alias="Version")
- created_at: str | None = Field(default=None, alias="CreatedAt")
- updated_at: str | None = Field(default=None, alias="UpdatedAt")
- spec: ServiceSpec | None = Field(default=None, alias="Spec")
- endpoint: Endpoint | None = Field(default=None, alias="Endpoint")
- update_status: UpdateStatus | None = Field(
- default=None,
- alias="UpdateStatus",
- description="The status of a service update.",
- )
- service_status: ServiceStatus | None = Field(
- default=None,
- alias="ServiceStatus",
- description="The status of the service's tasks. Provided only when requested as\npart of a ServiceList operation.\n",
- )
- job_status: JobStatus | None = Field(
- default=None,
- alias="JobStatus",
- description="The status of the service when it is in one of ReplicatedJob or\nGlobalJob modes. Absent on Replicated and Global mode services. The\nJobIteration is an ObjectVersion, but unlike the Service's version,\ndoes not need to be sent with an update request.\n",
- )
+ id: Annotated[str | None, Field(alias="ID")] = None
+ version: Annotated[ObjectVersion | None, Field(alias="Version")] = None
+ created_at: Annotated[str | None, Field(alias="CreatedAt")] = None
+ updated_at: Annotated[str | None, Field(alias="UpdatedAt")] = None
+ spec: Annotated[ServiceSpec | None, Field(alias="Spec")] = None
+ endpoint: Annotated[Endpoint | None, Field(alias="Endpoint")] = None
+ update_status: Annotated[
+ UpdateStatus | None,
+ Field(alias="UpdateStatus", description="The status of a service update."),
+ ] = None
+ service_status: Annotated[
+ ServiceStatus | None,
+ Field(
+ alias="ServiceStatus",
+ description="The status of the service's tasks. Provided only when requested as\npart of a ServiceList operation.\n",
+ ),
+ ] = None
+ job_status: Annotated[
+ JobStatus | None,
+ Field(
+ alias="JobStatus",
+ description="The status of the service when it is in one of ReplicatedJob or\nGlobalJob modes. Absent on Replicated and Global mode services. The\nJobIteration is an ObjectVersion, but unlike the Service's version,\ndoes not need to be sent with an update request.\n",
+ ),
+ ] = None
class NetworkSettings1(BaseModel):
@@ -4645,72 +5397,89 @@ class NetworkSettings1(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- networks: dict[str, EndpointSettings] | None = Field(default=None, alias="Networks")
+ networks: Annotated[
+ dict[str, EndpointSettings] | None, Field(alias="Networks")
+ ] = None
class ContainerSummary(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None, alias="Id", description="The ID of this container"
- )
- names: list[str] | None = Field(
- default=None,
- alias="Names",
- description="The names that this container has been given",
- )
- image: str | None = Field(
- default=None,
- alias="Image",
- description="The name of the image used when creating this container",
- )
- image_id: str | None = Field(
- default=None,
- alias="ImageID",
- description="The ID of the image that this container was created from",
- )
- command: str | None = Field(
- default=None,
- alias="Command",
- description="Command to run when starting the container",
- )
- created: int | None = Field(
- default=None, alias="Created", description="When the container was created"
- )
- ports: list[Port] | None = Field(
- default=None, alias="Ports", description="The ports exposed by this container"
- )
- size_rw: int | None = Field(
- default=None,
- alias="SizeRw",
- description="The size of files that have been created or changed by this container",
- )
- size_root_fs: int | None = Field(
- default=None,
- alias="SizeRootFs",
- description="The total size of all the files in this container",
- )
- labels: dict[str, str] | None = Field(
- default=None, alias="Labels", description="User-defined key/value metadata."
- )
- state: str | None = Field(
- default=None,
- alias="State",
- description="The state of this container (e.g. `Exited`)",
- )
- status: str | None = Field(
- default=None,
- alias="Status",
- description="Additional human-readable status of this container (e.g. `Exit 0`)",
- )
- host_config: HostConfig1 | None = Field(default=None, alias="HostConfig")
- network_settings: NetworkSettings1 | None = Field(
- default=None,
- alias="NetworkSettings",
- description="A summary of the container's network settings",
- )
- mounts: list[MountPoint] | None = Field(default=None, alias="Mounts")
+ id: Annotated[
+ str | None, Field(alias="Id", description="The ID of this container")
+ ] = None
+ names: Annotated[
+ list[str] | None,
+ Field(
+ alias="Names", description="The names that this container has been given"
+ ),
+ ] = None
+ image: Annotated[
+ str | None,
+ Field(
+ alias="Image",
+ description="The name of the image used when creating this container",
+ ),
+ ] = None
+ image_id: Annotated[
+ str | None,
+ Field(
+ alias="ImageID",
+ description="The ID of the image that this container was created from",
+ ),
+ ] = None
+ command: Annotated[
+ str | None,
+ Field(
+ alias="Command", description="Command to run when starting the container"
+ ),
+ ] = None
+ created: Annotated[
+ int | None, Field(alias="Created", description="When the container was created")
+ ] = None
+ ports: Annotated[
+ list[Port] | None,
+ Field(alias="Ports", description="The ports exposed by this container"),
+ ] = None
+ size_rw: Annotated[
+ int | None,
+ Field(
+ alias="SizeRw",
+ description="The size of files that have been created or changed by this container",
+ ),
+ ] = None
+ size_root_fs: Annotated[
+ int | None,
+ Field(
+ alias="SizeRootFs",
+ description="The total size of all the files in this container",
+ ),
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(alias="Labels", description="User-defined key/value metadata."),
+ ] = None
+ state: Annotated[
+ str | None,
+ Field(alias="State", description="The state of this container (e.g. `Exited`)"),
+ ] = None
+ status: Annotated[
+ str | None,
+ Field(
+ alias="Status",
+ description="Additional human-readable status of this container (e.g. `Exit 0`)",
+ ),
+ ] = None
+ host_config: Annotated[HostConfig1 | None, Field(alias="HostConfig")] = None
+ network_settings: Annotated[
+ NetworkSettings1 | None,
+ Field(
+ alias="NetworkSettings",
+ description="A summary of the container's network settings",
+ ),
+ ] = None
+ mounts: Annotated[list[MountPoint] | None, Field(alias="Mounts")] = None
class ContainerState(BaseModel):
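
Note: `ContainerSummary` above composes the other rewritten models — its `network_settings` field carries a `NetworkSettings1`, whose `networks` mapping is keyed by network name with `EndpointSettings` values, all addressed through their Docker-cased aliases. A reduced sketch of how such nested, aliased payloads validate (the `*Sketch` models are stand-ins, not the generated classes):

from typing import Annotated

from pydantic import BaseModel, ConfigDict, Field


class EndpointSettingsSketch(BaseModel):
    model_config = ConfigDict(populate_by_name=True)

    ip_address: Annotated[str | None, Field(alias="IPAddress")] = None


class NetworkSettingsSketch(BaseModel):
    model_config = ConfigDict(populate_by_name=True)

    networks: Annotated[
        dict[str, EndpointSettingsSketch] | None, Field(alias="Networks")
    ] = None


class ContainerSummarySketch(BaseModel):
    model_config = ConfigDict(populate_by_name=True)

    network_settings: Annotated[
        NetworkSettingsSketch | None, Field(alias="NetworkSettings")
    ] = None


summary = ContainerSummarySketch.model_validate(
    {"NetworkSettings": {"Networks": {"bridge": {"IPAddress": "172.17.0.4"}}}}
)
assert summary.network_settings.networks["bridge"].ip_address == "172.17.0.4"
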
@@ -4723,63 +5492,79 @@ class ContainerState(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- status: Status2 | None = Field(
- default=None,
- alias="Status",
- description='String representation of the container state. Can be one of "created",\n"running", "paused", "restarting", "removing", "exited", or "dead".\n',
- examples=["running"],
- )
- running: bool | None = Field(
- default=None,
- alias="Running",
- description='Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container\'s state is "running".\n',
- examples=[True],
- )
- paused: bool | None = Field(
- default=None,
- alias="Paused",
- description="Whether this container is paused.",
- examples=[False],
- )
- restarting: bool | None = Field(
- default=None,
- alias="Restarting",
- description="Whether this container is restarting.",
- examples=[False],
- )
- oom_killed: bool | None = Field(
- default=None,
- alias="OOMKilled",
- description="Whether this container has been killed because it ran out of memory.\n",
- examples=[False],
- )
- dead: bool | None = Field(default=None, alias="Dead", examples=[False])
- pid: int | None = Field(
- default=None,
- alias="Pid",
- description="The process ID of this container",
- examples=[1234],
- )
- exit_code: int | None = Field(
- default=None,
- alias="ExitCode",
- description="The last exit code of this container",
- examples=[0],
- )
- error: str | None = Field(default=None, alias="Error")
- started_at: str | None = Field(
- default=None,
- alias="StartedAt",
- description="The time when this container was last started.",
- examples=["2020-01-06T09:06:59.461876391Z"],
- )
- finished_at: str | None = Field(
- default=None,
- alias="FinishedAt",
- description="The time when this container last exited.",
- examples=["2020-01-06T09:07:59.461876391Z"],
- )
- health: Health | None = Field(default=None, alias="Health")
+ status: Annotated[
+ Status2 | None,
+ Field(
+ alias="Status",
+ description='String representation of the container state. Can be one of "created",\n"running", "paused", "restarting", "removing", "exited", or "dead".\n',
+ examples=["running"],
+ ),
+ ] = None
+ running: Annotated[
+ bool | None,
+ Field(
+ alias="Running",
+ description='Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container\'s state is "running".\n',
+ examples=[True],
+ ),
+ ] = None
+ paused: Annotated[
+ bool | None,
+ Field(
+ alias="Paused",
+ description="Whether this container is paused.",
+ examples=[False],
+ ),
+ ] = None
+ restarting: Annotated[
+ bool | None,
+ Field(
+ alias="Restarting",
+ description="Whether this container is restarting.",
+ examples=[False],
+ ),
+ ] = None
+ oom_killed: Annotated[
+ bool | None,
+ Field(
+ alias="OOMKilled",
+ description="Whether this container has been killed because it ran out of memory.\n",
+ examples=[False],
+ ),
+ ] = None
+ dead: Annotated[bool | None, Field(alias="Dead", examples=[False])] = None
+ pid: Annotated[
+ int | None,
+ Field(
+ alias="Pid", description="The process ID of this container", examples=[1234]
+ ),
+ ] = None
+ exit_code: Annotated[
+ int | None,
+ Field(
+ alias="ExitCode",
+ description="The last exit code of this container",
+ examples=[0],
+ ),
+ ] = None
+ error: Annotated[str | None, Field(alias="Error")] = None
+ started_at: Annotated[
+ str | None,
+ Field(
+ alias="StartedAt",
+ description="The time when this container was last started.",
+ examples=["2020-01-06T09:06:59.461876391Z"],
+ ),
+ ] = None
+ finished_at: Annotated[
+ str | None,
+ Field(
+ alias="FinishedAt",
+ description="The time when this container last exited.",
+ examples=["2020-01-06T09:07:59.461876391Z"],
+ ),
+ ] = None
+ health: Annotated[Health | None, Field(alias="Health")] = None
class ContainerWaitResponse(BaseModel):
@@ -4790,10 +5575,10 @@ class ContainerWaitResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- status_code: int = Field(
- ..., alias="StatusCode", description="Exit code of the container"
- )
- error: ContainerWaitExitError | None = Field(default=None, alias="Error")
+ status_code: Annotated[
+ int, Field(alias="StatusCode", description="Exit code of the container")
+ ]
+ error: Annotated[ContainerWaitExitError | None, Field(alias="Error")] = None
class RegistryServiceConfig(BaseModel):
@@ -4805,69 +5590,82 @@ class RegistryServiceConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- allow_nondistributable_artifacts_cid_rs: list[str] | None = Field(
- default=None,
- alias="AllowNondistributableArtifactsCIDRs",
- description="List of IP ranges to which nondistributable artifacts can be pushed,\nusing the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior, and enables the daemon to\npush nondistributable artifacts to all registries whose resolved IP\naddress is within the subnet described by the CIDR syntax.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n",
- examples=[["::1/128", "127.0.0.0/8"]],
- )
- allow_nondistributable_artifacts_hostnames: list[str] | None = Field(
- default=None,
- alias="AllowNondistributableArtifactsHostnames",
- description="List of registry hostnames to which nondistributable artifacts can be\npushed, using the format `[:]` or `[:]`.\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior for the specified\nregistries.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n",
- examples=[
- ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"]
- ],
- )
- insecure_registry_cid_rs: list[str] | None = Field(
- default=None,
- alias="InsecureRegistryCIDRs",
- description="List of IP ranges of insecure registries, using the CIDR syntax\n([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries\naccept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates\nfrom unknown CAs) communication.\n\nBy default, local registries (`127.0.0.0/8`) are configured as\ninsecure. All other registries are secure. Communicating with an\ninsecure registry is not possible if the daemon assumes that registry\nis secure.\n\nThis configuration override this behavior, insecure communication with\nregistries whose resolved IP address is within the subnet described by\nthe CIDR syntax.\n\nRegistries can also be marked insecure by hostname. Those registries\nare listed under `IndexConfigs` and have their `Secure` field set to\n`false`.\n\n> **Warning**: Using this option can be useful when running a local\n> registry, but introduces security vulnerabilities. This option\n> should therefore ONLY be used for testing purposes. For increased\n> security, users should add their CA to their system's list of trusted\n> CAs instead of enabling this option.\n",
- examples=[["::1/128", "127.0.0.0/8"]],
- )
- index_configs: dict[str, IndexInfo] | None = Field(
- default=None,
- alias="IndexConfigs",
- examples=[
- {
- "127.0.0.1:5000": {
- "Name": "127.0.0.1:5000",
- "Mirrors": [],
- "Secure": False,
- "Official": False,
- },
- "[2001:db8:a0b:12f0::1]:80": {
- "Name": "[2001:db8:a0b:12f0::1]:80",
- "Mirrors": [],
- "Secure": False,
- "Official": False,
- },
- "docker.io": {
- "Name": "docker.io",
- "Mirrors": ["https://hub-mirror.corp.example.com:5000/"],
- "Secure": True,
- "Official": True,
- },
- "registry.internal.corp.example.com:3000": {
- "Name": "registry.internal.corp.example.com:3000",
- "Mirrors": [],
- "Secure": False,
- "Official": False,
- },
- }
- ],
- )
- mirrors: list[str] | None = Field(
- default=None,
- alias="Mirrors",
- description="List of registry URLs that act as a mirror for the official\n(`docker.io`) registry.\n",
- examples=[
- [
- "https://hub-mirror.corp.example.com:5000/",
- "https://[2001:db8:a0b:12f0::1]/",
- ]
- ],
- )
+ allow_nondistributable_artifacts_cid_rs: Annotated[
+ list[str] | None,
+ Field(
+ alias="AllowNondistributableArtifactsCIDRs",
+ description="List of IP ranges to which nondistributable artifacts can be pushed,\nusing the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior, and enables the daemon to\npush nondistributable artifacts to all registries whose resolved IP\naddress is within the subnet described by the CIDR syntax.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n",
+ examples=[["::1/128", "127.0.0.0/8"]],
+ ),
+ ] = None
+ allow_nondistributable_artifacts_hostnames: Annotated[
+ list[str] | None,
+ Field(
+ alias="AllowNondistributableArtifactsHostnames",
+ description="List of registry hostnames to which nondistributable artifacts can be\npushed, using the format `[:]` or `[:]`.\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior for the specified\nregistries.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n",
+ examples=[
+ [
+ "registry.internal.corp.example.com:3000",
+ "[2001:db8:a0b:12f0::1]:443",
+ ]
+ ],
+ ),
+ ] = None
+ insecure_registry_cid_rs: Annotated[
+ list[str] | None,
+ Field(
+ alias="InsecureRegistryCIDRs",
+ description="List of IP ranges of insecure registries, using the CIDR syntax\n([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries\naccept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates\nfrom unknown CAs) communication.\n\nBy default, local registries (`127.0.0.0/8`) are configured as\ninsecure. All other registries are secure. Communicating with an\ninsecure registry is not possible if the daemon assumes that registry\nis secure.\n\nThis configuration override this behavior, insecure communication with\nregistries whose resolved IP address is within the subnet described by\nthe CIDR syntax.\n\nRegistries can also be marked insecure by hostname. Those registries\nare listed under `IndexConfigs` and have their `Secure` field set to\n`false`.\n\n> **Warning**: Using this option can be useful when running a local\n> registry, but introduces security vulnerabilities. This option\n> should therefore ONLY be used for testing purposes. For increased\n> security, users should add their CA to their system's list of trusted\n> CAs instead of enabling this option.\n",
+ examples=[["::1/128", "127.0.0.0/8"]],
+ ),
+ ] = None
+ index_configs: Annotated[
+ dict[str, IndexInfo] | None,
+ Field(
+ alias="IndexConfigs",
+ examples=[
+ {
+ "127.0.0.1:5000": {
+ "Name": "127.0.0.1:5000",
+ "Mirrors": [],
+ "Secure": False,
+ "Official": False,
+ },
+ "[2001:db8:a0b:12f0::1]:80": {
+ "Name": "[2001:db8:a0b:12f0::1]:80",
+ "Mirrors": [],
+ "Secure": False,
+ "Official": False,
+ },
+ "docker.io": {
+ "Name": "docker.io",
+ "Mirrors": ["https://hub-mirror.corp.example.com:5000/"],
+ "Secure": True,
+ "Official": True,
+ },
+ "registry.internal.corp.example.com:3000": {
+ "Name": "registry.internal.corp.example.com:3000",
+ "Mirrors": [],
+ "Secure": False,
+ "Official": False,
+ },
+ }
+ ],
+ ),
+ ] = None
+ mirrors: Annotated[
+ list[str] | None,
+ Field(
+ alias="Mirrors",
+ description="List of registry URLs that act as a mirror for the official\n(`docker.io`) registry.\n",
+ examples=[
+ [
+ "https://hub-mirror.corp.example.com:5000/",
+ "https://[2001:db8:a0b:12f0::1]/",
+ ]
+ ],
+ ),
+ ] = None
class SwarmInfo(BaseModel):
@@ -4879,48 +5677,60 @@ class SwarmInfo(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- node_id: str | None = Field(
- default="",
- alias="NodeID",
- description="Unique identifier of for this node in the swarm.",
- examples=["k67qz4598weg5unwwffg6z1m1"],
- )
- node_addr: str | None = Field(
- default="",
- alias="NodeAddr",
- description="IP address at which this node can be reached by other nodes in the\nswarm.\n",
- examples=["10.0.0.46"],
- )
- local_node_state: LocalNodeState | None = Field(default="", alias="LocalNodeState")
- control_available: bool | None = Field(
- default=False, alias="ControlAvailable", examples=[True]
- )
- error: str | None = Field(default="", alias="Error")
- remote_managers: list[PeerNode] | None = Field(
- default=None,
- alias="RemoteManagers",
- description="List of ID's and addresses of other managers in the swarm.\n",
- examples=[
- [
- {"NodeID": "71izy0goik036k48jg985xnds", "Addr": "10.0.0.158:2377"},
- {"NodeID": "79y6h1o4gv8n120drcprv5nmc", "Addr": "10.0.0.159:2377"},
- {"NodeID": "k67qz4598weg5unwwffg6z1m1", "Addr": "10.0.0.46:2377"},
- ]
- ],
- )
- nodes: int | None = Field(
- default=None,
- alias="Nodes",
- description="Total number of nodes in the swarm.",
- examples=[4],
- )
- managers: int | None = Field(
- default=None,
- alias="Managers",
- description="Total number of managers in the swarm.",
- examples=[3],
- )
- cluster: ClusterInfo | None = Field(default=None, alias="Cluster")
+ node_id: Annotated[
+ str | None,
+ Field(
+ alias="NodeID",
+ description="Unique identifier of for this node in the swarm.",
+ examples=["k67qz4598weg5unwwffg6z1m1"],
+ ),
+ ] = ""
+ node_addr: Annotated[
+ str | None,
+ Field(
+ alias="NodeAddr",
+ description="IP address at which this node can be reached by other nodes in the\nswarm.\n",
+ examples=["10.0.0.46"],
+ ),
+ ] = ""
+ local_node_state: Annotated[
+ LocalNodeState | None, Field(alias="LocalNodeState")
+ ] = "" # type: ignore[assignment]
+ control_available: Annotated[
+ bool | None, Field(alias="ControlAvailable", examples=[True])
+ ] = False
+ error: Annotated[str | None, Field(alias="Error")] = ""
+ remote_managers: Annotated[
+ list[PeerNode] | None,
+ Field(
+ alias="RemoteManagers",
+ description="List of ID's and addresses of other managers in the swarm.\n",
+ examples=[
+ [
+ {"NodeID": "71izy0goik036k48jg985xnds", "Addr": "10.0.0.158:2377"},
+ {"NodeID": "79y6h1o4gv8n120drcprv5nmc", "Addr": "10.0.0.159:2377"},
+ {"NodeID": "k67qz4598weg5unwwffg6z1m1", "Addr": "10.0.0.46:2377"},
+ ]
+ ],
+ ),
+ ] = None
+ nodes: Annotated[
+ int | None,
+ Field(
+ alias="Nodes",
+ description="Total number of nodes in the swarm.",
+ examples=[4],
+ ),
+ ] = None
+ managers: Annotated[
+ int | None,
+ Field(
+ alias="Managers",
+ description="Total number of managers in the swarm.",
+ examples=[3],
+ ),
+ ] = None
+ cluster: Annotated[ClusterInfo | None, Field(alias="Cluster")] = None
class HostConfig(Resources):
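
Note: one detail in `SwarmInfo` deserves a comment — `local_node_state` is annotated as `LocalNodeState | None` but keeps the spec's empty-string default, hence the `# type: ignore[assignment]` for static checkers. At runtime this is harmless: pydantic does not validate defaults unless `validate_default=True`, while explicit input is still coerced to the enum member. A sketch under the assumption that `LocalNodeState` is a str-valued enum with an empty member, as in the Docker API:

from enum import Enum
from typing import Annotated

from pydantic import BaseModel, ConfigDict, Field


class LocalNodeStateSketch(str, Enum):  # stand-in for the generated enum
    empty = ""
    inactive = "inactive"
    active = "active"


class SwarmInfoSketch(BaseModel):
    model_config = ConfigDict(populate_by_name=True)

    # The annotation says enum-or-None, the default is a bare str: hence
    # the `type: ignore[assignment]` on the real field.
    local_node_state: Annotated[
        LocalNodeStateSketch | None, Field(alias="LocalNodeState")
    ] = ""  # type: ignore[assignment]


# Explicit input is validated and coerced to the enum member ...
state = SwarmInfoSketch.model_validate({"LocalNodeState": "active"})
assert state.local_node_state is LocalNodeStateSketch.active
# ... while the unvalidated default still compares equal to the empty member.
assert SwarmInfoSketch().local_node_state == LocalNodeStateSketch.empty
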
@@ -4931,184 +5741,248 @@ class HostConfig(Resources):
model_config = ConfigDict(
populate_by_name=True,
)
- binds: list[str] | None = Field(
- default=None,
- alias="Binds",
- description="A list of volume bindings for this container. Each volume binding\nis a string in one of these forms:\n\n- `host-src:container-dest[:options]` to bind-mount a host path\n into the container. Both `host-src`, and `container-dest` must\n be an _absolute_ path.\n- `volume-name:container-dest[:options]` to bind-mount a volume\n managed by a volume driver into the container. `container-dest`\n must be an _absolute_ path.\n\n`options` is an optional, comma-delimited list of:\n\n- `nocopy` disables automatic copying of data from the container\n path to the volume. The `nocopy` flag only applies to named volumes.\n- `[ro|rw]` mounts a volume read-only or read-write, respectively.\n If omitted or set to `rw`, volumes are mounted read-write.\n- `[z|Z]` applies SELinux labels to allow or deny multiple containers\n to read and write to the same volume.\n - `z`: a _shared_ content label is applied to the content. This\n label indicates that multiple containers can share the volume\n content, for both reading and writing.\n - `Z`: a _private unshared_ label is applied to the content.\n This label indicates that only the current container can use\n a private volume. Labeling systems such as SELinux require\n proper labels to be placed on volume content that is mounted\n into a container. Without a label, the security system can\n prevent a container's processes from using the content. By\n default, the labels set by the host operating system are not\n modified.\n- `[[r]shared|[r]slave|[r]private]` specifies mount\n [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).\n This only applies to bind-mounted volumes, not internal volumes\n or named volumes. Mount propagation requires the source mount\n point (the location where the source directory is mounted in the\n host operating system) to have the correct propagation properties.\n For shared volumes, the source mount point must be set to `shared`.\n For slave volumes, the mount must be set to either `shared` or\n `slave`.\n",
- )
- container_id_file: str | None = Field(
- default=None,
- alias="ContainerIDFile",
- description="Path to a file where the container ID is written",
- )
- log_config: LogConfig | None = Field(
- default=None,
- alias="LogConfig",
- description="The logging configuration for this container",
- )
- network_mode: str | None = Field(
- default=None,
- alias="NetworkMode",
- description="Network mode to use for this container. Supported standard values\nare: `bridge`, `host`, `none`, and `container:`. Any\nother value is taken as a custom network's name to which this\ncontainer should connect to.\n",
- )
- port_bindings: PortMap | None = Field(default=None, alias="PortBindings")
- restart_policy: RestartPolicy | None = Field(default=None, alias="RestartPolicy")
- auto_remove: bool | None = Field(
- default=None,
- alias="AutoRemove",
- description="Automatically remove the container when the container's process\nexits. This has no effect if `RestartPolicy` is set.\n",
- )
- volume_driver: str | None = Field(
- default=None,
- alias="VolumeDriver",
- description="Driver that this container uses to mount volumes.",
- )
- volumes_from: list[str] | None = Field(
- default=None,
- alias="VolumesFrom",
- description="A list of volumes to inherit from another container, specified in\nthe form `[:]`.\n",
- )
- mounts: list[Mount] | None = Field(
- default=None,
- alias="Mounts",
- description="Specification for mounts to be added to the container.\n",
- )
- cap_add: list[str] | None = Field(
- default=None,
- alias="CapAdd",
- description="A list of kernel capabilities to add to the container. Conflicts\nwith option 'Capabilities'.\n",
- )
- cap_drop: list[str] | None = Field(
- default=None,
- alias="CapDrop",
- description="A list of kernel capabilities to drop from the container. Conflicts\nwith option 'Capabilities'.\n",
- )
- cgroupns_mode: CgroupnsMode | None = Field(
- default=None,
- alias="CgroupnsMode",
- description='cgroup namespace mode for the container. Possible values are:\n\n- `"private"`: the container runs in its own private cgroup namespace\n- `"host"`: use the host system\'s cgroup namespace\n\nIf not specified, the daemon default is used, which can either be `"private"`\nor `"host"`, depending on daemon version, kernel support and configuration.\n',
- )
- dns: list[str] | None = Field(
- default=None,
- alias="Dns",
- description="A list of DNS servers for the container to use.",
- )
- dns_options: list[str] | None = Field(
- default=None, alias="DnsOptions", description="A list of DNS options."
- )
- dns_search: list[str] | None = Field(
- default=None, alias="DnsSearch", description="A list of DNS search domains."
- )
- extra_hosts: list[str] | None = Field(
- default=None,
- alias="ExtraHosts",
- description='A list of hostnames/IP mappings to add to the container\'s `/etc/hosts`\nfile. Specified in the form `["hostname:IP"]`.\n',
- )
- group_add: list[str] | None = Field(
- default=None,
- alias="GroupAdd",
- description="A list of additional groups that the container process will run as.\n",
- )
- ipc_mode: str | None = Field(
- default=None,
- alias="IpcMode",
- description='IPC sharing mode for the container. Possible values are:\n\n- `"none"`: own private IPC namespace, with /dev/shm not mounted\n- `"private"`: own private IPC namespace\n- `"shareable"`: own private IPC namespace, with a possibility to share it with other containers\n- `"container:"`: join another (shareable) container\'s IPC namespace\n- `"host"`: use the host system\'s IPC namespace\n\nIf not specified, daemon default is used, which can either be `"private"`\nor `"shareable"`, depending on daemon version and configuration.\n',
- )
- cgroup: str | None = Field(
- default=None, alias="Cgroup", description="Cgroup to use for the container."
- )
- links: list[str] | None = Field(
- default=None,
- alias="Links",
- description="A list of links for the container in the form `container_name:alias`.\n",
- )
- oom_score_adj: int | None = Field(
- default=None,
- alias="OomScoreAdj",
- description="An integer value containing the score given to the container in\norder to tune OOM killer preferences.\n",
- examples=[500],
- )
- pid_mode: str | None = Field(
- default=None,
- alias="PidMode",
- description='Set the PID (Process) Namespace mode for the container. It can be\neither:\n\n- `"container:"`: joins another container\'s PID namespace\n- `"host"`: use the host\'s PID namespace inside the container\n',
- )
- privileged: bool | None = Field(
- default=None,
- alias="Privileged",
- description="Gives the container full access to the host.",
- )
- publish_all_ports: bool | None = Field(
- default=None,
- alias="PublishAllPorts",
- description="Allocates an ephemeral host port for all of a container's\nexposed ports.\n\nPorts are de-allocated when the container stops and allocated when\nthe container starts. The allocated port might be changed when\nrestarting the container.\n\nThe port is selected from the ephemeral port range that depends on\nthe kernel. For example, on Linux the range is defined by\n`/proc/sys/net/ipv4/ip_local_port_range`.\n",
- )
- readonly_rootfs: bool | None = Field(
- default=None,
- alias="ReadonlyRootfs",
- description="Mount the container's root filesystem as read only.",
- )
- security_opt: list[str] | None = Field(
- default=None,
- alias="SecurityOpt",
- description="A list of string values to customize labels for MLS systems, such\nas SELinux.\n",
- )
- storage_opt: dict[str, str] | None = Field(
- default=None,
- alias="StorageOpt",
- description='Storage driver options for this container, in the form `{"size": "120G"}`.\n',
- )
- tmpfs: dict[str, str] | None = Field(
- default=None,
- alias="Tmpfs",
- description='A map of container directories which should be replaced by tmpfs\nmounts, and their corresponding mount options. For example:\n\n```\n{ "/run": "rw,noexec,nosuid,size=65536k" }\n```\n',
- )
- uts_mode: str | None = Field(
- default=None,
- alias="UTSMode",
- description="UTS namespace to use for the container.",
- )
- userns_mode: str | None = Field(
- default=None,
- alias="UsernsMode",
- description="Sets the usernamespace mode for the container when usernamespace\nremapping option is enabled.\n",
- )
- shm_size: int | None = Field(
- default=None,
- alias="ShmSize",
- description="Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.\n",
- ge=0,
- )
- sysctls: dict[str, str] | None = Field(
- default=None,
- alias="Sysctls",
- description='A list of kernel parameters (sysctls) to set in the container.\nFor example:\n\n```\n{"net.ipv4.ip_forward": "1"}\n```\n',
- )
- runtime: str | None = Field(
- default=None, alias="Runtime", description="Runtime to use with this container."
- )
- console_size: list[ConsoleSizeItem] | None = Field(
- default=None,
- alias="ConsoleSize",
- description="Initial console size, as an `[height, width]` array. (Windows only)\n",
- max_length=2,
- min_length=2,
- )
- isolation: Isolation | None = Field(
- default=None,
- alias="Isolation",
- description="Isolation technology of the container. (Windows only)\n",
- )
- masked_paths: list[str] | None = Field(
- default=None,
- alias="MaskedPaths",
- description="The list of paths to be masked inside the container (this overrides\nthe default set of paths).\n",
- )
- readonly_paths: list[str] | None = Field(
- default=None,
- alias="ReadonlyPaths",
- description="The list of paths to be set as read-only inside the container\n(this overrides the default set of paths).\n",
- )
+ binds: Annotated[
+ list[str] | None,
+ Field(
+ alias="Binds",
+ description="A list of volume bindings for this container. Each volume binding\nis a string in one of these forms:\n\n- `host-src:container-dest[:options]` to bind-mount a host path\n into the container. Both `host-src`, and `container-dest` must\n be an _absolute_ path.\n- `volume-name:container-dest[:options]` to bind-mount a volume\n managed by a volume driver into the container. `container-dest`\n must be an _absolute_ path.\n\n`options` is an optional, comma-delimited list of:\n\n- `nocopy` disables automatic copying of data from the container\n path to the volume. The `nocopy` flag only applies to named volumes.\n- `[ro|rw]` mounts a volume read-only or read-write, respectively.\n If omitted or set to `rw`, volumes are mounted read-write.\n- `[z|Z]` applies SELinux labels to allow or deny multiple containers\n to read and write to the same volume.\n - `z`: a _shared_ content label is applied to the content. This\n label indicates that multiple containers can share the volume\n content, for both reading and writing.\n - `Z`: a _private unshared_ label is applied to the content.\n This label indicates that only the current container can use\n a private volume. Labeling systems such as SELinux require\n proper labels to be placed on volume content that is mounted\n into a container. Without a label, the security system can\n prevent a container's processes from using the content. By\n default, the labels set by the host operating system are not\n modified.\n- `[[r]shared|[r]slave|[r]private]` specifies mount\n [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).\n This only applies to bind-mounted volumes, not internal volumes\n or named volumes. Mount propagation requires the source mount\n point (the location where the source directory is mounted in the\n host operating system) to have the correct propagation properties.\n For shared volumes, the source mount point must be set to `shared`.\n For slave volumes, the mount must be set to either `shared` or\n `slave`.\n",
+ ),
+ ] = None
+ container_id_file: Annotated[
+ str | None,
+ Field(
+ alias="ContainerIDFile",
+ description="Path to a file where the container ID is written",
+ ),
+ ] = None
+ log_config: Annotated[
+ LogConfig | None,
+ Field(
+ alias="LogConfig",
+ description="The logging configuration for this container",
+ ),
+ ] = None
+ network_mode: Annotated[
+ str | None,
+ Field(
+ alias="NetworkMode",
+            description="Network mode to use for this container. Supported standard values\nare: `bridge`, `host`, `none`, and `container:<name|id>`. Any\nother value is taken as a custom network's name to which this\ncontainer should connect to.\n",
+ ),
+ ] = None
+ port_bindings: Annotated[PortMap | None, Field(alias="PortBindings")] = None
+ restart_policy: Annotated[RestartPolicy | None, Field(alias="RestartPolicy")] = None
+ auto_remove: Annotated[
+ bool | None,
+ Field(
+ alias="AutoRemove",
+ description="Automatically remove the container when the container's process\nexits. This has no effect if `RestartPolicy` is set.\n",
+ ),
+ ] = None
+ volume_driver: Annotated[
+ str | None,
+ Field(
+ alias="VolumeDriver",
+ description="Driver that this container uses to mount volumes.",
+ ),
+ ] = None
+ volumes_from: Annotated[
+ list[str] | None,
+ Field(
+ alias="VolumesFrom",
+            description="A list of volumes to inherit from another container, specified in\nthe form `<container name>[:<ro|rw>]`.\n",
+ ),
+ ] = None
+ mounts: Annotated[
+ list[Mount] | None,
+ Field(
+ alias="Mounts",
+ description="Specification for mounts to be added to the container.\n",
+ ),
+ ] = None
+ cap_add: Annotated[
+ list[str] | None,
+ Field(
+ alias="CapAdd",
+ description="A list of kernel capabilities to add to the container. Conflicts\nwith option 'Capabilities'.\n",
+ ),
+ ] = None
+ cap_drop: Annotated[
+ list[str] | None,
+ Field(
+ alias="CapDrop",
+ description="A list of kernel capabilities to drop from the container. Conflicts\nwith option 'Capabilities'.\n",
+ ),
+ ] = None
+ cgroupns_mode: Annotated[
+ CgroupnsMode | None,
+ Field(
+ alias="CgroupnsMode",
+ description='cgroup namespace mode for the container. Possible values are:\n\n- `"private"`: the container runs in its own private cgroup namespace\n- `"host"`: use the host system\'s cgroup namespace\n\nIf not specified, the daemon default is used, which can either be `"private"`\nor `"host"`, depending on daemon version, kernel support and configuration.\n',
+ ),
+ ] = None
+ dns: Annotated[
+ list[str] | None,
+ Field(
+ alias="Dns", description="A list of DNS servers for the container to use."
+ ),
+ ] = None
+ dns_options: Annotated[
+ list[str] | None,
+ Field(alias="DnsOptions", description="A list of DNS options."),
+ ] = None
+ dns_search: Annotated[
+ list[str] | None,
+ Field(alias="DnsSearch", description="A list of DNS search domains."),
+ ] = None
+ extra_hosts: Annotated[
+ list[str] | None,
+ Field(
+ alias="ExtraHosts",
+ description='A list of hostnames/IP mappings to add to the container\'s `/etc/hosts`\nfile. Specified in the form `["hostname:IP"]`.\n',
+ ),
+ ] = None
+ group_add: Annotated[
+ list[str] | None,
+ Field(
+ alias="GroupAdd",
+ description="A list of additional groups that the container process will run as.\n",
+ ),
+ ] = None
+ ipc_mode: Annotated[
+ str | None,
+ Field(
+ alias="IpcMode",
+            description='IPC sharing mode for the container. Possible values are:\n\n- `"none"`: own private IPC namespace, with /dev/shm not mounted\n- `"private"`: own private IPC namespace\n- `"shareable"`: own private IPC namespace, with a possibility to share it with other containers\n- `"container:<name|id>"`: join another (shareable) container\'s IPC namespace\n- `"host"`: use the host system\'s IPC namespace\n\nIf not specified, daemon default is used, which can either be `"private"`\nor `"shareable"`, depending on daemon version and configuration.\n',
+ ),
+ ] = None
+ cgroup: Annotated[
+ str | None,
+ Field(alias="Cgroup", description="Cgroup to use for the container."),
+ ] = None
+ links: Annotated[
+ list[str] | None,
+ Field(
+ alias="Links",
+ description="A list of links for the container in the form `container_name:alias`.\n",
+ ),
+ ] = None
+ oom_score_adj: Annotated[
+ int | None,
+ Field(
+ alias="OomScoreAdj",
+ description="An integer value containing the score given to the container in\norder to tune OOM killer preferences.\n",
+ examples=[500],
+ ),
+ ] = None
+ pid_mode: Annotated[
+ str | None,
+ Field(
+ alias="PidMode",
+            description='Set the PID (Process) Namespace mode for the container. It can be\neither:\n\n- `"container:<name|id>"`: joins another container\'s PID namespace\n- `"host"`: use the host\'s PID namespace inside the container\n',
+ ),
+ ] = None
+ privileged: Annotated[
+ bool | None,
+ Field(
+ alias="Privileged",
+ description="Gives the container full access to the host.",
+ ),
+ ] = None
+ publish_all_ports: Annotated[
+ bool | None,
+ Field(
+ alias="PublishAllPorts",
+ description="Allocates an ephemeral host port for all of a container's\nexposed ports.\n\nPorts are de-allocated when the container stops and allocated when\nthe container starts. The allocated port might be changed when\nrestarting the container.\n\nThe port is selected from the ephemeral port range that depends on\nthe kernel. For example, on Linux the range is defined by\n`/proc/sys/net/ipv4/ip_local_port_range`.\n",
+ ),
+ ] = None
+ readonly_rootfs: Annotated[
+ bool | None,
+ Field(
+ alias="ReadonlyRootfs",
+ description="Mount the container's root filesystem as read only.",
+ ),
+ ] = None
+ security_opt: Annotated[
+ list[str] | None,
+ Field(
+ alias="SecurityOpt",
+ description="A list of string values to customize labels for MLS systems, such\nas SELinux.\n",
+ ),
+ ] = None
+ storage_opt: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="StorageOpt",
+ description='Storage driver options for this container, in the form `{"size": "120G"}`.\n',
+ ),
+ ] = None
+ tmpfs: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Tmpfs",
+ description='A map of container directories which should be replaced by tmpfs\nmounts, and their corresponding mount options. For example:\n\n```\n{ "/run": "rw,noexec,nosuid,size=65536k" }\n```\n',
+ ),
+ ] = None
+ uts_mode: Annotated[
+ str | None,
+ Field(alias="UTSMode", description="UTS namespace to use for the container."),
+ ] = None
+ userns_mode: Annotated[
+ str | None,
+ Field(
+ alias="UsernsMode",
+ description="Sets the usernamespace mode for the container when usernamespace\nremapping option is enabled.\n",
+ ),
+ ] = None
+ shm_size: Annotated[
+ int | None,
+ Field(
+ alias="ShmSize",
+ description="Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.\n",
+ ge=0,
+ ),
+ ] = None
+ sysctls: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Sysctls",
+ description='A list of kernel parameters (sysctls) to set in the container.\nFor example:\n\n```\n{"net.ipv4.ip_forward": "1"}\n```\n',
+ ),
+ ] = None
+ runtime: Annotated[
+ str | None,
+ Field(alias="Runtime", description="Runtime to use with this container."),
+ ] = None
+ console_size: Annotated[
+ list[ConsoleSizeItem] | None,
+ Field(
+ alias="ConsoleSize",
+ description="Initial console size, as an `[height, width]` array. (Windows only)\n",
+ max_length=2,
+ min_length=2,
+ ),
+ ] = None
+ isolation: Annotated[
+ Isolation | None,
+ Field(
+ alias="Isolation",
+ description="Isolation technology of the container. (Windows only)\n",
+ ),
+ ] = None
+ masked_paths: Annotated[
+ list[str] | None,
+ Field(
+ alias="MaskedPaths",
+ description="The list of paths to be masked inside the container (this overrides\nthe default set of paths).\n",
+ ),
+ ] = None
+ readonly_paths: Annotated[
+ list[str] | None,
+ Field(
+ alias="ReadonlyPaths",
+ description="The list of paths to be set as read-only inside the container\n(this overrides the default set of paths).\n",
+ ),
+ ] = None
class NetworkingConfig(BaseModel):
@@ -5123,11 +5997,13 @@ class NetworkingConfig(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- endpoints_config: dict[str, EndpointSettings] | None = Field(
- default=None,
- alias="EndpointsConfig",
- description="A mapping of network name to endpoint configuration for that network.\n",
- )
+ endpoints_config: Annotated[
+ dict[str, EndpointSettings] | None,
+ Field(
+ alias="EndpointsConfig",
+ description="A mapping of network name to endpoint configuration for that network.\n",
+ ),
+ ] = None
class NetworkSettings(BaseModel):
@@ -5138,615 +6014,795 @@ class NetworkSettings(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- bridge: str | None = Field(
- default=None,
- alias="Bridge",
- description="Name of the network's bridge (for example, `docker0`).",
- examples=["docker0"],
- )
- sandbox_id: str | None = Field(
- default=None,
- alias="SandboxID",
- description="SandboxID uniquely represents a container's network stack.",
- examples=["9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"],
- )
- hairpin_mode: bool | None = Field(
- default=None,
- alias="HairpinMode",
- description="Indicates if hairpin NAT should be enabled on the virtual interface.\n",
- examples=[False],
- )
- link_local_i_pv6_address: str | None = Field(
- default=None,
- alias="LinkLocalIPv6Address",
- description="IPv6 unicast address using the link-local prefix.",
- examples=["fe80::42:acff:fe11:1"],
- )
- link_local_i_pv6_prefix_len: int | None = Field(
- default=None,
- alias="LinkLocalIPv6PrefixLen",
- description="Prefix length of the IPv6 unicast address.",
- examples=["64"],
- )
- ports: PortMap | None = Field(default=None, alias="Ports")
- sandbox_key: str | None = Field(
- default=None,
- alias="SandboxKey",
- description="SandboxKey identifies the sandbox",
- examples=["/var/run/docker/netns/8ab54b426c38"],
- )
- secondary_ip_addresses: list[Address] | None = Field(
- default=None, alias="SecondaryIPAddresses", description=""
- )
- secondary_i_pv6_addresses: list[Address] | None = Field(
- default=None, alias="SecondaryIPv6Addresses", description=""
- )
- endpoint_id: str | None = Field(
- default=None,
- alias="EndpointID",
-        description='EndpointID uniquely represents a service endpoint in a Sandbox.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
- examples=["b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"],
- )
- gateway: str | None = Field(
- default=None,
- alias="Gateway",
-        description='Gateway address for the default "bridge" network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
- examples=["172.17.0.1"],
- )
- global_i_pv6_address: str | None = Field(
- default=None,
- alias="GlobalIPv6Address",
-        description='Global IPv6 address for the default "bridge" network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
- examples=["2001:db8::5689"],
- )
- global_i_pv6_prefix_len: int | None = Field(
- default=None,
- alias="GlobalIPv6PrefixLen",
-        description='Mask length of the global IPv6 address.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
- examples=[64],
- )
- ip_address: str | None = Field(
- default=None,
- alias="IPAddress",
-        description='IPv4 address for the default "bridge" network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
- examples=["172.17.0.4"],
- )
- ip_prefix_len: int | None = Field(
- default=None,
- alias="IPPrefixLen",
-        description='Mask length of the IPv4 address.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
- examples=[16],
- )
- i_pv6_gateway: str | None = Field(
- default=None,
- alias="IPv6Gateway",
-        description='IPv6 gateway address for this network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
- examples=["2001:db8:2::100"],
- )
- mac_address: str | None = Field(
- default=None,
- alias="MacAddress",
-        description='MAC address for the container on the default "bridge" network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
- examples=["02:42:ac:11:00:04"],
- )
- networks: dict[str, EndpointSettings] | None = Field(
- default=None,
- alias="Networks",
- description="Information about all networks that the container is connected to.\n",
- )
+ bridge: Annotated[
+ str | None,
+ Field(
+ alias="Bridge",
+ description="Name of the network's bridge (for example, `docker0`).",
+ examples=["docker0"],
+ ),
+ ] = None
+ sandbox_id: Annotated[
+ str | None,
+ Field(
+ alias="SandboxID",
+ description="SandboxID uniquely represents a container's network stack.",
+ examples=[
+ "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"
+ ],
+ ),
+ ] = None
+ hairpin_mode: Annotated[
+ bool | None,
+ Field(
+ alias="HairpinMode",
+ description="Indicates if hairpin NAT should be enabled on the virtual interface.\n",
+ examples=[False],
+ ),
+ ] = None
+ link_local_i_pv6_address: Annotated[
+ str | None,
+ Field(
+ alias="LinkLocalIPv6Address",
+ description="IPv6 unicast address using the link-local prefix.",
+ examples=["fe80::42:acff:fe11:1"],
+ ),
+ ] = None
+ link_local_i_pv6_prefix_len: Annotated[
+ int | None,
+ Field(
+ alias="LinkLocalIPv6PrefixLen",
+ description="Prefix length of the IPv6 unicast address.",
+ examples=["64"],
+ ),
+ ] = None
+ ports: Annotated[PortMap | None, Field(alias="Ports")] = None
+ sandbox_key: Annotated[
+ str | None,
+ Field(
+ alias="SandboxKey",
+ description="SandboxKey identifies the sandbox",
+ examples=["/var/run/docker/netns/8ab54b426c38"],
+ ),
+ ] = None
+ secondary_ip_addresses: Annotated[
+ list[Address] | None, Field(alias="SecondaryIPAddresses", description="")
+ ] = None
+ secondary_i_pv6_addresses: Annotated[
+ list[Address] | None, Field(alias="SecondaryIPv6Addresses", description="")
+ ] = None
+ endpoint_id: Annotated[
+ str | None,
+ Field(
+ alias="EndpointID",
+            description='EndpointID uniquely represents a service endpoint in a Sandbox.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
+ examples=[
+ "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ ],
+ ),
+ ] = None
+ gateway: Annotated[
+ str | None,
+ Field(
+ alias="Gateway",
+            description='Gateway address for the default "bridge" network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
+ examples=["172.17.0.1"],
+ ),
+ ] = None
+ global_i_pv6_address: Annotated[
+ str | None,
+ Field(
+ alias="GlobalIPv6Address",
+            description='Global IPv6 address for the default "bridge" network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
+ examples=["2001:db8::5689"],
+ ),
+ ] = None
+ global_i_pv6_prefix_len: Annotated[
+ int | None,
+ Field(
+ alias="GlobalIPv6PrefixLen",
+            description='Mask length of the global IPv6 address.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
+ examples=[64],
+ ),
+ ] = None
+ ip_address: Annotated[
+ str | None,
+ Field(
+ alias="IPAddress",
+            description='IPv4 address for the default "bridge" network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
+ examples=["172.17.0.4"],
+ ),
+ ] = None
+ ip_prefix_len: Annotated[
+ int | None,
+ Field(
+ alias="IPPrefixLen",
+            description='Mask length of the IPv4 address.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
+ examples=[16],
+ ),
+ ] = None
+ i_pv6_gateway: Annotated[
+ str | None,
+ Field(
+ alias="IPv6Gateway",
+            description='IPv6 gateway address for this network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
+ examples=["2001:db8:2::100"],
+ ),
+ ] = None
+ mac_address: Annotated[
+ str | None,
+ Field(
+ alias="MacAddress",
+            description='MAC address for the container on the default "bridge" network.\n\n\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n',
+ examples=["02:42:ac:11:00:04"],
+ ),
+ ] = None
+ networks: Annotated[
+ dict[str, EndpointSettings] | None,
+ Field(
+ alias="Networks",
+ description="Information about all networks that the container is connected to.\n",
+ ),
+ ] = None
class Network(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- name: str | None = Field(
- default=None,
- alias="Name",
- description="Name of the network.\n",
- examples=["my_network"],
- )
- id: str | None = Field(
- default=None,
- alias="Id",
- description="ID that uniquely identifies a network on a single machine.\n",
- examples=["7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"],
- )
- created: str | None = Field(
- default=None,
- alias="Created",
- description="Date and time at which the network was created in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2016-10-19T04:33:30.360899459Z"],
- )
- scope: str | None = Field(
- default=None,
- alias="Scope",
- description="The level at which the network exists (e.g. `swarm` for cluster-wide\nor `local` for machine level)\n",
- examples=["local"],
- )
- driver: str | None = Field(
- default=None,
- alias="Driver",
- description="The name of the driver used to create the network (e.g. `bridge`,\n`overlay`).\n",
- examples=["overlay"],
- )
- enable_i_pv6: bool | None = Field(
- default=None,
- alias="EnableIPv6",
- description="Whether the network was created with IPv6 enabled.\n",
- examples=[False],
- )
- ipam: IPAM | None = Field(default=None, alias="IPAM")
- internal: bool | None = Field(
- default=False,
- alias="Internal",
- description="Whether the network is created to only allow internal networking\nconnectivity.\n",
- examples=[False],
- )
- attachable: bool | None = Field(
- default=False,
- alias="Attachable",
- description="Whether a global / swarm scope network is manually attachable by regular\ncontainers from workers in swarm mode.\n",
- examples=[False],
- )
- ingress: bool | None = Field(
- default=False,
- alias="Ingress",
- description="Whether the network is providing the routing-mesh for the swarm cluster.\n",
- examples=[False],
- )
- config_from: ConfigReference | None = Field(default=None, alias="ConfigFrom")
- config_only: bool | None = Field(
- default=False,
- alias="ConfigOnly",
- description="Whether the network is a config-only network. Config-only networks are\nplaceholder networks for network configurations to be used by other\nnetworks. Config-only networks cannot be used directly to run containers\nor services.\n",
- )
- containers: dict[str, NetworkContainer] | None = Field(
- default=None,
- alias="Containers",
- description="Contains endpoints attached to the network.\n",
- examples=[
- {
- "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": {
- "Name": "test",
- "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a",
- "MacAddress": "02:42:ac:13:00:02",
- "IPv4Address": "172.19.0.2/16",
- "IPv6Address": "",
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name", description="Name of the network.\n", examples=["my_network"]
+ ),
+ ] = None
+ id: Annotated[
+ str | None,
+ Field(
+ alias="Id",
+ description="ID that uniquely identifies a network on a single machine.\n",
+ examples=[
+ "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+ ],
+ ),
+ ] = None
+ created: Annotated[
+ str | None,
+ Field(
+ alias="Created",
+ description="Date and time at which the network was created in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2016-10-19T04:33:30.360899459Z"],
+ ),
+ ] = None
+ scope: Annotated[
+ str | None,
+ Field(
+ alias="Scope",
+ description="The level at which the network exists (e.g. `swarm` for cluster-wide\nor `local` for machine level)\n",
+ examples=["local"],
+ ),
+ ] = None
+ driver: Annotated[
+ str | None,
+ Field(
+ alias="Driver",
+ description="The name of the driver used to create the network (e.g. `bridge`,\n`overlay`).\n",
+ examples=["overlay"],
+ ),
+ ] = None
+ enable_i_pv6: Annotated[
+ bool | None,
+ Field(
+ alias="EnableIPv6",
+ description="Whether the network was created with IPv6 enabled.\n",
+ examples=[False],
+ ),
+ ] = None
+ ipam: Annotated[IPAM | None, Field(alias="IPAM")] = None
+ internal: Annotated[
+ bool | None,
+ Field(
+ alias="Internal",
+ description="Whether the network is created to only allow internal networking\nconnectivity.\n",
+ examples=[False],
+ ),
+ ] = False
+ attachable: Annotated[
+ bool | None,
+ Field(
+ alias="Attachable",
+ description="Whether a global / swarm scope network is manually attachable by regular\ncontainers from workers in swarm mode.\n",
+ examples=[False],
+ ),
+ ] = False
+ ingress: Annotated[
+ bool | None,
+ Field(
+ alias="Ingress",
+ description="Whether the network is providing the routing-mesh for the swarm cluster.\n",
+ examples=[False],
+ ),
+ ] = False
+ config_from: Annotated[ConfigReference | None, Field(alias="ConfigFrom")] = None
+ config_only: Annotated[
+ bool | None,
+ Field(
+ alias="ConfigOnly",
+ description="Whether the network is a config-only network. Config-only networks are\nplaceholder networks for network configurations to be used by other\nnetworks. Config-only networks cannot be used directly to run containers\nor services.\n",
+ ),
+ ] = False
+ containers: Annotated[
+ dict[str, NetworkContainer] | None,
+ Field(
+ alias="Containers",
+ description="Contains endpoints attached to the network.\n",
+ examples=[
+ {
+ "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": {
+ "Name": "test",
+ "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a",
+ "MacAddress": "02:42:ac:13:00:02",
+ "IPv4Address": "172.19.0.2/16",
+ "IPv6Address": "",
+ }
}
- }
- ],
- )
- options: dict[str, str] | None = Field(
- default=None,
- alias="Options",
- description="Network-specific options uses when creating the network.\n",
- examples=[
- {
- "com.docker.network.bridge.default_bridge": "true",
- "com.docker.network.bridge.enable_icc": "true",
- "com.docker.network.bridge.enable_ip_masquerade": "true",
- "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
- "com.docker.network.bridge.name": "docker0",
- "com.docker.network.driver.mtu": "1500",
- }
- ],
- )
- labels: dict[str, str] | None = Field(
- default=None,
- alias="Labels",
- description="User-defined key/value metadata.",
- examples=[
- {
- "com.example.some-label": "some-value",
- "com.example.some-other-label": "some-other-value",
- }
- ],
- )
- peers: list[PeerInfo] | None = Field(
- default=None,
- alias="Peers",
- description="List of peer nodes for an overlay network. This field is only present\nfor overlay networks, and omitted for other network types.\n",
- )
+ ],
+ ),
+ ] = None
+ options: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Options",
+ description="Network-specific options uses when creating the network.\n",
+ examples=[
+ {
+ "com.docker.network.bridge.default_bridge": "true",
+ "com.docker.network.bridge.enable_icc": "true",
+ "com.docker.network.bridge.enable_ip_masquerade": "true",
+ "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+ "com.docker.network.bridge.name": "docker0",
+ "com.docker.network.driver.mtu": "1500",
+ }
+ ],
+ ),
+ ] = None
+ labels: Annotated[
+ dict[str, str] | None,
+ Field(
+ alias="Labels",
+ description="User-defined key/value metadata.",
+ examples=[
+ {
+ "com.example.some-label": "some-value",
+ "com.example.some-other-label": "some-other-value",
+ }
+ ],
+ ),
+ ] = None
+ peers: Annotated[
+ list[PeerInfo] | None,
+ Field(
+ alias="Peers",
+ description="List of peer nodes for an overlay network. This field is only present\nfor overlay networks, and omitted for other network types.\n",
+ ),
+ ] = None
class Node(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(default=None, alias="ID", examples=["24ifsmvkjbyhk"])
- version: ObjectVersion | None = Field(default=None, alias="Version")
- created_at: str | None = Field(
- default=None,
- alias="CreatedAt",
- description="Date and time at which the node was added to the swarm in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2016-08-18T10:44:24.496525531Z"],
- )
- updated_at: str | None = Field(
- default=None,
- alias="UpdatedAt",
- description="Date and time at which the node was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- examples=["2017-08-09T07:09:37.632105588Z"],
- )
- spec: NodeSpec | None = Field(default=None, alias="Spec")
- description: NodeDescription | None = Field(default=None, alias="Description")
- status: NodeStatus | None = Field(default=None, alias="Status")
- manager_status: ManagerStatus | None = Field(default=None, alias="ManagerStatus")
+ id: Annotated[str | None, Field(alias="ID", examples=["24ifsmvkjbyhk"])] = None
+ version: Annotated[ObjectVersion | None, Field(alias="Version")] = None
+ created_at: Annotated[
+ str | None,
+ Field(
+ alias="CreatedAt",
+ description="Date and time at which the node was added to the swarm in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2016-08-18T10:44:24.496525531Z"],
+ ),
+ ] = None
+ updated_at: Annotated[
+ str | None,
+ Field(
+ alias="UpdatedAt",
+ description="Date and time at which the node was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
+ examples=["2017-08-09T07:09:37.632105588Z"],
+ ),
+ ] = None
+ spec: Annotated[NodeSpec | None, Field(alias="Spec")] = None
+ description: Annotated[NodeDescription | None, Field(alias="Description")] = None
+ status: Annotated[NodeStatus | None, Field(alias="Status")] = None
+ manager_status: Annotated[ManagerStatus | None, Field(alias="ManagerStatus")] = None
class SystemInfo(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
- id: str | None = Field(
- default=None,
- alias="ID",
-        description="Unique identifier of the daemon.\n\n\n\n> **Note**: The format of the ID itself is not part of the API, and\n> should not be considered stable.\n",
- examples=["7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"],
- )
- containers: int | None = Field(
- default=None,
- alias="Containers",
- description="Total number of containers on the host.",
- examples=[14],
- )
- containers_running: int | None = Field(
- default=None,
- alias="ContainersRunning",
- description='Number of containers with status `"running"`.\n',
- examples=[3],
- )
- containers_paused: int | None = Field(
- default=None,
- alias="ContainersPaused",
- description='Number of containers with status `"paused"`.\n',
- examples=[1],
- )
- containers_stopped: int | None = Field(
- default=None,
- alias="ContainersStopped",
- description='Number of containers with status `"stopped"`.\n',
- examples=[10],
- )
- images: int | None = Field(
- default=None,
- alias="Images",
- description="Total number of images on the host.\n\nBoth _tagged_ and _untagged_ (dangling) images are counted.\n",
- examples=[508],
- )
- driver: str | None = Field(
- default=None,
- alias="Driver",
- description="Name of the storage driver in use.",
- examples=["overlay2"],
- )
- driver_status: list[list[str]] | None = Field(
- default=None,
- alias="DriverStatus",
-        description='Information specific to the storage driver, provided as\n"label" / "value" pairs.\n\nThis information is provided by the storage driver, and formatted\nin a way consistent with the output of `docker info` on the command\nline.\n\n\n\n> **Note**: The information returned in this field, including the\n> formatting of values and labels, should not be considered stable,\n> and may change without notice.\n',
- examples=[
- [
- ["Backing Filesystem", "extfs"],
- ["Supports d_type", "true"],
- ["Native Overlay Diff", "true"],
- ]
- ],
- )
- docker_root_dir: str | None = Field(
- default=None,
- alias="DockerRootDir",
- description="Root directory of persistent Docker state.\n\nDefaults to `/var/lib/docker` on Linux, and `C:\\ProgramData\\docker`\non Windows.\n",
- examples=["/var/lib/docker"],
- )
- plugins: PluginsInfo | None = Field(default=None, alias="Plugins")
- memory_limit: bool | None = Field(
- default=None,
- alias="MemoryLimit",
- description="Indicates if the host has memory limit support enabled.",
- examples=[True],
- )
- swap_limit: bool | None = Field(
- default=None,
- alias="SwapLimit",
- description="Indicates if the host has memory swap limit support enabled.",
- examples=[True],
- )
- kernel_memory: bool | None = Field(
- default=None,
- alias="KernelMemory",
-        description="Indicates if the host has kernel memory limit support enabled.\n\n\n\n> **Deprecated**: This field is deprecated as the kernel 5.4 deprecated\n> `kmem.limit_in_bytes`.\n",
- examples=[True],
- )
- kernel_memory_tcp: bool | None = Field(
- default=None,
- alias="KernelMemoryTCP",
- description="Indicates if the host has kernel memory TCP limit support enabled.\n\nKernel memory TCP limits are not supported when using cgroups v2, which\ndoes not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup.\n",
- examples=[True],
- )
- cpu_cfs_period: bool | None = Field(
- default=None,
- alias="CpuCfsPeriod",
- description="Indicates if CPU CFS(Completely Fair Scheduler) period is supported by\nthe host.\n",
- examples=[True],
- )
- cpu_cfs_quota: bool | None = Field(
- default=None,
- alias="CpuCfsQuota",
- description="Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by\nthe host.\n",
- examples=[True],
- )
- cpu_shares: bool | None = Field(
- default=None,
- alias="CPUShares",
- description="Indicates if CPU Shares limiting is supported by the host.\n",
- examples=[True],
- )
- cpu_set: bool | None = Field(
- default=None,
- alias="CPUSet",
- description="Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.\n\nSee [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)\n",
- examples=[True],
- )
- pids_limit: bool | None = Field(
- default=None,
- alias="PidsLimit",
- description="Indicates if the host kernel has PID limit support enabled.",
- examples=[True],
- )
- oom_kill_disable: bool | None = Field(
- default=None,
- alias="OomKillDisable",
- description="Indicates if OOM killer disable is supported on the host.",
- )
- i_pv4_forwarding: bool | None = Field(
- default=None,
- alias="IPv4Forwarding",
- description="Indicates IPv4 forwarding is enabled.",
- examples=[True],
- )
- bridge_nf_iptables: bool | None = Field(
- default=None,
- alias="BridgeNfIptables",
- description="Indicates if `bridge-nf-call-iptables` is available on the host.",
- examples=[True],
- )
- bridge_nf_ip6tables: bool | None = Field(
- default=None,
- alias="BridgeNfIp6tables",
- description="Indicates if `bridge-nf-call-ip6tables` is available on the host.",
- examples=[True],
- )
- debug: bool | None = Field(
- default=None,
- alias="Debug",
- description="Indicates if the daemon is running in debug-mode / with debug-level\nlogging enabled.\n",
- examples=[True],
- )
- n_fd: int | None = Field(
- default=None,
- alias="NFd",
- description="The total number of file Descriptors in use by the daemon process.\n\nThis information is only returned if debug-mode is enabled.\n",
- examples=[64],
- )
- n_goroutines: int | None = Field(
- default=None,
- alias="NGoroutines",
- description="The number of goroutines that currently exist.\n\nThis information is only returned if debug-mode is enabled.\n",
- examples=[174],
- )
- system_time: str | None = Field(
- default=None,
- alias="SystemTime",
- description="Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat with nano-seconds.\n",
- examples=["2017-08-08T20:28:29.06202363Z"],
- )
- logging_driver: str | None = Field(
- default=None,
- alias="LoggingDriver",
- description="The logging driver to use as a default for new containers.\n",
- )
- cgroup_driver: CgroupDriver | None = Field(
- default=CgroupDriver.cgroupfs,
- alias="CgroupDriver",
- description="The driver to use for managing cgroups.\n",
- examples=["cgroupfs"],
- )
- cgroup_version: CgroupVersion | None = Field(
- default=CgroupVersion.field_1,
- alias="CgroupVersion",
- description="The version of the cgroup.\n",
- examples=["1"],
- )
- n_events_listener: int | None = Field(
- default=None,
- alias="NEventsListener",
- description="Number of event listeners subscribed.",
- examples=[30],
- )
- kernel_version: str | None = Field(
- default=None,
- alias="KernelVersion",
- description='Kernel version of the host.\n\nOn Linux, this information obtained from `uname`. On Windows this\ninformation is queried from the HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\\nregistry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.\n',
- examples=["4.9.38-moby"],
- )
- operating_system: str | None = Field(
- default=None,
- alias="OperatingSystem",
- description='Name of the host\'s operating system, for example: "Ubuntu 16.04.2 LTS"\nor "Windows Server 2016 Datacenter"\n',
- examples=["Alpine Linux v3.5"],
- )
- os_version: str | None = Field(
- default=None,
- alias="OSVersion",
-        description="Version of the host's operating system\n\n\n\n> **Note**: The information returned in this field, including its\n> very existence, and the formatting of values, should not be considered\n> stable, and may change without notice.\n",
- examples=["16.04"],
- )
- os_type: str | None = Field(
- default=None,
- alias="OSType",
- description='Generic type of the operating system of the host, as returned by the\nGo runtime (`GOOS`).\n\nCurrently returned values are "linux" and "windows". A full list of\npossible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).\n',
- examples=["linux"],
- )
- architecture: str | None = Field(
- default=None,
- alias="Architecture",
- description="Hardware architecture of the host, as returned by the Go runtime\n(`GOARCH`).\n\nA full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).\n",
- examples=["x86_64"],
- )
- ncpu: int | None = Field(
- default=None,
- alias="NCPU",
- description="The number of logical CPUs usable by the daemon.\n\nThe number of available CPUs is checked by querying the operating\nsystem when the daemon starts. Changes to operating system CPU\nallocation after the daemon is started are not reflected.\n",
- examples=[4],
- )
- mem_total: int | None = Field(
- default=None,
- alias="MemTotal",
- description="Total amount of physical memory available on the host, in bytes.\n",
- examples=[2095882240],
- )
- index_server_address: str | None = Field(
- default="https://index.docker.io/v1/",
- alias="IndexServerAddress",
- description="Address / URL of the index server that is used for image search,\nand as a default for user authentication for Docker Hub and Docker Cloud.\n",
- examples=["https://index.docker.io/v1/"],
- )
- registry_config: RegistryServiceConfig | None = Field(
- default=None, alias="RegistryConfig"
- )
- generic_resources: GenericResources | None = Field(
- default=None, alias="GenericResources"
- )
- http_proxy: str | None = Field(
- default=None,
- alias="HttpProxy",
- description="HTTP-proxy configured for the daemon. This value is obtained from the\n[`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\nCredentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL\nare masked in the API response.\n\nContainers do not automatically inherit this configuration.\n",
- examples=["http://xxxxx:xxxxx@proxy.corp.example.com:8080"],
- )
- https_proxy: str | None = Field(
- default=None,
- alias="HttpsProxy",
- description="HTTPS-proxy configured for the daemon. This value is obtained from the\n[`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\nCredentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL\nare masked in the API response.\n\nContainers do not automatically inherit this configuration.\n",
- examples=["https://xxxxx:xxxxx@proxy.corp.example.com:4443"],
- )
- no_proxy: str | None = Field(
- default=None,
- alias="NoProxy",
- description="Comma-separated list of domain extensions for which no proxy should be\nused. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)\nenvironment variable.\n\nContainers do not automatically inherit this configuration.\n",
- examples=["*.local, 169.254/16"],
- )
- name: str | None = Field(
- default=None,
- alias="Name",
- description="Hostname of the host.",
- examples=["node5.corp.example.com"],
- )
- labels: list[str] | None = Field(
- default=None,
- alias="Labels",
-        description="User-defined labels (key/value metadata) as set on the daemon.\n\n\n\n> **Note**: When part of a Swarm, nodes can both have _daemon_ labels,\n> set through the daemon configuration, and _node_ labels, set from a\n> manager node in the Swarm. Node labels are not included in this\n> field. Node labels can be retrieved using the `/nodes/(id)` endpoint\n> on a manager node in the Swarm.\n",
- examples=[["storage=ssd", "production"]],
- )
- experimental_build: bool | None = Field(
- default=None,
- alias="ExperimentalBuild",
- description="Indicates if experimental features are enabled on the daemon.\n",
- examples=[True],
- )
- server_version: str | None = Field(
- default=None,
- alias="ServerVersion",
- description="Version string of the daemon.\n",
- examples=["20.10.25"],
- )
- cluster_store: str | None = Field(
- default=None,
- alias="ClusterStore",
-        description="URL of the distributed storage backend.\n\n\nThe storage backend is used for multihost networking (to store\nnetwork and endpoint information) and by the node discovery mechanism.\n\n\n\n> **Deprecated**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n",
- examples=["consul://consul.corp.example.com:8600/some/path"],
- )
- cluster_advertise: str | None = Field(
- default=None,
- alias="ClusterAdvertise",
-        description="The network endpoint that the Engine advertises for the purpose of\nnode discovery. ClusterAdvertise is a `host:port` combination on which\nthe daemon is reachable by other hosts.\n\n\n\n> **Deprecated**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n",
- examples=["node5.corp.example.com:8000"],
- )
- runtimes: dict[str, Runtime] | None = Field(
- default_factory=lambda: Runtime.model_validate({"runc": {"path": "runc"}}),
- alias="Runtimes",
- description='List of [OCI compliant](https://github.com/opencontainers/runtime-spec)\nruntimes configured on the daemon. Keys hold the "name" used to\nreference the runtime.\n\nThe Docker daemon relies on an OCI compliant runtime (invoked via the\n`containerd` daemon) as its interface to the Linux kernel namespaces,\ncgroups, and SELinux.\n\nThe default runtime is `runc`, and automatically configured. Additional\nruntimes can be configured by the user and will be listed here.\n',
- examples=[
- {
- "runc": {"path": "runc"},
- "runc-master": {"path": "/go/bin/runc"},
- "custom": {
- "path": "/usr/local/bin/my-oci-runtime",
- "runtimeArgs": ["--debug", "--systemd-cgroup=false"],
- },
- }
- ],
- )
- default_runtime: str | None = Field(
- default="runc",
- alias="DefaultRuntime",
- description="Name of the default OCI runtime that is used when starting containers.\n\nThe default can be overridden per-container at create time.\n",
- examples=["runc"],
- )
- swarm: SwarmInfo | None = Field(default=None, alias="Swarm")
- live_restore_enabled: bool | None = Field(
- default=False,
- alias="LiveRestoreEnabled",
- description="Indicates if live restore is enabled.\n\nIf enabled, containers are kept running when the daemon is shutdown\nor upon daemon start if running containers are detected.\n",
- examples=[False],
- )
- isolation: Isolation2 | None = Field(
- default=Isolation2.default,
- alias="Isolation",
- description="Represents the isolation technology to use as a default for containers.\nThe supported values are platform-specific.\n\nIf no isolation value is specified on daemon start, on Windows client,\nthe default is `hyperv`, and on Windows server, the default is `process`.\n\nThis option is currently not used on other platforms.\n",
- )
- init_binary: str | None = Field(
- default=None,
- alias="InitBinary",
- description="Name and, optional, path of the `docker-init` binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n",
- examples=["docker-init"],
- )
- containerd_commit: Commit | None = Field(default=None, alias="ContainerdCommit")
- runc_commit: Commit | None = Field(default=None, alias="RuncCommit")
- init_commit: Commit | None = Field(default=None, alias="InitCommit")
- security_options: list[str] | None = Field(
- default=None,
- alias="SecurityOptions",
- description="List of security features that are enabled on the daemon, such as\napparmor, seccomp, SELinux, user-namespaces (userns), and rootless.\n\nAdditional configuration options for each security feature may\nbe present, and are included as a comma-separated list of key/value\npairs.\n",
- examples=[
- [
- "name=apparmor",
- "name=seccomp,profile=default",
- "name=selinux",
- "name=userns",
- "name=rootless",
- ]
- ],
- )
- product_license: str | None = Field(
- default=None,
- alias="ProductLicense",
- description="Reports a summary of the product license on the daemon.\n\nIf a commercial license has been applied to the daemon, information\nsuch as number of nodes, and expiration are included.\n",
- examples=["Community Engine"],
- )
- default_address_pools: list[DefaultAddressPool] | None = Field(
- default=None,
- alias="DefaultAddressPools",
- description='List of custom default address pools for local networks, which can be\nspecified in the daemon.json file or dockerd option.\n\nExample: a Base "10.10.0.0/16" with Size 24 will define the set of 256\n10.10.[0-255].0/24 address pools.\n',
- )
- warnings: list[str] | None = Field(
- default=None,
- alias="Warnings",
- description="List of warnings / informational messages about missing features, or\nissues related to the daemon configuration.\n\nThese messages can be printed by the client as information to the user.\n",
- examples=[
- [
- "WARNING: No memory limit support",
- "WARNING: bridge-nf-call-iptables is disabled",
- "WARNING: bridge-nf-call-ip6tables is disabled",
- ]
- ],
- )
+ id: Annotated[
+ str | None,
+ Field(
+ alias="ID",
+            description="Unique identifier of the daemon.\n\n\n\n> **Note**: The format of the ID itself is not part of the API, and\n> should not be considered stable.\n",
+ examples=["7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"],
+ ),
+ ] = None
+ containers: Annotated[
+ int | None,
+ Field(
+ alias="Containers",
+ description="Total number of containers on the host.",
+ examples=[14],
+ ),
+ ] = None
+ containers_running: Annotated[
+ int | None,
+ Field(
+ alias="ContainersRunning",
+ description='Number of containers with status `"running"`.\n',
+ examples=[3],
+ ),
+ ] = None
+ containers_paused: Annotated[
+ int | None,
+ Field(
+ alias="ContainersPaused",
+ description='Number of containers with status `"paused"`.\n',
+ examples=[1],
+ ),
+ ] = None
+ containers_stopped: Annotated[
+ int | None,
+ Field(
+ alias="ContainersStopped",
+ description='Number of containers with status `"stopped"`.\n',
+ examples=[10],
+ ),
+ ] = None
+ images: Annotated[
+ int | None,
+ Field(
+ alias="Images",
+ description="Total number of images on the host.\n\nBoth _tagged_ and _untagged_ (dangling) images are counted.\n",
+ examples=[508],
+ ),
+ ] = None
+ driver: Annotated[
+ str | None,
+ Field(
+ alias="Driver",
+ description="Name of the storage driver in use.",
+ examples=["overlay2"],
+ ),
+ ] = None
+ driver_status: Annotated[
+ list[list[str]] | None,
+ Field(
+ alias="DriverStatus",
+            description='Information specific to the storage driver, provided as\n"label" / "value" pairs.\n\nThis information is provided by the storage driver, and formatted\nin a way consistent with the output of `docker info` on the command\nline.\n\n\n\n> **Note**: The information returned in this field, including the\n> formatting of values and labels, should not be considered stable,\n> and may change without notice.\n',
+ examples=[
+ [
+ ["Backing Filesystem", "extfs"],
+ ["Supports d_type", "true"],
+ ["Native Overlay Diff", "true"],
+ ]
+ ],
+ ),
+ ] = None
+ docker_root_dir: Annotated[
+ str | None,
+ Field(
+ alias="DockerRootDir",
+ description="Root directory of persistent Docker state.\n\nDefaults to `/var/lib/docker` on Linux, and `C:\\ProgramData\\docker`\non Windows.\n",
+ examples=["/var/lib/docker"],
+ ),
+ ] = None
+ plugins: Annotated[PluginsInfo | None, Field(alias="Plugins")] = None
+ memory_limit: Annotated[
+ bool | None,
+ Field(
+ alias="MemoryLimit",
+ description="Indicates if the host has memory limit support enabled.",
+ examples=[True],
+ ),
+ ] = None
+ swap_limit: Annotated[
+ bool | None,
+ Field(
+ alias="SwapLimit",
+ description="Indicates if the host has memory swap limit support enabled.",
+ examples=[True],
+ ),
+ ] = None
+ kernel_memory: Annotated[
+ bool | None,
+ Field(
+ alias="KernelMemory",
+            description="Indicates if the host has kernel memory limit support enabled.\n\n\n\n> **Deprecated**: This field is deprecated as the kernel 5.4 deprecated\n> `kmem.limit_in_bytes`.\n",
+ examples=[True],
+ ),
+ ] = None
+ kernel_memory_tcp: Annotated[
+ bool | None,
+ Field(
+ alias="KernelMemoryTCP",
+ description="Indicates if the host has kernel memory TCP limit support enabled.\n\nKernel memory TCP limits are not supported when using cgroups v2, which\ndoes not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup.\n",
+ examples=[True],
+ ),
+ ] = None
+ cpu_cfs_period: Annotated[
+ bool | None,
+ Field(
+ alias="CpuCfsPeriod",
+ description="Indicates if CPU CFS(Completely Fair Scheduler) period is supported by\nthe host.\n",
+ examples=[True],
+ ),
+ ] = None
+ cpu_cfs_quota: Annotated[
+ bool | None,
+ Field(
+ alias="CpuCfsQuota",
+ description="Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by\nthe host.\n",
+ examples=[True],
+ ),
+ ] = None
+ cpu_shares: Annotated[
+ bool | None,
+ Field(
+ alias="CPUShares",
+ description="Indicates if CPU Shares limiting is supported by the host.\n",
+ examples=[True],
+ ),
+ ] = None
+ cpu_set: Annotated[
+ bool | None,
+ Field(
+ alias="CPUSet",
+ description="Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.\n\nSee [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)\n",
+ examples=[True],
+ ),
+ ] = None
+ pids_limit: Annotated[
+ bool | None,
+ Field(
+ alias="PidsLimit",
+ description="Indicates if the host kernel has PID limit support enabled.",
+ examples=[True],
+ ),
+ ] = None
+ oom_kill_disable: Annotated[
+ bool | None,
+ Field(
+ alias="OomKillDisable",
+ description="Indicates if OOM killer disable is supported on the host.",
+ ),
+ ] = None
+ i_pv4_forwarding: Annotated[
+ bool | None,
+ Field(
+ alias="IPv4Forwarding",
+ description="Indicates IPv4 forwarding is enabled.",
+ examples=[True],
+ ),
+ ] = None
+ bridge_nf_iptables: Annotated[
+ bool | None,
+ Field(
+ alias="BridgeNfIptables",
+ description="Indicates if `bridge-nf-call-iptables` is available on the host.",
+ examples=[True],
+ ),
+ ] = None
+ bridge_nf_ip6tables: Annotated[
+ bool | None,
+ Field(
+ alias="BridgeNfIp6tables",
+ description="Indicates if `bridge-nf-call-ip6tables` is available on the host.",
+ examples=[True],
+ ),
+ ] = None
+ debug: Annotated[
+ bool | None,
+ Field(
+ alias="Debug",
+ description="Indicates if the daemon is running in debug-mode / with debug-level\nlogging enabled.\n",
+ examples=[True],
+ ),
+ ] = None
+ n_fd: Annotated[
+ int | None,
+ Field(
+ alias="NFd",
+ description="The total number of file Descriptors in use by the daemon process.\n\nThis information is only returned if debug-mode is enabled.\n",
+ examples=[64],
+ ),
+ ] = None
+ n_goroutines: Annotated[
+ int | None,
+ Field(
+ alias="NGoroutines",
+ description="The number of goroutines that currently exist.\n\nThis information is only returned if debug-mode is enabled.\n",
+ examples=[174],
+ ),
+ ] = None
+ system_time: Annotated[
+ str | None,
+ Field(
+ alias="SystemTime",
+ description="Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat with nano-seconds.\n",
+ examples=["2017-08-08T20:28:29.06202363Z"],
+ ),
+ ] = None
+ logging_driver: Annotated[
+ str | None,
+ Field(
+ alias="LoggingDriver",
+ description="The logging driver to use as a default for new containers.\n",
+ ),
+ ] = None
+ cgroup_driver: Annotated[
+ CgroupDriver | None,
+ Field(
+ alias="CgroupDriver",
+ description="The driver to use for managing cgroups.\n",
+ examples=["cgroupfs"],
+ ),
+ ] = CgroupDriver.cgroupfs
+ cgroup_version: Annotated[
+ CgroupVersion | None,
+ Field(
+ alias="CgroupVersion",
+ description="The version of the cgroup.\n",
+ examples=["1"],
+ ),
+ ] = CgroupVersion.field_1
+ n_events_listener: Annotated[
+ int | None,
+ Field(
+ alias="NEventsListener",
+ description="Number of event listeners subscribed.",
+ examples=[30],
+ ),
+ ] = None
+ kernel_version: Annotated[
+ str | None,
+ Field(
+ alias="KernelVersion",
+ description='Kernel version of the host.\n\nOn Linux, this information obtained from `uname`. On Windows this\ninformation is queried from the HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\\nregistry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.\n',
+ examples=["4.9.38-moby"],
+ ),
+ ] = None
+ operating_system: Annotated[
+ str | None,
+ Field(
+ alias="OperatingSystem",
+ description='Name of the host\'s operating system, for example: "Ubuntu 16.04.2 LTS"\nor "Windows Server 2016 Datacenter"\n',
+ examples=["Alpine Linux v3.5"],
+ ),
+ ] = None
+ os_version: Annotated[
+ str | None,
+ Field(
+ alias="OSVersion",
+            description="Version of the host's operating system\n\n\n\n> **Note**: The information returned in this field, including its\n> very existence, and the formatting of values, should not be considered\n> stable, and may change without notice.\n",
+ examples=["16.04"],
+ ),
+ ] = None
+ os_type: Annotated[
+ str | None,
+ Field(
+ alias="OSType",
+ description='Generic type of the operating system of the host, as returned by the\nGo runtime (`GOOS`).\n\nCurrently returned values are "linux" and "windows". A full list of\npossible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).\n',
+ examples=["linux"],
+ ),
+ ] = None
+ architecture: Annotated[
+ str | None,
+ Field(
+ alias="Architecture",
+ description="Hardware architecture of the host, as returned by the Go runtime\n(`GOARCH`).\n\nA full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).\n",
+ examples=["x86_64"],
+ ),
+ ] = None
+ ncpu: Annotated[
+ int | None,
+ Field(
+ alias="NCPU",
+ description="The number of logical CPUs usable by the daemon.\n\nThe number of available CPUs is checked by querying the operating\nsystem when the daemon starts. Changes to operating system CPU\nallocation after the daemon is started are not reflected.\n",
+ examples=[4],
+ ),
+ ] = None
+ mem_total: Annotated[
+ int | None,
+ Field(
+ alias="MemTotal",
+ description="Total amount of physical memory available on the host, in bytes.\n",
+ examples=[2095882240],
+ ),
+ ] = None
+ index_server_address: Annotated[
+ str | None,
+ Field(
+ alias="IndexServerAddress",
+ description="Address / URL of the index server that is used for image search,\nand as a default for user authentication for Docker Hub and Docker Cloud.\n",
+ examples=["https://index.docker.io/v1/"],
+ ),
+ ] = "https://index.docker.io/v1/"
+ registry_config: Annotated[
+ RegistryServiceConfig | None, Field(alias="RegistryConfig")
+ ] = None
+ generic_resources: Annotated[
+ GenericResources | None, Field(alias="GenericResources")
+ ] = None
+ http_proxy: Annotated[
+ str | None,
+ Field(
+ alias="HttpProxy",
+ description="HTTP-proxy configured for the daemon. This value is obtained from the\n[`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\nCredentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL\nare masked in the API response.\n\nContainers do not automatically inherit this configuration.\n",
+ examples=["http://xxxxx:xxxxx@proxy.corp.example.com:8080"],
+ ),
+ ] = None
+ https_proxy: Annotated[
+ str | None,
+ Field(
+ alias="HttpsProxy",
+ description="HTTPS-proxy configured for the daemon. This value is obtained from the\n[`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\nCredentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL\nare masked in the API response.\n\nContainers do not automatically inherit this configuration.\n",
+ examples=["https://xxxxx:xxxxx@proxy.corp.example.com:4443"],
+ ),
+ ] = None
+ no_proxy: Annotated[
+ str | None,
+ Field(
+ alias="NoProxy",
+ description="Comma-separated list of domain extensions for which no proxy should be\nused. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)\nenvironment variable.\n\nContainers do not automatically inherit this configuration.\n",
+ examples=["*.local, 169.254/16"],
+ ),
+ ] = None
+ name: Annotated[
+ str | None,
+ Field(
+ alias="Name",
+ description="Hostname of the host.",
+ examples=["node5.corp.example.com"],
+ ),
+ ] = None
+ labels: Annotated[
+ list[str] | None,
+ Field(
+ alias="Labels",
+ description="User-defined labels (key/value metadata) as set on the daemon.\n\n
\n\n> **Note**: When part of a Swarm, nodes can both have _daemon_ labels,\n> set through the daemon configuration, and _node_ labels, set from a\n> manager node in the Swarm. Node labels are not included in this\n> field. Node labels can be retrieved using the `/nodes/(id)` endpoint\n> on a manager node in the Swarm.\n",
+ examples=[["storage=ssd", "production"]],
+ ),
+ ] = None
+ experimental_build: Annotated[
+ bool | None,
+ Field(
+ alias="ExperimentalBuild",
+ description="Indicates if experimental features are enabled on the daemon.\n",
+ examples=[True],
+ ),
+ ] = None
+ server_version: Annotated[
+ str | None,
+ Field(
+ alias="ServerVersion",
+ description="Version string of the daemon.\n",
+ examples=["20.10.25"],
+ ),
+ ] = None
+ cluster_store: Annotated[
+ str | None,
+ Field(
+ alias="ClusterStore",
+ description="URL of the distributed storage backend.\n\n\nThe storage backend is used for multihost networking (to store\nnetwork and endpoint information) and by the node discovery mechanism.\n\n
\n\n> **Deprecated**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n",
+ examples=["consul://consul.corp.example.com:8600/some/path"],
+ ),
+ ] = None
+ cluster_advertise: Annotated[
+ str | None,
+ Field(
+ alias="ClusterAdvertise",
+ description="The network endpoint that the Engine advertises for the purpose of\nnode discovery. ClusterAdvertise is a `host:port` combination on which\nthe daemon is reachable by other hosts.\n\n
\n\n> **Deprecated**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n",
+ examples=["node5.corp.example.com:8000"],
+ ),
+ ] = None
+ runtimes: Annotated[
+ dict[str, Runtime] | None,
+ Field(
+ alias="Runtimes",
+ description='List of [OCI compliant](https://github.com/opencontainers/runtime-spec)\nruntimes configured on the daemon. Keys hold the "name" used to\nreference the runtime.\n\nThe Docker daemon relies on an OCI compliant runtime (invoked via the\n`containerd` daemon) as its interface to the Linux kernel namespaces,\ncgroups, and SELinux.\n\nThe default runtime is `runc`, and automatically configured. Additional\nruntimes can be configured by the user and will be listed here.\n',
+ examples=[
+ {
+ "runc": {"path": "runc"},
+ "runc-master": {"path": "/go/bin/runc"},
+ "custom": {
+ "path": "/usr/local/bin/my-oci-runtime",
+ "runtimeArgs": ["--debug", "--systemd-cgroup=false"],
+ },
+ }
+ ],
+ ),
+ ] = {
+ "runc": {"path": "runc"} # type: ignore[dict-item]
+ }
+ default_runtime: Annotated[
+ str | None,
+ Field(
+ alias="DefaultRuntime",
+ description="Name of the default OCI runtime that is used when starting containers.\n\nThe default can be overridden per-container at create time.\n",
+ examples=["runc"],
+ ),
+ ] = "runc"
+ swarm: Annotated[SwarmInfo | None, Field(alias="Swarm")] = None
+ live_restore_enabled: Annotated[
+ bool | None,
+ Field(
+ alias="LiveRestoreEnabled",
+ description="Indicates if live restore is enabled.\n\nIf enabled, containers are kept running when the daemon is shutdown\nor upon daemon start if running containers are detected.\n",
+ examples=[False],
+ ),
+ ] = False
+ isolation: Annotated[
+ Isolation2 | None,
+ Field(
+ alias="Isolation",
+ description="Represents the isolation technology to use as a default for containers.\nThe supported values are platform-specific.\n\nIf no isolation value is specified on daemon start, on Windows client,\nthe default is `hyperv`, and on Windows server, the default is `process`.\n\nThis option is currently not used on other platforms.\n",
+ ),
+ ] = Isolation2.default
+ init_binary: Annotated[
+ str | None,
+ Field(
+ alias="InitBinary",
+ description="Name and, optional, path of the `docker-init` binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n",
+ examples=["docker-init"],
+ ),
+ ] = None
+ containerd_commit: Annotated[Commit | None, Field(alias="ContainerdCommit")] = None
+ runc_commit: Annotated[Commit | None, Field(alias="RuncCommit")] = None
+ init_commit: Annotated[Commit | None, Field(alias="InitCommit")] = None
+ security_options: Annotated[
+ list[str] | None,
+ Field(
+ alias="SecurityOptions",
+ description="List of security features that are enabled on the daemon, such as\napparmor, seccomp, SELinux, user-namespaces (userns), and rootless.\n\nAdditional configuration options for each security feature may\nbe present, and are included as a comma-separated list of key/value\npairs.\n",
+ examples=[
+ [
+ "name=apparmor",
+ "name=seccomp,profile=default",
+ "name=selinux",
+ "name=userns",
+ "name=rootless",
+ ]
+ ],
+ ),
+ ] = None
+ product_license: Annotated[
+ str | None,
+ Field(
+ alias="ProductLicense",
+ description="Reports a summary of the product license on the daemon.\n\nIf a commercial license has been applied to the daemon, information\nsuch as number of nodes, and expiration are included.\n",
+ examples=["Community Engine"],
+ ),
+ ] = None
+ default_address_pools: Annotated[
+ list[DefaultAddressPool] | None,
+ Field(
+ alias="DefaultAddressPools",
+ description='List of custom default address pools for local networks, which can be\nspecified in the daemon.json file or dockerd option.\n\nExample: a Base "10.10.0.0/16" with Size 24 will define the set of 256\n10.10.[0-255].0/24 address pools.\n',
+ ),
+ ] = None
+ warnings: Annotated[
+ list[str] | None,
+ Field(
+ alias="Warnings",
+ description="List of warnings / informational messages about missing features, or\nissues related to the daemon configuration.\n\nThese messages can be printed by the client as information to the user.\n",
+ examples=[
+ [
+ "WARNING: No memory limit support",
+ "WARNING: bridge-nf-call-iptables is disabled",
+ "WARNING: bridge-nf-call-ip6tables is disabled",
+ ]
+ ],
+ ),
+ ] = None
diff --git a/packages/models-library/src/models_library/projects.py b/packages/models-library/src/models_library/projects.py
index dcc15295a5f..c25309dac3e 100644
--- a/packages/models-library/src/models_library/projects.py
+++ b/packages/models-library/src/models_library/projects.py
@@ -7,6 +7,7 @@
from typing import Annotated, Any, Final, TypeAlias
from uuid import UUID
+from common_library.basic_types import DEFAULT_FACTORY
from models_library.basic_types import ConstrainedStr
from models_library.folders import FolderID
from models_library.workspaces import WorkspaceID
@@ -144,11 +145,14 @@ class Project(BaseProjectModel):
# Classification
tags: list[int] | None = []
- classifiers: list[ClassifierID] | None = Field(
- default_factory=list,
- description="Contains the reference to the project classifiers",
- examples=["some:id:to:a:classifier"],
- )
+ classifiers: Annotated[
+ list[ClassifierID] | None,
+ Field(
+ default_factory=list,
+ description="Contains the reference to the project classifiers",
+ examples=["some:id:to:a:classifier"],
+ ),
+ ] = DEFAULT_FACTORY
# Project state (SEE projects_state.py)
state: ProjectState | None = None
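
The pattern above recurs throughout this patch: `default_factory` moves into the `Field(...)` placed inside `Annotated[...]`, and the attribute is assigned the `DEFAULT_FACTORY` sentinel so static type checkers see an explicit default while the runtime default still comes from the factory. A minimal, self-contained sketch of the idea; the local sentinel below is an assumption standing in for `common_library.basic_types.DEFAULT_FACTORY`:

    from typing import Annotated, Any

    from pydantic import BaseModel, Field
    from pydantic_core import PydanticUndefined

    # Assumed stand-in for common_library.basic_types.DEFAULT_FACTORY: PydanticUndefined makes
    # pydantic treat the assignment as "no default" and fall back to the factory in Field(...)
    DEFAULT_FACTORY: Any = PydanticUndefined

    class ProjectSketch(BaseModel):  # hypothetical model, for illustration only
        classifiers: Annotated[
            list[str],
            Field(default_factory=list, description="fresh list per instance"),
        ] = DEFAULT_FACTORY

    assert ProjectSketch().classifiers == []
    assert ProjectSketch(classifiers=["some:id"]).classifiers == ["some:id"]
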
diff --git a/packages/models-library/src/models_library/projects_nodes.py b/packages/models-library/src/models_library/projects_nodes.py
index 3a6ea052313..f7db56b1ded 100644
--- a/packages/models-library/src/models_library/projects_nodes.py
+++ b/packages/models-library/src/models_library/projects_nodes.py
@@ -2,9 +2,9 @@
Models Node as a central element in a project's pipeline
"""
-from copy import deepcopy
from typing import Annotated, Any, TypeAlias, Union
+from common_library.basic_types import DEFAULT_FACTORY
from pydantic import (
BaseModel,
ConfigDict,
@@ -113,16 +113,6 @@ class NodeState(BaseModel):
)
-def _patch_json_schema_extra(schema: dict) -> None:
- # NOTE: exporting without this trick does not make runHash as nullable.
- # It is a Pydantic issue see https://github.com/samuelcolvin/pydantic/issues/1270
- for prop_name in ["parent", "runHash"]:
- if prop_name in schema.get("properties", {}):
- prop = deepcopy(schema["properties"][prop_name])
- prop["nullable"] = True
- schema["properties"][prop_name] = prop
-
-
class Node(BaseModel):
key: ServiceKey = Field(
...,
@@ -141,110 +131,146 @@ class Node(BaseModel):
label: str = Field(
..., description="The short name of the node", examples=["JupyterLab"]
)
- progress: float | None = Field(
- default=None,
- ge=0,
- le=100,
- description="the node progress value (deprecated in DB, still used for API only)",
- deprecated=True,
- )
- thumbnail: Annotated[str, HttpUrl] | None = Field(
- default=None,
- description="url of the latest screenshot of the node",
- examples=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"],
- )
+ progress: Annotated[
+ float | None,
+ Field(
+ ge=0,
+ le=100,
+ description="the node progress value (deprecated in DB, still used for API only)",
+ deprecated=True,
+ ),
+ ] = None
+
+ thumbnail: Annotated[
+ str | HttpUrl | None,
+ Field(
+ description="url of the latest screenshot of the node",
+ examples=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"],
+ ),
+ ] = None
# RUN HASH
- run_hash: str | None = Field(
- default=None,
- description="the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated",
- alias="runHash",
- )
+ run_hash: Annotated[
+ str | None,
+ Field(
+ description="the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated",
+ alias="runHash",
+ ),
+ ] = None
# INPUT PORTS ---
- inputs: InputsDict | None = Field(
- default_factory=dict, description="values of input properties"
- )
- inputs_required: list[InputID] = Field(
- default_factory=list,
- description="Defines inputs that are required in order to run the service",
- alias="inputsRequired",
- )
- inputs_units: dict[InputID, UnitStr] | None = Field(
- default=None,
- description="Overrides default unit (if any) defined in the service for each port",
- alias="inputsUnits",
- )
- input_access: dict[InputID, AccessEnum] | None = Field(
- default=None,
- description="map with key - access level pairs",
- alias="inputAccess",
- )
- input_nodes: list[NodeID] | None = Field(
- default_factory=list,
- description="node IDs of where the node is connected to",
- alias="inputNodes",
- )
+ inputs: Annotated[
+ InputsDict | None,
+ Field(default_factory=dict, description="values of input properties"),
+ ] = DEFAULT_FACTORY
+
+ inputs_required: Annotated[
+ list[InputID],
+ Field(
+ default_factory=list,
+ description="Defines inputs that are required in order to run the service",
+ alias="inputsRequired",
+ ),
+ ] = DEFAULT_FACTORY
+
+ inputs_units: Annotated[
+ dict[InputID, UnitStr] | None,
+ Field(
+ description="Overrides default unit (if any) defined in the service for each port",
+ alias="inputsUnits",
+ ),
+ ] = None
+
+ input_access: Annotated[
+ dict[InputID, AccessEnum] | None,
+ Field(
+ description="map with key - access level pairs",
+ alias="inputAccess",
+ ),
+ ] = None
+
+ input_nodes: Annotated[
+ list[NodeID] | None,
+ Field(
+ default_factory=list,
+ description="node IDs of where the node is connected to",
+ alias="inputNodes",
+ ),
+ ] = DEFAULT_FACTORY
# OUTPUT PORTS ---
- outputs: OutputsDict | None = Field(
- default_factory=dict, description="values of output properties"
- )
- output_node: bool | None = Field(default=None, deprecated=True, alias="outputNode")
- output_nodes: list[NodeID] | None = Field(
- default=None,
- description="Used in group-nodes. Node IDs of those connected to the output",
- alias="outputNodes",
- )
+ outputs: Annotated[
+ OutputsDict | None,
+ Field(default_factory=dict, description="values of output properties"),
+ ] = DEFAULT_FACTORY
- parent: NodeID | None = Field(
- default=None,
- description="Parent's (group-nodes') node ID s. Used to group",
- )
+ output_node: Annotated[
+ bool | None, Field(deprecated=True, alias="outputNode")
+ ] = None
- position: Position | None = Field(
- default=None,
- deprecated=True,
- description="Use projects_ui.WorkbenchUI.position instead",
- )
+ output_nodes: Annotated[
+ list[NodeID] | None,
+ Field(
+ description="Used in group-nodes. Node IDs of those connected to the output",
+ alias="outputNodes",
+ ),
+ ] = None
- state: NodeState | None = Field(
- default_factory=NodeState, description="The node's state object"
- )
+ parent: Annotated[
+ NodeID | None,
+ Field(
+ description="Parent's (group-nodes') node ID s. Used to group",
+ ),
+ ] = None
- boot_options: dict[EnvVarKey, str] | None = Field(
- default=None,
- alias="bootOptions",
- description=(
- "Some services provide alternative parameters to be injected at boot time. "
- "The user selection should be stored here, and it will overwrite the "
- "services's defaults."
+ position: Annotated[
+ Position | None,
+ Field(
+ deprecated=True,
+ description="Use projects_ui.WorkbenchUI.position instead",
),
- )
+ ] = None
+
+ state: Annotated[
+ NodeState | None,
+ Field(default_factory=NodeState, description="The node's state object"),
+ ] = DEFAULT_FACTORY
+
+ boot_options: Annotated[
+ dict[EnvVarKey, str] | None,
+ Field(
+ alias="bootOptions",
+ description=(
+ "Some services provide alternative parameters to be injected at boot time. "
+ "The user selection should be stored here, and it will overwrite the "
+ "services's defaults."
+ ),
+ ),
+ ] = None
@field_validator("thumbnail", mode="before")
@classmethod
- def convert_empty_str_to_none(cls, v):
+ def _convert_empty_str_to_none(cls, v):
if isinstance(v, str) and v == "":
return None
return v
@classmethod
- def convert_old_enum_name(cls, v) -> RunningState:
+ def _convert_old_enum_name(cls, v) -> RunningState:
if v == "FAILURE":
return RunningState.FAILED
return RunningState(v)
@field_validator("state", mode="before")
@classmethod
- def convert_from_enum(cls, v):
+ def _convert_from_enum(cls, v):
if isinstance(v, str):
# the old version of state was a enum of RunningState
- running_state_value = cls.convert_old_enum_name(v)
+ running_state_value = cls._convert_old_enum_name(v)
return NodeState(currentStatus=running_state_value)
return v
model_config = ConfigDict(
extra="forbid",
- json_schema_extra=_patch_json_schema_extra,
+ populate_by_name=True,
)
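
With `populate_by_name=True` now set on the model config, aliased fields such as `runHash` accept either the alias or the Python attribute name on input. A small sketch of that behaviour; `NodeSketch` is a hypothetical stand-in:

    from typing import Annotated

    from pydantic import BaseModel, ConfigDict, Field

    class NodeSketch(BaseModel):  # hypothetical model, for illustration only
        run_hash: Annotated[str | None, Field(alias="runHash")] = None

        model_config = ConfigDict(extra="forbid", populate_by_name=True)

    # both the alias and the field name are accepted
    assert NodeSketch(runHash="deadbeef").run_hash == "deadbeef"
    assert NodeSketch(run_hash="deadbeef").run_hash == "deadbeef"
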
diff --git a/packages/models-library/src/models_library/rest_error.py b/packages/models-library/src/models_library/rest_error.py
index 3ba2475c9b3..ce3b5ef5d56 100644
--- a/packages/models-library/src/models_library/rest_error.py
+++ b/packages/models-library/src/models_library/rest_error.py
@@ -8,7 +8,7 @@
class Log(BaseModel):
- level: LogLevel | None = Field("INFO", description="log level")
+ level: Annotated[LogLevel | None, Field(description="log level")] = LogLevel.INFO
message: str = Field(
...,
description="log message. If logger is USER, then it MUST be human readable",
diff --git a/packages/models-library/src/models_library/service_settings_labels.py b/packages/models-library/src/models_library/service_settings_labels.py
index 7c57f3c1f19..830065785de 100644
--- a/packages/models-library/src/models_library/service_settings_labels.py
+++ b/packages/models-library/src/models_library/service_settings_labels.py
@@ -19,6 +19,7 @@
field_validator,
model_validator,
)
+from pydantic.config import JsonDict
from .callbacks_mapping import CallbacksMapping
from .generics import ListModel
@@ -332,11 +333,14 @@ class DynamicSidecarServiceLabels(BaseModel):
description="allow complete internet access to containers in here",
)
- callbacks_mapping: Json[CallbacksMapping] | None = Field(
- default_factory=CallbacksMapping, # type: ignore[arg-type] # this one ANE I am not sure about
- alias="simcore.service.callbacks-mapping",
- description="exposes callbacks from user services to the sidecar",
- )
+ callbacks_mapping: Annotated[
+ Json[CallbacksMapping] | None,
+ Field(
+ default_factory=CallbacksMapping, # NOTE: PC->ANE I still think this could be an issue
+ alias="simcore.service.callbacks-mapping",
+ description="exposes callbacks from user services to the sidecar",
+ ),
+ ]
@cached_property
def needs_dynamic_sidecar(self) -> bool:
@@ -487,58 +491,28 @@ def _not_allowed_in_both_specs(self):
model_config = _BaseConfig
-class SimcoreServiceLabels(DynamicSidecarServiceLabels):
- """
- Validate all the simcores.services.* labels on a service.
-
- When no other fields expect `settings` are present
- the service will be started as legacy by director-v0.
-
- If `paths_mapping` is present the service will be started
- via dynamic-sidecar by director-v2.
-
- When starting via dynamic-sidecar, if `compose_spec` is
- present, also `container_http_entry` must be present.
- When both of these fields are missing a docker-compose
- spec will be generated before starting the service.
- """
-
- settings: Json[SimcoreServiceSettingsLabel] = Field(
- default_factory=dict,
- alias="simcore.service.settings",
- description=(
- "Json encoded. Contains setting like environment variables and "
- "resource constraints which are required by the service. "
- "Should be compatible with Docker REST API."
- ),
- )
+def _update_json_schema_extra(schema: JsonDict) -> None:
+ #
+ # NOTE: this will be automatically called with SimcoreServiceLabels.model_json_schema
+ #
- model_config = _BaseConfig | ConfigDict(
- extra="allow",
- json_schema_extra={
+ schema.update(
+ {
"examples": [
# WARNING: do not change order. Used in tests!
# legacy service
{
"simcore.service.settings": json_dumps(
- SimcoreServiceSettingLabelEntry.model_config[
- "json_schema_extra"
- ][
- "examples"
- ] # type: ignore[index]
+ SimcoreServiceSettingLabelEntry.model_json_schema()["examples"]
)
},
# dynamic-service
{
"simcore.service.settings": json_dumps(
- SimcoreServiceSettingLabelEntry.model_config[
- "json_schema_extra"
- ][
- "examples"
- ] # type: ignore[index]
+ SimcoreServiceSettingLabelEntry.model_json_schema()["examples"]
),
"simcore.service.paths-mapping": json_dumps(
- PathMappingsLabel.model_config["json_schema_extra"]["examples"][0] # type: ignore [index]
+ PathMappingsLabel.model_json_schema()["examples"][0]
),
"simcore.service.restart-policy": RestartPolicy.NO_RESTART.value,
"simcore.service.callbacks-mapping": json_dumps(
@@ -557,14 +531,10 @@ class SimcoreServiceLabels(DynamicSidecarServiceLabels):
# dynamic-service with compose spec
{
"simcore.service.settings": json_dumps(
- SimcoreServiceSettingLabelEntry.model_config[
- "json_schema_extra"
- ][
- "examples"
- ] # type: ignore[index]
+ SimcoreServiceSettingLabelEntry.model_json_schema()["examples"]
),
"simcore.service.paths-mapping": json_dumps(
- PathMappingsLabel.model_config["json_schema_extra"]["examples"][0], # type: ignore[index]
+ PathMappingsLabel.model_json_schema()["examples"][0],
),
"simcore.service.compose-spec": json_dumps(
{
@@ -592,9 +562,44 @@ class SimcoreServiceLabels(DynamicSidecarServiceLabels):
"simcore.service.container-http-entrypoint": "rt-web",
"simcore.service.restart-policy": RestartPolicy.ON_INPUTS_DOWNLOADED.value,
"simcore.service.callbacks-mapping": json_dumps(
- CallbacksMapping.model_config["json_schema_extra"]["examples"][3] # type: ignore [index]
+ CallbacksMapping.model_json_schema()["examples"][3]
),
},
]
},
)
+
+
+class SimcoreServiceLabels(DynamicSidecarServiceLabels):
+ """
+ Validate all the simcores.services.* labels on a service.
+
+ When no other fields expect `settings` are present
+ the service will be started as legacy by director-v0.
+
+ If `paths_mapping` is present the service will be started
+ via dynamic-sidecar by director-v2.
+
+ When starting via dynamic-sidecar, if `compose_spec` is
+ present, also `container_http_entry` must be present.
+ When both of these fields are missing a docker-compose
+ spec will be generated before starting the service.
+ """
+
+ settings: Annotated[
+ Json[SimcoreServiceSettingsLabel],
+ Field(
+ default_factory=dict,
+ alias="simcore.service.settings",
+ description=(
+ "Json encoded. Contains setting like environment variables and "
+ "resource constraints which are required by the service. "
+ "Should be compatible with Docker REST API."
+ ),
+ ),
+ ]
+
+ model_config = _BaseConfig | ConfigDict(
+ extra="allow",
+ json_schema_extra=_update_json_schema_extra,
+ )
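
In pydantic v2, `json_schema_extra` may also be a callable that mutates the generated schema in place; that is what `_update_json_schema_extra` relies on, and it is why the tests below read the examples through `model_json_schema()` instead of `model_config`. A minimal sketch with a made-up model and payload:

    from pydantic import BaseModel, ConfigDict
    from pydantic.config import JsonDict

    def _add_examples(schema: JsonDict) -> None:
        # invoked by pydantic while generating the JSON schema
        schema.update({"examples": [{"simcore.service.settings": "[]"}]})

    class LabelsSketch(BaseModel):  # hypothetical model, for illustration only
        model_config = ConfigDict(extra="allow", json_schema_extra=_add_examples)

    assert LabelsSketch.model_json_schema()["examples"][0]["simcore.service.settings"] == "[]"
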
diff --git a/packages/models-library/src/models_library/services_metadata_editable.py b/packages/models-library/src/models_library/services_metadata_editable.py
index c4436583503..be0a67bb336 100644
--- a/packages/models-library/src/models_library/services_metadata_editable.py
+++ b/packages/models-library/src/models_library/services_metadata_editable.py
@@ -33,9 +33,9 @@ class ServiceMetaDataEditable(ServiceBaseDisplay):
"If now>=deprecated, the service is retired",
)
classifiers: list[str] | None
- quality: dict[str, Any] = Field(
- default_factory=dict, json_schema_extra={"default": {}}
- )
+ quality: Annotated[
+ dict[str, Any], Field(default_factory=dict, json_schema_extra={"default": {}})
+ ]
model_config = ConfigDict(
json_schema_extra={
diff --git a/packages/models-library/src/models_library/user_preferences.py b/packages/models-library/src/models_library/user_preferences.py
index f16c934b2da..0d912c1af19 100644
--- a/packages/models-library/src/models_library/user_preferences.py
+++ b/packages/models-library/src/models_library/user_preferences.py
@@ -4,6 +4,7 @@
from common_library.pydantic_fields_extension import get_type
from pydantic import BaseModel, Field
from pydantic._internal._model_construction import ModelMetaclass
+from pydantic.fields import FieldInfo
from .services import ServiceKey, ServiceVersion
from .utils.enums import StrAutoEnum
@@ -74,10 +75,12 @@ def get_preference_name(cls) -> PreferenceName:
@classmethod
def get_default_value(cls) -> Any:
+ value_field: FieldInfo = dict(cls.model_fields)["value"]
+
return (
- cls.model_fields["value"].default_factory()
- if cls.model_fields["value"].default_factory
- else cls.model_fields["value"].default
+ value_field.default_factory() # type: ignore[call-arg]
+ if callable(value_field.default_factory)
+ else value_field.default
)
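
`get_default_value` now resolves the default through `FieldInfo`, calling the factory only when one is set. The same lookup sketched on a throwaway model; names are illustrative:

    from pydantic import BaseModel, Field
    from pydantic.fields import FieldInfo

    class PreferenceSketch(BaseModel):  # hypothetical model, for illustration only
        value: list[str] = Field(default_factory=list)

    value_field: FieldInfo = PreferenceSketch.model_fields["value"]
    default = (
        value_field.default_factory()  # list() -> []
        if callable(value_field.default_factory)
        else value_field.default
    )
    assert default == []
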
diff --git a/packages/models-library/tests/test_app_diagnostics.py b/packages/models-library/tests/test_app_diagnostics.py
new file mode 100644
index 00000000000..0e5f5864eae
--- /dev/null
+++ b/packages/models-library/tests/test_app_diagnostics.py
@@ -0,0 +1,18 @@
+from models_library.app_diagnostics import AppStatusCheck
+
+
+def test_annotated_defaults_and_default_factories():
+
+ model = AppStatusCheck(app_name="foo", version="1.2.3")
+ assert model.app_name == "foo"
+ assert model.version == "1.2.3"
+
+ # checks default_factory
+ assert model.services == {}
+ assert model.sessions == {}
+
+ # checks default inside Annotated[, Field(default=None, ...)]
+ assert model.url is None
+
+ # checks default outside Annotated
+ assert model.diagnostics_url is None
diff --git a/packages/models-library/tests/test_service_settings_labels.py b/packages/models-library/tests/test_service_settings_labels.py
index 775facf96a4..119c5cfb2aa 100644
--- a/packages/models-library/tests/test_service_settings_labels.py
+++ b/packages/models-library/tests/test_service_settings_labels.py
@@ -43,17 +43,17 @@ class _Parametrization(NamedTuple):
SIMCORE_SERVICE_EXAMPLES = {
"legacy": _Parametrization(
- example=SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][0],
+ example=SimcoreServiceLabels.model_json_schema()["examples"][0],
items=1,
uses_dynamic_sidecar=False,
),
"dynamic-service": _Parametrization(
- example=SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][1],
+ example=SimcoreServiceLabels.model_json_schema()["examples"][1],
items=5,
uses_dynamic_sidecar=True,
),
"dynamic-service-with-compose-spec": _Parametrization(
- example=SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][2],
+ example=SimcoreServiceLabels.model_json_schema()["examples"][2],
items=6,
uses_dynamic_sidecar=True,
),
@@ -104,7 +104,7 @@ def test_correctly_detect_dynamic_sidecar_boot(
def test_raises_error_if_http_entrypoint_is_missing():
simcore_service_labels: dict[str, Any] = deepcopy(
- SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][2]
+ SimcoreServiceLabels.model_json_schema()["examples"][2]
)
del simcore_service_labels["simcore.service.container-http-entrypoint"]
@@ -133,7 +133,7 @@ def test_path_mappings_json_encoding():
def test_simcore_services_labels_compose_spec_null_container_http_entry_provided():
sample_data: dict[str, Any] = deepcopy(
- SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][2]
+ SimcoreServiceLabels.model_json_schema()["examples"][2]
)
assert sample_data["simcore.service.container-http-entrypoint"]
@@ -145,7 +145,7 @@ def test_simcore_services_labels_compose_spec_null_container_http_entry_provided
def test_raises_error_wrong_restart_policy():
simcore_service_labels: dict[str, Any] = deepcopy(
- SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][2]
+ SimcoreServiceLabels.model_json_schema()["examples"][2]
)
simcore_service_labels["simcore.service.restart-policy"] = "__not_a_valid_policy__"
diff --git a/packages/notifications-library/requirements/_base.txt b/packages/notifications-library/requirements/_base.txt
index 560e3e1e3b6..11add4a57b1 100644
--- a/packages/notifications-library/requirements/_base.txt
+++ b/packages/notifications-library/requirements/_base.txt
@@ -102,7 +102,7 @@ packaging==24.2
# via opentelemetry-instrumentation
psycopg2-binary==2.9.9
# via sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -121,7 +121,7 @@ pydantic==2.9.2
# -r requirements/../../../packages/settings-library/requirements/_base.in
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
diff --git a/packages/postgres-database/requirements/_base.txt b/packages/postgres-database/requirements/_base.txt
index 4eddd14e0e4..e14ea9e8ab2 100644
--- a/packages/postgres-database/requirements/_base.txt
+++ b/packages/postgres-database/requirements/_base.txt
@@ -43,14 +43,14 @@ orjson==3.10.11
# -r requirements/../../../packages/common-library/requirements/_base.in
psycopg2-binary==2.9.9
# via sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/common-library/requirements/_base.in
# -r requirements/_base.in
# pydantic-extra-types
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.10.0
# via -r requirements/../../../packages/common-library/requirements/_base.in
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_parameters.py b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_parameters.py
index c2e722b6697..3e44b62c3b8 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_parameters.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_parameters.py
@@ -16,9 +16,7 @@ class CapturedParameterSchema(BaseModel):
allOf: list["CapturedParameterSchema"] | None = None
oneOf: list["CapturedParameterSchema"] | None = None
- class Config:
- validate_always = True
- allow_population_by_field_name = True
+ model_config = ConfigDict(validate_default=True, populate_by_name=True)
@field_validator("type_", mode="before")
@classmethod
diff --git a/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py b/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py
index e9a7c318a9c..cfe0a62b7d0 100644
--- a/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py
+++ b/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py
@@ -2,6 +2,7 @@
import importlib
import inspect
import itertools
+import json
import pkgutil
from collections.abc import Iterator
from contextlib import suppress
@@ -9,7 +10,7 @@
from typing import Any, NamedTuple
import pytest
-from pydantic import BaseModel
+from pydantic import BaseModel, ValidationError
def is_strict_inner(outer_cls: type, inner_cls: type) -> bool:
@@ -93,27 +94,36 @@ def _is_model_cls(obj) -> bool:
assert inspect.ismodule(module)
for model_name, model_cls in inspect.getmembers(module, _is_model_cls):
- assert model_name # nosec
- if (
- (model_config := model_cls.model_config)
- and isinstance(model_config, dict)
- and (json_schema_extra := model_config.get("json_schema_extra", {}))
- and isinstance(json_schema_extra, dict)
- ):
- if "example" in json_schema_extra:
+
+ schema = model_cls.model_json_schema()
+
+ if example := schema.get("example"):
+ yield ModelExample(
+ model_cls=model_cls,
+ example_name=f"{model_name}_example",
+ example_data=example,
+ )
+
+ if many_examples := schema.get("examples"):
+ for index, example in enumerate(many_examples):
yield ModelExample(
model_cls=model_cls,
- example_name="example",
- example_data=json_schema_extra["example"],
+ example_name=f"{model_name}_examples_{index}",
+ example_data=example,
)
- elif "examples" in json_schema_extra:
- for index, example in enumerate(json_schema_extra["examples"]):
- yield ModelExample(
- model_cls=model_cls,
- example_name=f"examples_{index}",
- example_data=example,
- )
+
+def assert_validation_model(
+    model_cls: type[BaseModel], example_name: str, example_data: Any
+):
+ try:
+ assert model_cls.model_validate(example_data) is not None
+ except ValidationError as err:
+ pytest.fail(
+ f"{example_name} is invalid {model_cls.__module__}.{model_cls.__name__}:"
+ f"\n{json.dumps(example_data, indent=1)}"
+ f"\nError: {err}"
+ )
## PYDANTIC MODELS & SCHEMAS -----------------------------------------------------
@@ -132,10 +142,10 @@ def model_cls_examples(model_cls: type[BaseModel]) -> dict[str, dict[str, Any]]:
"SEE https://pydantic-docs.helpmanual.io/usage/schema/#schema-customization"
)
- json_schema_extra: dict = model_cls.model_config.get("json_schema_extra", {})
+ json_schema: dict = model_cls.model_json_schema()
# checks exampleS setup in schema_extra
- examples_list = copy.deepcopy(json_schema_extra.get("examples", []))
+ examples_list = copy.deepcopy(json_schema.get("examples", []))
assert isinstance(examples_list, list), (
"OpenAPI and json-schema differ regarding the format for exampleS."
"The former is a dict and the latter an array. "
@@ -149,7 +159,7 @@ def model_cls_examples(model_cls: type[BaseModel]) -> dict[str, dict[str, Any]]:
f"{model_cls.__name__}.example[{index}]": example_
for index, example_ in enumerate(examples_list)
}
- if example := copy.deepcopy(json_schema_extra.get("example")):
+ if example := copy.deepcopy(json_schema.get("example")):
examples[f"{model_cls.__name__}.example"] = example
return examples
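
The new `assert_validation_model` helper turns an invalid example into a readable `pytest.fail` message. A typical, hypothetical use together with schema-derived examples:

    import pytest
    from models_library.projects_nodes import Node
    from pytest_simcore.pydantic_models import assert_validation_model

    @pytest.mark.parametrize("example", Node.model_json_schema().get("examples", []))
    def test_node_examples_are_valid(example: dict):
        # fails with the offending payload pretty-printed if validation raises
        assert_validation_model(Node, example_name="Node_example", example_data=example)
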
diff --git a/packages/service-integration/requirements/_base.txt b/packages/service-integration/requirements/_base.txt
index 810abab5e48..9df00b1eb49 100644
--- a/packages/service-integration/requirements/_base.txt
+++ b/packages/service-integration/requirements/_base.txt
@@ -79,7 +79,7 @@ packaging==24.1
# via pytest
pluggy==1.5.0
# via pytest
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -90,7 +90,7 @@ pydantic==2.9.2
# -r requirements/../../../packages/models-library/requirements/_base.in
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
diff --git a/packages/service-integration/src/service_integration/oci_image_spec.py b/packages/service-integration/src/service_integration/oci_image_spec.py
index 3b9e45b46ab..3b3ea7ffe8a 100644
--- a/packages/service-integration/src/service_integration/oci_image_spec.py
+++ b/packages/service-integration/src/service_integration/oci_image_spec.py
@@ -7,7 +7,7 @@
import os
from datetime import datetime
-from typing import Any
+from typing import Annotated, Any
from models_library.basic_types import SHA1Str, VersionStr
from models_library.utils.labels_annotations import from_labels, to_labels
@@ -43,62 +43,89 @@ def _underscore_as_dot(field_name: str):
class OciImageSpecAnnotations(BaseModel):
# TODO: review and polish constraints
- created: datetime = Field(
- None,
- description="date and time on which the image was built (string, date-time as defined by RFC 3339)",
- )
-
- authors: str = Field(
- None,
- description="contact details of the people or organization responsible for the image (freeform string)",
- )
-
- url: AnyUrl = Field(
- None, description="URL to find more information on the image (string)"
- )
-
- documentation: AnyUrl = Field(
- None, description="URL to get documentation on the image (string)"
- )
-
- source: AnyUrl = Field(
- None, description="URL to get source code for building the image (string)"
- )
-
- version: VersionStr = Field(
- None,
- description="version of the packaged software"
- "The version MAY match a label or tag in the source code repository"
- "version MAY be Semantic versioning-compatible",
- )
- revision: str = Field(
- None,
- description="Source control revision identifier for the packaged software.",
- )
-
- vendor: str = Field(
- None, description="Name of the distributing entity, organization or individual."
- )
+ created: Annotated[
+ datetime | None,
+ Field(
+ description="date and time on which the image was built (string, date-time as defined by RFC 3339)",
+ ),
+ ] = None
+
+ authors: Annotated[
+ str | None,
+ Field(
+ description="contact details of the people or organization responsible for the image (freeform string)",
+ ),
+ ] = None
+
+ url: Annotated[
+ AnyUrl | None,
+ Field(None, description="URL to find more information on the image (string)"),
+ ] = None
+
+ documentation: Annotated[
+ AnyUrl | None,
+ Field(None, description="URL to get documentation on the image (string)"),
+ ] = None
+
+ source: Annotated[
+ AnyUrl | None,
+ Field(
+ None, description="URL to get source code for building the image (string)"
+ ),
+ ] = None
+
+ version: Annotated[
+ VersionStr | None,
+ Field(
+ description="version of the packaged software"
+ "The version MAY match a label or tag in the source code repository"
+ "version MAY be Semantic versioning-compatible",
+ ),
+ ] = None
+ revision: Annotated[
+ str | None,
+ Field(
+ description="Source control revision identifier for the packaged software.",
+ ),
+ ] = None
+ vendor: Annotated[
+ str | None,
+ Field(
+ description="Name of the distributing entity, organization or individual."
+ ),
+ ] = None
# SEE https://spdx.dev/spdx-specification-21-web-version/#h.jxpfx0ykyb60
- licenses: str = Field(
- "MIT",
- description="License(s) under which contained software is distributed as an SPDX License Expression.",
- )
- ref_name: str = Field(
- None,
- description="Name of the reference for a target (string).",
- )
+ licenses: Annotated[
+ str,
+ Field(
+ description="License(s) under which contained software is distributed as an SPDX License Expression.",
+ ),
+ ] = "MIT"
+
+ ref_name: Annotated[
+ str | None,
+ Field(
+ description="Name of the reference for a target (string).",
+ ),
+ ] = None
+
+ title: Annotated[
+ str | None, Field(description="Human-readable title of the image (string)")
+ ] = None
+ description: Annotated[
+ str | None,
+ Field(
+ description="Human-readable description of the software packaged in the image (string)",
+ ),
+ ] = None
+ base_digest: Annotated[
+ SHA1Str | None,
+ Field(
+ description="Digest of the image this image is based on (string)",
+ ),
+ ] = None
- title: str = Field(None, description="Human-readable title of the image (string)")
- description: str = Field(
- None,
- description="Human-readable description of the software packaged in the image (string)",
- )
- base_digest: SHA1Str = Field(
- None,
- description="Digest of the image this image is based on (string)",
- )
model_config = ConfigDict(
alias_generator=_underscore_as_dot, populate_by_name=True, extra="forbid"
)
@@ -123,7 +150,7 @@ class LabelSchemaAnnotations(BaseModel):
NOTE: DEPRECATED IN FAVOUR OF OCI IMAGE SPEC
"""
- schema_version: VersionStr = Field("1.0.0", alias="schema-version")
+ schema_version: Annotated[VersionStr, Field(alias="schema-version")] = "1.0.0"
build_date: datetime
vcs_ref: str
diff --git a/packages/service-integration/src/service_integration/osparc_config.py b/packages/service-integration/src/service_integration/osparc_config.py
index 9382b98b447..dbc97b1c8e5 100644
--- a/packages/service-integration/src/service_integration/osparc_config.py
+++ b/packages/service-integration/src/service_integration/osparc_config.py
@@ -14,8 +14,9 @@
import logging
from pathlib import Path
-from typing import Any, Final, Literal
+from typing import Annotated, Any, Final, Literal
+from common_library.basic_types import DEFAULT_FACTORY
from models_library.basic_types import SHA256Str
from models_library.callbacks_mapping import CallbacksMapping
from models_library.service_settings_labels import (
@@ -215,7 +216,10 @@ class RuntimeConfig(BaseModel):
restart_policy: RestartPolicy = RestartPolicy.NO_RESTART
- callbacks_mapping: CallbacksMapping | None = Field(default_factory=dict)
+ callbacks_mapping: Annotated[
+ CallbacksMapping | None, Field(default_factory=dict)
+ ] = DEFAULT_FACTORY
+
paths_mapping: PathMappingsLabel | None = None
user_preferences_path: Path | None = None
@@ -226,11 +230,13 @@ class RuntimeConfig(BaseModel):
containers_allowed_outgoing_internet: set[str] | None = None
- settings: list[SettingsItem] = Field(default_factory=list)
+ settings: Annotated[
+ list[SettingsItem], Field(default_factory=list)
+ ] = DEFAULT_FACTORY
@model_validator(mode="before")
@classmethod
- def ensure_compatibility(cls, v):
+ def _ensure_compatibility(cls, v):
# NOTE: if changes are applied to `DynamicSidecarServiceLabels`
# these are also validated when ooil runs.
try:
diff --git a/packages/service-library/requirements/_base.txt b/packages/service-library/requirements/_base.txt
index d4a84148c40..e65bfe29d46 100644
--- a/packages/service-library/requirements/_base.txt
+++ b/packages/service-library/requirements/_base.txt
@@ -63,7 +63,7 @@ email-validator==2.2.0
# via pydantic
fast-depends==2.4.12
# via faststream
-faststream==0.5.28
+faststream==0.5.31
# via -r requirements/_base.in
frozenlist==1.4.1
# via
@@ -160,7 +160,7 @@ protobuf==4.25.5
# opentelemetry-proto
psutil==6.0.0
# via -r requirements/_base.in
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -177,7 +177,7 @@ pydantic==2.9.2
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
diff --git a/packages/service-library/requirements/_fastapi.txt b/packages/service-library/requirements/_fastapi.txt
index b93f8be5cc4..692bf022c6b 100644
--- a/packages/service-library/requirements/_fastapi.txt
+++ b/packages/service-library/requirements/_fastapi.txt
@@ -81,7 +81,7 @@ prometheus-client==0.21.0
# prometheus-fastapi-instrumentator
prometheus-fastapi-instrumentator==6.1.0
# via -r requirements/_fastapi.in
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -90,7 +90,7 @@ pydantic==2.9.2
# -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# fastapi
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
setuptools==75.1.0
# via opentelemetry-instrumentation
diff --git a/packages/settings-library/requirements/_base.txt b/packages/settings-library/requirements/_base.txt
index 2e155733652..eeeaa8f12c2 100644
--- a/packages/settings-library/requirements/_base.txt
+++ b/packages/settings-library/requirements/_base.txt
@@ -11,7 +11,7 @@ orjson==3.10.10
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/common-library/requirements/_base.in
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
@@ -19,7 +19,7 @@ pydantic==2.9.2
# -r requirements/_base.in
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via -r requirements/../../../packages/common-library/requirements/_base.in
diff --git a/packages/settings-library/src/settings_library/aws_s3_cli.py b/packages/settings-library/src/settings_library/aws_s3_cli.py
index 7fd3b271b10..68ae01d91eb 100644
--- a/packages/settings-library/src/settings_library/aws_s3_cli.py
+++ b/packages/settings-library/src/settings_library/aws_s3_cli.py
@@ -1,3 +1,5 @@
+from typing import Annotated
+
from pydantic import Field
from .base import BaseCustomSettings
@@ -5,7 +7,10 @@
class AwsS3CliSettings(BaseCustomSettings):
- AWS_S3_CLI_S3: S3Settings = Field(
- default=None,
- description="These settings intentionally do not use auto_default_from_env=True because we might want to turn them off if RClone is enabled.",
- )
+ AWS_S3_CLI_S3: Annotated[
+ S3Settings,
+ Field(
+ description="These settings intentionally do not use auto_default_from_env=True "
+ "because we might want to turn them off if RClone is enabled.",
+ ),
+ ]
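
Dropping `default=None` while keeping the non-optional annotation makes `AWS_S3_CLI_S3` genuinely required rather than silently defaulting to `None`. The effect, sketched on a stand-in model:

    from typing import Annotated

    from pydantic import BaseModel, Field, ValidationError

    class CliSketchSettings(BaseModel):  # hypothetical stand-in, for illustration only
        AWS_S3_CLI_S3: Annotated[str, Field(description="required: no implicit None default")]

    try:
        CliSketchSettings()  # raises because AWS_S3_CLI_S3 is now required
    except ValidationError as err:
        assert err.errors()[0]["type"] == "missing"
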
diff --git a/packages/settings-library/src/settings_library/base.py b/packages/settings-library/src/settings_library/base.py
index b5bc05f2725..9ab3119dfc7 100644
--- a/packages/settings-library/src/settings_library/base.py
+++ b/packages/settings-library/src/settings_library/base.py
@@ -91,7 +91,7 @@ def prepare_field_value(
and field.default is None
and prepared_value == _MARKED_AS_UNSET
):
- prepared_value = field.default_factory()
+ prepared_value = field.default_factory() # type: ignore[call-arg]
return prepared_value
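
The added `# type: ignore[call-arg]` reflects that, since pydantic 2.10, a `default_factory` may optionally accept the already-validated data, so its static type is no longer a plain zero-argument callable. A minimal illustration of the data-aware variant on a hypothetical model:

    from pydantic import BaseModel, Field

    class WithDataAwareFactory(BaseModel):  # hypothetical model, for illustration only
        a: int
        b: int = Field(default_factory=lambda data: data["a"] + 1)  # factory receives validated data

    assert WithDataAwareFactory(a=1).b == 2
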
diff --git a/packages/settings-library/src/settings_library/utils_cli.py b/packages/settings-library/src/settings_library/utils_cli.py
index 8c28dfe9cb5..106b1d6fb74 100644
--- a/packages/settings-library/src/settings_library/utils_cli.py
+++ b/packages/settings-library/src/settings_library/utils_cli.py
@@ -1,4 +1,3 @@
-import json
import logging
import os
from collections.abc import Callable
@@ -44,7 +43,7 @@ def print_as_envfile(
if isinstance(value, BaseSettings):
if compact:
- value = json.dumps(
+ value = json_dumps(
model_dump_with_secrets(
value, show_secrets=show_secrets, **pydantic_export_options
)
@@ -70,16 +69,15 @@ def print_as_envfile(
typer.echo(f"{name}={value}")
-def print_as_json(
+def _print_as_json(
settings_obj,
*,
compact: bool = False,
show_secrets: bool,
- json_serializer,
**pydantic_export_options,
):
typer.echo(
- json_serializer(
+ json_dumps(
model_dump_with_secrets(
settings_obj, show_secrets=show_secrets, **pydantic_export_options
),
@@ -91,7 +89,6 @@ def print_as_json(
def create_settings_command(
settings_cls: type[BaseCustomSettings],
logger: logging.Logger | None = None,
- json_serializer=json_dumps,
) -> Callable:
"""Creates typer command function for settings"""
@@ -117,7 +114,7 @@ def settings(
if as_json_schema:
typer.echo(
- json.dumps(
+ json_dumps(
settings_cls.model_json_schema(),
default=to_jsonable_python,
indent=0 if compact else 2,
@@ -129,7 +126,7 @@ def settings(
settings_obj = settings_cls.create_from_envs()
except ValidationError as err:
- settings_schema = json.dumps(
+ settings_schema = json_dumps(
settings_cls.model_json_schema(),
default=to_jsonable_python,
indent=2,
@@ -162,11 +159,10 @@ def settings(
pydantic_export_options: dict[str, Any] = {"exclude_unset": exclude_unset}
if as_json:
- print_as_json(
+ _print_as_json(
settings_obj,
compact=compact,
show_secrets=show_secrets,
- json_serializer=json_serializer,
**pydantic_export_options,
)
else:
diff --git a/packages/settings-library/tests/test_base.py b/packages/settings-library/tests/test_base.py
index 879bf5868be..d4ebd987760 100644
--- a/packages/settings-library/tests/test_base.py
+++ b/packages/settings-library/tests/test_base.py
@@ -8,6 +8,7 @@
from collections.abc import Callable
from typing import Any
+import pydantic
import pytest
import settings_library.base
from pydantic import BaseModel, ValidationError
@@ -23,6 +24,8 @@
)
from settings_library.email import SMTPSettings
+pydantic_version = ".".join(pydantic.__version__.split(".")[:2])
+
S2 = json.dumps({"S_VALUE": 2})
S3 = json.dumps({"S_VALUE": 3})
@@ -180,7 +183,7 @@ def test_create_settings_class_without_environ_fails(
"loc": ("S_VALUE",),
"msg": "Field required",
"type": "missing",
- "url": "https://errors.pydantic.dev/2.9/v/missing",
+ "url": f"https://errors.pydantic.dev/{pydantic_version}/v/missing",
}
@@ -310,7 +313,7 @@ class SettingsClassExt(SettingsClass):
"loc": ("INT_VALUE_TO_NOTHING",),
"msg": "Input should be a valid integer, unable to parse string as an integer",
"type": "int_parsing",
- "url": "https://errors.pydantic.dev/2.9/v/int_parsing",
+ "url": f"https://errors.pydantic.dev/{pydantic_version}/v/int_parsing",
}
diff --git a/packages/settings-library/tests/test_base_w_postgres.py b/packages/settings-library/tests/test_base_w_postgres.py
index b1d4958378f..37329a4e9bb 100644
--- a/packages/settings-library/tests/test_base_w_postgres.py
+++ b/packages/settings-library/tests/test_base_w_postgres.py
@@ -7,11 +7,14 @@
from collections.abc import Callable
import pytest
-from pydantic import AliasChoices, Field, ValidationError
+from pydantic import AliasChoices, Field, ValidationError, __version__
from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_envfile
from settings_library.base import BaseCustomSettings, DefaultFromEnvFactoryError
from settings_library.basic_types import PortInt
+pydantic_vtag = ".".join(__version__.split(".")[:2])
+
+
#
# NOTE: Pydantic models are returned by function-scoped fixture such that every
# test starts with a fresh Model class (notice that pydanctic classes involve meta-operations
@@ -174,7 +177,7 @@ def test_parse_from_individual_envs(
"loc": ("WEBSERVER_POSTGRES",),
"msg": "Field required",
"type": "missing",
- "url": "https://errors.pydantic.dev/2.9/v/missing",
+ "url": f"https://errors.pydantic.dev/{pydantic_vtag}/v/missing",
}
s2 = S2()
diff --git a/packages/settings-library/tests/test_utils_cli.py b/packages/settings-library/tests/test_utils_cli.py
index b3a0c10d899..49c684ea626 100644
--- a/packages/settings-library/tests/test_utils_cli.py
+++ b/packages/settings-library/tests/test_utils_cli.py
@@ -11,16 +11,16 @@
import pytest
import typer
from dotenv import dotenv_values
-from pydantic import Field, SecretStr
+from pydantic import AnyHttpUrl, Field, SecretStr
from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_envfile
from pytest_simcore.helpers.typing_env import EnvVarsDict
from settings_library.base import BaseCustomSettings
from settings_library.utils_cli import (
+ _print_as_json,
create_settings_command,
create_version_callback,
model_dump_with_secrets,
print_as_envfile,
- print_as_json,
)
from typer.testing import CliRunner
@@ -243,8 +243,8 @@ def test_cli_compact_settings_envs(
assert mocked_envs_2 == {
"APP_HOST": "localhost",
"APP_PORT": "80",
- "APP_OPTIONAL_ADDON": '{"MODULE_VALUE": 10, "MODULE_VALUE_DEFAULT": 42}',
- "APP_REQUIRED_PLUGIN": '{"POSTGRES_HOST": "localhost", "POSTGRES_PORT": 5432, "POSTGRES_USER": "foo", "POSTGRES_PASSWORD": "secret", "POSTGRES_DB": "foodb", "POSTGRES_MINSIZE": 1, "POSTGRES_MAXSIZE": 50, "POSTGRES_CLIENT_NAME": null}',
+ "APP_OPTIONAL_ADDON": '{"MODULE_VALUE":10,"MODULE_VALUE_DEFAULT":42}',
+ "APP_REQUIRED_PLUGIN": '{"POSTGRES_HOST":"localhost","POSTGRES_PORT":5432,"POSTGRES_USER":"foo","POSTGRES_PASSWORD":"secret","POSTGRES_DB":"foodb","POSTGRES_MINSIZE":1,"POSTGRES_MAXSIZE":50,"POSTGRES_CLIENT_NAME":null}',
}
settings_2 = fake_settings_class()
@@ -416,8 +416,9 @@ def test_print_as(capsys: pytest.CaptureFixture):
class FakeSettings(BaseCustomSettings):
INTEGER: int = Field(..., description="Some info")
SECRET: SecretStr
+ URL: AnyHttpUrl
- settings_obj = FakeSettings(INTEGER=1, SECRET="secret") # type: ignore
+ settings_obj = FakeSettings(INTEGER=1, SECRET="secret", URL="http://google.com") # type: ignore
print_as_envfile(settings_obj, compact=True, verbose=True, show_secrets=True)
captured = capsys.readouterr()
@@ -434,9 +435,7 @@ class FakeSettings(BaseCustomSettings):
assert "secret" not in captured.out
assert "Some info" not in captured.out
- print_as_json(
- settings_obj, compact=True, show_secrets=False, json_serializer=json.dumps
- )
+ _print_as_json(settings_obj, compact=True, show_secrets=False)
captured = capsys.readouterr()
assert "secret" not in captured.out
assert "**" in captured.out
diff --git a/packages/simcore-sdk/requirements/_base.txt b/packages/simcore-sdk/requirements/_base.txt
index d2fa58f9494..541037d54fe 100644
--- a/packages/simcore-sdk/requirements/_base.txt
+++ b/packages/simcore-sdk/requirements/_base.txt
@@ -97,7 +97,7 @@ email-validator==2.2.0
# via pydantic
fast-depends==2.4.12
# via faststream
-faststream==0.5.28
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
flexcache==0.3
# via pint
@@ -256,7 +256,7 @@ psycopg2-binary==2.9.9
# via
# aiopg
# sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -289,7 +289,7 @@ pydantic==2.9.2
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -352,7 +352,6 @@ redis==5.0.4
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -450,6 +449,7 @@ wrapt==1.16.0
yarl==1.12.1
# via
# -r requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/packages/simcore-sdk/requirements/_test.txt b/packages/simcore-sdk/requirements/_test.txt
index 3065f3672cb..dc4e1888bdd 100644
--- a/packages/simcore-sdk/requirements/_test.txt
+++ b/packages/simcore-sdk/requirements/_test.txt
@@ -206,12 +206,12 @@ py-partiql-parser==0.5.6
# via moto
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
diff --git a/requirements/constraints.txt b/requirements/constraints.txt
index 3e40b2694d4..575fda3e462 100644
--- a/requirements/constraints.txt
+++ b/requirements/constraints.txt
@@ -18,7 +18,6 @@ mako>=1.2.2 # https://github.com/advisories/GH
orjson>=3.9.15 # https://github.com/advisories/GHSA-pwr2-4v36-6qpr
paramiko>=2.10.1 # https://github.com/advisories/GHSA-f8q4-jwww-x3wv
py>=1.11.0 # https://github.com/advisories/GHSA-w596-4wvx-j9j6 / CVE-2022-42969
-pydantic>=1.8.2 # https://github.com/advisories/GHSA-5jqp-qgf6-3pvh
pyyaml>=5.4 # https://github.com/advisories/GHSA-8q59-q68h-6hv4
redis>=4.5.4 # https://github.com/advisories/GHSA-24wv-mv5m-xv4h
rsa>=4.1 # https://github.com/advisories/GHSA-537h-rv9q-vvph
@@ -53,6 +52,7 @@ pytest-asyncio<0.24
#
+pydantic>=2.10.0 # Avoids inter-version compatibility serialization errors as: _pickle.UnpicklingError: NEWOBJ class argument must be a type, not _AnnotatedAlias
# constraint since https://github.com/MagicStack/uvloop/releases/tag/v0.15.0: drops support for 3.5/3.6 Feb.2021
uvloop<0.15.0 ; python_version < '3.7'
diff --git a/scripts/openapi-pydantic-models-generator.bash b/scripts/openapi-pydantic-models-generator.bash
index 88e071a5273..d5170d5b0c9 100755
--- a/scripts/openapi-pydantic-models-generator.bash
+++ b/scripts/openapi-pydantic-models-generator.bash
@@ -20,22 +20,23 @@ FROM python:${PYTHON_VERSION}-slim
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
RUN uv pip install --system datamodel-code-generator[http] && uv pip list
ENTRYPOINT ["datamodel-codegen", \
- "--output-model-type=pydantic_v2.BaseModel", \
- "--input-file-type=jsonschema", \
- "--snake-case-field", \
- "--use-standard-collections", \
- "--use-union-operator", \
- "--use-schema-description", \
+ "--use-non-positive-negative-number-constrained-types", \
"--allow-population-by-field-name", \
- "--use-subclass-enum", \
- "--use-double-quotes", \
"--field-constraints", \
- "--use-non-positive-negative-number-constrained-types", \
+ "--input-file-type=jsonschema", \
+ "--output-model-type=pydantic_v2.BaseModel", \
"--reuse-model", \
"--set-default-enum-member", \
- "--use-title-as-name", \
+ "--snake-case-field", \
"--target-python-version=${PYTHON_VERSION%.*}", \
+ "--use-annotated", \
"--use-default-kwarg", \
+ "--use-double-quotes", \
+ "--use-schema-description", \
+ "--use-standard-collections", \
+ "--use-subclass-enum", \
+ "--use-title-as-name", \
+ "--use-union-operator", \
"--validation"]
EOF
}
diff --git a/services/agent/requirements/_base.txt b/services/agent/requirements/_base.txt
index f73ea222b5b..4fddb46bb50 100644
--- a/services/agent/requirements/_base.txt
+++ b/services/agent/requirements/_base.txt
@@ -92,7 +92,7 @@ fastapi==0.115.5
# via
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
-faststream==0.5.30
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.5.0
# via
diff --git a/services/api-server/openapi.json b/services/api-server/openapi.json
index e87fda69545..883b7364473 100644
--- a/services/api-server/openapi.json
+++ b/services/api-server/openapi.json
@@ -5546,8 +5546,6 @@
"urls": {
"items": {
"type": "string",
- "maxLength": 65536,
- "minLength": 1,
"format": "uri"
},
"type": "array",
@@ -6161,8 +6159,6 @@
},
"download_link": {
"type": "string",
- "maxLength": 65536,
- "minLength": 1,
"format": "uri",
"title": "Download Link"
}
@@ -6203,14 +6199,14 @@
},
"docs_url": {
"type": "string",
- "maxLength": 65536,
+ "maxLength": 2083,
"minLength": 1,
"format": "uri",
"title": "Docs Url"
},
"docs_dev_url": {
"type": "string",
- "maxLength": 65536,
+ "maxLength": 2083,
"minLength": 1,
"format": "uri",
"title": "Docs Dev Url"
@@ -6473,7 +6469,6 @@
"enum": [
"TIER"
],
- "const": "TIER",
"title": "PricingPlanClassification"
},
"PricingUnitGet": {
diff --git a/services/api-server/requirements/_base.txt b/services/api-server/requirements/_base.txt
index 50bb56b4e69..bb064316789 100644
--- a/services/api-server/requirements/_base.txt
+++ b/services/api-server/requirements/_base.txt
@@ -191,7 +191,7 @@ fastapi-cli==0.0.5
# via fastapi
fastapi-pagination==0.12.31
# via -r requirements/_base.in
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
@@ -512,7 +512,7 @@ psycopg2-binary==2.9.9
# sqlalchemy
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -575,7 +575,7 @@ pydantic==2.9.2
# fastapi-pagination
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -809,7 +809,7 @@ typer==0.12.3
# fastapi-cli
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
@@ -907,7 +907,9 @@ wrapt==1.16.0
yarl==1.9.4
# via
# -r requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/api-server/requirements/_test.txt b/services/api-server/requirements/_test.txt
index 26f93fde738..e5254193173 100644
--- a/services/api-server/requirements/_test.txt
+++ b/services/api-server/requirements/_test.txt
@@ -320,7 +320,7 @@ types-boto3==1.0.2
# via -r requirements/_test.in
types-s3transfer==0.10.2
# via boto3-stubs
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# alembic
diff --git a/services/api-server/requirements/_tools.txt b/services/api-server/requirements/_tools.txt
index 8b1f4a36d83..795564deb10 100644
--- a/services/api-server/requirements/_tools.txt
+++ b/services/api-server/requirements/_tools.txt
@@ -93,7 +93,7 @@ setuptools==69.2.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/api-server/src/simcore_service_api_server/api/routes/files.py b/services/api-server/src/simcore_service_api_server/api/routes/files.py
index c279f2ce303..219306f693b 100644
--- a/services/api-server/src/simcore_service_api_server/api/routes/files.py
+++ b/services/api-server/src/simcore_service_api_server/api/routes/files.py
@@ -261,7 +261,7 @@ async def get_upload_links(
abort_url: URL = request.url_for("abort_multipart_upload", file_id=file_meta.id)
upload_data: FileUploadData = FileUploadData(
chunk_size=upload_links.chunk_size,
- urls=upload_links.urls,
+ urls=upload_links.urls, # type: ignore[arg-type]
links=UploadLinks(
complete_upload=completion_url.path, abort_upload=abort_url.path
),
diff --git a/services/api-server/src/simcore_service_api_server/api/routes/solvers.py b/services/api-server/src/simcore_service_api_server/api/routes/solvers.py
index c172000bd9e..fca71667030 100644
--- a/services/api-server/src/simcore_service_api_server/api/routes/solvers.py
+++ b/services/api-server/src/simcore_service_api_server/api/routes/solvers.py
@@ -276,6 +276,9 @@ async def get_solver_pricing_plan(
):
assert user_id
assert product_name
- return await webserver_api.get_service_pricing_plan(
+ pricing_plan_or_none = await webserver_api.get_service_pricing_plan(
solver_key=solver_key, version=version
)
+ # NOTE: pricing_plan_or_none is expected to be set, see https://github.com/ITISFoundation/osparc-simcore/issues/6901
+ assert pricing_plan_or_none # nosec
+ return pricing_plan_or_none
diff --git a/services/api-server/src/simcore_service_api_server/core/settings.py b/services/api-server/src/simcore_service_api_server/core/settings.py
index 8c804df22be..6e33055084d 100644
--- a/services/api-server/src/simcore_service_api_server/core/settings.py
+++ b/services/api-server/src/simcore_service_api_server/core/settings.py
@@ -58,10 +58,15 @@ class BasicSettings(BaseCustomSettings, MixinLoggingSettings):
)
# LOGGING
- LOG_LEVEL: LogLevel = Field(
- default=LogLevel.INFO.value,
- validation_alias=AliasChoices("API_SERVER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"),
- )
+ LOG_LEVEL: Annotated[
+ LogLevel,
+ Field(
+ validation_alias=AliasChoices(
+ "API_SERVER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"
+ ),
+ ),
+ ] = LogLevel.INFO
+
API_SERVER_LOG_FORMAT_LOCAL_DEV_ENABLED: bool = Field(
default=False,
validation_alias=AliasChoices(
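The settings hunk above moves the default out of `Field()` and onto the attribute via `Annotated`. A minimal sketch of the same pattern, assuming pydantic-settings v2 and using an illustrative class rather than the service's real settings:

```python
# Sketch: default lives on the attribute, Field() only carries metadata such as
# validation_alias, so any of the aliased env vars can still override it.
from typing import Annotated

from pydantic import AliasChoices, Field
from pydantic_settings import BaseSettings


class _SketchSettings(BaseSettings):
    LOG_LEVEL: Annotated[
        str,
        Field(
            validation_alias=AliasChoices(
                "API_SERVER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"
            )
        ),
    ] = "INFO"


# With no matching env var set this resolves to the default:
print(_SketchSettings().LOG_LEVEL)  # -> "INFO" (or the env override, if any)
```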
diff --git a/services/api-server/src/simcore_service_api_server/models/_utils_pydantic.py b/services/api-server/src/simcore_service_api_server/models/_utils_pydantic.py
index 150cac87b97..cb0d3352d94 100644
--- a/services/api-server/src/simcore_service_api_server/models/_utils_pydantic.py
+++ b/services/api-server/src/simcore_service_api_server/models/_utils_pydantic.py
@@ -1,6 +1,48 @@
+from copy import deepcopy
+
from common_library.json_serialization import json_dumps, json_loads
+from pydantic import GetJsonSchemaHandler
+from pydantic.json_schema import JsonSchemaValue
+from pydantic_core.core_schema import CoreSchema
class BaseConfig:
json_loads = json_loads
json_dumps = json_dumps
+
+
+class UriSchema:
+ """Metadata class to modify openapi schemas of Url fields
+
+ Usage:
+ class TestModel(BaseModel):
+ url: Annotated[HttpUrl, UriSchema()]
+
+
+ will produce a schema for the url field property as a string with uri format:
+ {
+ "format": "uri",
+ "type": "string",
+ }
+
+ SEE https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#section-7.3.5
+ """
+
+ @classmethod
+ def __get_pydantic_json_schema__(
+ cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
+ ) -> JsonSchemaValue:
+ # SEE https://docs.pydantic.dev/2.10/concepts/json_schema/#implementing-__get_pydantic_json_schema__
+ json_schema = deepcopy(handler(core_schema))
+
+ if (schema := core_schema.get("schema", {})) and schema.get("type") == "url":
+ json_schema.update(
+ type="string",
+ format="uri",
+ )
+ if max_length := schema.get("max_length"):
+ # SEE https://docs.pydantic.dev/2.10/api/networks/#pydantic.networks.UrlConstraints
+ # adds length limits if the schema's UrlConstraints include them (e.g. HttpUrl includes max_length=2083)
+ json_schema.update(maxLength=max_length, minLength=1)
+
+ return json_schema
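A short usage sketch of the `UriSchema` metadata introduced above (assuming the `simcore_service_api_server` package is importable; the model name is illustrative). It shows the schema override the docstring describes, with `HttpUrl`'s `UrlConstraints` surfacing as `maxLength=2083`:

```python
from typing import Annotated

from pydantic import BaseModel, HttpUrl
from simcore_service_api_server.models._utils_pydantic import UriSchema


class _Example(BaseModel):
    url: Annotated[HttpUrl, UriSchema()]


print(_Example.model_json_schema()["properties"]["url"])
# -> {"format": "uri", "maxLength": 2083, "minLength": 1, "title": "Url", "type": "string"}
```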
diff --git a/services/api-server/src/simcore_service_api_server/models/api_resources.py b/services/api-server/src/simcore_service_api_server/models/api_resources.py
index e8bf8bf2bc1..9a2221034ad 100644
--- a/services/api-server/src/simcore_service_api_server/models/api_resources.py
+++ b/services/api-server/src/simcore_service_api_server/models/api_resources.py
@@ -1,8 +1,8 @@
import re
import urllib.parse
-from typing import Annotated, Any, TypeAlias
+from typing import Annotated, TypeAlias
-from pydantic import BaseModel, Field, TypeAdapter
+from pydantic import Field, TypeAdapter
from pydantic.types import StringConstraints
# RESOURCE NAMES https://cloud.google.com/apis/design/resource_names
@@ -56,20 +56,3 @@ def compose_resource_name(*collection_or_resource_ids) -> RelativeResourceName:
def split_resource_name(resource_name: RelativeResourceName) -> list[str]:
quoted_parts = resource_name.split("/")
return [f"{urllib.parse.unquote_plus(p)}" for p in quoted_parts]
-
-
-#
-# For resource definitions, the first field should be a string field for the resource name,
-# and it should be called *name*
-# Resource IDs must be clearly documented whether they are assigned by the client, the server, or either
-#
-class BaseResource(BaseModel):
- name: RelativeResourceName = Field(None, examples=["solvers/isolve/releases/1.2.3"])
- id: Any = Field(None, description="Resource ID", examples=["1.2.3"]) # noqa: A003
-
-
-class BaseCollection(BaseModel):
- name: RelativeResourceName = Field(None, examples=["solvers/isolve/releases"])
- id: Any = Field(
- None, description="Collection ID", examples=["releases"]
- ) # noqa: A003
diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/files.py b/services/api-server/src/simcore_service_api_server/models/schemas/files.py
index f56fa0da669..1a188e87740 100644
--- a/services/api-server/src/simcore_service_api_server/models/schemas/files.py
+++ b/services/api-server/src/simcore_service_api_server/models/schemas/files.py
@@ -11,7 +11,7 @@
from models_library.basic_types import SHA256Str
from models_library.projects_nodes_io import StorageFileID
from pydantic import (
- AnyUrl,
+ AnyHttpUrl,
BaseModel,
ConfigDict,
Field,
@@ -23,6 +23,8 @@
)
from servicelib.file_utils import create_sha256_checksum
+from .._utils_pydantic import UriSchema
+
_NAMESPACE_FILEID_KEY = UUID("aa154444-d22d-4290-bb15-df37dba87865")
@@ -167,9 +169,7 @@ class UploadLinks(BaseModel):
class FileUploadData(BaseModel):
chunk_size: NonNegativeInt
- urls: list[
- Annotated[AnyUrl, StringConstraints(max_length=65536)]
- ] # maxlength added for backwards compatibility
+ urls: list[Annotated[AnyHttpUrl, UriSchema()]]
links: UploadLinks
diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/jobs.py b/services/api-server/src/simcore_service_api_server/models/schemas/jobs.py
index 2a7dc8c4f32..cdd01eacd35 100644
--- a/services/api-server/src/simcore_service_api_server/models/schemas/jobs.py
+++ b/services/api-server/src/simcore_service_api_server/models/schemas/jobs.py
@@ -26,6 +26,7 @@
from ...models.schemas.files import File
from ...models.schemas.solvers import Solver
+from .._utils_pydantic import UriSchema
from ..api_resources import (
RelativeResourceName,
compose_resource_name,
@@ -152,7 +153,9 @@ class JobMetadata(BaseModel):
metadata: dict[str, MetaValueType] = Field(..., description="Custom key-value map")
# Links
- url: HttpUrl | None = Field(..., description="Link to get this resource (self)")
+ url: Annotated[HttpUrl, UriSchema()] | None = Field(
+ ..., description="Link to get this resource (self)"
+ )
model_config = ConfigDict(
json_schema_extra={
@@ -198,11 +201,13 @@ class Job(BaseModel):
)
# Get links to other resources
- url: HttpUrl | None = Field(..., description="Link to get this resource (self)")
- runner_url: HttpUrl | None = Field(
+ url: Annotated[HttpUrl, UriSchema()] | None = Field(
+ ..., description="Link to get this resource (self)"
+ )
+ runner_url: Annotated[HttpUrl, UriSchema()] | None = Field(
..., description="Link to the solver's job (parent collection)"
)
- outputs_url: HttpUrl | None = Field(
+ outputs_url: Annotated[HttpUrl, UriSchema()] | None = Field(
..., description="Link to the job outputs (sub-collection)"
)
@@ -223,7 +228,7 @@ class Job(BaseModel):
@field_validator("name", mode="before")
@classmethod
- def check_name(cls, v, info: ValidationInfo):
+ def _check_name(cls, v, info: ValidationInfo):
_id = str(info.data["id"])
if not v.endswith(f"/{_id}"):
msg = f"Resource name [{v}] and id [{_id}] do not match"
diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/meta.py b/services/api-server/src/simcore_service_api_server/models/schemas/meta.py
index 6332d5ae1d5..9e195214ec0 100644
--- a/services/api-server/src/simcore_service_api_server/models/schemas/meta.py
+++ b/services/api-server/src/simcore_service_api_server/models/schemas/meta.py
@@ -1,12 +1,13 @@
from typing import Annotated
from models_library.api_schemas__common.meta import BaseMeta
-from pydantic import AnyHttpUrl, ConfigDict, StringConstraints
+from pydantic import ConfigDict, HttpUrl
+from simcore_service_api_server.models._utils_pydantic import UriSchema
class Meta(BaseMeta):
- docs_url: Annotated[AnyHttpUrl, StringConstraints(max_length=65536)]
- docs_dev_url: Annotated[AnyHttpUrl, StringConstraints(max_length=65536)]
+ docs_url: Annotated[HttpUrl, UriSchema()]
+ docs_dev_url: Annotated[HttpUrl, UriSchema()]
model_config = ConfigDict(
json_schema_extra={
"example": {
diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/solvers.py b/services/api-server/src/simcore_service_api_server/models/schemas/solvers.py
index 8462efba68c..9db1e9696ad 100644
--- a/services/api-server/src/simcore_service_api_server/models/schemas/solvers.py
+++ b/services/api-server/src/simcore_service_api_server/models/schemas/solvers.py
@@ -7,6 +7,7 @@
from models_library.services_regex import COMPUTATIONAL_SERVICE_KEY_RE
from packaging.version import Version
from pydantic import BaseModel, ConfigDict, Field, HttpUrl, StringConstraints
+from simcore_service_api_server.models._utils_pydantic import UriSchema
from ..api_resources import compose_resource_name
from ..basic_types import VersionStr
@@ -52,7 +53,11 @@ class Solver(BaseModel):
# TODO: consider version_aliases: list[str] = [] # remaining tags
# Get links to other resources
- url: Annotated[HttpUrl | None, Field(..., description="Link to get this resource")]
+ url: Annotated[
+ Annotated[HttpUrl, UriSchema()] | None,
+ Field(..., description="Link to get this resource"),
+ ]
+
model_config = ConfigDict(
extra="ignore",
json_schema_extra={
diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/studies.py b/services/api-server/src/simcore_service_api_server/models/schemas/studies.py
index 6815fcc5216..1905477236f 100644
--- a/services/api-server/src/simcore_service_api_server/models/schemas/studies.py
+++ b/services/api-server/src/simcore_service_api_server/models/schemas/studies.py
@@ -1,14 +1,15 @@
from typing import Annotated, TypeAlias
from models_library import projects, projects_nodes_io
-from pydantic import AnyUrl, BaseModel, ConfigDict, Field, StringConstraints
+from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field
+from simcore_service_api_server.models._utils_pydantic import UriSchema
from .. import api_resources
from . import solvers
StudyID: TypeAlias = projects.ProjectID
NodeName: TypeAlias = str
-DownloadLink: TypeAlias = AnyUrl
+DownloadLink: TypeAlias = Annotated[AnyHttpUrl, UriSchema()]
class Study(BaseModel):
@@ -48,7 +49,7 @@ class StudyPort(solvers.SolverPort):
class LogLink(BaseModel):
node_name: NodeName
- download_link: Annotated[DownloadLink, StringConstraints(max_length=65536)]
+ download_link: DownloadLink
class JobLogsMap(BaseModel):
diff --git a/services/api-server/src/simcore_service_api_server/services/director_v2.py b/services/api-server/src/simcore_service_api_server/services/director_v2.py
index aaa946f10d4..45f42af73eb 100644
--- a/services/api-server/src/simcore_service_api_server/services/director_v2.py
+++ b/services/api-server/src/simcore_service_api_server/services/director_v2.py
@@ -6,15 +6,7 @@
from models_library.projects_nodes_io import NodeID
from models_library.projects_pipeline import ComputationTask
from models_library.projects_state import RunningState
-from pydantic import (
- AnyHttpUrl,
- AnyUrl,
- BaseModel,
- ConfigDict,
- Field,
- PositiveInt,
- TypeAdapter,
-)
+from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field, PositiveInt, TypeAdapter
from settings_library.tracing import TracingSettings
from starlette import status
@@ -61,7 +53,7 @@ def guess_progress(self) -> PercentageInt:
class TaskLogFileGet(BaseModel):
task_id: NodeID
- download_link: AnyUrl | None = Field(
+ download_link: AnyHttpUrl | None = Field(
None, description="Presigned link for log file or None if still not available"
)
diff --git a/services/api-server/src/simcore_service_api_server/services/solver_job_models_converters.py b/services/api-server/src/simcore_service_api_server/services/solver_job_models_converters.py
index a8988037f65..f95061c0ded 100644
--- a/services/api-server/src/simcore_service_api_server/services/solver_job_models_converters.py
+++ b/services/api-server/src/simcore_service_api_server/services/solver_job_models_converters.py
@@ -143,7 +143,7 @@ def create_new_project_for_job(
version=solver.version,
label=solver.title,
inputs=solver_inputs,
- inputsUnits={},
+ inputs_units={},
)
# Ensembles project model so it can be used as input for create_project
diff --git a/services/api-server/src/simcore_service_api_server/services/webserver.py b/services/api-server/src/simcore_service_api_server/services/webserver.py
index b5e1c29c106..ec828687b77 100644
--- a/services/api-server/src/simcore_service_api_server/services/webserver.py
+++ b/services/api-server/src/simcore_service_api_server/services/webserver.py
@@ -602,7 +602,13 @@ async def get_service_pricing_plan(
)
if pricing_plan_get:
return ServicePricingPlanGet.model_construct(
- **pricing_plan_get.model_dump(exclude={"is_active"})
+ pricing_plan_id=pricing_plan_get.pricing_plan_id,
+ display_name=pricing_plan_get.display_name,
+ description=pricing_plan_get.description,
+ classification=pricing_plan_get.classification,
+ created_at=pricing_plan_get.created_at,
+ pricing_plan_key=pricing_plan_get.pricing_plan_key,
+ pricing_units=pricing_plan_get.pricing_units,
)
return None
diff --git a/services/api-server/tests/test_utils_pydantic.py b/services/api-server/tests/test_utils_pydantic.py
new file mode 100644
index 00000000000..77136f73982
--- /dev/null
+++ b/services/api-server/tests/test_utils_pydantic.py
@@ -0,0 +1,214 @@
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+# pylint: disable=too-many-arguments
+
+from typing import Annotated, Any
+
+import fastapi
+import pydantic
+import pytest
+from fastapi import FastAPI
+from pydantic import (
+ AnyHttpUrl,
+ AnyUrl,
+ BaseModel,
+ HttpUrl,
+ TypeAdapter,
+ ValidationError,
+)
+from simcore_service_api_server.models._utils_pydantic import UriSchema
+
+
+class _FakeModel(BaseModel):
+ urls0: list[HttpUrl]
+ urls1: list[Annotated[HttpUrl, UriSchema()]]
+
+ # with and w/o
+ url0: HttpUrl
+ url1: Annotated[HttpUrl, UriSchema()]
+
+ # including None inside/outside Annotated
+ url2: Annotated[HttpUrl, UriSchema()] | None
+ url3: Annotated[HttpUrl | None, UriSchema()]
+
+ # misuse: UriSchema applied to a non-URL type (expected to be a no-op)
+ int0: Annotated[int, UriSchema()]
+
+
+@pytest.fixture
+def pydantic_schema() -> dict[str, Any]:
+ return _FakeModel.model_json_schema()
+
+
+def test_pydantic_json_schema(pydantic_schema: dict[str, Any]):
+ assert pydantic_schema["properties"] == {
+ "int0": {"title": "Int0", "type": "integer"},
+ "url0": {
+ "format": "uri",
+ "maxLength": 2083,
+ "minLength": 1,
+ "title": "Url0",
+ "type": "string",
+ },
+ "url1": {
+ "format": "uri",
+ "maxLength": 2083,
+ "minLength": 1,
+ "title": "Url1",
+ "type": "string",
+ },
+ "url2": {
+ "anyOf": [
+ {"format": "uri", "maxLength": 2083, "minLength": 1, "type": "string"},
+ {"type": "null"},
+ ],
+ "title": "Url2",
+ },
+ "url3": {
+ "anyOf": [
+ {"format": "uri", "maxLength": 2083, "minLength": 1, "type": "string"},
+ {"type": "null"},
+ ],
+ "title": "Url3",
+ },
+ "urls0": {
+ "items": {
+ "format": "uri",
+ "maxLength": 2083,
+ "minLength": 1,
+ "type": "string",
+ },
+ "title": "Urls0",
+ "type": "array",
+ },
+ "urls1": {
+ "items": {
+ "format": "uri",
+ "maxLength": 2083,
+ "minLength": 1,
+ "type": "string",
+ },
+ "title": "Urls1",
+ "type": "array",
+ },
+ }
+
+
+@pytest.fixture
+def fastapi_schema() -> dict[str, Any]:
+ app = FastAPI()
+
+ @app.get("/", response_model=_FakeModel)
+ def _h():
+ ...
+
+ openapi = app.openapi()
+ return openapi["components"]["schemas"][_FakeModel.__name__]
+
+
+def test_fastapi_openapi_component_schemas(fastapi_schema: dict[str, Any]):
+
+ assert fastapi_schema["properties"] == {
+ "int0": {"title": "Int0", "type": "integer"},
+ "url0": {"title": "Url0", "type": "string"},
+ "url1": {
+ "format": "uri",
+ "maxLength": 2083,
+ "minLength": 1,
+ "title": "Url1",
+ "type": "string",
+ },
+ "url2": {
+ "anyOf": [
+ {"format": "uri", "maxLength": 2083, "minLength": 1, "type": "string"},
+ {"type": "null"},
+ ],
+ "title": "Url2",
+ },
+ "url3": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Url3"},
+ "urls0": {"items": {"type": "string"}, "title": "Urls0", "type": "array"},
+ "urls1": {
+ "items": {
+ "format": "uri",
+ "maxLength": 2083,
+ "minLength": 1,
+ "type": "string",
+ },
+ "title": "Urls1",
+ "type": "array",
+ },
+ }
+
+
+@pytest.mark.xfail(
+ reason=f"{pydantic.__version__=} and {fastapi.__version__=} produce different json-schemas for the same model"
+)
+def test_compare_pydantic_vs_fastapi_schemas(
+ fastapi_schema: dict[str, Any], pydantic_schema: dict[str, Any]
+):
+
+ # NOTE @all: it is unclear why fastapi and pydantic emit different json-schemas for the same model
+ assert fastapi_schema["properties"] == pydantic_schema["properties"]
+
+
+def test_differences_between_new_pydantic_url_types():
+ # SEE https://docs.pydantic.dev/2.10/api/networks/
+
+ # | **URL** | **AnyUrl** | **AnyHttpUrl** | **HttpUrl** |
+ # |-------------------------------|-------------|-----------------|-----------------|
+ # | `http://example.com` | ✅ | ✅ | ✅ |
+ # | `https://example.com/resource`| ✅ | ✅ | ✅ |
+ # | `ftp://example.com` | ✅ | ❌ | ❌ |
+ # | `http://localhost` | ✅ | ✅ | ✅ |
+ # | `http://127.0.0.1` | ✅ | ✅ | ✅ |
+ # | `http://127.0.0.1:8080` | ✅ | ✅ | ✅ |
+ # | `customscheme://example.com` | ✅ | ❌ | ❌ |
+
+ url = "http://example.com"
+ TypeAdapter(AnyUrl).validate_python(url)
+ TypeAdapter(HttpUrl).validate_python(url)
+ TypeAdapter(AnyHttpUrl).validate_python(url)
+
+ url = "https://example.com/resource"
+ TypeAdapter(AnyUrl).validate_python(url)
+ TypeAdapter(HttpUrl).validate_python(url)
+ TypeAdapter(AnyHttpUrl).validate_python(url)
+
+ url = "ftp://example.com"
+ TypeAdapter(AnyUrl).validate_python(url)
+ with pytest.raises(ValidationError):
+ TypeAdapter(HttpUrl).validate_python(url)
+ with pytest.raises(ValidationError):
+ TypeAdapter(AnyHttpUrl).validate_python(url)
+
+ url = "http://localhost"
+ TypeAdapter(AnyUrl).validate_python(url)
+ TypeAdapter(HttpUrl).validate_python(url)
+ TypeAdapter(AnyHttpUrl).validate_python(url)
+
+ url = "http://127.0.0.1"
+ TypeAdapter(AnyUrl).validate_python(url)
+ TypeAdapter(HttpUrl).validate_python(url)
+ TypeAdapter(AnyHttpUrl).validate_python(url)
+
+ url = "http://127.0.0.1:8080"
+ TypeAdapter(AnyUrl).validate_python(url)
+ TypeAdapter(HttpUrl).validate_python(url)
+ TypeAdapter(AnyHttpUrl).validate_python(url)
+
+ url = "customscheme://example.com"
+ TypeAdapter(AnyUrl).validate_python(url)
+ with pytest.raises(ValidationError):
+ TypeAdapter(HttpUrl).validate_python(url)
+ with pytest.raises(ValidationError):
+ TypeAdapter(AnyHttpUrl).validate_python(url)
+
+ # examples taken from docker API
+ for url in (
+ "https://hub-mirror.corp.example.com:5000/",
+ "https://[2001:db8:a0b:12f0::1]/",
+ ):
+ TypeAdapter(AnyUrl).validate_python(url)
+ TypeAdapter(HttpUrl).validate_python(url)
+ TypeAdapter(AnyHttpUrl).validate_python(url)
diff --git a/services/api-server/tests/unit/test__models_examples.py b/services/api-server/tests/unit/test__models_examples.py
new file mode 100644
index 00000000000..225b4b01c95
--- /dev/null
+++ b/services/api-server/tests/unit/test__models_examples.py
@@ -0,0 +1,20 @@
+import json
+from itertools import chain
+from typing import Any
+
+import pytest
+import simcore_service_api_server.models.schemas
+from pydantic import BaseModel
+from pytest_simcore.pydantic_models import walk_model_examples_in_package
+
+
+@pytest.mark.parametrize(
+ "model_cls, example_name, example_data",
+ chain(walk_model_examples_in_package(simcore_service_api_server.models)),
+)
+def test_all_models_library_models_config_examples(
+ model_cls: type[BaseModel], example_name: int, example_data: Any
+):
+ assert model_cls.model_validate(
+ example_data
+ ), f"Failed {example_name} : {json.dumps(example_data)}"
diff --git a/services/autoscaling/requirements/_base.txt b/services/autoscaling/requirements/_base.txt
index 39676c07d5b..5b02f55cb96 100644
--- a/services/autoscaling/requirements/_base.txt
+++ b/services/autoscaling/requirements/_base.txt
@@ -162,7 +162,7 @@ fastapi==0.115.5
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
# prometheus-fastapi-instrumentator
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/_base.in
@@ -436,7 +436,7 @@ psutil==6.0.0
# -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/_base.in
# distributed
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -489,7 +489,7 @@ pydantic==2.9.2
# fastapi
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -592,8 +592,6 @@ redis==5.0.4
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/./constraints.txt
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -700,7 +698,7 @@ types-awscrt==0.20.9
# via botocore-stubs
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
@@ -754,6 +752,8 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.9.4
# via
+ # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/autoscaling/requirements/_test.txt b/services/autoscaling/requirements/_test.txt
index e019e4f118b..b6342993557 100644
--- a/services/autoscaling/requirements/_test.txt
+++ b/services/autoscaling/requirements/_test.txt
@@ -185,12 +185,12 @@ py-partiql-parser==0.5.6
# via moto
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
@@ -319,7 +319,7 @@ types-awscrt==0.20.9
# botocore-stubs
types-pyyaml==6.0.12.20240917
# via -r requirements/_test.in
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# aws-sam-translator
diff --git a/services/autoscaling/requirements/_tools.txt b/services/autoscaling/requirements/_tools.txt
index dec3b9c204d..94d944fd4d1 100644
--- a/services/autoscaling/requirements/_tools.txt
+++ b/services/autoscaling/requirements/_tools.txt
@@ -77,7 +77,7 @@ setuptools==74.0.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/catalog/requirements/_base.txt b/services/catalog/requirements/_base.txt
index 7f61e93f32a..78fac9431bf 100644
--- a/services/catalog/requirements/_base.txt
+++ b/services/catalog/requirements/_base.txt
@@ -106,7 +106,7 @@ fastapi==0.115.5
# prometheus-fastapi-instrumentator
fastapi-cli==0.0.5
# via fastapi
-faststream==0.5.28
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.4.1
# via
@@ -325,7 +325,7 @@ psutil==6.0.0
# via -r requirements/../../../packages/service-library/requirements/_base.in
psycopg2-binary==2.9.9
# via sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -359,7 +359,7 @@ pydantic==2.9.2
# fastapi
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -432,7 +432,6 @@ redis==5.0.4
# aiocache
referencing==0.29.3
# via
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -508,7 +507,7 @@ typer==0.12.3
# fastapi-cli
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
@@ -572,6 +571,7 @@ wrapt==1.16.0
yarl==1.9.4
# via
# -r requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/catalog/requirements/_test.txt b/services/catalog/requirements/_test.txt
index a379e35f4d5..77c2a58ebd1 100644
--- a/services/catalog/requirements/_test.txt
+++ b/services/catalog/requirements/_test.txt
@@ -179,7 +179,7 @@ types-psycopg2==2.9.21.20240819
# via -r requirements/_test.in
types-pyyaml==6.0.12.20240917
# via -r requirements/_test.in
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# alembic
diff --git a/services/catalog/requirements/_tools.txt b/services/catalog/requirements/_tools.txt
index 97a60860ebb..38140a80e67 100644
--- a/services/catalog/requirements/_tools.txt
+++ b/services/catalog/requirements/_tools.txt
@@ -77,7 +77,7 @@ setuptools==74.0.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/catalog/src/simcore_service_catalog/core/settings.py b/services/catalog/src/simcore_service_catalog/core/settings.py
index dc49cbbf68e..9c36ff5ba43 100644
--- a/services/catalog/src/simcore_service_catalog/core/settings.py
+++ b/services/catalog/src/simcore_service_catalog/core/settings.py
@@ -1,6 +1,6 @@
import logging
from functools import cached_property
-from typing import Final
+from typing import Annotated, Final
from models_library.api_schemas_catalog.services_specifications import (
ServiceSpecifications,
@@ -44,12 +44,14 @@ def base_url(self) -> str:
class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings):
- LOG_LEVEL: LogLevel = Field(
- LogLevel.INFO.value,
- validation_alias=AliasChoices(
- "CATALOG_LOG_LEVEL", "CATALOG_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"
+ LOG_LEVEL: Annotated[
+ LogLevel,
+ Field(
+ validation_alias=AliasChoices(
+ "CATALOG_LOG_LEVEL", "CATALOG_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"
+ ),
),
- )
+ ] = LogLevel.INFO
CATALOG_LOG_FORMAT_LOCAL_DEV_ENABLED: bool = Field(
default=False,
validation_alias=AliasChoices(
diff --git a/services/catalog/src/simcore_service_catalog/models/services_db.py b/services/catalog/src/simcore_service_catalog/models/services_db.py
index 0412ba6878a..d8264b52df4 100644
--- a/services/catalog/src/simcore_service_catalog/models/services_db.py
+++ b/services/catalog/src/simcore_service_catalog/models/services_db.py
@@ -17,7 +17,8 @@ class ServiceMetaDataAtDB(ServiceKeyVersion, ServiceMetaDataEditable):
thumbnail: Annotated[str, HttpUrl] | None = None
description: str | None = None
- classifiers: list[str] | None = Field(default_factory=list)
+ classifiers: Annotated[list[str] | None, Field(default_factory=list)]
+
owner: PositiveInt | None = None
model_config = ConfigDict(
from_attributes=True,
diff --git a/services/clusters-keeper/requirements/_base.txt b/services/clusters-keeper/requirements/_base.txt
index c642e30aa64..15a9a8f8114 100644
--- a/services/clusters-keeper/requirements/_base.txt
+++ b/services/clusters-keeper/requirements/_base.txt
@@ -160,7 +160,7 @@ fastapi==0.115.5
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
# prometheus-fastapi-instrumentator
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/_base.in
@@ -434,7 +434,7 @@ psutil==6.0.0
# -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/_base.in
# distributed
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -487,7 +487,7 @@ pydantic==2.9.2
# fastapi
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -590,8 +590,6 @@ redis==5.0.4
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/./constraints.txt
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -698,7 +696,7 @@ types-awscrt==0.20.9
# via botocore-stubs
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
@@ -752,6 +750,8 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.9.4
# via
+ # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/clusters-keeper/requirements/_test.txt b/services/clusters-keeper/requirements/_test.txt
index 4e297870fd4..0af083a1485 100644
--- a/services/clusters-keeper/requirements/_test.txt
+++ b/services/clusters-keeper/requirements/_test.txt
@@ -205,12 +205,12 @@ py-partiql-parser==0.5.6
# via moto
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
@@ -309,7 +309,7 @@ sympy==1.13.3
# via cfn-lint
types-pyyaml==6.0.12.20240917
# via -r requirements/_test.in
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# aiodocker
diff --git a/services/clusters-keeper/requirements/_tools.txt b/services/clusters-keeper/requirements/_tools.txt
index dec3b9c204d..94d944fd4d1 100644
--- a/services/clusters-keeper/requirements/_tools.txt
+++ b/services/clusters-keeper/requirements/_tools.txt
@@ -77,7 +77,7 @@ setuptools==74.0.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/dask-sidecar/requirements/_base.txt b/services/dask-sidecar/requirements/_base.txt
index 7cc0de4aa6d..15392d474dd 100644
--- a/services/dask-sidecar/requirements/_base.txt
+++ b/services/dask-sidecar/requirements/_base.txt
@@ -121,7 +121,7 @@ email-validator==2.1.1
# via pydantic
fast-depends==2.4.12
# via faststream
-faststream==0.5.28
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.4.1
# via
@@ -303,7 +303,7 @@ psutil==6.0.0
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# distributed
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -344,7 +344,7 @@ pydantic==2.9.2
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -481,7 +481,7 @@ typer==0.12.3
# -r requirements/../../../packages/settings-library/requirements/_base.in
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
diff --git a/services/dask-sidecar/requirements/_test.txt b/services/dask-sidecar/requirements/_test.txt
index db273aba95a..25ef41fa488 100644
--- a/services/dask-sidecar/requirements/_test.txt
+++ b/services/dask-sidecar/requirements/_test.txt
@@ -145,12 +145,12 @@ py-partiql-parser==0.5.6
# via moto
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
@@ -246,7 +246,7 @@ termcolor==2.4.0
# via pytest-sugar
types-aiofiles==24.1.0.20240626
# via -r requirements/_test.in
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# aws-sam-translator
diff --git a/services/dask-sidecar/requirements/_tools.txt b/services/dask-sidecar/requirements/_tools.txt
index dec3b9c204d..94d944fd4d1 100644
--- a/services/dask-sidecar/requirements/_tools.txt
+++ b/services/dask-sidecar/requirements/_tools.txt
@@ -77,7 +77,7 @@ setuptools==74.0.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/settings.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/settings.py
index 2c3d49ee685..b77811fd57f 100644
--- a/services/dask-sidecar/src/simcore_service_dask_sidecar/settings.py
+++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/settings.py
@@ -16,12 +16,11 @@ class Settings(BaseCustomSettings, MixinLoggingSettings):
LOG_LEVEL: Annotated[
LogLevel,
Field(
- LogLevel.INFO.value,
validation_alias=AliasChoices(
"DASK_SIDECAR_LOGLEVEL", "SIDECAR_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"
),
),
- ]
+ ] = LogLevel.INFO
# sidecar config ---
diff --git a/services/datcore-adapter/requirements/_base.txt b/services/datcore-adapter/requirements/_base.txt
index 476901f832b..fca17558d12 100644
--- a/services/datcore-adapter/requirements/_base.txt
+++ b/services/datcore-adapter/requirements/_base.txt
@@ -100,7 +100,7 @@ fastapi==0.115.5
# prometheus-fastapi-instrumentator
fastapi-pagination==0.12.31
# via -r requirements/_base.in
-faststream==0.5.28
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.4.1
# via
@@ -269,7 +269,7 @@ protobuf==4.25.4
# opentelemetry-proto
psutil==6.0.0
# via -r requirements/../../../packages/service-library/requirements/_base.in
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -300,7 +300,7 @@ pydantic==2.9.2
# fastapi-pagination
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -365,7 +365,6 @@ redis==5.0.4
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -420,7 +419,7 @@ typer==0.12.3
# -r requirements/../../../packages/settings-library/requirements/_base.in
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
@@ -464,6 +463,7 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.9.4
# via
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/datcore-adapter/requirements/_test.txt b/services/datcore-adapter/requirements/_test.txt
index 3ab05285f93..2e103070279 100644
--- a/services/datcore-adapter/requirements/_test.txt
+++ b/services/datcore-adapter/requirements/_test.txt
@@ -118,7 +118,7 @@ types-botocore==1.0.2
# via -r requirements/_test.in
types-s3transfer==0.10.2
# via boto3-stubs
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# boto3-stubs
diff --git a/services/datcore-adapter/requirements/_tools.txt b/services/datcore-adapter/requirements/_tools.txt
index 190dca86c7e..51dcbdba67d 100644
--- a/services/datcore-adapter/requirements/_tools.txt
+++ b/services/datcore-adapter/requirements/_tools.txt
@@ -73,7 +73,7 @@ setuptools==74.0.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/core/settings.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/core/settings.py
index cc7261e73be..d6d8a1924f3 100644
--- a/services/datcore-adapter/src/simcore_service_datcore_adapter/core/settings.py
+++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/core/settings.py
@@ -1,4 +1,5 @@
from functools import cached_property
+from typing import Annotated
from models_library.basic_types import BootModeEnum, LogLevel
from pydantic import AliasChoices, Field, TypeAdapter, field_validator
@@ -21,15 +22,17 @@ class PennsieveSettings(BaseCustomSettings):
class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings):
- LOG_LEVEL: LogLevel = Field(
- default=LogLevel.INFO.value,
- validation_alias=AliasChoices(
- "DATCORE_ADAPTER_LOGLEVEL",
- "DATCORE_ADAPTER_LOG_LEVEL",
- "LOG_LEVEL",
- "LOGLEVEL",
+ LOG_LEVEL: Annotated[
+ LogLevel,
+ Field(
+ validation_alias=AliasChoices(
+ "DATCORE_ADAPTER_LOGLEVEL",
+ "DATCORE_ADAPTER_LOG_LEVEL",
+ "LOG_LEVEL",
+ "LOGLEVEL",
+ ),
),
- )
+ ] = LogLevel.INFO
PENNSIEVE: PennsieveSettings = Field(
json_schema_extra={"auto_default_from_env": True}
diff --git a/services/director-v2/requirements/_base.txt b/services/director-v2/requirements/_base.txt
index e7bfdb265fc..20ff744f93c 100644
--- a/services/director-v2/requirements/_base.txt
+++ b/services/director-v2/requirements/_base.txt
@@ -193,7 +193,7 @@ fastapi==0.115.5
# prometheus-fastapi-instrumentator
fastapi-cli==0.0.5
# via fastapi
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
@@ -577,7 +577,7 @@ psycopg2-binary==2.9.9
# via
# aiopg
# sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -651,7 +651,7 @@ pydantic==2.9.2
# fastapi
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.10.0
# via
@@ -945,7 +945,7 @@ typer==0.12.3
# fastapi-cli
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
diff --git a/services/director-v2/requirements/_test.txt b/services/director-v2/requirements/_test.txt
index 6f4b07aeac9..6194851e0c2 100644
--- a/services/director-v2/requirements/_test.txt
+++ b/services/director-v2/requirements/_test.txt
@@ -300,7 +300,7 @@ types-psycopg2==2.9.21.20240819
# via -r requirements/_test.in
types-pyyaml==6.0.12.20240917
# via -r requirements/_test.in
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# alembic
diff --git a/services/director-v2/requirements/_tools.txt b/services/director-v2/requirements/_tools.txt
index 8f148883b9c..9588b0ee960 100644
--- a/services/director-v2/requirements/_tools.txt
+++ b/services/director-v2/requirements/_tools.txt
@@ -79,7 +79,7 @@ setuptools==74.0.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.11.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/director-v2/src/simcore_service_director_v2/core/settings.py b/services/director-v2/src/simcore_service_director_v2/core/settings.py
index 61e23e9f018..6b9c64de4a3 100644
--- a/services/director-v2/src/simcore_service_director_v2/core/settings.py
+++ b/services/director-v2/src/simcore_service_director_v2/core/settings.py
@@ -122,10 +122,15 @@ def _empty_auth_is_none(cls, v):
class AppSettings(BaseApplicationSettings, MixinLoggingSettings):
- LOG_LEVEL: LogLevel = Field(
- LogLevel.INFO.value,
- validation_alias=AliasChoices("DIRECTOR_V2_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"),
- )
+ LOG_LEVEL: Annotated[
+ LogLevel,
+ Field(
+ validation_alias=AliasChoices(
+ "DIRECTOR_V2_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"
+ ),
+ ),
+ ] = LogLevel.INFO
+
DIRECTOR_V2_LOG_FORMAT_LOCAL_DEV_ENABLED: bool = Field(
default=False,
validation_alias=AliasChoices(
diff --git a/services/director-v2/src/simcore_service_director_v2/models/comp_tasks.py b/services/director-v2/src/simcore_service_director_v2/models/comp_tasks.py
index e45e7aea896..4bf9e1e30af 100644
--- a/services/director-v2/src/simcore_service_director_v2/models/comp_tasks.py
+++ b/services/director-v2/src/simcore_service_director_v2/models/comp_tasks.py
@@ -1,6 +1,6 @@
import datetime as dt
from contextlib import suppress
-from typing import Any
+from typing import Annotated, Any
from dask_task_models_library.container_tasks.protocol import ContainerEnvsDict
from models_library.api_schemas_directorv2.services import NodeRequirements
@@ -119,9 +119,10 @@ class CompTaskAtDB(BaseModel):
job_id: str | None = Field(default=None, description="The worker job ID")
node_schema: NodeSchema = Field(..., alias="schema")
inputs: InputsDict | None = Field(..., description="the inputs payload")
- outputs: OutputsDict | None = Field(
- default_factory=dict, description="the outputs payload"
- )
+ outputs: Annotated[
+ OutputsDict | None,
+ Field(default_factory=dict, description="the outputs payload"),
+ ]
run_hash: str | None = Field(
default=None,
description="the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated",
diff --git a/services/director-v2/src/simcore_service_director_v2/models/dynamic_services_scheduler.py b/services/director-v2/src/simcore_service_director_v2/models/dynamic_services_scheduler.py
index 5e68dba227d..7e9b3ebeac6 100644
--- a/services/director-v2/src/simcore_service_director_v2/models/dynamic_services_scheduler.py
+++ b/services/director-v2/src/simcore_service_director_v2/models/dynamic_services_scheduler.py
@@ -403,7 +403,7 @@ def endpoint(self) -> AnyHttpUrl:
paths_mapping: PathMappingsLabel # overwrites in DynamicSidecarServiceLabels
user_preferences_path: Path | None = None
- callbacks_mapping: CallbacksMapping = Field(default_factory=dict)
+ callbacks_mapping: Annotated[CallbacksMapping, Field(default_factory=dict)]
dynamic_sidecar_network_name: str = Field(
...,
@@ -438,9 +438,7 @@ def endpoint(self) -> AnyHttpUrl:
...,
description="used as label to filter out the metrics from the cAdvisor prometheus metrics",
)
- proxy_service_name: str = Field(
- default=None, description="service name given to the proxy"
- )
+ proxy_service_name: str = Field(description="service name given to the proxy")
proxy_admin_api_port: PortInt | None = Field(
default=None, description="used as the admin endpoint API port"
)
@@ -466,11 +464,13 @@ def get_proxy_endpoint(self) -> AnyHttpUrl:
)
return url
- product_name: str = Field(
- None,
- description="Current product upon which this service is scheduled. "
- "If set to None, the current product is undefined. Mostly for backwards compatibility",
- )
+ product_name: Annotated[
+ str | None,
+ Field(
+ description="Current product upon which this service is scheduled"
+ "If set to None, the current product is undefined. Mostly for backwards compatibility",
+ ),
+ ] = None
@classmethod
def from_http_request(
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/proxy.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/proxy.py
index b946e71d4ea..07b1adbb08b 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/proxy.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/proxy.py
@@ -35,6 +35,9 @@ def get_dynamic_proxy_spec(
The proxy is used to create network isolation
from the rest of the platform.
"""
+ assert (
+ scheduler_data.product_name is not None
+ ), "ONLY for legacy. This function should not be called with product_name==None" # nosec
proxy_settings: DynamicSidecarProxySettings = (
dynamic_services_settings.DYNAMIC_SIDECAR_PROXY_SETTINGS
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/sidecar.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/sidecar.py
index b6885bae7b3..4ee83bee16f 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/sidecar.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/sidecar.py
@@ -447,6 +447,10 @@ async def get_dynamic_sidecar_spec( # pylint:disable=too-many-arguments# noqa:
dynamic_sidecar_settings=dynamic_sidecar_settings, app_settings=app_settings
)
+ assert (
+ scheduler_data.product_name is not None
+ ), "ONLY for legacy. This function should not be called with product_name==None" # nosec
+
standard_simcore_docker_labels: dict[
DockerLabelKey, str
] = StandardSimcoreDockerLabels(
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_event_create_sidecars.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_event_create_sidecars.py
index abf052f4e12..c6a4cba08f3 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_event_create_sidecars.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_event_create_sidecars.py
@@ -199,7 +199,9 @@ async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None:
groups_extra_properties = get_repository(app, GroupsExtraPropertiesRepository)
- assert scheduler_data.product_name is not None # nosec
+ assert (
+ scheduler_data.product_name is not None
+ ), "ONLY for legacy. This function should not be called with product_name==None" # nosec
user_extra_properties = await groups_extra_properties.get_user_extra_properties(
user_id=scheduler_data.user_id, product_name=scheduler_data.product_name
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_user_services.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_user_services.py
index f8416b4809b..64563278d4c 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_user_services.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_user_services.py
@@ -73,7 +73,9 @@ async def submit_compose_sepc(app: FastAPI, scheduler_data: SchedulerData) -> No
)
groups_extra_properties = get_repository(app, GroupsExtraPropertiesRepository)
- assert scheduler_data.product_name is not None # nosec
+ assert (
+ scheduler_data.product_name is not None # nosec
+ ), "ONLY for legacy. This function should not be called with product_name==None"
allow_internet_access: bool = await groups_extra_properties.has_internet_access(
user_id=scheduler_data.user_id, product_name=scheduler_data.product_name
)
@@ -117,6 +119,10 @@ async def submit_compose_sepc(app: FastAPI, scheduler_data: SchedulerData) -> No
async def create_user_services( # pylint: disable=too-many-statements
app: FastAPI, scheduler_data: SchedulerData
) -> None:
+ assert (
+ scheduler_data.product_name is not None # nosec
+ ), "ONLY for legacy. This function should not be called with product_name==None"
+
dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = (
app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER
)
@@ -168,7 +174,9 @@ async def progress_create_containers(
project_name=project_name,
node_name=node_name,
service_key=scheduler_data.key,
- service_version=TypeAdapter(ServiceVersion).validate_python(scheduler_data.version),
+ service_version=TypeAdapter(ServiceVersion).validate_python(
+ scheduler_data.version
+ ),
service_resources=scheduler_data.service_resources,
service_additional_metadata={},
)
diff --git a/services/director-v2/tests/unit/conftest.py b/services/director-v2/tests/unit/conftest.py
index b305f6bcafd..7f4bb33b47c 100644
--- a/services/director-v2/tests/unit/conftest.py
+++ b/services/director-v2/tests/unit/conftest.py
@@ -51,9 +51,9 @@ def simcore_services_network_name() -> str:
@pytest.fixture
def simcore_service_labels() -> SimcoreServiceLabels:
- simcore_service_labels = SimcoreServiceLabels.model_validate(
- SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][1]
- )
+ example = SimcoreServiceLabels.model_json_schema()["examples"][1]
+
+ simcore_service_labels = SimcoreServiceLabels.model_validate(example)
simcore_service_labels.callbacks_mapping = CallbacksMapping.model_validate({})
return simcore_service_labels
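The test fixtures above and below switch from reading `model_config["json_schema_extra"]["examples"]` to `model_json_schema()["examples"]`. A minimal sketch of why both expose the same data, using an illustrative model:

```python
# Sketch: examples declared via json_schema_extra are merged into the generated
# schema, so tests can read them through model_json_schema() instead of poking
# into model_config.
from pydantic import BaseModel, ConfigDict


class _Labeled(BaseModel):
    name: str
    model_config = ConfigDict(json_schema_extra={"examples": [{"name": "demo"}]})


assert _Labeled.model_json_schema()["examples"] == [{"name": "demo"}]
assert _Labeled.model_validate(_Labeled.model_json_schema()["examples"][0]).name == "demo"
```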
diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_specs.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_specs.py
index 340c7ad3e44..e449cba3fc1 100644
--- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_specs.py
+++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_specs.py
@@ -6,7 +6,6 @@
from typing import Any
from uuid import uuid4
-from pydantic import TypeAdapter
import pytest
import yaml
from models_library.docker import to_simcore_runtime_docker_label_key
@@ -22,6 +21,7 @@
ServiceResourcesDict,
)
from models_library.users import UserID
+from pydantic import TypeAdapter
from servicelib.resources import CPU_RESOURCE_LIMIT_KEY, MEM_RESOURCE_LIMIT_KEY
from simcore_service_director_v2.modules.dynamic_sidecar import docker_compose_specs
@@ -154,7 +154,7 @@ async def test_inject_resource_limits_and_reservations(
[
pytest.param(
json.loads(
- SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][2][
+ SimcoreServiceLabels.model_json_schema()["examples"][2][
"simcore.service.compose-spec"
]
),
@@ -198,7 +198,9 @@ def test_regression_service_has_no_reservations():
"version": "3.7",
"services": {DEFAULT_SINGLE_SERVICE_NAME: {}},
}
- service_resources: ServiceResourcesDict = TypeAdapter(ServiceResourcesDict).validate_python({})
+ service_resources: ServiceResourcesDict = TypeAdapter(
+ ServiceResourcesDict
+ ).validate_python({})
spec_before = deepcopy(service_spec)
docker_compose_specs._update_resource_limits_and_reservations(
diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler.py
index f0a17c5e51c..5ffbc0ef96a 100644
--- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler.py
+++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler.py
@@ -162,11 +162,7 @@ def mocked_director_v0(
),
name="service labels",
).respond(
- json={
- "data": SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][0]
- }
+ json={"data": SimcoreServiceLabels.model_json_schema()["examples"][0]}
)
yield mock
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
index 04b85f8ad82..4c8ed5f4b78 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
@@ -126,7 +126,7 @@ def fake_service_resources() -> ServiceResourcesDict:
@pytest.fixture
def fake_service_labels() -> dict[str, Any]:
return choice( # noqa: S311
- SimcoreServiceLabels.model_config["json_schema_extra"]["examples"] # type: ignore
+ SimcoreServiceLabels.model_json_schema()["examples"] # type: ignore
)
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_dynamic_services.py b/services/director-v2/tests/unit/with_dbs/test_api_route_dynamic_services.py
index 2de98368d9a..a44b2838431 100644
--- a/services/director-v2/tests/unit/with_dbs/test_api_route_dynamic_services.py
+++ b/services/director-v2/tests/unit/with_dbs/test_api_route_dynamic_services.py
@@ -292,9 +292,7 @@ def remove_service(node_uuid: NodeID, *ars: Any, **kwargs: Any) -> None:
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][0],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][0],
exp_status_code=status.HTTP_307_TEMPORARY_REDIRECT,
is_legacy=True,
),
@@ -305,9 +303,7 @@ def remove_service(node_uuid: NodeID, *ars: Any, **kwargs: Any) -> None:
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][1],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1],
exp_status_code=status.HTTP_201_CREATED,
is_legacy=False,
),
@@ -318,9 +314,7 @@ def remove_service(node_uuid: NodeID, *ars: Any, **kwargs: Any) -> None:
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][2],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][2],
exp_status_code=status.HTTP_201_CREATED,
is_legacy=False,
),
@@ -376,9 +370,7 @@ def test_create_dynamic_services(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][0],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][0],
exp_status_code=status.HTTP_307_TEMPORARY_REDIRECT,
is_legacy=True,
),
@@ -389,9 +381,7 @@ def test_create_dynamic_services(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][1],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1],
exp_status_code=status.HTTP_200_OK,
is_legacy=False,
),
@@ -402,9 +392,7 @@ def test_create_dynamic_services(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][2],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][2],
exp_status_code=status.HTTP_200_OK,
is_legacy=False,
),
@@ -446,9 +434,7 @@ def test_get_service_status(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][0],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][0],
exp_status_code=status.HTTP_307_TEMPORARY_REDIRECT,
is_legacy=True,
),
@@ -459,9 +445,7 @@ def test_get_service_status(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][1],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1],
exp_status_code=status.HTTP_204_NO_CONTENT,
is_legacy=False,
),
@@ -472,9 +456,7 @@ def test_get_service_status(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][2],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][2],
exp_status_code=status.HTTP_204_NO_CONTENT,
is_legacy=False,
),
@@ -530,9 +512,7 @@ def dynamic_sidecar_scheduler(minimal_app: FastAPI) -> DynamicSidecarsScheduler:
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][1],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1],
exp_status_code=status.HTTP_201_CREATED,
is_legacy=False,
)
@@ -581,9 +561,7 @@ def test_delete_service_waiting_for_manual_intervention(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][0],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][0],
exp_status_code=status.HTTP_200_OK,
is_legacy=True,
),
@@ -594,9 +572,7 @@ def test_delete_service_waiting_for_manual_intervention(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][1],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1],
exp_status_code=status.HTTP_200_OK,
is_legacy=False,
),
@@ -607,9 +583,7 @@ def test_delete_service_waiting_for_manual_intervention(
service=DynamicServiceCreate.model_config["json_schema_extra"][
"example"
],
- service_labels=SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ][2],
+ service_labels=SimcoreServiceLabels.model_json_schema()["examples"][2],
exp_status_code=status.HTTP_200_OK,
is_legacy=False,
),
diff --git a/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_api.py b/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_api.py
index 77c327706fd..f7423b3944c 100644
--- a/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_api.py
+++ b/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_api.py
@@ -331,9 +331,7 @@ def service_name() -> str:
@pytest.fixture(
params=[
SimcoreServiceLabels.model_validate(example)
- for example in SimcoreServiceLabels.model_config["json_schema_extra"][
- "examples"
- ]
+ for example in SimcoreServiceLabels.model_json_schema()["examples"]
],
)
def labels_example(request: pytest.FixtureRequest) -> SimcoreServiceLabels:
diff --git a/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_service_specs.py b/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_service_specs.py
index 52e0c6f4be0..99596264831 100644
--- a/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_service_specs.py
+++ b/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_service_specs.py
@@ -121,7 +121,7 @@ def swarm_network_id() -> str:
def simcore_service_labels() -> SimcoreServiceLabels:
# overwrites global fixture
return SimcoreServiceLabels.model_validate(
- SimcoreServiceLabels.model_config["json_schema_extra"]["examples"][2]
+ SimcoreServiceLabels.model_json_schema()["examples"][2]
)
diff --git a/services/director/requirements/_base.txt b/services/director/requirements/_base.txt
index 656861c1ba1..67e2c6e55f3 100644
--- a/services/director/requirements/_base.txt
+++ b/services/director/requirements/_base.txt
@@ -98,7 +98,7 @@ fastapi==0.115.5
# prometheus-fastapi-instrumentator
fastapi-cli==0.0.5
# via fastapi
-faststream==0.5.30
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.5.0
# via
@@ -292,7 +292,7 @@ protobuf==5.28.3
# opentelemetry-proto
psutil==6.1.0
# via -r requirements/../../../packages/service-library/requirements/_base.in
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -322,7 +322,7 @@ pydantic==2.9.2
# fastapi
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.10.0
# via
@@ -388,7 +388,6 @@ redis==5.2.0
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -501,6 +500,7 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.17.1
# via
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/docker-compose.yml b/services/docker-compose.yml
index 5da1a28ba0d..265d29e56ed 100644
--- a/services/docker-compose.yml
+++ b/services/docker-compose.yml
@@ -562,6 +562,7 @@ services:
REDIS_PASSWORD: ${REDIS_PASSWORD}
DIRECTOR_V2_HOST: ${DIRECTOR_V2_HOST}
DIRECTOR_V2_PORT: ${DIRECTOR_V2_PORT}
+ DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: ${DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER}
DYNAMIC_SCHEDULER_LOGLEVEL: ${DYNAMIC_SCHEDULER_LOGLEVEL}
DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT: ${DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT}
DYNAMIC_SCHEDULER_PROFILING: ${DYNAMIC_SCHEDULER_PROFILING}
diff --git a/services/dynamic-scheduler/requirements/_base.txt b/services/dynamic-scheduler/requirements/_base.txt
index 6cf4dc07c90..c6fcc166562 100644
--- a/services/dynamic-scheduler/requirements/_base.txt
+++ b/services/dynamic-scheduler/requirements/_base.txt
@@ -110,7 +110,7 @@ fastapi==0.115.5
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
# nicegui
-faststream==0.5.30
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.5.0
# via
diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/_services.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/_services.py
index f05f3f2e182..c1b48ad2eb4 100644
--- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/_services.py
+++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/_services.py
@@ -14,9 +14,7 @@
ServiceWasNotFoundError,
)
-from ...core.settings import ApplicationSettings
-from ...services.director_v2 import DirectorV2Client
-from ...services.service_tracker import set_request_as_running, set_request_as_stopped
+from ...services import scheduler_interface
router = RPCRouter()
@@ -35,20 +33,16 @@ async def list_tracked_dynamic_services(
async def get_service_status(
app: FastAPI, *, node_id: NodeID
) -> NodeGet | DynamicServiceGet | NodeGetIdle:
- director_v2_client = DirectorV2Client.get_from_app_state(app)
- return await director_v2_client.get_status(node_id)
+ return await scheduler_interface.get_service_status(app, node_id=node_id)
@router.expose()
async def run_dynamic_service(
app: FastAPI, *, dynamic_service_start: DynamicServiceStart
) -> NodeGet | DynamicServiceGet:
- director_v2_client = DirectorV2Client.get_from_app_state(app)
- response: NodeGet | DynamicServiceGet = (
- await director_v2_client.run_dynamic_service(dynamic_service_start)
+ return await scheduler_interface.run_dynamic_service(
+ app, dynamic_service_start=dynamic_service_start
)
- await set_request_as_running(app, dynamic_service_start)
- return response
@router.expose(
@@ -60,12 +54,6 @@ async def run_dynamic_service(
async def stop_dynamic_service(
app: FastAPI, *, dynamic_service_stop: DynamicServiceStop
) -> None:
- director_v2_client = DirectorV2Client.get_from_app_state(app)
- settings: ApplicationSettings = app.state.settings
- await director_v2_client.stop_dynamic_service(
- node_id=dynamic_service_stop.node_id,
- simcore_user_agent=dynamic_service_stop.simcore_user_agent,
- save_state=dynamic_service_stop.save_state,
- timeout=settings.DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT,
+ return await scheduler_interface.stop_dynamic_service(
+ app, dynamic_service_stop=dynamic_service_stop
)
- await set_request_as_stopped(app, dynamic_service_stop)
diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py
index 94acb6eaac4..9f046943344 100644
--- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py
+++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py
@@ -68,6 +68,14 @@ class _BaseApplicationSettings(BaseApplicationSettings, MixinLoggingSettings):
),
)
+ DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: bool = Field(
+ default=False,
+ description=(
+ "this is a way to switch between different dynamic schedulers for the new style services"
+ # NOTE: this option should be removed when the scheduling will be done via this service
+ ),
+ )
+
@field_validator("DYNAMIC_SCHEDULER_LOGLEVEL", mode="before")
@classmethod
def _validate_log_level(cls, value: str) -> str:
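The new flag is plumbed through .env-devel and docker-compose above. A rough sketch with a stand-in settings class (not the real ApplicationSettings), assuming standard pydantic-settings behaviour, of how the environment value reaches the bool field:

import os

from pydantic_settings import BaseSettings


class _ToySettings(BaseSettings):
    DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: bool = False


# "0"/"1" (or "false"/"true") from the environment are coerced into a bool,
# so the 0 in .env-devel keeps the director-v2 code path active by default
os.environ["DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER"] = "0"
assert _ToySettings().DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER is False

os.environ["DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER"] = "1"
assert _ToySettings().DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER is True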
diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/scheduler_interface.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/scheduler_interface.py
new file mode 100644
index 00000000000..1d4bcdd112b
--- /dev/null
+++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/scheduler_interface.py
@@ -0,0 +1,60 @@
+from fastapi import FastAPI
+from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet
+from models_library.api_schemas_dynamic_scheduler.dynamic_services import (
+ DynamicServiceStart,
+ DynamicServiceStop,
+)
+from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle
+from models_library.projects_nodes_io import NodeID
+
+from ..core.settings import ApplicationSettings
+from .director_v2 import DirectorV2Client
+from .service_tracker import set_request_as_running, set_request_as_stopped
+
+
+async def get_service_status(
+ app: FastAPI, *, node_id: NodeID
+) -> NodeGet | DynamicServiceGet | NodeGetIdle:
+ settings: ApplicationSettings = app.state.settings
+ if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER:
+ raise NotImplementedError
+
+ director_v2_client = DirectorV2Client.get_from_app_state(app)
+ response: NodeGet | DynamicServiceGet | NodeGetIdle = (
+ await director_v2_client.get_status(node_id)
+ )
+ return response
+
+
+async def run_dynamic_service(
+ app: FastAPI, *, dynamic_service_start: DynamicServiceStart
+) -> NodeGet | DynamicServiceGet:
+ settings: ApplicationSettings = app.state.settings
+ if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER:
+ raise NotImplementedError
+
+ director_v2_client = DirectorV2Client.get_from_app_state(app)
+ response: NodeGet | DynamicServiceGet = (
+ await director_v2_client.run_dynamic_service(dynamic_service_start)
+ )
+
+ await set_request_as_running(app, dynamic_service_start)
+ return response
+
+
+async def stop_dynamic_service(
+ app: FastAPI, *, dynamic_service_stop: DynamicServiceStop
+) -> None:
+ settings: ApplicationSettings = app.state.settings
+ if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER:
+ raise NotImplementedError
+
+ director_v2_client = DirectorV2Client.get_from_app_state(app)
+ await director_v2_client.stop_dynamic_service(
+ node_id=dynamic_service_stop.node_id,
+ simcore_user_agent=dynamic_service_stop.simcore_user_agent,
+ save_state=dynamic_service_stop.save_state,
+ timeout=settings.DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT,
+ )
+
+ await set_request_as_stopped(app, dynamic_service_stop)
diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py
index 4cd8209d1ae..3f6efbfaecb 100644
--- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py
+++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py
@@ -12,8 +12,7 @@
from servicelib.deferred_tasks import BaseDeferredHandler, TaskUID
from servicelib.deferred_tasks._base_deferred_handler import DeferredContext
-from .. import service_tracker
-from ..director_v2 import DirectorV2Client
+from .. import scheduler_interface, service_tracker
from ..notifier import notify_service_status_change
_logger = logging.getLogger(__name__)
@@ -47,9 +46,8 @@ async def run(
app: FastAPI = context["app"]
node_id: NodeID = context["node_id"]
- director_v2_client: DirectorV2Client = DirectorV2Client.get_from_app_state(app)
service_status: NodeGet | RunningDynamicServiceDetails | NodeGetIdle = (
- await director_v2_client.get_status(node_id)
+ await scheduler_interface.get_service_status(app, node_id=node_id)
)
_logger.debug(
"Service status type=%s, %s", type(service_status), service_status
diff --git a/services/dynamic-scheduler/tests/unit/api_rpc/test_api_rpc__services.py b/services/dynamic-scheduler/tests/unit/api_rpc/test_api_rpc__services.py
index b5775e3ccc2..7c1665065ae 100644
--- a/services/dynamic-scheduler/tests/unit/api_rpc/test_api_rpc__services.py
+++ b/services/dynamic-scheduler/tests/unit/api_rpc/test_api_rpc__services.py
@@ -20,6 +20,7 @@
from models_library.users import UserID
from pydantic import TypeAdapter
from pytest_mock import MockerFixture
+from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
from pytest_simcore.helpers.typing_env import EnvVarsDict
from servicelib.rabbitmq import RabbitMQRPCClient, RPCServerError
from servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler import services
@@ -142,12 +143,35 @@ def mock_director_v2_service_state(
yield None
+@pytest.fixture(
+ params=[
+ False,
+ pytest.param(
+ True,
+ marks=pytest.mark.xfail(
+ reason="INTERNAL scheduler implementation is missing"
+ ),
+ ),
+ ]
+)
+def use_internal_scheduler(request: pytest.FixtureRequest) -> bool:
+ return request.param
+
+
@pytest.fixture
def app_environment(
app_environment: EnvVarsDict,
rabbit_service: RabbitSettings,
redis_service: RedisSettings,
+ use_internal_scheduler: bool,
+ monkeypatch: pytest.MonkeyPatch,
) -> EnvVarsDict:
+ setenvs_from_dict(
+ monkeypatch,
+ {
+ "DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER": f"{use_internal_scheduler}",
+ },
+ )
return app_environment
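A standalone illustration (toy test, not the real suite) of the fixture pattern introduced above: parametrizing the fixture runs every dependent test once per value, and the True case is marked xfail until the internal scheduler exists.

import pytest


@pytest.fixture(
    params=[
        False,
        pytest.param(True, marks=pytest.mark.xfail(reason="internal scheduler missing")),
    ]
)
def use_internal_scheduler(request: pytest.FixtureRequest) -> bool:
    return request.param


def test_scheduler_flag(use_internal_scheduler: bool) -> None:
    # stands in for an RPC call that hits the NotImplementedError path when True
    if use_internal_scheduler:
        raise NotImplementedError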
diff --git a/services/dynamic-sidecar/requirements/_base.txt b/services/dynamic-sidecar/requirements/_base.txt
index 4f6ebdf0893..10ad9581448 100644
--- a/services/dynamic-sidecar/requirements/_base.txt
+++ b/services/dynamic-sidecar/requirements/_base.txt
@@ -156,7 +156,7 @@ fastapi==0.115.5
# via
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
-faststream==0.5.30
+faststream==0.5.31
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
diff --git a/services/efs-guardian/requirements/_base.txt b/services/efs-guardian/requirements/_base.txt
index f0ac604d836..c5dbeb68306 100644
--- a/services/efs-guardian/requirements/_base.txt
+++ b/services/efs-guardian/requirements/_base.txt
@@ -155,7 +155,7 @@ fastapi==0.115.5
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
# prometheus-fastapi-instrumentator
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/_base.in
@@ -418,7 +418,7 @@ psutil==6.1.0
# -r requirements/../../../packages/service-library/requirements/_base.in
psycopg2-binary==2.9.10
# via sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -475,7 +475,7 @@ pydantic==2.9.2
# fastapi
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -580,8 +580,6 @@ redis==5.1.1
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/./constraints.txt
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -758,7 +756,9 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.15.4
# via
+ # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/efs-guardian/requirements/_test.txt b/services/efs-guardian/requirements/_test.txt
index c2b186a1310..364a52d87ae 100644
--- a/services/efs-guardian/requirements/_test.txt
+++ b/services/efs-guardian/requirements/_test.txt
@@ -212,12 +212,12 @@ py-partiql-parser==0.5.6
# via moto
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
diff --git a/services/invitations/requirements/_base.txt b/services/invitations/requirements/_base.txt
index 23c53b39ae9..38827d398db 100644
--- a/services/invitations/requirements/_base.txt
+++ b/services/invitations/requirements/_base.txt
@@ -108,7 +108,7 @@ fastapi==0.115.5
# via
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
-faststream==0.5.30
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.5.0
# via
diff --git a/services/payments/openapi.json b/services/payments/openapi.json
index a5f2198f99f..1b36cc36477 100644
--- a/services/payments/openapi.json
+++ b/services/payments/openapi.json
@@ -483,16 +483,22 @@
"title": "Message"
},
"payment_method_id": {
- "type": "string",
- "maxLength": 100,
- "minLength": 1,
+ "anyOf": [
+ {
+ "type": "string",
+ "maxLength": 100,
+ "minLength": 1
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Payment Method Id"
}
},
"type": "object",
"required": [
- "success",
- "payment_method_id"
+ "success"
],
"title": "SavedPaymentMethod"
},
diff --git a/services/payments/requirements/_base.txt b/services/payments/requirements/_base.txt
index c2f91b9459d..4d758aea4d6 100644
--- a/services/payments/requirements/_base.txt
+++ b/services/payments/requirements/_base.txt
@@ -124,7 +124,7 @@ fastapi==0.115.5
# via
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
-faststream==0.5.30
+faststream==0.5.31
# via -r requirements/../../../packages/service-library/requirements/_base.in
frozenlist==1.5.0
# via
diff --git a/services/payments/src/simcore_service_payments/api/rest/_acknowledgements.py b/services/payments/src/simcore_service_payments/api/rest/_acknowledgements.py
index 041cf16f6a2..ca0d74c8e3e 100644
--- a/services/payments/src/simcore_service_payments/api/rest/_acknowledgements.py
+++ b/services/payments/src/simcore_service_payments/api/rest/_acknowledgements.py
@@ -6,6 +6,7 @@
PaymentMethodNotFoundError,
PaymentNotFoundError,
)
+from servicelib.logging_errors import create_troubleshotting_log_kwargs
from servicelib.logging_utils import log_context
from ..._constants import ACKED, PGDB
@@ -77,18 +78,36 @@ async def acknowledge_payment(
)
if ack.saved:
- inserted = await payments_methods.insert_payment_method(
- repo=repo_methods,
- payment_method_id=ack.saved.payment_method_id,
- user_id=transaction.user_id,
- wallet_id=transaction.wallet_id,
- ack=ack.saved,
- )
- background_tasks.add_task(
- payments_methods.on_payment_method_completed,
- payment_method=inserted,
- notifier=notifier,
- )
+ if ack.saved.payment_method_id is None or not ack.saved.success:
+ _logger.error(
+ **create_troubleshotting_log_kwargs(
+ f"Got ack that {payment_id=} was completed but failed to save the payment-method used for the payment as requested.",
+ error=RuntimeError("Failed to save payment-method after payment"),
+ error_context={
+ "ack": ack,
+ "user_id": transaction.user_id,
+ "payment_id": payment_id,
+ "transaction": transaction,
+ },
+ tip="This issue is not critical. Since the payment-method could not be saved, "
+ "the user cannot use it in following payments and will have to re-introduce it manually"
+ "SEE https://github.com/ITISFoundation/osparc-simcore/issues/6902",
+ )
+ )
+ else:
+ inserted = await payments_methods.insert_payment_method(
+ repo=repo_methods,
+ payment_method_id=ack.saved.payment_method_id,
+ user_id=transaction.user_id,
+ wallet_id=transaction.wallet_id,
+ ack=ack.saved,
+ )
+
+ background_tasks.add_task(
+ payments_methods.on_payment_method_completed,
+ payment_method=inserted,
+ notifier=notifier,
+ )
@router.post("/payments-methods/{payment_method_id}:ack")
diff --git a/services/payments/src/simcore_service_payments/cli.py b/services/payments/src/simcore_service_payments/cli.py
index d493ba884bd..64c67d00e8f 100644
--- a/services/payments/src/simcore_service_payments/cli.py
+++ b/services/payments/src/simcore_service_payments/cli.py
@@ -3,7 +3,6 @@
import os
import typer
-from common_library.json_serialization import json_dumps
from servicelib.utils_secrets import generate_password, generate_token_secret_key
from settings_library.postgres import PostgresSettings
from settings_library.rabbit import RabbitSettings
@@ -21,9 +20,7 @@
main = typer.Typer(name=PROJECT_NAME)
main.command()(
- create_settings_command(
- settings_cls=ApplicationSettings, logger=_logger, json_serializer=json_dumps
- )
+ create_settings_command(settings_cls=ApplicationSettings, logger=_logger)
)
main.callback()(create_version_callback(__version__))
diff --git a/services/payments/src/simcore_service_payments/models/schemas/acknowledgements.py b/services/payments/src/simcore_service_payments/models/schemas/acknowledgements.py
index 81e9abaae39..a9cb86f333a 100644
--- a/services/payments/src/simcore_service_payments/models/schemas/acknowledgements.py
+++ b/services/payments/src/simcore_service_payments/models/schemas/acknowledgements.py
@@ -42,7 +42,7 @@ class AckPaymentMethod(_BaseAck):
class SavedPaymentMethod(AckPaymentMethod):
- payment_method_id: PaymentMethodID
+ payment_method_id: PaymentMethodID | None = None
#
@@ -83,11 +83,14 @@ class SavedPaymentMethod(AckPaymentMethod):
class AckPayment(_BaseAckPayment):
- saved: SavedPaymentMethod | None = Field(
- default=None,
- description="Gets the payment-method if user opted to save it during payment."
- "If used did not opt to save of payment-method was already saved, then it defaults to None",
- )
+ saved: Annotated[
+ SavedPaymentMethod | None,
+ Field(
+ description="Gets the payment-method if user opted to save it during payment."
+ "If used did not opt to save of payment-method was already saved, then it defaults to None",
+ ),
+ ] = None
+
model_config = ConfigDict(
json_schema_extra={
"example": _EXAMPLES[1].copy(), # shown in openapi.json
@@ -97,7 +100,7 @@ class AckPayment(_BaseAckPayment):
@field_validator("invoice_url")
@classmethod
- def success_requires_invoice(cls, v, info: ValidationInfo):
+ def _success_requires_invoice(cls, v, info: ValidationInfo):
success = info.data.get("success")
if success and not v:
msg = "Invoice required on successful payments"
diff --git a/services/payments/tests/unit/test__model_examples.py b/services/payments/tests/unit/test__model_examples.py
index 6e072aa658a..c97e35a4686 100644
--- a/services/payments/tests/unit/test__model_examples.py
+++ b/services/payments/tests/unit/test__model_examples.py
@@ -24,5 +24,7 @@ def test_api_server_model_examples(
assert model_cls.model_validate(example_data) is not None
except ValidationError as err:
pytest.fail(
- f"\n{example_name}: {json.dumps(example_data, indent=1)}\nError: {err}"
+ f"{example_name} is invalid {model_cls.__module__}.{model_cls.__name__}:"
+ f"\n{json.dumps(example_data, indent=1)}"
+ f"\nError: {err}"
)
diff --git a/services/resource-usage-tracker/Makefile b/services/resource-usage-tracker/Makefile
index 5f61740d7ce..d6d8745bc13 100644
--- a/services/resource-usage-tracker/Makefile
+++ b/services/resource-usage-tracker/Makefile
@@ -10,4 +10,4 @@ include ../../scripts/common-service.Makefile
openapi-specs: openapi.json
openapi.json: ## produces openapi.json
# generating openapi specs file (need to have the environment set for this)
- @python3 -c "import json; from $(APP_PACKAGE_NAME).web_main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@
+ @python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@
diff --git a/services/resource-usage-tracker/openapi.json b/services/resource-usage-tracker/openapi.json
index 6aa53c7118c..3ee8f2eeb73 100644
--- a/services/resource-usage-tracker/openapi.json
+++ b/services/resource-usage-tracker/openapi.json
@@ -380,7 +380,6 @@
"enum": [
"TIER"
],
- "const": "TIER",
"title": "PricingPlanClassification"
},
"PricingPlanGet": {
@@ -581,9 +580,6 @@
},
"docs_url": {
"type": "string",
- "maxLength": 2083,
- "minLength": 1,
- "format": "uri",
"title": "Docs Url"
}
},
diff --git a/services/resource-usage-tracker/requirements/_base.txt b/services/resource-usage-tracker/requirements/_base.txt
index 0f0c9c3592e..da9a82895d9 100644
--- a/services/resource-usage-tracker/requirements/_base.txt
+++ b/services/resource-usage-tracker/requirements/_base.txt
@@ -161,7 +161,7 @@ fastapi==0.115.5
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# -r requirements/_base.in
# prometheus-fastapi-instrumentator
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/_base.in
@@ -446,7 +446,7 @@ psutil==6.0.0
# -r requirements/../../../packages/service-library/requirements/_base.in
psycopg2-binary==2.9.9
# via sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -503,7 +503,7 @@ pydantic==2.9.2
# fastapi
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -620,8 +620,6 @@ redis==5.0.4
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/./constraints.txt
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
regex==2023.12.25
@@ -752,7 +750,7 @@ types-awscrt==0.20.5
# via botocore-stubs
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
@@ -819,7 +817,9 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.9.4
# via
+ # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/resource-usage-tracker/requirements/_test.txt b/services/resource-usage-tracker/requirements/_test.txt
index 484ce4158da..2e7590de3f6 100644
--- a/services/resource-usage-tracker/requirements/_test.txt
+++ b/services/resource-usage-tracker/requirements/_test.txt
@@ -186,12 +186,12 @@ py-partiql-parser==0.5.6
# via moto
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
@@ -308,7 +308,7 @@ termcolor==2.4.0
# via pytest-sugar
types-requests==2.32.0.20240914
# via -r requirements/_test.in
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# alembic
diff --git a/services/resource-usage-tracker/requirements/_tools.txt b/services/resource-usage-tracker/requirements/_tools.txt
index 6565ecfab1f..6e0b433d1ed 100644
--- a/services/resource-usage-tracker/requirements/_tools.txt
+++ b/services/resource-usage-tracker/requirements/_tools.txt
@@ -80,7 +80,7 @@ setuptools==74.0.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/storage/requirements/_base.txt b/services/storage/requirements/_base.txt
index a3513a00a8f..c5a74d9831e 100644
--- a/services/storage/requirements/_base.txt
+++ b/services/storage/requirements/_base.txt
@@ -155,7 +155,7 @@ email-validator==2.1.1
# via pydantic
fast-depends==2.4.12
# via faststream
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/_base.in
@@ -416,7 +416,7 @@ psycopg2-binary==2.9.9
# via
# aiopg
# sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -472,7 +472,7 @@ pydantic==2.9.2
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -674,7 +674,7 @@ types-awscrt==0.20.5
# via botocore-stubs
types-python-dateutil==2.9.0.20240316
# via arrow
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
@@ -761,7 +761,9 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.9.4
# via
+ # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/storage/requirements/_test.txt b/services/storage/requirements/_test.txt
index 4d1791cf9e2..6b7dbd281bb 100644
--- a/services/storage/requirements/_test.txt
+++ b/services/storage/requirements/_test.txt
@@ -189,12 +189,12 @@ py-partiql-parser==0.5.6
# via moto
pycparser==2.22
# via cffi
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via
# -c requirements/_base.txt
# pydantic
@@ -312,7 +312,7 @@ termcolor==2.4.0
# via pytest-sugar
types-aiofiles==24.1.0.20240626
# via -r requirements/_test.in
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# aws-sam-translator
diff --git a/services/storage/requirements/_tools.txt b/services/storage/requirements/_tools.txt
index 6565ecfab1f..6e0b433d1ed 100644
--- a/services/storage/requirements/_tools.txt
+++ b/services/storage/requirements/_tools.txt
@@ -80,7 +80,7 @@ setuptools==74.0.0
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.10.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/storage/src/simcore_service_storage/settings.py b/services/storage/src/simcore_service_storage/settings.py
index 1931bdb79c2..75d25311fcd 100644
--- a/services/storage/src/simcore_service_storage/settings.py
+++ b/services/storage/src/simcore_service_storage/settings.py
@@ -1,4 +1,4 @@
-from typing import Self
+from typing import Annotated, Self
from pydantic import (
AliasChoices,
@@ -24,10 +24,12 @@ class Settings(BaseCustomSettings, MixinLoggingSettings):
STORAGE_HOST: str = "0.0.0.0" # nosec
STORAGE_PORT: PortInt = TypeAdapter(PortInt).validate_python(8080)
- LOG_LEVEL: LogLevel = Field(
- "INFO",
- validation_alias=AliasChoices("STORAGE_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"),
- )
+ LOG_LEVEL: Annotated[
+ LogLevel,
+ Field(
+ validation_alias=AliasChoices("STORAGE_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"),
+ ),
+ ] = LogLevel.INFO
STORAGE_MAX_WORKERS: PositiveInt = Field(
8,
@@ -103,7 +105,7 @@ def _validate_loglevel(cls, value: str) -> str:
return log_level
@model_validator(mode="after")
- def ensure_settings_consistency(self) -> Self:
+ def _ensure_settings_consistency(self) -> Self:
if self.STORAGE_CLEANER_INTERVAL_S is not None and not self.STORAGE_REDIS:
msg = (
"STORAGE_CLEANER_INTERVAL_S cleaner cannot be set without STORAGE_REDIS! "
diff --git a/services/web/server/requirements/_base.txt b/services/web/server/requirements/_base.txt
index 11bc97fb4bb..cc870dec999 100644
--- a/services/web/server/requirements/_base.txt
+++ b/services/web/server/requirements/_base.txt
@@ -205,7 +205,7 @@ faker==19.6.1
# via -r requirements/_base.in
fast-depends==2.4.12
# via faststream
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../../packages/service-library/requirements/_base.in
# -r requirements/../../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
@@ -495,7 +495,7 @@ pycountry==23.12.11
# via -r requirements/_base.in
pycparser==2.21
# via cffi
-pydantic==2.9.2
+pydantic==2.10.2
# via
# -c requirements/../../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -557,7 +557,7 @@ pydantic==2.9.2
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -765,7 +765,7 @@ typer==0.12.3
# -r requirements/../../../../packages/settings-library/requirements/_base.in
# -r requirements/../../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in
# -r requirements/../../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in
-typing-extensions==4.12.0
+typing-extensions==4.12.2
# via
# aiodebug
# aiodocker
@@ -854,7 +854,9 @@ yarl==1.9.4
# via
# -c requirements/./constraints.txt
# -r requirements/../../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../../packages/service-library/requirements/_base.in
# -r requirements/../../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/web/server/requirements/_test.txt b/services/web/server/requirements/_test.txt
index 6ac8bbec1e1..a62c8fcff02 100644
--- a/services/web/server/requirements/_test.txt
+++ b/services/web/server/requirements/_test.txt
@@ -224,7 +224,7 @@ types-passlib==1.7.7.20240819
# via -r requirements/_test.in
types-pyyaml==6.0.12.20240917
# via -r requirements/_test.in
-typing-extensions==4.12.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# asyncpg-stubs
diff --git a/services/web/server/requirements/_tools.txt b/services/web/server/requirements/_tools.txt
index 2ce86b62830..69df0c7072e 100644
--- a/services/web/server/requirements/_tools.txt
+++ b/services/web/server/requirements/_tools.txt
@@ -85,7 +85,7 @@ tomlkit==0.13.2
# via pylint
types-cachetools==5.5.0.20240820
# via -r requirements/_tools.in
-typing-extensions==4.12.0
+typing-extensions==4.12.2
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py b/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py
index aa3914ee6df..bbd07f5c654 100644
--- a/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py
+++ b/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py
@@ -46,7 +46,7 @@ class _ComputationStarted(BaseModel):
..., description="ID for created pipeline (=project identifier)"
)
ref_ids: list[CommitID] = Field(
- None, description="Checkpoints IDs for created pipeline"
+ default_factory=list, description="Checkpoints IDs for created pipeline"
)
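A toy sketch (not the real _ComputationStarted model) of why default_factory=list is the safer default for a list-typed field: the default matches the declared type and every instance gets its own fresh list.

from pydantic import BaseModel, Field


class _StartedSketch(BaseModel):
    ref_ids: list[int] = Field(default_factory=list)


a, b = _StartedSketch(), _StartedSketch()
a.ref_ids.append(1)
assert b.ref_ids == []  # no shared mutable default, and no None sneaking past the type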
diff --git a/services/web/server/src/simcore_service_webserver/meta_modeling/_function_nodes.py b/services/web/server/src/simcore_service_webserver/meta_modeling/_function_nodes.py
index 3e0e3a630f7..029a733c6a3 100644
--- a/services/web/server/src/simcore_service_webserver/meta_modeling/_function_nodes.py
+++ b/services/web/server/src/simcore_service_webserver/meta_modeling/_function_nodes.py
@@ -36,7 +36,7 @@ def create_param_node_from_iterator_with_outputs(iterator_node: Node) -> Node:
version=ServiceVersion("1.0.0"),
label=iterator_node.label,
inputs={},
- inputNodes=[],
+ input_nodes=[],
thumbnail="", # NOTE: hack due to issue in projects json-schema
outputs=deepcopy(iterator_node.outputs),
)
diff --git a/services/web/server/src/simcore_service_webserver/payments/settings.py b/services/web/server/src/simcore_service_webserver/payments/settings.py
index 8553e508b76..ef825a5c1e9 100644
--- a/services/web/server/src/simcore_service_webserver/payments/settings.py
+++ b/services/web/server/src/simcore_service_webserver/payments/settings.py
@@ -1,5 +1,7 @@
import os
+from decimal import Decimal
from functools import cached_property
+from typing import Annotated
from aiohttp import web
from models_library.basic_types import NonNegativeDecimal
@@ -56,21 +58,27 @@ class PaymentsSettings(BaseCustomSettings, MixinServiceSettings):
description="FAKE Base url to the payment gateway",
)
- PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS: NonNegativeDecimal = Field(
- default=100,
- description="Minimum balance in credits to top-up for auto-recharge",
- # NOTE: Using credits (instead of USD) simplify RUT monitoring which is reponsible to trigger auto-recharge
- )
+ PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS: Annotated[
+ NonNegativeDecimal,
+ Field(
+ description="Minimum balance in credits to top-up for auto-recharge"
+ # NOTE: Using credits (instead of USD) simplifies RUT monitoring, which is responsible for triggering auto-recharge
+ ),
+ ] = Decimal(100)
- PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT: NonNegativeDecimal = Field(
- default=100,
- description="Default value in USD on the amount to top-up for auto-recharge (`top_up_amount_in_usd`)",
- )
+ PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT: Annotated[
+ NonNegativeDecimal,
+ Field(
+ description="Default value in USD on the amount to top-up for auto-recharge (`top_up_amount_in_usd`)",
+ ),
+ ] = Decimal(100)
- PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT: NonNegativeDecimal | None = Field(
- default=10000,
- description="Default value in USD for the montly limit for auto-recharge (`monthly_limit_in_usd`)",
- )
+ PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT: Annotated[
+ NonNegativeDecimal | None,
+ Field(
+ description="Default value in USD for the montly limit for auto-recharge (`mont] = hly_limit_in_usd`)",
+ ),
+ ] = Decimal(10_000)
@cached_property
def api_base_url(self) -> str:
diff --git a/services/web/server/src/simcore_service_webserver/studies_dispatcher/_projects.py b/services/web/server/src/simcore_service_webserver/studies_dispatcher/_projects.py
index 53f61713a43..20d208daf07 100644
--- a/services/web/server/src/simcore_service_webserver/studies_dispatcher/_projects.py
+++ b/services/web/server/src/simcore_service_webserver/studies_dispatcher/_projects.py
@@ -59,9 +59,9 @@ def _create_file_picker(download_link: str, output_label: str | None):
data = {}
data["downloadLink"] = url = TypeAdapter(AnyUrl).validate_python(download_link)
if output_label:
- data["label"] = Path(output_label).name # type: ignore[assignment]
+ data["label"] = Path(output_label).name # type: ignore[assignment]
elif url.path:
- data["label"] = Path(url.path).name # type: ignore[assignment]
+ data["label"] = Path(url.path).name # type: ignore[assignment]
output = DownloadLink.model_validate(data)
output_id = "outFile"
@@ -70,7 +70,7 @@ def _create_file_picker(download_link: str, output_label: str | None):
version=_FILE_PICKER_VERSION,
label="File Picker",
inputs={},
- inputNodes=[],
+ input_nodes=[],
outputs={output_id: output},
progress=0,
)
@@ -121,7 +121,7 @@ def _create_project_with_service(
inputs=None,
)
- project = _create_project(
+ return _create_project(
project_id=project_id,
name=f"Service {service_info.title}",
description=f"Autogenerated study with service {service_info.footprint}",
@@ -135,8 +135,6 @@ def _create_project_with_service(
},
)
- return project
-
def _create_project_with_filepicker_and_service(
project_id: ProjectID,
@@ -160,12 +158,12 @@ def _create_project_with_filepicker_and_service(
output=file_picker_output_id,
)
},
- inputNodes=[
+ input_nodes=[
file_picker_id,
],
)
- project = _create_project(
+ return _create_project(
project_id=project_id,
name=f"Viewer {viewer_info.title}",
description=f"Autogenerated study with file-picker and service {viewer_info.footprint}",
@@ -181,8 +179,6 @@ def _create_project_with_filepicker_and_service(
},
)
- return project
-
async def _add_new_project(
app: web.Application, project: Project, user: UserInfo, *, product_name: str
diff --git a/services/web/server/src/simcore_service_webserver/studies_dispatcher/_redirects_handlers.py b/services/web/server/src/simcore_service_webserver/studies_dispatcher/_redirects_handlers.py
index 18bd6e96e8f..3ee106028fd 100644
--- a/services/web/server/src/simcore_service_webserver/studies_dispatcher/_redirects_handlers.py
+++ b/services/web/server/src/simcore_service_webserver/studies_dispatcher/_redirects_handlers.py
@@ -170,14 +170,7 @@ def ensure_extension_upper_and_dotless(cls, v):
class ServiceAndFileParams(FileQueryParams, ServiceParams):
- model_config = ConfigDict(
- json_schema_extra={
- "allOf": [
- {"$ref": "#/definitions/FileParams"},
- {"$ref": "#/definitions/ServiceParams"},
- ]
- }
- )
+ ...
class ViewerQueryParams(BaseModel):
diff --git a/services/web/server/tests/unit/with_dbs/04/studies_dispatcher/test_studies_dispatcher_handlers.py b/services/web/server/tests/unit/with_dbs/04/studies_dispatcher/test_studies_dispatcher_handlers.py
index 14f673ce5da..af534e9a481 100644
--- a/services/web/server/tests/unit/with_dbs/04/studies_dispatcher/test_studies_dispatcher_handlers.py
+++ b/services/web/server/tests/unit/with_dbs/04/studies_dispatcher/test_studies_dispatcher_handlers.py
@@ -5,24 +5,24 @@
# pylint: disable=unused-variable
import asyncio
+import json
import re
import urllib.parse
from collections.abc import AsyncIterator
from typing import Any
import pytest
-import simcore_service_webserver.studies_dispatcher._redirects_handlers
+import simcore_service_webserver.studies_dispatcher
import sqlalchemy as sa
from aiohttp import ClientResponse, ClientSession
from aiohttp.test_utils import TestClient, TestServer
from aioresponses import aioresponses
-from common_library.json_serialization import json_dumps
from models_library.projects_state import ProjectLocked, ProjectStatus
-from pydantic import BaseModel, ByteSize, TypeAdapter
+from pydantic import BaseModel, ByteSize, TypeAdapter, ValidationError
from pytest_mock import MockerFixture
from pytest_simcore.helpers.assert_checks import assert_status
from pytest_simcore.helpers.webserver_login import UserInfoDict, UserRole
-from pytest_simcore.pydantic_models import iter_model_examples_in_module
+from pytest_simcore.pydantic_models import walk_model_examples_in_package
from servicelib.aiohttp import status
from settings_library.redis import RedisSettings
from settings_library.utils_session import DEFAULT_SESSION_COOKIE_NAME
@@ -235,16 +235,19 @@ async def test_api_list_supported_filetypes(client: TestClient):
@pytest.mark.parametrize(
"model_cls, example_name, example_data",
- iter_model_examples_in_module(
- simcore_service_webserver.studies_dispatcher._redirects_handlers
- ),
+ walk_model_examples_in_package(simcore_service_webserver.studies_dispatcher),
)
def test_model_examples(
model_cls: type[BaseModel], example_name: int, example_data: Any
):
- print(example_name, ":", json_dumps(example_data))
- model = model_cls.model_validate(example_data)
- assert model
+ try:
+ assert model_cls.model_validate(example_data) is not None
+ except ValidationError as err:
+ pytest.fail(
+ f"{example_name} is invalid {model_cls.__module__}.{model_cls.__name__}:"
+ f"\n{json.dumps(example_data, indent=1)}"
+ f"\nError: {err}"
+ )
async def test_api_list_services(client: TestClient):
diff --git a/tests/e2e-playwright/requirements/_test.txt b/tests/e2e-playwright/requirements/_test.txt
index 011cb6fbd7c..2c499672c85 100644
--- a/tests/e2e-playwright/requirements/_test.txt
+++ b/tests/e2e-playwright/requirements/_test.txt
@@ -1,15 +1,9 @@
-#
-# This file is autogenerated by pip-compile with Python 3.11
-# by the following command:
-#
-# pip-compile _test.in
-#
annotated-types==0.7.0
# via pydantic
anyio==4.6.2.post1
# via httpx
arrow==1.3.0
- # via -r _test.in
+ # via -r requirements/_test.in
certifi==2024.8.30
# via
# httpcore
@@ -20,11 +14,11 @@ charset-normalizer==3.3.2
dnspython==2.6.1
# via email-validator
docker==7.1.0
- # via -r _test.in
+ # via -r requirements/_test.in
email-validator==2.2.0
# via pydantic
faker==29.0.0
- # via -r _test.in
+ # via -r requirements/_test.in
greenlet==3.0.3
# via playwright
h11==0.14.0
@@ -32,7 +26,7 @@ h11==0.14.0
httpcore==1.0.7
# via httpx
httpx==0.27.2
- # via -r _test.in
+ # via -r requirements/_test.in
idna==3.10
# via
# anyio
@@ -53,9 +47,9 @@ playwright==1.47.0
# via pytest-playwright
pluggy==1.5.0
# via pytest
-pydantic[email]==2.9.2
- # via -r _test.in
-pydantic-core==2.23.4
+pydantic==2.10.3
+ # via -r requirements/_test.in
+pydantic-core==2.27.1
# via pydantic
pyee==12.0.0
# via playwright
@@ -70,17 +64,17 @@ pytest==8.3.3
pytest-base-url==2.1.0
# via pytest-playwright
pytest-html==4.1.1
- # via -r _test.in
+ # via -r requirements/_test.in
pytest-instafail==0.5.0
- # via -r _test.in
+ # via -r requirements/_test.in
pytest-metadata==3.1.1
# via pytest-html
pytest-playwright==0.5.2
- # via -r _test.in
+ # via -r requirements/_test.in
pytest-runner==6.0.1
- # via -r _test.in
+ # via -r requirements/_test.in
pytest-sugar==1.0.0
- # via -r _test.in
+ # via -r requirements/_test.in
python-dateutil==2.9.0.post0
# via
# arrow
@@ -88,7 +82,7 @@ python-dateutil==2.9.0.post0
python-slugify==8.0.4
# via pytest-playwright
pyyaml==6.0.2
- # via -r _test.in
+ # via -r requirements/_test.in
requests==2.32.3
# via
# docker
@@ -100,7 +94,7 @@ sniffio==1.3.1
# anyio
# httpx
tenacity==9.0.0
- # via -r _test.in
+ # via -r requirements/_test.in
termcolor==2.4.0
# via pytest-sugar
text-unidecode==1.3
diff --git a/tests/environment-setup/requirements/requirements.txt b/tests/environment-setup/requirements/requirements.txt
index 9703160dd63..ab4e4b18e58 100644
--- a/tests/environment-setup/requirements/requirements.txt
+++ b/tests/environment-setup/requirements/requirements.txt
@@ -8,7 +8,7 @@ packaging==24.1
# pytest-sugar
pluggy==1.5.0
# via pytest
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -20,7 +20,7 @@ pydantic==2.9.2
# -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# -r requirements/requirements.in
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pytest==8.3.3
# via
diff --git a/tests/public-api/requirements/_base.txt b/tests/public-api/requirements/_base.txt
index 753d75bafbe..fa549b188ba 100644
--- a/tests/public-api/requirements/_base.txt
+++ b/tests/public-api/requirements/_base.txt
@@ -41,7 +41,7 @@ osparc-client==0.6.6
# via osparc
packaging==24.1
# via osparc
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt
@@ -49,7 +49,7 @@ pydantic==2.9.2
# -r requirements/../../../packages/settings-library/requirements/_base.in
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.10.0
# via -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in
diff --git a/tests/swarm-deploy/requirements/_test.txt b/tests/swarm-deploy/requirements/_test.txt
index 881b9db0ba3..fd400882d27 100644
--- a/tests/swarm-deploy/requirements/_test.txt
+++ b/tests/swarm-deploy/requirements/_test.txt
@@ -147,7 +147,7 @@ faker==30.8.2
# via -r requirements/_test.in
fast-depends==2.4.12
# via faststream
-faststream==0.5.28
+faststream==0.5.31
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
@@ -363,7 +363,7 @@ psycopg2-binary==2.9.9
# via
# aiopg
# sqlalchemy
-pydantic==2.9.2
+pydantic==2.10.3
# via
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt
@@ -417,7 +417,7 @@ pydantic==2.9.2
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.23.4
+pydantic-core==2.27.1
# via pydantic
pydantic-extra-types==2.9.0
# via
@@ -565,8 +565,6 @@ redis==5.0.4
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
- # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -707,7 +705,9 @@ wrapt==1.16.0
# opentelemetry-instrumentation-redis
yarl==1.12.1
# via
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq