diff --git a/.codecov.yml b/.codecov.yml index 341e18a09bd..ca81ee39226 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -94,9 +94,6 @@ component_management: - component_id: invitations paths: - services/invitations/** - - component_id: osparc_gateway_server - paths: - - services/osparc-gateway-server/** - component_id: payments paths: - services/payments/** diff --git a/.env-devel b/.env-devel index 7703fa8082a..6a32129a920 100644 --- a/.env-devel +++ b/.env-devel @@ -128,6 +128,7 @@ DYNAMIC_SCHEDULER_LOGLEVEL=DEBUG DYNAMIC_SCHEDULER_PROFILING=1 DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT=01:00:00 DYNAMIC_SCHEDULER_TRACING={} +DYNAMIC_SCHEDULER_UI_STORAGE_SECRET=adminadmin FUNCTION_SERVICES_AUTHORS='{"UN": {"name": "Unknown", "email": "unknown@osparc.io", "affiliation": "unknown"}}' @@ -259,7 +260,6 @@ WB_API_WEBSERVER_PORT=8080 WB_GC_ACTIVITY=null WB_GC_ANNOUNCEMENTS=0 WB_GC_CATALOG=null -WB_GC_CLUSTERS=0 WB_GC_DB_LISTENER=0 WB_GC_DIAGNOSTICS=null WB_GC_EMAIL=null @@ -292,7 +292,6 @@ WB_GC_WALLETS=0 WB_DB_EL_ACTIVITY=null WB_DB_EL_ANNOUNCEMENTS=0 WB_DB_EL_CATALOG=null -WB_DB_EL_CLUSTERS=0 WB_DB_EL_DB_LISTENER=1 WB_DB_EL_DIAGNOSTICS=null WB_DB_EL_EMAIL=null @@ -359,7 +358,6 @@ TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT=["41"] WEBSERVER_ACTIVITY=null WEBSERVER_ANNOUNCEMENTS=1 WEBSERVER_CATALOG={} -WEBSERVER_CLUSTERS=0 WEBSERVER_CREDIT_COMPUTATION_ENABLED=1 WEBSERVER_DB_LISTENER=0 WEBSERVER_DEV_FEATURES_ENABLED=0 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 007676d351c..36c26ee310e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -33,7 +33,6 @@ Makefile @pcrespov @sanderegg /services/efs-guardian/ @matusdrobuliak66 /services/invitations/ @pcrespov /services/migration/ @pcrespov -/services/osparc-gateway-server/ @sanderegg /services/payments/ @pcrespov @matusdrobuliak66 /services/resource-usage-tracker/ @matusdrobuliak66 /services/static-webserver/ @GitHK diff --git a/.github/workflows/ci-testing-deploy.yml b/.github/workflows/ci-testing-deploy.yml index d1a3ec75078..516a401f9e3 100644 --- a/.github/workflows/ci-testing-deploy.yml +++ b/.github/workflows/ci-testing-deploy.yml @@ -75,7 +75,6 @@ jobs: efs-guardian: ${{ steps.filter.outputs.efs-guardian }} invitations: ${{ steps.filter.outputs.invitations }} migration: ${{ steps.filter.outputs.migration }} - osparc-gateway-server: ${{ steps.filter.outputs.osparc-gateway-server }} payments: ${{ steps.filter.outputs.payments }} dynamic-scheduler: ${{ steps.filter.outputs.dynamic-scheduler }} resource-usage-tracker: ${{ steps.filter.outputs.resource-usage-tracker }} @@ -222,12 +221,6 @@ jobs: - 'packages/**' - 'services/migration/**' - 'services/docker-compose*' - osparc-gateway-server: - - 'packages/**' - - 'services/osparc-gateway-server/**' - - 'services/docker-compose*' - - 'scripts/mypy/*' - - 'mypy.ini' payments: - 'packages/**' - 'services/payments/**' @@ -1161,64 +1154,6 @@ jobs: with: token: ${{ secrets.CODECOV_TOKEN }} - unit-test-osparc-gateway-server: - needs: changes - if: ${{ needs.changes.outputs.osparc-gateway-server == 'true' || github.event_name == 'push' }} - timeout-minutes: 18 # if this timeout gets too small, then split the tests - name: "[unit] osparc-gateway-server" - runs-on: ${{ matrix.os }} - strategy: - matrix: - python: ["3.11"] - os: [ubuntu-22.04] - fail-fast: false - steps: - - uses: actions/checkout@v4 - - name: setup docker buildx - id: buildx - uses: docker/setup-buildx-action@v3 - with: - driver: docker-container - - name: setup python environment - uses: actions/setup-python@v5 - with: - 
python-version: ${{ matrix.python }} - - name: install uv - uses: astral-sh/setup-uv@v4 - with: - version: "0.4.x" - enable-cache: false - cache-dependency-glob: "**/osparc-gateway-server/requirements/ci.txt" - - name: show system version - run: ./ci/helpers/show_system_versions.bash - - name: install - run: | - make devenv - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make install-ci - - name: typecheck - run: | - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make mypy - - name: test - if: ${{ !cancelled() }} - run: | - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make test-ci-unit - - uses: codecov/codecov-action@v5.0.7 - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - with: - flags: unittests #optional - - name: Upload test results to Codecov - if: ${{ !cancelled() }} - uses: codecov/test-results-action@v1 - with: - token: ${{ secrets.CODECOV_TOKEN }} - unit-test-payments: needs: changes if: ${{ needs.changes.outputs.payments == 'true' || github.event_name == 'push' }} @@ -1965,7 +1900,6 @@ jobs: unit-test-models-library, unit-test-common-library, unit-test-notifications-library, - unit-test-osparc-gateway-server, unit-test-payments, unit-test-dynamic-scheduler, unit-test-postgres-database, @@ -2317,84 +2251,6 @@ jobs: with: flags: integrationtests #optional - integration-test-osparc-gateway-server: - needs: [changes, build-test-images] - if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.osparc-gateway-server == 'true' || github.event_name == 'push' }} - timeout-minutes: 30 # if this timeout gets too small, then split the tests - name: "[int] osparc-gateway-server" - runs-on: ${{ matrix.os }} - strategy: - matrix: - python: ["3.11"] - os: [ubuntu-22.04] - fail-fast: false - steps: - - uses: actions/checkout@v4 - - name: setup docker buildx - id: buildx - uses: docker/setup-buildx-action@v3 - with: - driver: docker-container - - - name: setup python environment - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python }} - - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v3 - # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 - - name: download docker images with retry - uses: Wandalen/wretry.action@master - with: - action: actions/download-artifact@v4 - with: | - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build - attempt_limit: 5 - attempt_delay: 1000 - - name: load docker images - run: make load-images local-src=/${{ runner.temp }}/build - - name: install uv - uses: astral-sh/setup-uv@v4 - with: - version: "0.4.x" - enable-cache: false - cache-dependency-glob: "**/osparc-gateway-server/requirements/ci.txt" - - name: show system version - run: ./ci/helpers/show_system_versions.bash - - name: install - run: | - make devenv && \ - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make install-ci - - name: integration-test - run: | - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make test-ci-integration - - name: system-test - run: | - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make test-system - - name: upload failed tests logs - if: ${{ !cancelled() }} - uses: actions/upload-artifact@v4 - with: - name: ${{ github.job }}_docker_logs - path: ./services/director-v2/test_failures - - name: cleanup - if: ${{ !cancelled() }} - run: | - pushd 
services/osparc-gateway-server && \ - make down - - uses: codecov/codecov-action@v5.0.7 - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - with: - flags: integrationtests #optional - integration-test-simcore-sdk: needs: [changes, build-test-images] if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.simcore-sdk == 'true' || github.event_name == 'push' }} @@ -2460,13 +2316,12 @@ jobs: integration-tests: # NOTE: this is a github required status check! - if: ${{ !cancelled() }} + if: ${{ always() }} needs: [ integration-test-director-v2-01, integration-test-director-v2-02, integration-test-dynamic-sidecar, - integration-test-osparc-gateway-server, integration-test-simcore-sdk, integration-test-webserver-01, integration-test-webserver-02, @@ -2792,7 +2647,7 @@ jobs: system-tests: # NOTE: this is a github required status check! - if: ${{ !cancelled() }} + if: ${{ always() }} needs: [ system-test-e2e, diff --git a/Makefile b/Makefile index 9dbdf84c9f4..564e353ee58 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,6 @@ SERVICES_NAMES_TO_BUILD := \ efs-guardian \ invitations \ migration \ - osparc-gateway-server \ payments \ resource-usage-tracker \ dynamic-scheduler \ @@ -323,6 +322,7 @@ printf "$$rows" "oSparc platform" "http://$(get_my_ip).nip.io:9081";\ printf "$$rows" "oSparc public API doc" "http://$(get_my_ip).nip.io:8006/dev/doc";\ printf "$$rows" "oSparc web API doc" "http://$(get_my_ip).nip.io:9081/dev/doc";\ printf "$$rows" "Dask Dashboard" "http://$(get_my_ip).nip.io:8787";\ +printf "$$rows" "Dy-scheduler Dashboard" "http://$(get_my_ip).nip.io:8012";\ printf "$$rows" "Docker Registry" "http://$${REGISTRY_URL}/v2/_catalog" $${REGISTRY_USER} $${REGISTRY_PW};\ printf "$$rows" "Invitations" "http://$(get_my_ip).nip.io:8008/dev/doc" $${INVITATIONS_USERNAME} $${INVITATIONS_PASSWORD};\ printf "$$rows" "Jaeger" "http://$(get_my_ip).nip.io:16686";\ diff --git a/api/specs/web-server/_cluster.py b/api/specs/web-server/_cluster.py deleted file mode 100644 index 0a33c049f3e..00000000000 --- a/api/specs/web-server/_cluster.py +++ /dev/null @@ -1,104 +0,0 @@ -from typing import Annotated - -from fastapi import APIRouter, Depends, status -from models_library.api_schemas_webserver.clusters import ( - ClusterCreate, - ClusterDetails, - ClusterGet, - ClusterPatch, - ClusterPathParams, - ClusterPing, -) -from models_library.generics import Envelope -from simcore_service_webserver._meta import API_VTAG - -router = APIRouter( - prefix=f"/{API_VTAG}", - tags=[ - "clusters", - ], -) - - -@router.get( - "/clusters", - response_model=Envelope[list[ClusterGet]], -) -def list_clusters(): - ... - - -@router.post( - "/clusters", - response_model=Envelope[ClusterGet], - status_code=status.HTTP_201_CREATED, -) -def create_cluster( - _insert: ClusterCreate, -): - ... - - -@router.post( - "/clusters:ping", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -def ping_cluster( - _ping: ClusterPing, -): - """ - Test connectivity with cluster - """ - - -@router.get( - "/clusters/{cluster_id}", - response_model=Envelope[ClusterGet], -) -def get_cluster(_path_params: Annotated[ClusterPathParams, Depends()]): - ... - - -@router.patch( - "/clusters/{cluster_id}", - response_model=Envelope[ClusterGet], -) -def update_cluster( - _path_params: Annotated[ClusterPathParams, Depends()], _update: ClusterPatch -): - ... 
- - -@router.delete( - "/clusters/{cluster_id}", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -def delete_cluster( - _path_params: Annotated[ClusterPathParams, Depends()], -): - ... - - -@router.get( - "/clusters/{cluster_id}/details", - response_model=Envelope[ClusterDetails], -) -def get_cluster_details( - _path_params: Annotated[ClusterPathParams, Depends()], -): - ... - - -@router.post( - "/clusters/{cluster_id}:ping", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -def ping_cluster_cluster_id( - _path_params: Annotated[ClusterPathParams, Depends()], -): - """ - Tests connectivity with cluster - """ diff --git a/api/specs/web-server/_computations.py b/api/specs/web-server/_computations.py index 36600f1efac..e6c7572e885 100644 --- a/api/specs/web-server/_computations.py +++ b/api/specs/web-server/_computations.py @@ -1,12 +1,10 @@ from fastapi import APIRouter, status +from models_library.api_schemas_directorv2.comp_tasks import ComputationGet from models_library.api_schemas_webserver.computations import ComputationStart from models_library.generics import Envelope from models_library.projects import ProjectID from simcore_service_webserver._meta import API_VTAG -from simcore_service_webserver.director_v2._handlers import ( - ComputationTaskGet, - _ComputationStarted, -) +from simcore_service_webserver.director_v2._handlers import _ComputationStarted router = APIRouter( prefix=f"/{API_VTAG}", @@ -19,7 +17,7 @@ @router.get( "/computations/{project_id}", - response_model=Envelope[ComputationTaskGet], + response_model=Envelope[ComputationGet], ) async def get_computation(project_id: ProjectID): ... diff --git a/api/specs/web-server/_folders.py b/api/specs/web-server/_folders.py index 88a2b19ce9e..2aa77e485d4 100644 --- a/api/specs/web-server/_folders.py +++ b/api/specs/web-server/_folders.py @@ -25,6 +25,9 @@ FoldersListQueryParams, FoldersPathParams, ) +from simcore_service_webserver.folders._workspaces_handlers import ( + _FolderWorkspacesPathParams, +) router = APIRouter( prefix=f"/{API_VTAG}", @@ -97,3 +100,15 @@ async def delete_folder( _path: Annotated[FoldersPathParams, Depends()], ): ... + + +@router.post( + "/folders/{folder_id}/workspaces/{workspace_id}:move", + status_code=status.HTTP_204_NO_CONTENT, + summary="Move folder to the workspace", + tags=["workspaces"], +) +async def move_folder_to_workspace( + _path: Annotated[_FolderWorkspacesPathParams, Depends()], +): + ... diff --git a/api/specs/web-server/_projects_workspaces.py b/api/specs/web-server/_projects_workspaces.py index 533d3c72a9b..caaccfca05c 100644 --- a/api/specs/web-server/_projects_workspaces.py +++ b/api/specs/web-server/_projects_workspaces.py @@ -23,12 +23,12 @@ ) -@router.put( - "/projects/{project_id}/workspaces/{workspace_id}", +@router.post( + "/projects/{project_id}/workspaces/{workspace_id}:move", status_code=status.HTTP_204_NO_CONTENT, summary="Move project to the workspace", ) -async def replace_project_workspace( +async def move_project_to_workspace( _path: Annotated[_ProjectWorkspacesPathParams, Depends()], ): ... 
diff --git a/api/specs/web-server/openapi.py b/api/specs/web-server/openapi.py index c205153e506..8e6b562c96d 100644 --- a/api/specs/web-server/openapi.py +++ b/api/specs/web-server/openapi.py @@ -31,7 +31,6 @@ "_announcements", "_catalog", "_catalog_tags", # MUST BE after _catalog - "_cluster", "_computations", "_exporter", "_folders", diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py b/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py index 35513ace551..41951a1d06d 100644 --- a/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py +++ b/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py @@ -1,32 +1,16 @@ -from typing import Annotated, Any, TypeAlias +from typing import Any, TypeAlias from pydantic import ( - AnyHttpUrl, BaseModel, - ConfigDict, Field, - HttpUrl, NonNegativeFloat, - ValidationInfo, field_validator, model_validator, ) from pydantic.networks import AnyUrl from pydantic.types import ByteSize, PositiveFloat -from ..clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_USER_RIGHTS, - BaseCluster, - Cluster, - ClusterAccessRights, - ClusterAuthentication, - ClusterTypeInModel, - ExternalClusterAuthentication, -) from ..generics import DictModel -from ..users import GroupID class TaskCounts(BaseModel): @@ -89,132 +73,3 @@ class ClusterDetails(BaseModel): dashboard_link: AnyUrl = Field( ..., description="Link to this scheduler's dashboard" ) - - -class ClusterGet(Cluster): - access_rights: Annotated[ - dict[GroupID, ClusterAccessRights], - Field( - alias="accessRights", - default_factory=dict, - json_schema_extra={"default": {}}, - ), - ] - - model_config = ConfigDict( - extra="allow", - populate_by_name=True, - json_schema_extra={ - # NOTE: make openapi-specs fails because - # Cluster.model_config.json_schema_extra is raises `TypeError: unhashable type: 'ClusterAccessRights'` - }, - ) - - @model_validator(mode="before") - @classmethod - def ensure_access_rights_converted(cls, values): - if "access_rights" in values: - access_rights = values.pop("access_rights") - values["accessRights"] = access_rights - return values - - -class ClusterDetailsGet(ClusterDetails): - ... 
- - -class ClusterCreate(BaseCluster): - owner: GroupID | None = None # type: ignore[assignment] - authentication: ExternalClusterAuthentication = Field(discriminator="type") - access_rights: dict[GroupID, ClusterAccessRights] = Field( - alias="accessRights", default_factory=dict - ) - - model_config = ConfigDict( - json_schema_extra={ - "examples": [ - { - "name": "My awesome cluster", - "type": ClusterTypeInModel.ON_PREMISE, - "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - }, - { - "name": "My AWS cluster", - "description": "a AWS cluster administered by me", - "type": ClusterTypeInModel.AWS, - "owner": 154, - "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - "accessRights": { - 154: CLUSTER_ADMIN_RIGHTS.model_dump(), # type:ignore[dict-item] - 12: CLUSTER_MANAGER_RIGHTS.model_dump(), # type:ignore[dict-item] - 7899: CLUSTER_USER_RIGHTS.model_dump(), # type:ignore[dict-item] - }, - }, - ] - } - ) - - @field_validator("thumbnail", mode="before") - @classmethod - def set_default_thumbnail_if_empty(cls, v, info: ValidationInfo): - if v is None: - cluster_type = info.data["type"] - default_thumbnails = { - ClusterTypeInModel.AWS.value: "https://upload.wikimedia.org/wikipedia/commons/thumb/9/93/Amazon_Web_Services_Logo.svg/250px-Amazon_Web_Services_Logo.svg.png", - ClusterTypeInModel.ON_PREMISE.value: "https://upload.wikimedia.org/wikipedia/commons/thumb/a/ac/Crystal_Clear_app_network_local.png/120px-Crystal_Clear_app_network_local.png", - ClusterTypeInModel.ON_DEMAND.value: "https://upload.wikimedia.org/wikipedia/commons/thumb/9/93/Amazon_Web_Services_Logo.svg/250px-Amazon_Web_Services_Logo.svg.png", - } - return default_thumbnails[cluster_type] - return v - - -class ClusterPatch(BaseCluster): - name: str | None = None # type: ignore[assignment] - description: str | None = None - type: ClusterTypeInModel | None = None # type: ignore[assignment] - owner: GroupID | None = None # type: ignore[assignment] - thumbnail: HttpUrl | None = None - endpoint: AnyUrl | None = None # type: ignore[assignment] - authentication: ExternalClusterAuthentication | None = Field(None, discriminator="type") # type: ignore[assignment] - access_rights: dict[GroupID, ClusterAccessRights] | None = Field( # type: ignore[assignment] - default=None, alias="accessRights" - ) - - model_config = ConfigDict( - json_schema_extra={ - "examples": [ - { - "name": "Changing the name of my cluster", - }, - { - "description": "adding a better description", - }, - { - "accessRights": { - 154: CLUSTER_ADMIN_RIGHTS.model_dump(), # type:ignore[dict-item] - 12: CLUSTER_MANAGER_RIGHTS.model_dump(), # type:ignore[dict-item] - 7899: CLUSTER_USER_RIGHTS.model_dump(), # type:ignore[dict-item] - }, - }, - ] - } - ) - - -class ClusterPing(BaseModel): - endpoint: AnyHttpUrl - authentication: ClusterAuthentication = Field( - ..., - description="Dask gateway authentication", - discriminator="type", - ) diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/comp_tasks.py b/packages/models-library/src/models_library/api_schemas_directorv2/comp_tasks.py index 0663cc37f78..9b548c64e72 100644 --- a/packages/models-library/src/models_library/api_schemas_directorv2/comp_tasks.py +++ b/packages/models-library/src/models_library/api_schemas_directorv2/comp_tasks.py @@ -1,6 +1,5 @@ from typing import Any, 
TypeAlias -from models_library.basic_types import IDStr from pydantic import ( AnyHttpUrl, AnyUrl, @@ -11,7 +10,7 @@ field_validator, ) -from ..clusters import ClusterID +from ..basic_types import IDStr from ..projects import ProjectID from ..projects_nodes_io import NodeID from ..projects_pipeline import ComputationTask @@ -54,14 +53,10 @@ class ComputationCreate(BaseModel): force_restart: bool | None = Field( default=False, description="if True will force re-running all dependent nodes" ) - cluster_id: ClusterID | None = Field( - default=None, - description="the computation shall use the cluster described by its id, 0 is the default cluster", - ) simcore_user_agent: str = "" use_on_demand_clusters: bool = Field( default=False, - description="if True, a cluster will be created as necessary (wallet_id cannot be None, and cluster_id must be None)", + description="if True, a cluster will be created as necessary (wallet_id cannot be None)", validate_default=True, ) wallet_info: WalletInfo | None = Field( @@ -79,14 +74,6 @@ def _ensure_product_name_defined_if_computation_starts( raise ValueError(msg) return v - @field_validator("use_on_demand_clusters") - @classmethod - def _ensure_expected_options(cls, v, info: ValidationInfo): - if v and info.data.get("cluster_id") is not None: - msg = "cluster_id cannot be set if use_on_demand_clusters is set" - raise ValueError(msg) - return v - class ComputationStop(BaseModel): user_id: UserID diff --git a/packages/models-library/src/models_library/api_schemas_webserver/clusters.py b/packages/models-library/src/models_library/api_schemas_webserver/clusters.py deleted file mode 100644 index 17232a8b482..00000000000 --- a/packages/models-library/src/models_library/api_schemas_webserver/clusters.py +++ /dev/null @@ -1,33 +0,0 @@ -from pydantic import BaseModel, ConfigDict - -from ..api_schemas_directorv2 import clusters as directorv2_clusters -from ..clusters import ClusterID -from ._base import InputSchema, OutputSchema - - -class ClusterPathParams(BaseModel): - cluster_id: ClusterID - model_config = ConfigDict( - populate_by_name=True, - extra="forbid", - ) - - -class ClusterGet(directorv2_clusters.ClusterGet): - model_config = OutputSchema.model_config - - -class ClusterCreate(directorv2_clusters.ClusterCreate): - model_config = InputSchema.model_config - - -class ClusterPatch(directorv2_clusters.ClusterPatch): - model_config = InputSchema.model_config - - -class ClusterPing(directorv2_clusters.ClusterPing): - model_config = InputSchema.model_config - - -class ClusterDetails(directorv2_clusters.ClusterDetails): - model_config = OutputSchema.model_config diff --git a/packages/models-library/src/models_library/api_schemas_webserver/computations.py b/packages/models-library/src/models_library/api_schemas_webserver/computations.py index 278cc747c51..c16426f5f8e 100644 --- a/packages/models-library/src/models_library/api_schemas_webserver/computations.py +++ b/packages/models-library/src/models_library/api_schemas_webserver/computations.py @@ -1,11 +1,8 @@ from pydantic import BaseModel -from ..clusters import ClusterID - class ComputationStart(BaseModel): force_restart: bool = False - cluster_id: ClusterID = 0 subgraph: set[str] = set() diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects.py b/packages/models-library/src/models_library/api_schemas_webserver/projects.py index 7c4116a136c..a918ece3b92 100644 --- a/packages/models-library/src/models_library/api_schemas_webserver/projects.py +++ 
b/packages/models-library/src/models_library/api_schemas_webserver/projects.py @@ -9,8 +9,16 @@ from typing import Annotated, Any, Literal, TypeAlias from models_library.folders import FolderID +from models_library.utils._original_fastapi_encoders import jsonable_encoder from models_library.workspaces import WorkspaceID -from pydantic import BeforeValidator, ConfigDict, Field, HttpUrl, field_validator +from pydantic import ( + BeforeValidator, + ConfigDict, + Field, + HttpUrl, + PlainSerializer, + field_validator, +) from ..api_schemas_long_running_tasks.tasks import TaskGet from ..basic_types import LongTruncatedStr, ShortTruncatedStr @@ -130,12 +138,22 @@ class ProjectPatch(InputSchema): name: ShortTruncatedStr | None = Field(default=None) description: LongTruncatedStr | None = Field(default=None) thumbnail: Annotated[ - HttpUrl | None, BeforeValidator(empty_str_to_none_pre_validator) + HttpUrl | None, + BeforeValidator(empty_str_to_none_pre_validator), + PlainSerializer(lambda x: str(x) if x is not None else None), ] = Field(default=None) access_rights: dict[GroupIDStr, AccessRights] | None = Field(default=None) classifiers: list[ClassifierID] | None = Field(default=None) dev: dict | None = Field(default=None) - ui: StudyUI | None = Field(default=None) + ui: Annotated[ + StudyUI | None, + BeforeValidator(empty_str_to_none_pre_validator), + PlainSerializer( + lambda obj: jsonable_encoder( + obj, exclude_unset=True, by_alias=False + ) # For the sake of backward compatibility + ), + ] = Field(default=None) quality: dict[str, Any] | None = Field(default=None) diff --git a/packages/models-library/src/models_library/clusters.py b/packages/models-library/src/models_library/clusters.py index 911b709a1f6..783f82df016 100644 --- a/packages/models-library/src/models_library/clusters.py +++ b/packages/models-library/src/models_library/clusters.py @@ -1,17 +1,8 @@ from enum import auto from pathlib import Path -from typing import Annotated, Final, Literal, Self, TypeAlias - -from pydantic import ( - AnyUrl, - BaseModel, - ConfigDict, - Field, - HttpUrl, - SecretStr, - field_validator, - model_validator, -) +from typing import Literal, TypeAlias + +from pydantic import AnyUrl, BaseModel, ConfigDict, Field, HttpUrl, field_validator from pydantic.types import NonNegativeInt from .users import GroupID @@ -27,78 +18,19 @@ class ClusterTypeInModel(StrAutoEnum): ON_DEMAND = auto() -class ClusterAccessRights(BaseModel): - read: bool = Field(..., description="allows to run pipelines on that cluster") - write: bool = Field(..., description="allows to modify the cluster") - delete: bool = Field(..., description="allows to delete a cluster") - - model_config = ConfigDict(extra="forbid") - - -CLUSTER_ADMIN_RIGHTS = ClusterAccessRights(read=True, write=True, delete=True) -CLUSTER_MANAGER_RIGHTS = ClusterAccessRights(read=True, write=True, delete=False) -CLUSTER_USER_RIGHTS = ClusterAccessRights(read=True, write=False, delete=False) -CLUSTER_NO_RIGHTS = ClusterAccessRights(read=False, write=False, delete=False) - - -class BaseAuthentication(BaseModel): +class _AuthenticationBase(BaseModel): type: str model_config = ConfigDict(frozen=True, extra="forbid") -class SimpleAuthentication(BaseAuthentication): - type: Literal["simple"] = "simple" - username: str - password: SecretStr - - model_config = ConfigDict( - json_schema_extra={ - "examples": [ - { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - ] - } - ) - - -class KerberosAuthentication(BaseAuthentication): - type: 
Literal["kerberos"] = "kerberos" - - model_config = ConfigDict( - json_schema_extra={ - "examples": [ - { - "type": "kerberos", - }, - ] - } - ) - - -class JupyterHubTokenAuthentication(BaseAuthentication): - type: Literal["jupyterhub"] = "jupyterhub" - api_token: str - - model_config = ConfigDict( - json_schema_extra={ - "examples": [ - {"type": "jupyterhub", "api_token": "some_jupyterhub_token"}, - ] - } - ) - - -class NoAuthentication(BaseAuthentication): +class NoAuthentication(_AuthenticationBase): type: Literal["none"] = "none" model_config = ConfigDict(json_schema_extra={"examples": [{"type": "none"}]}) -class TLSAuthentication(BaseAuthentication): +class TLSAuthentication(_AuthenticationBase): type: Literal["tls"] = "tls" tls_ca_file: Path tls_client_cert: Path @@ -118,18 +50,11 @@ class TLSAuthentication(BaseAuthentication): ) -InternalClusterAuthentication: TypeAlias = NoAuthentication | TLSAuthentication -ExternalClusterAuthentication: TypeAlias = ( - SimpleAuthentication | KerberosAuthentication | JupyterHubTokenAuthentication -) -ClusterAuthentication: TypeAlias = ( - ExternalClusterAuthentication | InternalClusterAuthentication -) +ClusterAuthentication: TypeAlias = NoAuthentication | TLSAuthentication class BaseCluster(BaseModel): name: str = Field(..., description="The human readable name of the cluster") - description: str | None = None type: ClusterTypeInModel owner: GroupID thumbnail: HttpUrl | None = Field( @@ -142,104 +67,41 @@ class BaseCluster(BaseModel): authentication: ClusterAuthentication = Field( ..., description="Dask gateway authentication", discriminator="type" ) - access_rights: Annotated[ - dict[GroupID, ClusterAccessRights], Field(default_factory=dict) - ] - _from_equivalent_enums = field_validator("type", mode="before")( create_enums_pre_validator(ClusterTypeInModel) ) - model_config = ConfigDict(extra="forbid", use_enum_values=True) - - -ClusterID: TypeAlias = NonNegativeInt -DEFAULT_CLUSTER_ID: Final[ClusterID] = 0 - - -class Cluster(BaseCluster): - id: ClusterID = Field(..., description="The cluster ID") - model_config = ConfigDict( - extra="allow", + use_enum_values=True, json_schema_extra={ "examples": [ { - "id": DEFAULT_CLUSTER_ID, - "name": "The default cluster", - "type": ClusterTypeInModel.ON_PREMISE, - "owner": 1456, - "endpoint": "tcp://default-dask-scheduler:8786", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - }, - { - "id": 432, "name": "My awesome cluster", "type": ClusterTypeInModel.ON_PREMISE, "owner": 12, "endpoint": "https://registry.osparc-development.fake.dev", "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", + "type": "tls", + "tls_ca_file": "/path/to/ca_file", + "tls_client_cert": "/path/to/cert_file", + "tls_client_key": "/path/to/key_file", }, }, { - "id": 432546, "name": "My AWS cluster", - "description": "a AWS cluster administered by me", "type": ClusterTypeInModel.AWS, "owner": 154, "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": {"type": "kerberos"}, - "access_rights": { - 154: CLUSTER_ADMIN_RIGHTS, # type: ignore[dict-item] - 12: CLUSTER_MANAGER_RIGHTS, # type: ignore[dict-item] - 7899: CLUSTER_USER_RIGHTS, # type: ignore[dict-item] - }, - }, - { - "id": 325436, - "name": "My AWS cluster", - "description": "a AWS cluster administered by me", - "type": ClusterTypeInModel.AWS, - "owner": 2321, - "endpoint": "https://registry.osparc-development.fake2.dev", "authentication": { - "type": 
"jupyterhub", - "api_token": "some_fake_token", - }, - "access_rights": { - 154: CLUSTER_ADMIN_RIGHTS, # type: ignore[dict-item] - 12: CLUSTER_MANAGER_RIGHTS, # type: ignore[dict-item] - 7899: CLUSTER_USER_RIGHTS, # type: ignore[dict-item] + "type": "tls", + "tls_ca_file": "/path/to/ca_file", + "tls_client_cert": "/path/to/cert_file", + "tls_client_key": "/path/to/key_file", }, }, ] }, ) - @model_validator(mode="after") - def check_owner_has_access_rights(self: Self) -> Self: - is_default_cluster = bool(self.id == DEFAULT_CLUSTER_ID) - owner_gid = self.owner - - # check owner is in the access rights, if not add it - access_rights = self.access_rights.copy() - if owner_gid not in access_rights: - access_rights[owner_gid] = ( - CLUSTER_USER_RIGHTS if is_default_cluster else CLUSTER_ADMIN_RIGHTS - ) - # check owner has the expected access - if access_rights[owner_gid] != ( - CLUSTER_USER_RIGHTS if is_default_cluster else CLUSTER_ADMIN_RIGHTS - ): - msg = f"the cluster owner access rights are incorrectly set: {access_rights[owner_gid]}" - raise ValueError(msg) - # NOTE: overcomes frozen configuration (far fetched in ClusterGet model of webserver) - object.__setattr__(self, "access_rights", access_rights) - return self + +ClusterID: TypeAlias = NonNegativeInt diff --git a/packages/models-library/src/models_library/projects_pipeline.py b/packages/models-library/src/models_library/projects_pipeline.py index 975d4726b4e..40b47aa46eb 100644 --- a/packages/models-library/src/models_library/projects_pipeline.py +++ b/packages/models-library/src/models_library/projects_pipeline.py @@ -4,7 +4,6 @@ import arrow from pydantic import BaseModel, ConfigDict, Field, PositiveInt -from .clusters import ClusterID from .projects_nodes import NodeState from .projects_nodes_io import NodeID from .projects_state import RunningState @@ -40,10 +39,6 @@ class ComputationTask(BaseModel): ..., description="the iteration id of the computation task (none if no task ran yet)", ) - cluster_id: ClusterID | None = Field( - ..., - description="the cluster on which the computaional task runs/ran (none if no task ran yet)", - ) started: datetime.datetime | None = Field( ..., description="the timestamp when the computation was started or None if not started yet", @@ -87,7 +82,6 @@ class ComputationTask(BaseModel): "progress": 0.0, }, "iteration": None, - "cluster_id": None, "started": arrow.utcnow().shift(minutes=-50).datetime, # type: ignore[dict-item] "stopped": None, "submitted": arrow.utcnow().shift(hours=-1).datetime, # type: ignore[dict-item] @@ -119,7 +113,6 @@ class ComputationTask(BaseModel): "progress": 1.0, }, "iteration": 2, - "cluster_id": 0, "started": arrow.utcnow().shift(minutes=-50).datetime, # type: ignore[dict-item] "stopped": arrow.utcnow().shift(minutes=-20).datetime, # type: ignore[dict-item] "submitted": arrow.utcnow().shift(hours=-1).datetime, # type: ignore[dict-item] diff --git a/packages/models-library/tests/test_clusters.py b/packages/models-library/tests/test_clusters.py deleted file mode 100644 index 258bdc006f4..00000000000 --- a/packages/models-library/tests/test_clusters.py +++ /dev/null @@ -1,90 +0,0 @@ -from copy import deepcopy -from typing import Any - -import pytest -from faker import Faker -from models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_USER_RIGHTS, - DEFAULT_CLUSTER_ID, - Cluster, -) -from pydantic import BaseModel, ValidationError - - -@pytest.mark.parametrize( - "model_cls", - [ - Cluster, - ], -) -def 
test_cluster_access_rights_correctly_created_when_owner_access_rights_not_present( - model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] -): - for example in model_cls_examples.values(): - modified_example = deepcopy(example) - owner_gid = modified_example["owner"] - # remove the owner from the access rights if any - modified_example.get("access_rights", {}).pop(owner_gid, None) - - instance = model_cls(**modified_example) - if instance.id != DEFAULT_CLUSTER_ID: - assert instance.access_rights[owner_gid] == CLUSTER_ADMIN_RIGHTS # type: ignore - else: - assert instance.access_rights[owner_gid] == CLUSTER_USER_RIGHTS # type: ignore - - -@pytest.mark.parametrize( - "model_cls", - [ - Cluster, - ], -) -def test_cluster_fails_when_owner_has_no_admin_rights_unless_default_cluster( - model_cls: type[BaseModel], - model_cls_examples: dict[str, dict[str, Any]], - faker: Faker, -): - for example in model_cls_examples.values(): - modified_example = deepcopy(example) - modified_example["id"] = faker.pyint(min_value=1) - owner_gid = modified_example["owner"] - # ensure there are access rights - modified_example.setdefault("access_rights", {}) - # set the owner with manager rights - modified_example["access_rights"][owner_gid] = CLUSTER_MANAGER_RIGHTS - with pytest.raises(ValidationError): - model_cls(**modified_example) - - # set the owner with user rights - modified_example["access_rights"][owner_gid] = CLUSTER_USER_RIGHTS - with pytest.raises(ValidationError): - model_cls(**modified_example) - - -@pytest.mark.parametrize( - "model_cls", - [ - Cluster, - ], -) -def test_cluster_fails_when_owner_has_no_user_rights_if_default_cluster( - model_cls: type[BaseModel], - model_cls_examples: dict[str, dict[str, Any]], -): - for example in model_cls_examples.values(): - modified_example = deepcopy(example) - modified_example["id"] = DEFAULT_CLUSTER_ID - owner_gid = modified_example["owner"] - # ensure there are access rights - modified_example.setdefault("access_rights", {}) - # set the owner with manager rights - modified_example["access_rights"][owner_gid] = CLUSTER_MANAGER_RIGHTS - with pytest.raises(ValidationError): - model_cls(**modified_example) - - # set the owner with user rights - modified_example["access_rights"][owner_gid] = CLUSTER_ADMIN_RIGHTS - with pytest.raises(ValidationError): - model_cls(**modified_example) diff --git a/packages/models-library/tests/test_service_settings_labels.py b/packages/models-library/tests/test_service_settings_labels.py index 0c582905d22..775facf96a4 100644 --- a/packages/models-library/tests/test_service_settings_labels.py +++ b/packages/models-library/tests/test_service_settings_labels.py @@ -8,8 +8,8 @@ from pprint import pformat from typing import Any, Final, NamedTuple -import pydantic_core import pytest +from common_library.json_serialization import json_dumps from models_library.basic_types import PortInt from models_library.osparc_variable_identifier import ( OsparcVariableIdentifier, @@ -558,11 +558,6 @@ def test_can_parse_labels_with_osparc_identifiers( assert "$" not in service_meta_str -def servicelib__json_serialization__json_dumps(obj: Any, **kwargs): - # Analogous to 'models_library.utils.json_serialization.json_dumps' - return json.dumps(obj, default=pydantic_core.to_jsonable_python, **kwargs) - - def test_resolving_some_service_labels_at_load_time( vendor_environments: dict[str, Any], service_labels: dict[str, str] ): @@ -579,9 +574,7 @@ def test_resolving_some_service_labels_at_load_time( ("settings", 
SimcoreServiceSettingsLabel), ): to_serialize = getattr(service_meta, attribute_name) - template = TextTemplate( - servicelib__json_serialization__json_dumps(to_serialize) - ) + template = TextTemplate(json_dumps(to_serialize)) assert template.is_valid() resolved_label: str = template.safe_substitute(vendor_environments) to_restore = TypeAdapter(pydantic_model).validate_json(resolved_label) diff --git a/packages/models-library/tests/test_utils_fastapi_encoders.py b/packages/models-library/tests/test_utils_fastapi_encoders.py index 6ee05a56e57..ecd046af24e 100644 --- a/packages/models-library/tests/test_utils_fastapi_encoders.py +++ b/packages/models-library/tests/test_utils_fastapi_encoders.py @@ -4,36 +4,25 @@ # pylint: disable=too-many-arguments import json -from typing import Any from uuid import uuid4 -import pytest +from common_library.json_serialization import json_dumps from faker import Faker from models_library.utils.fastapi_encoders import servicelib_jsonable_encoder -from pydantic.json import pydantic_encoder - - -def servicelib__json_serialization__json_dumps(obj: Any, **kwargs): - # Analogous to 'models_library.utils.json_serialization.json_dumps' - return json.dumps(obj, default=pydantic_encoder, **kwargs) def test_using_uuids_as_keys(faker: Faker): uuid_key = uuid4() - with pytest.raises(TypeError): - # IMPORTANT NOTE: we cannot serialize UUID objects as keys. - # We have to convert them to strings but then the class information is lost upon deserialization i.e. it is not reversable! - # NOTE: This could potentially be solved using 'orjson' !! - # - servicelib__json_serialization__json_dumps({uuid_key: "value"}, indent=1) + # this was previously failing + assert json_dumps({uuid_key: "value"}, indent=1) - # use encoder + # uuid keys now serialize without raising to the expected format string data = servicelib_jsonable_encoder({uuid_key: "value"}) assert data == {f"{uuid_key}": "value"} # serialize w/o raising - dumped_data = servicelib__json_serialization__json_dumps(data, indent=1) + dumped_data = json_dumps(data, indent=1) # deserialize w/o raising loaded_data = json.loads(dumped_data) diff --git a/packages/pytest-simcore/src/pytest_simcore/dask_gateway.py b/packages/pytest-simcore/src/pytest_simcore/dask_gateway.py deleted file mode 100644 index 3f89a7ac66f..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/dask_gateway.py +++ /dev/null @@ -1,119 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -from collections.abc import Callable -from typing import AsyncIterator, NamedTuple - -import pytest -import traitlets.config -from dask_gateway import Gateway, GatewayCluster, auth -from dask_gateway_server.app import DaskGateway -from dask_gateway_server.backends.local import UnsafeLocalBackend -from distributed import Client -from faker import Faker - - -@pytest.fixture -def local_dask_gateway_server_config( - unused_tcp_port_factory: Callable, -) -> traitlets.config.Config: - c = traitlets.config.Config() - assert isinstance(c.DaskGateway, traitlets.config.Config) - assert isinstance(c.ClusterConfig, traitlets.config.Config) - assert isinstance(c.Proxy, traitlets.config.Config) - assert isinstance(c.SimpleAuthenticator, traitlets.config.Config) - c.DaskGateway.backend_class = UnsafeLocalBackend - c.DaskGateway.address = f"127.0.0.1:{unused_tcp_port_factory()}" - c.Proxy.address = f"127.0.0.1:{unused_tcp_port_factory()}" - c.DaskGateway.authenticator_class = "dask_gateway_server.auth.SimpleAuthenticator" - 
c.SimpleAuthenticator.password = "qweqwe" # noqa: S105 - c.ClusterConfig.worker_cmd = [ - "dask-worker", - "--resources", - f"CPU=12,GPU=1,RAM={16e9}", - ] - # NOTE: This must be set such that the local unsafe backend creates a worker with enough cores/memory - c.ClusterConfig.worker_cores = 12 - c.ClusterConfig.worker_memory = "16G" - c.ClusterConfig.cluster_max_workers = 3 - - c.DaskGateway.log_level = "DEBUG" - return c - - -class DaskGatewayServer(NamedTuple): - address: str - proxy_address: str - password: str - server: DaskGateway - - -@pytest.fixture -async def local_dask_gateway_server( - local_dask_gateway_server_config: traitlets.config.Config, -) -> AsyncIterator[DaskGatewayServer]: - print("--> creating local dask gateway server") - dask_gateway_server = DaskGateway(config=local_dask_gateway_server_config) - dask_gateway_server.initialize([]) # that is a shitty one! - print("--> local dask gateway server initialized") - await dask_gateway_server.setup() - await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access - - print("--> local dask gateway server setup completed") - yield DaskGatewayServer( - f"http://{dask_gateway_server.backend.proxy.address}", - f"gateway://{dask_gateway_server.backend.proxy.tcp_address}", - local_dask_gateway_server_config.SimpleAuthenticator.password, # type: ignore - dask_gateway_server, - ) - print("--> local dask gateway server switching off...") - await dask_gateway_server.cleanup() - print("...done") - - -@pytest.fixture -def gateway_username(faker: Faker) -> str: - return faker.user_name() - - -@pytest.fixture -def gateway_auth( - local_dask_gateway_server: DaskGatewayServer, gateway_username: str -) -> auth.BasicAuth: - return auth.BasicAuth(gateway_username, local_dask_gateway_server.password) - - -@pytest.fixture -async def dask_gateway( - local_dask_gateway_server: DaskGatewayServer, gateway_auth: auth.BasicAuth -) -> Gateway: - async with Gateway( - local_dask_gateway_server.address, - local_dask_gateway_server.proxy_address, - asynchronous=True, - auth=gateway_auth, - ) as gateway: - print( - f"--> {gateway=} created, with {gateway_auth.username=}/{gateway_auth.password=}" - ) - cluster_options = await gateway.cluster_options() - gateway_versions = await gateway.get_versions() - clusters_list = await gateway.list_clusters() - print(f"--> {gateway_versions=}, {cluster_options=}, {clusters_list=}") - for option in cluster_options.items(): - print(f"--> {option=}") - return gateway - - -@pytest.fixture -async def dask_gateway_cluster(dask_gateway: Gateway) -> AsyncIterator[GatewayCluster]: - async with dask_gateway.new_cluster() as cluster: - yield cluster - - -@pytest.fixture -async def dask_gateway_cluster_client( - dask_gateway_cluster: GatewayCluster, -) -> AsyncIterator[Client]: - async with dask_gateway_cluster.get_client() as client: - yield client diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py index 55065daaf76..092ab82d655 100644 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py @@ -95,7 +95,7 @@ async def create_project( for group_id, permissions in _access_rights.items(): await update_or_insert_project_group( app, - new_project["uuid"], + project_id=new_project["uuid"], group_id=int(group_id), read=permissions["read"], write=permissions["write"], diff --git 
a/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py b/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py index aea927de4d6..5b85a036d79 100644 --- a/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py +++ b/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py @@ -2,8 +2,6 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -import json -import random import re from pathlib import Path from typing import Any @@ -13,6 +11,7 @@ from aioresponses import aioresponses as AioResponsesMock from aioresponses.core import CallbackResult from faker import Faker +from models_library.api_schemas_directorv2.comp_tasks import ComputationGet from models_library.api_schemas_storage import ( FileMetaDataGet, FileUploadCompleteFutureResponse, @@ -23,7 +22,6 @@ LinkType, PresignedLink, ) -from models_library.clusters import Cluster from models_library.generics import Envelope from models_library.projects_pipeline import ComputationTask from models_library.projects_state import RunningState @@ -81,7 +79,7 @@ def create_computation_cb(url, **kwargs) -> CallbackResult: assert param in body, f"{param} is missing from body: {body}" state = ( RunningState.PUBLISHED - if "start_pipeline" in body and body["start_pipeline"] + if body.get("start_pipeline") else RunningState.NOT_STARTED ) pipeline: dict[str, list[str]] = FULL_PROJECT_PIPELINE_ADJACENCY @@ -131,8 +129,13 @@ def get_computation_cb(url, **kwargs) -> CallbackResult: state = RunningState.NOT_STARTED pipeline: dict[str, list[str]] = FULL_PROJECT_PIPELINE_ADJACENCY node_states = FULL_PROJECT_NODE_STATES - returned_computation = ComputationTask.model_validate( - ComputationTask.model_config["json_schema_extra"]["examples"][0] + assert "json_schema_extra" in ComputationGet.model_config + assert isinstance(ComputationGet.model_config["json_schema_extra"], dict) + assert isinstance( + ComputationGet.model_config["json_schema_extra"]["examples"], list + ) + returned_computation = ComputationGet.model_validate( + ComputationGet.model_config["json_schema_extra"]["examples"][0] ).model_copy( update={ "id": Path(url.path).name, @@ -151,85 +154,6 @@ def get_computation_cb(url, **kwargs) -> CallbackResult: ) -def create_cluster_cb(url, **kwargs) -> CallbackResult: - assert "json" in kwargs, f"missing body in call to {url}" - assert url.query.get("user_id") - random_cluster = Cluster.model_validate( - random.choice(Cluster.model_config["json_schema_extra"]["examples"]) - ) - return CallbackResult( - status=201, payload=json.loads(random_cluster.model_dump_json(by_alias=True)) - ) - - -def list_clusters_cb(url, **kwargs) -> CallbackResult: - assert url.query.get("user_id") - return CallbackResult( - status=200, - body=json.dumps( - [ - json.loads( - Cluster.model_validate( - random.choice( - Cluster.model_config["json_schema_extra"]["examples"] - ) - ).model_dump_json(by_alias=True) - ) - for _ in range(3) - ] - ), - ) - - -def get_cluster_cb(url, **kwargs) -> CallbackResult: - assert url.query.get("user_id") - cluster_id = url.path.split("/")[-1] - return CallbackResult( - status=200, - payload=json.loads( - Cluster.model_validate( - { - **random.choice( - Cluster.model_config["json_schema_extra"]["examples"] - ), - **{"id": cluster_id}, - } - ).model_dump_json(by_alias=True) - ), - ) - - -def get_cluster_details_cb(url, **kwargs) -> CallbackResult: - assert url.query.get("user_id") - cluster_id = url.path.split("/")[-1] - 
assert cluster_id - return CallbackResult( - status=200, - payload={ - "scheduler": {"status": "RUNNING"}, - "dashboard_link": "https://dashboard.link.com", - }, - ) - - -def patch_cluster_cb(url, **kwargs) -> CallbackResult: - assert url.query.get("user_id") - cluster_id = url.path.split("/")[-1] - return CallbackResult( - status=200, - payload=json.loads( - Cluster.model_validate( - { - **random.choice( - Cluster.model_config["json_schema_extra"]["examples"] - ), - **{"id": cluster_id}, - } - ).model_dump_json(by_alias=True) - ), - ) - - @pytest.fixture async def director_v2_service_mock( aioresponses_mocker: AioResponsesMock, @@ -280,73 +204,6 @@ async def director_v2_service_mock( aioresponses_mocker.delete(delete_computation_pattern, status=204, repeat=True) aioresponses_mocker.patch(projects_networks_pattern, status=204, repeat=True) - # clusters - aioresponses_mocker.post( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=create_cluster_cb, - status=status.HTTP_201_CREATED, - repeat=True, - ) - - aioresponses_mocker.get( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=list_clusters_cb, - status=status.HTTP_201_CREATED, - repeat=True, - ) - - aioresponses_mocker.get( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=get_cluster_cb, - status=status.HTTP_201_CREATED, - repeat=True, - ) - - aioresponses_mocker.get( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters/[0-9]+/details\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=get_cluster_details_cb, - status=status.HTTP_201_CREATED, - repeat=True, - ) - - aioresponses_mocker.patch( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=patch_cluster_cb, - status=status.HTTP_201_CREATED, - repeat=True, - ) - aioresponses_mocker.delete( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$" - ), - status=status.HTTP_204_NO_CONTENT, - repeat=True, - ) - - aioresponses_mocker.post( - re.compile(r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters:ping$"), - status=status.HTTP_204_NO_CONTENT, - repeat=True, - ) - - aioresponses_mocker.post( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+):ping\?(\w+(?:=\w+)?\&?){1,}$" - ), - status=status.HTTP_204_NO_CONTENT, - repeat=True, - ) - return aioresponses_mocker diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py b/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py index c2900bf3e4f..507bb602e06 100644 --- a/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py @@ -9,7 +9,7 @@ import distributed import pytest from distributed import Client -from models_library.clusters import InternalClusterAuthentication, TLSAuthentication +from models_library.clusters import ClusterAuthentication, TLSAuthentication from pydantic import AnyUrl from .helpers.docker import get_service_published_port @@ -72,7 +72,7 @@ def dask_backend_tls_certificates( @pytest.fixture def dask_scheduler_auth( dask_backend_tls_certificates: _TLSCertificates, -) -> InternalClusterAuthentication: +) -> ClusterAuthentication: return TLSAuthentication( tls_ca_file=dask_backend_tls_certificates.tls_ca_file, tls_client_cert=dask_backend_tls_certificates.tls_cert_file, diff --git 
a/services/api-server/openapi.json b/services/api-server/openapi.json index 5b23b44603d..20210a69dba 100644 --- a/services/api-server/openapi.json +++ b/services/api-server/openapi.json @@ -2598,7 +2598,7 @@ "solvers" ], "summary": "Start Job", - "description": "Starts job job_id created with the solver solver_key:version\n\nAdded in *version 0.4.3*: query parameter `cluster_id`\nAdded in *version 0.6*: responds with a 202 when successfully starting a computation", + "description": "Starts job job_id created with the solver solver_key:version\n\nAdded in *version 0.4.3*: query parameter `cluster_id`\nAdded in *version 0.6*: responds with a 202 when successfully starting a computation\nChanged in *version 0.8*: query parameter `cluster_id` deprecated", "operationId": "start_job", "security": [ { @@ -2651,7 +2651,8 @@ } ], "title": "Cluster Id" - } + }, + "deprecated": true } ], "responses": { @@ -4585,7 +4586,7 @@ "studies" ], "summary": "Start Study Job", - "description": "Changed in *version 0.6.0*: Now responds with a 202 when successfully starting a computation", + "description": "Changed in *version 0.6.0*: Now responds with a 202 when successfully starting a computation\nChanged in *version 0.8*: query parameter `cluster_id` deprecated", "operationId": "start_study_job", "security": [ { @@ -4628,7 +4629,8 @@ } ], "title": "Cluster Id" - } + }, + "deprecated": true } ], "responses": { @@ -6891,7 +6893,7 @@ "type": "integer", "x_unit": "second" }, - "key": "input_2", + "key": "f763658f-a89a-4a90-ace4-c44631290f12", "kind": "input" } }, @@ -7099,7 +7101,9 @@ "required": [ "walletId", "name", + "description", "owner", + "thumbnail", "status", "created", "modified", diff --git a/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py b/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py index 151a79c6871..af1c80c70ac 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py @@ -12,10 +12,8 @@ from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from pydantic.types import PositiveInt -from simcore_service_api_server.exceptions.backend_errors import ( - ProjectAlreadyStartedError, -) +from ...exceptions.backend_errors import ProjectAlreadyStartedError from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES from ...models.basic_types import VersionStr from ...models.schemas.errors import ErrorGet @@ -40,7 +38,11 @@ from ..dependencies.authentication import get_current_user_id, get_product_name from ..dependencies.services import get_api_client from ..dependencies.webserver import AuthSession, get_webserver_session -from ._constants import FMSG_CHANGELOG_ADDED_IN_VERSION, FMSG_CHANGELOG_NEW_IN_VERSION +from ._constants import ( + FMSG_CHANGELOG_ADDED_IN_VERSION, + FMSG_CHANGELOG_CHANGED_IN_VERSION, + FMSG_CHANGELOG_NEW_IN_VERSION, +) _logger = logging.getLogger(__name__) @@ -182,6 +184,9 @@ async def delete_job( + FMSG_CHANGELOG_ADDED_IN_VERSION.format("0.4.3", "query parameter `cluster_id`") + FMSG_CHANGELOG_ADDED_IN_VERSION.format( "0.6", "responds with a 202 when successfully starting a computation" + ) + + FMSG_CHANGELOG_CHANGED_IN_VERSION.format( + "0.8", "query parameter `cluster_id` deprecated" ), ) async def start_job( @@ -192,7 +197,9 @@ async def start_job( user_id: Annotated[PositiveInt, Depends(get_current_user_id)], director2_api: 
Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], - cluster_id: ClusterID | None = None, + cluster_id: Annotated[ # pylint: disable=unused-argument # noqa: ARG001 + ClusterID | None, Query(deprecated=True) + ] = None, ): job_name = _compose_job_resource_name(solver_key, version, job_id) _logger.debug("Start Job '%s'", job_name) @@ -203,7 +210,6 @@ async def start_job( job_id=job_id, expected_job_name=job_name, webserver_api=webserver_api, - cluster_id=cluster_id, ) except ProjectAlreadyStartedError: job_status = await inspect_job( diff --git a/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py b/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py index 177b50d1e6c..8d23def5c0b 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py @@ -15,14 +15,11 @@ from models_library.projects_nodes_io import NodeID from pydantic import PositiveInt from servicelib.logging_utils import log_context -from simcore_service_api_server.api.routes.solvers_jobs import JOBS_STATUS_CODES -from simcore_service_api_server.exceptions.backend_errors import ( - ProjectAlreadyStartedError, -) from ...api.dependencies.authentication import get_current_user_id from ...api.dependencies.services import get_api_client from ...api.dependencies.webserver import get_webserver_session +from ...exceptions.backend_errors import ProjectAlreadyStartedError from ...models.pagination import Page, PaginationParams from ...models.schemas.errors import ErrorGet from ...models.schemas.jobs import ( @@ -53,6 +50,7 @@ from ..dependencies.application import get_reverse_url_mapper from ._common import API_SERVER_DEV_FEATURES_ENABLED from ._constants import FMSG_CHANGELOG_CHANGED_IN_VERSION, FMSG_CHANGELOG_NEW_IN_VERSION +from .solvers_jobs import JOBS_STATUS_CODES _logger = logging.getLogger(__name__) router = APIRouter() @@ -210,6 +208,9 @@ async def delete_study_job( }, description=FMSG_CHANGELOG_CHANGED_IN_VERSION.format( "0.6.0", "Now responds with a 202 when successfully starting a computation" + ) + + FMSG_CHANGELOG_CHANGED_IN_VERSION.format( + "0.8", "query parameter `cluster_id` deprecated" ), ) async def start_study_job( @@ -219,7 +220,9 @@ async def start_study_job( user_id: Annotated[PositiveInt, Depends(get_current_user_id)], webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], - cluster_id: ClusterID | None = None, + cluster_id: Annotated[ # pylint: disable=unused-argument # noqa: ARG001 + ClusterID | None, Query(deprecated=True) + ] = None, ): job_name = _compose_job_resource_name(study_id, job_id) with log_context(_logger, logging.DEBUG, f"Starting Job '{job_name}'"): @@ -229,7 +232,6 @@ async def start_study_job( job_id=job_id, expected_job_name=job_name, webserver_api=webserver_api, - cluster_id=cluster_id, ) except ProjectAlreadyStartedError: job_status: JobStatus = await inspect_study_job( diff --git a/services/api-server/src/simcore_service_api_server/services/director_v2.py b/services/api-server/src/simcore_service_api_server/services/director_v2.py index e225a8adef7..aaa946f10d4 100644 --- a/services/api-server/src/simcore_service_api_server/services/director_v2.py +++ b/services/api-server/src/simcore_service_api_server/services/director_v2.py @@ -3,7 +3,6 @@ from uuid 
import UUID from fastapi import FastAPI -from models_library.clusters import ClusterID from models_library.projects_nodes_io import NodeID from models_library.projects_pipeline import ComputationTask from models_library.projects_state import RunningState @@ -102,9 +101,7 @@ async def start_computation( user_id: PositiveInt, product_name: str, groups_extra_properties_repository: GroupsExtraPropertiesRepository, - cluster_id: ClusterID | None = None, ) -> ComputationTaskGet: - extras = {} use_on_demand_clusters = ( await groups_extra_properties_repository.use_on_demand_clusters( @@ -112,9 +109,6 @@ async def start_computation( ) ) - if cluster_id is not None and not use_on_demand_clusters: - extras["cluster_id"] = cluster_id - response = await self.client.post( "/v2/computations", json={ @@ -123,7 +117,6 @@ async def start_computation( "start_pipeline": True, "product_name": product_name, "use_on_demand_clusters": use_on_demand_clusters, - **extras, }, ) response.raise_for_status() diff --git a/services/api-server/src/simcore_service_api_server/services/jobs.py b/services/api-server/src/simcore_service_api_server/services/jobs.py index 7bc46d5ed1e..277f9625f17 100644 --- a/services/api-server/src/simcore_service_api_server/services/jobs.py +++ b/services/api-server/src/simcore_service_api_server/services/jobs.py @@ -4,7 +4,6 @@ from fastapi import Depends, HTTPException, Request, status from models_library.api_schemas_webserver.projects import ProjectGet -from models_library.clusters import ClusterID from pydantic import HttpUrl, PositiveInt from servicelib.logging_utils import log_context @@ -41,7 +40,6 @@ async def start_project( job_id: JobID, expected_job_name: str, webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], - cluster_id: ClusterID | None = None, ) -> None: if pricing_spec := JobPricingSpecification.create_from_headers(request.headers): with log_context(_logger, logging.DEBUG, "Set pricing plan and unit"): @@ -56,7 +54,7 @@ async def start_project( pricing_unit=pricing_spec.pricing_unit, ) with log_context(_logger, logging.DEBUG, "Starting job"): - await webserver_api.start_project(project_id=job_id, cluster_id=cluster_id) + await webserver_api.start_project(project_id=job_id) async def stop_project( diff --git a/services/api-server/src/simcore_service_api_server/services/webserver.py b/services/api-server/src/simcore_service_api_server/services/webserver.py index ac0437dbc7d..b5e1c29c106 100644 --- a/services/api-server/src/simcore_service_api_server/services/webserver.py +++ b/services/api-server/src/simcore_service_api_server/services/webserver.py @@ -2,9 +2,10 @@ import logging import urllib.parse +from collections.abc import Mapping from dataclasses import dataclass from functools import partial -from typing import Any, Mapping +from typing import Any from uuid import UUID import httpx @@ -36,7 +37,6 @@ WalletGet, WalletGetWithAvailableCredits, ) -from models_library.clusters import ClusterID from models_library.generics import Envelope from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID @@ -444,11 +444,12 @@ async def connect_pricing_unit_to_project_node( } ) async def start_project( - self, *, project_id: UUID, cluster_id: ClusterID | None = None + self, + *, + project_id: UUID, ) -> None: body_input: dict[str, Any] = {} - if cluster_id: - body_input["cluster_id"] = cluster_id + body: ComputationStart = ComputationStart(**body_input) response = await self.client.post( f"/computations/{project_id}:start", 
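Sketch of the deprecation pattern applied in the api-server route changes above, before the test-fixture updates that follow: the `cluster_id` query parameter is kept in the OpenAPI schema (flagged as deprecated via FastAPI's `Query(deprecated=True)`) so existing clients keep working, but its value is accepted and ignored rather than forwarded to director-v2, whose `start_computation` no longer takes it. The route path, the 202 status code (from the changelog text above) and the plain `int` stand-in for `ClusterID` are illustrative; the real handlers also resolve auth and client dependencies.

# Minimal, self-contained sketch (not the service code) of the pattern:
# keep a query parameter for backwards compatibility, mark it deprecated in
# the generated OpenAPI spec, and stop using it downstream.
from typing import Annotated

from fastapi import FastAPI, Query

app = FastAPI()


@app.post("/v0/studies/{study_id}/jobs/{job_id}:start", status_code=202)
async def start_study_job(
    study_id: str,
    job_id: str,
    cluster_id: Annotated[  # noqa: ARG001  (accepted, but intentionally unused)
        int | None, Query(deprecated=True)
    ] = None,
):
    # cluster_id is no longer forwarded to the computation backend; whether
    # on-demand clusters are used is decided server-side (see the
    # use_on_demand_clusters lookup in start_computation above).
    return {"job_id": job_id, "state": "STARTED"}

The same idea drives the `webserver.start_project` change: with `cluster_id` gone, the request body reduces to an empty `ComputationStart()` payload, which matches the `"request_payload": {}` entries in the mock captures below.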
diff --git a/services/api-server/tests/mocks/get_job_outputs.json b/services/api-server/tests/mocks/get_job_outputs.json index cc49e55fe27..a53e1742e95 100644 --- a/services/api-server/tests/mocks/get_job_outputs.json +++ b/services/api-server/tests/mocks/get_job_outputs.json @@ -1,586 +1,585 @@ [ - { - "name": "POST /projects", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/projects", - "path_parameters": [] - }, - "query": "from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true", - "response_body": { - "data": { - "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", - "task_name": "POST /v0/projects?from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true", - "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", - "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result", - "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3" - } - }, - "status_code": 202 - }, - { - "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}", - "path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" - } - ] + { + "name": "POST /projects", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/projects", + "path_parameters": [] + }, + "query": "from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true", + "response_body": { + "data": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "task_name": "POST /v0/projects?from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true", + "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result", + "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3" + } + }, + "status_code": 202 }, - "response_body": { - "data": { - "task_progress": { - "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", - "message": "Checking study access rights...", - "percent": 0.0 + { + "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "description": "", + "method": "GET", + "host": 
"webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] }, - "done": false, - "started": "2024-07-16T12:56:51.900041" - } - } - }, - { - "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}", - "path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "message": "Checking study access rights...", + "percent": 0.0 + }, + "done": false, + "started": "2024-07-16T12:56:51.900041" + } } - ] }, - "response_body": { - "data": { - "task_progress": { - "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", - "message": "Checking study access rights...", - "percent": 0.0 + { + "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] }, - "done": false, - "started": "2024-07-16T12:56:51.900041" - } - } - }, - { - "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}", - "path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "message": "Checking study access rights...", + "percent": 0.0 + }, + "done": false, + "started": "2024-07-16T12:56:51.900041" + } } - ] }, - "response_body": { - "data": { - "task_progress": { - "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", - "message": "finished", - "percent": 1.0 + { + "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] }, - "done": true, - "started": "2024-07-16T12:56:51.900041" - } - } - }, 
- { - "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}/result", - "path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2024-07-16T12:56:51.900041" + } } - ] }, - "response_body": { - "data": { - "uuid": "df0b67b6-4372-11ef-a15d-0242ac14000c", - "name": "teststudy (Copy)", - "description": "", - "thumbnail": "", - "creationDate": "2024-07-16T12:56:51.922Z", - "lastChangeDate": "2024-07-16T12:56:51.922Z", - "workspaceId": 5, - "folderId": 2, - "trashedAt": null, - "workbench": { - "dd875b4f-7663-529f-bd7f-3716b19e28af": { - "key": "simcore/services/comp/itis/sleeper", - "version": "2.0.2", - "label": "sleeper", - "progress": 0.0, - "inputs": { - "input_1": { - "nodeUuid": "cda9d480-d3ad-55c8-b9ce-c50eb1bab818", - "output": "outFile" - }, - "input_2": 2, - "input_3": false - }, - "inputsRequired": [], - "inputNodes": [ - "cda9d480-d3ad-55c8-b9ce-c50eb1bab818" - ], - "state": { - "modified": true, - "dependencies": [], - "currentStatus": "NOT_STARTED", - "progress": null - } - }, - "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": { - "key": "simcore/services/frontend/file-picker", - "version": "1.0.0", - "label": "inputfile", - "inputs": {}, - "inputsRequired": [], - "inputNodes": [] - }, - "c784a033-36c7-558b-9cc5-448321de01f8": { - "key": "simcore/services/frontend/iterator-consumer/probe/file", - "version": "1.0.0", - "label": "outputfile", - "inputs": { - "in_1": { - "nodeUuid": "dd875b4f-7663-529f-bd7f-3716b19e28af", - "output": "output_1" - } - }, - "inputsRequired": [], - "inputNodes": [ - "dd875b4f-7663-529f-bd7f-3716b19e28af" + { + "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}/result", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } ] - } - }, - "prjOwner": "bisgaard@itis.swiss", - "accessRights": { - "3": { - "read": true, - "write": true, - "delete": true - } - }, - "tags": [], - "classifiers": [], - "state": { - "locked": { - "value": false, - "status": "CLOSED" - }, - "state": { - "value": "NOT_STARTED" - } }, - "ui": { - "workbench": { - "c784a033-36c7-558b-9cc5-448321de01f8": { - "position": { - "x": 1175, - "y": 467 - } - }, - "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": { - "position": { - "x": 586, - "y": 471 - } - }, - "dd875b4f-7663-529f-bd7f-3716b19e28af": { - "position": { - "x": 860, - "y": 440 - } + "response_body": { + "data": { + "uuid": "df0b67b6-4372-11ef-a15d-0242ac14000c", + "name": "teststudy (Copy)", + "description": "", + "thumbnail": "", + "creationDate": "2024-07-16T12:56:51.922Z", + "lastChangeDate": "2024-07-16T12:56:51.922Z", + "workspaceId": 5, + "folderId": 2, + 
"trashedAt": null, + "workbench": { + "dd875b4f-7663-529f-bd7f-3716b19e28af": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "input_1": { + "nodeUuid": "cda9d480-d3ad-55c8-b9ce-c50eb1bab818", + "output": "outFile" + }, + "input_2": 2, + "input_3": false + }, + "inputsRequired": [], + "inputNodes": [ + "cda9d480-d3ad-55c8-b9ce-c50eb1bab818" + ], + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + }, + "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": { + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + "label": "inputfile", + "inputs": {}, + "inputsRequired": [], + "inputNodes": [] + }, + "c784a033-36c7-558b-9cc5-448321de01f8": { + "key": "simcore/services/frontend/iterator-consumer/probe/file", + "version": "1.0.0", + "label": "outputfile", + "inputs": { + "in_1": { + "nodeUuid": "dd875b4f-7663-529f-bd7f-3716b19e28af", + "output": "output_1" + } + }, + "inputsRequired": [], + "inputNodes": [ + "dd875b4f-7663-529f-bd7f-3716b19e28af" + ] + } + }, + "prjOwner": "bisgaard@itis.swiss", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "c784a033-36c7-558b-9cc5-448321de01f8": { + "position": { + "x": 1175, + "y": 467 + } + }, + "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": { + "position": { + "x": 586, + "y": 471 + } + }, + "dd875b4f-7663-529f-bd7f-3716b19e28af": { + "position": { + "x": 860, + "y": 440 + } + } + }, + "slideshow": {}, + "currentNodeId": "b448cfb0-436c-11ef-a15d-0242ac14000c", + "mode": "workbench" + }, + "quality": { + "enabled": true, + "tsr_target": { + "r01": { + "level": 4, + "references": "" + }, + "r02": { + "level": 4, + "references": "" + }, + "r03": { + "level": 4, + "references": "" + }, + "r04": { + "level": 4, + "references": "" + }, + "r05": { + "level": 4, + "references": "" + }, + "r06": { + "level": 4, + "references": "" + }, + "r07": { + "level": 4, + "references": "" + }, + "r08": { + "level": 4, + "references": "" + }, + "r09": { + "level": 4, + "references": "" + }, + "r10": { + "level": 4, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + }, + "tsr_current": { + "r01": { + "level": 0, + "references": "" + }, + "r02": { + "level": 0, + "references": "" + }, + "r03": { + "level": 0, + "references": "" + }, + "r04": { + "level": 0, + "references": "" + }, + "r05": { + "level": 0, + "references": "" + }, + "r06": { + "level": 0, + "references": "" + }, + "r07": { + "level": 0, + "references": "" + }, + "r08": { + "level": 0, + "references": "" + }, + "r09": { + "level": 0, + "references": "" + }, + "r10": { + "level": 0, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + } + }, + "dev": {} } - }, - "slideshow": {}, - "currentNodeId": "b448cfb0-436c-11ef-a15d-0242ac14000c", - "mode": 
"workbench" }, - "quality": { - "enabled": true, - "tsr_target": { - "r01": { - "level": 4, - "references": "" - }, - "r02": { - "level": 4, - "references": "" - }, - "r03": { - "level": 4, - "references": "" - }, - "r04": { - "level": 4, - "references": "" - }, - "r05": { - "level": 4, - "references": "" - }, - "r06": { - "level": 4, - "references": "" - }, - "r07": { - "level": 4, - "references": "" - }, - "r08": { - "level": 4, - "references": "" - }, - "r09": { - "level": 4, - "references": "" - }, - "r10": { - "level": 4, - "references": "" - }, - "r03b": { - "references": "" - }, - "r03c": { - "references": "" - }, - "r07b": { - "references": "" - }, - "r07c": { - "references": "" - }, - "r07d": { - "references": "" - }, - "r07e": { - "references": "" - }, - "r08b": { - "references": "" - }, - "r10b": { - "references": "" - } - }, - "tsr_current": { - "r01": { - "level": 0, - "references": "" - }, - "r02": { - "level": 0, - "references": "" - }, - "r03": { - "level": 0, - "references": "" - }, - "r04": { - "level": 0, - "references": "" - }, - "r05": { - "level": 0, - "references": "" - }, - "r06": { - "level": 0, - "references": "" - }, - "r07": { - "level": 0, - "references": "" - }, - "r08": { - "level": 0, - "references": "" - }, - "r09": { - "level": 0, - "references": "" - }, - "r10": { - "level": 0, - "references": "" - }, - "r03b": { - "references": "" - }, - "r03c": { - "references": "" - }, - "r07b": { - "references": "" - }, - "r07c": { - "references": "" - }, - "r07d": { - "references": "" - }, - "r07e": { - "references": "" - }, - "r08b": { - "references": "" - }, - "r10b": { - "references": "" - } - } - }, - "dev": {} - } - }, - "status_code": 201 - }, - { - "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c", - "description": "", - "method": "PATCH", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" - } - ] + "status_code": 201 }, - "request_payload": { - "name": "studies/e9f34992-436c-11ef-a15d-0242ac14000c/jobs/df0b67b6-4372-11ef-a15d-0242ac14000c" - }, - "status_code": 204 - }, - { - "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/inputs", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/inputs", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" - } - ] - }, - "response_body": { - "data": {} - } - }, - { - "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/nodes/cda9d480-d3ad-55c8-b9ce-c50eb1bab818/outputs", - "description": "", - "method": "PATCH", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str" - }, - "response_value": "projects" + { + "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] }, 
- { - "in": "path", - "name": "node_id", - "required": true, - "schema": { - "title": "Node Id", - "type": "str" - }, - "response_value": "nodes" - } - ] - }, - "request_payload": { - "outputs": { - "outFile": { - "store": 0, - "path": "api/c1dcde67-6434-31c3-95ee-bf5fe1e9422d/inputfile", - "label": "inputfile", - "eTag": null, - "dataset": null - } - } + "request_payload": { + "name": "studies/e9f34992-436c-11ef-a15d-0242ac14000c/jobs/df0b67b6-4372-11ef-a15d-0242ac14000c" + }, + "status_code": 204 }, - "status_code": 204 - }, - { - "name": "POST /computations/df0b67b6-4372-11ef-a15d-0242ac14000c:start", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/computations/{project_id}:start", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + { + "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/inputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/inputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": {} } - ] }, - "request_payload": {}, - "response_body": { - "data": { - "pipeline_id": "df0b67b6-4372-11ef-a15d-0242ac14000c" - } + { + "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/nodes/cda9d480-d3ad-55c8-b9ce-c50eb1bab818/outputs", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str" + }, + "response_value": "projects" + }, + { + "in": "path", + "name": "node_id", + "required": true, + "schema": { + "title": "Node Id", + "type": "str" + }, + "response_value": "nodes" + } + ] + }, + "request_payload": { + "outputs": { + "outFile": { + "store": 0, + "path": "api/c1dcde67-6434-31c3-95ee-bf5fe1e9422d/inputfile", + "label": "inputfile", + "eTag": null, + "dataset": null + } + } + }, + "status_code": 204 }, - "status_code": 201 - }, - { - "name": "GET /v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" - } - ] + { + "name": "POST /computations/df0b67b6-4372-11ef-a15d-0242ac14000c:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "request_payload": {}, + "response_body": { + "data": { + "pipeline_id": "df0b67b6-4372-11ef-a15d-0242ac14000c" + } + }, + "status_code": 201 }, - "query": "user_id=1", - "response_body": { - "id": "df0b67b6-4372-11ef-a15d-0242ac14000c", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "dd875b4f-7663-529f-bd7f-3716b19e28af": 
[] + { + "name": "GET /v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.0, - "node_states": { - "dd875b4f-7663-529f-bd7f-3716b19e28af": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.0 - } + "query": "user_id=1", + "response_body": { + "id": "df0b67b6-4372-11ef-a15d-0242ac14000c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "dd875b4f-7663-529f-bd7f-3716b19e28af": [] + }, + "progress": 0.0, + "node_states": { + "dd875b4f-7663-529f-bd7f-3716b19e28af": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.0 + } + } + }, + "iteration": 1, + "started": "2024-07-16T12:56:57.553331+00:00", + "stopped": null, + "submitted": "2024-07-16T12:56:57.454372+00:00", + "url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c?user_id=1", + "stop_url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c:stop?user_id=1" } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-07-16T12:56:57.553331+00:00", - "stopped": null, - "submitted": "2024-07-16T12:56:57.454372+00:00", - "url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c?user_id=1", - "stop_url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c:stop?user_id=1" - } - }, - { - "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/outputs", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/outputs", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" - } - ] }, - "response_body": { - "data": { - "c784a033-36c7-558b-9cc5-448321de01f8": { - "key": "c784a033-36c7-558b-9cc5-448321de01f8", - "value": null, - "label": "outputfile" + { + "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/outputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": { + "c784a033-36c7-558b-9cc5-448321de01f8": { + "key": "c784a033-36c7-558b-9cc5-448321de01f8", + "value": null, + "label": "outputfile" + } + } } - } } - } ] diff --git a/services/api-server/tests/mocks/run_study_workflow.json b/services/api-server/tests/mocks/run_study_workflow.json index 56b92873e50..8078a8cc155 100644 --- a/services/api-server/tests/mocks/run_study_workflow.json +++ b/services/api-server/tests/mocks/run_study_workflow.json @@ -1,1676 +1,1666 @@ [ - { - "name": "GET /projects/aeab71fe-f71b-11ee-8fca-0242ac140008/metadata/ports", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/metadata/ports", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project 
Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" + { + "name": "GET /projects/aeab71fe-f71b-11ee-8fca-0242ac140008/metadata/ports", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/metadata/ports", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": [ + { + "key": "0b8042c4-501a-4f9b-b2fa-17f860548b33", + "kind": "output", + "content_schema": null + }, + { + "key": "c0f304e0-228b-413c-937a-2b1b060c9e02", + "kind": "input", + "content_schema": { + "title": "InputInt", + "type": "integer", + "description": "Produced integer value" + } + }, + { + "key": "d9069bdb-35ae-4ec3-a05a-a42d7a7b0579", + "kind": "output", + "content_schema": { + "title": "OutputInt", + "type": "integer", + "description": "Captured integer value" + } + }, + { + "key": "50fd6b01-bb5d-4136-a932-73676a461680", + "kind": "output", + "content_schema": { + "title": "OutputString", + "type": "string", + "description": "Captured string value" + } + }, + { + "key": "38985050-7476-4534-8c79-839a928ea2a8", + "kind": "input", + "content_schema": { + "title": "InputString", + "type": "string", + "description": "Produced string value" + } + }, + { + "key": "8815eab9-9bd5-4dda-a65c-3c14a423bfb3", + "kind": "input", + "content_schema": { + "title": "InputArray", + "type": "array", + "items": { + "type": "number" + } + } + }, + { + "key": "22e7a091-2e4e-4e5a-93aa-c500457f5684", + "kind": "output", + "content_schema": { + "title": "OutputArray", + "type": "array", + "items": { + "type": "number" + } + } + }, + { + "key": "04de3b6f-668d-4826-822f-c58370c037ed", + "kind": "input", + "content_schema": { + "title": "InputNumber", + "type": "number", + "description": "Produced number value" + } + }, + { + "key": "b227b053-1207-4b48-b6ee-71a0ff24b014", + "kind": "output", + "content_schema": { + "title": "OutputNumber", + "type": "number", + "description": "Captured number value" + } + }, + { + "key": "72d5daac-f728-4603-b49e-9a407e4aa079", + "kind": "input", + "content_schema": { + "title": "InputBool", + "type": "boolean", + "description": "Produced boolean value" + } + }, + { + "key": "f85418d5-45d8-41eb-a1ac-4f14a63ec890", + "kind": "output", + "content_schema": { + "title": "OutputBool", + "type": "boolean", + "description": "Captured boolean value" + } + } + ] } - ] }, - "response_body": { - "data": [ - { - "key": "0b8042c4-501a-4f9b-b2fa-17f860548b33", - "kind": "output", - "content_schema": null - }, - { - "key": "c0f304e0-228b-413c-937a-2b1b060c9e02", - "kind": "input", - "content_schema": { - "title": "InputInt", - "type": "integer", - "description": "Produced integer value" - } - }, - { - "key": "d9069bdb-35ae-4ec3-a05a-a42d7a7b0579", - "kind": "output", - "content_schema": { - "title": "OutputInt", - "type": "integer", - "description": "Captured integer value" - } - }, - { - "key": "50fd6b01-bb5d-4136-a932-73676a461680", - "kind": "output", - "content_schema": { - "title": "OutputString", - "type": "string", - "description": "Captured string value" - } + { + "name": "POST /projects", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/projects", + "path_parameters": [] }, - { - "key": "38985050-7476-4534-8c79-839a928ea2a8", - "kind": "input", - "content_schema": { - "title": "InputString", - "type": "string", - 
"description": "Produced string value" - } - }, - { - "key": "8815eab9-9bd5-4dda-a65c-3c14a423bfb3", - "kind": "input", - "content_schema": { - "title": "InputArray", - "type": "array", - "items": { - "type": "number" - } - } - }, - { - "key": "22e7a091-2e4e-4e5a-93aa-c500457f5684", - "kind": "output", - "content_schema": { - "title": "OutputArray", - "type": "array", - "items": { - "type": "number" + "query": "from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true", + "response_body": { + "data": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "task_name": "POST /v0/projects?from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true", + "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result", + "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24" } - } - }, - { - "key": "04de3b6f-668d-4826-822f-c58370c037ed", - "kind": "input", - "content_schema": { - "title": "InputNumber", - "type": "number", - "description": "Produced number value" - } - }, - { - "key": "b227b053-1207-4b48-b6ee-71a0ff24b014", - "kind": "output", - "content_schema": { - "title": "OutputNumber", - "type": "number", - "description": "Captured number value" - } - }, - { - "key": "72d5daac-f728-4603-b49e-9a407e4aa079", - "kind": "input", - "content_schema": { - "title": "InputBool", - "type": "boolean", - "description": "Produced boolean value" - } }, - { - "key": "f85418d5-45d8-41eb-a1ac-4f14a63ec890", - "kind": "output", - "content_schema": { - "title": "OutputBool", - "type": "boolean", - "description": "Captured boolean value" - } - } - ] - } - }, - { - "name": "POST /projects", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/projects", - "path_parameters": [] - }, - "query": "from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true", - "response_body": { - "data": { - "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "task_name": "POST /v0/projects?from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true", - "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result", - "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24" - } + "status_code": 202 }, - "status_code": 202 - }, - { - "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}", - 
"path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" - } - ] - }, - "response_body": { - "data": { - "task_progress": { - "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "message": "inserted project new_project['uuid']='e19f9144-fb3f-11ee-b7b0-0242ac14001c' into the db", - "percent": 0.0 + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] }, - "done": false, - "started": "2024-04-15T15:50:28.173722" - } - } - }, - { - "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}", - "path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "message": "inserted project new_project['uuid']='e19f9144-fb3f-11ee-b7b0-0242ac14001c' into the db", + "percent": 0.0 + }, + "done": false, + "started": "2024-04-15T15:50:28.173722" + } } - ] }, - "response_body": { - "data": { - "task_progress": { - "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "message": "Checking study access rights...", - "percent": 0.0 + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] }, - "done": false, - "started": "2024-04-15T15:50:28.173722" - } - } - }, - { - "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}", - "path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "message": "Checking study access rights...", + "percent": 0.0 + }, + "done": false, + "started": "2024-04-15T15:50:28.173722" + } } - ] }, - "response_body": { - "data": { - "task_progress": { - "task_id": 
"POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "message": "updated network information in directorv2", - "percent": 1.0 + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] }, - "done": false, - "started": "2024-04-15T15:50:28.173722" - } - } - }, - { - "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}", - "path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "message": "updated network information in directorv2", + "percent": 1.0 + }, + "done": false, + "started": "2024-04-15T15:50:28.173722" + } } - ] }, - "response_body": { - "data": { - "task_progress": { - "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", - "message": "finished", - "percent": 1.0 + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] }, - "done": true, - "started": "2024-04-15T15:50:28.173722" - } - } - }, - { - "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/tasks/{task_id}/result", - "path_parameters": [ - { - "in": "path", - "name": "task_id", - "required": true, - "schema": { - "title": "Task Id", - "type": "str" - }, - "response_value": "tasks" + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2024-04-15T15:50:28.173722" + } } - ] }, - "response_body": { - "data": { - "uuid": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "name": "test (Copy)", - "description": "", - "thumbnail": "", - "creationDate": "2024-04-15T15:50:28.196Z", - "lastChangeDate": "2024-04-15T15:50:28.196Z", - "workspaceId": 3, - "folderId": 3, - "trashedAt": null, - "workbench": { - "ab014072-a95f-5775-bb34-5582a13245a6": { - "key": "simcore/services/frontend/iterator-consumer/probe/file", - "version": "1.0.0", - "label": 
"OutputFile", - "thumbnail": null, - "inputs": { - "in_1": { - "nodeUuid": "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca", - "output": "outFile" - } - }, - "inputsUnits": {}, - "inputNodes": [ - "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca" - ], - "parent": null - }, - "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": { - "key": "simcore/services/frontend/file-picker", - "version": "1.0.0", - "label": "InputFile", - "thumbnail": null, - "inputs": {}, - "inputsUnits": {}, - "inputNodes": [], - "outputs": {} - }, - "096acfb2-8c38-560a-91d3-8911f4334289": { - "key": "simcore/services/frontend/parameter/integer", - "version": "1.0.0", - "label": "InputInt", - "thumbnail": null, - "runHash": null, - "inputs": {}, - "inputsUnits": {}, - "inputNodes": [], - "outputs": { - "out_1": 1 - } - }, - "3d4963ee-179f-5948-9086-dd9bef543f65": { - "key": "simcore/services/frontend/iterator-consumer/probe/integer", - "version": "1.0.0", - "label": "OutputInt", - "thumbnail": null, - "inputs": { - "in_1": { - "nodeUuid": "096acfb2-8c38-560a-91d3-8911f4334289", - "output": "out_1" - } - }, - "inputsUnits": {}, - "inputNodes": [ - "096acfb2-8c38-560a-91d3-8911f4334289" - ] - }, - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "key": "simcore/services/comp/itis/sleeper", - "version": "2.0.2", - "label": "sleeper", - "progress": 0.0, - "thumbnail": null, - "inputs": { - "input_2": 2, - "input_3": false - }, - "inputsUnits": {}, - "inputNodes": [], - "state": { - "modified": true, - "dependencies": [], - "currentStatus": "NOT_STARTED", - "progress": null - } - }, - "2a9452ac-d210-5e11-a631-1d73454bfd91": { - "key": "simcore/services/frontend/iterator-consumer/probe/string", - "version": "1.0.0", - "label": "OutputString", - "thumbnail": null, - "inputs": { - "in_1": { - "nodeUuid": "bcc36381-7377-533f-bb04-f785c0f8e2be", - "output": "out_1" - } - }, - "inputsUnits": {}, - "inputNodes": [ - "bcc36381-7377-533f-bb04-f785c0f8e2be" - ] - }, - "bcc36381-7377-533f-bb04-f785c0f8e2be": { - "key": "simcore/services/frontend/parameter/string", - "version": "1.0.0", - "label": "InputString", - "thumbnail": null, - "runHash": null, - "inputs": {}, - "inputsUnits": {}, - "inputNodes": [], - "outputs": { - "out_1": "Foo" - } - }, - "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { - "key": "simcore/services/frontend/parameter/array", - "version": "1.0.0", - "label": "InputArray", - "thumbnail": null, - "runHash": null, - "inputs": {}, - "inputsUnits": {}, - "inputNodes": [], - "outputs": { - "out_1": [ - 1 - ] - } - }, - "cb5bc33d-6635-5680-98e3-a6ac57f908f4": { - "key": "simcore/services/frontend/iterator-consumer/probe/array", - "version": "1.0.0", - "label": "OutputArray", - "thumbnail": null, - "inputs": { - "in_1": { - "nodeUuid": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", - "output": "out_1" - } - }, - "inputsUnits": {}, - "inputNodes": [ - "197ba9f7-d09c-5cf8-9290-284cd6c40fb3" - ] - }, - "d43949c5-5143-5738-bae9-7d231dcabe7f": { - "key": "simcore/services/frontend/parameter/number", - "version": "1.0.0", - "label": "InputNumber", - "thumbnail": null, - "runHash": null, - "inputs": {}, - "inputsUnits": {}, - "inputNodes": [], - "outputs": { - "out_1": 1 - } - }, - "cd7eacb5-6806-5956-86c8-9b30ec588402": { - "key": "simcore/services/frontend/iterator-consumer/probe/number", - "version": "1.0.0", - "label": "OutputNumber", - "thumbnail": null, - "inputs": { - "in_1": { - "nodeUuid": "d43949c5-5143-5738-bae9-7d231dcabe7f", - "output": "out_1" - } - }, - "inputsUnits": {}, - "inputNodes": [ - "d43949c5-5143-5738-bae9-7d231dcabe7f" + { + "name": "GET 
tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}/result", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } ] - }, - "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { - "key": "simcore/services/frontend/parameter/boolean", - "version": "1.0.0", - "label": "InputBool", - "thumbnail": null, - "runHash": null, - "inputs": {}, - "inputsUnits": {}, - "inputNodes": [], - "outputs": { - "out_1": true + }, + "response_body": { + "data": { + "uuid": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "name": "test (Copy)", + "description": "", + "thumbnail": "", + "creationDate": "2024-04-15T15:50:28.196Z", + "lastChangeDate": "2024-04-15T15:50:28.196Z", + "workspaceId": 3, + "folderId": 3, + "trashedAt": null, + "workbench": { + "ab014072-a95f-5775-bb34-5582a13245a6": { + "key": "simcore/services/frontend/iterator-consumer/probe/file", + "version": "1.0.0", + "label": "OutputFile", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca", + "output": "outFile" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca" + ], + "parent": null + }, + "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": { + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + "label": "InputFile", + "thumbnail": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {} + }, + "096acfb2-8c38-560a-91d3-8911f4334289": { + "key": "simcore/services/frontend/parameter/integer", + "version": "1.0.0", + "label": "InputInt", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": 1 + } + }, + "3d4963ee-179f-5948-9086-dd9bef543f65": { + "key": "simcore/services/frontend/iterator-consumer/probe/integer", + "version": "1.0.0", + "label": "OutputInt", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "096acfb2-8c38-560a-91d3-8911f4334289", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "096acfb2-8c38-560a-91d3-8911f4334289" + ] + }, + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 0.0, + "thumbnail": null, + "inputs": { + "input_2": 2, + "input_3": false + }, + "inputsUnits": {}, + "inputNodes": [], + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + }, + "2a9452ac-d210-5e11-a631-1d73454bfd91": { + "key": "simcore/services/frontend/iterator-consumer/probe/string", + "version": "1.0.0", + "label": "OutputString", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "bcc36381-7377-533f-bb04-f785c0f8e2be", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "bcc36381-7377-533f-bb04-f785c0f8e2be" + ] + }, + "bcc36381-7377-533f-bb04-f785c0f8e2be": { + "key": "simcore/services/frontend/parameter/string", + "version": "1.0.0", + "label": "InputString", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": "Foo" + } + }, + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { + "key": "simcore/services/frontend/parameter/array", + "version": "1.0.0", + "label": "InputArray", + 
"thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": [ + 1 + ] + } + }, + "cb5bc33d-6635-5680-98e3-a6ac57f908f4": { + "key": "simcore/services/frontend/iterator-consumer/probe/array", + "version": "1.0.0", + "label": "OutputArray", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3" + ] + }, + "d43949c5-5143-5738-bae9-7d231dcabe7f": { + "key": "simcore/services/frontend/parameter/number", + "version": "1.0.0", + "label": "InputNumber", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": 1 + } + }, + "cd7eacb5-6806-5956-86c8-9b30ec588402": { + "key": "simcore/services/frontend/iterator-consumer/probe/number", + "version": "1.0.0", + "label": "OutputNumber", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "d43949c5-5143-5738-bae9-7d231dcabe7f", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "d43949c5-5143-5738-bae9-7d231dcabe7f" + ] + }, + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { + "key": "simcore/services/frontend/parameter/boolean", + "version": "1.0.0", + "label": "InputBool", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": true + } + }, + "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { + "key": "simcore/services/frontend/iterator-consumer/probe/boolean", + "version": "1.0.0", + "label": "OutputBool", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3" + ] + } + }, + "prjOwner": "harpercynthia@example.com", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "096acfb2-8c38-560a-91d3-8911f4334289": { + "position": { + "x": 220, + "y": 40 + } + }, + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { + "position": { + "x": 240, + "y": 400 + } + }, + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "position": { + "x": 820, + "y": 360 + } + }, + "2a9452ac-d210-5e11-a631-1d73454bfd91": { + "position": { + "x": 580, + "y": 200 + } + }, + "3d4963ee-179f-5948-9086-dd9bef543f65": { + "position": { + "x": 580, + "y": 40 + } + }, + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { + "position": { + "x": 278, + "y": 733 + } + }, + "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": { + "position": { + "x": 200, + "y": 840 + } + }, + "ab014072-a95f-5775-bb34-5582a13245a6": { + "position": { + "x": 700, + "y": 840 + } + }, + "bcc36381-7377-533f-bb04-f785c0f8e2be": { + "position": { + "x": 220, + "y": 200 + } + }, + "cb5bc33d-6635-5680-98e3-a6ac57f908f4": { + "position": { + "x": 580, + "y": 420 + } + }, + "cd7eacb5-6806-5956-86c8-9b30ec588402": { + "position": { + "x": 562, + "y": 586 + } + }, + "d43949c5-5143-5738-bae9-7d231dcabe7f": { + "position": { + "x": 271, + "y": 621 + } + }, + "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { + "position": { + "x": 656, + "y": 720 + } + } + }, + "slideshow": {}, + "currentNodeId": "aeab71fe-f71b-11ee-8fca-0242ac140008", + "mode": "workbench" + }, + "quality": { + "enabled": true, + "tsr_target": { + "r01": { + "level": 4, + "references": 
"" + }, + "r02": { + "level": 4, + "references": "" + }, + "r03": { + "level": 4, + "references": "" + }, + "r04": { + "level": 4, + "references": "" + }, + "r05": { + "level": 4, + "references": "" + }, + "r06": { + "level": 4, + "references": "" + }, + "r07": { + "level": 4, + "references": "" + }, + "r08": { + "level": 4, + "references": "" + }, + "r09": { + "level": 4, + "references": "" + }, + "r10": { + "level": 4, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + }, + "tsr_current": { + "r01": { + "level": 0, + "references": "" + }, + "r02": { + "level": 0, + "references": "" + }, + "r03": { + "level": 0, + "references": "" + }, + "r04": { + "level": 0, + "references": "" + }, + "r05": { + "level": 0, + "references": "" + }, + "r06": { + "level": 0, + "references": "" + }, + "r07": { + "level": 0, + "references": "" + }, + "r08": { + "level": 0, + "references": "" + }, + "r09": { + "level": 0, + "references": "" + }, + "r10": { + "level": 0, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + } + }, + "dev": {} } - }, - "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { - "key": "simcore/services/frontend/iterator-consumer/probe/boolean", - "version": "1.0.0", - "label": "OutputBool", - "thumbnail": null, - "inputs": { - "in_1": { - "nodeUuid": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", - "output": "out_1" - } - }, - "inputsUnits": {}, - "inputNodes": [ - "584e44d4-9a78-571f-a2a4-7d9c7b2396e3" - ] - } }, - "prjOwner": "harpercynthia@example.com", - "accessRights": { - "3": { - "read": true, - "write": true, - "delete": true - } + "status_code": 201 + }, + { + "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] }, - "tags": [], - "classifiers": [], - "state": { - "locked": { - "value": false, - "status": "CLOSED" - }, - "state": { - "value": "NOT_STARTED" - } + "request_payload": { + "name": "posix" }, - "ui": { - "workbench": { - "096acfb2-8c38-560a-91d3-8911f4334289": { - "position": { - "x": 220, - "y": 40 - } - }, - "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { - "position": { - "x": 240, - "y": 400 - } - }, - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "position": { - "x": 820, - "y": 360 - } - }, - "2a9452ac-d210-5e11-a631-1d73454bfd91": { - "position": { - "x": 580, - "y": 200 - } - }, - "3d4963ee-179f-5948-9086-dd9bef543f65": { - "position": { - "x": 580, - "y": 40 - } - }, - "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { - "position": { - "x": 278, - "y": 733 - } - }, - "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": { - "position": { - "x": 200, - "y": 840 - } - }, - "ab014072-a95f-5775-bb34-5582a13245a6": { - "position": { - "x": 700, - "y": 840 - } - }, - "bcc36381-7377-533f-bb04-f785c0f8e2be": { - "position": { - "x": 220, - "y": 200 - } - }, - 
"cb5bc33d-6635-5680-98e3-a6ac57f908f4": { - "position": { - "x": 580, - "y": 420 - } - }, - "cd7eacb5-6806-5956-86c8-9b30ec588402": { - "position": { - "x": 562, - "y": 586 - } - }, - "d43949c5-5143-5738-bae9-7d231dcabe7f": { - "position": { - "x": 271, - "y": 621 - } - }, - "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { - "position": { - "x": 656, - "y": 720 - } - } - }, - "slideshow": {}, - "currentNodeId": "aeab71fe-f71b-11ee-8fca-0242ac140008", - "mode": "workbench" + "status_code": 204 + }, + { + "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/inputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] }, - "quality": { - "enabled": true, - "tsr_target": { - "r01": { - "level": 4, - "references": "" - }, - "r02": { - "level": 4, - "references": "" - }, - "r03": { - "level": 4, - "references": "" - }, - "r04": { - "level": 4, - "references": "" - }, - "r05": { - "level": 4, - "references": "" - }, - "r06": { - "level": 4, - "references": "" - }, - "r07": { - "level": 4, - "references": "" - }, - "r08": { - "level": 4, - "references": "" - }, - "r09": { - "level": 4, - "references": "" - }, - "r10": { - "level": 4, - "references": "" - }, - "r03b": { - "references": "" - }, - "r03c": { - "references": "" - }, - "r07b": { - "references": "" - }, - "r07c": { - "references": "" - }, - "r07d": { - "references": "" - }, - "r07e": { - "references": "" - }, - "r08b": { - "references": "" - }, - "r10b": { - "references": "" - } - }, - "tsr_current": { - "r01": { - "level": 0, - "references": "" - }, - "r02": { - "level": 0, - "references": "" - }, - "r03": { - "level": 0, - "references": "" - }, - "r04": { - "level": 0, - "references": "" - }, - "r05": { - "level": 0, - "references": "" - }, - "r06": { - "level": 0, - "references": "" - }, - "r07": { - "level": 0, - "references": "" - }, - "r08": { - "level": 0, - "references": "" - }, - "r09": { - "level": 0, - "references": "" - }, - "r10": { - "level": 0, - "references": "" - }, - "r03b": { - "references": "" - }, - "r03c": { - "references": "" - }, - "r07b": { - "references": "" - }, - "r07c": { - "references": "" - }, - "r07d": { - "references": "" - }, - "r07e": { - "references": "" - }, - "r08b": { - "references": "" - }, - "r10b": { - "references": "" + "response_body": { + "data": { + "096acfb2-8c38-560a-91d3-8911f4334289": { + "key": "096acfb2-8c38-560a-91d3-8911f4334289", + "value": 1, + "label": "InputInt" + }, + "bcc36381-7377-533f-bb04-f785c0f8e2be": { + "key": "bcc36381-7377-533f-bb04-f785c0f8e2be", + "value": "Foo", + "label": "InputString" + }, + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { + "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", + "value": [ + 1 + ], + "label": "InputArray" + }, + "d43949c5-5143-5738-bae9-7d231dcabe7f": { + "key": "d43949c5-5143-5738-bae9-7d231dcabe7f", + "value": 1, + "label": "InputNumber" + }, + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { + "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", + "value": true, + "label": "InputBool" + } } - } - }, - "dev": {} - } - }, - "status_code": 201 - }, - { - "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "PATCH", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": 
"project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" - } - ] - }, - "request_payload": { - "name": "posix" - }, - "status_code": 204 - }, - { - "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/inputs", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" } - ] }, - "response_body": { - "data": { - "096acfb2-8c38-560a-91d3-8911f4334289": { - "key": "096acfb2-8c38-560a-91d3-8911f4334289", - "value": 1, - "label": "InputInt" - }, - "bcc36381-7377-533f-bb04-f785c0f8e2be": { - "key": "bcc36381-7377-533f-bb04-f785c0f8e2be", - "value": "Foo", - "label": "InputString" + { + "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/nodes/9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca/outputs", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str" + }, + "response_value": "projects" + }, + { + "in": "path", + "name": "node_id", + "required": true, + "schema": { + "title": "Node Id", + "type": "str" + }, + "response_value": "nodes" + } + ] }, - "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { - "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", - "value": [ - 1 - ], - "label": "InputArray" + "request_payload": { + "outputs": { + "outFile": { + "store": 0, + "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json", + "label": "input.json", + "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", + "dataset": null + } + } }, - "d43949c5-5143-5738-bae9-7d231dcabe7f": { - "key": "d43949c5-5143-5738-bae9-7d231dcabe7f", - "value": 1, - "label": "InputNumber" + "status_code": 204 + }, + { + "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/inputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] }, - "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { - "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", - "value": true, - "label": "InputBool" - } - } - } - }, - { - "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/nodes/9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca/outputs", - "description": "", - "method": "PATCH", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str" - }, - "response_value": "projects" + "request_payload": { + "key": "value" }, - { - "in": "path", - "name": "node_id", - "required": true, - "schema": { - "title": "Node Id", - "type": "str" - }, - "response_value": "nodes" - } - ] - }, - "request_payload": { - "outputs": { - "outFile": { - "store": 0, - "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json", - "label": "input.json", - "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", - "dataset": null - } - } - }, - "status_code": 204 - }, - { - "name": "PATCH 
/projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs", - "description": "", - "method": "PATCH", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/inputs", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" + "response_body": { + "data": { + "096acfb2-8c38-560a-91d3-8911f4334289": { + "key": "096acfb2-8c38-560a-91d3-8911f4334289", + "value": 42, + "label": "InputInt" + }, + "bcc36381-7377-533f-bb04-f785c0f8e2be": { + "key": "bcc36381-7377-533f-bb04-f785c0f8e2be", + "value": "Z43", + "label": "InputString" + }, + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { + "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", + "value": [ + 1, + 2, + 3 + ], + "label": "InputArray" + }, + "d43949c5-5143-5738-bae9-7d231dcabe7f": { + "key": "d43949c5-5143-5738-bae9-7d231dcabe7f", + "value": 3.14, + "label": "InputNumber" + }, + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { + "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", + "value": false, + "label": "InputBool" + } + } } - ] - }, - "request_payload": { - "key": "value" }, - "response_body": { - "data": { - "096acfb2-8c38-560a-91d3-8911f4334289": { - "key": "096acfb2-8c38-560a-91d3-8911f4334289", - "value": 42, - "label": "InputInt" - }, - "bcc36381-7377-533f-bb04-f785c0f8e2be": { - "key": "bcc36381-7377-533f-bb04-f785c0f8e2be", - "value": "Z43", - "label": "InputString" + { + "name": "POST /computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { - "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", - "value": [ - 1, - 2, - 3 - ], - "label": "InputArray" + "request_payload": {}, + "response_body": { + "data": { + "pipeline_id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c" + } }, - "d43949c5-5143-5738-bae9-7d231dcabe7f": { - "key": "d43949c5-5143-5738-bae9-7d231dcabe7f", - "value": 3.14, - "label": "InputNumber" + "status_code": 201 + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { - "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", - "value": false, - "label": "InputBool" - } - } - } - }, - { - "name": "POST /computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:start", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/computations/{project_id}:start", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + 
"node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" } - ] - }, - "request_payload": {}, - "response_body": { - "data": { - "pipeline_id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c" - } }, - "status_code": 201 - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": null, - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - 
"host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": null, - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "STARTED", - "result": null, - 
"pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": null, - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": null, - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - 
"path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": null, - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "STARTED", - "result": null, - "pipeline_details": { - 
"adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": null, - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": null, - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": 
"/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "SUCCESS", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 1.0, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": "2024-04-15T15:50:37.747356+00:00", + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": null } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": null, - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "SUCCESS", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 1.0, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": "2024-04-15T15:50:37.747356+00:00", + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": null } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "SUCCESS", - "result": null, - "pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/outputs", + 
"description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] }, - "progress": 1.0, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": false, - "dependencies": [], - "currentStatus": "SUCCESS", - "progress": 1.0 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": "2024-04-15T15:50:37.747356+00:00", - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": null - } - }, - { - "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" + "response_body": { + "data": { + "ab014072-a95f-5775-bb34-5582a13245a6": { + "key": "ab014072-a95f-5775-bb34-5582a13245a6", + "value": { + "store": 0, + "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json", + "label": "input.json", + "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", + "dataset": null + }, + "label": "OutputFile" + }, + "3d4963ee-179f-5948-9086-dd9bef543f65": { + "key": "3d4963ee-179f-5948-9086-dd9bef543f65", + "value": 42, + "label": "OutputInt" + }, + "2a9452ac-d210-5e11-a631-1d73454bfd91": { + "key": "2a9452ac-d210-5e11-a631-1d73454bfd91", + "value": "Z43", + "label": "OutputString" + }, + "cb5bc33d-6635-5680-98e3-a6ac57f908f4": { + "key": "cb5bc33d-6635-5680-98e3-a6ac57f908f4", + "value": [ + 1, + 2, + 3 + ], + "label": "OutputArray" + }, + "cd7eacb5-6806-5956-86c8-9b30ec588402": { + "key": "cd7eacb5-6806-5956-86c8-9b30ec588402", + "value": 3.14, + "label": "OutputNumber" + }, + "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { + "key": "efaaeabf-e4bc-5667-a757-d9b17ad606d9", + "value": false, + "label": "OutputBool" + } + } } - ] }, - "query": "user_id=1", - "response_body": { - "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "state": "SUCCESS", - "result": null, - "pipeline_details": { - "adjacency_list": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + { + "name": "POST /simcore-s3/files/metadata:search_owned", + "description": "", + "method": "POST", + "host": "storage", + "path": { + "path": "/v0/simcore-s3/files/metadata:search_owned", + "path_parameters": [] }, - "progress": 1.0, - "node_states": { - "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { - "modified": false, - "dependencies": [], - "currentStatus": "SUCCESS", - "progress": 1.0 - } + "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4", + "response_body": { + "data": [] } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-15T15:50:31.284124+00:00", - "stopped": "2024-04-15T15:50:37.747356+00:00", - "submitted": "2024-04-15T15:50:31.162440+00:00", - "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", - "stop_url": null - } - }, - { - "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/outputs", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/outputs", - 
"path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" - } - ] }, - "response_body": { - "data": { - "ab014072-a95f-5775-bb34-5582a13245a6": { - "key": "ab014072-a95f-5775-bb34-5582a13245a6", - "value": { - "store": 0, - "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json", - "label": "input.json", - "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", - "dataset": null - }, - "label": "OutputFile" - }, - "3d4963ee-179f-5948-9086-dd9bef543f65": { - "key": "3d4963ee-179f-5948-9086-dd9bef543f65", - "value": 42, - "label": "OutputInt" - }, - "2a9452ac-d210-5e11-a631-1d73454bfd91": { - "key": "2a9452ac-d210-5e11-a631-1d73454bfd91", - "value": "Z43", - "label": "OutputString" - }, - "cb5bc33d-6635-5680-98e3-a6ac57f908f4": { - "key": "cb5bc33d-6635-5680-98e3-a6ac57f908f4", - "value": [ - 1, - 2, - 3 - ], - "label": "OutputArray" + { + "name": "POST /files/api%2Fd8bc0c02-c3ee-3cec-a562-e6fd3e00be4b%2Finput.json:soft-copy", + "description": "", + "method": "POST", + "host": "storage", + "path": { + "path": "/v0/files/{file_id}:soft-copy", + "path_parameters": [ + { + "in": "path", + "name": "file_id", + "required": true, + "schema": { + "title": "File Id", + "anyOf": [ + { + "type": "str", + "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$" + }, + { + "type": "str", + "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$" + } + ] + }, + "response_value": "files" + } + ] }, - "cd7eacb5-6806-5956-86c8-9b30ec588402": { - "key": "cd7eacb5-6806-5956-86c8-9b30ec588402", - "value": 3.14, - "label": "OutputNumber" + "query": "user_id=1", + "request_payload": { + "link_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json" }, - "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { - "key": "efaaeabf-e4bc-5667-a757-d9b17ad606d9", - "value": false, - "label": "OutputBool" + "response_body": { + "data": { + "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", + "location_id": 0, + "project_name": null, + "node_name": null, + "file_name": "input.json", + "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", + "created_at": "2024-04-15T15:50:27.134729", + "last_modified": "2024-04-15T15:50:27+00:00", + "file_size": 9, + "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", + "is_soft_link": true, + "is_directory": false, + "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd" + }, + "error": null } - } - } - }, - { - "name": "POST /simcore-s3/files/metadata:search_owned", - "description": "", - "method": "POST", - "host": "storage", - "path": { - "path": "/v0/simcore-s3/files/metadata:search_owned", - "path_parameters": [] }, - "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4", - "response_body": { - "data": [] - } - }, - { - "name": "POST /files/api%2Fd8bc0c02-c3ee-3cec-a562-e6fd3e00be4b%2Finput.json:soft-copy", - "description": "", - "method": "POST", - "host": "storage", - "path": { - "path": "/v0/files/{file_id}:soft-copy", - "path_parameters": [ - { - "in": "path", - "name": "file_id", - "required": true, - "schema": { - "title": "File Id", - "anyOf": [ - { - "type": "str", - "pattern": 
"^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$" - }, - { - "type": "str", - "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$" - } + { + "name": "POST /simcore-s3/files/metadata:search", + "description": "", + "method": "POST", + "host": "storage", + "path": { + "path": "/v0/simcore-s3/files/metadata:search", + "path_parameters": [] + }, + "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4&access_right=read", + "response_body": { + "data": [ + { + "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", + "location_id": 0, + "project_name": null, + "node_name": null, + "file_name": "input.json", + "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", + "created_at": "2024-04-15T15:50:27.134729", + "last_modified": "2024-04-15T15:50:27+00:00", + "file_size": 9, + "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", + "is_soft_link": true, + "is_directory": false, + "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd" + } ] - }, - "response_value": "files" } - ] - }, - "query": "user_id=1", - "request_payload": { - "link_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json" - }, - "response_body": { - "data": { - "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", - "location_id": 0, - "project_name": null, - "node_name": null, - "file_name": "input.json", - "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", - "created_at": "2024-04-15T15:50:27.134729", - "last_modified": "2024-04-15T15:50:27+00:00", - "file_size": 9, - "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", - "is_soft_link": true, - "is_directory": false, - "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd" - }, - "error": null - } - }, - { - "name": "POST /simcore-s3/files/metadata:search", - "description": "", - "method": "POST", - "host": "storage", - "path": { - "path": "/v0/simcore-s3/files/metadata:search", - "path_parameters": [] }, - "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4&access_right=read", - "response_body": { - "data": [ - { - "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", - "location_id": 0, - "project_name": null, - "node_name": null, - "file_name": "input.json", - "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", - "created_at": "2024-04-15T15:50:27.134729", - "last_modified": "2024-04-15T15:50:27+00:00", - "file_size": 9, - "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", - "is_soft_link": true, - "is_directory": false, - "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd" - } - ] - } - }, - { - "name": "GET /locations/0/files/api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4%2Finput.json", - "description": "", - "method": "GET", - "host": "storage", - "path": { - "path": "/v0/locations/{location_id}/files/{file_id}", - "path_parameters": [ - { - "in": "path", - "name": "file_id", - "required": true, - "schema": { - "title": "File Id", - "anyOf": [ - { - "type": "str", - "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$" - }, - { - "type": "str", - "pattern": 
"^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$" - } + { + "name": "GET /locations/0/files/api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4%2Finput.json", + "description": "", + "method": "GET", + "host": "storage", + "path": { + "path": "/v0/locations/{location_id}/files/{file_id}", + "path_parameters": [ + { + "in": "path", + "name": "file_id", + "required": true, + "schema": { + "title": "File Id", + "anyOf": [ + { + "type": "str", + "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$" + }, + { + "type": "str", + "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$" + } + ] + }, + "response_value": "files" + }, + { + "in": "path", + "name": "location_id", + "required": true, + "schema": { + "title": "Location Id", + "type": "int" + }, + "response_value": "locations" + } ] - }, - "response_value": "files" }, - { - "in": "path", - "name": "location_id", - "required": true, - "schema": { - "title": "Location Id", - "type": "int" - }, - "response_value": "locations" + "query": "user_id=1", + "response_body": { + "data": { + "link": "http://127.0.0.1:9001/simcore/api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=12345678%2F20240415%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240415T155039Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=79a5cbc9b23ebb4084f4156acd6f7e6f891197dbd5a088327c9131768bd1c610" + } } - ] }, - "query": "user_id=1", - "response_body": { - "data": { - "link": "http://127.0.0.1:9001/simcore/api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=12345678%2F20240415%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240415T155039Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=79a5cbc9b23ebb4084f4156acd6f7e6f891197dbd5a088327c9131768bd1c610" - } + { + "name": "DELETE /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "DELETE", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "status_code": 204 } - }, - { - "name": "DELETE /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c", - "description": "", - "method": "DELETE", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "projects" - } - ] - }, - "status_code": 204 - } ] diff --git a/services/api-server/tests/mocks/start_job_no_payment.json b/services/api-server/tests/mocks/start_job_no_payment.json index fb20632634a..15b1e3b92b4 100644 --- a/services/api-server/tests/mocks/start_job_no_payment.json +++ b/services/api-server/tests/mocks/start_job_no_payment.json @@ -1,99 +1,97 @@ [ - { - "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/computations/{project_id}:start", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": 
"str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": null, - "request_payload": { - "force_restart": false, - "cluster_id": 0, - "subgraph": [] - }, - "response_body": { - "data": { - "pipeline_id": "48323c7f-e379-4e16-8b58-dc69643f653d" - } - }, - "status_code": 201 - }, - { - "name": "GET /v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": "user_id=1", - "request_payload": null, - "response_body": { - "id": "48323c7f-e379-4e16-8b58-dc69643f653d", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": [] + { + "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] }, - "progress": 0.0, - "node_states": { - "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.0 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2023-10-26T14:19:05.389765+00:00", - "stopped": null, - "submitted": "2023-10-26T14:19:05.241935+00:00", - "url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d:stop?user_id=1" + "query": null, + "request_payload": { + "force_restart": false, + "subgraph": [] + }, + "response_body": { + "data": { + "pipeline_id": "48323c7f-e379-4e16-8b58-dc69643f653d" + } + }, + "status_code": 201 }, - "status_code": 200 - } + { + "name": "GET /v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "48323c7f-e379-4e16-8b58-dc69643f653d", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": [] + }, + "progress": 0.0, + "node_states": { + "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.0 + } + } + }, + "iteration": 1, + "started": 
"2023-10-26T14:19:05.389765+00:00", + "stopped": null, + "submitted": "2023-10-26T14:19:05.241935+00:00", + "url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d:stop?user_id=1" + }, + "status_code": 200 + } ] diff --git a/services/api-server/tests/mocks/start_job_not_enough_credit.json b/services/api-server/tests/mocks/start_job_not_enough_credit.json index 2167313c683..19f54e53ca6 100644 --- a/services/api-server/tests/mocks/start_job_not_enough_credit.json +++ b/services/api-server/tests/mocks/start_job_not_enough_credit.json @@ -1,242 +1,241 @@ [ - { - "name": "GET /projects/48323c7f-e379-4e16-8b58-dc69643f653d", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "projects" - } - ] - }, - "query": null, - "request_payload": null, - "response_body": { - "data": { - "uuid": "48323c7f-e379-4e16-8b58-dc69643f653d", - "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d", - "description": "Study associated to solver job:\n{\n \"id\": \"48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:13:07.998632+00:00\"\n}", - "thumbnail": "https://via.placeholder.com/170x120.png", - "creationDate": "2023-10-26T14:13:08.013Z", - "lastChangeDate": "2023-10-26T14:13:08.013Z", - "workspaceId": 3, - "folderId": 2, - "trashedAt": null, - "workbench": { - "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { - "key": "simcore/services/comp/itis/sleeper", - "version": "2.0.2", - "label": "sleeper", - "progress": 0.0, - "inputs": { - "x": 4.33, - "n": 55, - "title": "Temperature", - "enabled": true, - "input_file": { - "store": 0, - "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", - "label": "input.txt" - } - }, - "inputsUnits": {}, - "inputNodes": [], - "outputs": {}, - "state": { - "modified": true, - "dependencies": [], - "currentStatus": "NOT_STARTED", - "progress": null - } - } - }, - "prjOwner": "austin66@example.org", - "accessRights": { - "3": { - "read": true, - "write": true, - "delete": true - } + { + "name": "GET /projects/48323c7f-e379-4e16-8b58-dc69643f653d", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] }, - "tags": [], - "classifiers": [], - "state": { - "locked": { - "value": false, - "status": "CLOSED" - }, - "state": { - "value": "NOT_STARTED" - } - }, - "ui": { - "workbench": { - "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { - "position": { - "x": 633, - "y": 229 - } + "query": null, + "request_payload": null, + 
"response_body": { + "data": { + "uuid": "48323c7f-e379-4e16-8b58-dc69643f653d", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d", + "description": "Study associated to solver job:\n{\n \"id\": \"48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:13:07.998632+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-10-26T14:13:08.013Z", + "lastChangeDate": "2023-10-26T14:13:08.013Z", + "workspaceId": 3, + "folderId": 2, + "trashedAt": null, + "workbench": { + "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "store": 0, + "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "label": "input.txt" + } + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "prjOwner": "austin66@example.org", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24", + "annotations": {} + }, + "quality": {}, + "dev": {} } - }, - "slideshow": {}, - "currentNodeId": "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24", - "annotations": {} }, - "quality": {}, - "dev": {} - } + "status_code": 200 }, - "status_code": 200 - }, - { - "name": "PUT /projects/48323c7f-e379-4e16-8b58-dc69643f653d/nodes/3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24/pricing-plan/1/pricing-unit/1", - "description": "", - "method": "PUT", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}", - "path_parameters": [ - { - "in": "path", - "name": "pricing_plan_id", - "required": true, - "schema": { - "title": "Pricing Plan Id", - "type": "int", - "pattern": null, - "format": null, - "exclusiveMinimum": true, - "minimum": 0, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "pricing-plan" + { + "name": "PUT /projects/48323c7f-e379-4e16-8b58-dc69643f653d/nodes/3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24/pricing-plan/1/pricing-unit/1", + "description": "", + "method": "PUT", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}", + "path_parameters": [ + { + "in": "path", + "name": "pricing_plan_id", + "required": true, + "schema": { + "title": "Pricing Plan Id", + "type": "int", + "pattern": null, + "format": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "pricing-plan" + }, + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": 
"uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + }, + { + "in": "path", + "name": "pricing_unit_id", + "required": true, + "schema": { + "title": "Pricing Unit Id", + "type": "int", + "pattern": null, + "format": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "pricing-unit" + }, + { + "in": "path", + "name": "node_id", + "required": true, + "schema": { + "title": "Node Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "nodes" + } + ] }, - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "projects" + "query": null, + "request_payload": null, + "response_body": null, + "status_code": 204 + }, + { + "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] }, - { - "in": "path", - "name": "pricing_unit_id", - "required": true, - "schema": { - "title": "Pricing Unit Id", - "type": "int", - "pattern": null, - "format": null, - "exclusiveMinimum": true, - "minimum": 0, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "pricing-unit" + "query": null, + "request_payload": { + "force_restart": false, + "subgraph": [] }, - { - "in": "path", - "name": "node_id", - "required": true, - "schema": { - "title": "Node Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "nodes" - } - ] - }, - "query": null, - "request_payload": null, - "response_body": null, - "status_code": 204 - }, - { - "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/computations/{project_id}:start", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": null, - "request_payload": { - "force_restart": false, - "cluster_id": 0, - "subgraph": [] - }, - "response_body": { - "data": null, - "error": { - "logs": [], - "errors": [ - { - "code": "WalletNotEnoughCreditsError", - "message": "Wallet does not have enough credits. 
Wallet 1 credit balance -200.11", - "resource": null, - "field": null - } - ], - "status": 402, - "message": "Unexpected client error" - } - }, - "status_code": 402 - } + "response_body": { + "data": null, + "error": { + "logs": [], + "errors": [ + { + "code": "WalletNotEnoughCreditsError", + "message": "Wallet does not have enough credits. Wallet 1 credit balance -200.11", + "resource": null, + "field": null + } + ], + "status": 402, + "message": "Unexpected client error" + } + }, + "status_code": 402 + } ] diff --git a/services/api-server/tests/mocks/start_job_with_payment.json b/services/api-server/tests/mocks/start_job_with_payment.json index 1a7a829cf11..ac3aed74ecb 100644 --- a/services/api-server/tests/mocks/start_job_with_payment.json +++ b/services/api-server/tests/mocks/start_job_with_payment.json @@ -1,288 +1,286 @@ [ - { - "name": "GET /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e", - "description": "", - "method": "GET", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "projects" - } - ] - }, - "query": null, - "request_payload": null, - "response_body": { - "data": { - "uuid": "e551e994-a68d-4c26-b6fc-59019b35ee6e", - "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e", - "description": "Study associated to solver job:\n{\n \"id\": \"e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:10:11.103041+00:00\"\n}", - "thumbnail": "https://via.placeholder.com/170x120.png", - "creationDate": "2023-10-26T14:10:11.118Z", - "lastChangeDate": "2023-10-26T14:10:11.118Z", - "workspaceId": 12, - "folderId": 2, - "trashedAt": null, - "workbench": { - "657b124c-0697-5166-b820-a2ea2704ae84": { - "key": "simcore/services/comp/itis/sleeper", - "version": "2.0.2", - "label": "sleeper", - "progress": 0.0, - "inputs": { - "x": 4.33, - "n": 55, - "title": "Temperature", - "enabled": true, - "input_file": { - "store": 0, - "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", - "label": "input.txt" - } - }, - "inputsUnits": {}, - "inputNodes": [], - "outputs": {}, - "state": { - "modified": true, - "dependencies": [], - "currentStatus": "NOT_STARTED", - "progress": null - } - } - }, - "prjOwner": "freemanryan@example.net", - "accessRights": { - "3": { - "read": true, - "write": true, - "delete": true - } + { + "name": "GET /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] }, - "tags": [], - "classifiers": [], - "state": { - "locked": { - "value": false, - "status": "CLOSED" - }, - "state": { - "value": "NOT_STARTED" - } - }, - "ui": { - 
"workbench": { - "657b124c-0697-5166-b820-a2ea2704ae84": { - "position": { - "x": 633, - "y": 229 - } + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "e551e994-a68d-4c26-b6fc-59019b35ee6e", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e", + "description": "Study associated to solver job:\n{\n \"id\": \"e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:10:11.103041+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-10-26T14:10:11.118Z", + "lastChangeDate": "2023-10-26T14:10:11.118Z", + "workspaceId": 12, + "folderId": 2, + "trashedAt": null, + "workbench": { + "657b124c-0697-5166-b820-a2ea2704ae84": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "store": 0, + "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "label": "input.txt" + } + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "prjOwner": "freemanryan@example.net", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "657b124c-0697-5166-b820-a2ea2704ae84": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "657b124c-0697-5166-b820-a2ea2704ae84", + "annotations": {} + }, + "quality": {}, + "dev": {} } - }, - "slideshow": {}, - "currentNodeId": "657b124c-0697-5166-b820-a2ea2704ae84", - "annotations": {} }, - "quality": {}, - "dev": {} - } + "status_code": 200 }, - "status_code": 200 - }, - { - "name": "PUT /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e/nodes/657b124c-0697-5166-b820-a2ea2704ae84/pricing-plan/1/pricing-unit/1", - "description": "", - "method": "PUT", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}", - "path_parameters": [ - { - "in": "path", - "name": "pricing_plan_id", - "required": true, - "schema": { - "title": "Pricing Plan Id", - "type": "int", - "pattern": null, - "format": null, - "exclusiveMinimum": true, - "minimum": 0, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "pricing-plan" + { + "name": "PUT /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e/nodes/657b124c-0697-5166-b820-a2ea2704ae84/pricing-plan/1/pricing-unit/1", + "description": "", + "method": "PUT", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}", + "path_parameters": [ + { + "in": "path", + "name": "pricing_plan_id", + "required": true, + "schema": { + "title": "Pricing Plan Id", + "type": "int", + "pattern": null, + "format": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "pricing-plan" + }, + 
{ + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + }, + { + "in": "path", + "name": "pricing_unit_id", + "required": true, + "schema": { + "title": "Pricing Unit Id", + "type": "int", + "pattern": null, + "format": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "pricing-unit" + }, + { + "in": "path", + "name": "node_id", + "required": true, + "schema": { + "title": "Node Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "nodes" + } + ] }, - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "projects" + "query": null, + "request_payload": null, + "response_body": null, + "status_code": 204 + }, + { + "name": "POST /computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] }, - { - "in": "path", - "name": "pricing_unit_id", - "required": true, - "schema": { - "title": "Pricing Unit Id", - "type": "int", - "pattern": null, - "format": null, - "exclusiveMinimum": true, - "minimum": 0, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "pricing-unit" + "query": null, + "request_payload": { + "force_restart": false, + "subgraph": [] }, - { - "in": "path", - "name": "node_id", - "required": true, - "schema": { - "title": "Node Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "nodes" - } - ] - }, - "query": null, - "request_payload": null, - "response_body": null, - "status_code": 204 - }, - { - "name": "POST /computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:start", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/computations/{project_id}:start", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": null, - "request_payload": { - "force_restart": false, - "cluster_id": 0, - "subgraph": [] - }, - "response_body": { - "data": { - "pipeline_id": "e551e994-a68d-4c26-b6fc-59019b35ee6e" - } - }, - "status_code": 201 - }, - { - "name": "GET /v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": 
"/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": "user_id=1", - "request_payload": null, - "response_body": { - "id": "e551e994-a68d-4c26-b6fc-59019b35ee6e", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "657b124c-0697-5166-b820-a2ea2704ae84": [] + "response_body": { + "data": { + "pipeline_id": "e551e994-a68d-4c26-b6fc-59019b35ee6e" + } }, - "progress": 0.0, - "node_states": { - "657b124c-0697-5166-b820-a2ea2704ae84": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.0 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2023-10-26T14:11:20.606448+00:00", - "stopped": null, - "submitted": "2023-10-26T14:11:20.460760+00:00", - "url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:stop?user_id=1" + "status_code": 201 }, - "status_code": 200 - } + { + "name": "GET /v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "e551e994-a68d-4c26-b6fc-59019b35ee6e", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "657b124c-0697-5166-b820-a2ea2704ae84": [] + }, + "progress": 0.0, + "node_states": { + "657b124c-0697-5166-b820-a2ea2704ae84": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.0 + } + } + }, + "iteration": 1, + "started": "2023-10-26T14:11:20.606448+00:00", + "stopped": null, + "submitted": "2023-10-26T14:11:20.460760+00:00", + "url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:stop?user_id=1" + }, + "status_code": 200 + } ] diff --git a/services/api-server/tests/mocks/start_solver_job.json b/services/api-server/tests/mocks/start_solver_job.json index 6c54ff0a058..f779cd45b9d 100644 --- a/services/api-server/tests/mocks/start_solver_job.json +++ b/services/api-server/tests/mocks/start_solver_job.json @@ -1,80 +1,79 @@ [ - { - "name": "POST /computations/b9faf8d8-4928-4e50-af40-3690712c5481:start", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/computations/{project_id}:start", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" - } - ] - }, - "request_payload": {}, - "response_body": { - "data": { - "pipeline_id": "b9faf8d8-4928-4e50-af40-3690712c5481" - } - }, - "status_code": 409 - }, - { - "name": "GET 
/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "format": "uuid" - }, - "response_value": "computations" - } - ] + { + "name": "POST /computations/b9faf8d8-4928-4e50-af40-3690712c5481:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "request_payload": {}, + "response_body": { + "data": { + "pipeline_id": "b9faf8d8-4928-4e50-af40-3690712c5481" + } + }, + "status_code": 409 }, - "query": "user_id=1", - "response_body": { - "id": "b9faf8d8-4928-4e50-af40-3690712c5481", - "state": "STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": [] + { + "name": "GET /v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] }, - "progress": 0.05, - "node_states": { - "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": { - "modified": true, - "dependencies": [], - "currentStatus": "STARTED", - "progress": 0.05 - } + "query": "user_id=1", + "response_body": { + "id": "b9faf8d8-4928-4e50-af40-3690712c5481", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": [] + }, + "progress": 0.05, + "node_states": { + "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 2, + "started": "2024-06-18T20:33:46.482456+00:00", + "stopped": "2024-06-18T20:31:25.399647+00:00", + "submitted": "2024-06-18T20:33:46.384524+00:00", + "url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481?user_id=1", + "stop_url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481:stop?user_id=1" } - }, - "iteration": 2, - "cluster_id": 0, - "started": "2024-06-18T20:33:46.482456+00:00", - "stopped": "2024-06-18T20:31:25.399647+00:00", - "submitted": "2024-06-18T20:33:46.384524+00:00", - "url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481?user_id=1", - "stop_url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481:stop?user_id=1" } - } ] diff --git a/services/api-server/tests/mocks/stop_job.json b/services/api-server/tests/mocks/stop_job.json index e840e1b5cca..f6574562dbf 100644 --- a/services/api-server/tests/mocks/stop_job.json +++ b/services/api-server/tests/mocks/stop_job.json @@ -1,118 +1,116 @@ [ - { - "name": "POST /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop", - "description": "", - "method": "POST", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}:stop", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": 
"str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": null, - "request_payload": { - "user_id": 1 - }, - "response_body": { - "id": "4989fa99-b567-43bd-978a-68c2b95fdabc", - "state": "NOT_STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": [] + { + "name": "POST /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop", + "description": "", + "method": "POST", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}:stop", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] }, - "progress": 0.0, - "node_states": { - "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": { - "modified": true, - "dependencies": [], - "currentStatus": "NOT_STARTED", - "progress": null - } - } - }, - "iteration": null, - "cluster_id": null, - "started": null, - "stopped": null, - "submitted": "2023-11-17T13:04:59.327557+00:00", - "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop", - "stop_url": null - }, - "status_code": 202 - }, - { - "name": "GET /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": "user_id=1", - "request_payload": null, - "response_body": { - "id": "4989fa99-b567-43bd-978a-68c2b95fdabc", - "state": "NOT_STARTED", - "result": null, - "pipeline_details": { - "adjacency_list": { - "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": [] + "query": null, + "request_payload": { + "user_id": 1 }, - "progress": 0.0, - "node_states": { - "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": { - "modified": true, - "dependencies": [], - "currentStatus": "NOT_STARTED", - "progress": null - } - } - }, - "iteration": null, - "cluster_id": null, - "started": null, - "stopped": null, - "submitted": "2023-11-17T13:04:59.327557+00:00", - "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc?user_id=1", - "stop_url": null + "response_body": { + "id": "4989fa99-b567-43bd-978a-68c2b95fdabc", + "state": "NOT_STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": [] + }, + "progress": 0.0, + "node_states": { + "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "iteration": null, + "started": null, + "stopped": null, + "submitted": "2023-11-17T13:04:59.327557+00:00", + "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop", + "stop_url": null + }, + "status_code": 202 }, - "status_code": 200 - } + { + "name": "GET /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc", + "description": "", + "method": "GET", + "host": 
"director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "4989fa99-b567-43bd-978a-68c2b95fdabc", + "state": "NOT_STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": [] + }, + "progress": 0.0, + "node_states": { + "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "iteration": null, + "started": null, + "stopped": null, + "submitted": "2023-11-17T13:04:59.327557+00:00", + "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc?user_id=1", + "stop_url": null + }, + "status_code": 200 + } ] diff --git a/services/api-server/tests/mocks/study_job_start_stop_delete.json b/services/api-server/tests/mocks/study_job_start_stop_delete.json index 823f2639334..d279e1dc240 100644 --- a/services/api-server/tests/mocks/study_job_start_stop_delete.json +++ b/services/api-server/tests/mocks/study_job_start_stop_delete.json @@ -1,243 +1,240 @@ [ - { - "name": "POST /computations/10da03f0-f1bc-11ee-9e42-0242ac140012:start", - "description": "", - "method": "POST", - "host": "webserver", - "path": { - "path": "/v0/computations/{project_id}:start", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": null, - "request_payload": {}, - "response_body": { - "data": { - "pipeline_id": "10da03f0-f1bc-11ee-9e42-0242ac140012" - } - }, - "status_code": 201 - }, - { - "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": "user_id=1", - "request_payload": null, - "response_body": { - "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", - "state": "PENDING", - "result": null, - "pipeline_details": { - "adjacency_list": { - "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + { + "name": "POST /computations/10da03f0-f1bc-11ee-9e42-0242ac140012:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] }, - "progress": 0.0, - "node_states": { - "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { 
- "modified": true, - "dependencies": [], - "currentStatus": "PENDING", - "progress": null - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": null, - "stopped": null, - "submitted": "2024-04-03T13:15:00.045631+00:00", - "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1", - "stop_url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop?user_id=1" - }, - "status_code": 200 - }, - { - "name": "POST /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop", - "description": "", - "method": "POST", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}:stop", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] - }, - "query": null, - "request_payload": { - "user_id": 1 - }, - "response_body": { - "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", - "state": "SUCCESS", - "result": null, - "pipeline_details": { - "adjacency_list": { - "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + "query": null, + "request_payload": {}, + "response_body": { + "data": { + "pipeline_id": "10da03f0-f1bc-11ee-9e42-0242ac140012" + } }, - "progress": 1.0, - "node_states": { - "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { - "modified": false, - "dependencies": [], - "currentStatus": "SUCCESS", - "progress": 1.0 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-03T13:15:00.425270+00:00", - "stopped": "2024-04-03T13:15:08.997076+00:00", - "submitted": "2024-04-03T13:15:00.045631+00:00", - "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop", - "stop_url": null + "status_code": 201 }, - "status_code": 202 - }, - { - "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012", - "description": "", - "method": "GET", - "host": "director-v2", - "path": { - "path": "/v2/computations/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "computations" - } - ] + { + "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", + "state": "PENDING", + "result": null, + "pipeline_details": { + "adjacency_list": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + }, + "progress": 0.0, + "node_states": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { + "modified": true, + "dependencies": [], + "currentStatus": "PENDING", + "progress": null + } + } + }, + "iteration": 1, + "started": null, + "stopped": null, + "submitted": "2024-04-03T13:15:00.045631+00:00", + "url": 
"http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop?user_id=1" + }, + "status_code": 200 }, - "query": "user_id=1", - "request_payload": null, - "response_body": { - "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", - "state": "SUCCESS", - "result": null, - "pipeline_details": { - "adjacency_list": { - "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + { + "name": "POST /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop", + "description": "", + "method": "POST", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}:stop", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": null, + "request_payload": { + "user_id": 1 + }, + "response_body": { + "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", + "state": "SUCCESS", + "result": null, + "pipeline_details": { + "adjacency_list": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + }, + "progress": 1.0, + "node_states": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "iteration": 1, + "started": "2024-04-03T13:15:00.425270+00:00", + "stopped": "2024-04-03T13:15:08.997076+00:00", + "submitted": "2024-04-03T13:15:00.045631+00:00", + "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop", + "stop_url": null }, - "progress": 1.0, - "node_states": { - "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { - "modified": false, - "dependencies": [], - "currentStatus": "SUCCESS", - "progress": 1.0 - } - } - }, - "iteration": 1, - "cluster_id": 0, - "started": "2024-04-03T13:15:00.425270+00:00", - "stopped": "2024-04-03T13:15:08.997076+00:00", - "submitted": "2024-04-03T13:15:00.045631+00:00", - "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1", - "stop_url": null + "status_code": 202 }, - "status_code": 200 - }, - { - "name": "DELETE /projects/10da03f0-f1bc-11ee-9e42-0242ac140012", - "description": "", - "method": "DELETE", - "host": "webserver", - "path": { - "path": "/v0/projects/{project_id}", - "path_parameters": [ - { - "in": "path", - "name": "project_id", - "required": true, - "schema": { - "title": "Project Id", - "type": "str", - "pattern": null, - "format": "uuid", - "exclusiveMinimum": null, - "minimum": null, - "anyOf": null, - "allOf": null, - "oneOf": null - }, - "response_value": "projects" - } - ] + { + "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", + "state": "SUCCESS", + "result": null, + "pipeline_details": { + "adjacency_list": { + 
"4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + }, + "progress": 1.0, + "node_states": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "iteration": 1, + "started": "2024-04-03T13:15:00.425270+00:00", + "stopped": "2024-04-03T13:15:08.997076+00:00", + "submitted": "2024-04-03T13:15:00.045631+00:00", + "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1", + "stop_url": null + }, + "status_code": 200 }, - "query": null, - "request_payload": null, - "response_body": null, - "status_code": 204 - } + { + "name": "DELETE /projects/10da03f0-f1bc-11ee-9e42-0242ac140012", + "description": "", + "method": "DELETE", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": null, + "status_code": 204 + } ] diff --git a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py index 237b846abaf..865983537b0 100644 --- a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py +++ b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py @@ -231,7 +231,6 @@ async def test_run_solver_job( "result", "pipeline_details", "iteration", - "cluster_id", "url", "stop_url", "submitted", @@ -269,7 +268,6 @@ async def test_run_solver_job( "progress": 0.0, }, "iteration": 1, - "cluster_id": 0, "url": "http://test.com", "stop_url": "http://test.com", "started": None, @@ -365,7 +363,6 @@ async def test_run_solver_job( resp = await client.post( f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}:start", auth=auth, - params={"cluster_id": 1}, ) assert resp.status_code == status.HTTP_202_ACCEPTED assert mocked_directorv2_service_api["inspect_computation"].called diff --git a/services/autoscaling/src/simcore_service_autoscaling/core/settings.py b/services/autoscaling/src/simcore_service_autoscaling/core/settings.py index 51e7a06e7d5..347c4c978c3 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/core/settings.py +++ b/services/autoscaling/src/simcore_service_autoscaling/core/settings.py @@ -11,7 +11,7 @@ PortInt, VersionTag, ) -from models_library.clusters import InternalClusterAuthentication +from models_library.clusters import ClusterAuthentication from models_library.docker import DockerLabelKey from pydantic import ( AliasChoices, @@ -193,9 +193,9 @@ class NodesMonitoringSettings(BaseCustomSettings): class DaskMonitoringSettings(BaseCustomSettings): DASK_MONITORING_URL: AnyUrl = Field( - ..., description="the url to the osparc-dask-scheduler" + ..., description="the url to the dask-scheduler" ) - DASK_SCHEDULER_AUTH: InternalClusterAuthentication = Field( + DASK_SCHEDULER_AUTH: ClusterAuthentication = Field( ..., description="defines the authentication of the clusters created via clusters-keeper (can be None or TLS)", ) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py 
b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py index cc6dcef68a4..a632afe956e 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py @@ -4,7 +4,7 @@ from aws_library.ec2 import EC2InstanceData, EC2Tags, Resources from fastapi import FastAPI -from models_library.clusters import InternalClusterAuthentication +from models_library.clusters import ClusterAuthentication from models_library.docker import ( DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY, DockerLabelKey, @@ -37,7 +37,7 @@ def _scheduler_url(app: FastAPI) -> AnyUrl: return app_settings.AUTOSCALING_DASK.DASK_MONITORING_URL -def _scheduler_auth(app: FastAPI) -> InternalClusterAuthentication: +def _scheduler_auth(app: FastAPI) -> ClusterAuthentication: app_settings = get_application_settings(app) assert app_settings.AUTOSCALING_DASK # nosec return app_settings.AUTOSCALING_DASK.DASK_SCHEDULER_AUTH diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py b/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py index b547ce2bbd4..4c5ee00f86c 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py @@ -12,7 +12,7 @@ from aws_library.ec2 import EC2InstanceData, Resources from dask_task_models_library.resource_constraints import DaskTaskResources from distributed.core import Status -from models_library.clusters import InternalClusterAuthentication, TLSAuthentication +from models_library.clusters import ClusterAuthentication, TLSAuthentication from pydantic import AnyUrl, ByteSize, TypeAdapter from ..core.errors import ( @@ -43,7 +43,7 @@ async def _wrap_client_async_routine( @contextlib.asynccontextmanager async def _scheduler_client( - url: AnyUrl, authentication: InternalClusterAuthentication + url: AnyUrl, authentication: ClusterAuthentication ) -> AsyncIterator[distributed.Client]: """ Raises: @@ -116,7 +116,7 @@ def _find_by_worker_host( async def is_worker_connected( scheduler_url: AnyUrl, - authentication: InternalClusterAuthentication, + authentication: ClusterAuthentication, worker_ec2_instance: EC2InstanceData, ) -> bool: with contextlib.suppress(DaskNoWorkersError, DaskWorkerNotFoundError): @@ -130,7 +130,7 @@ async def is_worker_connected( async def is_worker_retired( scheduler_url: AnyUrl, - authentication: InternalClusterAuthentication, + authentication: ClusterAuthentication, worker_ec2_instance: EC2InstanceData, ) -> bool: with contextlib.suppress(DaskNoWorkersError, DaskWorkerNotFoundError): @@ -156,7 +156,7 @@ def _dask_key_to_dask_task_id(key: dask.typing.Key) -> DaskTaskId: async def list_unrunnable_tasks( scheduler_url: AnyUrl, - authentication: InternalClusterAuthentication, + authentication: ClusterAuthentication, ) -> list[DaskTask]: """ Raises: @@ -188,7 +188,7 @@ def _list_tasks( async def list_processing_tasks_per_worker( scheduler_url: AnyUrl, - authentication: InternalClusterAuthentication, + authentication: ClusterAuthentication, ) -> dict[DaskWorkerUrl, list[DaskTask]]: """ Raises: @@ -227,7 +227,7 @@ def _list_processing_tasks( async def get_worker_still_has_results_in_memory( scheduler_url: AnyUrl, - authentication: InternalClusterAuthentication, + authentication: ClusterAuthentication, ec2_instance: EC2InstanceData, ) -> int: """ @@ -246,7 +246,7 @@ async def 
get_worker_still_has_results_in_memory( async def get_worker_used_resources( scheduler_url: AnyUrl, - authentication: InternalClusterAuthentication, + authentication: ClusterAuthentication, ec2_instance: EC2InstanceData, ) -> Resources: """ @@ -299,7 +299,7 @@ def _list_processing_tasks_on_worker( async def compute_cluster_total_resources( scheduler_url: AnyUrl, - authentication: InternalClusterAuthentication, + authentication: ClusterAuthentication, instances: list[AssociatedInstance], ) -> Resources: if not instances: @@ -320,7 +320,7 @@ async def compute_cluster_total_resources( async def try_retire_nodes( - scheduler_url: AnyUrl, authentication: InternalClusterAuthentication + scheduler_url: AnyUrl, authentication: ClusterAuthentication ) -> None: async with _scheduler_client(scheduler_url, authentication) as client: await _wrap_client_async_routine( diff --git a/services/autoscaling/tests/unit/test_modules_dask.py b/services/autoscaling/tests/unit/test_modules_dask.py index ae2ed0c5f15..36c45a70752 100644 --- a/services/autoscaling/tests/unit/test_modules_dask.py +++ b/services/autoscaling/tests/unit/test_modules_dask.py @@ -13,7 +13,7 @@ from aws_library.ec2 import Resources from faker import Faker from models_library.clusters import ( - InternalClusterAuthentication, + ClusterAuthentication, NoAuthentication, TLSAuthentication, ) @@ -52,7 +52,7 @@ "authentication", _authentication_types, ids=lambda p: f"authentication-{p.type}" ) async def test__scheduler_client_with_wrong_url( - faker: Faker, authentication: InternalClusterAuthentication + faker: Faker, authentication: ClusterAuthentication ): with pytest.raises(DaskSchedulerNotFoundError): async with _scheduler_client( @@ -72,7 +72,7 @@ def scheduler_url(dask_spec_local_cluster: distributed.SpecCluster) -> AnyUrl: @pytest.fixture -def scheduler_authentication() -> InternalClusterAuthentication: +def scheduler_authentication() -> ClusterAuthentication: return NoAuthentication() @@ -92,7 +92,7 @@ def dask_workers_config() -> dict[str, Any]: async def test__scheduler_client( - scheduler_url: AnyUrl, scheduler_authentication: InternalClusterAuthentication + scheduler_url: AnyUrl, scheduler_authentication: ClusterAuthentication ): async with _scheduler_client(scheduler_url, scheduler_authentication): ... 
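As a brief illustrative aside: the `ClusterAuthentication` union that replaces `InternalClusterAuthentication` throughout this diff is consumed the same way in both the autoscaling and clusters-keeper dask helpers — the authentication value is translated into a `distributed.Security` object before a scheduler client is opened. Below is a minimal sketch of that pattern, not the services' actual implementation; it assumes the `TLSAuthentication` field names from `models_library.clusters` (`tls_ca_file`, `tls_client_cert`, `tls_client_key`), and the scheduler URL in the usage note is a placeholder.

```python
# Sketch only: map a ClusterAuthentication value to a distributed.Security
# object and probe the scheduler, following the isinstance(TLSAuthentication)
# pattern used by the dask helpers touched in this diff.
import distributed
from models_library.clusters import ClusterAuthentication, TLSAuthentication


def _security_from_auth(authentication: ClusterAuthentication) -> distributed.Security:
    # Only TLSAuthentication carries certificates; NoAuthentication falls back
    # to the default (no-op) Security object.
    if isinstance(authentication, TLSAuthentication):
        return distributed.Security(
            tls_ca_file=f"{authentication.tls_ca_file}",
            tls_client_cert=f"{authentication.tls_client_cert}",
            tls_client_key=f"{authentication.tls_client_key}",
            require_encryption=True,
        )
    return distributed.Security()


async def _can_connect(url: str, authentication: ClusterAuthentication) -> bool:
    # A short timeout keeps the probe cheap; any connection or timeout error
    # means the scheduler is unreachable with the given credentials.
    try:
        async with distributed.Client(
            url,
            asynchronous=True,
            timeout="5",
            security=_security_from_auth(authentication),
        ):
            return True
    except (OSError, TimeoutError):
        return False
```

For example, `await _can_connect("tls://dask-scheduler:8786", tls_auth)` (placeholder address) only succeeds if the scheduler accepts the presented client certificate, while passing `NoAuthentication()` uses the default `Security` object — the same two cases exercised by the `_authentication_types` parametrization in the tests above.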
@@ -109,7 +109,7 @@ async def test_list_unrunnable_tasks_with_no_workers( async def test_list_unrunnable_tasks( scheduler_url: AnyUrl, - scheduler_authentication: InternalClusterAuthentication, + scheduler_authentication: ClusterAuthentication, create_dask_task: Callable[[DaskTaskResources], distributed.Future], ): # we have nothing running now @@ -131,7 +131,7 @@ async def test_list_unrunnable_tasks( async def test_list_processing_tasks( scheduler_url: AnyUrl, - scheduler_authentication: InternalClusterAuthentication, + scheduler_authentication: ClusterAuthentication, dask_spec_cluster_client: distributed.Client, ): def _add_fct(x: int, y: int) -> int: @@ -190,7 +190,7 @@ def fake_ec2_instance_data_with_invalid_ec2_name( async def test_get_worker_still_has_results_in_memory_with_invalid_ec2_name_raises( scheduler_url: AnyUrl, - scheduler_authentication: InternalClusterAuthentication, + scheduler_authentication: ClusterAuthentication, fake_ec2_instance_data_with_invalid_ec2_name: EC2InstanceData, ): with pytest.raises(Ec2InvalidDnsNameError): @@ -216,7 +216,7 @@ async def test_get_worker_still_has_results_in_memory_with_no_workers_raises( async def test_get_worker_still_has_results_in_memory_with_invalid_worker_host_raises( scheduler_url: AnyUrl, - scheduler_authentication: InternalClusterAuthentication, + scheduler_authentication: ClusterAuthentication, fake_ec2_instance_data: Callable[..., EC2InstanceData], ): ec2_instance_data = fake_ec2_instance_data() @@ -229,7 +229,7 @@ async def test_get_worker_still_has_results_in_memory_with_invalid_worker_host_r @pytest.mark.parametrize("fct_shall_err", [True, False], ids=str) async def test_get_worker_still_has_results_in_memory( scheduler_url: AnyUrl, - scheduler_authentication: InternalClusterAuthentication, + scheduler_authentication: ClusterAuthentication, dask_spec_cluster_client: distributed.Client, fake_localhost_ec2_instance_data: EC2InstanceData, fct_shall_err: bool, @@ -291,7 +291,7 @@ def _add_fct(x: int, y: int) -> int: async def test_worker_used_resources_with_invalid_ec2_name_raises( scheduler_url: AnyUrl, - scheduler_authentication: InternalClusterAuthentication, + scheduler_authentication: ClusterAuthentication, fake_ec2_instance_data_with_invalid_ec2_name: EC2InstanceData, ): with pytest.raises(Ec2InvalidDnsNameError): @@ -317,7 +317,7 @@ async def test_worker_used_resources_with_no_workers_raises( async def test_worker_used_resources_with_invalid_worker_host_raises( scheduler_url: AnyUrl, - scheduler_authentication: InternalClusterAuthentication, + scheduler_authentication: ClusterAuthentication, fake_ec2_instance_data: Callable[..., EC2InstanceData], ): ec2_instance_data = fake_ec2_instance_data() @@ -329,7 +329,7 @@ async def test_worker_used_resources_with_invalid_worker_host_raises( async def test_worker_used_resources( scheduler_url: AnyUrl, - scheduler_authentication: InternalClusterAuthentication, + scheduler_authentication: ClusterAuthentication, dask_spec_cluster_client: distributed.Client, fake_localhost_ec2_instance_data: EC2InstanceData, ): diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py index c4f656c68fb..32b5cdae9d1 100644 --- a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py @@ -10,7 +10,7 @@ LogLevel, VersionTag, ) -from models_library.clusters import 
InternalClusterAuthentication +from models_library.clusters import ClusterAuthentication from pydantic import ( AliasChoices, Field, @@ -347,7 +347,7 @@ class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): ) CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: ( - InternalClusterAuthentication + ClusterAuthentication ) = Field( ..., description="defines the authentication of the clusters created via clusters-keeper (can be None or TLS)", diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py index af1d0df0e66..0641e812777 100644 --- a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py @@ -3,7 +3,7 @@ from typing import Any, Final import distributed -from models_library.clusters import InternalClusterAuthentication, TLSAuthentication +from models_library.clusters import ClusterAuthentication, TLSAuthentication from pydantic import AnyUrl _logger = logging.getLogger(__name__) @@ -21,9 +21,7 @@ async def _wrap_client_async_routine( _CONNECTION_TIMEOUT: Final[str] = "5" -async def ping_scheduler( - url: AnyUrl, authentication: InternalClusterAuthentication -) -> bool: +async def ping_scheduler(url: AnyUrl, authentication: ClusterAuthentication) -> bool: try: security = distributed.Security() if isinstance(authentication, TLSAuthentication): @@ -47,9 +45,7 @@ async def ping_scheduler( return False -async def is_scheduler_busy( - url: AnyUrl, authentication: InternalClusterAuthentication -) -> bool: +async def is_scheduler_busy(url: AnyUrl, authentication: ClusterAuthentication) -> bool: security = distributed.Security() if isinstance(authentication, TLSAuthentication): security = distributed.Security( diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py index a6ecfdb8189..5a9402ba093 100644 --- a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py @@ -14,7 +14,7 @@ ClusterState, OnDemandCluster, ) -from models_library.clusters import InternalClusterAuthentication, TLSAuthentication +from models_library.clusters import ClusterAuthentication, TLSAuthentication from models_library.users import UserID from models_library.wallets import WalletID from types_aiobotocore_ec2.literals import InstanceStateNameType @@ -190,7 +190,7 @@ def create_cluster_from_ec2_instance( wallet_id: WalletID | None, *, dask_scheduler_ready: bool, - cluster_auth: InternalClusterAuthentication, + cluster_auth: ClusterAuthentication, max_cluster_start_time: datetime.timedelta, ) -> OnDemandCluster: return OnDemandCluster( diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py index 266557358b7..6dc6a452fe4 100644 --- a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py @@ -1,6 +1,6 @@ from aws_library.ec2 import EC2InstanceData from fastapi import FastAPI -from models_library.clusters import InternalClusterAuthentication +from models_library.clusters import ClusterAuthentication from pydantic import AnyUrl, TypeAdapter from ..core.settings 
import get_application_settings @@ -13,7 +13,7 @@ def get_scheduler_url(ec2_instance: EC2InstanceData) -> AnyUrl: return url -def get_scheduler_auth(app: FastAPI) -> InternalClusterAuthentication: +def get_scheduler_auth(app: FastAPI) -> ClusterAuthentication: return get_application_settings( app ).CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH diff --git a/services/clusters-keeper/tests/unit/test_modules_dask.py b/services/clusters-keeper/tests/unit/test_modules_dask.py index 7f0408d7057..97c831ea789 100644 --- a/services/clusters-keeper/tests/unit/test_modules_dask.py +++ b/services/clusters-keeper/tests/unit/test_modules_dask.py @@ -8,7 +8,7 @@ from distributed import SpecCluster from faker import Faker from models_library.clusters import ( - InternalClusterAuthentication, + ClusterAuthentication, NoAuthentication, TLSAuthentication, ) @@ -34,11 +34,13 @@ "authentication", _authentication_types, ids=lambda p: f"authentication-{p.type}" ) async def test_ping_scheduler_non_existing_scheduler( - faker: Faker, authentication: InternalClusterAuthentication + faker: Faker, authentication: ClusterAuthentication ): assert ( await ping_scheduler( - TypeAdapter(AnyUrl).validate_python(f"tcp://{faker.ipv4()}:{faker.port_number()}"), + TypeAdapter(AnyUrl).validate_python( + f"tcp://{faker.ipv4()}:{faker.port_number()}" + ), authentication, ) is False @@ -48,7 +50,9 @@ async def test_ping_scheduler_non_existing_scheduler( async def test_ping_scheduler(dask_spec_local_cluster: SpecCluster): assert ( await ping_scheduler( - TypeAdapter(AnyUrl).validate_python(dask_spec_local_cluster.scheduler_address), + TypeAdapter(AnyUrl).validate_python( + dask_spec_local_cluster.scheduler_address + ), NoAuthentication(), ) is True @@ -71,7 +75,9 @@ async def test_is_scheduler_busy( dask_spec_cluster_client: distributed.Client, ): # nothing runs right now - scheduler_address = TypeAdapter(AnyUrl).validate_python(dask_spec_local_cluster.scheduler_address) + scheduler_address = TypeAdapter(AnyUrl).validate_python( + dask_spec_local_cluster.scheduler_address + ) assert await is_scheduler_busy(scheduler_address, NoAuthentication()) is False _SLEEP_TIME = 5 diff --git a/services/clusters-keeper/tests/unit/test_utils_clusters.py b/services/clusters-keeper/tests/unit/test_utils_clusters.py index 55190cb46a1..96983dd34d5 100644 --- a/services/clusters-keeper/tests/unit/test_utils_clusters.py +++ b/services/clusters-keeper/tests/unit/test_utils_clusters.py @@ -19,7 +19,7 @@ from faker import Faker from models_library.api_schemas_clusters_keeper.clusters import ClusterState from models_library.clusters import ( - InternalClusterAuthentication, + ClusterAuthentication, NoAuthentication, TLSAuthentication, ) @@ -55,7 +55,7 @@ def ec2_boot_specs(app_settings: ApplicationSettings) -> EC2InstanceBootSpecific @pytest.fixture(params=[TLSAuthentication, NoAuthentication]) def backend_cluster_auth( request: pytest.FixtureRequest, -) -> InternalClusterAuthentication: +) -> ClusterAuthentication: return request.param @@ -63,7 +63,7 @@ def backend_cluster_auth( def app_environment( app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, - backend_cluster_auth: InternalClusterAuthentication, + backend_cluster_auth: ClusterAuthentication, ) -> EnvVarsDict: return app_environment | setenvs_from_dict( monkeypatch, @@ -295,7 +295,7 @@ def test_create_cluster_from_ec2_instance( faker: Faker, ec2_state: InstanceStateNameType, expected_cluster_state: ClusterState, - authentication: InternalClusterAuthentication, + 
authentication: ClusterAuthentication, ): instance_data = fake_ec2_instance_data(state=ec2_state) cluster_instance = create_cluster_from_ec2_instance( diff --git a/services/dask-sidecar/README.md b/services/dask-sidecar/README.md index 8abc94a9dbc..2a3f5ec254b 100644 --- a/services/dask-sidecar/README.md +++ b/services/dask-sidecar/README.md @@ -13,42 +13,3 @@ source .venv/bin/activate cd services/api-service make install-dev ``` - -## Deploy on a specific cluster - -1. define label on docker engine - - ```bash - sudo nano /etc/docker/daemon.json - ``` - - ```json - { - "labels":["cluster_id=MYCLUSTERUNIQUEIDENTIFIER"] - } - ``` - -2. restart the docker engine - - ```bash - sudo service docker restart - ``` - -3. verify - - ```bash - docker info --format "{{.Labels}}" - ``` - - -## Dev notes - -### 2021.08.24 - - - sidecar sets up its own available resources on start - - sidecar checks local docker engine labels to get its cluster_id - -### 2021.06.10 - - - installed from dynamic-sidecar in current repo, but could have opted for taking sidecar image as a base. The latter would complicate in-host development though, so we start commando here. - - can be started as scheduler or worker. TODO: scheduler does not need to mount anything diff --git a/services/dask-sidecar/requirements/_base.in b/services/dask-sidecar/requirements/_base.in index 2352652e4a0..9571b106d4f 100644 --- a/services/dask-sidecar/requirements/_base.in +++ b/services/dask-sidecar/requirements/_base.in @@ -22,7 +22,6 @@ aiodocker aiofiles blosc # for compression dask[distributed, diagnostics] -dask-gateway # needed for the osparc-dask-gateway to preload the module fsspec[http, s3] # sub types needed as we acces http and s3 here lz4 # for compression pydantic diff --git a/services/dask-sidecar/requirements/_base.txt b/services/dask-sidecar/requirements/_base.txt index e3cd751062d..7cc0de4aa6d 100644 --- a/services/dask-sidecar/requirements/_base.txt +++ b/services/dask-sidecar/requirements/_base.txt @@ -36,7 +36,6 @@ aiohttp==3.9.5 # -c requirements/../../../requirements/constraints.txt # aiobotocore # aiodocker - # dask-gateway # fsspec # s3fs aioitertools==0.11.0 @@ -94,7 +93,6 @@ charset-normalizer==3.3.2 click==8.1.7 # via # dask - # dask-gateway # distributed # typer cloudpickle==3.0.0 @@ -108,10 +106,7 @@ dask==2024.5.1 # -c requirements/constraints.txt # -r requirements/../../../packages/dask-task-models-library/requirements/_base.in # -r requirements/_base.in - # dask-gateway # distributed -dask-gateway==2024.1.0 - # via -r requirements/_base.in deprecated==1.2.14 # via # opentelemetry-api @@ -119,9 +114,7 @@ deprecated==1.2.14 # opentelemetry-exporter-otlp-proto-http # opentelemetry-semantic-conventions distributed==2024.5.1 - # via - # dask - # dask-gateway + # via dask dnspython==2.6.1 # via email-validator email-validator==2.1.1 @@ -411,7 +404,6 @@ pyyaml==6.0.1 # -r requirements/../../../packages/service-library/requirements/_base.in # bokeh # dask - # dask-gateway # distributed redis==5.0.4 # via @@ -436,7 +428,6 @@ redis==5.0.4 # -r requirements/../../../packages/service-library/requirements/_base.in referencing==0.29.3 # via - # -c requirements/../../../packages/service-library/requirements/./constraints.txt # jsonschema # jsonschema-specifications repro-zipfile==0.3.1 @@ -480,7 +471,6 @@ toolz==0.12.1 tornado==6.4 # via # bokeh - # dask-gateway # distributed tqdm==4.66.4 # via -r requirements/../../../packages/service-library/requirements/_base.in @@ -535,6 +525,7 @@ xyzservices==2024.4.0 # via bokeh 
yarl==1.9.4 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq diff --git a/services/director-v2/openapi.json b/services/director-v2/openapi.json index cdd6d4eca05..c1b38416efe 100644 --- a/services/director-v2/openapi.json +++ b/services/director-v2/openapi.json @@ -118,25 +118,25 @@ "operationId": "get_computation_v2_computations__project_id__get", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Project Id" - }, - "name": "project_id", - "in": "path" + } }, { + "name": "user_id", + "in": "query", "required": true, "schema": { "type": "integer", "exclusiveMinimum": true, "title": "User Id", "minimum": 0 - }, - "name": "user_id", - "in": "query" + } } ], "responses": { @@ -170,25 +170,25 @@ "operationId": "delete_computation_v2_computations__project_id__delete", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Project Id" - }, - "name": "project_id", - "in": "path" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ComputationDelete" } } - }, - "required": true + } }, "responses": { "204": { @@ -216,25 +216,25 @@ "operationId": "stop_computation_v2_computations__project_id__stop_post", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Project Id" - }, - "name": "project_id", - "in": "path" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ComputationStop" } } - }, - "required": true + } }, "responses": { "202": { @@ -270,25 +270,25 @@ "operationId": "get_all_tasks_log_files_v2_computations__project_id__tasks___logfile_get", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Project Id" - }, - "name": "project_id", - "in": "path" + } }, { + "name": "user_id", + "in": "query", "required": true, "schema": { "type": "integer", "exclusiveMinimum": true, "title": "User Id", "minimum": 0 - }, - "name": "user_id", - "in": "query" + } } ], "responses": { @@ -297,10 +297,10 @@ "content": { "application/json": { "schema": { + "type": "array", "items": { "$ref": "#/components/schemas/TaskLogFileGet" }, - "type": "array", "title": "Response Get All Tasks Log Files V2 Computations Project Id Tasks Logfile Get" } } @@ -329,35 +329,35 @@ "operationId": "get_task_log_file_v2_computations__project_id__tasks__node_uuid__logfile_get", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Project Id" - }, - "name": "project_id", - "in": "path" + } }, { + "name": "node_uuid", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } }, { + "name": "user_id", + "in": "query", "required": true, "schema": { "type": "integer", "exclusiveMinimum": true, "title": "User Id", "minimum": 0 - }, - "name": "user_id", - "in": "query" + } } ], "responses": { @@ -393,25 +393,25 @@ "operationId": "get_batch_tasks_outputs_v2_computations__project_id__tasks___outputs_batchGet_post", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Project Id" 
- }, - "name": "project_id", - "in": "path" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/TasksSelection" } } - }, - "required": true + } }, "responses": { "200": { @@ -449,25 +449,39 @@ "operationId": "list_tracked_dynamic_services_v2_dynamic_services_get", "parameters": [ { + "name": "user_id", + "in": "query", "required": false, "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "User Id" + } }, { + "name": "project_id", + "in": "query", "required": false, "schema": { - "type": "string", - "format": "uuid", + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], "title": "Project Id" - }, - "name": "project_id", - "in": "query" + } } ], "responses": { @@ -476,10 +490,10 @@ "content": { "application/json": { "schema": { + "type": "array", "items": { "$ref": "#/components/schemas/RunningDynamicServiceDetails" }, - "type": "array", "title": "Response List Tracked Dynamic Services V2 Dynamic Services Get" } } @@ -505,42 +519,42 @@ "operationId": "create_dynamic_service_v2_dynamic_services_post", "parameters": [ { + "name": "x-dynamic-sidecar-request-dns", + "in": "header", "required": true, "schema": { "type": "string", "title": "X-Dynamic-Sidecar-Request-Dns" - }, - "name": "x-dynamic-sidecar-request-dns", - "in": "header" + } }, { + "name": "x-dynamic-sidecar-request-scheme", + "in": "header", "required": true, "schema": { "type": "string", "title": "X-Dynamic-Sidecar-Request-Scheme" - }, - "name": "x-dynamic-sidecar-request-scheme", - "in": "header" + } }, { + "name": "x-simcore-user-agent", + "in": "header", "required": true, "schema": { "type": "string", "title": "X-Simcore-User-Agent" - }, - "name": "x-simcore-user-agent", - "in": "header" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/DynamicServiceCreate" } } - }, - "required": true + } }, "responses": { "201": { @@ -575,14 +589,14 @@ "operationId": "get_dynamic_sidecar_status_v2_dynamic_services__node_uuid__get", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } } ], "responses": { @@ -616,24 +630,31 @@ "operationId": "stop_dynamic_service_v2_dynamic_services__node_uuid__delete", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } }, { + "name": "can_save", + "in": "query", "required": false, "schema": { - "type": "boolean", - "title": "Can Save", - "default": true - }, - "name": "can_save", - "in": "query" + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": true, + "title": "Can Save" + } } ], "responses": { @@ -662,25 +683,25 @@ "operationId": "service_retrieve_data_on_ports_v2_dynamic_services__node_uuid__retrieve_post", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { "$ref": 
"#/components/schemas/RetrieveDataIn" } } - }, - "required": true + } }, "responses": { "200": { @@ -715,14 +736,14 @@ "operationId": "service_restart_containers_v2_dynamic_services__node_uuid__restart_post", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } } ], "responses": { @@ -751,14 +772,14 @@ "operationId": "update_projects_networks_v2_dynamic_services_projects__project_id____networks_patch", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Project Id" - }, - "name": "project_id", - "in": "path" + } } ], "responses": { @@ -787,24 +808,24 @@ "operationId": "get_project_inactivity_v2_dynamic_services_projects__project_id__inactivity_get", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Project Id" - }, - "name": "project_id", - "in": "path" + } }, { + "name": "max_inactivity_seconds", + "in": "query", "required": true, "schema": { "type": "number", "minimum": 0.0, "title": "Max Inactivity Seconds" - }, - "name": "max_inactivity_seconds", - "in": "query" + } } ], "responses": { @@ -831,40 +852,38 @@ } } }, - "/v2/clusters": { - "get": { + "/v2/dynamic_scheduler/services/{node_uuid}/observation": { + "patch": { "tags": [ - "clusters" + "dynamic scheduler" ], - "summary": "Lists clusters for user", - "operationId": "list_clusters_v2_clusters_get", + "summary": "Enable/disable observation of the service", + "operationId": "update_service_observation_v2_dynamic_scheduler_services__node_uuid__observation_patch", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "type": "string", + "format": "uuid", + "title": "Node Uuid" + } } ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "items": { - "$ref": "#/components/schemas/ClusterGet" - }, - "type": "array", - "title": "Response List Clusters V2 Clusters Get" - } + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ObservationItem" } } + } + }, + "responses": { + "204": { + "description": "Successful Response" }, "422": { "description": "Validation Error", @@ -877,47 +896,42 @@ } } } - }, - "post": { + } + }, + "/v2/dynamic_scheduler/services/{node_uuid}/containers": { + "delete": { "tags": [ - "clusters" + "dynamic scheduler" ], - "summary": "Create a new cluster for a user", - "operationId": "create_cluster_v2_clusters_post", + "summary": "Removes the service's user services", + "operationId": "delete_service_containers_v2_dynamic_scheduler_services__node_uuid__containers_delete", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "type": "string", + "format": "uuid", + "title": "Node Uuid" + } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterCreate" - } - } - }, - "required": true - }, "responses": { - "201": { + "202": { "description": "Successful Response", "content": { "application/json": { 
"schema": { - "$ref": "#/components/schemas/ClusterGet" + "type": "string", + "title": "Response Delete Service Containers V2 Dynamic Scheduler Services Node Uuid Containers Delete" } } } }, + "409": { + "description": "Task already running, cannot start a new one" + }, "422": { "description": "Validation Error", "content": { @@ -931,55 +945,23 @@ } } }, - "/v2/clusters/default": { - "get": { - "tags": [ - "clusters" - ], - "summary": "Returns the default cluster", - "operationId": "get_default_cluster_v2_clusters_default_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterGet" - } - } - } - } - } - } - }, - "/v2/clusters/{cluster_id}": { + "/v2/dynamic_scheduler/services/{node_uuid}/state": { "get": { "tags": [ - "clusters" + "dynamic scheduler" ], - "summary": "Get one cluster for user", - "operationId": "get_cluster_v2_clusters__cluster_id__get", + "summary": "Returns the internals of the scheduler for the given service", + "operationId": "get_service_state_v2_dynamic_scheduler_services__node_uuid__state_get", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "type": "integer", - "minimum": 0, - "title": "Cluster Id" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "type": "string", + "format": "uuid", + "title": "Node Uuid" + } } ], "responses": { @@ -988,7 +970,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ClusterGet" + "$ref": "#/components/schemas/SchedulerData" } } } @@ -1004,461 +986,25 @@ } } } - }, - "delete": { + } + }, + "/v2/dynamic_scheduler/services/{node_uuid}/state:save": { + "post": { "tags": [ - "clusters" + "dynamic scheduler" ], - "summary": "Remove a cluster for user", - "operationId": "delete_cluster_v2_clusters__cluster_id__delete", + "summary": "Starts the saving of the state for the service", + "operationId": "save_service_state_v2_dynamic_scheduler_services__node_uuid__state_save_post", "parameters": [ { - "required": true, - "schema": { - "type": "integer", - "minimum": 0, - "title": "Cluster Id" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "patch": { - "tags": [ - "clusters" - ], - "summary": "Modify a cluster for user", - "operationId": "update_cluster_v2_clusters__cluster_id__patch", - "parameters": [ - { - "required": true, - "schema": { - "type": "integer", - "minimum": 0, - "title": "Cluster Id" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterPatch" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - 
"schema": { - "$ref": "#/components/schemas/ClusterGet" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/clusters/default/details": { - "get": { - "tags": [ - "clusters" - ], - "summary": "Returns the cluster details", - "operationId": "get_default_cluster_details_v2_clusters_default_details_get", - "parameters": [ - { - "required": true, - "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterDetailsGet" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/clusters/{cluster_id}/details": { - "get": { - "tags": [ - "clusters" - ], - "summary": "Returns the cluster details", - "operationId": "get_cluster_details_v2_clusters__cluster_id__details_get", - "parameters": [ - { - "required": true, - "schema": { - "type": "integer", - "minimum": 0, - "title": "Cluster Id" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterDetailsGet" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/clusters:ping": { - "post": { - "tags": [ - "clusters" - ], - "summary": "Test cluster connection", - "operationId": "test_cluster_connection_v2_clusters_ping_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterPing" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/clusters/default:ping": { - "post": { - "tags": [ - "clusters" - ], - "summary": "Test cluster connection", - "operationId": "test_default_cluster_connection_v2_clusters_default_ping_post", - "responses": { - "204": { - "description": "Successful Response" - } - } - } - }, - "/v2/clusters/{cluster_id}:ping": { - "post": { - "tags": [ - "clusters" - ], - "summary": "Test cluster connection", - "operationId": "test_specific_cluster_connection_v2_clusters__cluster_id__ping_post", - "parameters": [ - { - "required": true, - "schema": { - "type": "integer", - "minimum": 0, - "title": "Cluster Id" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "type": "integer", - "exclusiveMinimum": true, - "title": "User Id", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/observation": { - "patch": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Enable/disable observation of the service", - "operationId": "update_service_observation_v2_dynamic_scheduler_services__node_uuid__observation_patch", - "parameters": [ - { - "required": true, - "schema": { - "type": "string", - "format": "uuid", - "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ObservationItem" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/containers": { - "delete": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Removes the service's user services", - "operationId": "delete_service_containers_v2_dynamic_scheduler_services__node_uuid__containers_delete", - "parameters": [ - { - "required": true, - "schema": { - "type": "string", - "format": "uuid", - "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" - } - ], - "responses": { - "202": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "type": "string", - "title": "Response Delete Service Containers V2 Dynamic Scheduler Services Node Uuid Containers Delete" - } - } - } - }, - "409": { - "description": "Task already running, cannot start a new one" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/state": { - "get": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Returns the internals of the scheduler for the given service", - "operationId": "get_service_state_v2_dynamic_scheduler_services__node_uuid__state_get", - "parameters": [ - { - "required": true, - "schema": { - "type": "string", - "format": "uuid", - "title": "Node Uuid" - }, "name": "node_uuid", - "in": "path" - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SchedulerData" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/state:save": { - "post": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Starts the saving of the state for the service", - "operationId": "save_service_state_v2_dynamic_scheduler_services__node_uuid__state_save_post", - "parameters": [ - { + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } } ], "responses": { @@ -1498,14 +1044,14 @@ "operationId": "push_service_outputs_v2_dynamic_scheduler_services__node_uuid__outputs_push_post", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } } ], "responses": { @@ -1545,14 +1091,14 @@ "operationId": 
"delete_service_docker_resources_v2_dynamic_scheduler_services__node_uuid__docker_resources_delete", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } } ], "responses": { @@ -1592,14 +1138,14 @@ "operationId": "free_reserved_disk_space_v2_dynamic_scheduler_services__node_uuid__disk_reserved_free_post", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { "type": "string", "format": "uuid", "title": "Node Uuid" - }, - "name": "node_uuid", - "in": "path" + } } ], "responses": { @@ -1634,11 +1180,18 @@ "title": "Version" }, "released": { - "additionalProperties": { - "type": "string", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$" - }, - "type": "object", + "anyOf": [ + { + "additionalProperties": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$" + }, + "type": "object" + }, + { + "type": "null" + } + ], "title": "Released", "description": "Maps every route's path tag with a released version" } @@ -1651,11 +1204,11 @@ "title": "BaseMeta", "example": { "name": "simcore_service_foo", - "version": "2.4.45", "released": { "v1": "1.3.4", "v2": "2.4.45" - } + }, + "version": "2.4.45" } }, "BootMode": { @@ -1665,351 +1218,44 @@ "GPU", "MPI" ], - "title": "BootMode", - "description": "An enumeration." + "title": "BootMode" }, "CallbacksMapping": { "properties": { - "metrics": { - "allOf": [ - { - "$ref": "#/components/schemas/UserServiceCommand" - } - ], - "title": "Metrics", - "description": "command to recover prometheus metrics from a specific user service" - }, - "before_shutdown": { - "items": { - "$ref": "#/components/schemas/UserServiceCommand" - }, - "type": "array", - "title": "Before Shutdown", - "description": "commands to run before shutting down the user servicescommands get executed first to last, multiple commands for the sameuser services are allowed" - }, - "inactivity": { - "allOf": [ - { - "$ref": "#/components/schemas/UserServiceCommand" - } - ], - "title": "Inactivity", - "description": "command used to figure out for how much time the user service(s) were inactive for" - } - }, - "additionalProperties": false, - "type": "object", - "title": "CallbacksMapping" - }, - "ClusterAccessRights": { - "properties": { - "read": { - "type": "boolean", - "title": "Read", - "description": "allows to run pipelines on that cluster" - }, - "write": { - "type": "boolean", - "title": "Write", - "description": "allows to modify the cluster" - }, - "delete": { - "type": "boolean", - "title": "Delete", - "description": "allows to delete a cluster" - } - }, - "additionalProperties": false, - "type": "object", - "required": [ - "read", - "write", - "delete" - ], - "title": "ClusterAccessRights" - }, - "ClusterCreate": { - "properties": { - "name": { - "type": "string", - "title": "Name", - "description": "The human readable name of the cluster" - }, - "description": { - "type": "string", - "title": "Description" - }, - "type": { - "$ref": "#/components/schemas/ClusterTypeInModel" - }, - "owner": { - "type": "integer", - "exclusiveMinimum": true, - "title": "Owner", - "minimum": 0 - }, - "thumbnail": { - "type": "string", - "maxLength": 2083, - 
"minLength": 1, - "format": "uri", - "title": "Thumbnail", - "description": "url to the image describing this cluster" - }, - "endpoint": { - "type": "string", - "maxLength": 65536, - "minLength": 1, - "format": "uri", - "title": "Endpoint" - }, - "authentication": { - "anyOf": [ - { - "$ref": "#/components/schemas/SimpleAuthentication" - }, - { - "$ref": "#/components/schemas/KerberosAuthentication" - }, - { - "$ref": "#/components/schemas/JupyterHubTokenAuthentication" - } - ], - "title": "Authentication" - }, - "accessRights": { - "additionalProperties": { - "$ref": "#/components/schemas/ClusterAccessRights" - }, - "type": "object", - "title": "Accessrights" - } - }, - "additionalProperties": false, - "type": "object", - "required": [ - "name", - "type", - "endpoint", - "authentication" - ], - "title": "ClusterCreate" - }, - "ClusterDetailsGet": { - "properties": { - "scheduler": { - "allOf": [ - { - "$ref": "#/components/schemas/Scheduler" - } - ], - "title": "Scheduler", - "description": "This contains dask scheduler information given by the underlying dask library" - }, - "dashboard_link": { - "type": "string", - "maxLength": 65536, - "minLength": 1, - "format": "uri", - "title": "Dashboard Link", - "description": "Link to this scheduler's dashboard" - } - }, - "type": "object", - "required": [ - "scheduler", - "dashboard_link" - ], - "title": "ClusterDetailsGet" - }, - "ClusterGet": { - "properties": { - "name": { - "type": "string", - "title": "Name", - "description": "The human readable name of the cluster" - }, - "description": { - "type": "string", - "title": "Description" - }, - "type": { - "$ref": "#/components/schemas/ClusterTypeInModel" - }, - "owner": { - "type": "integer", - "exclusiveMinimum": true, - "title": "Owner", - "minimum": 0 - }, - "thumbnail": { - "type": "string", - "maxLength": 2083, - "minLength": 1, - "format": "uri", - "title": "Thumbnail", - "description": "url to the image describing this cluster" - }, - "endpoint": { - "type": "string", - "maxLength": 65536, - "minLength": 1, - "format": "uri", - "title": "Endpoint" - }, - "authentication": { - "anyOf": [ - { - "$ref": "#/components/schemas/SimpleAuthentication" - }, - { - "$ref": "#/components/schemas/KerberosAuthentication" - }, - { - "$ref": "#/components/schemas/JupyterHubTokenAuthentication" - }, - { - "$ref": "#/components/schemas/NoAuthentication" - }, - { - "$ref": "#/components/schemas/TLSAuthentication" - } - ], - "title": "Authentication", - "description": "Dask gateway authentication" - }, - "accessRights": { - "additionalProperties": { - "$ref": "#/components/schemas/ClusterAccessRights" - }, - "type": "object", - "title": "Accessrights" - }, - "id": { - "type": "integer", - "minimum": 0, - "title": "Id", - "description": "The cluster ID" - } - }, - "additionalProperties": false, - "type": "object", - "required": [ - "name", - "type", - "owner", - "endpoint", - "authentication", - "id" - ], - "title": "ClusterGet" - }, - "ClusterPatch": { - "properties": { - "name": { - "type": "string", - "title": "Name" - }, - "description": { - "type": "string", - "title": "Description" - }, - "type": { - "$ref": "#/components/schemas/ClusterTypeInModel" - }, - "owner": { - "type": "integer", - "exclusiveMinimum": true, - "title": "Owner", - "minimum": 0 - }, - "thumbnail": { - "type": "string", - "maxLength": 2083, - "minLength": 1, - "format": "uri", - "title": "Thumbnail" - }, - "endpoint": { - "type": "string", - "maxLength": 65536, - "minLength": 1, - "format": "uri", - "title": "Endpoint" - }, - 
"authentication": { - "anyOf": [ - { - "$ref": "#/components/schemas/SimpleAuthentication" - }, - { - "$ref": "#/components/schemas/KerberosAuthentication" - }, - { - "$ref": "#/components/schemas/JupyterHubTokenAuthentication" - } - ], - "title": "Authentication" - }, - "accessRights": { - "additionalProperties": { - "$ref": "#/components/schemas/ClusterAccessRights" - }, - "type": "object", - "title": "Accessrights" - } - }, - "additionalProperties": false, - "type": "object", - "title": "ClusterPatch" - }, - "ClusterPing": { - "properties": { - "endpoint": { - "type": "string", - "maxLength": 65536, - "minLength": 1, - "format": "uri", - "title": "Endpoint" - }, - "authentication": { + "metrics": { "anyOf": [ { - "$ref": "#/components/schemas/SimpleAuthentication" - }, - { - "$ref": "#/components/schemas/KerberosAuthentication" + "$ref": "#/components/schemas/UserServiceCommand" }, { - "$ref": "#/components/schemas/JupyterHubTokenAuthentication" - }, + "type": "null" + } + ], + "description": "command to recover prometheus metrics from a specific user service" + }, + "before_shutdown": { + "items": { + "$ref": "#/components/schemas/UserServiceCommand" + }, + "type": "array", + "title": "Before Shutdown", + "description": "commands to run before shutting down the user servicescommands get executed first to last, multiple commands for the sameuser services are allowed" + }, + "inactivity": { + "anyOf": [ { - "$ref": "#/components/schemas/NoAuthentication" + "$ref": "#/components/schemas/UserServiceCommand" }, { - "$ref": "#/components/schemas/TLSAuthentication" + "type": "null" } ], - "title": "Authentication", - "description": "Dask gateway authentication" + "description": "command used to figure out for how much time the user service(s) were inactive for" } }, + "additionalProperties": false, "type": "object", - "required": [ - "endpoint", - "authentication" - ], - "title": "ClusterPing" - }, - "ClusterTypeInModel": { - "type": "string", - "enum": [ - "AWS", - "ON_PREMISE", - "ON_DEMAND" - ], - "title": "ClusterTypeInModel", - "description": "An enumeration." 
+ "title": "CallbacksMapping" }, "ComputationCreate": { "properties": { @@ -2025,7 +1271,14 @@ "title": "Project Id" }, "start_pipeline": { - "type": "boolean", + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], "title": "Start Pipeline", "description": "if True the computation pipeline will start right away", "default": false @@ -2035,26 +1288,34 @@ "title": "Product Name" }, "subgraph": { - "items": { - "type": "string", - "format": "uuid" - }, - "type": "array", + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], "title": "Subgraph", "description": "An optional set of nodes that must be executed, if empty the whole pipeline is executed" }, "force_restart": { - "type": "boolean", + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], "title": "Force Restart", "description": "if True will force re-running all dependent nodes", "default": false }, - "cluster_id": { - "type": "integer", - "minimum": 0, - "title": "Cluster Id", - "description": "the computation shall use the cluster described by its id, 0 is the default cluster" - }, "simcore_user_agent": { "type": "string", "title": "Simcore User Agent", @@ -2063,16 +1324,18 @@ "use_on_demand_clusters": { "type": "boolean", "title": "Use On Demand Clusters", - "description": "if True, a cluster will be created as necessary (wallet_id cannot be None, and cluster_id must be None)", + "description": "if True, a cluster will be created as necessary (wallet_id cannot be None)", "default": false }, "wallet_info": { - "allOf": [ + "anyOf": [ + { + "$ref": "#/components/schemas/WalletInfo-Input" + }, { - "$ref": "#/components/schemas/WalletInfo" + "type": "null" } ], - "title": "Wallet Info", "description": "contains information about the wallet used to bill the running service" } }, @@ -2093,7 +1356,14 @@ "minimum": 0 }, "force": { - "type": "boolean", + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], "title": "Force", "description": "if True then the pipeline will be removed even if it is running", "default": false @@ -2114,71 +1384,96 @@ "description": "the id of the computation task" }, "state": { - "allOf": [ - { - "$ref": "#/components/schemas/RunningState" - } - ], + "$ref": "#/components/schemas/RunningState", "description": "the state of the computational task" }, "result": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Result", "description": "the result of the computational task" }, "pipeline_details": { - "allOf": [ - { - "$ref": "#/components/schemas/PipelineDetails" - } - ], - "title": "Pipeline Details", + "$ref": "#/components/schemas/PipelineDetails", "description": "the details of the generated pipeline" }, "iteration": { - "type": "integer", - "exclusiveMinimum": true, + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], "title": "Iteration", - "description": "the iteration id of the computation task (none if no task ran yet)", - "minimum": 0 - }, - "cluster_id": { - "type": "integer", - "minimum": 0, - "title": "Cluster Id", - "description": "the cluster on which the computaional task runs/ran (none if no task ran yet)" + "description": "the iteration id of the computation task (none if no task ran yet)" }, "started": { - "type": "string", - "format": "date-time", + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], "title": "Started", 
"description": "the timestamp when the computation was started or None if not started yet" }, "stopped": { - "type": "string", - "format": "date-time", + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], "title": "Stopped", "description": "the timestamp when the computation was stopped or None if not started nor stopped yet" }, "submitted": { - "type": "string", - "format": "date-time", + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], "title": "Submitted", "description": "task last modification timestamp or None if the there is no task" }, "url": { "type": "string", - "maxLength": 65536, "minLength": 1, "format": "uri", "title": "Url", "description": "the link where to get the status of the task" }, "stop_url": { - "type": "string", - "maxLength": 65536, - "minLength": 1, - "format": "uri", + "anyOf": [ + { + "type": "string", + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], "title": "Stop Url", "description": "the link where to stop the task" } @@ -2189,7 +1484,6 @@ "state", "pipeline_details", "iteration", - "cluster_id", "started", "stopped", "submitted", @@ -2215,85 +1509,156 @@ "ContainerState": { "properties": { "Status": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/Status2" + }, + { + "type": "null" } ], - "description": "String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\".\n", - "example": "running" + "description": "String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\".\n" }, "Running": { - "type": "boolean", + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], "title": "Running", - "description": "Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container's state is \"running\".\n", - "example": true + "description": "Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container's state is \"running\".\n" }, "Paused": { - "type": "boolean", + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], "title": "Paused", - "description": "Whether this container is paused.", - "example": false + "description": "Whether this container is paused." }, "Restarting": { - "type": "boolean", + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], "title": "Restarting", - "description": "Whether this container is restarting.", - "example": false + "description": "Whether this container is restarting." 
}, "OOMKilled": { - "type": "boolean", + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], "title": "Oomkilled", - "description": "Whether this container has been killed because it ran out of memory.\n", - "example": false + "description": "Whether this container has been killed because it ran out of memory.\n" }, "Dead": { - "type": "boolean", - "title": "Dead", - "example": false + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Dead" }, "Pid": { - "type": "integer", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], "title": "Pid", - "description": "The process ID of this container", - "example": 1234 + "description": "The process ID of this container" }, "ExitCode": { - "type": "integer", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], "title": "Exitcode", - "description": "The last exit code of this container", - "example": 0 + "description": "The last exit code of this container" }, "Error": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Error" }, "StartedAt": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Startedat", - "description": "The time when this container was last started.", - "example": "2020-01-06T09:06:59.461876391Z" + "description": "The time when this container was last started." }, "FinishedAt": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Finishedat", - "description": "The time when this container last exited.", - "example": "2020-01-06T09:07:59.461876391Z" + "description": "The time when this container last exited." }, "Health": { - "$ref": "#/components/schemas/Health" + "anyOf": [ + { + "$ref": "#/components/schemas/Health" + }, + { + "type": "null" + } + ] } }, "type": "object", "title": "ContainerState", - "description": " ContainerState stores container's running state. It's part of ContainerJSONBase\nand will be returned by the \"inspect\" command." + "description": "ContainerState stores container's running state. It's part of ContainerJSONBase\nand will be returned by the \"inspect\" command." 
}, "DNSResolver": { "properties": { "address": { "anyOf": [ { - "$ref": "#/components/schemas/OsparcVariableIdentifier" + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" }, { "type": "string" @@ -2312,12 +1677,14 @@ "minimum": 0 }, { - "$ref": "#/components/schemas/OsparcVariableIdentifier" + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" } ], "title": "Port" } }, + "additionalProperties": true, "type": "object", "required": [ "address", @@ -2341,24 +1708,10 @@ "title": "DelayedExceptionHandler", "description": "Allows to ignore an exception for an established\nperiod of time after which it is raised.\n\nThis use case most commonly occurs when dealing with\nexternal systems.\nFor example, due to poor network performance or\nnetwork congestion, an external system which is healthy,\ncurrently is not reachable any longer.\nA possible solution:\n- ignore exceptions for an interval in which the\n system usually is reachable again by not\n raising the error\n- if the error persist give up and raise it\n\nExample code usage:\n\n delayed_handler_external_service = DelayedExceptionHandler(\n delay_for=60\n )\n try:\n function_called_periodically_accessing_external_service()\n except TargetException as e:\n delayed_handler_external_service.try_to_raise(e)\n else:\n delayed_handler_external_service.else_reset()" }, - "DictModel_str__PositiveFloat_": { - "additionalProperties": { - "type": "number", - "exclusiveMinimum": true, - "minimum": 0.0 - }, - "type": "object", - "title": "DictModel[str, PositiveFloat]" - }, "DockerContainerInspect": { "properties": { "container_state": { - "allOf": [ - { - "$ref": "#/components/schemas/ContainerState" - } - ], - "title": "Container State", + "$ref": "#/components/schemas/ContainerState", "description": "current state of container" }, "name": { @@ -2411,15 +1764,19 @@ "title": "Service Uuid" }, "service_basepath": { - "type": "string", - "format": "path", + "anyOf": [ + { + "type": "string", + "format": "path" + }, + { + "type": "null" + } + ], "title": "Service Basepath", "description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint." }, "service_resources": { - "additionalProperties": { - "$ref": "#/components/schemas/ImageResources" - }, "type": "object", "title": "Service Resources" }, @@ -2434,30 +1791,36 @@ "description": "the service data must be saved when closing" }, "wallet_info": { - "allOf": [ + "anyOf": [ + { + "$ref": "#/components/schemas/WalletInfo-Input" + }, { - "$ref": "#/components/schemas/WalletInfo" + "type": "null" } ], - "title": "Wallet Info", "description": "contains information about the wallet used to bill the running service" }, "pricing_info": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/PricingInfo" + }, + { + "type": "null" } ], - "title": "Pricing Info", "description": "contains pricing information (ex. pricing plan and unit ids)" }, "hardware_info": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/HardwareInfo" + }, + { + "type": "null" } ], - "title": "Hardware Info", "description": "contains harware information (ex. 
aws_ec2_instances)" } }, @@ -2474,16 +1837,27 @@ ], "title": "DynamicServiceCreate", "example": { + "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "can_save": true, + "hardware_info": { + "aws_ec2_instances": [ + "c6a.4xlarge" + ] + }, "key": "simcore/services/dynamic/3dviewer", - "version": "2.4.5", - "user_id": 234, - "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "pricing_info": { + "pricing_plan_id": 1, + "pricing_unit_cost_id": 1, + "pricing_unit_id": 1 + }, "product_name": "osparc", - "can_save": true, + "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", "service_resources": { "container": { + "boot_modes": [ + "CPU" + ], "image": "simcore/services/dynamic/jupyter-math:2.0.5", "resources": { "CPU": { @@ -2494,38 +1868,22 @@ "limit": 2147483648, "reservation": 2147483648 } - }, - "boot_modes": [ - "CPU" - ] + } } }, + "user_id": 234, + "version": "2.4.5", "wallet_info": { + "wallet_credit_amount": "10", "wallet_id": 1, - "wallet_name": "My Wallet", - "wallet_credit_amount": 10 - }, - "pricing_info": { - "pricing_plan_id": 1, - "pricing_unit_id": 1, - "pricing_unit_cost_id": 1 - }, - "hardware_info": { - "aws_ec2_instances": [ - "c6a.4xlarge" - ] + "wallet_name": "My Wallet" } } }, "DynamicSidecar": { "properties": { "status": { - "allOf": [ - { - "$ref": "#/components/schemas/simcore_service_director_v2__models__dynamic_services_scheduler__Status" - } - ], - "title": "Status", + "$ref": "#/components/schemas/simcore_service_director_v2__models__dynamic_services_scheduler__Status", "description": "status of the service sidecar also with additional information", "default": { "current": "ok", @@ -2535,8 +1893,8 @@ "is_ready": { "type": "boolean", "title": "Is Ready", - "default": false, - "scription": "is True while the health check on the dynamic-sidecar is responding. Meaning that the dynamic-sidecar is reachable and can accept requests" + "description": "is True while the health check on the dynamic-sidecar is responding. 
Meaning that the dynamic-sidecar is reachable and can accept requests", + "default": false }, "was_compose_spec_submitted": { "type": "boolean", @@ -2550,8 +1908,8 @@ }, "type": "array", "title": "Containers Inspect", - "default": [], - "scription": "docker inspect results from all the container ran at regular intervals" + "description": "docker inspect results from all the container ran at regular intervals", + "default": [] }, "was_dynamic_sidecar_started": { "type": "boolean", @@ -2582,12 +1940,7 @@ "default": false }, "service_removal_state": { - "allOf": [ - { - "$ref": "#/components/schemas/ServiceRemovalState" - } - ], - "title": "Service Removal State", + "$ref": "#/components/schemas/ServiceRemovalState", "description": "stores information used during service removal from the dynamic-sidecar scheduler" }, "wait_for_manual_intervention_after_error": { @@ -2609,52 +1962,77 @@ "default": false }, "instrumentation": { - "allOf": [ - { - "$ref": "#/components/schemas/ServicesInstrumentation" - } - ], - "title": "Instrumentation", + "$ref": "#/components/schemas/ServicesInstrumentation", "description": "keeps track times for various operations" }, "dynamic_sidecar_id": { - "type": "string", - "maxLength": 25, - "pattern": "[A-Za-z0-9]{25}", + "anyOf": [ + { + "type": "string", + "maxLength": 25, + "pattern": "[A-Za-z0-9]{25}" + }, + { + "type": "null" + } + ], "title": "Dynamic Sidecar Id", "description": "returned by the docker engine; used for starting the proxy" }, "dynamic_sidecar_network_id": { - "type": "string", - "maxLength": 25, - "pattern": "[A-Za-z0-9]{25}", + "anyOf": [ + { + "type": "string", + "maxLength": 25, + "pattern": "[A-Za-z0-9]{25}" + }, + { + "type": "null" + } + ], "title": "Dynamic Sidecar Network Id", "description": "returned by the docker engine; used for starting the proxy" }, "swarm_network_id": { - "type": "string", - "maxLength": 25, - "pattern": "[A-Za-z0-9]{25}", + "anyOf": [ + { + "type": "string", + "maxLength": 25, + "pattern": "[A-Za-z0-9]{25}" + }, + { + "type": "null" + } + ], "title": "Swarm Network Id", "description": "returned by the docker engine; used for starting the proxy" }, "swarm_network_name": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Swarm Network Name", "description": "used for starting the proxy" }, "docker_node_id": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Docker Node Id", "description": "contains node id of the docker node where all services and created containers are started" }, "inspect_error_handler": { - "allOf": [ - { - "$ref": "#/components/schemas/DelayedExceptionHandler" - } - ], - "title": "Inspect Error Handler", + "$ref": "#/components/schemas/DelayedExceptionHandler", "description": "Set when the dy-sidecar can no longer be reached by the director-v2. If it will be possible to reach the dy-sidecar again, this value will be set to None.", "default": { "delay_for": 0.0 @@ -2670,8 +2048,7 @@ "ok", "failing" ], - "title": "DynamicSidecarStatus", - "description": "An enumeration." 
+ "title": "DynamicSidecarStatus" }, "GetProjectInactivityResponse": { "properties": { @@ -2718,25 +2095,40 @@ "Health": { "properties": { "Status": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/models_library__generated_models__docker_rest_api__Status" + }, + { + "type": "null" } ], - "description": "Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem\n", - "example": "healthy" + "description": "Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem\n" }, "FailingStreak": { - "type": "integer", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], "title": "Failingstreak", - "description": "FailingStreak is the number of consecutive failures", - "example": 0 + "description": "FailingStreak is the number of consecutive failures" }, "Log": { - "items": { - "$ref": "#/components/schemas/HealthcheckResult" - }, - "type": "array", + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/HealthcheckResult" + }, + "type": "array" + }, + { + "type": "null" + } + ], "title": "Log", "description": "Log contains the last few results (oldest first)\n" } @@ -2764,26 +2156,51 @@ "HealthcheckResult": { "properties": { "Start": { - "type": "string", - "format": "date-time", + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], "title": "Start", - "description": "Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - "example": "2020-01-04T10:44:24.496525531Z" + "description": "Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n" }, "End": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "End", - "description": "Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - "example": "2020-01-04T10:45:21.364524523Z" + "description": "Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n" }, "ExitCode": { - "type": "integer", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], "title": "Exitcode", - "description": "ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n", - "example": 0 + "description": "ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n" }, "Output": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Output", "description": "Output from last check" } @@ -2812,6 +2229,7 @@ "$ref": "#/components/schemas/BootMode" }, "type": "array", + "title": "Boot Modes", "description": "describe how a service shall be booted, using CPU, MPI, openMP or GPU", "default": [ "CPU" @@ -2827,6 +2245,14 @@ "example": { "image": 
"simcore/service/dynamic/pretty-intense:1.0.0", "resources": { + "AIRAM": { + "limit": 1, + "reservation": 1 + }, + "ANY_resource": { + "limit": "some_value", + "reservation": "some_value" + }, "CPU": { "limit": 4, "reservation": 0.1 @@ -2838,61 +2264,17 @@ "VRAM": { "limit": 1, "reservation": 1 - }, - "AIRAM": { - "limit": 1, - "reservation": 1 - }, - "ANY_resource": { - "limit": "some_value", - "reservation": "some_value" } } } }, - "JupyterHubTokenAuthentication": { - "properties": { - "type": { - "type": "string", - "enum": [ - "jupyterhub" - ], - "title": "Type", - "default": "jupyterhub" - }, - "api_token": { - "type": "string", - "title": "Api Token" - } - }, - "additionalProperties": false, - "type": "object", - "required": [ - "api_token" - ], - "title": "JupyterHubTokenAuthentication" - }, - "KerberosAuthentication": { - "properties": { - "type": { - "type": "string", - "enum": [ - "kerberos" - ], - "title": "Type", - "default": "kerberos" - } - }, - "additionalProperties": false, - "type": "object", - "title": "KerberosAuthentication" - }, "NATRule": { "properties": { "hostname": { "anyOf": [ { - "$ref": "#/components/schemas/OsparcVariableIdentifier" + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" }, { "type": "string" @@ -2911,7 +2293,8 @@ "minimum": 0 }, { - "$ref": "#/components/schemas/OsparcVariableIdentifier" + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" }, { "$ref": "#/components/schemas/_PortRange" @@ -2922,12 +2305,7 @@ "title": "Tcp Ports" }, "dns_resolver": { - "allOf": [ - { - "$ref": "#/components/schemas/DNSResolver" - } - ], - "title": "Dns Resolver", + "$ref": "#/components/schemas/DNSResolver", "description": "specify a DNS resolver address and port" } }, @@ -2939,21 +2317,6 @@ "title": "NATRule", "description": "Content of \"simcore.service.containers-allowed-outgoing-permit-list\" label" }, - "NoAuthentication": { - "properties": { - "type": { - "type": "string", - "enum": [ - "none" - ], - "title": "Type", - "default": "none" - } - }, - "additionalProperties": false, - "type": "object", - "title": "NoAuthentication" - }, "NodeState": { "properties": { "modified": { @@ -2973,18 +2336,21 @@ "description": "contains the node inputs dependencies if they need to be computed first" }, "currentStatus": { - "allOf": [ - { - "$ref": "#/components/schemas/RunningState" - } - ], + "$ref": "#/components/schemas/RunningState", "description": "the node's current state", "default": "NOT_STARTED" }, "progress": { - "type": "number", - "maximum": 1.0, - "minimum": 0.0, + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], "title": "Progress", "description": "current progress of the task if available (None if not started or not a computational task)", "default": 0 @@ -3007,11 +2373,6 @@ ], "title": "ObservationItem" }, - "OsparcVariableIdentifier": { - "type": "string", - "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$", - "title": "OsparcVariableIdentifier" - }, "PathMappingsLabel": { "properties": { "inputs_path": { @@ -3033,23 +2394,36 @@ }, "type": "array", "title": "State Paths", - "description": "optional list of paths which contents need to be persisted", - "default": [] + "description": "optional list of paths which contents need to be persisted" }, "state_exclude": { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true, + "anyOf": [ + { + "items": { + "type": 
"string" + }, + "type": "array", + "uniqueItems": true + }, + { + "type": "null" + } + ], "title": "State Exclude", "description": "optional list unix shell rules used to exclude files from the state" }, "volume_size_limits": { - "additionalProperties": { - "type": "string" - }, - "type": "object", + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], "title": "Volume Size Limits", "description": "Apply volume size limits to entries in: `inputs_path`, `outputs_path` and `state_paths`. Limits must be parsable by Pydantic's ByteSize." } @@ -3078,9 +2452,16 @@ "description": "The adjacency list of the current pipeline in terms of {NodeID: [successor NodeID]}" }, "progress": { - "type": "number", - "maximum": 1.0, - "minimum": 0.0, + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], "title": "Progress", "description": "the progress of the pipeline (None if there are no computational tasks)" }, @@ -3199,6 +2580,7 @@ "properties": { "size_bytes": { "type": "integer", + "minimum": 0, "title": "Size Bytes", "description": "The amount of data transferred by the retrieve call" } @@ -3252,17 +2634,20 @@ "title": "Service Uuid" }, "service_basepath": { - "type": "string", - "format": "path", + "anyOf": [ + { + "type": "string", + "format": "path" + }, + { + "type": "null" + } + ], "title": "Service Basepath", "description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint." }, "boot_type": { - "allOf": [ - { - "$ref": "#/components/schemas/ServiceBootType" - } - ], + "$ref": "#/components/schemas/ServiceBootType", "description": "Describes how the dynamic services was started (legacy=V0, modern=V2).Since legacy services do not have this label it defaults to V0.", "default": "V0" }, @@ -3281,31 +2666,48 @@ "minimum": 0 }, "published_port": { - "type": "integer", - "exclusiveMaximum": true, - "exclusiveMinimum": true, + "anyOf": [ + { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 + }, + { + "type": "null" + } + ], "title": "Published Port", "description": "the service swarm published port if any", - "deprecated": true, - "maximum": 65535, - "minimum": 0 + "deprecated": true }, "entry_point": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Entry Point", "description": "if empty the service entrypoint is on the root endpoint.", "deprecated": true }, "service_state": { - "allOf": [ - { - "$ref": "#/components/schemas/ServiceState" - } - ], + "$ref": "#/components/schemas/ServiceState", "description": "service current state" }, "service_message": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Service Message", "description": "additional information related to service state" } @@ -3340,73 +2742,83 @@ "title": "RunningState", "description": "State of execution of a project's computational workflow\n\nSEE StateType for task state" }, - "Scheduler": { - "properties": { - "status": { - "type": "string", - "title": "Status", - "description": "The running status of the scheduler" - }, - "workers": { - "additionalProperties": { - "$ref": "#/components/schemas/Worker" - }, - "type": "object", - "title": "Workers" - } - }, - "type": "object", - "required": [ - "status" - ], - "title": "Scheduler" - }, "SchedulerData": { "properties": { "paths_mapping": { 
"$ref": "#/components/schemas/PathMappingsLabel" }, "simcore.service.compose-spec": { - "type": "object", + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], "title": "Simcore.Service.Compose-Spec", "description": "json encoded docker-compose specifications. see https://docs.docker.com/compose/compose-file/, only used by dynamic-sidecar." }, "simcore.service.container-http-entrypoint": { - "type": "string", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Simcore.Service.Container-Http-Entrypoint", "description": "When a docker-compose specifications is provided, the container where the traffic must flow has to be specified. Required by dynamic-sidecar when compose_spec is set." }, "user_preferences_path": { - "type": "string", - "format": "path", - "title": "User Preferences Path" - }, - "simcore.service.restart-policy": { - "allOf": [ + "anyOf": [ + { + "type": "string", + "format": "path" + }, { - "$ref": "#/components/schemas/RestartPolicy" + "type": "null" } ], + "title": "User Preferences Path" + }, + "simcore.service.restart-policy": { + "$ref": "#/components/schemas/RestartPolicy", "description": "the dynamic-sidecar can restart all running containers on certain events. Supported events:\n- `no-restart` default\n- `on-inputs-downloaded` after inputs are downloaded\n", "default": "no-restart" }, "simcore.service.containers-allowed-outgoing-permit-list": { - "additionalProperties": { - "items": { - "$ref": "#/components/schemas/NATRule" + "anyOf": [ + { + "additionalProperties": { + "items": { + "$ref": "#/components/schemas/NATRule" + }, + "type": "array" + }, + "type": "object" }, - "type": "array" - }, - "type": "object", + { + "type": "null" + } + ], "title": "Simcore.Service.Containers-Allowed-Outgoing-Permit-List", "description": "allow internet access to certain domain names and ports per container" }, "simcore.service.containers-allowed-outgoing-internet": { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true, + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + { + "type": "null" + } + ], "title": "Simcore.Service.Containers-Allowed-Outgoing-Internet", "description": "allow complete internet access to containers in here" }, @@ -3468,12 +2880,7 @@ "minimum": 0 }, "dynamic_sidecar": { - "allOf": [ - { - "$ref": "#/components/schemas/DynamicSidecar" - } - ], - "title": "Dynamic Sidecar", + "$ref": "#/components/schemas/DynamicSidecar", "description": "stores information fetched from the dynamic-sidecar" }, "dynamic_sidecar_network_name": { @@ -3497,9 +2904,6 @@ "minimum": 0 }, "service_resources": { - "additionalProperties": { - "$ref": "#/components/schemas/ImageResources" - }, "type": "object", "title": "Service Resources", "description": "service resources used to enforce limits" @@ -3525,39 +2929,52 @@ "description": "service name given to the proxy" }, "proxy_admin_api_port": { - "type": "integer", - "exclusiveMaximum": true, - "exclusiveMinimum": true, + "anyOf": [ + { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 + }, + { + "type": "null" + } + ], "title": "Proxy Admin Api Port", - "description": "used as the admin endpoint API port", - "maximum": 65535, - "minimum": 0 + "description": "used as the admin endpoint API port" }, "wallet_info": { - "allOf": [ + "anyOf": [ + { + "$ref": "#/components/schemas/WalletInfo-Output" + }, { - "$ref": "#/components/schemas/WalletInfo" + "type": 
"null" } ], - "title": "Wallet Info", "description": "contains information about the wallet used to bill the running service" }, "pricing_info": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/PricingInfo" + }, + { + "type": "null" } ], - "title": "Pricing Info", "description": "contains pricing information so we know what is the cost of running of the service" }, "hardware_info": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/HardwareInfo" + }, + { + "type": "null" } ], - "title": "Hardware Info", "description": "contains harware information so we know on which hardware to run the service" }, "product_name": { @@ -3566,6 +2983,7 @@ "description": "Current product upon which this service is scheduled. If set to None, the current product is undefined. Mostly for backwards compatibility" } }, + "additionalProperties": true, "type": "object", "required": [ "paths_mapping", @@ -3584,8 +3002,7 @@ "request_scheme", "request_simcore_user_agent" ], - "title": "SchedulerData", - "description": "All \"simcore.service.*\" labels including keys" + "title": "SchedulerData" }, "ServiceBootType": { "type": "string", @@ -3593,8 +3010,7 @@ "V0", "V2" ], - "title": "ServiceBootType", - "description": "An enumeration." + "title": "ServiceBootType" }, "ServiceRemovalState": { "properties": { @@ -3621,6 +3037,7 @@ "title": "ServiceRemovalState" }, "ServiceState": { + "type": "string", "enum": [ "failed", "pending", @@ -3631,20 +3048,33 @@ "complete", "idle" ], - "title": "ServiceState", - "description": "An enumeration." + "title": "ServiceState" }, "ServicesInstrumentation": { "properties": { "start_requested_at": { - "type": "string", - "format": "date-time", + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], "title": "Start Requested At", "description": "moment in which the process of starting the service was requested" }, "close_requested_at": { - "type": "string", - "format": "date-time", + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], "title": "Close Requested At", "description": "moment in which the process of stopping the service was requested" } @@ -3652,35 +3082,6 @@ "type": "object", "title": "ServicesInstrumentation" }, - "SimpleAuthentication": { - "properties": { - "type": { - "type": "string", - "enum": [ - "simple" - ], - "title": "Type", - "default": "simple" - }, - "username": { - "type": "string", - "title": "Username" - }, - "password": { - "type": "string", - "format": "password", - "title": "Password", - "writeOnly": true - } - }, - "additionalProperties": false, - "type": "object", - "required": [ - "username", - "password" - ], - "title": "SimpleAuthentication" - }, "Status2": { "type": "string", "enum": [ @@ -3693,63 +3094,7 @@ "dead" ], "title": "Status2", - "description": " String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\"." 
- }, - "TLSAuthentication": { - "properties": { - "type": { - "type": "string", - "enum": [ - "tls" - ], - "title": "Type", - "default": "tls" - }, - "tls_ca_file": { - "type": "string", - "format": "path", - "title": "Tls Ca File" - }, - "tls_client_cert": { - "type": "string", - "format": "path", - "title": "Tls Client Cert" - }, - "tls_client_key": { - "type": "string", - "format": "path", - "title": "Tls Client Key" - } - }, - "additionalProperties": false, - "type": "object", - "required": [ - "tls_ca_file", - "tls_client_cert", - "tls_client_key" - ], - "title": "TLSAuthentication" - }, - "TaskCounts": { - "properties": { - "error": { - "type": "integer", - "title": "Error", - "default": 0 - }, - "memory": { - "type": "integer", - "title": "Memory", - "default": 0 - }, - "executing": { - "type": "integer", - "title": "Executing", - "default": 0 - } - }, - "type": "object", - "title": "TaskCounts" + "description": "String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\"." }, "TaskLogFileGet": { "properties": { @@ -3759,10 +3104,16 @@ "title": "Task Id" }, "download_link": { - "type": "string", - "maxLength": 65536, - "minLength": 1, - "format": "uri", + "anyOf": [ + { + "type": "string", + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], "title": "Download Link", "description": "Presigned link for log file or None if still not available" } @@ -3806,14 +3157,6 @@ ], "title": "TasksSelection" }, - "UsedResources": { - "additionalProperties": { - "type": "number", - "minimum": 0.0 - }, - "type": "object", - "title": "UsedResources" - }, "UserServiceCommand": { "properties": { "service": { @@ -3885,7 +3228,7 @@ ], "title": "ValidationError" }, - "WalletInfo": { + "WalletInfo-Input": { "properties": { "wallet_id": { "type": "integer", @@ -3898,7 +3241,14 @@ "title": "Wallet Name" }, "wallet_credit_amount": { - "type": "number", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string" + } + ], "title": "Wallet Credit Amount" } }, @@ -3910,76 +3260,30 @@ ], "title": "WalletInfo" }, - "Worker": { - "properties": { - "id": { - "type": "string", - "title": "Id" - }, - "name": { - "type": "string", - "title": "Name" - }, - "resources": { - "$ref": "#/components/schemas/DictModel_str__PositiveFloat_" - }, - "used_resources": { - "$ref": "#/components/schemas/UsedResources" - }, - "memory_limit": { - "type": "integer", - "title": "Memory Limit" - }, - "metrics": { - "$ref": "#/components/schemas/WorkerMetrics" - } - }, - "type": "object", - "required": [ - "id", - "name", - "resources", - "used_resources", - "memory_limit", - "metrics" - ], - "title": "Worker" - }, - "WorkerMetrics": { + "WalletInfo-Output": { "properties": { - "cpu": { - "type": "number", - "title": "Cpu", - "description": "consumed % of cpus" - }, - "memory": { + "wallet_id": { "type": "integer", - "title": "Memory", - "description": "consumed memory" + "exclusiveMinimum": true, + "title": "Wallet Id", + "minimum": 0 }, - "num_fds": { - "type": "integer", - "title": "Num Fds", - "description": "consumed file descriptors" + "wallet_name": { + "type": "string", + "title": "Wallet Name" }, - "task_counts": { - "allOf": [ - { - "$ref": "#/components/schemas/TaskCounts" - } - ], - "title": "Task Counts", - "description": "task details" + "wallet_credit_amount": { + "type": "string", + "title": "Wallet Credit Amount" } }, "type": "object", "required": [ - "cpu", - "memory", - "num_fds", - "task_counts" + 
"wallet_id", + "wallet_name", + "wallet_credit_amount" ], - "title": "WorkerMetrics" + "title": "WalletInfo" }, "_PortRange": { "properties": { @@ -3993,7 +3297,8 @@ "minimum": 0 }, { - "$ref": "#/components/schemas/OsparcVariableIdentifier" + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" } ], "title": "Lower" @@ -4008,7 +3313,8 @@ "minimum": 0 }, { - "$ref": "#/components/schemas/OsparcVariableIdentifier" + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" } ], "title": "Upper" @@ -4031,16 +3337,12 @@ "unhealthy" ], "title": "Status", - "description": " Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem" + "description": "Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem" }, "simcore_service_director_v2__models__dynamic_services_scheduler__Status": { "properties": { "current": { - "allOf": [ - { - "$ref": "#/components/schemas/DynamicSidecarStatus" - } - ], + "$ref": "#/components/schemas/DynamicSidecarStatus", "description": "status of the service" }, "info": { diff --git a/services/director-v2/requirements/_base.in b/services/director-v2/requirements/_base.in index dc173e2c2b6..5cc2fcd649c 100644 --- a/services/director-v2/requirements/_base.in +++ b/services/director-v2/requirements/_base.in @@ -23,7 +23,6 @@ aio-pika aiocache[redis,msgpack] aiodocker aiopg[sa] -dask-gateway fastapi[all] httpx networkx diff --git a/services/director-v2/requirements/_base.txt b/services/director-v2/requirements/_base.txt index 15a4e37ffc5..e7bfdb265fc 100644 --- a/services/director-v2/requirements/_base.txt +++ b/services/director-v2/requirements/_base.txt @@ -61,7 +61,6 @@ aiohttp==3.9.5 # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # aiodocker - # dask-gateway aiopg==1.4.0 # via # -r requirements/../../../packages/simcore-sdk/requirements/_base.in @@ -156,7 +155,6 @@ click==8.1.7 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask - # dask-gateway # distributed # typer # uvicorn @@ -169,10 +167,7 @@ dask==2024.5.1 # via # -r requirements/../../../packages/dask-task-models-library/requirements/_base.in # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway # distributed -dask-gateway==2024.1.0 - # via -r requirements/_base.in deprecated==1.2.14 # via # opentelemetry-api @@ -183,7 +178,6 @@ distributed==2024.5.1 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask - # dask-gateway dnspython==2.6.1 # via email-validator email-validator==2.1.1 @@ -755,7 +749,6 @@ pyyaml==6.0.1 # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask - # dask-gateway # distributed # fastapi # uvicorn @@ -801,8 +794,6 @@ redis==5.0.4 
# aiocache referencing==0.29.3 # via - # -c requirements/../../../packages/service-library/requirements/./constraints.txt - # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/./constraints.txt # jsonschema # jsonschema-specifications repro-zipfile==0.3.1 @@ -938,7 +929,6 @@ toolz==0.12.1 tornado==6.4 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway # distributed tqdm==4.66.4 # via @@ -1069,7 +1059,9 @@ wsproto==1.2.0 yarl==1.9.4 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq diff --git a/services/director-v2/requirements/_test.in b/services/director-v2/requirements/_test.in index fb4edeaafbf..3633a09b704 100644 --- a/services/director-v2/requirements/_test.in +++ b/services/director-v2/requirements/_test.in @@ -14,7 +14,6 @@ aioboto3 alembic # migration due to pytest_simcore.postgres_service2 asgi_lifespan async-asgi-testclient # replacement for fastapi.testclient.TestClient [see b) below] -dask-gateway-server[local] dask[distributed,diagnostics] docker Faker diff --git a/services/director-v2/requirements/_test.txt b/services/director-v2/requirements/_test.txt index ee97fe23500..6f4b07aeac9 100644 --- a/services/director-v2/requirements/_test.txt +++ b/services/director-v2/requirements/_test.txt @@ -15,7 +15,6 @@ aiohttp==3.9.5 # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # aiobotocore - # dask-gateway-server aioitertools==0.12.0 # via aiobotocore aiormq==6.8.0 @@ -59,8 +58,6 @@ certifi==2024.2.2 # httpcore # httpx # requests -cffi==1.17.1 - # via cryptography charset-normalizer==3.3.2 # via # -c requirements/_base.txt @@ -75,23 +72,15 @@ cloudpickle==3.0.0 # -c requirements/_base.txt # dask # distributed -colorlog==6.8.2 - # via dask-gateway-server contourpy==1.3.0 # via bokeh coverage==7.6.1 # via pytest-cov -cryptography==43.0.1 - # via - # -c requirements/../../../requirements/constraints.txt - # dask-gateway-server dask==2024.5.1 # via # -c requirements/_base.txt # -r requirements/_test.in # distributed -dask-gateway-server==2023.1.1 - # via -r requirements/_test.in distributed==2024.5.1 # via # -c requirements/_base.txt @@ -219,8 +208,6 @@ psutil==6.0.0 # via # -c requirements/_base.txt # distributed -pycparser==2.22 - # via cffi pytest==8.3.3 # via # -r requirements/_test.in @@ -290,7 +277,6 @@ sqlalchemy==1.4.52 # -c requirements/_base.txt # -r requirements/_test.in # alembic - # dask-gateway-server sqlalchemy2-stubs==0.0.2a38 # via sqlalchemy tblib==3.0.0 @@ -308,8 +294,6 @@ tornado==6.4 # -c requirements/_base.txt # bokeh # distributed -traitlets==5.14.3 - # via dask-gateway-server types-networkx==3.2.1.20240918 # via -r requirements/_test.in types-psycopg2==2.9.21.20240819 diff --git a/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py b/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py index 671fc78d2d4..df3d607049c 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py +++ b/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py @@ -2,7 +2,6 @@ from .._meta import 
API_VTAG from .routes import ( - clusters, computations, computations_tasks, dynamic_scheduler, @@ -27,7 +26,6 @@ v2_router.include_router( dynamic_services.router, tags=["dynamic services"], prefix="/dynamic_services" ) -v2_router.include_router(clusters.router, tags=["clusters"], prefix="/clusters") v2_router.include_router( dynamic_scheduler.router, tags=["dynamic scheduler"], prefix="/dynamic_scheduler" diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py b/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py deleted file mode 100644 index d2ab294757e..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py +++ /dev/null @@ -1,242 +0,0 @@ -import logging -from asyncio.log import logger -from typing import Final - -from aiocache import cached # type: ignore[import-untyped] -from fastapi import APIRouter, Depends, HTTPException -from models_library.api_schemas_directorv2.clusters import ( - ClusterCreate, - ClusterDetails, - ClusterDetailsGet, - ClusterGet, - ClusterPatch, - ClusterPing, -) -from models_library.clusters import DEFAULT_CLUSTER_ID, BaseCluster, ClusterID -from models_library.users import UserID -from starlette import status - -from ...core.errors import ( - ClusterInvalidOperationError, - ConfigurationError, - DaskClientAcquisisitonError, -) -from ...core.settings import ComputationalBackendSettings -from ...modules.dask_clients_pool import DaskClientsPool -from ...modules.db.repositories.clusters import ClustersRepository -from ...utils.dask_client_utils import test_scheduler_endpoint -from ..dependencies.dask import get_dask_clients_pool -from ..dependencies.database import get_repository -from ..dependencies.scheduler import get_scheduler_settings - -router = APIRouter() -log = logging.getLogger(__name__) - - -GET_CLUSTER_DETAILS_CACHING_TTL: Final[int] = 3 - - -def _build_cache_key(fct, *_, **kwargs): - return f"{fct.__name__}_{kwargs['cluster_id']}" - - -@cached(ttl=GET_CLUSTER_DETAILS_CACHING_TTL, key_builder=_build_cache_key) -async def _get_cluster_details_with_id( - settings: ComputationalBackendSettings, - user_id: UserID, - cluster_id: ClusterID, - clusters_repo: ClustersRepository, - dask_clients_pool: DaskClientsPool, -) -> ClusterDetails: - log.debug("Getting details for cluster '%s'", cluster_id) - cluster: BaseCluster = settings.default_cluster - if cluster_id != DEFAULT_CLUSTER_ID: - cluster = await clusters_repo.get_cluster(user_id, cluster_id) - async with dask_clients_pool.acquire(cluster) as client: - return await client.get_cluster_details() - - -@router.post( - "", - summary="Create a new cluster for a user", - response_model=ClusterGet, - status_code=status.HTTP_201_CREATED, -) -async def create_cluster( - user_id: UserID, - new_cluster: ClusterCreate, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - return await clusters_repo.create_cluster(user_id, new_cluster) - - -@router.get("", summary="Lists clusters for user", response_model=list[ClusterGet]) -async def list_clusters( - user_id: UserID, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), -): - default_cluster = settings.default_cluster - return [default_cluster] + await clusters_repo.list_clusters(user_id) - - -@router.get( - "/default", - summary="Returns the default cluster", - response_model=ClusterGet, - status_code=status.HTTP_200_OK, -) -async 
def get_default_cluster( - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), -): - return settings.default_cluster - - -@router.get( - "/{cluster_id}", - summary="Get one cluster for user", - response_model=ClusterGet, - status_code=status.HTTP_200_OK, -) -async def get_cluster( - user_id: UserID, - cluster_id: ClusterID, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - return await clusters_repo.get_cluster(user_id, cluster_id) - - -@router.patch( - "/{cluster_id}", - summary="Modify a cluster for user", - response_model=ClusterGet, - status_code=status.HTTP_200_OK, -) -async def update_cluster( - user_id: UserID, - cluster_id: ClusterID, - updated_cluster: ClusterPatch, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - try: - return await clusters_repo.update_cluster(user_id, cluster_id, updated_cluster) - except ClusterInvalidOperationError as e: - raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=f"{e}") from e - - -@router.delete( - "/{cluster_id}", - summary="Remove a cluster for user", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -async def delete_cluster( - user_id: UserID, - cluster_id: ClusterID, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - await clusters_repo.delete_cluster(user_id, cluster_id) - - -@router.get( - "/default/details", - summary="Returns the cluster details", - response_model=ClusterDetailsGet, - status_code=status.HTTP_200_OK, -) -async def get_default_cluster_details( - user_id: UserID, - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), - dask_clients_pool: DaskClientsPool = Depends(get_dask_clients_pool), -): - default_cluster = await _get_cluster_details_with_id( - settings=settings, - user_id=user_id, - cluster_id=DEFAULT_CLUSTER_ID, - clusters_repo=clusters_repo, - dask_clients_pool=dask_clients_pool, - ) - logger.debug("found followind %s", f"{default_cluster=!r}") - return default_cluster - - -@router.get( - "/{cluster_id}/details", - summary="Returns the cluster details", - response_model=ClusterDetailsGet, - status_code=status.HTTP_200_OK, -) -async def get_cluster_details( - user_id: UserID, - cluster_id: ClusterID, - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), - dask_clients_pool: DaskClientsPool = Depends(get_dask_clients_pool), -): - try: - cluster_details = await _get_cluster_details_with_id( - settings=settings, - user_id=user_id, - cluster_id=cluster_id, - clusters_repo=clusters_repo, - dask_clients_pool=dask_clients_pool, - ) - logger.debug("found following %s", f"{cluster_details=!r}") - return cluster_details - except DaskClientAcquisisitonError as exc: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, detail=f"{exc}" - ) from exc - - -@router.post( - ":ping", - summary="Test cluster connection", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -async def test_cluster_connection( - cluster_auth: ClusterPing, -): - try: - return await test_scheduler_endpoint( - endpoint=cluster_auth.endpoint, authentication=cluster_auth.authentication - ) - - except ConfigurationError as e: - raise HTTPException( - status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=f"{e}" - ) from e - - -@router.post( - "/default:ping", - 
summary="Test cluster connection", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -async def test_default_cluster_connection( - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), -): - cluster = settings.default_cluster - return await test_scheduler_endpoint( - endpoint=cluster.endpoint, authentication=cluster.authentication - ) - - -@router.post( - "/{cluster_id}:ping", - summary="Test cluster connection", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -async def test_specific_cluster_connection( - user_id: UserID, - cluster_id: ClusterID, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - cluster = await clusters_repo.get_cluster(user_id, cluster_id) - return await test_scheduler_endpoint( - endpoint=cluster.endpoint, authentication=cluster.authentication - ) diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py index f25fdf32ece..f0b6e635ac7 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py +++ b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py @@ -28,9 +28,8 @@ ComputationGet, ComputationStop, ) -from models_library.clusters import DEFAULT_CLUSTER_ID from models_library.projects import ProjectAtDB, ProjectID -from models_library.projects_nodes_io import NodeID, NodeIDStr +from models_library.projects_nodes_io import NodeID from models_library.projects_state import RunningState from models_library.services import ServiceKeyVersion from models_library.users import UserID @@ -49,7 +48,6 @@ from tenacity.wait import wait_random from ...core.errors import ( - ClusterAccessForbiddenError, ClusterNotFoundError, ClustersKeeperNotAvailableError, ComputationalRunNotFoundError, @@ -64,7 +62,6 @@ from ...models.comp_tasks import CompTaskAtDB from ...modules.catalog import CatalogClient from ...modules.comp_scheduler import run_new_pipeline, stop_pipeline -from ...modules.db.repositories.clusters import ClustersRepository from ...modules.db.repositories.comp_pipelines import CompPipelinesRepository from ...modules.db.repositories.comp_runs import CompRunsRepository from ...modules.db.repositories.comp_tasks import CompTasksRepository @@ -115,7 +112,6 @@ async def _check_pipeline_startable( pipeline_dag: nx.DiGraph, computation: ComputationCreate, catalog_client: CatalogClient, - clusters_repo: ClustersRepository, ) -> None: assert computation.product_name # nosec if deprecated_tasks := await utils.find_deprecated_tasks( @@ -131,20 +127,6 @@ async def _check_pipeline_startable( status_code=status.HTTP_406_NOT_ACCEPTABLE, detail=f"Project {computation.project_id} cannot run since it contains deprecated tasks {jsonable_encoder( deprecated_tasks)}", ) - if computation.cluster_id: - # check the cluster ID is a valid one - try: - await clusters_repo.get_cluster(computation.user_id, computation.cluster_id) - except ClusterNotFoundError as exc: - raise HTTPException( - status_code=status.HTTP_406_NOT_ACCEPTABLE, - detail=f"Project {computation.project_id} cannot run on cluster {computation.cluster_id}, not found", - ) from exc - except ClusterAccessForbiddenError as exc: - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail=f"Project {computation.project_id} cannot run on cluster {computation.cluster_id}, no access", - ) from exc _UNKNOWN_NODE: Final[str] = "unknown node" @@ -172,7 +154,7 @@ 
async def _get_project_node_names( project_uuid: ProjectID, node_id: NodeID ) -> tuple[str, str]: prj = await project_repo.get_project(project_uuid) - node_id_str = NodeIDStr(f"{node_id}") + node_id_str = f"{node_id}" if node_id_str not in prj.workbench: _logger.error( "%s not found in %s. it is an ancestor of %s. Please check!", @@ -245,7 +227,6 @@ async def _try_start_pipeline( app, user_id=computation.user_id, project_id=computation.project_id, - cluster_id=computation.cluster_id or DEFAULT_CLUSTER_ID, run_metadata=RunMetadataDict( node_id_names_map={ NodeID(node_idstr): node_data.label @@ -305,9 +286,6 @@ async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positi comp_runs_repo: Annotated[ CompRunsRepository, Depends(get_repository(CompRunsRepository)) ], - clusters_repo: Annotated[ - ClustersRepository, Depends(get_repository(ClustersRepository)) - ], users_repo: Annotated[UsersRepository, Depends(get_repository(UsersRepository))], projects_metadata_repo: Annotated[ ProjectsMetadataRepository, Depends(get_repository(ProjectsMetadataRepository)) @@ -342,7 +320,7 @@ async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positi if computation.start_pipeline: await _check_pipeline_startable( - minimal_computational_dag, computation, catalog_client, clusters_repo + minimal_computational_dag, computation, catalog_client ) # ok so put the tasks in the db @@ -411,7 +389,6 @@ async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positi else None ), iteration=last_run.iteration if last_run else None, - cluster_id=last_run.cluster_id if last_run else None, result=None, started=compute_pipeline_started_timestamp( minimal_computational_dag, comp_tasks @@ -518,7 +495,6 @@ async def get_computation( else None ), iteration=last_run.iteration if last_run else None, - cluster_id=last_run.cluster_id if last_run else None, result=None, started=compute_pipeline_started_timestamp(pipeline_dag, all_tasks), stopped=compute_pipeline_stopped_timestamp(pipeline_dag, all_tasks), @@ -593,7 +569,6 @@ async def stop_computation( url=TypeAdapter(AnyHttpUrl).validate_python(f"{request.url}"), stop_url=None, iteration=last_run.iteration if last_run else None, - cluster_id=last_run.cluster_id if last_run else None, result=None, started=compute_pipeline_started_timestamp(pipeline_dag, tasks), stopped=compute_pipeline_stopped_timestamp(pipeline_dag, tasks), @@ -665,9 +640,9 @@ def return_last_value(retry_state: Any) -> Any: before_sleep=before_sleep_log(_logger, logging.INFO), ) async def check_pipeline_stopped() -> bool: - comp_tasks: list[CompTaskAtDB] = ( - await comp_tasks_repo.list_computational_tasks(project_id) - ) + comp_tasks: list[ + CompTaskAtDB + ] = await comp_tasks_repo.list_computational_tasks(project_id) pipeline_state = utils.get_pipeline_state_from_task_states( comp_tasks, ) diff --git a/services/director-v2/src/simcore_service_director_v2/core/application.py b/services/director-v2/src/simcore_service_director_v2/core/application.py index 43a9dcc4e03..4b62b4ce73c 100644 --- a/services/director-v2/src/simcore_service_director_v2/core/application.py +++ b/services/director-v2/src/simcore_service_director_v2/core/application.py @@ -35,7 +35,6 @@ ) from ..modules.osparc_variables import substitutions from .errors import ( - ClusterAccessForbiddenError, ClusterNotFoundError, PipelineNotFoundError, ProjectNetworkNotFoundError, @@ -75,12 +74,6 @@ def _set_exception_handlers(app: FastAPI): status.HTTP_404_NOT_FOUND, ClusterNotFoundError ), ) - 
app.add_exception_handler( - ClusterAccessForbiddenError, - make_http_error_handler_for_exception( - status.HTTP_403_FORBIDDEN, ClusterAccessForbiddenError - ), - ) # SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy app.add_exception_handler( diff --git a/services/director-v2/src/simcore_service_director_v2/core/errors.py b/services/director-v2/src/simcore_service_director_v2/core/errors.py index 492e75bdeab..1dd0243bc4d 100644 --- a/services/director-v2/src/simcore_service_director_v2/core/errors.py +++ b/services/director-v2/src/simcore_service_director_v2/core/errors.py @@ -105,7 +105,7 @@ class MissingComputationalResourcesError( ): # pylint: disable=too-many-ancestors msg_template = ( "Service {service_name}:{service_version} cannot be scheduled " - "on cluster {cluster_id}: task needs '{task_resources}', " + "on cluster: task needs '{task_resources}', " "cluster has {cluster_resources}" ) @@ -114,7 +114,7 @@ class InsuficientComputationalResourcesError( TaskSchedulingError ): # pylint: disable=too-many-ancestors msg_template: str = ( - "Insufficient computational resources to run {service_name}:{service_version} with {service_requested_resources} on cluster {cluster_id}." + "Insufficient computational resources to run {service_name}:{service_version} with {service_requested_resources} on cluster." "Cluster available workers: {cluster_available_resources}" "TIP: Reduce service required resources or contact oSparc support" ) @@ -165,14 +165,6 @@ class ClusterNotFoundError(ComputationalSchedulerError): msg_template = "The cluster '{cluster_id}' not found" -class ClusterAccessForbiddenError(ComputationalSchedulerError): - msg_template = "Insufficient rights to access cluster '{cluster_id}'" - - -class ClusterInvalidOperationError(ComputationalSchedulerError): - msg_template = "Invalid operation on cluster '{cluster_id}'" - - # # SCHEDULER/CLIENT ERRORS # diff --git a/services/director-v2/src/simcore_service_director_v2/core/settings.py b/services/director-v2/src/simcore_service_director_v2/core/settings.py index fe0af49fc5c..61e23e9f018 100644 --- a/services/director-v2/src/simcore_service_director_v2/core/settings.py +++ b/services/director-v2/src/simcore_service_director_v2/core/settings.py @@ -10,8 +10,7 @@ from fastapi import FastAPI from models_library.basic_types import LogLevel, PortInt, VersionTag from models_library.clusters import ( - DEFAULT_CLUSTER_ID, - Cluster, + BaseCluster, ClusterAuthentication, ClusterTypeInModel, NoAuthentication, @@ -85,13 +84,11 @@ class ComputationalBackendSettings(BaseCustomSettings): ..., description="This is the cluster that will be used by default" " when submitting computational services (typically " - "tcp://dask-scheduler:8786, tls://dask-scheduler:8786 for the internal cluster, or " - "http(s)/GATEWAY_IP:8000 for a osparc-dask-gateway)", + "tcp://dask-scheduler:8786, tls://dask-scheduler:8786 for the internal cluster", ) COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: ClusterAuthentication = Field( - ..., - description="Empty for the internal cluster, must be one " - "of simple/kerberos/jupyterhub for the osparc-dask-gateway", + default=..., + description="this is the cluster authentication that will be used by default", ) COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE: FileLinkType = Field( FileLinkType.S3, @@ -107,15 +104,13 @@ class ComputationalBackendSettings(BaseCustomSettings): ) @cached_property - def default_cluster(self) -> Cluster: - return Cluster( - id=DEFAULT_CLUSTER_ID, + def default_cluster(self) -> 
BaseCluster: + return BaseCluster( name="Default cluster", endpoint=self.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL, authentication=self.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH, owner=1, # NOTE: currently this is a soft hack (the group of everyone is the group 1) type=ClusterTypeInModel.ON_PREMISE, - access_rights={}, ) @field_validator("COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH", mode="before") diff --git a/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py index f3fedc6a9f9..915b5b2f1d0 100644 --- a/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py +++ b/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py @@ -2,7 +2,7 @@ from contextlib import suppress from typing import TypeAlias -from models_library.clusters import DEFAULT_CLUSTER_ID, ClusterID +from models_library.clusters import ClusterID from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from models_library.projects_state import RunningState @@ -70,13 +70,6 @@ def convert_result_from_state_type_enum_if_needed(cls, v): return RunningState(DB_TO_RUNNING_STATE[StateType(v)]) return v - @field_validator("cluster_id", mode="before") - @classmethod - def convert_null_to_default_cluster_id(cls, v): - if v is None: - v = DEFAULT_CLUSTER_ID - return v - @field_validator("created", "modified", "started", "ended") @classmethod def ensure_utc(cls, v: datetime.datetime | None) -> datetime.datetime | None: @@ -100,7 +93,7 @@ def convert_null_to_empty_metadata(cls, v): "run_id": 432, "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", "user_id": 132, - "cluster_id": 0, + "cluster_id": None, "iteration": 42, "result": "UNKNOWN", "started": None, @@ -116,7 +109,7 @@ def convert_null_to_empty_metadata(cls, v): "run_id": 432, "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", "user_id": 132, - "cluster_id": None, # this default to DEFAULT_CLUSTER_ID + "cluster_id": None, "iteration": 42, "result": "NOT_STARTED", "started": None, @@ -132,7 +125,7 @@ def convert_null_to_empty_metadata(cls, v): "run_id": 43243, "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", "user_id": 132, - "cluster_id": 123, + "cluster_id": None, "iteration": 12, "result": "SUCCESS", "created": "2021-03-01T13:07:34.191610", @@ -155,7 +148,7 @@ def convert_null_to_empty_metadata(cls, v): "run_id": 43243, "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", "user_id": 132, - "cluster_id": 123, + "cluster_id": None, "iteration": 12, "result": "SUCCESS", "created": "2021-03-01T13:07:34.191610", diff --git a/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py b/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py index 2e62c414d86..01f5586fc35 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py @@ -48,7 +48,6 @@ async def get_or_create_on_demand_cluster( owner=user_id, endpoint=returned_cluster.endpoint, authentication=returned_cluster.authentication, - access_rights={}, ) except RemoteMethodNotRegisteredError as exc: # no clusters-keeper, that is not going to work! 
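With `Cluster`, `DEFAULT_CLUSTER_ID` and per-group `access_rights` gone, both the default cluster in the settings and the on-demand cluster returned by clusters-keeper are plain `BaseCluster` objects. A minimal construction sketch based on the hunks above (the endpoint value is only an example):

```python
# Hedged sketch: mirrors ComputationalBackendSettings.default_cluster above,
# with a hard-coded endpoint instead of the COMPUTATIONAL_BACKEND_* settings.
from models_library.clusters import BaseCluster, ClusterTypeInModel, NoAuthentication

default_cluster = BaseCluster(
    name="Default cluster",
    endpoint="tcp://dask-scheduler:8786",  # internal cluster; tls:// also possible
    authentication=NoAuthentication(),
    owner=1,  # group id of "everyone", as noted in the settings code above
    type=ClusterTypeInModel.ON_PREMISE,
)
```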
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py index 281c9fc4630..57308eb27c9 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py @@ -4,7 +4,6 @@ import networkx as nx from aiopg.sa import Engine from fastapi import FastAPI -from models_library.clusters import ClusterID from models_library.projects import ProjectID from models_library.users import UserID from servicelib.background_task import start_periodic_task, stop_periodic_task @@ -36,13 +35,10 @@ async def run_new_pipeline( *, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, run_metadata: RunMetadataDict, use_on_demand_clusters: bool, ) -> None: - """Sets a new pipeline to be scheduled on the computational resources. - Passing cluster_id=0 will use the default cluster. Passing an existing ID will instruct - the scheduler to run the tasks on the defined cluster""" + """Sets a new pipeline to be scheduled on the computational resources.""" # ensure the pipeline exists and is populated with something db_engine = get_db_engine(app) dag = await _get_pipeline_dag(project_id, db_engine) @@ -56,7 +52,6 @@ async def run_new_pipeline( new_run = await CompRunsRepository.instance(db_engine).create( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, metadata=run_metadata, use_on_demand_clusters=use_on_demand_clusters, ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py index d8fdccc1663..b959c9c8014 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py @@ -783,9 +783,9 @@ async def _schedule_tasks_to_start( # noqa: C901 except Exception: _logger.exception( "Unexpected error for %s with %s on %s happened when scheduling %s:", - f"{user_id=}", - f"{project_id=}", - f"{comp_run.cluster_id=}", + f"{comp_run.user_id=}", + f"{comp_run.project_uuid=}", + f"{comp_run.use_on_demand_clusters=}", f"{tasks_ready_to_start.keys()=}", ) await CompTasksRepository.instance( diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py index adc67853686..153378e9ee5 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py @@ -12,7 +12,7 @@ TaskProgressEvent, ) from dask_task_models_library.container_tasks.io import TaskOutputData -from models_library.clusters import DEFAULT_CLUSTER_ID, BaseCluster, ClusterID +from models_library.clusters import BaseCluster from models_library.errors import ErrorDict from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID @@ -45,7 +45,6 @@ from ..clusters_keeper import get_or_create_on_demand_cluster from ..dask_client import DaskClient, PublishedComputationTask from ..dask_clients_pool import DaskClientsPool -from ..db.repositories.clusters import ClustersRepository from 
..db.repositories.comp_runs import CompRunsRepository from ..db.repositories.comp_tasks import CompTasksRepository from ._scheduler_base import BaseCompScheduler @@ -72,7 +71,6 @@ async def _cluster_dask_client( scheduler: "DaskScheduler", *, use_on_demand_clusters: bool, - cluster_id: ClusterID, run_metadata: RunMetadataDict, ) -> AsyncIterator[DaskClient]: cluster: BaseCluster = scheduler.settings.default_cluster @@ -82,9 +80,6 @@ async def _cluster_dask_client( user_id=user_id, wallet_id=run_metadata.get("wallet_id"), ) - if cluster_id != DEFAULT_CLUSTER_ID: - clusters_repo = ClustersRepository.instance(scheduler.db_engine) - cluster = await clusters_repo.get_cluster(user_id, cluster_id) async with scheduler.dask_clients_pool.acquire(cluster) as client: yield client @@ -115,11 +110,6 @@ async def _start_tasks( user_id, self, use_on_demand_clusters=comp_run.use_on_demand_clusters, - cluster_id=( - comp_run.cluster_id - if comp_run.cluster_id is not None - else DEFAULT_CLUSTER_ID - ), run_metadata=comp_run.metadata, ) as client: # Change the tasks state to PENDING @@ -135,11 +125,6 @@ async def _start_tasks( client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=( - comp_run.cluster_id - if comp_run.cluster_id is not None - else DEFAULT_CLUSTER_ID - ), tasks={node_id: task.image}, hardware_info=task.hardware_info, callback=wake_up_callback, @@ -171,11 +156,6 @@ async def _get_tasks_status( user_id, self, use_on_demand_clusters=comp_run.use_on_demand_clusters, - cluster_id=( - comp_run.cluster_id - if comp_run.cluster_id is not None - else DEFAULT_CLUSTER_ID - ), run_metadata=comp_run.metadata, ) as client: tasks_statuses = await client.get_tasks_status( @@ -213,11 +193,6 @@ async def _stop_tasks( user_id, self, use_on_demand_clusters=comp_run.use_on_demand_clusters, - cluster_id=( - comp_run.cluster_id - if comp_run.cluster_id is not None - else DEFAULT_CLUSTER_ID - ), run_metadata=comp_run.metadata, ) as client: await asyncio.gather( @@ -251,11 +226,6 @@ async def _process_completed_tasks( user_id, self, use_on_demand_clusters=comp_run.use_on_demand_clusters, - cluster_id=( - comp_run.cluster_id - if comp_run.cluster_id is not None - else DEFAULT_CLUSTER_ID - ), run_metadata=comp_run.metadata, ) as client: tasks_results = await asyncio.gather( @@ -275,11 +245,6 @@ async def _process_completed_tasks( user_id, self, use_on_demand_clusters=comp_run.use_on_demand_clusters, - cluster_id=( - comp_run.cluster_id - if comp_run.cluster_id is not None - else DEFAULT_CLUSTER_ID - ), run_metadata=comp_run.metadata, ) as client: await asyncio.gather( diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py index 0458b159811..9d2722e3b6c 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py @@ -1,4 +1,4 @@ -from typing import Callable +from collections.abc import Callable from fastapi import FastAPI from models_library.docker import DockerGenericTag @@ -13,10 +13,10 @@ from models_library.users import UserID from servicelib.redis import RedisClientSDK from settings_library.redis import RedisDatabase -from simcore_service_director_v2.modules.redis import get_redis_client_manager from ...models.comp_runs import Iteration from ...models.comp_tasks import CompTaskAtDB +from ..redis import get_redis_client_manager 
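The net effect on the scheduler entrypoint is that callers no longer pick a cluster: `run_new_pipeline` only needs to know whether an on-demand cluster should be used, and the dask client is then acquired either from the default cluster or from clusters-keeper. A hedged usage sketch (keyword names taken from the hunks above, values are placeholders):

```python
# Hedged sketch of the new call shape -- app, user_id, project_id and run_metadata
# are assumed to be built by the caller as before; only cluster_id disappeared.
from simcore_service_director_v2.modules.comp_scheduler import run_new_pipeline


async def start_pipeline_example(app, user_id, project_id, run_metadata) -> None:
    await run_new_pipeline(
        app,
        user_id=user_id,
        project_id=project_id,
        run_metadata=run_metadata,
        use_on_demand_clusters=False,  # True routes the run to an on-demand cluster
    )
```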
SCHEDULED_STATES: set[RunningState] = { RunningState.PUBLISHED, diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py b/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py index e28e48f82f7..96505371754 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py @@ -1,5 +1,5 @@ """The dask client is the osparc part that communicates with a -dask-scheduler/worker backend directly or through a dask-gateway. +dask-scheduler/worker backend. From dask documentation any Data or function must follow the criteria to be usable in dask [http://distributed.dask.org/en/stable/limitations.html?highlight=cloudpickle#assumptions-on-functions-and-data]: @@ -43,7 +43,7 @@ from distributed.scheduler import TaskStateState as DaskSchedulerTaskState from fastapi import FastAPI from models_library.api_schemas_directorv2.clusters import ClusterDetails, Scheduler -from models_library.clusters import ClusterAuthentication, ClusterID, ClusterTypeInModel +from models_library.clusters import ClusterAuthentication, ClusterTypeInModel from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from models_library.resource_tracker import HardwareInfo @@ -74,7 +74,7 @@ from ..utils.dask_client_utils import ( DaskSubSystem, TaskHandlers, - create_internal_client_based_on_auth, + connect_to_dask_scheduler, ) _logger = logging.getLogger(__name__) @@ -133,7 +133,7 @@ async def create( ) -> "DaskClient": _logger.info( "Initiating connection to %s with auth: %s, type: %s", - f"dask-scheduler/gateway at {endpoint}", + f"dask-scheduler at {endpoint}", authentication, cluster_type, ) @@ -149,9 +149,7 @@ async def create( endpoint, attempt.retry_state.attempt_number, ) - backend = await create_internal_client_based_on_auth( - endpoint, authentication - ) + backend = await connect_to_dask_scheduler(endpoint, authentication) dask_utils.check_scheduler_status(backend.client) instance = cls( app=app, @@ -162,7 +160,7 @@ async def create( ) _logger.info( "Connection to %s succeeded [%s]", - f"dask-scheduler/gateway at {endpoint}", + f"dask-scheduler at {endpoint}", json.dumps(attempt.retry_state.retry_object.statistics), ) _logger.info( @@ -287,7 +285,6 @@ async def send_computation_tasks( *, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, tasks: dict[NodeID, Image], callback: _UserCallbackInSepThread, remote_fct: ContainerRemoteFct | None = None, @@ -331,22 +328,18 @@ async def send_computation_tasks( ) dask_utils.check_communication_with_scheduler_is_open(self.backend.client) dask_utils.check_scheduler_status(self.backend.client) - await dask_utils.check_maximize_workers(self.backend.gateway_cluster) - # NOTE: in case it's a gateway or it is an on-demand cluster + # NOTE: in case it is an on-demand cluster # we do not check a priori if the task # is runnable because we CAN'T. A cluster might auto-scale, the worker(s) - # might also auto-scale and the gateway does not know that a priori. + # might also auto-scale we do not know that a priori. # So, we'll just send the tasks over and see what happens after a while. 
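With the gateway code path gone, `DaskClient` always connects straight to a dask scheduler, optionally over TLS. The snippet below is a hedged, stand-alone illustration of that kind of connection using plain `dask.distributed`; it is not the helper added in this PR, which additionally wires in the oSPARC authentication models (see the dask_client_utils.py hunks further down):

```python
# Hedged sketch: direct (optionally TLS-secured) connection to a dask scheduler,
# analogous to what connect_to_dask_scheduler does later in this diff.
import distributed


async def connect_example(endpoint: str, tls: bool = False) -> distributed.Client:
    security = distributed.Security()
    if tls:
        # certificate paths below are placeholders
        security = distributed.Security(
            tls_ca_file="/certs/ca.pem",
            tls_client_cert="/certs/cert.pem",
            tls_client_key="/certs/key.pem",
            require_encryption=True,
        )
    # awaiting the Client is the asynchronous-mode connection pattern
    return await distributed.Client(endpoint, asynchronous=True, security=security)
```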
- if (self.cluster_type != ClusterTypeInModel.ON_DEMAND) and ( - self.backend.gateway is None - ): + if self.cluster_type != ClusterTypeInModel.ON_DEMAND: dask_utils.check_if_cluster_is_able_to_run_pipeline( project_id=project_id, node_id=node_id, scheduler_info=self.backend.client.scheduler_info(), task_resources=dask_resources, node_image=node_image, - cluster_id=cluster_id, ) s3_settings = None diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py deleted file mode 100644 index 30381110173..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py +++ /dev/null @@ -1,286 +0,0 @@ -import logging -from collections.abc import Iterable - -import psycopg2 -import sqlalchemy as sa -from aiopg.sa import connection -from models_library.api_schemas_directorv2.clusters import ClusterCreate, ClusterPatch -from models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_NO_RIGHTS, - CLUSTER_USER_RIGHTS, - Cluster, - ClusterAccessRights, - ClusterID, -) -from models_library.users import UserID -from pydantic.types import PositiveInt -from simcore_postgres_database.models.cluster_to_groups import cluster_to_groups -from simcore_postgres_database.models.clusters import clusters -from simcore_postgres_database.models.groups import GroupType, groups, user_to_groups -from simcore_postgres_database.models.users import users -from sqlalchemy.dialects.postgresql import insert as pg_insert - -from ....core.errors import ( - ClusterAccessForbiddenError, - ClusterInvalidOperationError, - ClusterNotFoundError, -) -from ....utils.db import to_clusters_db -from ._base import BaseRepository - -logger = logging.getLogger(__name__) - - -async def _clusters_from_cluster_ids( - conn: connection.SAConnection, - cluster_ids: Iterable[PositiveInt], - offset: int = 0, - limit: int | None = None, -) -> list[Cluster]: - cluster_id_to_cluster: dict[PositiveInt, Cluster] = {} - async for row in conn.execute( - sa.select( - clusters, - cluster_to_groups.c.gid, - cluster_to_groups.c.read, - cluster_to_groups.c.write, - cluster_to_groups.c.delete, - ) - .select_from( - clusters.join( - cluster_to_groups, - clusters.c.id == cluster_to_groups.c.cluster_id, - ) - ) - .where(clusters.c.id.in_(cluster_ids)) - .offset(offset) - .limit(limit) - ): - cluster_access_rights = { - row[cluster_to_groups.c.gid]: ClusterAccessRights( - **{ - "read": row[cluster_to_groups.c.read], - "write": row[cluster_to_groups.c.write], - "delete": row[cluster_to_groups.c.delete], - } - ) - } - - cluster_id = row[clusters.c.id] - if cluster_id not in cluster_id_to_cluster: - cluster_id_to_cluster[cluster_id] = Cluster( - id=cluster_id, - name=row[clusters.c.name], - description=row[clusters.c.description], - type=row[clusters.c.type], - owner=row[clusters.c.owner], - endpoint=row[clusters.c.endpoint], - authentication=row[clusters.c.authentication], - thumbnail=row[clusters.c.thumbnail], - access_rights=cluster_access_rights, - ) - else: - cluster_id_to_cluster[cluster_id].access_rights.update( - cluster_access_rights - ) - - return list(cluster_id_to_cluster.values()) - - -async def _compute_user_access_rights( - conn: connection.SAConnection, user_id: UserID, cluster: Cluster -) -> ClusterAccessRights: - result = await conn.execute( - sa.select(user_to_groups.c.gid, groups.c.type) - .where(user_to_groups.c.uid == user_id) - 
.order_by(groups.c.type) - .join(groups) - ) - user_groups = await result.fetchall() - assert user_groups # nosec - # get the primary group first, as it has precedence - if ( - primary_group_row := next( - filter(lambda ugrp: ugrp[1] == GroupType.PRIMARY, user_groups), None - ) - ) and (primary_grp_rights := cluster.access_rights.get(primary_group_row.gid)): - return primary_grp_rights - - solved_rights = CLUSTER_NO_RIGHTS.model_dump() - for group_row in filter(lambda ugrp: ugrp[1] != GroupType.PRIMARY, user_groups): - grp_access = cluster.access_rights.get(group_row.gid, CLUSTER_NO_RIGHTS).model_dump() - for operation in ["read", "write", "delete"]: - solved_rights[operation] |= grp_access[operation] - return ClusterAccessRights(**solved_rights) - - -class ClustersRepository(BaseRepository): - async def create_cluster(self, user_id, new_cluster: ClusterCreate) -> Cluster: - async with self.db_engine.acquire() as conn: - user_primary_gid = await conn.scalar( - sa.select(users.c.primary_gid).where(users.c.id == user_id) - ) - new_cluster.owner = user_primary_gid - new_cluster_id = await conn.scalar( - sa.insert( - clusters, values=to_clusters_db(new_cluster, only_update=False) - ).returning(clusters.c.id) - ) - assert new_cluster_id # nosec - return await self.get_cluster(user_id, new_cluster_id) - - async def list_clusters(self, user_id: UserID) -> list[Cluster]: - async with self.db_engine.acquire() as conn: - result = await conn.execute( - sa.select(clusters.c.id) - .distinct() - .where( - cluster_to_groups.c.gid.in_( - # get the groups of the user where he/she has read access - sa.select(groups.c.gid) - .where(user_to_groups.c.uid == user_id) - .order_by(groups.c.gid) - .select_from(groups.join(user_to_groups)) - ) - & cluster_to_groups.c.read - ) - .join(cluster_to_groups) - ) - retrieved_clusters = [] - if cluster_ids := await result.fetchall(): - retrieved_clusters = await _clusters_from_cluster_ids( - conn, {c.id for c in cluster_ids} - ) - return retrieved_clusters - - async def get_cluster(self, user_id: UserID, cluster_id: ClusterID) -> Cluster: - async with self.db_engine.acquire() as conn: - clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) - if not clusters_list: - raise ClusterNotFoundError(cluster_id=cluster_id) - the_cluster = clusters_list[0] - - access_rights = await _compute_user_access_rights( - conn, user_id, the_cluster - ) - logger.debug( - "found cluster in DB: %s, with computed %s", - f"{the_cluster=}", - f"{access_rights=}", - ) - if not access_rights.read: - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - return the_cluster - - async def update_cluster( # pylint: disable=too-many-branches - self, user_id: UserID, cluster_id: ClusterID, updated_cluster: ClusterPatch - ) -> Cluster: - async with self.db_engine.acquire() as conn: - clusters_list: list[Cluster] = await _clusters_from_cluster_ids( - conn, {cluster_id} - ) - if len(clusters_list) != 1: - raise ClusterNotFoundError(cluster_id=cluster_id) - the_cluster = clusters_list[0] - - this_user_access_rights = await _compute_user_access_rights( - conn, user_id, the_cluster - ) - logger.debug( - "found cluster in DB: %s, with computed %s", - f"{the_cluster=}", - f"{this_user_access_rights=}", - ) - - if not this_user_access_rights.write: - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - if updated_cluster.owner and updated_cluster.owner != the_cluster.owner: - # if the user wants to change the owner, we need more rights here - if this_user_access_rights != 
CLUSTER_ADMIN_RIGHTS: - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - # ensure the new owner has admin rights, too - if not updated_cluster.access_rights: - updated_cluster.access_rights = { - updated_cluster.owner: CLUSTER_ADMIN_RIGHTS - } - else: - updated_cluster.access_rights[ - updated_cluster.owner - ] = CLUSTER_ADMIN_RIGHTS - - # resolve access rights changes - resolved_access_rights = the_cluster.access_rights - if updated_cluster.access_rights: - # if the user is a manager he/she may ONLY add/remove users - if this_user_access_rights == CLUSTER_MANAGER_RIGHTS: - for grp, rights in updated_cluster.access_rights.items(): - if grp == the_cluster.owner or rights not in [ - CLUSTER_USER_RIGHTS, - CLUSTER_NO_RIGHTS, - ]: - # a manager cannot change the owner abilities or create - # managers/admins - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - resolved_access_rights.update(updated_cluster.access_rights) - # ensure the user is not trying to mess around owner admin rights - if ( - resolved_access_rights.setdefault( - the_cluster.owner, CLUSTER_ADMIN_RIGHTS - ) - != CLUSTER_ADMIN_RIGHTS - ): - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - # ok we can update now - try: - await conn.execute( - sa.update(clusters) - .where(clusters.c.id == the_cluster.id) - .values(to_clusters_db(updated_cluster, only_update=True)) - ) - except psycopg2.DatabaseError as e: - raise ClusterInvalidOperationError(cluster_id=cluster_id) from e - # upsert the rights - if updated_cluster.access_rights: - for grp, rights in resolved_access_rights.items(): - insert_stmt = pg_insert(cluster_to_groups).values( - **rights.model_dump(by_alias=True), gid=grp, cluster_id=the_cluster.id - ) - on_update_stmt = insert_stmt.on_conflict_do_update( - index_elements=[ - cluster_to_groups.c.cluster_id, - cluster_to_groups.c.gid, - ], - set_=rights.model_dump(by_alias=True), - ) - await conn.execute(on_update_stmt) - - clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) - if not clusters_list: - raise ClusterNotFoundError(cluster_id=cluster_id) - return clusters_list[0] - - async def delete_cluster(self, user_id: UserID, cluster_id: ClusterID) -> None: - async with self.db_engine.acquire() as conn: - clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) - if not clusters_list: - raise ClusterNotFoundError(cluster_id=cluster_id) - the_cluster = clusters_list[0] - - access_rights = await _compute_user_access_rights( - conn, user_id, the_cluster - ) - logger.debug( - "found cluster in DB: %s, with computed %s", - f"{the_cluster=}", - f"{access_rights=}", - ) - if not access_rights.delete: - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - await conn.execute(sa.delete(clusters).where(clusters.c.id == cluster_id)) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py index 13e01a4276f..46cc7669cde 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py @@ -5,7 +5,6 @@ import arrow import sqlalchemy as sa from aiopg.sa.result import RowProxy -from models_library.clusters import DEFAULT_CLUSTER_ID, ClusterID from models_library.projects import ProjectID from models_library.projects_state import RunningState from models_library.users import UserID @@ -43,10 +42,6 @@ 
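On the persistence side, `CompRunsRepository.create` (in the comp_runs.py hunks just below) loses its `cluster_id` parameter and simply writes `NULL` into the `cluster_id` column. A hedged usage sketch of the new call:

```python
# Hedged sketch -- arguments are placeholders for values the scheduler manager
# already has in scope; the only relevant change is the dropped cluster_id kwarg.
from simcore_service_director_v2.modules.db.repositories.comp_runs import CompRunsRepository


async def create_run_example(db_engine, user_id, project_id, run_metadata):
    return await CompRunsRepository.instance(db_engine).create(
        user_id=user_id,
        project_id=project_id,
        metadata=run_metadata,
        use_on_demand_clusters=False,
    )
```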
("clusters", "cluster_id"), ), } -_DEFAULT_FK_CONSTRAINT_TO_ERROR: Final[tuple[type[DirectorError], tuple]] = ( - DirectorError, - (), -) class CompRunsRepository(BaseRepository): @@ -154,7 +149,6 @@ async def create( *, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, iteration: PositiveInt | None = None, metadata: RunMetadataDict, use_on_demand_clusters: bool, @@ -178,9 +172,7 @@ async def create( .values( user_id=user_id, project_uuid=f"{project_id}", - cluster_id=( - cluster_id if cluster_id != DEFAULT_CLUSTER_ID else None - ), + cluster_id=None, iteration=iteration, result=RUNNING_STATE_TO_DB[RunningState.PUBLISHED], started=datetime.datetime.now(tz=datetime.UTC), diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py index c5fd0819fcd..2619d9ce98f 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py @@ -6,6 +6,7 @@ import aiopg.sa import arrow from dask_task_models_library.container_tasks.protocol import ContainerEnvsDict +from models_library.api_schemas_catalog.services import ServiceGet from models_library.api_schemas_clusters_keeper.ec2_instances import EC2InstanceTypeGet from models_library.api_schemas_directorv2.services import ( NodeRequirements, @@ -89,7 +90,7 @@ async def _get_service_details( node.version, product_name, ) - obj: ServiceMetaDataPublished = ServiceMetaDataPublished(**service_details) + obj: ServiceMetaDataPublished = ServiceGet(**service_details) return obj @@ -144,12 +145,12 @@ async def _get_node_infos( None, ) - result: tuple[ - ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels - ] = await asyncio.gather( - _get_service_details(catalog_client, user_id, product_name, node), - director_client.get_service_extras(node.key, node.version), - director_client.get_service_labels(node), + result: tuple[ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels] = ( + await asyncio.gather( + _get_service_details(catalog_client, user_id, product_name, node), + director_client.get_service_extras(node.key, node.version), + director_client.get_service_labels(node), + ) ) return result @@ -245,9 +246,9 @@ async def _get_pricing_and_hardware_infos( return pricing_info, hardware_info -_RAM_SAFE_MARGIN_RATIO: Final[ - float -] = 0.1 # NOTE: machines always have less available RAM than advertised +_RAM_SAFE_MARGIN_RATIO: Final[float] = ( + 0.1 # NOTE: machines always have less available RAM than advertised +) _CPUS_SAFE_MARGIN: Final[float] = 0.1 @@ -265,11 +266,11 @@ async def _update_project_node_resources_from_hardware_info( if not hardware_info.aws_ec2_instances: return try: - unordered_list_ec2_instance_types: list[ - EC2InstanceTypeGet - ] = await get_instance_type_details( - rabbitmq_rpc_client, - instance_type_names=set(hardware_info.aws_ec2_instances), + unordered_list_ec2_instance_types: list[EC2InstanceTypeGet] = ( + await get_instance_type_details( + rabbitmq_rpc_client, + instance_type_names=set(hardware_info.aws_ec2_instances), + ) ) assert unordered_list_ec2_instance_types # nosec diff --git a/services/director-v2/src/simcore_service_director_v2/utils/dask.py b/services/director-v2/src/simcore_service_director_v2/utils/dask.py index afb1e0b3770..13967b0c5da 100644 --- 
a/services/director-v2/src/simcore_service_director_v2/utils/dask.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/dask.py @@ -5,7 +5,6 @@ from typing import Any, Final, NoReturn, ParamSpec, TypeVar, cast from uuid import uuid4 -import dask_gateway # type: ignore[import-untyped] import distributed from aiopg.sa.engine import Engine from common_library.json_serialization import json_dumps @@ -22,7 +21,6 @@ ) from fastapi import FastAPI from models_library.api_schemas_directorv2.services import NodeRequirements -from models_library.clusters import ClusterID from models_library.docker import DockerLabelKey, StandardSimcoreDockerLabels from models_library.errors import ErrorDict from models_library.projects import ProjectID, ProjectIDStr @@ -515,14 +513,6 @@ def check_scheduler_status(client: distributed.Client): raise ComputationalBackendNotConnectedError -_LARGE_NUMBER_OF_WORKERS: Final[int] = 10000 - - -async def check_maximize_workers(cluster: dask_gateway.GatewayCluster | None) -> None: - if cluster: - await cluster.scale(_LARGE_NUMBER_OF_WORKERS) - - def _can_task_run_on_worker( task_resources: dict[str, Any], worker_resources: dict[str, Any] ) -> bool: @@ -573,7 +563,6 @@ def check_if_cluster_is_able_to_run_pipeline( scheduler_info: dict[str, Any], task_resources: dict[str, Any], node_image: Image, - cluster_id: ClusterID, ) -> None: _logger.debug( @@ -592,8 +581,7 @@ def check_if_cluster_is_able_to_run_pipeline( all_available_resources_in_cluster = dict(cluster_resources_counter) _logger.debug( - "Dask scheduler total available resources in cluster %s: %s, task needed resources %s", - cluster_id, + "Dask scheduler total available resources in cluster: %s, task needed resources %s", json_dumps(all_available_resources_in_cluster, indent=2), json_dumps(task_resources, indent=2), ) @@ -616,7 +604,6 @@ def check_if_cluster_is_able_to_run_pipeline( node_id=node_id, service_name=node_image.name, service_version=node_image.tag, - cluster_id=cluster_id, task_resources=task_resources, cluster_resources=cluster_resources, ) @@ -628,7 +615,6 @@ def check_if_cluster_is_able_to_run_pipeline( service_name=node_image.name, service_version=node_image.tag, service_requested_resources=_to_human_readable_resource_values(task_resources), - cluster_id=cluster_id, cluster_available_resources=[ _to_human_readable_resource_values(worker.get("resources", None)) for worker in workers.values() diff --git a/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py b/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py index 964f38e6484..0ec66eeabdd 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py @@ -2,41 +2,18 @@ import os import socket from collections.abc import Awaitable, Callable -from contextlib import suppress from dataclasses import dataclass, field -from typing import Final, Union -import dask_gateway # type: ignore[import-untyped] import distributed -import httpx -from aiohttp import ClientConnectionError, ClientResponseError from dask_task_models_library.container_tasks.events import ( TaskLogEvent, TaskProgressEvent, ) -from models_library.clusters import ( - ClusterAuthentication, - InternalClusterAuthentication, - JupyterHubTokenAuthentication, - KerberosAuthentication, - NoAuthentication, - SimpleAuthentication, - TLSAuthentication, -) +from models_library.clusters import ClusterAuthentication, 
TLSAuthentication from pydantic import AnyUrl -from ..core.errors import ( - ComputationalSchedulerError, - ConfigurationError, - DaskClientRequestError, - DaskClusterError, - DaskGatewayServerError, -) -from .dask import check_maximize_workers, wrap_client_async_routine - -DaskGatewayAuths = Union[ - dask_gateway.BasicAuth, dask_gateway.KerberosAuth, dask_gateway.JupyterHubAuth -] +from ..core.errors import ConfigurationError +from .dask import wrap_client_async_routine @dataclass @@ -52,8 +29,6 @@ class TaskHandlers: class DaskSubSystem: client: distributed.Client scheduler_id: str - gateway: dask_gateway.Gateway | None - gateway_cluster: dask_gateway.GatewayCluster | None progress_sub: distributed.Sub = field(init=False) logs_sub: distributed.Sub = field(init=False) @@ -69,14 +44,10 @@ async def close(self) -> None: # closing the client appears to fix the issue and the dask-scheduler remains happy if self.client: await wrap_client_async_routine(self.client.close()) - if self.gateway_cluster: - await wrap_client_async_routine(self.gateway_cluster.close()) - if self.gateway: - await wrap_client_async_routine(self.gateway.close()) -async def _connect_to_dask_scheduler( - endpoint: AnyUrl, authentication: InternalClusterAuthentication +async def connect_to_dask_scheduler( + endpoint: AnyUrl, authentication: ClusterAuthentication ) -> DaskSubSystem: try: security = distributed.Security() @@ -93,162 +64,7 @@ async def _connect_to_dask_scheduler( name=f"director-v2_{socket.gethostname()}_{os.getpid()}", security=security, ) - return DaskSubSystem( - client=client, - scheduler_id=client.scheduler_info()["id"], - gateway=None, - gateway_cluster=None, - ) + return DaskSubSystem(client=client, scheduler_id=client.scheduler_info()["id"]) except TypeError as exc: msg = f"Scheduler has invalid configuration: {endpoint=}" raise ConfigurationError(msg=msg) from exc - - -async def _connect_with_gateway_and_create_cluster( - endpoint: AnyUrl, auth_params: ClusterAuthentication -) -> DaskSubSystem: - try: - logger.debug( - "connecting with gateway at %s with %s", f"{endpoint!r}", f"{auth_params=}" - ) - gateway_auth = await get_gateway_auth_from_params(auth_params) - gateway = dask_gateway.Gateway( - address=f"{endpoint}", auth=gateway_auth, asynchronous=True - ) - - try: - # if there is already a cluster that means we can re-connect to it, - # and IT SHALL BE the first in the list - cluster_reports_list = await gateway.list_clusters() - logger.debug( - "current clusters on the gateway: %s", f"{cluster_reports_list=}" - ) - cluster = None - if cluster_reports_list: - assert ( - len(cluster_reports_list) == 1 - ), "More than 1 cluster at this location, that is unexpected!!" 
# nosec - cluster = await gateway.connect( - cluster_reports_list[0].name, shutdown_on_close=False - ) - logger.debug("connected to %s", f"{cluster=}") - else: - cluster = await gateway.new_cluster(shutdown_on_close=False) - logger.debug("created %s", f"{cluster=}") - assert cluster # nosec - logger.info("Cluster dashboard available: %s", cluster.dashboard_link) - await check_maximize_workers(cluster) - logger.info("Cluster workers maximized") - client = await cluster.get_client() - assert client # nosec - return DaskSubSystem( - client=client, - scheduler_id=client.scheduler_info()["id"], - gateway=gateway, - gateway_cluster=cluster, - ) - except Exception: - # cleanup - with suppress(Exception): - await wrap_client_async_routine(gateway.close()) - raise - - except TypeError as exc: - msg = f"Cluster has invalid configuration: {endpoint=}, {auth_params=}" - raise ConfigurationError(msg=msg) from exc - except ValueError as exc: - # this is when a 404=NotFound,422=MalformedData comes up - raise DaskClientRequestError(endpoint=endpoint, error=exc) from exc - except dask_gateway.GatewayClusterError as exc: - # this is when a 409=Conflict/Cannot complete request comes up - raise DaskClusterError(endpoint=endpoint, error=exc) from exc - except dask_gateway.GatewayServerError as exc: - # this is when a 500 comes up - raise DaskGatewayServerError(endpoint=endpoint, error=exc) from exc - - -def _is_dask_scheduler(authentication: ClusterAuthentication) -> bool: - return isinstance(authentication, NoAuthentication | TLSAuthentication) - - -async def create_internal_client_based_on_auth( - endpoint: AnyUrl, authentication: ClusterAuthentication -) -> DaskSubSystem: - if _is_dask_scheduler(authentication): - # if no auth then we go for a standard scheduler connection - return await _connect_to_dask_scheduler(endpoint, authentication) # type: ignore[arg-type] # _is_dask_scheduler checks already that it is a valid type - # we do have some auth, so it is going through a gateway - return await _connect_with_gateway_and_create_cluster(endpoint, authentication) - - -async def get_gateway_auth_from_params( - auth_params: ClusterAuthentication, -) -> DaskGatewayAuths: - try: - if isinstance(auth_params, SimpleAuthentication): - return dask_gateway.BasicAuth( - username=auth_params.username, - password=auth_params.password.get_secret_value(), - ) - if isinstance(auth_params, KerberosAuthentication): - return dask_gateway.KerberosAuth() - if isinstance(auth_params, JupyterHubTokenAuthentication): - return dask_gateway.JupyterHubAuth(auth_params.api_token) - except (TypeError, ValueError) as exc: - msg = f"Cluster has invalid configuration: {auth_params}" - raise ConfigurationError(msg=msg) from exc - - msg = f"Cluster has invalid configuration: {auth_params=}" - raise ConfigurationError(msg=msg) - - -_PING_TIMEOUT_S: Final[int] = 5 -_DASK_SCHEDULER_RUNNING_STATE: Final[str] = "running" - - -async def test_scheduler_endpoint( - endpoint: AnyUrl, authentication: ClusterAuthentication -) -> None: - """This method will try to connect to a gateway endpoint and raise a ConfigurationError in case of problem - - :raises ConfigurationError: contians some information as to why the connection failed - """ - try: - if _is_dask_scheduler(authentication): - async with distributed.Client( - address=f"{endpoint}", timeout=f"{_PING_TIMEOUT_S}", asynchronous=True - ) as dask_client: - if dask_client.status != _DASK_SCHEDULER_RUNNING_STATE: - msg = "internal scheduler is not running!" 
- raise ComputationalSchedulerError(msg=msg) - - else: - gateway_auth = await get_gateway_auth_from_params(authentication) - async with dask_gateway.Gateway( - address=f"{endpoint}", auth=gateway_auth, asynchronous=True - ) as gateway: - # this does not yet create any connection to the underlying gateway. - # since using a fct from dask gateway is going to timeout after a long time - # we bypass the pinging by calling in ourselves with a short timeout - async with httpx.AsyncClient( - transport=httpx.AsyncHTTPTransport(retries=2) - ) as httpx_client: - # try to get something the api shall return fast - response = await httpx_client.get( - f"{endpoint}/api/version", timeout=_PING_TIMEOUT_S - ) - response.raise_for_status() - # now we try to list the clusters to check the gateway responds in a sensible way - await gateway.list_clusters() - - logger.debug("Pinging %s, succeeded", f"{endpoint=}") - except ( - dask_gateway.GatewayServerError, - ClientConnectionError, - ClientResponseError, - httpx.HTTPError, - ComputationalSchedulerError, - ) as exc: - logger.debug("Pinging %s, failed: %s", f"{endpoint=}", f"{exc=!r}") - msg = f"Could not connect to cluster in {endpoint}: error: {exc}" - raise ConfigurationError(msg=msg) from exc diff --git a/services/director-v2/src/simcore_service_director_v2/utils/db.py b/services/director-v2/src/simcore_service_director_v2/utils/db.py index af944c11dff..43e3a371089 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/db.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/db.py @@ -1,9 +1,6 @@ import logging -from typing import Any -from common_library.serialization import model_dump_with_secrets from fastapi import FastAPI -from models_library.clusters import BaseCluster from models_library.projects_state import RunningState from simcore_postgres_database.models.comp_pipeline import StateType @@ -28,17 +25,5 @@ _logger = logging.getLogger(__name__) -def to_clusters_db(cluster: BaseCluster, *, only_update: bool) -> dict[str, Any]: - db_model: dict[str, Any] = model_dump_with_secrets( - cluster, - show_secrets=True, - by_alias=True, - exclude={"id", "access_rights"}, - exclude_unset=only_update, - exclude_none=only_update, - ) - return db_model - - def get_repository(app: FastAPI, repo_type: type[RepoType]) -> RepoType: return get_base_repository(engine=app.state.engine, repo_type=repo_type) diff --git a/services/director-v2/tests/conftest.py b/services/director-v2/tests/conftest.py index 72b94ec3262..231debc371f 100644 --- a/services/director-v2/tests/conftest.py +++ b/services/director-v2/tests/conftest.py @@ -35,7 +35,6 @@ from starlette.testclient import ASGI3App, TestClient pytest_plugins = [ - "pytest_simcore.dask_gateway", "pytest_simcore.dask_scheduler", "pytest_simcore.db_entries_mocks", "pytest_simcore.docker_compose", diff --git a/services/director-v2/tests/helpers/shared_comp_utils.py b/services/director-v2/tests/helpers/shared_comp_utils.py index 8ee507f4a2b..2aed8e4525b 100644 --- a/services/director-v2/tests/helpers/shared_comp_utils.py +++ b/services/director-v2/tests/helpers/shared_comp_utils.py @@ -4,7 +4,6 @@ import httpx from models_library.api_schemas_directorv2.comp_tasks import ComputationGet -from models_library.clusters import ClusterID from models_library.projects import ProjectAtDB from models_library.projects_pipeline import PipelineDetails from models_library.projects_state import RunningState @@ -26,8 +25,7 @@ async def assert_computation_task_out_obj( exp_task_state: RunningState, 
exp_pipeline_details: PipelineDetails, iteration: PositiveInt | None, - cluster_id: ClusterID | None, -): +) -> None: assert task_out.id == project.uuid assert task_out.state == exp_task_state assert task_out.url.path == f"/v2/computations/{project.uuid}" @@ -41,7 +39,6 @@ async def assert_computation_task_out_obj( else: assert task_out.stop_url is None assert task_out.iteration == iteration - assert task_out.cluster_id == cluster_id # check pipeline details contents received_task_out_pipeline = task_out.pipeline_details.model_dump() expected_task_out_pipeline = exp_pipeline_details.model_dump() diff --git a/services/director-v2/tests/integration/01/test_computation_api.py b/services/director-v2/tests/integration/01/test_computation_api.py index 053431fc34d..431939c31dd 100644 --- a/services/director-v2/tests/integration/01/test_computation_api.py +++ b/services/director-v2/tests/integration/01/test_computation_api.py @@ -21,7 +21,7 @@ assert_computation_task_out_obj, ) from models_library.api_schemas_directorv2.comp_tasks import ComputationGet -from models_library.clusters import DEFAULT_CLUSTER_ID, InternalClusterAuthentication +from models_library.clusters import ClusterAuthentication from models_library.projects import ProjectAtDB from models_library.projects_nodes import NodeState from models_library.projects_nodes_io import NodeID @@ -58,7 +58,7 @@ def mock_env( monkeypatch: pytest.MonkeyPatch, dynamic_sidecar_docker_image_name: str, dask_scheduler_service: str, - dask_scheduler_auth: InternalClusterAuthentication, + dask_scheduler_auth: ClusterAuthentication, ) -> None: # used by the client fixture setenvs_from_dict( @@ -463,7 +463,6 @@ def _convert_to_pipeline_details( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=expected_pipeline_details, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # now wait for the computation to finish @@ -479,7 +478,6 @@ def _convert_to_pipeline_details( exp_task_state=RunningState.SUCCESS, exp_pipeline_details=expected_pipeline_details_after_run, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # run it a second time. 
the tasks are all up-to-date, nothing should be run @@ -531,7 +529,6 @@ def _convert_to_pipeline_details( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=expected_pipeline_details_forced, iteration=2, - cluster_id=DEFAULT_CLUSTER_ID, ) # now wait for the computation to finish @@ -572,7 +569,6 @@ async def test_run_computation( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=fake_workbench_computational_pipeline_details, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # wait for the computation to start @@ -595,7 +591,6 @@ async def test_run_computation( exp_task_state=RunningState.SUCCESS, exp_pipeline_details=fake_workbench_computational_pipeline_details_completed, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # NOTE: currently the webserver is the one updating the projects table so we need to fake this by copying the run_hash @@ -642,7 +637,6 @@ async def test_run_computation( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=expected_pipeline_details_forced, # NOTE: here the pipeline already ran so its states are different iteration=2, - cluster_id=DEFAULT_CLUSTER_ID, ) # wait for the computation to finish @@ -655,7 +649,6 @@ async def test_run_computation( exp_task_state=RunningState.SUCCESS, exp_pipeline_details=fake_workbench_computational_pipeline_details_completed, iteration=2, - cluster_id=DEFAULT_CLUSTER_ID, ) @@ -692,7 +685,6 @@ async def test_abort_computation( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=fake_workbench_computational_pipeline_details, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # wait until the pipeline is started @@ -765,7 +757,6 @@ async def test_update_and_delete_computation( exp_task_state=RunningState.NOT_STARTED, exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started, iteration=None, - cluster_id=None, ) # update the pipeline @@ -784,7 +775,6 @@ async def test_update_and_delete_computation( exp_task_state=RunningState.NOT_STARTED, exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started, iteration=None, - cluster_id=None, ) # update the pipeline @@ -803,7 +793,6 @@ async def test_update_and_delete_computation( exp_task_state=RunningState.NOT_STARTED, exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started, iteration=None, - cluster_id=None, ) # start it now @@ -821,7 +810,6 @@ async def test_update_and_delete_computation( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=fake_workbench_computational_pipeline_details, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # wait until the pipeline is started diff --git a/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py b/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py index e43f23bc9dd..b1c99b772b9 100644 --- a/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py +++ b/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py @@ -29,7 +29,7 @@ assert_computation_task_out_obj, ) from models_library.api_schemas_directorv2.comp_tasks import ComputationGet -from models_library.clusters import DEFAULT_CLUSTER_ID, InternalClusterAuthentication +from models_library.clusters import ClusterAuthentication from models_library.projects import ( Node, NodesDict, @@ -360,7 +360,7 @@ def mock_env( network_name: str, dev_feature_r_clone_enabled: str, dask_scheduler_service: str, - dask_scheduler_auth: InternalClusterAuthentication, + dask_scheduler_auth: 
ClusterAuthentication, minimal_configuration: None, patch_storage_setup: None, ) -> None: @@ -983,7 +983,6 @@ async def test_nodeports_integration( exp_task_state=RunningState.SUCCESS, exp_pipeline_details=PipelineDetails.model_validate(fake_dy_success), iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) update_project_workbench_with_comp_tasks(str(current_study.uuid)) diff --git a/services/director-v2/tests/unit/_dask_helpers.py b/services/director-v2/tests/unit/_dask_helpers.py deleted file mode 100644 index 9bf9a739946..00000000000 --- a/services/director-v2/tests/unit/_dask_helpers.py +++ /dev/null @@ -1,13 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument - -from typing import NamedTuple - -from dask_gateway_server.app import DaskGateway - - -class DaskGatewayServer(NamedTuple): - address: str - proxy_address: str - password: str - server: DaskGateway diff --git a/services/director-v2/tests/unit/conftest.py b/services/director-v2/tests/unit/conftest.py index cdf0751fab4..b305f6bcafd 100644 --- a/services/director-v2/tests/unit/conftest.py +++ b/services/director-v2/tests/unit/conftest.py @@ -3,7 +3,6 @@ import json import logging -import random import urllib.parse from collections.abc import AsyncIterable, Iterable, Iterator, Mapping from typing import Any @@ -20,7 +19,6 @@ ) from models_library.basic_types import PortInt from models_library.callbacks_mapping import CallbacksMapping -from models_library.clusters import ClusterID from models_library.generated_models.docker_rest_api import ( ServiceSpec as DockerServiceSpec, ) @@ -159,11 +157,6 @@ def scheduler_data( }[request.param] -@pytest.fixture -def cluster_id() -> ClusterID: - return random.randint(0, 10) - - @pytest.fixture(params=list(FileLinkType)) def tasks_file_link_type(request) -> FileLinkType: """parametrized fixture on all FileLinkType enum variants""" diff --git a/services/director-v2/tests/unit/test_models_clusters.py b/services/director-v2/tests/unit/test_models_clusters.py index b08a988fc68..ae0b17dd43e 100644 --- a/services/director-v2/tests/unit/test_models_clusters.py +++ b/services/director-v2/tests/unit/test_models_clusters.py @@ -1,52 +1,16 @@ -from pprint import pformat -from typing import Any - -import pytest from faker import Faker from models_library.api_schemas_directorv2.clusters import ( AvailableResources, - ClusterCreate, - ClusterPatch, Scheduler, UsedResources, Worker, WorkerMetrics, ) from models_library.clusters import ClusterTypeInModel -from pydantic import BaseModel, ByteSize, TypeAdapter +from pydantic import ByteSize, TypeAdapter from simcore_postgres_database.models.clusters import ClusterType -@pytest.mark.parametrize( - "model_cls", - [ClusterCreate, ClusterPatch], -) -def test_clusters_model_examples( - model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] -): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - -@pytest.mark.parametrize( - "model_cls", - [ - ClusterCreate, - ], -) -def test_cluster_creation_brings_default_thumbail( - model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] -): - for example in model_cls_examples.values(): - if "thumbnail" in example: - example.pop("thumbnail") - instance = model_cls(**example) - assert instance - assert instance.thumbnail - - def test_scheduler_constructor_with_default_has_correct_dict(faker: Faker): scheduler = Scheduler(status=faker.text()) assert 
scheduler.workers is not None diff --git a/services/director-v2/tests/unit/test_modules_dask_client.py b/services/director-v2/tests/unit/test_modules_dask_client.py index f45040c143a..83939689808 100644 --- a/services/director-v2/tests/unit/test_modules_dask_client.py +++ b/services/director-v2/tests/unit/test_modules_dask_client.py @@ -5,7 +5,6 @@ # pylint:disable=too-many-arguments # pylint: disable=reimported import asyncio -import datetime import functools import traceback from collections.abc import AsyncIterator, Awaitable, Callable, Coroutine @@ -17,7 +16,6 @@ import distributed import pytest import respx -from _dask_helpers import DaskGatewayServer from dask.distributed import get_worker from dask_task_models_library.container_tasks.docker import DockerBasicAuth from dask_task_models_library.container_tasks.errors import TaskCancelledError @@ -42,22 +40,15 @@ from faker import Faker from fastapi.applications import FastAPI from models_library.api_schemas_directorv2.services import NodeRequirements -from models_library.api_schemas_storage import LinkType -from models_library.clusters import ( - ClusterID, - ClusterTypeInModel, - NoAuthentication, - SimpleAuthentication, -) +from models_library.clusters import ClusterTypeInModel, NoAuthentication from models_library.docker import to_simcore_runtime_docker_label_key from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from models_library.resource_tracker import HardwareInfo from models_library.users import UserID -from pydantic import AnyUrl, ByteSize, SecretStr, TypeAdapter +from pydantic import AnyUrl, ByteSize, TypeAdapter from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict -from servicelib.background_task import periodic_task from settings_library.s3 import S3Settings from simcore_sdk.node_ports_v2 import FileLinkType from simcore_service_director_v2.core.errors import ( @@ -163,7 +154,9 @@ async def factory() -> DaskClient: client = await DaskClient.create( app=minimal_app, settings=minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, - endpoint=TypeAdapter(AnyUrl).validate_python(dask_spec_local_cluster.scheduler_address), + endpoint=TypeAdapter(AnyUrl).validate_python( + dask_spec_local_cluster.scheduler_address + ), authentication=NoAuthentication(), tasks_file_link_type=tasks_file_link_type, cluster_type=ClusterTypeInModel.ON_PREMISE, @@ -177,8 +170,6 @@ async def factory() -> DaskClient: assert not client._subscribed_tasks # noqa: SLF001 assert client.backend.client - assert not client.backend.gateway - assert not client.backend.gateway_cluster scheduler_infos = client.backend.client.scheduler_info() # type: ignore print( f"--> Connected to scheduler via client {client=} to scheduler {scheduler_infos=}" @@ -191,66 +182,13 @@ async def factory() -> DaskClient: print(f"<-- Disconnected scheduler clients {created_clients=}") -@pytest.fixture -async def create_dask_client_from_gateway( - _minimal_dask_config: None, - local_dask_gateway_server: DaskGatewayServer, - minimal_app: FastAPI, - tasks_file_link_type: FileLinkType, -) -> AsyncIterator[Callable[[], Awaitable[DaskClient]]]: - created_clients = [] - - async def factory() -> DaskClient: - client = await DaskClient.create( - app=minimal_app, - settings=minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, - endpoint=TypeAdapter(AnyUrl).validate_python(local_dask_gateway_server.address), - authentication=SimpleAuthentication( - username="pytest_user", - 
password=SecretStr(local_dask_gateway_server.password), - ), - tasks_file_link_type=tasks_file_link_type, - cluster_type=ClusterTypeInModel.AWS, - ) - assert client - assert client.app == minimal_app - assert ( - client.settings - == minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND - ) - assert not client._subscribed_tasks # noqa: SLF001 - - assert client.backend.client - assert client.backend.gateway - assert client.backend.gateway_cluster - - scheduler_infos = client.backend.client.scheduler_info() - assert scheduler_infos - print(f"--> Connected to gateway {client.backend.gateway=}") - print(f"--> Cluster {client.backend.gateway_cluster=}") - print(f"--> Client {client=}") - print( - f"--> Cluster dashboard link {client.backend.gateway_cluster.dashboard_link}" - ) - created_clients.append(client) - return client - - yield factory - await asyncio.gather(*[client.delete() for client in created_clients]) - print(f"<-- Disconnected gateway clients {created_clients=}") - - -@pytest.fixture( - params=["create_dask_client_from_scheduler", "create_dask_client_from_gateway"] -) +@pytest.fixture(params=["create_dask_client_from_scheduler"]) async def dask_client( create_dask_client_from_scheduler: Callable[[], Awaitable[DaskClient]], - create_dask_client_from_gateway: Callable[[], Awaitable[DaskClient]], request, ) -> DaskClient: client: DaskClient = await { "create_dask_client_from_scheduler": create_dask_client_from_scheduler, - "create_dask_client_from_gateway": create_dask_client_from_gateway, }[request.param]() try: @@ -495,7 +433,6 @@ async def test_send_computation_task( user_id: UserID, project_id: ProjectID, node_id: NodeID, - cluster_id: ClusterID, image_params: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -543,7 +480,6 @@ def fake_sidecar_fct( node_id_to_job_ids = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=functools.partial( @@ -614,7 +550,6 @@ async def test_computation_task_is_persisted_on_dask_scheduler( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -651,7 +586,6 @@ def fake_sidecar_fct( published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_sidecar_fct, @@ -701,7 +635,6 @@ async def test_abort_computation_tasks( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -742,7 +675,6 @@ def fake_remote_fct( published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_remote_fct, @@ -793,7 +725,6 @@ async def test_failed_task_returns_exceptions( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, gpu_image: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -815,7 +746,6 @@ def fake_failing_sidecar_fct( published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, 
tasks=gpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_failing_sidecar_fct, @@ -857,7 +787,6 @@ async def test_send_computation_task_with_missing_resources_raises( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -885,7 +814,6 @@ async def test_send_computation_task_with_missing_resources_raises( await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=None, @@ -903,7 +831,6 @@ async def test_send_computation_task_with_hardware_info_raises( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -916,7 +843,6 @@ async def test_send_computation_task_with_hardware_info_raises( await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=None, @@ -934,7 +860,6 @@ async def test_too_many_resources_send_computation_task( user_id: UserID, project_id: ProjectID, node_id: NodeID, - cluster_id: ClusterID, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, @@ -958,7 +883,6 @@ async def test_too_many_resources_send_computation_task( await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=fake_task, callback=mocked_user_completed_cb, remote_fct=None, @@ -971,11 +895,9 @@ async def test_too_many_resources_send_computation_task( async def test_disconnected_backend_raises_exception( dask_spec_local_cluster: SpecCluster, - local_dask_gateway_server: DaskGatewayServer, dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, cpu_image: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -985,13 +907,10 @@ async def test_disconnected_backend_raises_exception( ): # DISCONNECT THE CLUSTER await dask_spec_local_cluster.close() # type: ignore - await local_dask_gateway_server.server.cleanup() - # with pytest.raises(ComputationalBackendNotConnectedError): await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=cpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=None, @@ -1009,7 +928,6 @@ async def test_changed_scheduler_raises_exception( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, cpu_image: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -1041,7 +959,6 @@ async def test_changed_scheduler_raises_exception( await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=cpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=None, @@ -1051,13 +968,11 @@ async def test_changed_scheduler_raises_exception( mocked_user_completed_cb.assert_not_called() -@pytest.mark.flaky(max_runs=3) @pytest.mark.parametrize("fail_remote_fct", [False, True]) async def test_get_tasks_status( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, cpu_image: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -1088,7 +1003,6 @@ def 
fake_remote_fct( published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=cpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_remote_fct, @@ -1148,7 +1062,6 @@ async def test_dask_sub_handlers( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, cpu_image: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -1180,7 +1093,6 @@ def fake_remote_fct( published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=cpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_remote_fct, @@ -1219,7 +1131,6 @@ async def test_get_cluster_details( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, @@ -1256,7 +1167,6 @@ def fake_sidecar_fct( published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=functools.partial( @@ -1318,30 +1228,3 @@ def fake_sidecar_fct( ].used_resources assert all(res == 0.0 for res in currently_used_resources.values()) - - -@pytest.mark.skip(reason="manual testing") -@pytest.mark.parametrize("tasks_file_link_type", [LinkType.S3], indirect=True) -async def test_get_cluster_details_robust_to_worker_disappearing( - create_dask_client_from_gateway: Callable[[], Awaitable[DaskClient]] -): - """When running a high number of comp. services in a gateway, - one could observe an issue where getting the cluster used resources - would fail sometimes and generate a big amount of errors in the logs - due to dask worker disappearing or not completely ready. 
- This test kind of simulates this.""" - dask_client = await create_dask_client_from_gateway() - await dask_client.get_cluster_details() - - async def _scale_up_and_down() -> None: - assert dask_client.backend.gateway_cluster - await dask_client.backend.gateway_cluster.scale(40) - await asyncio.sleep(1) - await dask_client.backend.gateway_cluster.scale(1) - - async with periodic_task( - _scale_up_and_down, interval=datetime.timedelta(seconds=1), task_name="pytest" - ): - for _ in range(900): - await dask_client.get_cluster_details() - await asyncio.sleep(0.1) diff --git a/services/director-v2/tests/unit/test_modules_dask_clients_pool.py b/services/director-v2/tests/unit/test_modules_dask_clients_pool.py index 3bd1e318878..6f87cb4bcb1 100644 --- a/services/director-v2/tests/unit/test_modules_dask_clients_pool.py +++ b/services/director-v2/tests/unit/test_modules_dask_clients_pool.py @@ -3,27 +3,23 @@ # pylint:disable=redefined-outer-name +from collections.abc import AsyncIterator, Callable +from pathlib import Path from random import choice -from typing import Any, AsyncIterator, Callable, get_args +from typing import Any, cast, get_args from unittest import mock import pytest -from _dask_helpers import DaskGatewayServer -from common_library.json_serialization import json_dumps -from common_library.serialization import model_dump_with_secrets from distributed.deploy.spec import SpecCluster from faker import Faker +from fastapi import FastAPI from models_library.clusters import ( - DEFAULT_CLUSTER_ID, - Cluster, + BaseCluster, ClusterAuthentication, ClusterTypeInModel, - JupyterHubTokenAuthentication, - KerberosAuthentication, NoAuthentication, - SimpleAuthentication, + TLSAuthentication, ) -from pydantic import SecretStr from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict from simcore_postgres_database.models.clusters import ClusterType @@ -61,9 +57,9 @@ def test_dask_clients_pool_missing_raises_configuration_error( settings = AppSettings.create_from_envs() app = init_app(settings) - with TestClient(app, raise_server_exceptions=True) as client: + with TestClient(app, raise_server_exceptions=True): # noqa: SIM117 with pytest.raises(ConfigurationError): - DaskClientsPool.instance(client.app) + DaskClientsPool.instance(app) def test_dask_clients_pool_properly_setup_and_deleted( @@ -77,66 +73,36 @@ def test_dask_clients_pool_properly_setup_and_deleted( settings = AppSettings.create_from_envs() app = init_app(settings) - with TestClient(app, raise_server_exceptions=True) as client: + with TestClient(app, raise_server_exceptions=True): mocked_dask_clients_pool.create.assert_called_once() mocked_dask_clients_pool.delete.assert_called_once() @pytest.fixture -def fake_clusters(faker: Faker) -> Callable[[int], list[Cluster]]: - def creator(num_clusters: int) -> list[Cluster]: - fake_clusters = [] - for n in range(num_clusters): - fake_clusters.append( - Cluster.model_validate( - { - "id": faker.pyint(), - "name": faker.name(), - "type": ClusterType.ON_PREMISE, - "owner": faker.pyint(), - "endpoint": faker.uri(), - "authentication": choice( - [ - NoAuthentication(), - SimpleAuthentication( - username=faker.user_name(), - password=faker.password(), - ), - KerberosAuthentication(), - JupyterHubTokenAuthentication(api_token=faker.uuid4()), - ] - ), - } - ) - ) - return fake_clusters - - return creator - - -@pytest.fixture() -def default_scheduler_set_as_osparc_gateway( - local_dask_gateway_server: DaskGatewayServer, - monkeypatch: pytest.MonkeyPatch, - 
faker: Faker, -) -> Callable: - def creator(): - monkeypatch.setenv( - "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", - local_dask_gateway_server.proxy_address, - ) - monkeypatch.setenv( - "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH", - json_dumps( - model_dump_with_secrets( - SimpleAuthentication( - username=faker.user_name(), - password=SecretStr(local_dask_gateway_server.password), +def fake_clusters(faker: Faker) -> Callable[[int], list[BaseCluster]]: + def creator(num_clusters: int) -> list[BaseCluster]: + return [ + BaseCluster.model_validate( + { + "id": faker.pyint(), + "name": faker.name(), + "type": ClusterType.ON_PREMISE, + "owner": faker.pyint(), + "endpoint": faker.uri(), + "authentication": choice( # noqa: S311 + [ + NoAuthentication(), + TLSAuthentication( + tls_client_cert=Path(faker.file_path()), + tls_client_key=Path(faker.file_path()), + tls_ca_file=Path(faker.file_path()), + ), + ] ), - show_secrets=True, - ) - ), - ) + } + ) + for _n in range(num_clusters) + ] return creator @@ -157,17 +123,14 @@ def creator(): @pytest.fixture( params=[ "default_scheduler_set_as_dask_scheduler", - "default_scheduler_set_as_osparc_gateway", ] ) def default_scheduler( default_scheduler_set_as_dask_scheduler, - default_scheduler_set_as_osparc_gateway, request, ): { "default_scheduler_set_as_dask_scheduler": default_scheduler_set_as_dask_scheduler, - "default_scheduler_set_as_osparc_gateway": default_scheduler_set_as_osparc_gateway, }[request.param]() @@ -175,28 +138,30 @@ async def test_dask_clients_pool_acquisition_creates_client_on_demand( minimal_dask_config: None, mocker: MockerFixture, client: TestClient, - fake_clusters: Callable[[int], list[Cluster]], + fake_clusters: Callable[[int], list[BaseCluster]], ): assert client.app + the_app = cast(FastAPI, client.app) mocked_dask_client = mocker.patch( "simcore_service_director_v2.modules.dask_clients_pool.DaskClient", autospec=True, ) mocked_dask_client.create.return_value = mocked_dask_client - clients_pool = DaskClientsPool.instance(client.app) + clients_pool = DaskClientsPool.instance(the_app) mocked_dask_client.create.assert_not_called() mocked_dask_client.register_handlers.assert_not_called() clusters = fake_clusters(30) mocked_creation_calls = [] + assert isinstance(the_app.state.settings, AppSettings) for cluster in clusters: mocked_creation_calls.append( mock.call( app=client.app, - settings=client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, + settings=the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, authentication=cluster.authentication, endpoint=cluster.endpoint, - tasks_file_link_type=client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE, + tasks_file_link_type=the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE, cluster_type=ClusterTypeInModel.ON_PREMISE, ) ) @@ -218,14 +183,16 @@ async def test_acquiring_wrong_cluster_raises_exception( minimal_dask_config: None, mocker: MockerFixture, client: TestClient, - fake_clusters: Callable[[int], list[Cluster]], + fake_clusters: Callable[[int], list[BaseCluster]], ): + assert client.app + the_app = cast(FastAPI, client.app) mocked_dask_client = mocker.patch( "simcore_service_director_v2.modules.dask_clients_pool.DaskClient", autospec=True, ) mocked_dask_client.create.side_effect = Exception - clients_pool = DaskClientsPool.instance(client.app) + clients_pool = DaskClientsPool.instance(the_app) mocked_dask_client.assert_not_called() non_existing_cluster = 
fake_clusters(1)[0] @@ -237,9 +204,9 @@ async def test_acquiring_wrong_cluster_raises_exception( def test_default_cluster_correctly_initialized( minimal_dask_config: None, default_scheduler: None, client: TestClient ): - dask_scheduler_settings = ( - client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND - ) + assert client.app + the_app = cast(FastAPI, client.app) + dask_scheduler_settings = the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND default_cluster = dask_scheduler_settings.default_cluster assert default_cluster assert ( @@ -247,7 +214,6 @@ def test_default_cluster_correctly_initialized( == dask_scheduler_settings.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL ) - assert default_cluster.id == DEFAULT_CLUSTER_ID assert isinstance(default_cluster.authentication, get_args(ClusterAuthentication)) @@ -257,7 +223,9 @@ async def dask_clients_pool( default_scheduler, client: TestClient, ) -> AsyncIterator[DaskClientsPool]: - clients_pool = DaskClientsPool.instance(client.app) + assert client.app + the_app = cast(FastAPI, client.app) + clients_pool = DaskClientsPool.instance(the_app) assert clients_pool yield clients_pool await clients_pool.delete() @@ -268,9 +236,8 @@ async def test_acquire_default_cluster( client: TestClient, ): assert client.app - dask_scheduler_settings = ( - client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND - ) + the_app = cast(FastAPI, client.app) + dask_scheduler_settings = the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND default_cluster = dask_scheduler_settings.default_cluster assert default_cluster async with dask_clients_pool.acquire(default_cluster) as dask_client: @@ -280,7 +247,7 @@ def just_a_quick_fct(x, y): assert ( dask_client.tasks_file_link_type - == client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE + == the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE ) future = dask_client.backend.client.submit(just_a_quick_fct, 12, 23) assert future diff --git a/services/director-v2/tests/unit/test_utils_db.py b/services/director-v2/tests/unit/test_utils_db.py index d2a9e49484d..4bb06b82085 100644 --- a/services/director-v2/tests/unit/test_utils_db.py +++ b/services/director-v2/tests/unit/test_utils_db.py @@ -1,40 +1,10 @@ -from contextlib import suppress -from typing import Any, cast - import pytest -from models_library.clusters import BaseCluster, Cluster from models_library.projects_state import RunningState -from pydantic import BaseModel from simcore_postgres_database.models.comp_pipeline import StateType from simcore_service_director_v2.utils.db import ( DB_TO_RUNNING_STATE, RUNNING_STATE_TO_DB, - to_clusters_db, -) - - -@pytest.mark.parametrize( - "model_cls", - [Cluster], ) -def test_export_clusters_to_db( - model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] -): - for example in model_cls_examples.values(): - owner_gid = example["owner"] - # remove the owner from the access rights if any - with suppress(KeyError): - example.get("access_rights", {}).pop(owner_gid) - instance = cast(BaseCluster, model_cls(**example)) - - # for updates - - cluster_db_dict = to_clusters_db(instance, only_update=True) - keys_not_in_db = ["id", "access_rights"] - - assert list(cluster_db_dict.keys()) == [ - x for x in example if x not in keys_not_in_db - ] @pytest.mark.parametrize("input_running_state", RunningState) diff --git 
a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py index 4381c9311d4..04b85f8ad82 100644 --- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py @@ -4,7 +4,7 @@ # pylint: disable=too-many-arguments # pylint: disable=unused-argument # pylint: disable=unused-variable -# pylint:disable=too-many-positional-arguments +# pylint: disable=too-many-positional-arguments import datetime as dt import json @@ -34,10 +34,8 @@ PricingPlanGet, PricingUnitGet, ) -from models_library.clusters import DEFAULT_CLUSTER_ID, Cluster, ClusterID from models_library.projects import ProjectAtDB from models_library.projects_nodes import NodeID, NodeState -from models_library.projects_nodes_io import NodeIDStr from models_library.projects_pipeline import PipelineDetails from models_library.projects_state import RunningState from models_library.service_settings_labels import SimcoreServiceLabels @@ -49,7 +47,7 @@ ) from models_library.utils.fastapi_encoders import jsonable_encoder from models_library.wallets import WalletInfo -from pydantic import AnyHttpUrl, ByteSize, PositiveInt, TypeAdapter, ValidationError +from pydantic import AnyHttpUrl, ByteSize, PositiveInt, TypeAdapter from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict from settings_library.rabbit import RabbitSettings @@ -186,15 +184,29 @@ def _mocked_service_resources(request) -> httpx.Response: def _mocked_services_details( request, service_key: str, service_version: str ) -> httpx.Response: + assert "json_schema_extra" in ServiceGet.model_config + assert isinstance(ServiceGet.model_config["json_schema_extra"], dict) + assert isinstance( + ServiceGet.model_config["json_schema_extra"]["examples"], list + ) + assert isinstance( + ServiceGet.model_config["json_schema_extra"]["examples"][0], dict + ) + data_published = fake_service_details.model_copy( + update={ + "key": urllib.parse.unquote(service_key), + "version": service_version, + } + ).model_dump(by_alias=True) + data = { + **ServiceGet.model_config["json_schema_extra"]["examples"][0], + **data_published, + } + payload = ServiceGet.model_validate(data) return httpx.Response( 200, json=jsonable_encoder( - fake_service_details.model_copy( - update={ - "key": urllib.parse.unquote(service_key), - "version": service_version, - } - ), + payload, by_alias=True, ), ) @@ -274,6 +286,11 @@ def _mocked_services_details( yield respx_mock +assert "json_schema_extra" in PricingPlanGet.model_config +assert isinstance(PricingPlanGet.model_config["json_schema_extra"], dict) +assert isinstance(PricingPlanGet.model_config["json_schema_extra"]["examples"], list) + + @pytest.fixture( params=PricingPlanGet.model_config["json_schema_extra"]["examples"], ids=["with ec2 restriction", "without"], @@ -286,6 +303,7 @@ def default_pricing_plan(request: pytest.FixtureRequest) -> PricingPlanGet: def default_pricing_plan_aws_ec2_type( default_pricing_plan: PricingPlanGet, ) -> str | None: + assert default_pricing_plan.pricing_units for p in default_pricing_plan.pricing_units: if p.default: if p.specific_info.aws_ec2_instances: @@ -313,6 +331,11 @@ def _mocked_service_default_pricing_plan( ) def _mocked_get_pricing_unit(request, pricing_plan_id: int) -> httpx.Response: + assert "json_schema_extra" in PricingUnitGet.model_config + 
assert isinstance(PricingUnitGet.model_config["json_schema_extra"], dict) + assert isinstance( + PricingUnitGet.model_config["json_schema_extra"]["examples"], list + ) return httpx.Response( 200, json=jsonable_encoder( @@ -360,30 +383,6 @@ async def test_computation_create_validators( ): user = registered_user() proj = await project(user, workbench=fake_workbench_without_outputs) - # cluster id and use_on_demand raises - with pytest.raises(ValidationError, match=r"cluster_id cannot be set.+"): - ComputationCreate( - user_id=user["id"], - project_id=proj.uuid, - product_name=faker.pystr(), - use_on_demand_clusters=True, - cluster_id=faker.pyint(), - ) - # this should not raise - ComputationCreate( - user_id=user["id"], - project_id=proj.uuid, - product_name=faker.pystr(), - use_on_demand_clusters=True, - cluster_id=None, - ) - ComputationCreate( - user_id=user["id"], - project_id=proj.uuid, - product_name=faker.pystr(), - use_on_demand_clusters=False, - cluster_id=faker.pyint(), - ) ComputationCreate( user_id=user["id"], project_id=proj.uuid, @@ -479,6 +478,13 @@ def mocked_clusters_keeper_service_get_instance_type_details_with_invalid_name( ) +assert "json_schema_extra" in ServiceResourcesDictHelpers.model_config +assert isinstance(ServiceResourcesDictHelpers.model_config["json_schema_extra"], dict) +assert isinstance( + ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"], list +) + + @pytest.fixture( params=ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"] ) @@ -545,7 +551,7 @@ async def test_create_computation_with_wallet( project_nodes_repo = ProjectNodesRepo(project_uuid=proj.uuid) for node in await project_nodes_repo.list(connection): if ( - to_node_class(proj.workbench[NodeIDStr(f"{node.node_id}")].key) + to_node_class(proj.workbench[f"{node.node_id}"].key) != NodeClass.FRONTEND ): assert node.required_resources @@ -590,7 +596,11 @@ async def test_create_computation_with_wallet( @pytest.mark.parametrize( "default_pricing_plan", - [PricingPlanGet(**PricingPlanGet.model_config["json_schema_extra"]["examples"][0])], + [ + PricingPlanGet.model_validate( + PricingPlanGet.model_config["json_schema_extra"]["examples"][0] + ) + ], ) async def test_create_computation_with_wallet_with_invalid_pricing_unit_name_raises_422( minimal_configuration: None, @@ -730,6 +740,13 @@ async def test_start_computation_with_project_node_resources_defined( async_client: httpx.AsyncClient, ): user = registered_user() + assert "json_schema_extra" in ServiceResourcesDictHelpers.model_config + assert isinstance( + ServiceResourcesDictHelpers.model_config["json_schema_extra"], dict + ) + assert isinstance( + ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"], list + ) proj = await project( user, project_nodes_overrides={ @@ -785,77 +802,6 @@ async def test_start_computation_with_deprecated_services_raises_406( assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE, response.text -@pytest.fixture -async def unusable_cluster( - registered_user: Callable[..., dict[str, Any]], - create_cluster: Callable[..., Awaitable[Cluster]], -) -> ClusterID: - user = registered_user() - created_cluster = await create_cluster(user) - return created_cluster.id - - -async def test_start_computation_with_forbidden_cluster_raises_403( - minimal_configuration: None, - mocked_director_service_fcts, - mocked_catalog_service_fcts, - product_name: str, - fake_workbench_without_outputs: dict[str, Any], - registered_user: Callable[..., dict[str, Any]], - project: 
Callable[..., Awaitable[ProjectAtDB]], - async_client: httpx.AsyncClient, - unusable_cluster: ClusterID, -): - user = registered_user() - proj = await project(user, workbench=fake_workbench_without_outputs) - create_computation_url = httpx.URL("/v2/computations") - response = await async_client.post( - create_computation_url, - json=jsonable_encoder( - ComputationCreate( - user_id=user["id"], - project_id=proj.uuid, - start_pipeline=True, - product_name=product_name, - cluster_id=unusable_cluster, - ) - ), - ) - assert response.status_code == status.HTTP_403_FORBIDDEN, response.text - assert f"cluster {unusable_cluster}" in response.text - - -async def test_start_computation_with_unknown_cluster_raises_406( - minimal_configuration: None, - mocked_director_service_fcts, - mocked_catalog_service_fcts, - product_name: str, - fake_workbench_without_outputs: dict[str, Any], - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., Awaitable[ProjectAtDB]], - async_client: httpx.AsyncClient, - faker: Faker, -): - user = registered_user() - proj = await project(user, workbench=fake_workbench_without_outputs) - create_computation_url = httpx.URL("/v2/computations") - unknown_cluster_id = faker.pyint(1, 10000) - response = await async_client.post( - create_computation_url, - json=jsonable_encoder( - ComputationCreate( - user_id=user["id"], - project_id=proj.uuid, - start_pipeline=True, - product_name=product_name, - cluster_id=unknown_cluster_id, - ) - ), - ) - assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE, response.text - assert f"cluster {unknown_cluster_id}" in response.text - - async def test_get_computation_from_empty_project( minimal_configuration: None, fake_workbench_without_outputs: dict[str, Any], @@ -900,7 +846,6 @@ async def test_get_computation_from_empty_project( stop_url=None, result=None, iteration=None, - cluster_id=None, started=None, stopped=None, submitted=None, @@ -966,7 +911,6 @@ async def test_get_computation_from_not_started_computation_task( stop_url=None, result=None, iteration=None, - cluster_id=None, started=None, stopped=None, submitted=None, @@ -1043,7 +987,6 @@ async def test_get_computation_from_published_computation_task( stop_url=TypeAdapter(AnyHttpUrl).validate_python(f"{expected_stop_url}"), result=None, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, started=None, stopped=None, submitted=None, diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py index 845983b99cb..73d59a740c5 100644 --- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py @@ -30,6 +30,8 @@ pytest_simcore_core_services_selection = [ "postgres", + "rabbit", + "redis", ] pytest_simcore_ops_services_selection = [ "adminer", diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py index ba903d1b069..1dea4f59cbe 100644 --- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py @@ -15,12 +15,10 @@ import pytest from _helpers import PublishedProject from faker import Faker -from models_library.clusters import 
DEFAULT_CLUSTER_ID, Cluster from models_library.projects import ProjectID from models_library.projects_state import RunningState from models_library.users import UserID from simcore_service_director_v2.core.errors import ( - ClusterNotFoundError, ComputationalRunNotFoundError, ProjectNotFoundError, UserNotFoundError, @@ -89,7 +87,6 @@ async def test_list( created = await CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -101,7 +98,6 @@ async def test_list( CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=created.iteration + n + 1, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -260,13 +256,11 @@ async def test_create( run_metadata: RunMetadataDict, faker: Faker, publish_project: Callable[[], Awaitable[PublishedProject]], - create_cluster: Callable[..., Awaitable[Cluster]], ): with pytest.raises(ProjectNotFoundError): await CompRunsRepository(aiopg_engine).create( user_id=fake_user_id, project_id=fake_project_id, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -276,7 +270,6 @@ async def test_create( await CompRunsRepository(aiopg_engine).create( user_id=fake_user_id, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -285,7 +278,6 @@ async def test_create( created = await CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -300,7 +292,6 @@ async def test_create( created = await CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -315,25 +306,6 @@ async def test_create( ) assert created == got - with pytest.raises(ClusterNotFoundError): - await CompRunsRepository(aiopg_engine).create( - user_id=published_project.user["id"], - project_id=published_project.project.uuid, - cluster_id=faker.pyint(min_value=1), - iteration=None, - metadata=run_metadata, - use_on_demand_clusters=faker.pybool(), - ) - cluster = await create_cluster(published_project.user) - await CompRunsRepository(aiopg_engine).create( - user_id=published_project.user["id"], - project_id=published_project.project.uuid, - cluster_id=cluster.id, - iteration=None, - metadata=run_metadata, - use_on_demand_clusters=faker.pybool(), - ) - async def test_update( aiopg_engine, @@ -353,7 +325,6 @@ async def test_update( created = await CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -387,7 +358,6 @@ async def test_set_run_result( created = await CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -435,7 +405,6 @@ async def 
test_mark_for_cancellation( created = await CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -467,7 +436,6 @@ async def test_mark_for_scheduling( created = await CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), @@ -501,7 +469,6 @@ async def test_mark_scheduling_done( created = await CompRunsRepository(aiopg_engine).create( user_id=published_project.user["id"], project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, iteration=None, metadata=run_metadata, use_on_demand_clusters=faker.pybool(), diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py index ac5bbbcc942..47bdd35f8cd 100644 --- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py @@ -18,7 +18,6 @@ import pytest from _helpers import PublishedProject, assert_comp_runs, assert_comp_runs_empty from fastapi import FastAPI -from models_library.clusters import DEFAULT_CLUSTER_ID from models_library.projects import ProjectAtDB from models_library.projects_state import RunningState from pytest_mock.plugin import MockerFixture @@ -156,7 +155,6 @@ async def test_schedule_all_pipelines( initialized_app, user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) @@ -174,7 +172,7 @@ async def test_schedule_all_pipelines( assert comp_run.user_id == published_project.project.prj_owner assert comp_run.iteration == 1 assert comp_run.cancelled is None - assert comp_run.cluster_id == DEFAULT_CLUSTER_ID + assert comp_run.cluster_id is None assert comp_run.metadata == run_metadata assert comp_run.result is RunningState.PUBLISHED assert comp_run.scheduled is not None @@ -260,7 +258,6 @@ async def test_schedule_all_pipelines_logs_error_if_it_find_old_pipelines( initialized_app, user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) @@ -278,7 +275,7 @@ async def test_schedule_all_pipelines_logs_error_if_it_find_old_pipelines( assert comp_run.user_id == published_project.project.prj_owner assert comp_run.iteration == 1 assert comp_run.cancelled is None - assert comp_run.cluster_id == DEFAULT_CLUSTER_ID + assert comp_run.cluster_id is None assert comp_run.metadata == run_metadata assert comp_run.result is RunningState.PUBLISHED assert comp_run.scheduled is not None @@ -345,7 +342,6 @@ async def test_empty_pipeline_is_not_scheduled( initialized_app, user_id=user["id"], project_id=empty_project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) @@ -361,7 +357,6 @@ async def test_empty_pipeline_is_not_scheduled( initialized_app, user_id=user["id"], project_id=empty_project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py 
b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py index 7609f6e956e..d9559b6c75e 100644 --- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py @@ -30,7 +30,6 @@ from dask_task_models_library.container_tasks.protocol import TaskOwner from faker import Faker from fastapi.applications import FastAPI -from models_library.clusters import DEFAULT_CLUSTER_ID from models_library.projects import ProjectAtDB, ProjectID from models_library.projects_nodes_io import NodeID from models_library.projects_state import RunningState @@ -169,7 +168,6 @@ async def _assert_start_pipeline( app, user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) @@ -253,7 +251,6 @@ async def _return_tasks_pending(job_ids: list[str]) -> list[DaskClientTaskState] mock.call( user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, tasks={f"{p.node_id}": p.image}, callback=mock.ANY, metadata=mock.ANY, @@ -651,7 +648,6 @@ async def _return_random_task_result(job_id) -> TaskOutputData: mocked_dask_client.send_computation_tasks.assert_called_once_with( user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, tasks={ f"{next_pending_task.node_id}": next_pending_task.image, }, @@ -1115,7 +1111,6 @@ async def test_broken_pipeline_configuration_is_not_scheduled_and_aborted( initialized_app, user_id=user["id"], project_id=sleepers_project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) @@ -1241,7 +1236,6 @@ async def test_handling_of_disconnected_scheduler_dask( initialized_app, user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) @@ -1749,7 +1743,6 @@ async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits( initialized_app, user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=True, ) @@ -1854,7 +1847,6 @@ async def test_pipeline_with_on_demand_cluster_with_no_clusters_keeper_fails( initialized_app, user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=True, ) diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py index 9eb301e0910..8a66e543ed1 100644 --- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py @@ -14,7 +14,6 @@ import pytest from _helpers import PublishedProject from fastapi import FastAPI -from models_library.clusters import DEFAULT_CLUSTER_ID from pytest_mock import MockerFixture from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict @@ -66,7 +65,6 @@ async def test_worker_properly_autocalls_scheduler_api( initialized_app, user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - 
cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) @@ -123,7 +121,6 @@ async def _project_pipeline_creation_workflow() -> None: initialized_app, user_id=published_project.project.prj_owner, project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, run_metadata=run_metadata, use_on_demand_clusters=False, ) diff --git a/services/director-v2/tests/unit/with_dbs/conftest.py b/services/director-v2/tests/unit/with_dbs/conftest.py index 56784acba13..703686d2526 100644 --- a/services/director-v2/tests/unit/with_dbs/conftest.py +++ b/services/director-v2/tests/unit/with_dbs/conftest.py @@ -16,12 +16,9 @@ from _helpers import PublishedProject, RunningProject from faker import Faker from fastapi.encoders import jsonable_encoder -from models_library.clusters import Cluster from models_library.projects import ProjectAtDB, ProjectID from models_library.projects_nodes_io import NodeID from pydantic.main import BaseModel -from simcore_postgres_database.models.cluster_to_groups import cluster_to_groups -from simcore_postgres_database.models.clusters import clusters from simcore_postgres_database.models.comp_pipeline import StateType, comp_pipeline from simcore_postgres_database.models.comp_runs import comp_runs from simcore_postgres_database.models.comp_tasks import comp_tasks @@ -34,8 +31,6 @@ from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB, Image from simcore_service_director_v2.utils.computations import to_node_class from simcore_service_director_v2.utils.dask import generate_dask_job_id -from simcore_service_director_v2.utils.db import to_clusters_db -from sqlalchemy.dialects.postgresql import insert as pg_insert from sqlalchemy.ext.asyncio import AsyncEngine @@ -223,87 +218,6 @@ async def _( ) -@pytest.fixture -async def create_cluster( - sqlalchemy_async_engine: AsyncEngine, -) -> AsyncIterator[Callable[..., Awaitable[Cluster]]]: - created_cluster_ids: list[str] = [] - - async def _(user: dict[str, Any], **cluster_kwargs) -> Cluster: - assert "json_schema_extra" in Cluster.model_config - assert isinstance(Cluster.model_config["json_schema_extra"], dict) - assert isinstance(Cluster.model_config["json_schema_extra"]["examples"], list) - assert isinstance( - Cluster.model_config["json_schema_extra"]["examples"][1], dict - ) - cluster_config = Cluster.model_config["json_schema_extra"]["examples"][1] - cluster_config["owner"] = user["primary_gid"] - cluster_config.update(**cluster_kwargs) - new_cluster = Cluster.model_validate(cluster_config) - assert new_cluster - - async with sqlalchemy_async_engine.begin() as conn: - # insert basic cluster - created_cluster = ( - await conn.execute( - sa.insert(clusters) - .values(to_clusters_db(new_cluster, only_update=False)) - .returning(sa.literal_column("*")) - ) - ).one() - created_cluster_ids.append(created_cluster.id) - if "access_rights" in cluster_kwargs: - for gid, rights in cluster_kwargs["access_rights"].items(): - await conn.execute( - pg_insert(cluster_to_groups) - .values( - cluster_id=created_cluster.id, - gid=gid, - **rights.model_dump(), - ) - .on_conflict_do_update( - index_elements=["gid", "cluster_id"], - set_=rights.model_dump(), - ) - ) - access_rights_in_db = {} - for row in await conn.execute( - sa.select( - cluster_to_groups.c.gid, - cluster_to_groups.c.read, - cluster_to_groups.c.write, - cluster_to_groups.c.delete, - ) - .select_from(clusters.join(cluster_to_groups)) - .where(clusters.c.id == created_cluster.id) - ): - access_rights_in_db[row.gid] = { - "read": 
row.read, - "write": row.write, - "delete": row.delete, - } - - return Cluster( - id=created_cluster.id, - name=created_cluster.name, - description=created_cluster.description, - type=created_cluster.type, - owner=created_cluster.owner, - endpoint=created_cluster.endpoint, - authentication=created_cluster.authentication, - access_rights=access_rights_in_db, - thumbnail=None, - ) - - yield _ - - # cleanup - async with sqlalchemy_async_engine.begin() as conn: - await conn.execute( - clusters.delete().where(clusters.c.id.in_(created_cluster_ids)) - ) - - @pytest.fixture async def publish_project( registered_user: Callable[..., dict[str, Any]], diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py deleted file mode 100644 index 9f55e71f935..00000000000 --- a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py +++ /dev/null @@ -1,802 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import random -from collections.abc import Callable, Iterator -from typing import Any, Awaitable - -import httpx -import pytest -import sqlalchemy as sa -from _dask_helpers import DaskGatewayServer -from common_library.serialization import model_dump_with_secrets -from distributed.deploy.spec import SpecCluster -from faker import Faker -from httpx import URL -from models_library.api_schemas_directorv2.clusters import ( - ClusterCreate, - ClusterGet, - ClusterPatch, - ClusterPing, -) -from models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_NO_RIGHTS, - CLUSTER_USER_RIGHTS, - Cluster, - ClusterAccessRights, - ClusterAuthentication, - SimpleAuthentication, -) -from pydantic import AnyHttpUrl, SecretStr, TypeAdapter -from pytest_simcore.helpers.typing_env import EnvVarsDict -from simcore_postgres_database.models.clusters import ClusterType, clusters -from starlette import status - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture() -def clusters_config( - mock_env: EnvVarsDict, - postgres_db: sa.engine.Engine, - postgres_host_config: dict[str, str], - monkeypatch: pytest.MonkeyPatch, - dask_spec_local_cluster: SpecCluster, - faker: Faker, -): - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", faker.url()) - monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) - monkeypatch.setenv("S3_REGION", faker.pystr()) - monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) - monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) - - -@pytest.fixture -def cluster_simple_authentication(faker: Faker) -> Callable[[], dict[str, Any]]: - def creator() -> dict[str, Any]: - simple_auth = { - "type": "simple", - "username": faker.user_name(), - "password": faker.password(), - } - assert SimpleAuthentication.model_validate(simple_auth) - return simple_auth - - return creator - - -@pytest.fixture -def clusters_cleaner(postgres_db: sa.engine.Engine) -> Iterator: - yield - with postgres_db.connect() as conn: - conn.execute(sa.delete(clusters)) - - -async def test_list_clusters( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - list_clusters_url = URL(f"/v2/clusters?user_id={user_1['id']}") - # there is 
no cluster at the moment, the list shall contain the default cluster - response = await async_client.get(list_clusters_url) - assert response.status_code == status.HTTP_200_OK - returned_clusters_list = TypeAdapter(list[ClusterGet]).validate_python( - response.json() - ) - assert ( - len(returned_clusters_list) == 1 - ), f"no default cluster in {returned_clusters_list=}" - assert ( - returned_clusters_list[0].id == 0 - ), "default cluster id is not the one expected" - - # let's create some clusters - NUM_CLUSTERS = 111 - for n in range(NUM_CLUSTERS): - await create_cluster(user_1, name=f"pytest cluster{n:04}") - - response = await async_client.get(list_clusters_url) - assert response.status_code == status.HTTP_200_OK - returned_clusters_list = TypeAdapter(list[ClusterGet]).validate_python( - response.json() - ) - assert ( - len(returned_clusters_list) == NUM_CLUSTERS + 1 - ) # the default cluster comes on top of the NUM_CLUSTERS - assert ( - returned_clusters_list[0].id == 0 - ), "the first cluster shall be the platform default cluster" - - # now create a second user and check the clusters are not seen by it BUT the default one - user_2 = registered_user() - response = await async_client.get(f"/v2/clusters?user_id={user_2['id']}") - assert response.status_code == status.HTTP_200_OK - returned_clusters_list = TypeAdapter(list[ClusterGet]).validate_python( - response.json() - ) - assert ( - len(returned_clusters_list) == 1 - ), f"no default cluster in {returned_clusters_list=}" - assert ( - returned_clusters_list[0].id == 0 - ), "default cluster id is not the one expected" - - # let's create a few more clusters owned by user_1 with specific rights - for rights, name in [ - (CLUSTER_NO_RIGHTS, "no rights"), - (CLUSTER_USER_RIGHTS, "user rights"), - (CLUSTER_MANAGER_RIGHTS, "manager rights"), - (CLUSTER_ADMIN_RIGHTS, "admin rights"), - ]: - await create_cluster( - user_1, # cluster is owned by user_1 - name=f"cluster with {name}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_2["primary_gid"]: rights, - }, - ) - - response = await async_client.get(f"/v2/clusters?user_id={user_2['id']}") - assert response.status_code == status.HTTP_200_OK - user_2_clusters = TypeAdapter(list[ClusterGet]).validate_python(response.json()) - # we should find 3 clusters + the default cluster - assert len(user_2_clusters) == 3 + 1 - for name in [ - "cluster with user rights", - "cluster with manager rights", - "cluster with admin rights", - ]: - clusters = list( - filter( - lambda cluster, name=name: cluster.name == name, - user_2_clusters, - ), - ) - assert len(clusters) == 1, f"missing cluster with {name=}" - - -async def test_get_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - # try to get one that does not exist - response = await async_client.get( - f"/v2/clusters/15615165165165?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_404_NOT_FOUND - # let's create some clusters - a_bunch_of_clusters = [ - await create_cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - - # there is no cluster at the moment, the list is empty - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = 
ClusterGet.model_validate(response.json()) - assert returned_cluster - assert the_cluster.model_dump( - exclude={"authentication"} - ) == returned_cluster.model_dump(exclude={"authentication"}) - - user_2 = registered_user() - # getting the same cluster for user 2 shall return 403 - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}" - ) - assert ( - response.status_code == status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - # let's create a few cluster for user 2 and share some with user 1 - for rights, user_1_expected_access in [ - (CLUSTER_NO_RIGHTS, False), - (CLUSTER_USER_RIGHTS, True), - (CLUSTER_MANAGER_RIGHTS, True), - (CLUSTER_ADMIN_RIGHTS, True), - ]: - a_cluster = await create_cluster( - user_2, # cluster is owned by user_2 - access_rights={ - user_2["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_1["primary_gid"]: rights, - }, - ) - # now let's check that user_1 can access only the correct ones - response = await async_client.get( - f"/v2/clusters/{a_cluster.id}?user_id={user_1['id']}" - ) - assert ( - response.status_code == status.HTTP_200_OK - if user_1_expected_access - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - -@pytest.mark.parametrize( - "cluster_sharing_rights, can_use", - [ - pytest.param(CLUSTER_ADMIN_RIGHTS, True, id="SHARE_WITH_ADMIN_RIGHTS"), - pytest.param(CLUSTER_MANAGER_RIGHTS, True, id="SHARE_WITH_MANAGER_RIGHTS"), - pytest.param(CLUSTER_USER_RIGHTS, True, id="SHARE_WITH_USER_RIGHTS"), - pytest.param(CLUSTER_NO_RIGHTS, False, id="DENY_RIGHTS"), - ], -) -async def test_get_another_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - async_client: httpx.AsyncClient, - cluster_sharing_rights: ClusterAccessRights, - can_use: bool, -): - user_1 = registered_user() - user_2 = registered_user() - # let's create some clusters - a_bunch_of_clusters = [ - await create_cluster( - user_1, - name=f"pytest cluster{n:04}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_2["primary_gid"]: cluster_sharing_rights, - }, - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # try to get the cluster as user 2 - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}" - ) - assert ( - response.status_code == status.HTTP_200_OK - if can_use - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - -@pytest.mark.parametrize("with_query", [True, False]) -async def test_get_default_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - async_client: httpx.AsyncClient, - with_query: bool, -): - user_1 = registered_user() - - get_cluster_url = URL("/v2/clusters/default") - if with_query: - get_cluster_url = URL(f"/v2/clusters/default?user_id={user_1['id']}") - response = await async_client.get(get_cluster_url) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = ClusterGet.model_validate(response.json()) - assert returned_cluster - assert returned_cluster.id == 0 - assert returned_cluster.name == "Default cluster" - assert 1 in returned_cluster.access_rights # everyone group is always 1 - assert returned_cluster.access_rights[1] == CLUSTER_USER_RIGHTS - - -async def test_create_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, - postgres_db: sa.engine.Engine, 
- clusters_cleaner, -): - user_1 = registered_user() - create_cluster_url = URL(f"/v2/clusters?user_id={user_1['id']}") - cluster_data = ClusterCreate( - endpoint=faker.uri(), - authentication=cluster_simple_authentication(), - name=faker.name(), - type=random.choice(list(ClusterType)), - owner=faker.pyint(min_value=1), - ) - response = await async_client.post( - create_cluster_url, - json=model_dump_with_secrets( - cluster_data, - show_secrets=True, - by_alias=True, - exclude_unset=True, - ), - ) - assert response.status_code == status.HTTP_201_CREATED, f"received: {response.text}" - created_cluster = ClusterGet.model_validate(response.json()) - assert created_cluster - - assert cluster_data.model_dump( - exclude={"id", "owner", "access_rights", "authentication"} - ) == created_cluster.model_dump( - exclude={"id", "owner", "access_rights", "authentication"} - ) - - assert created_cluster.id is not None - assert created_cluster.owner == user_1["primary_gid"] - assert created_cluster.access_rights == { - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS - } - - # let's check that DB is correctly setup, there is one entry - with postgres_db.connect() as conn: - conn.execute( - sa.select(clusters).where(clusters.c.name == cluster_data.name) - ).one() - - -async def test_update_own_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, -): - _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True} - user_1 = registered_user() - # try to modify one that does not exist - response = await async_client.patch( - f"/v2/clusters/15615165165165?user_id={user_1['id']}", - json=model_dump_with_secrets( - ClusterPatch(), show_secrets=True, **_PATCH_EXPORT - ), - ) - assert response.status_code == status.HTTP_404_NOT_FOUND - # let's create some clusters - a_bunch_of_clusters = [ - await create_cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # get the original one - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - original_cluster = ClusterGet.model_validate(response.json()) - - # now we modify nothing - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=model_dump_with_secrets( - ClusterPatch(), show_secrets=True, **_PATCH_EXPORT - ), - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = ClusterGet.model_validate(response.json()) - assert returned_cluster.model_dump() == original_cluster.model_dump() - - # modify some simple things - expected_modified_cluster = original_cluster.model_copy() - for cluster_patch in [ - ClusterPatch(name=faker.name()), - ClusterPatch(description=faker.text()), - ClusterPatch(type=ClusterType.ON_PREMISE), - ClusterPatch(thumbnail=faker.uri()), - ClusterPatch(endpoint=faker.uri()), - ClusterPatch(authentication=cluster_simple_authentication()), - ]: - jsonable_cluster_patch = model_dump_with_secrets( - cluster_patch, show_secrets=True, **_PATCH_EXPORT - ) - print(f"--> patching cluster with {jsonable_cluster_patch}") - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=jsonable_cluster_patch, - ) - assert response.status_code == 
status.HTTP_200_OK, f"received {response.text}" - returned_cluster = ClusterGet.model_validate(response.json()) - expected_modified_cluster = expected_modified_cluster.model_copy( - update=cluster_patch.model_dump(**_PATCH_EXPORT) - ) - assert returned_cluster.model_dump( - exclude={"authentication": {"password"}} - ) == expected_modified_cluster.model_dump( - exclude={"authentication": {"password"}} - ) - - # we can change the access rights, the owner rights are always kept - user_2 = registered_user() - - for rights in [ - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_USER_RIGHTS, - CLUSTER_NO_RIGHTS, - ]: - cluster_patch = ClusterPatch(accessRights={user_2["primary_gid"]: rights}) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=cluster_patch.model_dump(**_PATCH_EXPORT), - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = ClusterGet.model_validate(response.json()) - - expected_modified_cluster.access_rights[user_2["primary_gid"]] = rights - assert returned_cluster.model_dump( - exclude={"authentication": {"password"}} - ) == expected_modified_cluster.model_dump( - exclude={"authentication": {"password"}} - ) - # we can change the owner since we are admin - cluster_patch = ClusterPatch(owner=user_2["primary_gid"]) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=model_dump_with_secrets(cluster_patch, show_secrets=True, **_PATCH_EXPORT), - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = ClusterGet.model_validate(response.json()) - expected_modified_cluster.owner = user_2["primary_gid"] - expected_modified_cluster.access_rights[ - user_2["primary_gid"] - ] = CLUSTER_ADMIN_RIGHTS - assert returned_cluster.model_dump( - exclude={"authentication": {"password"}} - ) == expected_modified_cluster.model_dump(exclude={"authentication": {"password"}}) - - # we should not be able to reduce the rights of the new owner - cluster_patch = ClusterPatch( - accessRights={user_2["primary_gid"]: CLUSTER_NO_RIGHTS} - ) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=model_dump_with_secrets(cluster_patch, show_secrets=True, **_PATCH_EXPORT), - ) - assert ( - response.status_code == status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - -async def test_update_default_cluster_fails( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, -): - _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True} - user_1 = registered_user() - # try to modify one that does not exist - response = await async_client.patch( - f"/v2/clusters/default?user_id={user_1['id']}", - json=model_dump_with_secrets( - ClusterPatch(), show_secrets=True, **_PATCH_EXPORT - ), - ) - assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY - - -@pytest.mark.parametrize( - "cluster_sharing_rights, can_use, can_manage, can_administer", - [ - pytest.param( - CLUSTER_ADMIN_RIGHTS, True, True, True, id="SHARE_WITH_ADMIN_RIGHTS" - ), - pytest.param( - CLUSTER_MANAGER_RIGHTS, True, True, False, id="SHARE_WITH_MANAGER_RIGHTS" - ), - pytest.param( - CLUSTER_USER_RIGHTS, True, False, False, id="SHARE_WITH_USER_RIGHTS" - ), - pytest.param(CLUSTER_NO_RIGHTS, False, False, 
False, id="DENY_RIGHTS"), - ], -) -async def test_update_another_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, - cluster_sharing_rights: ClusterAccessRights, - can_use: bool, - can_manage: bool, - can_administer: bool, -): - """user_1 is the owner and administrator, he/she gives some rights to user 2""" - - _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True} - user_1 = registered_user() - user_2 = registered_user() - # let's create some clusters - a_bunch_of_clusters = [ - await create_cluster( - user_1, - name=f"pytest cluster{n:04}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_2["primary_gid"]: cluster_sharing_rights, - }, - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # get the original one - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - ClusterGet.model_validate(response.json()) - - # let's try to modify stuff as we are user 2 - for cluster_patch in [ - ClusterPatch(name=faker.name()), - ClusterPatch(description=faker.text()), - ClusterPatch(type=ClusterType.ON_PREMISE), - ClusterPatch(thumbnail=faker.uri()), - ClusterPatch(endpoint=faker.uri()), - ClusterPatch(authentication=cluster_simple_authentication()), - ]: - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}", - json=model_dump_with_secrets( - cluster_patch, show_secrets=True, **_PATCH_EXPORT - ), - ) - assert ( - response.status_code == status.HTTP_200_OK - if can_manage - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - # let's try to add/remove someone (reserved to managers) - user_3 = registered_user() - for rights in [ - CLUSTER_USER_RIGHTS, # add user - CLUSTER_NO_RIGHTS, # remove user - ]: - # try to add user 3 - cluster_patch = ClusterPatch(accessRights={user_3["primary_gid"]: rights}) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}", - json=model_dump_with_secrets( - cluster_patch, show_secrets=True, **_PATCH_EXPORT - ), - ) - assert ( - response.status_code == status.HTTP_200_OK - if can_manage - else status.HTTP_403_FORBIDDEN - ), f"received {response.text} while {'adding' if rights == CLUSTER_USER_RIGHTS else 'removing'} user" - - # modify rights to admin/manager (reserved to administrators) - for rights in [ - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - ]: - cluster_patch = ClusterPatch(accessRights={user_3["primary_gid"]: rights}) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}", - json=model_dump_with_secrets( - cluster_patch, show_secrets=True, **_PATCH_EXPORT - ), - ) - assert ( - response.status_code == status.HTTP_200_OK - if can_administer - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - -async def test_delete_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - # let's create some clusters - a_bunch_of_clusters = [ - await create_cluster( - user_1, - name=f"pytest cluster{n:04}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - }, - ) - for n in range(111) - ] - the_cluster 
= random.choice(a_bunch_of_clusters) - # let's delete that cluster - response = await async_client.delete( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert ( - response.status_code == status.HTTP_204_NO_CONTENT - ), f"received {response.text}" - # now check it is gone - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert ( - response.status_code == status.HTTP_404_NOT_FOUND - ), f"received {response.text}" - - -@pytest.mark.parametrize( - "cluster_sharing_rights, can_administer", - [ - pytest.param(CLUSTER_ADMIN_RIGHTS, True, id="SHARE_WITH_ADMIN_RIGHTS"), - pytest.param(CLUSTER_MANAGER_RIGHTS, False, id="SHARE_WITH_MANAGER_RIGHTS"), - pytest.param(CLUSTER_USER_RIGHTS, False, id="SHARE_WITH_USER_RIGHTS"), - pytest.param(CLUSTER_NO_RIGHTS, False, id="DENY_RIGHTS"), - ], -) -async def test_delete_another_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, - cluster_sharing_rights: ClusterAccessRights, - can_administer: bool, -): - user_1 = registered_user() - user_2 = registered_user() - # let's create some clusters - a_bunch_of_clusters = [ - await create_cluster( - user_1, - name=f"pytest cluster{n:04}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_2["primary_gid"]: cluster_sharing_rights, - }, - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # let's delete that cluster as user_2 - response = await async_client.delete( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}" - ) - assert ( - response.status_code == status.HTTP_204_NO_CONTENT - if can_administer - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - # now check it is gone or still around - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert ( - response.status_code == status.HTTP_404_NOT_FOUND - if can_administer - else status.HTTP_200_OK - ), f"received {response.text}" - - -async def test_delete_default_cluster_fails( - clusters_config: None, - registered_user: Callable[..., dict], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - response = await async_client.delete(f"/v2/clusters/default?user_id={user_1['id']}") - assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY - - -async def test_ping_invalid_cluster_raises_422( - clusters_config: None, - async_client: httpx.AsyncClient, - faker: Faker, - cluster_simple_authentication: Callable[[], dict[str, Any]], -): - # calling with wrong data raises - response = await async_client.post("/v2/clusters:ping", json={}) - with pytest.raises(httpx.HTTPStatusError): - response.raise_for_status() - - # calling with correct data but non existing cluster also raises - some_fake_cluster = ClusterPing( - endpoint=faker.url(), - authentication=TypeAdapter(ClusterAuthentication).validate_python( - cluster_simple_authentication() - ), - ) - response = await async_client.post( - "/v2/clusters:ping", - json=model_dump_with_secrets( - some_fake_cluster, show_secrets=True, by_alias=True - ), - ) - with pytest.raises(httpx.HTTPStatusError): - response.raise_for_status() - - -async def test_ping_cluster( - clusters_config: None, - async_client: httpx.AsyncClient, - local_dask_gateway_server: DaskGatewayServer, -): - valid_cluster = ClusterPing( - endpoint=TypeAdapter(AnyHttpUrl).validate_python( 
- local_dask_gateway_server.address - ), - authentication=SimpleAuthentication( - username="pytest_user", - password=TypeAdapter(SecretStr).validate_python( - local_dask_gateway_server.password - ), - ), - ) - response = await async_client.post( - "/v2/clusters:ping", - json=model_dump_with_secrets(valid_cluster, show_secrets=True, by_alias=True), - ) - response.raise_for_status() - assert response.status_code == status.HTTP_204_NO_CONTENT - - -async def test_ping_specific_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - create_cluster: Callable[..., Awaitable[Cluster]], - async_client: httpx.AsyncClient, - local_dask_gateway_server: DaskGatewayServer, -): - user_1 = registered_user() - # try to ping one that does not exist - response = await async_client.get( - f"/v2/clusters/15615165165165:ping?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY - - # let's create some clusters and ping one - a_bunch_of_clusters = [ - await create_cluster( - user_1, - name=f"pytest cluster{n:04}", - endpoint=local_dask_gateway_server.address, - authentication=SimpleAuthentication( - username="pytest_user", - password=TypeAdapter(SecretStr).validate_python( - local_dask_gateway_server.password - ), - ), - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - - response = await async_client.post( - f"/v2/clusters/{the_cluster.id}:ping?user_id={user_1['id']}", - ) - response.raise_for_status() - assert response.status_code == status.HTTP_204_NO_CONTENT - - -async def test_ping_default_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - # try to ping one that does not exist - response = await async_client.post( - f"/v2/clusters/default:ping?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_204_NO_CONTENT diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py deleted file mode 100644 index 357f3b7647a..00000000000 --- a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py +++ /dev/null @@ -1,254 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import json -from collections.abc import Callable -from typing import Any, Awaitable - -import httpx -import pytest -import sqlalchemy as sa -from _dask_helpers import DaskGatewayServer -from dask_gateway import Gateway, GatewayCluster, auth -from distributed import Client as DaskClient -from distributed.deploy.spec import SpecCluster -from faker import Faker -from models_library.api_schemas_directorv2.clusters import ClusterDetailsGet -from models_library.clusters import Cluster, ClusterID, SimpleAuthentication -from models_library.users import UserID -from pydantic import SecretStr -from pytest_simcore.helpers.typing_env import EnvVarsDict -from starlette import status -from tenacity.asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture() -def clusters_config( - mock_env: EnvVarsDict, - postgres_db: sa.engine.Engine, - postgres_host_config: dict[str, str], - monkeypatch: pytest.MonkeyPatch, - dask_spec_local_cluster: SpecCluster, - faker: Faker, -): - 
monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", faker.url()) - monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) - monkeypatch.setenv("S3_REGION", faker.pystr()) - monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) - monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) - - -@pytest.mark.skip( - reason="test for helping developers understand how to use dask gateways" -) -async def test_local_dask_gateway_server(local_dask_gateway_server: DaskGatewayServer): - async with Gateway( - local_dask_gateway_server.address, - local_dask_gateway_server.proxy_address, - asynchronous=True, - auth=auth.BasicAuth("pytest_user", local_dask_gateway_server.password), - ) as gateway: - print(f"--> {gateway=} created") - cluster_options = await gateway.cluster_options() - gateway_versions = await gateway.get_versions() - clusters_list = await gateway.list_clusters() - print(f"--> {gateway_versions=}, {cluster_options=}, {clusters_list=}") - for option in cluster_options.items(): - print(f"--> {option=}") - - async with gateway.new_cluster() as cluster: - assert cluster - print(f"--> created new cluster {cluster=}, {cluster.scheduler_info=}") - NUM_WORKERS = 10 - await cluster.scale(NUM_WORKERS) - print(f"--> scaling cluster {cluster=} to {NUM_WORKERS} workers") - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(0.24), stop=stop_after_delay(30) - ): - with attempt: - print( - f"cluster {cluster=} has now {len(cluster.scheduler_info.get('workers', []))} worker(s)" - ) - assert len(cluster.scheduler_info.get("workers", 0)) == 10 - - async with cluster.get_client() as client: - print(f"--> created new client {client=}, submitting a job") - res = await client.submit(lambda x: x + 1, 1) - assert res == 2 - - print(f"--> scaling cluster {cluster=} back to 0") - await cluster.scale(0) - - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(0.24), stop=stop_after_delay(30) - ): - with attempt: - print( - f"cluster {cluster=} has now {len(cluster.scheduler_info.get('workers', []))}" - ) - assert len(cluster.scheduler_info.get("workers", 0)) == 0 - - -async def test_get_default_cluster_details( - clusters_config: None, - registered_user: Callable, - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - - # This test checks that the default cluster is accessible - # the default cluster is the osparc internal cluster available through a dask-scheduler - response = await async_client.get( - f"/v2/clusters/default/details?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK - default_cluster_out = ClusterDetailsGet.model_validate(response.json()) - response = await async_client.get( - f"/v2/clusters/{0}/details?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK - assert default_cluster_out == ClusterDetailsGet.model_validate(response.json()) - - -async def _get_cluster_details( - async_client: httpx.AsyncClient, user_id: UserID, cluster_id: ClusterID -) -> ClusterDetailsGet: - response = await async_client.get( - f"/v2/clusters/{cluster_id}/details?user_id={user_id}" - ) - assert response.status_code == status.HTTP_200_OK - print(f"<-- received cluster details response {response=}") - cluster_out = ClusterDetailsGet.model_validate(response.json()) - assert cluster_out - print(f"<-- received cluster details {cluster_out=}") - assert cluster_out.scheduler, "the cluster's scheduler is not started!" 
- return cluster_out - - -async def test_get_cluster_details( - clusters_config: None, - registered_user: Callable[..., dict[str, Any]], - async_client: httpx.AsyncClient, - local_dask_gateway_server: DaskGatewayServer, - create_cluster: Callable[..., Awaitable[Cluster]], - dask_gateway_cluster: GatewayCluster, - dask_gateway_cluster_client: DaskClient, - gateway_username: str, -): - user_1 = registered_user() - # define the cluster in the DB - some_cluster = await create_cluster( - user_1, - endpoint=local_dask_gateway_server.address, - authentication=SimpleAuthentication( - username=gateway_username, - password=SecretStr(local_dask_gateway_server.password), - ).model_dump(by_alias=True), - ) - # in its present state, the cluster should have no workers - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert not cluster_out.scheduler.workers, "the cluster should not have any worker!" - - # now let's scale the cluster - _NUM_WORKERS = 1 - await dask_gateway_cluster.scale(_NUM_WORKERS) - async for attempt in AsyncRetrying( - reraise=True, stop=stop_after_delay(60), wait=wait_fixed(1) - ): - with attempt: - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert cluster_out.scheduler.workers, "the cluster has no workers!" - assert ( - len(cluster_out.scheduler.workers) == _NUM_WORKERS - ), f"the cluster is expected to have {_NUM_WORKERS} worker(s), currently has {len(cluster_out.scheduler.workers)} worker(s)" - print( - f"cluster now has its {_NUM_WORKERS}, after {json.dumps(attempt.retry_state.retry_object.statistics)}" - ) - print(f"!!> cluster dashboard link: {dask_gateway_cluster.dashboard_link}") - - # let's start some computation - _TASK_SLEEP_TIME = 55 - - def do_some_work(x: int): - import time - - time.sleep(x) - return True - - task = dask_gateway_cluster_client.submit(do_some_work, _TASK_SLEEP_TIME) - # wait for the computation to start, we should see this in the cluster infos - async for attempt in AsyncRetrying( - reraise=True, stop=stop_after_delay(10), wait=wait_fixed(1) - ): - with attempt: - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert cluster_out.scheduler.workers - assert ( - next( - iter(cluster_out.scheduler.workers.values()) - ).metrics.task_counts.executing - == 1 - ), "worker is not executing the task" - print( - f"!!> cluster metrics: {next(iter(cluster_out.scheduler.workers.values())).metrics=}" - ) - # let's wait for the result - result = task.result(timeout=_TASK_SLEEP_TIME + 5) - assert result - assert await result is True - # wait for the computation to effectively stop - async for attempt in AsyncRetrying( - reraise=True, stop=stop_after_delay(60), wait=wait_fixed(1) - ): - with attempt: - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert cluster_out.scheduler.workers - print( - f"!!> cluster metrics: {next(iter(cluster_out.scheduler.workers.values())).metrics=}" - ) - assert ( - next( - iter(cluster_out.scheduler.workers.values()) - ).metrics.task_counts.executing - == 0 - ), "worker is still executing the task" - assert ( - next( - iter(cluster_out.scheduler.workers.values()) - ).metrics.task_counts.memory - == 1 - ), "worker did not keep the result in memory" - # NOTE: this is a CPU percent use - assert ( - next(iter(cluster_out.scheduler.workers.values())).metrics.cpu < 5.0 - ), "worker did not update the cpu metrics" - - # since the task is completed the worker 
should have stopped executing - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert cluster_out.scheduler.workers - worker_data = next(iter(cluster_out.scheduler.workers.values())) - assert worker_data.metrics.task_counts.executing == 0 - # in dask, the task remains in memory until the result is deleted - assert worker_data.metrics.task_counts.memory == 1 diff --git a/services/director-v2/tests/unit/with_dbs/test_utils_dask.py b/services/director-v2/tests/unit/with_dbs/test_utils_dask.py index 977828e4753..d02836de9e2 100644 --- a/services/director-v2/tests/unit/with_dbs/test_utils_dask.py +++ b/services/director-v2/tests/unit/with_dbs/test_utils_dask.py @@ -31,7 +31,6 @@ from fastapi import FastAPI from models_library.api_schemas_directorv2.services import NodeRequirements from models_library.api_schemas_storage import FileUploadLinks, FileUploadSchema -from models_library.clusters import ClusterID from models_library.docker import to_simcore_runtime_docker_label_key from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID, SimCoreFileLink, SimcoreS3FileID @@ -100,7 +99,9 @@ async def mocked_node_ports_filemanager_fcts( ], chunk_size=TypeAdapter(ByteSize).validate_python("5GiB"), links=FileUploadLinks( - abort_upload=TypeAdapter(AnyUrl).validate_python("https://www.fakeabort.com"), + abort_upload=TypeAdapter(AnyUrl).validate_python( + "https://www.fakeabort.com" + ), complete_upload=TypeAdapter(AnyUrl).validate_python( "https://www.fakecomplete.com" ), @@ -425,7 +426,7 @@ async def test_clean_task_output_and_log_files_if_invalid( published_project: PublishedProject, mocked_node_ports_filemanager_fcts: dict[str, mock.MagicMock], create_simcore_file_id: Callable[[ProjectID, NodeID, str], SimcoreS3FileID], - entry_exists_returns: bool, # noqa: FBT001 + entry_exists_returns: bool, fake_io_schema: dict[str, dict[str, str]], faker: Faker, ): @@ -527,11 +528,6 @@ def test__to_human_readable_resource_values( ) -@pytest.fixture -def cluster_id(faker: Faker) -> ClusterID: - return faker.pyint(min_value=0) - - @pytest.fixture def _app_config_with_dask_client( _app_config_with_db: None, @@ -549,7 +545,6 @@ async def test_check_if_cluster_is_able_to_run_pipeline( _app_config_with_dask_client: None, project_id: ProjectID, node_id: NodeID, - cluster_id: ClusterID, published_project: PublishedProject, initialized_app: FastAPI, ): @@ -563,7 +558,6 @@ async def test_check_if_cluster_is_able_to_run_pipeline( check_if_cluster_is_able_to_run_pipeline( project_id=project_id, node_id=node_id, - cluster_id=cluster_id, node_image=sleeper_task.image, scheduler_info=dask_client.backend.client.scheduler_info(), task_resources={}, diff --git a/services/docker-bake.hcl b/services/docker-bake.hcl index c11de1c6834..7cc0470f177 100644 --- a/services/docker-bake.hcl +++ b/services/docker-bake.hcl @@ -10,12 +10,3 @@ target "dask-sidecar" { tags = ["${DOCKER_REGISTRY}/dask-sidecar:latest","${DOCKER_REGISTRY}/dask-sidecar:${DASK_SIDECAR_VERSION}"] output = ["type=registry"] } - -variable "OSPARC_GATEWAY_SERVER_VERSION" { - default = "latest" -} - -target "osparc-gateway-server" { - tags = ["${DOCKER_REGISTRY}/osparc-gateway-server:latest","${DOCKER_REGISTRY}/osparc-gateway-server:${OSPARC_GATEWAY_SERVER_VERSION}"] - output = ["type=registry"] -} diff --git a/services/docker-compose-build.yml b/services/docker-compose-build.yml index df66ec7a41c..becf5ce2a25 100644 --- a/services/docker-compose-build.yml +++ 
b/services/docker-compose-build.yml @@ -169,22 +169,6 @@ services: org.opencontainers.image.source: "${VCS_URL}" org.opencontainers.image.revision: "${VCS_REF}" - osparc-gateway-server: - image: local/osparc-gateway-server:${BUILD_TARGET:?build_target_required} - build: - context: ../ - dockerfile: services/osparc-gateway-server/Dockerfile - cache_from: - - local/osparc-gateway-server:${BUILD_TARGET:?build_target_required} - - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:master-github-latest - - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:staging-github-latest - - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:release-github-latest - target: ${BUILD_TARGET:?build_target_required} - labels: - org.opencontainers.image.created: "${BUILD_DATE}" - org.opencontainers.image.source: "${VCS_URL}" - org.opencontainers.image.revision: "${VCS_REF}" - resource-usage-tracker: image: local/resource-usage-tracker:${BUILD_TARGET:?build_target_required} build: diff --git a/services/docker-compose-deploy.yml b/services/docker-compose-deploy.yml index fb7adc69a9e..1da5f7933de 100644 --- a/services/docker-compose-deploy.yml +++ b/services/docker-compose-deploy.yml @@ -25,8 +25,6 @@ services: image: ${DOCKER_REGISTRY:-itisfoundation}/invitations:${DOCKER_IMAGE_TAG:-latest} migration: image: ${DOCKER_REGISTRY:-itisfoundation}/migration:${DOCKER_IMAGE_TAG:-latest} - osparc-gateway-server: - image: ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:${DOCKER_IMAGE_TAG:-latest} payments: image: ${DOCKER_REGISTRY:-itisfoundation}/payments:${DOCKER_IMAGE_TAG:-latest} dynamic-scheduler: diff --git a/services/docker-compose.yml b/services/docker-compose.yml index 691e544b0c0..5da1a28ba0d 100644 --- a/services/docker-compose.yml +++ b/services/docker-compose.yml @@ -566,6 +566,7 @@ services: DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT: ${DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT} DYNAMIC_SCHEDULER_PROFILING: ${DYNAMIC_SCHEDULER_PROFILING} DYNAMIC_SCHEDULER_TRACING: ${DYNAMIC_SCHEDULER_TRACING} + DYNAMIC_SCHEDULER_UI_STORAGE_SECRET: ${DYNAMIC_SCHEDULER_UI_STORAGE_SECRET} TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} static-webserver: diff --git a/services/dynamic-scheduler/Dockerfile b/services/dynamic-scheduler/Dockerfile index b3e9119c898..bffb3808bdd 100644 --- a/services/dynamic-scheduler/Dockerfile +++ b/services/dynamic-scheduler/Dockerfile @@ -146,7 +146,7 @@ HEALTHCHECK --interval=30s \ --timeout=20s \ --start-period=30s \ --retries=3 \ - CMD ["python3", "services/dynamic-scheduler/docker/healthcheck.py", "http://localhost:8000/"] + CMD ["python3", "services/dynamic-scheduler/docker/healthcheck.py", "http://localhost:8000/health"] ENTRYPOINT [ "/bin/sh", "services/dynamic-scheduler/docker/entrypoint.sh" ] CMD ["/bin/sh", "services/dynamic-scheduler/docker/boot.sh"] diff --git a/services/dynamic-scheduler/requirements/_base.in b/services/dynamic-scheduler/requirements/_base.in index fa6e19b5a14..a5926615337 100644 --- a/services/dynamic-scheduler/requirements/_base.in +++ b/services/dynamic-scheduler/requirements/_base.in @@ -18,6 +18,7 @@ arrow fastapi httpx +nicegui packaging python-socketio typer[all] diff --git a/services/dynamic-scheduler/requirements/_base.txt b/services/dynamic-scheduler/requirements/_base.txt index 7fbf832f7df..6cf4dc07c90 100644 --- a/services/dynamic-scheduler/requirements/_base.txt +++ b/services/dynamic-scheduler/requirements/_base.txt @@ 
-7,7 +7,9 @@ aiodebug==2.3.0 aiodocker==0.24.0 # via -r requirements/../../../packages/service-library/requirements/_base.in aiofiles==24.1.0 - # via -r requirements/../../../packages/service-library/requirements/_base.in + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # nicegui aiohappyeyeballs==2.4.3 # via aiohttp aiohttp==3.11.7 @@ -27,6 +29,8 @@ aiohttp==3.11.7 # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../requirements/constraints.txt # aiodocker + # nicegui + # python-socketio aiormq==6.8.1 # via aio-pika aiosignal==1.3.1 @@ -77,6 +81,7 @@ certifi==2024.8.30 # -c requirements/../../../requirements/constraints.txt # httpcore # httpx + # nicegui # requests charset-normalizer==3.4.0 # via requests @@ -92,6 +97,8 @@ deprecated==1.2.15 # opentelemetry-semantic-conventions dnspython==2.7.0 # via email-validator +docutils==0.21.2 + # via nicegui email-validator==2.2.0 # via pydantic exceptiongroup==1.2.2 @@ -102,6 +109,7 @@ fastapi==0.115.5 # via # -r requirements/../../../packages/service-library/requirements/_fastapi.in # -r requirements/_base.in + # nicegui faststream==0.5.30 # via -r requirements/../../../packages/service-library/requirements/_base.in frozenlist==1.5.0 @@ -143,6 +151,7 @@ httpx==0.27.2 # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_fastapi.in # -r requirements/_base.in + # nicegui idna==3.10 # via # anyio @@ -150,8 +159,29 @@ idna==3.10 # httpx # requests # yarl +ifaddr==0.2.0 + # via nicegui importlib-metadata==8.5.0 # via opentelemetry-api +itsdangerous==2.2.0 + # via nicegui +jinja2==3.1.4 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # nicegui jsonschema==4.23.0 # via # -r requirements/../../../packages/models-library/requirements/_base.in @@ -177,14 +207,20 @@ mako==1.3.6 # alembic markdown-it-py==3.0.0 # via rich +markdown2==2.5.1 + # via nicegui markupsafe==3.0.2 - # via mako + # via + # jinja2 + # mako mdurl==0.1.2 # via markdown-it-py multidict==6.1.0 # via # aiohttp # yarl +nicegui==2.7.0 + # via -r requirements/_base.in opentelemetry-api==1.28.2 # via # -r requirements/../../../packages/service-library/requirements/_base.in @@ -280,6 +316,7 @@ orjson==3.10.12 # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # nicegui packaging==24.2 # via # -r requirements/_base.in @@ -300,6 +337,8 @@ protobuf==5.28.3 # via # googleapis-common-protos # opentelemetry-proto +pscript==0.7.7 + # via vbuild psutil==6.1.0 # via -r requirements/../../../packages/service-library/requirements/_base.in psycopg2-binary==2.9.10 @@ -357,7 +396,9 @@ pydantic-settings==2.6.1 # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/settings-library/requirements/_base.in pygments==2.18.0 - # via rich + # via + # nicegui + # rich pyinstrument==5.0.0 # via -r requirements/../../../packages/service-library/requirements/_base.in python-dateutil==2.9.0.post0 @@ -368,8 +409,12 @@ python-dotenv==1.0.1 # uvicorn python-engineio==4.10.1 # via python-socketio +python-multipart==0.0.17 + # via nicegui python-socketio==5.11.4 - # via -r requirements/_base.in + # via + # -r requirements/_base.in + # nicegui pyyaml==6.0.2 # via # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt @@ -412,7 +457,9 @@ referencing==0.35.1 repro-zipfile==0.3.1 # via -r requirements/../../../packages/service-library/requirements/_base.in requests==2.32.3 - # via opentelemetry-exporter-otlp-proto-http + # via + # nicegui + # opentelemetry-exporter-otlp-proto-http rich==13.9.4 # via # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in @@ -487,6 +534,7 @@ typing-extensions==4.12.2 # alembic # fastapi # faststream + # nicegui # opentelemetry-sdk # pydantic # pydantic-core @@ -510,15 +558,21 @@ urllib3==2.2.3 # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../requirements/constraints.txt + # nicegui # requests uvicorn==0.32.1 # via # -r requirements/../../../packages/service-library/requirements/_fastapi.in # -r requirements/_base.in + # nicegui uvloop==0.21.0 # via uvicorn +vbuild==0.8.2 + # via nicegui watchfiles==1.0.0 - # via uvicorn + # via + # nicegui + # uvicorn websockets==14.1 # via uvicorn wrapt==1.17.0 diff --git a/services/dynamic-scheduler/requirements/_test.in b/services/dynamic-scheduler/requirements/_test.in index 
455f92720bb..1bc0580e049 100644 --- a/services/dynamic-scheduler/requirements/_test.in +++ b/services/dynamic-scheduler/requirements/_test.in @@ -15,6 +15,8 @@ asgi_lifespan coverage docker faker +hypercorn +playwright pytest pytest-asyncio pytest-cov diff --git a/services/dynamic-scheduler/requirements/_test.txt b/services/dynamic-scheduler/requirements/_test.txt index 2aeab660bbb..d951c31a63c 100644 --- a/services/dynamic-scheduler/requirements/_test.txt +++ b/services/dynamic-scheduler/requirements/_test.txt @@ -23,10 +23,20 @@ docker==7.1.0 # via -r requirements/_test.in faker==33.0.0 # via -r requirements/_test.in +greenlet==3.1.1 + # via + # -c requirements/_base.txt + # playwright h11==0.14.0 # via # -c requirements/_base.txt # httpcore + # hypercorn + # wsproto +h2==4.1.0 + # via hypercorn +hpack==4.0.0 + # via h2 httpcore==1.0.7 # via # -c requirements/_base.txt @@ -36,6 +46,10 @@ httpx==0.27.2 # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # respx +hypercorn==0.17.3 + # via -r requirements/_test.in +hyperframe==6.0.1 + # via h2 icdiff==2.0.7 # via pytest-icdiff idna==3.10 @@ -51,10 +65,16 @@ packaging==24.2 # -c requirements/_base.txt # pytest # pytest-sugar +playwright==1.49.0 + # via -r requirements/_test.in pluggy==1.5.0 # via pytest pprintpp==0.4.0 # via pytest-icdiff +priority==2.0.0 + # via hypercorn +pyee==12.0.0 + # via playwright pytest==8.3.3 # via # -r requirements/_test.in @@ -107,9 +127,14 @@ typing-extensions==4.12.2 # via # -c requirements/_base.txt # faker + # pyee urllib3==2.2.3 # via # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # docker # requests +wsproto==1.2.0 + # via + # -c requirements/_base.txt + # hypercorn diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/__init__.py new file mode 100644 index 00000000000..7f991346a4b --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/__init__.py @@ -0,0 +1,3 @@ +from ._setup import setup_frontend + +__all__: tuple[str, ...] 
= ("setup_frontend",) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_setup.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_setup.py new file mode 100644 index 00000000000..9e689c86023 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_setup.py @@ -0,0 +1,19 @@ +import nicegui +from fastapi import FastAPI + +from ...core.settings import ApplicationSettings +from ._utils import set_parent_app +from .routes import router + + +def setup_frontend(app: FastAPI) -> None: + settings: ApplicationSettings = app.state.settings + + nicegui.app.include_router(router) + + nicegui.ui.run_with( + app, + mount_path="/", + storage_secret=settings.DYNAMIC_SCHEDULER_UI_STORAGE_SECRET.get_secret_value(), + ) + set_parent_app(app) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_utils.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_utils.py new file mode 100644 index 00000000000..6e890b8b8fe --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_utils.py @@ -0,0 +1,11 @@ +import nicegui +from fastapi import FastAPI + + +def set_parent_app(parent_app: FastAPI) -> None: + nicegui.app.state.parent_app = parent_app + + +def get_parent_app(app: FastAPI) -> FastAPI: + parent_app: FastAPI = app.state.parent_app + return parent_app diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/__init__.py new file mode 100644 index 00000000000..098f68217be --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/__init__.py @@ -0,0 +1,10 @@ +from nicegui import APIRouter + +from . import _index, _service + +router = APIRouter() + +router.include_router(_index.router) +router.include_router(_service.router) + +__all__: tuple[str, ...] 
= ("router",) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_index.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_index.py new file mode 100644 index 00000000000..5c864651427 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_index.py @@ -0,0 +1,169 @@ +import json + +import httpx +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from nicegui import APIRouter, app, ui +from nicegui.element import Element +from nicegui.elements.label import Label +from settings_library.utils_service import DEFAULT_FASTAPI_PORT + +from ....services.service_tracker import TrackedServiceModel, get_all_tracked_services +from ....services.service_tracker._models import SchedulerServiceState +from .._utils import get_parent_app +from ._render_utils import base_page, get_iso_formatted_date + +router = APIRouter() + + +def _render_service_details(node_id: NodeID, service: TrackedServiceModel) -> None: + dict_to_render: dict[str, tuple[str, str]] = { + "NodeID": ("copy", f"{node_id}"), + "Display State": ("label", service.current_state), + "Last State Change": ( + "label", + get_iso_formatted_date(service.last_state_change), + ), + "UserID": ("copy", f"{service.user_id}"), + "ProjectID": ("copy", f"{service.project_id}"), + "User Requested": ("label", service.requested_state), + } + + if service.dynamic_service_start: + dict_to_render["Service"] = ( + "label", + f"{service.dynamic_service_start.key}:{service.dynamic_service_start.version}", + ) + dict_to_render["Product"] = ( + "label", + service.dynamic_service_start.product_name, + ) + service_status = ( + json.loads(service.service_status) if service.service_status else {} + ) + dict_to_render["Service State"] = ( + "label", + service_status.get( + "state" if "boot_type" in service_status else "service_state", "N/A" + ), + ) + + with ui.column().classes("gap-0"): + for key, (widget, value) in dict_to_render.items(): + with ui.row(align_items="baseline"): + ui.label(key).classes("font-bold") + match widget: + case "copy": + ui.label(value).classes("border bg-slate-200 px-1") + case "label": + ui.label(value) + case _: + ui.label(value) + + +def _render_buttons(node_id: NodeID, service: TrackedServiceModel) -> None: + + with ui.dialog() as confirm_dialog, ui.card(): + ui.markdown(f"Stop service **{node_id}**?") + ui.label("The service will be stopped and its data will be saved.") + with ui.row(): + + async def _stop_service() -> None: + confirm_dialog.close() + await httpx.AsyncClient(timeout=10).get( + f"http://localhost:{DEFAULT_FASTAPI_PORT}/service/{node_id}:stop" + ) + + ui.notify( + f"Submitted stop request for {node_id}. Please give the service some time to stop!" 
+ ) + + ui.button("Stop Now", color="red", on_click=_stop_service) + ui.button("Cancel", on_click=confirm_dialog.close) + + with ui.button_group(): + ui.button( + "Details", + icon="source", + on_click=lambda: ui.navigate.to(f"/service/{node_id}:details"), + ).tooltip("Display more information about what the scheduler is tracking") + + if service.current_state != SchedulerServiceState.RUNNING: + return + + ui.button( + "Stop Service", + icon="stop", + color="orange", + on_click=confirm_dialog.open, + ).tooltip("Stops the service and saves the data") + + +def _render_card( + card_container: Element, node_id: NodeID, service: TrackedServiceModel +) -> None: + with card_container: # noqa: SIM117 + with ui.column().classes("border p-1"): + _render_service_details(node_id, service) + _render_buttons(node_id, service) + + +def _get_clean_hashable(model: TrackedServiceModel) -> dict: + """removes items which trigger frequent updates and are not interesting to the user""" + data = model.model_dump(mode="json") + data.pop("check_status_after") + data.pop("last_status_notification") + data.pop("service_status_task_uid") + return data + + +def _get_hash(items: list[tuple[NodeID, TrackedServiceModel]]) -> int: + return hash( + json.dumps([(f"{key}", _get_clean_hashable(model)) for key, model in items]) + ) + + +class CardUpdater: + def __init__( + self, parent_app: FastAPI, container: Element, services_count_label: Label + ) -> None: + self.parent_app = parent_app + self.container = container + self.services_count_label = services_count_label + self.last_hash: int = _get_hash([]) + + async def update(self) -> None: + tracked_services = await get_all_tracked_services(self.parent_app) + tracked_items: list[tuple[NodeID, TrackedServiceModel]] = sorted( + tracked_services.items(), reverse=True + ) + + current_hash = _get_hash(tracked_items) + + if self.last_hash != current_hash: + self.services_count_label.set_text(f"{len(tracked_services)}") + # Clear the current cards + self.container.clear() + for node_id, service in tracked_items: + _render_card(self.container, node_id, service) + + self.last_hash = current_hash + + +@router.page("/") +async def index(): + with base_page(): + with ui.row().classes("gap-0"): + ui.label("Total tracked services:") + ui.label("").classes("w-1") + with ui.label("0") as services_count_label: + pass + + card_container: Element = ui.row() + + updater = CardUpdater(get_parent_app(app), card_container, services_count_label) + + # render cards when page is loaded + await updater.update() + # update card at a set interval + ui.timer(1, updater.update) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_render_utils.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_render_utils.py new file mode 100644 index 00000000000..c3a315be2d7 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_render_utils.py @@ -0,0 +1,23 @@ +from collections.abc import Iterator +from contextlib import contextmanager + +import arrow +from nicegui import ui + + +@contextmanager +def base_page(*, title: str | None = None) -> Iterator[None]: + display_title = ( + "Dynamic Scheduler" if title is None else f"Dynamic Scheduler - {title}" + ) + ui.page_title(display_title) + + with ui.header(elevated=True).classes("items-center"): + ui.button(icon="o_home", on_click=lambda: ui.navigate.to("/")) + ui.label(display_title) + + yield None + + +def 
get_iso_formatted_date(timestamp: float) -> str: + return arrow.get(timestamp).isoformat() diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_service.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_service.py new file mode 100644 index 00000000000..b4d9327df0f --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_service.py @@ -0,0 +1,146 @@ +import json + +import httpx +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStop, +) +from models_library.projects_nodes_io import NodeID +from nicegui import APIRouter, app, ui +from servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler.services import ( + stop_dynamic_service, +) +from settings_library.utils_service import DEFAULT_FASTAPI_PORT +from simcore_service_dynamic_scheduler.services.rabbitmq import get_rabbitmq_rpc_client + +from ....core.settings import ApplicationSettings +from ....services.service_tracker import get_tracked_service, remove_tracked_service +from .._utils import get_parent_app +from ._render_utils import base_page + +router = APIRouter() + + +def _render_remove_from_tracking(node_id): + with ui.dialog() as confirm_dialog, ui.card(): + + async def remove_from_tracking(): + confirm_dialog.close() + await httpx.AsyncClient(timeout=10).get( + f"http://localhost:{DEFAULT_FASTAPI_PORT}/service/{node_id}/tracker:remove" + ) + + ui.notify(f"Service {node_id} removed from tracking") + ui.navigate.to("/") + + ui.markdown(f"Remove the service **{node_id}** from the tracker?") + ui.label( + "This action will result in the removal of the service from the internal tracker. " + "This action should be used when you are facing issues and the service is not " + "automatically removed." + ) + ui.label( + "NOTE 1: the system normally cleans up services but it might take a few minutes. " + "Only use this option when you have observed enough time passing without any change." + ).classes("text-red-600") + ui.label( + "NOTE 2: This will break the frontend for the user! If the user has the service opened, " + "it will no longer receive any status updates." + ).classes("text-red-600") + + with ui.row(): + ui.button("Remove service", color="red", on_click=remove_from_tracking) + ui.button("Cancel", on_click=confirm_dialog.close) + + ui.button( + "Remove from tracking", + icon="remove_circle", + color="red", + on_click=confirm_dialog.open, + ).tooltip("Removes the service from the dynamic-scheduler's internal tracking") + + +def _render_danger_zone(node_id: NodeID) -> None: + ui.separator() + + ui.markdown("**Danger Zone, beware!**").classes("text-2xl text-red-700") + ui.label( + "Do not use these actions if you do not know what they do." + ).classes("text-red-700") + + ui.label( + "They are reserved as a means of recovering the system from a failing state." + ).classes("text-red-700") + + _render_remove_from_tracking(node_id) + + +@router.page("/service/{node_id}:details") +async def service_details(node_id: NodeID): + with base_page(title=f"{node_id} details"): + service_model = await get_tracked_service(get_parent_app(app), node_id) + + if not service_model: + ui.markdown( + f"Sorry could not find any details for **node_id={node_id}**. " + "Please make sure the **node_id** is correct. " + "Also make sure you have not provided a **product_id**."
+ ) + return + + scheduler_internals = service_model.model_dump(mode="json") + service_status = scheduler_internals.pop("service_status", "{}") + service_status = json.loads("{}" if service_status == "" else service_status) + dynamic_service_start = scheduler_internals.pop("dynamic_service_start") + + ui.markdown("**Service Status**") + ui.code(json.dumps(service_status, indent=2), language="json") + + ui.markdown("**Scheduler Internals**") + ui.code(json.dumps(scheduler_internals, indent=2), language="json") + + ui.markdown("**Start Parameters**") + ui.code(json.dumps(dynamic_service_start, indent=2), language="json") + + ui.markdown("**Raw serialized data (the one used to render the above)**") + ui.code(service_model.model_dump_json(indent=2), language="json") + + _render_danger_zone(node_id) + + +@router.page("/service/{node_id}:stop") +async def service_stop(node_id: NodeID): + parent_app = get_parent_app(app) + + service_model = await get_tracked_service(parent_app, node_id) + if not service_model: + ui.notify(f"Could not stop service {node_id}. Was not able to find it") + return + + settings: ApplicationSettings = parent_app.state.settings + + assert service_model.user_id # nosec + assert service_model.project_id # nosec + + await stop_dynamic_service( + get_rabbitmq_rpc_client(get_parent_app(app)), + dynamic_service_stop=DynamicServiceStop( + user_id=service_model.user_id, + project_id=service_model.project_id, + node_id=node_id, + simcore_user_agent="", + save_state=True, + ), + timeout_s=int(settings.DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT.total_seconds()), + ) + + +@router.page("/service/{node_id}/tracker:remove") +async def remove_service_from_tracking(node_id: NodeID): + parent_app = get_parent_app(app) + + service_model = await get_tracked_service(parent_app, node_id) + if not service_model: + ui.notify(f"Could not remove service {node_id}.
Was not able to find it") + return + + await remove_tracked_service(parent_app, node_id) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_health.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_health.py index 7e87c57fd06..ff5fe204132 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_health.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_health.py @@ -24,7 +24,7 @@ class HealthCheckError(RuntimeError): """Failed a health check""" -@router.get("/", response_class=PlainTextResponse) +@router.get("/health", response_class=PlainTextResponse) async def healthcheck( rabbit_client: Annotated[RabbitMQClient, Depends(get_rabbitmq_client_from_request)], rabbit_rpc_server: Annotated[ diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/cli.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/cli.py index e06b8f25129..0b7d56fccda 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/cli.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/cli.py @@ -52,6 +52,10 @@ def echo_dotenv(ctx: typer.Context, *, minimal: bool = True): ), ), ), + DYNAMIC_SCHEDULER_UI_STORAGE_SECRET=os.environ.get( + "DYNAMIC_SCHEDULER_UI_STORAGE_SECRET", + "replace-with-ui-storage-secret", + ), ) print_as_envfile( diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/application.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/application.py index e6ba2bbb53f..971b82888be 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/application.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/application.py @@ -15,6 +15,7 @@ PROJECT_NAME, SUMMARY, ) +from ..api.frontend import setup_frontend from ..api.rest.routes import setup_rest_api from ..api.rpc.routes import setup_rpc_api_routes from ..services.deferred_manager import setup_deferred_manager @@ -74,6 +75,7 @@ def create_app(settings: ApplicationSettings | None = None) -> FastAPI: setup_status_monitor(app) setup_rest_api(app) + setup_frontend(app) # ERROR HANDLERS # ... add here ... diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py index e577a806712..94acb6eaac4 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py @@ -1,7 +1,7 @@ import datetime from typing import Annotated -from pydantic import AliasChoices, Field, TypeAdapter, field_validator +from pydantic import AliasChoices, Field, SecretStr, TypeAdapter, field_validator from servicelib.logging_utils_filtering import LoggerName, MessageSubstring from settings_library.application import BaseApplicationSettings from settings_library.basic_types import LogLevel, VersionTag @@ -80,6 +80,14 @@ class ApplicationSettings(_BaseApplicationSettings): These settings includes extra configuration for the http-API """ + DYNAMIC_SCHEDULER_UI_STORAGE_SECRET: SecretStr = Field( + ..., + description=( + "secret required to enable browser-based storage for the UI. " + "Enables the full set of features to be used for NiceGUI" + ), + ) + DYNAMIC_SCHEDULER_RABBITMQ: RabbitSettings = Field( json_schema_extra={"auto_default_from_env": True}, description="settings for service/rabbitmq", diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/rabbitmq.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/rabbitmq.py index ff3a6ef9b94..b7b3d30425c 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/rabbitmq.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/rabbitmq.py @@ -21,6 +21,9 @@ async def _on_startup() -> None: app.state.rabbitmq_client = RabbitMQClient( client_name="dynamic_scheduler", settings=settings ) + app.state.rabbitmq_rpc_client = await RabbitMQRPCClient.create( + client_name="dynamic_scheduler_rpc_client", settings=settings + ) app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( client_name="dynamic_scheduler_rpc_server", settings=settings ) @@ -28,6 +31,8 @@ async def _on_shutdown() -> None: if app.state.rabbitmq_client: await app.state.rabbitmq_client.close() + if app.state.rabbitmq_rpc_client: + await app.state.rabbitmq_rpc_client.close() if app.state.rabbitmq_rpc_server: await app.state.rabbitmq_rpc_server.close() @@ -40,6 +45,11 @@ def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: return cast(RabbitMQClient, app.state.rabbitmq_client) + +def get_rabbitmq_rpc_client(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_client + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_client) + + def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: assert app.state.rabbitmq_rpc_server # nosec return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/__init__.py index abf543d1bef..e4cf7e50705 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/__init__.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/__init__.py @@ -4,7 +4,7 @@ get_tracked_service, get_user_id_for_service, remove_tracked_service, - set_frontned_notified_for_service, + set_frontend_notified_for_service, set_if_status_changed_for_service, set_request_as_running, set_request_as_stopped, @@ -21,7 +21,7 @@ "get_user_id_for_service", "NORMAL_RATE_POLL_INTERVAL", "remove_tracked_service", - "set_frontned_notified_for_service", + "set_frontend_notified_for_service", "set_if_status_changed_for_service", "set_request_as_running", "set_request_as_stopped", diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_api.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_api.py index 99215c69123..09e4c3b965f 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_api.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_api.py @@ -184,7 +184,7 @@ async def should_notify_frontend_for_service( ) -async def set_frontned_notified_for_service(app: FastAPI, node_id: NodeID) -> None: +async def set_frontend_notified_for_service(app: FastAPI, node_id: NodeID) -> None: tracker = get_tracker(app) model:
TrackedServiceModel | None = await tracker.load(node_id) if model is None: diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py index f710204504c..4cd8209d1ae 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py @@ -76,7 +76,7 @@ async def on_result( ) if user_id: await notify_service_status_change(app, user_id, result) - await service_tracker.set_frontned_notified_for_service(app, node_id) + await service_tracker.set_frontend_notified_for_service(app, node_id) else: _logger.info( "Did not find a user for '%s', skipping status delivery of: %s", diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_monitor.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_monitor.py index 8ba70997a93..432cf8896d8 100644 --- a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_monitor.py +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_monitor.py @@ -70,9 +70,9 @@ def __init__(self, app: FastAPI, status_worker_interval: timedelta) -> None: def status_worker_interval_seconds(self) -> NonNegativeFloat: return self.status_worker_interval.total_seconds() - async def _worker_start_get_status_requests(self) -> None: + async def _worker_check_services_require_status_update(self) -> None: """ - Check if a service requires it's status to be polled. + Check if any service requires its status to be polled. Note that the interval at which the status is polled can vary. This is a relatively low resource check.
""" @@ -136,7 +136,7 @@ async def _worker_start_get_status_requests(self) -> None: async def setup(self) -> None: self.app.state.status_monitor_background_task = start_exclusive_periodic_task( get_redis_client(self.app, RedisDatabase.LOCKS), - self._worker_start_get_status_requests, + self._worker_check_services_require_status_update, task_period=_INTERVAL_BETWEEN_CHECKS, retry_after=_INTERVAL_BETWEEN_CHECKS, task_name="periodic_service_status_update", diff --git a/services/dynamic-scheduler/tests/unit/api_frontend/conftest.py b/services/dynamic-scheduler/tests/unit/api_frontend/conftest.py new file mode 100644 index 00000000000..9d131549faf --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_frontend/conftest.py @@ -0,0 +1,122 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import asyncio +import subprocess +from collections.abc import AsyncIterable +from contextlib import suppress +from typing import Final +from unittest.mock import AsyncMock + +import pytest +from fastapi import FastAPI, status +from httpx import AsyncClient +from hypercorn.asyncio import serve +from hypercorn.config import Config +from playwright.async_api import Page, async_playwright +from pytest_mock import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from settings_library.utils_service import DEFAULT_FASTAPI_PORT +from simcore_service_dynamic_scheduler.core.application import create_app +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + +_MODULE: Final["str"] = "simcore_service_dynamic_scheduler" + + +@pytest.fixture +def disable_status_monitor_background_task(mocker: MockerFixture) -> None: + mocker.patch( + f"{_MODULE}.services.status_monitor._monitor.Monitor._worker_check_services_require_status_update" + ) + + +@pytest.fixture +def mock_stop_dynamic_service(mocker: MockerFixture) -> AsyncMock: + async_mock = AsyncMock() + mocker.patch( + f"{_MODULE}.api.frontend.routes._service.stop_dynamic_service", async_mock + ) + return async_mock + + +@pytest.fixture +def mock_remove_tracked_service(mocker: MockerFixture) -> AsyncMock: + async_mock = AsyncMock() + mocker.patch( + f"{_MODULE}.api.frontend.routes._service.remove_tracked_service", async_mock + ) + return async_mock + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + disable_status_monitor_background_task: None, + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + remove_redis_data: None, +) -> EnvVarsDict: + return app_environment + + +@pytest.fixture +def server_host_port() -> str: + return f"127.0.0.1:{DEFAULT_FASTAPI_PORT}" + + +@pytest.fixture +def not_initialized_app(app_environment: EnvVarsDict) -> FastAPI: + return create_app() + + +@pytest.fixture +async def app_runner( + not_initialized_app: FastAPI, server_host_port: str +) -> AsyncIterable[None]: + + shutdown_event = asyncio.Event() + + async def _wait_for_shutdown_event(): + await shutdown_event.wait() + + async def _run_server() -> None: + config = Config() + config.bind = [server_host_port] + + with suppress(asyncio.CancelledError): + await serve( + not_initialized_app, config, shutdown_trigger=_wait_for_shutdown_event + ) + + server_task = asyncio.create_task(_run_server()) + + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(2) + ): + with attempt: + async with AsyncClient(timeout=1) as client: + result = await 
client.get(f"http://{server_host_port}") + assert result.status_code == status.HTTP_200_OK + + yield + + shutdown_event.set() + await server_task + + +@pytest.fixture +def download_playwright_browser() -> None: + subprocess.run( # noqa: S603 + ["playwright", "install", "chromium"], check=True # noqa: S607 + ) + + +@pytest.fixture +async def async_page(download_playwright_browser: None) -> AsyncIterable[Page]: + async with async_playwright() as p: + browser = await p.chromium.launch() + page = await browser.new_page() + yield page + await browser.close() diff --git a/services/dynamic-scheduler/tests/unit/api_frontend/helpers.py b/services/dynamic-scheduler/tests/unit/api_frontend/helpers.py new file mode 100644 index 00000000000..91c2058c869 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_frontend/helpers.py @@ -0,0 +1,104 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import sys +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from pathlib import Path +from typing import Final +from uuid import uuid4 + +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_webserver.projects_nodes import NodeGet +from playwright.async_api import Locator, Page +from pydantic import NonNegativeFloat, NonNegativeInt, TypeAdapter +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + +_HERE: Final[Path] = ( + Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent +) +_DEFAULT_TIMEOUT: Final[NonNegativeFloat] = 10 + + +@asynccontextmanager +async def take_screenshot_on_error( + async_page: Page, +) -> AsyncIterator[None]: + try: + yield + # also captures exceptions raised inside `with pytest.raises(...)` blocks + except BaseException: + path = _HERE / f"{uuid4()}.ignore.png" + await async_page.screenshot(path=path) + print(f"Please check: {path}") + + raise + + +async def _get_locator( + async_page: Page, + text: str, + instances: NonNegativeInt | None, + timeout: float, # noqa: ASYNC109 +) -> Locator: + async with take_screenshot_on_error(async_page): + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(timeout) + ): + with attempt: + locator = async_page.get_by_text(text) + count = await locator.count() + if instances is None: + assert count > 0, f"could not find text='{text}'" + else: + assert ( + count == instances + ), f"found {count} instances of text='{text}'.
Expected {instances}" + return locator + + +async def assert_contains_text( + async_page: Page, + text: str, + instances: NonNegativeInt | None = None, + timeout: float = _DEFAULT_TIMEOUT, # noqa: ASYNC109 +) -> None: + await _get_locator(async_page, text, instances=instances, timeout=timeout) + + +async def click_on_text( + async_page: Page, + text: str, + instances: NonNegativeInt | None = None, + timeout: float = _DEFAULT_TIMEOUT, # noqa: ASYNC109 +) -> None: + locator = await _get_locator(async_page, text, instances=instances, timeout=timeout) + await locator.click() + + +async def assert_not_contains_text( + async_page: Page, + text: str, + timeout: float = _DEFAULT_TIMEOUT, # noqa: ASYNC109 +) -> None: + async with take_screenshot_on_error(async_page): + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(timeout) + ): + with attempt: + locator = async_page.get_by_text(text) + assert await locator.count() < 1, f"found text='{text}' in body" + + +def get_new_style_service_status(state: str) -> DynamicServiceGet: + return TypeAdapter(DynamicServiceGet).validate_python( + DynamicServiceGet.model_config["json_schema_extra"]["examples"][0] + | {"state": state} + ) + + +def get_legacy_service_status(state: str) -> NodeGet: + return TypeAdapter(NodeGet).validate_python( + NodeGet.model_config["json_schema_extra"]["examples"][0] + | {"service_state": state} + ) diff --git a/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_index.py b/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_index.py new file mode 100644 index 00000000000..1cdb66ba587 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_index.py @@ -0,0 +1,125 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Callable +from unittest.mock import AsyncMock +from uuid import uuid4 + +import pytest +from fastapi import FastAPI +from helpers import ( + assert_contains_text, + assert_not_contains_text, + click_on_text, + get_legacy_service_status, + get_new_style_service_status, +) +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet +from models_library.projects_nodes_io import NodeID +from playwright.async_api import Page +from simcore_service_dynamic_scheduler.services.service_tracker import ( + set_if_status_changed_for_service, + set_request_as_running, + set_request_as_stopped, +) +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", + "redis", +] + +pytest_simcore_ops_services_selection = [ + # "redis-commander", +] + + +async def test_index_with_elements( + app_runner: None, + async_page: Page, + server_host_port: str, + not_initialized_app: FastAPI, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + get_dynamic_service_stop: Callable[[NodeID], DynamicServiceStop], +): + await async_page.goto(server_host_port) + + # 1. no content + await assert_contains_text(async_page, "Total tracked services:") + await assert_contains_text(async_page, "0") + await assert_not_contains_text(async_page, "Details") + + # 2. 
add elements and check + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(uuid4()) + ) + await set_request_as_stopped(not_initialized_app, get_dynamic_service_stop(uuid4())) + + await assert_contains_text(async_page, "2") + await assert_contains_text(async_page, "Details", instances=2) + + +@pytest.mark.parametrize( + "service_status", + [ + get_new_style_service_status("running"), + get_legacy_service_status("running"), + ], +) +async def test_main_page( + app_runner: None, + async_page: Page, + server_host_port: str, + node_id: NodeID, + service_status: NodeGet | DynamicServiceGet, + not_initialized_app: FastAPI, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + mock_stop_dynamic_service: AsyncMock, +): + await async_page.goto(server_host_port) + + # 1. no content + await assert_contains_text(async_page, "Total tracked services:") + await assert_contains_text(async_page, "0") + await assert_not_contains_text(async_page, "Details") + + # 2. start a service shows content + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(node_id) + ) + await set_if_status_changed_for_service( + not_initialized_app, node_id, service_status + ) + + await assert_contains_text(async_page, "1") + await assert_contains_text(async_page, "Details") + + # 3. click on stop and then cancel + await click_on_text(async_page, "Stop Service") + await assert_contains_text( + async_page, "The service will be stopped and its data will be saved" + ) + await click_on_text(async_page, "Cancel") + + # 4. click on stop then confirm + + await assert_not_contains_text( + async_page, "The service will be stopped and its data will be saved" + ) + await click_on_text(async_page, "Stop Service") + await assert_contains_text( + async_page, "The service will be stopped and its data will be saved" + ) + + mock_stop_dynamic_service.assert_not_awaited() + await click_on_text(async_page, "Stop Now") + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(3) + ): + with attempt: + mock_stop_dynamic_service.assert_awaited_once() diff --git a/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_service.py b/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_service.py new file mode 100644 index 00000000000..c37b7b0a4f1 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_service.py @@ -0,0 +1,121 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Callable +from unittest.mock import AsyncMock + +import pytest +from fastapi import FastAPI +from helpers import ( + assert_contains_text, + click_on_text, + get_legacy_service_status, + get_new_style_service_status, +) +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet +from models_library.projects_nodes_io import NodeID +from playwright.async_api import Page +from simcore_service_dynamic_scheduler.services.service_tracker import ( + set_if_status_changed_for_service, + set_request_as_running, +) +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", + "redis", +] + +pytest_simcore_ops_services_selection = [ + # "redis-commander", +] + + +async def 
test_service_details_no_status_present( + app_runner: None, + async_page: Page, + server_host_port: str, + node_id: NodeID, + not_initialized_app: FastAPI, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], +): + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(node_id) + ) + + await async_page.goto(server_host_port) + + # 1. one service is tracked + await assert_contains_text(async_page, "Total tracked services:") + await assert_contains_text(async_page, "1") + await assert_contains_text(async_page, "Details", instances=1) + + # 2. open details page + await click_on_text(async_page, "Details") + # NOTE: if something is wrong with the page, the button to remove from tracking + # will not be present + await assert_contains_text(async_page, "Remove from tracking", instances=1) + + +async def test_service_details_renders_friendly_404( + app_runner: None, async_page: Page, server_host_port: str, node_id: NodeID +): + # node was not started + await async_page.goto(f"{server_host_port}/service/{node_id}:details") + await assert_contains_text(async_page, "Sorry could not find any details for") + + +@pytest.mark.parametrize( + "service_status", + [ + get_new_style_service_status("running"), + get_legacy_service_status("running"), + ], +) +async def test_service_details( + app_runner: None, + async_page: Page, + server_host_port: str, + node_id: NodeID, + not_initialized_app: FastAPI, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + mock_remove_tracked_service: AsyncMock, + service_status: NodeGet | DynamicServiceGet, +): + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(node_id) + ) + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(node_id) + ) + await set_if_status_changed_for_service( + not_initialized_app, node_id, service_status + ) + + await async_page.goto(server_host_port) + + # 1. one service is tracked + await assert_contains_text(async_page, "Total tracked services:") + await assert_contains_text(async_page, "1") + await assert_contains_text(async_page, "Details", instances=1) + + # 2. open details page + await click_on_text(async_page, "Details") + + # 3. click "Remove from tracking" -> cancel + await click_on_text(async_page, "Remove from tracking") + await click_on_text(async_page, "Cancel") + mock_remove_tracked_service.assert_not_awaited() + + # 4.
click "Remove from tracking" -> confirm + await click_on_text(async_page, "Remove from tracking") + await click_on_text(async_page, "Remove service") + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(3) + ): + with attempt: + mock_remove_tracked_service.assert_awaited_once() diff --git a/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__health.py b/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__health.py index 9b5648e12b4..cb7939c5824 100644 --- a/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__health.py +++ b/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__health.py @@ -68,9 +68,9 @@ def app_environment( ) async def test_health(client: AsyncClient, is_ok: bool): if is_ok: - response = await client.get("/") + response = await client.get("/health") assert response.status_code == status.HTTP_200_OK assert datetime.fromisoformat(response.text.split("@")[1]) else: with pytest.raises(HealthCheckError): - await client.get("/") + await client.get("/health") diff --git a/services/dynamic-scheduler/tests/unit/service_tracker/test__tracker.py b/services/dynamic-scheduler/tests/unit/service_tracker/test__tracker.py index 20293f343b5..f1c29a3d3f7 100644 --- a/services/dynamic-scheduler/tests/unit/service_tracker/test__tracker.py +++ b/services/dynamic-scheduler/tests/unit/service_tracker/test__tracker.py @@ -28,7 +28,7 @@ @pytest.fixture def disable_monitor_task(mocker: MockerFixture) -> None: mocker.patch( - "simcore_service_dynamic_scheduler.services.status_monitor._monitor.Monitor._worker_start_get_status_requests", + "simcore_service_dynamic_scheduler.services.status_monitor._monitor.Monitor._worker_check_services_require_status_update", autospec=True, ) diff --git a/services/dynamic-scheduler/tests/unit/status_monitor/test_services_status_monitor__monitor.py b/services/dynamic-scheduler/tests/unit/status_monitor/test_services_status_monitor__monitor.py index 2578114e541..5924f9dec84 100644 --- a/services/dynamic-scheduler/tests/unit/status_monitor/test_services_status_monitor__monitor.py +++ b/services/dynamic-scheduler/tests/unit/status_monitor/test_services_status_monitor__monitor.py @@ -404,7 +404,7 @@ async def test_expected_calls_to_notify_frontend( # pylint:disable=too-many-arg ): with attempt: # pylint:disable=protected-access - await monitor._worker_start_get_status_requests() # noqa: SLF001 + await monitor._worker_check_services_require_status_update() # noqa: SLF001 for method in ("start", "on_created", "on_result"): await _assert_call_to( deferred_status_spies, method=method, count=i + 1 @@ -428,7 +428,7 @@ async def test_expected_calls_to_notify_frontend( # pylint:disable=too-many-arg ): with attempt: # pylint:disable=protected-access - await monitor._worker_start_get_status_requests() # noqa: SLF001 + await monitor._worker_check_services_require_status_update() # noqa: SLF001 assert remove_tracked_spy.call_count == remove_tracked_count diff --git a/services/osparc-gateway-server/.env-devel b/services/osparc-gateway-server/.env-devel deleted file mode 100644 index 944c6914d43..00000000000 --- a/services/osparc-gateway-server/.env-devel +++ /dev/null @@ -1,2 +0,0 @@ -COMPUTATIONAL_SIDECAR_IMAGE=local/dask-sidecar:production -COMPUTATIONAL_SIDECAR_LOG_LEVEL=INFO diff --git a/services/osparc-gateway-server/.gitignore b/services/osparc-gateway-server/.gitignore deleted file mode 100644 index 4d7a877c063..00000000000 --- a/services/osparc-gateway-server/.gitignore +++ /dev/null @@ -1 +0,0 
@@ -.osparc-dask* diff --git a/services/osparc-gateway-server/Dockerfile b/services/osparc-gateway-server/Dockerfile deleted file mode 100644 index 899ef0fb434..00000000000 --- a/services/osparc-gateway-server/Dockerfile +++ /dev/null @@ -1,177 +0,0 @@ -# syntax=docker/dockerfile:1 - -# Define arguments in the global scope -ARG PYTHON_VERSION="3.11.9" -ARG UV_VERSION="0.4" -FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build -# we docker image is built based on debian -FROM python:${PYTHON_VERSION}-slim-bullseye AS base -ARG TARGETPLATFORM -ARG BUILDPLATFORM -RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" - -LABEL maintainer=mguidon,sanderegg - -# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] -RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache -# libffi-dev is needed for ARM architectures -RUN --mount=type=cache,target=/var/cache/apt,mode=0755,sharing=private \ - --mount=type=cache,target=/var/lib/apt,mode=0755,sharing=private \ - set -eux \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - gosu \ - libffi-dev \ - libffi7 \ - && apt-get clean -y \ - # verify that the binary works - && gosu nobody true - - -# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) -ENV SC_USER_ID=8004 \ - SC_USER_NAME=scu \ - SC_BUILD_TARGET=base \ - SC_BOOT_MODE=default - -RUN adduser \ - --uid ${SC_USER_ID} \ - --disabled-password \ - --gecos "" \ - --shell /bin/sh \ - --home /home/${SC_USER_NAME} \ - ${SC_USER_NAME} - - -ENV LANG=C.UTF-8 \ - PYTHONDONTWRITEBYTECODE=1 \ - VIRTUAL_ENV=/home/scu/.venv - -ENV PATH="${VIRTUAL_ENV}/bin:$PATH" - -# for ARM architecture this helps a lot VS building packages -# NOTE: remove as this might create bad caching behaviour -# ENV PIP_EXTRA_INDEX_URL=https://www.piwheels.org/simple - - -EXPOSE 8000 - - -# -------------------------- Build stage ------------------- -# Installs build/package management tools and third party dependencies -# -# + /build WORKDIR -# -FROM base AS build - -ENV SC_BUILD_TARGET=build - -RUN rm -f /etc/apt/apt.conf.d/docker-clean -RUN --mount=type=cache,target=/var/cache/apt,mode=0755,sharing=private \ - --mount=type=cache,target=/var/lib/apt,mode=0755,sharing=private \ - set -eux \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - build-essential \ - git \ - golang-go - -# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv -COPY --from=uv_build /uv /uvx /bin/ - -# NOTE: python virtualenv is used here such that installed packages may be moved to production image easily by copying the venv -RUN uv venv "${VIRTUAL_ENV}" -RUN --mount=type=cache,target=/root/.cache/uv \ - uv pip install --upgrade \ - wheel \ - setuptools - -WORKDIR /build - - - -# in ARM64 mode there is a catch: the piwheels package does not contain the dask-gateway-proxy executable in 64-bit -RUN dpkgArch="$(dpkg --print-architecture)";\ - case "$dpkgArch" in \ - arm64) git clone --depth 1 --branch 0.9.0 https://github.com/dask/dask-gateway.git \ - && cd dask-gateway/osparc-gateway-server \ - && uv pip install .\ - ;; \ - esac; - -# --------------------------Prod-depends-only stage ------------------- -# This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) -# -# + /build -# + services/osparc-gateway-server [scu:scu] WORKDIR -# -FROM build AS prod-only-deps - -ENV 
SC_BUILD_TARGET=prod-only-deps - -WORKDIR /build/services/osparc-gateway-server - -RUN \ - --mount=type=bind,source=packages,target=/build/packages,rw \ - --mount=type=bind,source=services/osparc-gateway-server,target=/build/services/osparc-gateway-server,rw \ - --mount=type=cache,target=/root/.cache/uv \ - uv pip install \ - --requirement requirements/prod.txt - -# --------------------------Production stage ------------------- -# Final cleanup up to reduce image size and startup setup -# Runs as scu (non-root user) -# -# + /home/scu $HOME = WORKDIR -# + services/osparc-gateway-server [scu:scu] -# -FROM base AS production - -ENV SC_BUILD_TARGET=production \ - SC_BOOT_MODE=production - -ENV PYTHONOPTIMIZE=TRUE -# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode -ENV UV_COMPILE_BYTECODE=1 - -WORKDIR /home/scu -# ensure home folder is read/writable for user scu -RUN chown -R scu /home/scu -# bring installed package without build tools -COPY --from=prod-only-deps --chown=scu:scu ${VIRTUAL_ENV} ${VIRTUAL_ENV} -# copy docker entrypoint and boot scripts -COPY --chown=scu:scu services/osparc-gateway-server/docker services/osparc-gateway-server/docker - - -# TODO: Create healthcheck -# HEALTHCHECK \ -# --interval=60s \ -# --timeout=60s \ -# --start-period=10s \ -# --retries=3 \ -# CMD ["curl", "-Lf", "http://127.0.0.1:8787/health"] - -ENTRYPOINT [ "/bin/sh", "services/osparc-gateway-server/docker/entrypoint.sh" ] -CMD ["/bin/sh", "services/osparc-gateway-server/docker/boot.sh"] - - -# --------------------------Development stage ------------------- -# Source code accessible in host but runs in container -# Runs as scu with same gid/uid as host -# Placed at the end to speed-up the build if images targeting production -# -# + /devel WORKDIR -# + services (mounted volume) -# -FROM build AS development - -ENV SC_BUILD_TARGET=development - -WORKDIR /devel -RUN chown -R scu:scu "${VIRTUAL_ENV}" - -# NOTE: devel mode does NOT have HEALTHCHECK - -ENTRYPOINT [ "/bin/sh", "services/osparc-gateway-server/docker/entrypoint.sh" ] -CMD ["/bin/sh", "services/osparc-gateway-server/docker/boot.sh"] diff --git a/services/osparc-gateway-server/Makefile b/services/osparc-gateway-server/Makefile deleted file mode 100644 index 73a8327b8c8..00000000000 --- a/services/osparc-gateway-server/Makefile +++ /dev/null @@ -1,155 +0,0 @@ -# -# Targets for DEVELOPMENT for osparc gateway service service -# -include ../../scripts/common.Makefile -include ../../scripts/common-service.Makefile - - - -APP_PACKAGE_NAME=osparc_gateway_server -SERVICE_NAME=osparc-gateway-server -DASK_SIDECAR_NAME=dask-sidecar - -.env: .env-devel ## creates .env file from defaults in .env-devel - $(if $(wildcard $@), \ - @echo "WARNING ##### $< is newer than $@ ####"; diff -uN $@ $<; false;,\ - @echo "WARNING ##### $@ does not exist, cloning $< as $@ ############"; cp $< $@) - - - -## -## INFOS -## -.PHONY: info-images info-swarm -define show-meta - $(foreach iid,$(shell docker images "*/$(1):*" --quiet | sort | uniq),\ - docker image inspect $(iid) | jq '.[0] | {tags:.RepoTags, labels:.Config.Labels, arch:.Architecture}';) -endef - -info-images: ## lists tags and labels of built images. 
To display one: 'make target=webserver info-images' - @echo "## $(SERVICE_NAME) images:";\ - docker images */$(SERVICE_NAME):*;\ - $(call show-meta,$(SERVICE_NAME)) - @echo "## $(DASK_SIDECAR_NAME) images:";\ - docker images */$(DASK_SIDECAR_NAME):*;\ - $(call show-meta,$(DASK_SIDECAR_NAME)) - - -info-swarm: ## displays info about stacks and networks -ifneq ($(SWARM_HOSTS), ) - # Stacks in swarm - @docker stack ls - # Containers (tasks) running in '$(SWARM_STACK_NAME)' stack - -@docker stack ps $(SWARM_STACK_NAME) - # Services in '$(SWARM_STACK_NAME)' stack - -@docker stack services $(SWARM_STACK_NAME) - # Networks - @docker network ls -endif - -## -## Running Osparc Dask Gateway -## -SWARM_HOSTS = $(shell docker node ls --format="{{.Hostname}}" 2>$(if $(IS_WIN),NUL,/dev/null)) - -PHONY: .init-swarm -.init-swarm: - # Ensures swarm is initialized - $(if $(SWARM_HOSTS),,docker swarm init --advertise-addr=$(get_my_ip) --default-addr-pool 172.20.0.0/14) - -.PHONY: config -export OSPARC_GATEWAY_CONFIG_FILE_HOST = .osparc-dask-gateway-config.py -export SWARM_STACK_NAME ?= dask-gateway -docker-compose-config-cmd=../../scripts/docker/docker-stack-config.bash -docker-compose-configs = $(wildcard services/docker-compose*.yml) - -$(OSPARC_GATEWAY_CONFIG_FILE_HOST): $(CURDIR)/config/default_config.py ## creates config file from defaults in /config/default_config.py - $(if $(wildcard $@), \ - @echo "WARNING ##### $< is newer than $@ ####"; diff -uN $@ $<; false;,\ - @echo "WARNING ##### $@ does not exist, cloning $< as $@ ############"; cp $< $@) -config: $(OSPARC_GATEWAY_CONFIG_FILE_HOST) ## create default configuration file - -.stack-$(SWARM_STACK_NAME)-development.yml: .env $(docker-compose-configs) - # Creating config for stack with 'local/{service}:development' to $@ - @export DOCKER_REGISTRY=local && \ - export DOCKER_IMAGE_TAG=development && \ - $(docker-compose-config-cmd) \ - docker-compose.yml \ - docker-compose.local.yml \ - docker-compose.devel.yml > $@ - - -.stack-$(SWARM_STACK_NAME)-production.yml: .env $(docker-compose-configs) - # Creating config for stack with 'local/{service}:production' to $@ - @export DOCKER_REGISTRY=local && \ - export DOCKER_IMAGE_TAG=production && \ - $(docker-compose-config-cmd) \ - docker-compose.yml \ - docker-compose.local.yml > $@ - -.stack-$(SWARM_STACK_NAME)-version.yml: .env $(docker-compose-configs) - # Creating config for stack with '$(DOCKER_REGISTRY)/{service}:${DOCKER_IMAGE_TAG}' to $@ - $(docker-compose-config-cmd) \ - docker-compose.yml \ - docker-compose.local.yml > $@ - - -.PHONY: up-devel up-prod up-version up-latest - -define _show_endpoints -# The following endpoints are available -set -o allexport; \ -source $(CURDIR)/.env; \ -set +o allexport; \ -separator=------------------------------------------------------------------------------------;\ -separator=$${separator}$${separator}$${separator};\ -rows="%-22s | %40s | %12s | %12s\n";\ -TableWidth=100;\ -printf "%22s | %40s | %12s | %12s\n" Name Endpoint User Password;\ -printf "%.$${TableWidth}s\n" "$$separator";\ -printf "$$rows" Dask-Gateway 'http://$(get_my_ip):8000' whatever $(filter-out %.password =,$(shell cat $(OSPARC_GATEWAY_CONFIG_FILE_HOST) | grep c.Authenticator.password)); -endef - -show-endpoints: - @$(_show_endpoints) - - -up-devel: .stack-$(SWARM_STACK_NAME)-development.yml .init-swarm config ## Deploys local development stack and ops stack (pass 'make ops_disabled=1 up-...' 
to disable) - # Deploy stack $(SWARM_STACK_NAME) [back-end] - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) - @$(_show_endpoints) - -up-prod: .stack-$(SWARM_STACK_NAME)-production.yml .init-swarm config ## Deploys local production stack and ops stack (pass 'make ops_disabled=1 up-...' to disable) -ifeq ($(target),) - # Deploy stack $(SWARM_STACK_NAME) - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) -else - # deploys ONLY $(target) service - @docker compose --file $< up --detach $(target) -endif - @$(_show_endpoints) - -up up-version: .stack-$(SWARM_STACK_NAME)-version.yml .init-swarm config ## Deploys versioned stack '$(DOCKER_REGISTRY)/{service}:$(DOCKER_IMAGE_TAG)' and ops stack (pass 'make ops_disabled=1 up-...' to disable) - # Deploy stack $(SWARM_STACK_NAME) - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) - @$(_show_endpoints) - -up-latest: - @export DOCKER_IMAGE_TAG=release-github-latest && \ - $(MAKE) up-version - -.PHONY: down -down: ## Stops and removes stack - # Removing stacks in reverse order to creation - -@docker stack rm $(SWARM_STACK_NAME) - -@docker stack rm $(SWARM_STACK_NAME)-ops - # Removing generated docker compose configurations, i.e. .stack-* - -@rm $(wildcard .stack-*) - -@rm $(wildcard $(OSPARC_GATEWAY_CONFIG_FILE_HOST)) - -## -## system tests -## -test-system: ## Runs system tests (needs local docker images of osparc-gateway-server and dask-sidecar) - $(MAKE_C) tests/system install-ci - $(MAKE_C) tests/system tests diff --git a/services/osparc-gateway-server/README.md b/services/osparc-gateway-server/README.md deleted file mode 100644 index 1f536df68e4..00000000000 --- a/services/osparc-gateway-server/README.md +++ /dev/null @@ -1 +0,0 @@ -# osparc backend for dask gateway server diff --git a/services/osparc-gateway-server/VERSION b/services/osparc-gateway-server/VERSION deleted file mode 100644 index 8acdd82b765..00000000000 --- a/services/osparc-gateway-server/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.0.1 diff --git a/services/osparc-gateway-server/config/default_config.py b/services/osparc-gateway-server/config/default_config.py deleted file mode 100644 index 4cd75de4d73..00000000000 --- a/services/osparc-gateway-server/config/default_config.py +++ /dev/null @@ -1,12 +0,0 @@ -# pylint: disable=undefined-variable - -# NOTE: this configuration is used by the dask-gateway-server -# it follows [traitlets](https://traitlets.readthedocs.io/en/stable/config.html) configuration files - -# defines the backend to use with the gateway -c.DaskGateway.backend_class = "osparc_gateway_server.backend.osparc.OsparcBackend" # type: ignore -# defines the password for 'simple' authentication -c.Authenticator.password = "asdf" # type: ignore -# defines log levels -c.DaskGateway.log_level = "WARN" # type: ignore -c.Proxy.log_level = "WARN" # type: ignore diff --git a/services/osparc-gateway-server/docker-compose.devel.yml b/services/osparc-gateway-server/docker-compose.devel.yml deleted file mode 100644 index 32514289e1a..00000000000 --- a/services/osparc-gateway-server/docker-compose.devel.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: '3.9' -services: - osparc-gateway-server: - environment: - - SC_BOOT_MODE=debug - - LOG_LEVEL=debug - - DEBUG=true - volumes: - - ./:/devel/services/osparc-gateway-server - - ../../packages:/devel/packages diff --git a/services/osparc-gateway-server/docker-compose.local.yml b/services/osparc-gateway-server/docker-compose.local.yml deleted file mode 100644 index ff73e36a256..00000000000 --- 
a/services/osparc-gateway-server/docker-compose.local.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: '3.9' -services: - osparc-gateway-server: - environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} - - OSPARC_GATEWAY_SERVER_DEBUGGING_PORT=3000 - ports: - - "3100:3000" # debug port diff --git a/services/osparc-gateway-server/docker-compose.yml b/services/osparc-gateway-server/docker-compose.yml deleted file mode 100644 index acdfe4179db..00000000000 --- a/services/osparc-gateway-server/docker-compose.yml +++ /dev/null @@ -1,37 +0,0 @@ -version: '3.9' -services: - osparc-gateway-server: - image: ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:${DOCKER_IMAGE_TAG:-latest} - ports: - - "8000:8000" - volumes: - - /var/run/docker.sock:/var/run/docker.sock - networks: - - dask_net - configs: - - source: gateway_config - target: ${GATEWAY_SERVER_CONFIG_FILE_CONTAINER:-/etc/dask/dask_config.py} - environment: - - GATEWAY_WORKERS_NETWORK=${SWARM_STACK_NAME:?swarm_stack_name_required}_dask_net - - GATEWAY_SERVER_NAME=${SWARM_STACK_NAME:?swarm_stack_name_required}_osparc-gateway-server - - COMPUTATIONAL_SIDECAR_VOLUME_NAME=${SWARM_STACK_NAME:?swarm_stack_name_required}_sidecar_data - - COMPUTATIONAL_SIDECAR_IMAGE=${COMPUTATIONAL_SIDECAR_IMAGE:-local/dask-sidecar:production} - - COMPUTATIONAL_SIDECAR_LOG_LEVEL=${COMPUTATIONAL_SIDECAR_LOG_LEVEL:-WARNING} - - COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS=${COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS:-2} - - COMPUTATION_SIDECAR_NON_USABLE_RAM=${COMPUTATION_SIDECAR_NON_USABLE_RAM:-0} - - GATEWAY_SERVER_ONE_WORKER_PER_NODE=${GATEWAY_SERVER_ONE_WORKER_PER_NODE-True} - - GATEWAY_SERVER_CONFIG_FILE_CONTAINER=${GATEWAY_SERVER_CONFIG_FILE_CONTAINER:-/etc/dask/dask_config.py} - deploy: - placement: - constraints: - - node.role == manager -networks: - dask_net: - name: ${SWARM_STACK_NAME:?swarm_stack_name_required}_dask_net - -volumes: - sidecar_data: null - -configs: - gateway_config: - file: ./${OSPARC_GATEWAY_CONFIG_FILE_HOST:?gateway_config_required} diff --git a/services/osparc-gateway-server/docker/boot.sh b/services/osparc-gateway-server/docker/boot.sh deleted file mode 100755 index d2b912eb3ba..00000000000 --- a/services/osparc-gateway-server/docker/boot.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh -set -o errexit -set -o nounset - -IFS=$(printf '\n\t') - -INFO="INFO: [$(basename "$0")] " - -# BOOTING application --------------------------------------------- -echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." 
-echo " User :$(id "$(whoami)")" -echo " Workdir :$(pwd)" -echo " env :$(env)" - -if [ "${SC_BUILD_TARGET}" = "development" ]; then - echo "$INFO" "Environment :" - printenv | sed 's/=/: /' | sed 's/^/ /' | sort - echo "$INFO" "Python :" - python --version | sed 's/^/ /' - command -v python | sed 's/^/ /' - cd services/osparc-gateway-server - uv pip install --no-cache-dir -r requirements/dev.txt - cd - - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' -fi - -if [ "${SC_BOOT_MODE}" = "debug" ]; then - # NOTE: production does NOT pre-installs debugpy - uv pip install --no-cache-dir debugpy -fi - -if [ "${SC_BOOT_MODE}" = "debug" ]; then - exec python -m debugpy --listen 0.0.0.0:"${OSPARC_GATEWAY_SERVER_DEBUGGING_PORT}" -m watchmedo auto-restart \ - --recursive \ - --pattern="*.py;*/src/*" \ - --ignore-patterns="*test*;pytest_simcore/*;setup.py;*ignore*" \ - --ignore-directories -- \ - osparc-gateway-server \ - --config "${GATEWAY_SERVER_CONFIG_FILE_CONTAINER}" \ - --debug -else - exec osparc-gateway-server \ - --config "${GATEWAY_SERVER_CONFIG_FILE_CONTAINER}" -fi diff --git a/services/osparc-gateway-server/docker/entrypoint.sh b/services/osparc-gateway-server/docker/entrypoint.sh deleted file mode 100755 index cd8eb9a01ef..00000000000 --- a/services/osparc-gateway-server/docker/entrypoint.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/sh -set -o errexit -set -o nounset - -IFS=$(printf '\n\t') - -INFO="INFO: [$(basename "$0")] " -WARNING="WARNING: [$(basename "$0")] " -ERROR="ERROR: [$(basename "$0")] " - -# This entrypoint script: -# -# - Executes *inside* of the container upon start as --user [default root] -# - Notice that the container *starts* as --user [default root] but -# *runs* as non-root user [scu] -# -echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." -echo User :"$(id "$(whoami)")" -echo Workdir :"$(pwd)" -echo scuUser :"$(id scu)" - -if [ "${SC_BUILD_TARGET}" = "development" ]; then - echo "$INFO" "development mode detected..." - # NOTE: expects docker run ... -v $(pwd):/devel/services/osparc-gateway-server - DEVEL_MOUNT=${DEVEL_MOUNT:="/devel/services/osparc-gateway-server"} - - stat $DEVEL_MOUNT >/dev/null 2>&1 || - (echo "$ERROR" "You must mount '$DEVEL_MOUNT' to deduce user and group ids" && exit 1) - - echo "setting correct user id/group id..." - HOST_USERID=$(stat --format=%u "${DEVEL_MOUNT}") - HOST_GROUPID=$(stat --format=%g "${DEVEL_MOUNT}") - CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) - if [ "$HOST_USERID" -eq 0 ]; then - echo "Warning: Folder mounted owned by root user... adding $SC_USER_NAME to root..." - adduser "$SC_USER_NAME" root - else - echo "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." - # take host's credentials in $SC_USER_NAME - if [ -z "$CONT_GROUPNAME" ]; then - echo "Creating new group my$SC_USER_NAME" - CONT_GROUPNAME=my$SC_USER_NAME - addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" - else - echo "group already exists" - fi - echo "adding $SC_USER_NAME to group $CONT_GROUPNAME..." 
- adduser "$SC_USER_NAME" "$CONT_GROUPNAME" - - echo "changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" - usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" - - echo "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" - find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; - # change user property of files already around - echo "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" - find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; - fi -fi - -DOCKER_MOUNT=/var/run/docker.sock -if stat $DOCKER_MOUNT >/dev/null 2>&1; then - echo "$INFO detected docker socket is mounted, adding user to group..." - GROUPID=$(stat --format=%g $DOCKER_MOUNT) - GROUPNAME=scdocker - - if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then - echo "$WARNING docker group with $GROUPID already exists, getting group name..." - # if group already exists in container, then reuse name - GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) - echo "$WARNING docker group with $GROUPID has name $GROUPNAME" - fi - adduser "$SC_USER_NAME" "$GROUPNAME" -fi - -echo "$INFO Starting osparc-gateway-server ..." -echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" -echo " local dir : $(ls -al)" - -exec gosu "$SC_USER_NAME" "$@" diff --git a/services/osparc-gateway-server/requirements/Makefile b/services/osparc-gateway-server/requirements/Makefile deleted file mode 100644 index 1118bbf105e..00000000000 --- a/services/osparc-gateway-server/requirements/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# -# Targets to pip-compile requirements -# -include ../../../requirements/base.Makefile - -# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt - -_test.txt: _base.txt diff --git a/services/osparc-gateway-server/requirements/_base.in b/services/osparc-gateway-server/requirements/_base.in deleted file mode 100644 index 605373b2ef8..00000000000 --- a/services/osparc-gateway-server/requirements/_base.in +++ /dev/null @@ -1,11 +0,0 @@ -# Specifies third-party dependencies for the 'osparc-gateway-server' -# -# ---constraint ../../../requirements/constraints.txt ---constraint constraints.txt - -aiodocker -async-timeout -dask-gateway-server[local] -pydantic-settings -pydantic[email,dotenv] diff --git a/services/osparc-gateway-server/requirements/_base.txt b/services/osparc-gateway-server/requirements/_base.txt deleted file mode 100644 index c6689413bb4..00000000000 --- a/services/osparc-gateway-server/requirements/_base.txt +++ /dev/null @@ -1,68 +0,0 @@ -aiodocker==0.22.1 - # via -r requirements/_base.in -aiohttp==3.9.5 - # via - # -c requirements/../../../requirements/constraints.txt - # aiodocker - # dask-gateway-server -aiosignal==1.3.1 - # via aiohttp -annotated-types==0.7.0 - # via pydantic -async-timeout==4.0.3 - # via -r requirements/_base.in -attrs==23.2.0 - # via aiohttp -cffi==1.16.0 - # via cryptography -colorlog==6.8.2 - # via dask-gateway-server -cryptography==42.0.7 - # via - # -c requirements/../../../requirements/constraints.txt - # dask-gateway-server -dask-gateway-server==2023.1.1 - # via -r requirements/_base.in -dnspython==2.6.1 - # via email-validator -email-validator==2.1.1 - # via pydantic -frozenlist==1.4.1 - # via - # aiohttp - # aiosignal -greenlet==3.0.3 - # via sqlalchemy -idna==3.7 - # via - # email-validator - # yarl -multidict==6.0.5 - # via - # aiohttp - # yarl -pycparser==2.22 - # via cffi -pydantic==2.9.2 - # via - # -c requirements/../../../requirements/constraints.txt - # -r requirements/_base.in - # pydantic-settings -pydantic-core==2.23.4 - # via pydantic -pydantic-settings==2.6.1 - # via -r requirements/_base.in -python-dotenv==1.0.1 - # via pydantic-settings -sqlalchemy==1.4.52 - # via - # -c requirements/../../../requirements/constraints.txt - # dask-gateway-server -traitlets==5.14.3 - # via dask-gateway-server -typing-extensions==4.12.2 - # via - # pydantic - # pydantic-core -yarl==1.9.4 - # via aiohttp diff --git a/services/osparc-gateway-server/requirements/_test.in b/services/osparc-gateway-server/requirements/_test.in deleted file mode 100644 index 61f8faa298f..00000000000 --- a/services/osparc-gateway-server/requirements/_test.in +++ /dev/null @@ -1,25 +0,0 @@ -# -# Specifies dependencies required to run 'osparc-gateway-server' -# ---constraint ../../../requirements/constraints.txt - -# Adds base AS CONSTRAINT specs, not requirement. 
-# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt -# ---constraint _base.txt ---constraint ../../dask-sidecar/requirements/_dask-distributed.txt - -coverage -dask-gateway -debugpy -docker -faker -pytest -pytest-asyncio -pytest-cov -pytest-icdiff -pytest-instafail -pytest-mock -pytest-sugar -tenacity -sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html diff --git a/services/osparc-gateway-server/requirements/_test.txt b/services/osparc-gateway-server/requirements/_test.txt deleted file mode 100644 index 1fc9e930b69..00000000000 --- a/services/osparc-gateway-server/requirements/_test.txt +++ /dev/null @@ -1,213 +0,0 @@ -aiohttp==3.9.5 - # via - # -c requirements/../../../requirements/constraints.txt - # -c requirements/_base.txt - # dask-gateway -aiosignal==1.3.1 - # via - # -c requirements/_base.txt - # aiohttp -attrs==23.2.0 - # via - # -c requirements/_base.txt - # aiohttp -certifi==2024.8.30 - # via - # -c requirements/../../../requirements/constraints.txt - # requests -charset-normalizer==3.3.2 - # via requests -click==8.1.7 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # dask-gateway - # distributed -cloudpickle==3.0.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed -coverage==7.6.1 - # via - # -r requirements/_test.in - # pytest-cov -dask==2024.5.1 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway - # distributed -dask-gateway==2024.1.0 - # via -r requirements/_test.in -debugpy==1.8.5 - # via -r requirements/_test.in -distributed==2024.5.1 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway -docker==7.1.0 - # via -r requirements/_test.in -faker==29.0.0 - # via -r requirements/_test.in -frozenlist==1.4.1 - # via - # -c requirements/_base.txt - # aiohttp - # aiosignal -fsspec==2024.5.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask -greenlet==3.0.3 - # via - # -c requirements/_base.txt - # sqlalchemy -icdiff==2.0.7 - # via pytest-icdiff -idna==3.7 - # via - # -c requirements/_base.txt - # requests - # yarl -importlib-metadata==7.1.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask -iniconfig==2.0.0 - # via pytest -jinja2==3.1.4 - # via - # -c requirements/../../../requirements/constraints.txt - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -locket==1.0.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed - # partd -markupsafe==2.1.5 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # jinja2 -msgpack==1.1.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -multidict==6.0.5 - # via - # -c requirements/_base.txt - # aiohttp - # yarl -mypy==1.12.0 - # via sqlalchemy -mypy-extensions==1.0.0 - # via mypy -packaging==24.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed - # pytest - # pytest-sugar -partd==1.4.2 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask -pluggy==1.5.0 - # via pytest -pprintpp==0.4.0 - # via pytest-icdiff -psutil==6.0.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # 
distributed -pytest==8.3.3 - # via - # -r requirements/_test.in - # pytest-asyncio - # pytest-cov - # pytest-icdiff - # pytest-instafail - # pytest-mock - # pytest-sugar -pytest-asyncio==0.23.8 - # via - # -c requirements/../../../requirements/constraints.txt - # -r requirements/_test.in -pytest-cov==5.0.0 - # via -r requirements/_test.in -pytest-icdiff==0.9 - # via -r requirements/_test.in -pytest-instafail==0.5.0 - # via -r requirements/_test.in -pytest-mock==3.14.0 - # via -r requirements/_test.in -pytest-sugar==1.0.0 - # via -r requirements/_test.in -python-dateutil==2.9.0.post0 - # via faker -pyyaml==6.0.1 - # via - # -c requirements/../../../requirements/constraints.txt - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # dask-gateway - # distributed -requests==2.32.3 - # via docker -six==1.16.0 - # via python-dateutil -sortedcontainers==2.4.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -sqlalchemy==1.4.52 - # via - # -c requirements/../../../requirements/constraints.txt - # -c requirements/_base.txt - # -r requirements/_test.in -sqlalchemy2-stubs==0.0.2a38 - # via sqlalchemy -tblib==3.0.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -tenacity==9.0.0 - # via -r requirements/_test.in -termcolor==2.4.0 - # via pytest-sugar -toolz==0.12.1 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed - # partd -tornado==6.4 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway - # distributed -typing-extensions==4.12.2 - # via - # -c requirements/_base.txt - # mypy - # sqlalchemy2-stubs -urllib3==2.2.3 - # via - # -c requirements/../../../requirements/constraints.txt - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed - # docker - # requests -yarl==1.9.4 - # via - # -c requirements/_base.txt - # aiohttp -zict==3.0.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -zipp==3.18.2 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # importlib-metadata diff --git a/services/osparc-gateway-server/requirements/_tools.in b/services/osparc-gateway-server/requirements/_tools.in deleted file mode 100644 index f91a82de59b..00000000000 --- a/services/osparc-gateway-server/requirements/_tools.in +++ /dev/null @@ -1,8 +0,0 @@ ---constraint ../../../requirements/constraints.txt ---constraint _base.txt ---constraint _test.txt - ---requirement ../../../requirements/devenv.txt - -# basic dev tools -watchdog[watchmedo] diff --git a/services/osparc-gateway-server/requirements/_tools.txt b/services/osparc-gateway-server/requirements/_tools.txt deleted file mode 100644 index 4366080afe1..00000000000 --- a/services/osparc-gateway-server/requirements/_tools.txt +++ /dev/null @@ -1,87 +0,0 @@ -astroid==3.3.4 - # via pylint -black==24.8.0 - # via -r requirements/../../../requirements/devenv.txt -build==1.2.2 - # via pip-tools -bump2version==1.0.1 - # via -r requirements/../../../requirements/devenv.txt -cfgv==3.4.0 - # via pre-commit -click==8.1.7 - # via - # -c requirements/_test.txt - # black - # pip-tools -dill==0.3.8 - # via pylint -distlib==0.3.8 - # via virtualenv -filelock==3.16.1 - # via virtualenv -identify==2.6.1 - # via pre-commit -isort==5.13.2 - # via - # -r requirements/../../../requirements/devenv.txt - # pylint -mccabe==0.7.0 - # via pylint 
-mypy==1.12.0 - # via - # -c requirements/_test.txt - # -r requirements/../../../requirements/devenv.txt -mypy-extensions==1.0.0 - # via - # -c requirements/_test.txt - # black - # mypy -nodeenv==1.9.1 - # via pre-commit -packaging==24.0 - # via - # -c requirements/_test.txt - # black - # build -pathspec==0.12.1 - # via black -pip==24.2 - # via pip-tools -pip-tools==7.4.1 - # via -r requirements/../../../requirements/devenv.txt -platformdirs==4.3.6 - # via - # black - # pylint - # virtualenv -pre-commit==3.8.0 - # via -r requirements/../../../requirements/devenv.txt -pylint==3.3.0 - # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.1.0 - # via - # build - # pip-tools -pyyaml==6.0.1 - # via - # -c requirements/../../../requirements/constraints.txt - # -c requirements/_test.txt - # pre-commit - # watchdog -ruff==0.6.7 - # via -r requirements/../../../requirements/devenv.txt -setuptools==75.1.0 - # via pip-tools -tomlkit==0.13.2 - # via pylint -typing-extensions==4.12.2 - # via - # -c requirements/_base.txt - # -c requirements/_test.txt - # mypy -virtualenv==20.26.5 - # via pre-commit -watchdog==5.0.2 - # via -r requirements/_tools.in -wheel==0.44.0 - # via pip-tools diff --git a/services/osparc-gateway-server/requirements/ci.txt b/services/osparc-gateway-server/requirements/ci.txt deleted file mode 100644 index e30762175d1..00000000000 --- a/services/osparc-gateway-server/requirements/ci.txt +++ /dev/null @@ -1,19 +0,0 @@ -# Shortcut to install all packages for the contigous integration (CI) of 'services/director-v2' -# -# - As ci.txt but w/ tests -# -# Usage: -# pip install -r requirements/ci.txt -# - - -# installs base + tests requirements ---requirement _base.txt ---requirement _test.txt ---requirement _tools.txt - -# installs this repo's packages -pytest-simcore @ ../../packages/pytest-simcore/ - -# installs current package -osparc-gateway-server @ . diff --git a/services/osparc-gateway-server/requirements/constraints.txt b/services/osparc-gateway-server/requirements/constraints.txt deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/services/osparc-gateway-server/requirements/dev.txt b/services/osparc-gateway-server/requirements/dev.txt deleted file mode 100644 index f2182d2b170..00000000000 --- a/services/osparc-gateway-server/requirements/dev.txt +++ /dev/null @@ -1,18 +0,0 @@ -# Shortcut to install all packages needed to develop 'services/director-v2' -# -# - As ci.txt but with current and repo packages in develop (edit) mode -# -# Usage: -# pip install -r requirements/dev.txt -# - -# installs base + tests + tools requirements ---requirement _base.txt ---requirement _test.txt ---requirement _tools.txt - -# installs this repo's packages ---editable ../../packages/pytest-simcore/ - -# installs current package ---editable . diff --git a/services/osparc-gateway-server/requirements/prod.txt b/services/osparc-gateway-server/requirements/prod.txt deleted file mode 100644 index 45b869b18cf..00000000000 --- a/services/osparc-gateway-server/requirements/prod.txt +++ /dev/null @@ -1,15 +0,0 @@ -# Shortcut to install 'services/director-v2' for production -# -# - As ci.txt but w/o tests -# -# Usage: -# pip install -r requirements/prod.txt -# - -# installs base requirements ---requirement _base.txt - -# installs this repo's packages - -# installs current package -osparc-gateway-server @ . 
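Note: the requirement shortcuts deleted above (ci.txt, dev.txt, prod.txt) were consumed directly with pip, as their own headers state. A minimal usage sketch follows, assuming it is run from the service folder inside an activated virtualenv (both assumptions; the accompanying Make targets are not part of this hunk):

# development: base + test + tool requirements, with repo packages installed in editable mode
pip install -r requirements/dev.txt
# CI: the same set, but with frozen (non-editable) installs of pytest-simcore and the service itself
pip install -r requirements/ci.txt
# production: runtime (base) requirements plus the service package only
pip install -r requirements/prod.txt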
diff --git a/services/osparc-gateway-server/setup.cfg b/services/osparc-gateway-server/setup.cfg deleted file mode 100644 index 421c932766f..00000000000 --- a/services/osparc-gateway-server/setup.cfg +++ /dev/null @@ -1,15 +0,0 @@ -[bumpversion] -current_version = 0.0.1 -commit = True -message = services/osparc-gateway-server version: {current_version} → {new_version} -tag = False -commit_args = --no-verify - -[bumpversion:file:VERSION] - -[tool:pytest] -asyncio_mode = auto - -[mypy] -plugins = - pydantic.mypy diff --git a/services/osparc-gateway-server/setup.py b/services/osparc-gateway-server/setup.py deleted file mode 100755 index c3a7becc072..00000000000 --- a/services/osparc-gateway-server/setup.py +++ /dev/null @@ -1,58 +0,0 @@ -#! /bin/python -import re -import sys -from pathlib import Path - -from setuptools import find_packages, setup - - -def read_reqs(reqs_path: Path) -> set[str]: - return { - r - for r in re.findall( - r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", - reqs_path.read_text(), - re.MULTILINE, - ) - if isinstance(r, str) - } - - -CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent - -INSTALL_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_base.txt")) -TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) - - -SETUP = { - "name": "osparc-gateway-server", - "version": (CURRENT_DIR / "VERSION").read_text().strip(), - "author": "Manuel Guidon (mguidon), Sylvain Anderegg (sanderegg)", - "description": "Osparc backend for dask-gateway-server", - "classifiers": [ - "Development Status :: 1 - Planning", - "License :: OSI Approved :: MIT License", - "Natural Language :: English", - "Programming Language :: Python :: 3.10", - ], - "long_description": (CURRENT_DIR / "README.md").read_text(), - "license": "MIT license", - "python_requires": "~=3.11", - "packages": find_packages(where="src"), - "package_dir": { - "": "src", - }, - "install_requires": INSTALL_REQUIREMENTS, - "test_suite": "tests", - "tests_require": TEST_REQUIREMENTS, - "extras_require": {"test": TEST_REQUIREMENTS}, - "entry_points": { - "console_scripts": [ - "osparc-gateway-server=osparc_gateway_server.app:start", - ] - }, -} - - -if __name__ == "__main__": - setup(**SETUP) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py b/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py deleted file mode 100644 index 0d83e8059db..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py +++ /dev/null @@ -1 +0,0 @@ -package_name = __name__ diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/app.py b/services/osparc-gateway-server/src/osparc_gateway_server/app.py deleted file mode 100644 index eddceee8d22..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import sys - -from dask_gateway_server.app import main # type: ignore[import-untyped] - - -def start() -> None: - sys.exit(main()) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/__init__.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py deleted file mode 100644 index 6a42e519f6e..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py 
+++ /dev/null @@ -1,14 +0,0 @@ -class OSparcGatewayServerException(Exception): - """Exception raised when there is an exception in oSparc gateway server""" - - -class NoServiceTasksError(OSparcGatewayServerException): - """Exception raised when there is no tasks attached to service""" - - -class TaskNotAssignedError(OSparcGatewayServerException): - """Exception raised when a task is not assigned to a host""" - - -class NoHostFoundError(OSparcGatewayServerException): - """Exception raised when there is no host found""" diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py deleted file mode 100644 index 9cdd2fc9edb..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py +++ /dev/null @@ -1,42 +0,0 @@ -from collections.abc import Mapping -from ipaddress import IPv4Address -from typing import Any, Union - -from pydantic import BaseModel, ByteSize, Field, PositiveFloat, TypeAdapter - -Hostname = str -ResourceName = str -ResourceType = Union[int, float] - - -class NodeResources(BaseModel): - memory: ByteSize - cpus: PositiveFloat - others: dict[ResourceName, ResourceType] = Field(default_factory=dict) - - -class NodeInformation(BaseModel): - docker_node_id: str - ip: IPv4Address - resources: NodeResources - - -ClusterInformation = dict[Hostname, NodeInformation] - - -def cluster_information_from_docker_nodes( - nodes_list: list[Mapping[str, Any]] -) -> ClusterInformation: - return TypeAdapter(ClusterInformation).validate_python( - { - node["Description"]["Hostname"]: { - "docker_node_id": node["ID"], - "ip": node["Status"]["Addr"], - "resources": { - "memory": node["Description"]["Resources"]["MemoryBytes"], - "cpus": node["Description"]["Resources"]["NanoCPUs"] / 1e9, - }, - } - for node in nodes_list - } - ) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py deleted file mode 100644 index f905dfc83a4..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py +++ /dev/null @@ -1,350 +0,0 @@ -import asyncio -import logging -from collections.abc import AsyncGenerator -from importlib.metadata import version -from typing import Any - -import osparc_gateway_server -from aiodocker import Docker -from aiodocker.exceptions import DockerContainerError -from dask_gateway_server.backends.base import ( # type: ignore[import-untyped] - PublicException, -) -from dask_gateway_server.backends.db_base import ( # type: ignore[import-untyped] - Cluster, - DBBackendBase, - JobStatus, - Worker, - chain, - islice, - timestamp, -) - -from ..remote_debug import setup_remote_debugging -from .errors import NoHostFoundError, NoServiceTasksError, TaskNotAssignedError -from .settings import AppSettings, BootModeEnum -from .utils import ( - OSPARC_SCHEDULER_API_PORT, - DockerSecret, - create_docker_secrets_from_tls_certs_for_cluster, - delete_secrets, - get_cluster_information, - get_next_empty_node_hostname, - get_osparc_scheduler_cmd_modifications, - is_service_task_running, - modify_cmd_argument, - start_service, - stop_service, -) - -# -# https://patorjk.com/software/taag/#p=display&v=0&f=Avatar&t=osparc-gateway-server -# -WELCOME_MSG = rf""" - ____ ____ ____ ____ ____ ____ ____ ____ ____ _ __ _____ ____ _____ _____ _ ____ ___ _ ____ _____ ____ _ _____ ____ -/ _ \/ ___\/ __\/ _ \/ __\/ _\ / _ \/ _ \/ ___\/ |/ / / __// _ \/__ 
__\/ __// \ /|/ _ \\ \// / ___\/ __// __\/ \ |\/ __// __\ -| / \|| \| \/|| / \|| \/|| / _____ | | \|| / \|| \| /_____ | | _| / \| / \ | \ | | ||| / \| \ /_____ | \| \ | \/|| | //| \ | \/| -| \_/|\___ || __/| |-||| /| \_\____\| |_/|| |-||\___ || \\____\| |_//| |-|| | | | /_ | |/\||| |-|| / / \____\\___ || /_ | /| \// | /_ | / -\____/\____/\_/ \_/ \|\_/\_\\____/ \____/\_/ \|\____/\_|\_\ \____\\_/ \| \_/ \____\\_/ \|\_/ \|/_/ \____/\____\\_/\_\\__/ \____\\_/\_\ {version(osparc_gateway_server.package_name)} - - -""" - - -class OsparcBackend(DBBackendBase): - """A cluster backend that launches osparc workers. - - Scheduler are spawned as services in a docker swarm - Workers are spawned as services in a docker swarm - """ - - settings: AppSettings - docker_client: Docker - cluster_secrets: list[DockerSecret] = [] - - async def do_setup(self) -> None: - self.settings = AppSettings() # type: ignore[call-arg] - assert isinstance(self.log, logging.Logger) # nosec - self.log.info( - "osparc-gateway-server application settings:\n%s", - self.settings.model_dump_json(indent=2), - ) - - if self.settings.SC_BOOT_MODE in [BootModeEnum.DEBUG]: - setup_remote_debugging(logger=self.log) - - # pylint: disable=attribute-defined-outside-init - self.cluster_start_timeout = self.settings.GATEWAY_CLUSTER_START_TIMEOUT - self.worker_start_timeout = self.settings.GATEWAY_WORKER_START_TIMEOUT - self.docker_client = Docker() - - print(WELCOME_MSG, flush=True) # noqa: T201 - - async def do_cleanup(self) -> None: - assert isinstance(self.log, logging.Logger) # nosec - await self.docker_client.close() - self.log.info("osparc-gateway-server closed.") - - async def do_start_cluster( - self, cluster: Cluster - ) -> AsyncGenerator[dict[str, Any], None]: - assert isinstance(self.log, logging.Logger) # nosec - assert isinstance(self.api_url, str) # nosec - self.log.debug(f"starting {cluster=}") - self.cluster_secrets.extend( - await create_docker_secrets_from_tls_certs_for_cluster( - self.docker_client, self, cluster - ) - ) - self.log.debug("created '%s' for TLS certification", f"{self.cluster_secrets=}") - - # now we need a scheduler (get these auto-generated entries from dask-gateway base class) - scheduler_env = self.get_scheduler_env(cluster) - scheduler_cmd = self.get_scheduler_command(cluster) - # we need a few modifications for running in docker swarm - scheduler_service_name = f"cluster_{cluster.id}_scheduler" - modifications = get_osparc_scheduler_cmd_modifications(scheduler_service_name) - for key, value in modifications.items(): - scheduler_cmd = modify_cmd_argument(scheduler_cmd, key, value) - # start the scheduler - async for dask_scheduler_start_result in start_service( - docker_client=self.docker_client, - settings=self.settings, - logger=self.log, - service_name=scheduler_service_name, - base_env=scheduler_env, - cluster_secrets=[ - c for c in self.cluster_secrets if c.cluster.name == cluster.name - ], - cmd=scheduler_cmd, - labels={"cluster_id": f"{cluster.id}", "type": "scheduler"}, - gateway_api_url=self.api_url, - placement={"Constraints": ["node.role==manager"]}, - ): - yield dask_scheduler_start_result - - async def do_stop_cluster(self, cluster: Cluster) -> None: - assert isinstance(self.log, logging.Logger) # nosec - assert cluster.state # nosec - self.log.debug("--> stopping %s", f"{cluster=}") - dask_scheduler_service_id = cluster.state.get("service_id") - await stop_service(self.docker_client, dask_scheduler_service_id, self.log) - await delete_secrets(self.docker_client, cluster) - 
self.log.debug("<--%s stopped", f"{cluster=}") - - async def do_check_clusters(self, clusters: list[Cluster]) -> list[bool]: - assert isinstance(self.log, logging.Logger) # nosec - self.log.debug("--> checking statuses of : %s", f"{clusters=}") - oks: list[bool | BaseException] = await asyncio.gather( - *[self._check_service_status(c) for c in clusters], return_exceptions=True - ) - self.log.debug("<-- clusters status returned: %s", f"{oks=}") - return [ok if isinstance(ok, bool) else False for ok in oks] - - async def do_start_worker( - self, worker: Worker - ) -> AsyncGenerator[dict[str, Any], None]: - assert isinstance(self.log, logging.Logger) # nosec - assert isinstance(self.api_url, str) # nosec - assert worker.cluster # nosec - self.log.debug("--> starting %s", f"{worker=}") - node_hostname = None - try: - node_hostname = await get_next_empty_node_hostname( - self.docker_client, worker.cluster - ) - except (NoServiceTasksError, TaskNotAssignedError) as exc: - # this is a real error - raise PublicException(f"{exc}") from exc - except NoHostFoundError as exc: - # this should not happen since calling do_start_worker is done - # from the on_cluster_heartbeat that checks if we already reached max worker - # What may happen is that a docker node was removed in between and that is an error we can report. - msg = "Unexpected error while creating a new worker, there is no available host! Was a docker node removed?" - raise PublicException(msg) from exc - assert node_hostname is not None # nosec - worker_env = self.get_worker_env(worker.cluster) - dask_scheduler_url = f"tls://cluster_{worker.cluster.id}_scheduler:{OSPARC_SCHEDULER_API_PORT}" # worker.cluster.scheduler_address - # NOTE: the name must be set so that the scheduler knows which worker to wait for - worker_env.update( - { - "DASK_SCHEDULER_URL": dask_scheduler_url, - "DASK_WORKER_NAME": worker.name, - } - ) - - async for dask_sidecar_start_result in start_service( - docker_client=self.docker_client, - settings=self.settings, - logger=self.log, - service_name=f"cluster_{worker.cluster.id}_sidecar_{worker.id}", - base_env=worker_env, - cluster_secrets=[ - c for c in self.cluster_secrets if c.cluster.name == worker.cluster.name - ], - cmd=None, - labels={ - "cluster_id": f"{worker.cluster.id}", - "worker_id": f"{worker.id}", - "type": "worker", - }, - gateway_api_url=self.api_url, - placement={"Constraints": [f"node.hostname=={node_hostname}"]}, - ): - yield dask_sidecar_start_result - - async def do_stop_worker(self, worker: Worker) -> None: - assert isinstance(self.log, logging.Logger) # nosec - self.log.debug("--> Stopping %s", f"{worker=}") - assert worker.state # nosec - if service_id := worker.state.get("service_id"): - await stop_service(self.docker_client, service_id, self.log) - self.log.debug("<-- %s stopped", f"{worker=}") - else: - self.log.error( - "Worker %s does not have a service id! 
That is not expected!", - f"{worker=}", - ) - - async def _check_service_status(self, cluster_service: Worker | Cluster) -> bool: - assert isinstance(self.log, logging.Logger) # nosec - self.log.debug("--> checking status: %s", f"{cluster_service=}") - assert cluster_service.state # nosec - if service_id := cluster_service.state.get("service_id"): - self.log.debug("--> checking service '%s' status", f"{service_id}") - try: - service = await self.docker_client.services.inspect(service_id) - if service: - service_name = service["Spec"]["Name"] - return await is_service_task_running( - self.docker_client, service_name, self.log - ) - - except DockerContainerError: - self.log.exception("Error while checking %s", f"{service_id=}") - self.log.warning( - "%s does not have a service id! That is not expected!", - f"{cluster_service=}", - ) - return False - - async def do_check_workers(self, workers: list[Worker]) -> list[bool]: - assert isinstance(self.log, logging.Logger) # nosec - self.log.debug("--> checking statuses: %s", f"{workers=}") - ok = await asyncio.gather( - *[self._check_service_status(w) for w in workers], return_exceptions=True - ) - self.log.debug("<-- worker status returned: %s", f"{ok=}") - return [False if isinstance(_, BaseException) else _ for _ in ok] - - async def on_cluster_heartbeat(self, cluster_name, msg) -> None: - # pylint: disable=no-else-continue, unused-variable, too-many-branches - # pylint: disable=too-many-statements - assert isinstance(self.log, logging.Logger) # nosec - - # HACK: we override the base class heartbeat in order to - # dynamically allow for more or less workers depending on the - # available docker nodes!!! - cluster = self.db.get_cluster(cluster_name) - if cluster is None or cluster.target > JobStatus.RUNNING: - return - - cluster.last_heartbeat = timestamp() - - if cluster.status == JobStatus.RUNNING: - cluster_update = {} - else: - cluster_update = { - "api_address": msg["api_address"], - "scheduler_address": msg["scheduler_address"], - "dashboard_address": msg["dashboard_address"], - } - - count = msg["count"] - active_workers = set(msg["active_workers"]) - closing_workers = set(msg["closing_workers"]) - closed_workers = set(msg["closed_workers"]) - - self.log.info( - "Cluster %s heartbeat [count: %d, n_active: %d, n_closing: %d, n_closed: %d]", - cluster_name, - count, - len(active_workers), - len(closing_workers), - len(closed_workers), - ) - - # THIS IS THE HACK!!! - # original code in dask_gateway_server.backend.db_base - max_workers = cluster.config.get("cluster_max_workers") - if self.settings.GATEWAY_SERVER_ONE_WORKER_PER_NODE: - # cluster_max_workers = len(await get_cluster_information(self.docker_client)) - # if max_workers != cluster_max_workers: - # unfrozen_cluster_config = {k: v for k, v in cluster.config.items()} - # unfrozen_cluster_config["cluster_max_workers"] = cluster_max_workers - # cluster_update["config"] = unfrozen_cluster_config - max_workers = len(await get_cluster_information(self.docker_client)) - if max_workers is not None and count > max_workers: - # This shouldn't happen under normal operation, but could if the - # user does something malicious (or there's a bug). 
- self.log.info( - "Cluster %s heartbeat requested %d workers, exceeding limit of %s.", - cluster_name, - count, - max_workers, - ) - count = max_workers - - if count != cluster.count: - cluster_update["count"] = count - - created_workers = [] - submitted_workers = [] - target_updates = [] - newly_running = [] - close_expected = [] - for worker in cluster.workers.values(): - if worker.status >= JobStatus.STOPPED: - continue - if worker.name in closing_workers: - if worker.status < JobStatus.RUNNING: - newly_running.append(worker) - close_expected.append(worker) - elif worker.name in active_workers: - if worker.status < JobStatus.RUNNING: - newly_running.append(worker) - elif worker.name in closed_workers: - target = ( - JobStatus.STOPPED if worker.close_expected else JobStatus.FAILED - ) - target_updates.append((worker, {"target": target})) - elif worker.status == JobStatus.SUBMITTED: - submitted_workers.append(worker) - else: - assert worker.status == JobStatus.CREATED - created_workers.append(worker) - - n_pending = len(created_workers) + len(submitted_workers) - n_to_stop = len(active_workers) + n_pending - count - if n_to_stop > 0: - for w in islice(chain(created_workers, submitted_workers), n_to_stop): - target_updates.append((w, {"target": JobStatus.STOPPED})) - - if cluster_update: - self.db.update_cluster(cluster, **cluster_update) - self.queue.put(cluster) - - self.db.update_workers(target_updates) - for w, _u in target_updates: - self.queue.put(w) - - if newly_running: - # At least one worker successfully started, reset failure count - cluster.worker_start_failure_count = 0 - self.db.update_workers( - [(w, {"status": JobStatus.RUNNING}) for w in newly_running] - ) - for w in newly_running: - self.log.info("Worker %s is running", w.name) - - self.db.update_workers([(w, {"close_expected": True}) for w in close_expected]) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py deleted file mode 100644 index 6df9845bbaf..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py +++ /dev/null @@ -1,76 +0,0 @@ -from enum import Enum - -from pydantic import AliasChoices, Field, NonNegativeInt, PositiveInt -from pydantic_settings import BaseSettings - - -class BootModeEnum(str, Enum): - """ - Values taken by SC_BOOT_MODE environment variable - set in Dockerfile and used during docker/boot.sh - """ - - DEFAULT = "default" - LOCAL = "local-development" - DEBUG = "debug" - PRODUCTION = "production" - DEVELOPMENT = "development" - - -class AppSettings(BaseSettings): - COMPUTATIONAL_SIDECAR_IMAGE: str = Field( - ..., description="The computational sidecar image in use" - ) - COMPUTATIONAL_SIDECAR_LOG_LEVEL: str | None = Field( - default="WARNING", - description="The computational sidecar log level", - validation_alias=AliasChoices( - "COMPUTATIONAL_SIDECAR_LOG_LEVEL", - "LOG_LEVEL", - "LOGLEVEL", - "SIDECAR_LOG_LEVEL", - "SIDECAR_LOGLEVEL", - ), - ) - COMPUTATIONAL_SIDECAR_VOLUME_NAME: str = Field( - ..., description="Named volume for the computational sidecars" - ) - - COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS: NonNegativeInt = Field( - default=2, description="Number of CPUS the sidecar should not advertise/use" - ) - - COMPUTATION_SIDECAR_NON_USABLE_RAM: NonNegativeInt = Field( - default=0, - description="Amount of RAM in bytes, the sidecar should not advertise/use", - ) - - COMPUTATION_SIDECAR_DASK_NTHREADS: PositiveInt | None = Field( - 
default=None, - description="Allows to override the default number of threads used by the dask-sidecars", - ) - - GATEWAY_WORKERS_NETWORK: str = Field( - ..., - description="The docker network where the gateway workers shall be able to access the gateway", - ) - GATEWAY_SERVER_NAME: str = Field( - ..., - description="The hostname of the gateway server in the GATEWAY_WORKERS_NETWORK network", - ) - - SC_BOOT_MODE: BootModeEnum | None = None - - GATEWAY_SERVER_ONE_WORKER_PER_NODE: bool = Field( - default=True, - description="Only one dask-worker is allowed per node (default). If disabled, then scaling must be done manually.", - ) - - GATEWAY_CLUSTER_START_TIMEOUT: float = Field( - default=120.0, - description="Allowed timeout to define a starting cluster as failed", - ) - GATEWAY_WORKER_START_TIMEOUT: float = Field( - default=120.0, - description="Allowed timeout to define a starting worker as failed", - ) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py deleted file mode 100644 index 30cecf14235..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py +++ /dev/null @@ -1,403 +0,0 @@ -import asyncio -import json -import logging -from collections import deque -from collections.abc import AsyncGenerator, Mapping -from copy import deepcopy -from pathlib import Path -from typing import Any, Final, NamedTuple, cast - -import aiodocker -from aiodocker import Docker -from dask_gateway_server.backends.db_base import ( # type: ignore[import-untyped] - Cluster, - DBBackendBase, -) -from yarl import URL - -from .errors import NoHostFoundError -from .models import ClusterInformation, Hostname, cluster_information_from_docker_nodes -from .settings import AppSettings - -_SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR = "/home/scu/shared_computational_data" -_DASK_KEY_CERT_PATH_IN_SIDECAR = Path("/home/scu/dask-credentials") - - -class DockerSecret(NamedTuple): - secret_id: str - secret_name: str - secret_file_name: str - cluster: Cluster - - -async def is_service_task_running( - docker_client: Docker, service_name: str, logger: logging.Logger -) -> bool: - tasks = await docker_client.tasks.list(filters={"service": service_name}) - tasks_current_state = [task["Status"]["State"] for task in tasks] - logger.info( - "%s current service task states are %s", service_name, f"{tasks_current_state=}" - ) - num_running = sum(current == "running" for current in tasks_current_state) - return bool(num_running == 1) - - -async def get_network_id( - docker_client: Docker, network_name: str, logger: logging.Logger -) -> str: - # try to find the network name (usually named STACKNAME_default) - logger.debug("--> finding network id for '%s'", f"{network_name=}") - networks = [ - x - for x in (await docker_client.networks.list()) - if "swarm" in x["Scope"] and network_name == x["Name"] - ] - logger.debug(f"found the following: {networks=}") - if not networks: - raise ValueError(f"network {network_name} not found") - if len(networks) > 1: - # NOTE: this is impossible at the moment. 
test_utils::test_get_network_id proves it - raise ValueError( - f"network {network_name} is ambiguous, too many network founds: {networks=}" - ) - logger.debug("found '%s'", f"{networks[0]=}") - assert "Id" in networks[0] # nosec - assert isinstance(networks[0]["Id"], str) # nosec - return networks[0]["Id"] - - -def create_service_config( - settings: AppSettings, - service_env: dict[str, Any], - service_name: str, - network_id: str, - service_secrets: list[DockerSecret], - cmd: list[str] | None, - labels: dict[str, str], - placement: dict[str, Any] | None, - **service_kwargs, -) -> dict[str, Any]: - env = deepcopy(service_env) - env.pop("PATH", None) - # create the secrets array containing the TLS cert/key pair - container_secrets = [] - for s in service_secrets: - container_secrets.append( - { - "SecretName": s.secret_name, - "SecretID": s.secret_id, - "File": { - "Name": f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}", - "UID": "0", - "GID": "0", - "Mode": 0x777, - }, - } - ) - env_updates = {} - for env_name, env_value in env.items(): - if env_value == s.secret_file_name: - env_updates[ - env_name - ] = f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}" - env.update(env_updates) - mounts = [ - # docker socket needed to use the docker api - { - "Source": "/var/run/docker.sock", - "Target": "/var/run/docker.sock", - "Type": "bind", - "ReadOnly": True, - }, - # the sidecar data data is stored in a volume - { - "Source": settings.COMPUTATIONAL_SIDECAR_VOLUME_NAME, - "Target": _SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR, - "Type": "volume", - "ReadOnly": False, - }, - ] - - task_template: dict[str, Any] = { - "ContainerSpec": { - "Env": env, - "Image": settings.COMPUTATIONAL_SIDECAR_IMAGE, - "Init": True, - "Mounts": mounts, - "Secrets": container_secrets, - "Hostname": service_name, - }, - "RestartPolicy": {"Condition": "on-failure"}, - } - - if cmd: - task_template["ContainerSpec"]["Command"] = cmd - if placement: - task_template["Placement"] = placement - - return { - "name": service_name, - "labels": labels, - "task_template": task_template, - "networks": [network_id], - **service_kwargs, - } - - -async def create_or_update_secret( - docker_client: aiodocker.Docker, - target_file_name: str, - cluster: Cluster, - *, - file_path: Path | None = None, - secret_data: str | None = None, -) -> DockerSecret: - if file_path is None and secret_data is None: - raise ValueError( - f"Both {file_path=} and {secret_data=} are empty, that is not allowed" - ) - data = secret_data - if not data and file_path: - data = file_path.read_text() - - docker_secret_name = f"{Path( target_file_name).name}_{cluster.id}" - - secrets = await docker_client.secrets.list(filters={"name": docker_secret_name}) - if secrets: - # we must first delete it as only labels may be updated - secret = secrets[0] - await docker_client.secrets.delete(secret["ID"]) - assert data # nosec - secret = await docker_client.secrets.create( - name=docker_secret_name, - data=data, - labels={"cluster_id": f"{cluster.id}", "cluster_name": f"{cluster.name}"}, - ) - return DockerSecret( - secret_id=secret["ID"], - secret_name=docker_secret_name, - secret_file_name=target_file_name, - cluster=cluster, - ) - - -async def delete_secrets(docker_client: aiodocker.Docker, cluster: Cluster) -> None: - secrets = await docker_client.secrets.list( - filters={"label": f"cluster_id={cluster.id}"} - ) - await asyncio.gather(*[docker_client.secrets.delete(s["ID"]) for s in secrets]) - - -async def start_service( - docker_client: 
aiodocker.Docker, - settings: AppSettings, - logger: logging.Logger, - service_name: str, - base_env: dict[str, str], - cluster_secrets: list[DockerSecret], - cmd: list[str] | None, - labels: dict[str, str], - gateway_api_url: str, - placement: dict[str, Any] | None = None, - **service_kwargs, -) -> AsyncGenerator[dict[str, Any], None]: - service_parameters = {} - try: - assert settings.COMPUTATIONAL_SIDECAR_LOG_LEVEL # nosec - env = deepcopy(base_env) - env.update( - { - # NOTE: the hostname of the gateway API must be - # modified so that the scheduler/sidecar can - # send heartbeats to the gateway - "DASK_GATEWAY_API_URL": f"{URL(gateway_api_url).with_host(settings.GATEWAY_SERVER_NAME)}", - "SIDECAR_COMP_SERVICES_SHARED_FOLDER": _SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR, - "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME": settings.COMPUTATIONAL_SIDECAR_VOLUME_NAME, - "LOG_LEVEL": settings.COMPUTATIONAL_SIDECAR_LOG_LEVEL, - "DASK_SIDECAR_NUM_NON_USABLE_CPUS": f"{settings.COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS}", - "DASK_SIDECAR_NON_USABLE_RAM": f"{settings.COMPUTATION_SIDECAR_NON_USABLE_RAM}", - } - ) - if settings.COMPUTATION_SIDECAR_DASK_NTHREADS: - env["DASK_NTHREADS"] = f"{settings.COMPUTATION_SIDECAR_DASK_NTHREADS}" - - # find service parameters - network_id = await get_network_id( - docker_client, settings.GATEWAY_WORKERS_NETWORK, logger - ) - service_parameters = create_service_config( - settings, - env, - service_name, - network_id, - cluster_secrets, - cmd, - labels=labels, - placement=placement, - **service_kwargs, - ) - - # start service - logger.info("Starting service %s", service_name) - logger.debug("Using parameters %s", json.dumps(service_parameters, indent=2)) - service = await docker_client.services.create(**service_parameters) - logger.info("Service %s started: %s", service_name, f"{service=}") - yield {"service_id": service["ID"]} - - # get the full info from docker - service = await docker_client.services.inspect(service["ID"]) - logger.debug( - "Service '%s' inspection: %s", - service_name, - f"{json.dumps(service, indent=2)}", - ) - - # wait until the service is started - logger.info( - "---> Service started, waiting for service %s to run...", - service_name, - ) - while not await is_service_task_running( - docker_client, service["Spec"]["Name"], logger - ): - yield {"service_id": service["ID"]} - await asyncio.sleep(1) - - # we are done, the service is started - logger.info( - "---> Service %s is started, and has ID %s", - service["Spec"]["Name"], - service["ID"], - ) - yield {"service_id": service["ID"]} - - except (aiodocker.DockerContainerError, aiodocker.DockerError): - logger.exception( - "Unexpected Error while running container with parameters %s", - json.dumps(service_parameters, indent=2), - ) - raise - except asyncio.CancelledError: - logger.warning("Service creation was cancelled") - raise - - -async def stop_service( - docker_client: aiodocker.Docker, service_id: str, logger: logging.Logger -) -> None: - logger.info("Stopping service %s", f"{service_id}") - try: - await docker_client.services.delete(service_id) - logger.info("service %s stopped", f"{service_id=}") - - except aiodocker.DockerContainerError: - logger.exception("Error while stopping service with id %s", f"{service_id=}") - - -async def create_docker_secrets_from_tls_certs_for_cluster( - docker_client: Docker, backend: DBBackendBase, cluster: Cluster -) -> list[DockerSecret]: - tls_cert_path, tls_key_path = backend.get_tls_paths(cluster) - return [ - await create_or_update_secret( - docker_client, 
- f"{tls_cert_path}", - cluster, - secret_data=cluster.tls_cert.decode(), - ), - await create_or_update_secret( - docker_client, - f"{tls_key_path}", - cluster, - secret_data=cluster.tls_key.decode(), - ), - ] - - -OSPARC_SCHEDULER_API_PORT: Final[int] = 8786 -OSPARC_SCHEDULER_DASHBOARD_PORT: Final[int] = 8787 - - -def get_osparc_scheduler_cmd_modifications( - scheduler_service_name: str, -) -> dict[str, str]: - # NOTE: the healthcheck of itisfoundation/dask-sidecar expects the dashboard - # to be on port 8787 - # (see https://github.com/ITISFoundation/osparc-simcore/blob/f3d98dccdae665d23701b0db4ee917364a0fbd99/services/dask-sidecar/Dockerfile) - return { - "--dashboard-address": f":{OSPARC_SCHEDULER_DASHBOARD_PORT}", - "--port": f"{OSPARC_SCHEDULER_API_PORT}", - "--host": scheduler_service_name, - } - - -def modify_cmd_argument( - cmd: list[str], argument_name: str, argument_value: str -) -> list[str]: - modified_cmd = deepcopy(cmd) - try: - dashboard_address_arg_index = modified_cmd.index(argument_name) - modified_cmd[dashboard_address_arg_index + 1] = argument_value - except ValueError: - modified_cmd.extend([argument_name, argument_value]) - return modified_cmd - - -async def get_cluster_information(docker_client: Docker) -> ClusterInformation: - cluster_information = cluster_information_from_docker_nodes( - await docker_client.nodes.list() - ) - - return cluster_information - - -def _find_service_node_assignment(service_tasks: list[Mapping[str, Any]]) -> str | None: - for task in service_tasks: - if task["Status"]["State"] in ("new", "pending"): - # some task is not running yet. that is a bit weird - service_constraints = ( - task.get("Spec", {}).get("Placement", {}).get("Constraints", []) - ) - filtered_service_constraints = list( - filter(lambda x: "node.hostname" in x, service_constraints) - ) - if len(filtered_service_constraints) > 1: - continue - service_placement: str = filtered_service_constraints[0] - return service_placement.split("==")[1] - - if task["Status"]["State"] in ( - "assigned", - "preparing", - "starting", - "running", - ): - return cast(str, task["NodeID"]) # mypy - return None - - -async def get_next_empty_node_hostname( - docker_client: Docker, cluster: Cluster -) -> Hostname: - current_count = getattr(get_next_empty_node_hostname, "counter", -1) + 1 - setattr(get_next_empty_node_hostname, "counter", current_count) # noqa: B010 - - cluster_nodes = deque(await docker_client.nodes.list()) - current_worker_services = await docker_client.services.list( - filters={"label": [f"cluster_id={cluster.id}", "type=worker"]} - ) - used_docker_node_ids = set() - - for service in current_worker_services: - service_tasks = await docker_client.tasks.list( - filters={"service": service["ID"]} - ) - if assigned_node := _find_service_node_assignment(service_tasks): - used_docker_node_ids.add(assigned_node) - - cluster_nodes.rotate(current_count) - for node in cluster_nodes: - if node["ID"] in used_docker_node_ids: - continue - return f"{node['Description']['Hostname']}" - raise NoHostFoundError("Could not find any empty host") diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py b/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py deleted file mode 100644 index c4d442796d2..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py +++ /dev/null @@ -1,24 +0,0 @@ -""" Setup remote debugger with debugpy - a debugger for Python - https://github.com/microsoft/debugpy - -""" - -import logging - - 
-def setup_remote_debugging(logger: logging.Logger) -> None: - try: - logger.debug("Attaching debugpy ...") - - import debugpy # type: ignore[import-untyped] - - REMOTE_DEBUGGING_PORT = 3000 - debugpy.listen(("0.0.0.0", REMOTE_DEBUGGING_PORT)) # nosec - # debugpy.wait_for_client() - - except ImportError as err: - raise RuntimeError( - "Cannot enable remote debugging. Please install debugpy first" - ) from err - - logger.info("Remote debugging enabled: listening port %s", REMOTE_DEBUGGING_PORT) diff --git a/services/osparc-gateway-server/tests/conftest.py b/services/osparc-gateway-server/tests/conftest.py deleted file mode 100644 index b7d545e4f0b..00000000000 --- a/services/osparc-gateway-server/tests/conftest.py +++ /dev/null @@ -1,26 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -from collections.abc import AsyncIterator -from pathlib import Path - -import aiodocker -import pytest - -pytest_plugins = [ - "pytest_simcore.repository_paths", - "pytest_simcore.docker_swarm", -] - - -@pytest.fixture(scope="session") -def package_dir(osparc_simcore_services_dir: Path): - package_folder = osparc_simcore_services_dir / "osparc-gateway-server" - assert package_folder.exists() - return package_folder - - -@pytest.fixture -async def async_docker_client() -> AsyncIterator[aiodocker.Docker]: - async with aiodocker.Docker() as docker_client: - yield docker_client diff --git a/services/osparc-gateway-server/tests/integration/_dask_helpers.py b/services/osparc-gateway-server/tests/integration/_dask_helpers.py deleted file mode 100644 index e81d0332787..00000000000 --- a/services/osparc-gateway-server/tests/integration/_dask_helpers.py +++ /dev/null @@ -1,10 +0,0 @@ -from typing import NamedTuple - -from dask_gateway_server.app import DaskGateway - - -class DaskGatewayServer(NamedTuple): - address: str - proxy_address: str - password: str - server: DaskGateway diff --git a/services/osparc-gateway-server/tests/integration/conftest.py b/services/osparc-gateway-server/tests/integration/conftest.py deleted file mode 100644 index dc89484803e..00000000000 --- a/services/osparc-gateway-server/tests/integration/conftest.py +++ /dev/null @@ -1,139 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - - -import asyncio -import json -from typing import Any, AsyncIterator, Awaitable, Callable - -import aiodocker -import dask_gateway -import pytest -import traitlets -import traitlets.config -from _dask_helpers import DaskGatewayServer -from dask_gateway_server.app import DaskGateway -from faker import Faker -from osparc_gateway_server.backend.osparc import OsparcBackend -from osparc_gateway_server.backend.utils import ( - OSPARC_SCHEDULER_API_PORT, - OSPARC_SCHEDULER_DASHBOARD_PORT, -) -from pytest_simcore.helpers.host import get_localhost_ip -from tenacity.asyncio import AsyncRetrying -from tenacity.wait import wait_fixed - - -@pytest.fixture -async def docker_volume( - async_docker_client: aiodocker.Docker, -) -> AsyncIterator[Callable[[str], Awaitable[dict[str, Any]]]]: - volumes = [] - - async def _volume_creator(name: str) -> dict[str, Any]: - volume = await async_docker_client.volumes.create(config={"Name": name}) - assert volume - print(f"--> created {volume=}") - volumes.append(volume) - return await volume.show() - - yield _volume_creator - - # cleanup - async def _wait_for_volume_deletion(volume: aiodocker.docker.DockerVolume): - inspected_volume = await volume.show() - async for attempt in AsyncRetrying(reraise=True, wait=wait_fixed(1)): 
- with attempt: - print(f"<-- deleting volume '{inspected_volume['Name']}'...") - await volume.delete() - print(f"<-- volume '{inspected_volume['Name']}' deleted") - - await asyncio.gather(*[_wait_for_volume_deletion(v) for v in volumes]) - - -@pytest.fixture -def gateway_password(faker: Faker) -> str: - return faker.password() - - -def _convert_to_dict(c: traitlets.config.Config | dict) -> dict[str, Any]: - converted_dict = {} - for x, y in c.items(): - if isinstance(y, (dict, traitlets.config.Config)): - converted_dict[x] = _convert_to_dict(y) - else: - converted_dict[x] = f"{y}" - return converted_dict - - -@pytest.fixture -def mock_scheduler_cmd_modifications(mocker): - """This mock is necessary since: - If the osparc-gateway-server is running in the host then: - - dask-scheduler must start with "" for --host, so the dask-scheduler defines its IP as being in docker_gw_bridge (172.18.0.X), accessible from the host - When the osparc-gateway-server is running as a docker container, then the --host must be set - as "cluster_X_scheduler" since this is the hostname of the container and resolves into the dask-gateway network - """ - mocker.patch( - "osparc_gateway_server.backend.osparc.get_osparc_scheduler_cmd_modifications", - autospec=True, - return_value={ - "--dashboard-address": f":{OSPARC_SCHEDULER_DASHBOARD_PORT}", - "--port": f"{OSPARC_SCHEDULER_API_PORT}", - }, - ) - - -@pytest.fixture -async def local_dask_gateway_server( - mock_scheduler_cmd_modifications, - minimal_config: None, - gateway_password: str, -) -> AsyncIterator[DaskGatewayServer]: - """this code is more or less copy/pasted from dask-gateway repo""" - c = traitlets.config.Config() - c.DaskGateway.backend_class = OsparcBackend # type: ignore - c.DaskGateway.address = "127.0.0.1:0" # type: ignore - c.DaskGateway.log_level = "DEBUG" # type: ignore - c.Proxy.address = f"{get_localhost_ip()}:0" # type: ignore - c.DaskGateway.authenticator_class = "dask_gateway_server.auth.SimpleAuthenticator" # type: ignore - c.SimpleAuthenticator.password = gateway_password # type: ignore - print(f"--> local dask gateway config: {json.dumps(_convert_to_dict(c), indent=2)}") - dask_gateway_server = DaskGateway(config=c) - dask_gateway_server.initialize([]) # that is a shitty one! 
- print("--> local dask gateway server initialized") - await dask_gateway_server.setup() - await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access - print("--> local dask gateway server setup completed") - yield DaskGatewayServer( - f"http://{dask_gateway_server.backend.proxy.address}", - f"gateway://{dask_gateway_server.backend.proxy.tcp_address}", - c.SimpleAuthenticator.password, # type: ignore - dask_gateway_server, - ) - print("<-- local dask gateway server switching off...") - await dask_gateway_server.cleanup() - print("...done") - - -@pytest.fixture -async def gateway_client( - local_dask_gateway_server: DaskGatewayServer, -) -> AsyncIterator[dask_gateway.Gateway]: - async with dask_gateway.Gateway( - local_dask_gateway_server.address, - local_dask_gateway_server.proxy_address, - asynchronous=True, - auth=dask_gateway.BasicAuth( - username="pytest_user", password=local_dask_gateway_server.password - ), - ) as gateway: - assert gateway - print(f"--> {gateway} created") - cluster_options = await gateway.cluster_options() - gateway_versions = await gateway.get_versions() - clusters_list = await gateway.list_clusters() - print(f"--> {gateway_versions}, {cluster_options}, {clusters_list}") - for option in cluster_options.items(): - print(f"--> {option}") - yield gateway diff --git a/services/osparc-gateway-server/tests/integration/test_clusters.py b/services/osparc-gateway-server/tests/integration/test_clusters.py deleted file mode 100644 index 2f31188394e..00000000000 --- a/services/osparc-gateway-server/tests/integration/test_clusters.py +++ /dev/null @@ -1,255 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -import asyncio -from collections.abc import Awaitable, Callable -from typing import Any - -import pytest -from _dask_helpers import DaskGatewayServer -from aiodocker import Docker -from dask_gateway import Gateway -from faker import Faker -from pytest_simcore.helpers.host import get_localhost_ip -from tenacity.asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - - -@pytest.fixture( - params=[ - "local/dask-sidecar:production", - ] -) -def minimal_config( - docker_swarm, - monkeypatch: pytest.MonkeyPatch, - faker: Faker, - request: pytest.FixtureRequest, -): - monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", faker.pystr()) - monkeypatch.setenv("GATEWAY_SERVER_NAME", get_localhost_ip()) - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_VOLUME_NAME", faker.pystr()) - monkeypatch.setenv( - "COMPUTATIONAL_SIDECAR_IMAGE", - request.param, # type: ignore - ) - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_LOG_LEVEL", "DEBUG") - monkeypatch.setenv("GATEWAY_SERVER_ONE_WORKER_PER_NODE", "False") - - -@pytest.fixture -async def gateway_worker_network( - local_dask_gateway_server: DaskGatewayServer, - docker_network: Callable[..., Awaitable[dict[str, Any]]], -) -> dict[str, Any]: - network = await docker_network( - Name=local_dask_gateway_server.server.backend.settings.GATEWAY_WORKERS_NETWORK - ) - return network - - -async def assert_services_stability(docker_client: Docker, service_name: str): - list_services = await docker_client.services.list(filters={"name": service_name}) - assert ( - len(list_services) == 1 - ), f"{service_name} is missing from the expected services in {list_services}" - _SECONDS_STABLE = 10 - print(f"--> {service_name} is up, now checking if it is running...") - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), 
stop=stop_after_delay(60) - ): - with attempt: - tasks_list = await docker_client.tasks.list( - filters={"service": service_name} - ) - tasks_current_state = [t["Status"]["State"] for t in tasks_list] - print(f"--> {service_name} service task states are {tasks_current_state=}") - num_running = sum(current == "running" for current in tasks_current_state) - assert num_running == 1 - print(f"--> {service_name} is running now") - print( - f"--> {service_name} is running, now checking if it is stable during {_SECONDS_STABLE}s..." - ) - - async def _check_stability(service: dict[str, Any]): - inspected_service = await docker_client.services.inspect(service["ID"]) - # we ensure the service remains stable for _SECONDS_STABLE seconds (e.g. only one task runs) - - print( - f"--> checking {_SECONDS_STABLE} seconds for stability of service {inspected_service['Spec']['Name']=}" - ) - for n in range(_SECONDS_STABLE): - service_tasks = await docker_client.tasks.list( - filters={"service": inspected_service["Spec"]["Name"]} - ) - assert ( - len(service_tasks) == 1 - ), f"The service is not stable it shows {service_tasks}" - print(f"the {service_name=} is stable after {n} seconds...") - await asyncio.sleep(1) - print(f"{service_name=} stable!!") - - await asyncio.gather(*[_check_stability(s) for s in list_services]) - - -async def _wait_for_cluster_services_and_secrets( - async_docker_client: Docker, - num_services: int, - num_secrets: int, - timeout_s: int = 10, -) -> list[dict[str, Any]]: - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(timeout_s) - ): - with attempt: - list_services = await async_docker_client.services.list() - print( - f"--> list of services after {attempt.retry_state.attempt_number}s: {list_services=}, expected {num_services=}" - ) - assert len(list_services) == num_services - # as the secrets - list_secrets = await async_docker_client.secrets.list() - print( - f"--> list of secrets after {attempt.retry_state.attempt_number}s: {list_secrets=}, expected {num_secrets}" - ) - assert len(list_secrets) == num_secrets - return list_services - # needed for pylint - raise AssertionError("Invalid call to _wait_for_cluster_services_and_secrets") - - -async def test_clusters_start_stop( - minimal_config, - gateway_worker_network, - gateway_client: Gateway, - async_docker_client: Docker, -): - """Each cluster is made of 1 scheduler + X number of sidecars (with 0<=X dict[str, Any]: - return await docker_volume(faker.pystr()) - - -@pytest.fixture -def computational_sidecar_mounted_folder() -> str: - return "/comp_shared_folder" - - -@pytest.fixture -def sidecar_envs( - computational_sidecar_mounted_folder: str, - sidecar_computational_shared_volume: dict[str, Any], -) -> dict[str, str]: - return { - "SIDECAR_COMP_SERVICES_SHARED_FOLDER": f"{computational_sidecar_mounted_folder}", - "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME": f"{sidecar_computational_shared_volume['Name']}", - } - - -@pytest.fixture -def sidecar_mounts( - sidecar_computational_shared_volume: dict[str, Any], - computational_sidecar_mounted_folder: str, -) -> list[dict[str, Any]]: - return [ # docker socket needed to use the docker api - { - "Source": "/var/run/docker.sock", - "Target": "/var/run/docker.sock", - "Type": "bind", - "ReadOnly": True, - }, - # the sidecar computational data must be mounted - { - "Source": sidecar_computational_shared_volume["Name"], - "Target": computational_sidecar_mounted_folder, - "Type": "volume", - "ReadOnly": False, - }, - ] - - -@pytest.fixture -async 
def create_docker_service( - async_docker_client: aiodocker.Docker, -) -> AsyncIterator[Callable[..., Awaitable[Mapping[str, Any]]]]: - services = [] - - async def service_creator(**service_kwargs) -> Mapping[str, Any]: - service = await async_docker_client.services.create(**service_kwargs) - assert service - assert "ID" in service - services.append(service["ID"]) - return await async_docker_client.services.inspect(service["ID"]) - - yield service_creator - # cleanup - await asyncio.gather(*[async_docker_client.services.delete(s) for s in services]) - - -async def _wait_for_service_to_be_ready( - docker_client: aiodocker.Docker, service_name: str -): - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - tasks_list = await docker_client.tasks.list( - filters={"service": service_name} - ) - tasks_current_state = [t["Status"]["State"] for t in tasks_list] - print(f"--> {service_name} service task states are {tasks_current_state=}") - num_running = sum(current == "running" for current in tasks_current_state) - assert num_running == 1 - print(f"--> {service_name} is running now") - - -@pytest.mark.parametrize( - "image_name", - [ - "local/dask-sidecar:production", - ], -) -async def test_computational_sidecar_properly_start_stop( - docker_swarm: None, - sidecar_computational_shared_volume: dict[str, Any], - async_docker_client: aiodocker.Docker, - image_name: str, - sidecar_envs: dict[str, str], - sidecar_mounts: list[dict[str, Any]], - create_docker_service: Callable[..., Awaitable[dict[str, Any]]], -): - scheduler_service = await create_docker_service( - task_template={ - "ContainerSpec": { - "Image": image_name, - "Env": sidecar_envs - | { - "DASK_START_AS_SCHEDULER": "1", - "DASK_SCHEDULER_URL": f"tcp://{get_localhost_ip()}:8786", - }, - "Init": True, - "Mounts": sidecar_mounts, - } - }, - endpoint_spec={"Ports": [{"PublishedPort": 8786, "TargetPort": 8786}]}, - name="pytest_dask_scheduler", - ) - await _wait_for_service_to_be_ready( - async_docker_client, scheduler_service["Spec"]["Name"] - ) - sidecar_service = await create_docker_service( - task_template={ - "ContainerSpec": { - "Image": image_name, - "Env": sidecar_envs - | {"DASK_SCHEDULER_URL": f"tcp://{get_localhost_ip()}:8786"}, - "Init": True, - "Mounts": sidecar_mounts, - } - }, - name="pytest_dask_sidecar", - ) - await _wait_for_service_to_be_ready( - async_docker_client, sidecar_service["Spec"]["Name"] - ) diff --git a/services/osparc-gateway-server/tests/integration/test_gateway.py b/services/osparc-gateway-server/tests/integration/test_gateway.py deleted file mode 100644 index 7009c12cb5b..00000000000 --- a/services/osparc-gateway-server/tests/integration/test_gateway.py +++ /dev/null @@ -1,55 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -import pytest -import traitlets -import traitlets.config -from dask_gateway_server.app import DaskGateway -from faker import Faker -from osparc_gateway_server.backend.osparc import OsparcBackend -from pytest_simcore.helpers.host import get_localhost_ip - - -@pytest.fixture( - params=[ - "local/dask-sidecar:production", - ] -) -def minimal_config( - docker_swarm, - monkeypatch: pytest.MonkeyPatch, - faker: Faker, - request: pytest.FixtureRequest, -): - monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", faker.pystr()) - monkeypatch.setenv("GATEWAY_SERVER_NAME", get_localhost_ip()) - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_VOLUME_NAME", faker.pystr()) - monkeypatch.setenv( - 
"COMPUTATIONAL_SIDECAR_IMAGE", - request.param, # type: ignore - ) - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_LOG_LEVEL", "DEBUG") - - -async def test_gateway_configuration_through_env_variables( - minimal_config, monkeypatch, faker: Faker -): - cluster_start_timeout = faker.pyfloat() - monkeypatch.setenv("GATEWAY_CLUSTER_START_TIMEOUT", f"{cluster_start_timeout}") - worker_start_timeout = faker.pyfloat() - monkeypatch.setenv("GATEWAY_WORKER_START_TIMEOUT", f"{worker_start_timeout}") - c = traitlets.config.Config() - c.DaskGateway.backend_class = OsparcBackend # type: ignore - dask_gateway_server = DaskGateway(config=c) - dask_gateway_server.initialize([]) # that is a shitty one! - print("--> local dask gateway server initialized") - await dask_gateway_server.setup() - await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access - print("--> local dask gateway server setup completed") - - assert dask_gateway_server.backend.cluster_start_timeout == cluster_start_timeout - assert dask_gateway_server.backend.worker_start_timeout == worker_start_timeout - - print("<-- local dask gateway server switching off...") - await dask_gateway_server.cleanup() - print("...done") diff --git a/services/osparc-gateway-server/tests/system/Makefile b/services/osparc-gateway-server/tests/system/Makefile deleted file mode 100644 index fc9cd92a3f5..00000000000 --- a/services/osparc-gateway-server/tests/system/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# -# Targets for DEVELOPMENT for system tests -# -include ../../../../scripts/common.Makefile - - -.PHONY: requirements -requirements: ## compiles pip requirements (.in -> .txt) - @$(MAKE_C) requirements reqs - - -.PHONY: install install-dev install-prod install-ci - -install: install-ci - -install-dev install-prod install-ci: _check_venv_active ## install requirements in dev/prod/ci mode - # installing in $(subst install-,,$@) mode - @uv pip sync requirements/$(subst install-,,$@).txt - - -.PHONY: tests -tests: _check_venv_active ## runs all tests [CI] - # running system tests - pytest \ - --asyncio-mode=auto \ - --color=yes \ - --durations=10 \ - -vv \ - $(CURDIR) - -.PHONY: test-dev -tests-dev: _check_venv_active ## runs all tests [DEV] - # running system tests - @pytest \ - --asyncio-mode=auto \ - --color=yes \ - --durations=10 \ - --exitfirst \ - --failed-first \ - --keep-docker-up \ - --pdb \ - -vv \ - $(CURDIR) diff --git a/services/osparc-gateway-server/tests/system/requirements/Makefile b/services/osparc-gateway-server/tests/system/requirements/Makefile deleted file mode 100644 index c447724e305..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# -# Targets to pip-compile requirements -# -include ../../../../../requirements/base.Makefile - -# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt diff --git a/services/osparc-gateway-server/tests/system/requirements/_base.txt b/services/osparc-gateway-server/tests/system/requirements/_base.txt deleted file mode 100644 index 0eb14367cec..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_base.txt +++ /dev/null @@ -1,6 +0,0 @@ -# NOTE: -# This file file is just here as placeholder -# to fulfill dependencies of _tools.txt target in requirements/base.Makefile -# -# This is a pure-tests project and all dependencies are added in _test.in -# diff --git a/services/osparc-gateway-server/tests/system/requirements/_test.in b/services/osparc-gateway-server/tests/system/requirements/_test.in deleted file mode 100644 index 09fa07fb7e3..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_test.in +++ /dev/null @@ -1,20 +0,0 @@ ---constraint ../../../../../requirements/constraints.txt ---constraint ../../../../dask-sidecar/requirements/_dask-distributed.txt - - - -aiodocker -dask-gateway -docker -faker -lz4 -numpy -pytest -pytest-asyncio -pytest-cov -pytest-icdiff -pytest-instafail -pytest-mock -pytest-runner -pytest-sugar -tenacity diff --git a/services/osparc-gateway-server/tests/system/requirements/_test.txt b/services/osparc-gateway-server/tests/system/requirements/_test.txt deleted file mode 100644 index 29d4e7666d4..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_test.txt +++ /dev/null @@ -1,194 +0,0 @@ -aiodocker==0.23.0 - # via -r requirements/_test.in -aiohappyeyeballs==2.4.0 - # via aiohttp -aiohttp==3.10.5 - # via - # -c requirements/../../../../../requirements/constraints.txt - # aiodocker - # dask-gateway -aiosignal==1.3.1 - # via aiohttp -attrs==24.2.0 - # via aiohttp -certifi==2024.8.30 - # via - # -c requirements/../../../../../requirements/constraints.txt - # requests -charset-normalizer==3.3.2 - # via requests -click==8.1.7 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # dask-gateway - # distributed -cloudpickle==3.0.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed -coverage==7.6.1 - # via pytest-cov -dask==2024.5.1 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway - # distributed -dask-gateway==2024.1.0 - # via -r requirements/_test.in -distributed==2024.5.1 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway -docker==7.1.0 - # via -r requirements/_test.in -faker==29.0.0 - # via -r requirements/_test.in -frozenlist==1.4.1 - # via - # aiohttp - # aiosignal -fsspec==2024.5.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask -icdiff==2.0.7 - # via pytest-icdiff -idna==3.10 - # via - # requests - # yarl -importlib-metadata==7.1.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask -iniconfig==2.0.0 - # via pytest -jinja2==3.1.4 - # via - # -c requirements/../../../../../requirements/constraints.txt - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -locket==1.0.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed - # partd -lz4==4.3.3 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # -r requirements/_test.in -markupsafe==2.1.5 - # via - # -c 
requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # jinja2 -msgpack==1.1.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -multidict==6.1.0 - # via - # aiohttp - # yarl -numpy==1.26.4 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # -r requirements/_test.in -packaging==24.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed - # pytest - # pytest-sugar -partd==1.4.2 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask -pluggy==1.5.0 - # via pytest -pprintpp==0.4.0 - # via pytest-icdiff -psutil==6.0.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -pytest==8.3.3 - # via - # -r requirements/_test.in - # pytest-asyncio - # pytest-cov - # pytest-icdiff - # pytest-instafail - # pytest-mock - # pytest-sugar -pytest-asyncio==0.23.8 - # via - # -c requirements/../../../../../requirements/constraints.txt - # -r requirements/_test.in -pytest-cov==5.0.0 - # via -r requirements/_test.in -pytest-icdiff==0.9 - # via -r requirements/_test.in -pytest-instafail==0.5.0 - # via -r requirements/_test.in -pytest-mock==3.14.0 - # via -r requirements/_test.in -pytest-runner==6.0.1 - # via -r requirements/_test.in -pytest-sugar==1.0.0 - # via -r requirements/_test.in -python-dateutil==2.9.0.post0 - # via faker -pyyaml==6.0.1 - # via - # -c requirements/../../../../../requirements/constraints.txt - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # dask-gateway - # distributed -requests==2.32.3 - # via docker -six==1.16.0 - # via python-dateutil -sortedcontainers==2.4.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -tblib==3.0.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -tenacity==9.0.0 - # via -r requirements/_test.in -termcolor==2.4.0 - # via pytest-sugar -toolz==0.12.1 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed - # partd -tornado==6.4 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway - # distributed -urllib3==2.2.3 - # via - # -c requirements/../../../../../requirements/constraints.txt - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed - # docker - # requests -yarl==1.12.1 - # via aiohttp -zict==3.0.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -zipp==3.18.2 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # importlib-metadata diff --git a/services/osparc-gateway-server/tests/system/requirements/_tools.in b/services/osparc-gateway-server/tests/system/requirements/_tools.in deleted file mode 100644 index b0503840a27..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_tools.in +++ /dev/null @@ -1,4 +0,0 @@ ---constraint ../../../../../requirements/constraints.txt ---constraint _test.txt - ---requirement ../../../../../requirements/devenv.txt diff --git a/services/osparc-gateway-server/tests/system/requirements/_tools.txt b/services/osparc-gateway-server/tests/system/requirements/_tools.txt deleted file mode 100644 index 56217c590ee..00000000000 --- 
a/services/osparc-gateway-server/tests/system/requirements/_tools.txt +++ /dev/null @@ -1,78 +0,0 @@ -astroid==3.3.4 - # via pylint -black==24.8.0 - # via -r requirements/../../../../../requirements/devenv.txt -build==1.2.2 - # via pip-tools -bump2version==1.0.1 - # via -r requirements/../../../../../requirements/devenv.txt -cfgv==3.4.0 - # via pre-commit -click==8.1.7 - # via - # -c requirements/_test.txt - # black - # pip-tools -dill==0.3.8 - # via pylint -distlib==0.3.8 - # via virtualenv -filelock==3.16.1 - # via virtualenv -identify==2.6.1 - # via pre-commit -isort==5.13.2 - # via - # -r requirements/../../../../../requirements/devenv.txt - # pylint -mccabe==0.7.0 - # via pylint -mypy==1.12.0 - # via -r requirements/../../../../../requirements/devenv.txt -mypy-extensions==1.0.0 - # via - # black - # mypy -nodeenv==1.9.1 - # via pre-commit -packaging==24.0 - # via - # -c requirements/_test.txt - # black - # build -pathspec==0.12.1 - # via black -pip==24.2 - # via pip-tools -pip-tools==7.4.1 - # via -r requirements/../../../../../requirements/devenv.txt -platformdirs==4.3.6 - # via - # black - # pylint - # virtualenv -pre-commit==3.8.0 - # via -r requirements/../../../../../requirements/devenv.txt -pylint==3.3.0 - # via -r requirements/../../../../../requirements/devenv.txt -pyproject-hooks==1.1.0 - # via - # build - # pip-tools -pyyaml==6.0.1 - # via - # -c requirements/../../../../../requirements/constraints.txt - # -c requirements/_test.txt - # pre-commit -ruff==0.6.7 - # via -r requirements/../../../../../requirements/devenv.txt -setuptools==75.1.0 - # via pip-tools -tomlkit==0.13.2 - # via pylint -typing-extensions==4.12.2 - # via mypy -virtualenv==20.26.5 - # via pre-commit -wheel==0.44.0 - # via pip-tools diff --git a/services/osparc-gateway-server/tests/system/requirements/ci.txt b/services/osparc-gateway-server/tests/system/requirements/ci.txt deleted file mode 100644 index 684ed6c7887..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/ci.txt +++ /dev/null @@ -1,14 +0,0 @@ -# Shortcut to install all packages for the contigous integration (CI) of 'services/web/server' -# -# - As ci.txt but w/ tests -# -# Usage: -# pip install -r requirements/ci.txt -# - -# installs base + tests requirements ---requirement _test.txt ---requirement _tools.txt - -# installs this repo's packages -pytest-simcore @ ../../../../packages/pytest-simcore/ diff --git a/services/osparc-gateway-server/tests/system/requirements/dev.txt b/services/osparc-gateway-server/tests/system/requirements/dev.txt deleted file mode 100644 index 436b5550342..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/dev.txt +++ /dev/null @@ -1,15 +0,0 @@ -# Shortcut to install all packages needed to develop 'services/web/server' -# -# - As ci.txt but with current and repo packages in develop (edit) mode -# -# Usage: -# pip install -r requirements/dev.txt -# - - -# installs base + tests requirements ---requirement _test.txt ---requirement _tools.txt - -# installs this repo's packages ---editable ../../../../packages/pytest-simcore/ diff --git a/services/osparc-gateway-server/tests/system/test_deploy.py b/services/osparc-gateway-server/tests/system/test_deploy.py deleted file mode 100644 index 7e4044f6337..00000000000 --- a/services/osparc-gateway-server/tests/system/test_deploy.py +++ /dev/null @@ -1,160 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - - -import asyncio -import json -from collections.abc import AsyncIterator -from copy import 
deepcopy -from pathlib import Path - -import aiohttp -import dask_gateway -import pytest -from faker import Faker -from pytest_simcore.helpers.host import get_localhost_ip -from tenacity.asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -pytest_plugins = ["pytest_simcore.repository_paths", "pytest_simcore.docker_swarm"] - - -@pytest.fixture -async def aiohttp_client() -> AsyncIterator[aiohttp.ClientSession]: - async with aiohttp.ClientSession() as session: - yield session - - -@pytest.fixture -def minimal_config(monkeypatch): - monkeypatch.setenv("SC_BOOT_MODE", "production") - monkeypatch.setenv("GATEWAY_SERVER_ONE_WORKER_PER_NODE", "False") - - -@pytest.fixture(scope="session") -def dask_gateway_entrypoint() -> str: - return f"http://{get_localhost_ip()}:8000" - - -@pytest.fixture(scope="session") -def dask_gateway_password() -> str: - return "asdf" - - -@pytest.fixture -async def dask_gateway_stack_deployed_services( - minimal_config, - package_dir: Path, - docker_swarm, - aiohttp_client: aiohttp.ClientSession, - dask_gateway_entrypoint: str, -): - print("--> Deploying osparc-dask-gateway stack...") - process = await asyncio.create_subprocess_exec( - "make", - "up-prod", - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - cwd=package_dir, - ) - stdout, stderr = await process.communicate() - assert ( - process.returncode == 0 - ), f"Unexpected error while deploying stack:\nstdout:{stdout.decode()}\n\nstderr:{stderr.decode()}" - print(f"{stdout}") - print("--> osparc-dask-gateway stack deployed.") - healtcheck_endpoint = f"{dask_gateway_entrypoint}/api/health" - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - print( - f"--> Connecting to {healtcheck_endpoint}, " - f"attempt {attempt.retry_state.attempt_number}...", - ) - response = await aiohttp_client.get(healtcheck_endpoint) - response.raise_for_status() - print( - f"--> Connection to gateway server succeeded." 
- f" [{json.dumps(attempt.retry_state.retry_object.statistics)}]", - ) - - yield - print("<-- Stopping osparc-dask-gateway stack...") - process = await asyncio.create_subprocess_exec( - "make", - "down", - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - cwd=package_dir, - ) - stdout, stderr = await process.communicate() - assert ( - process.returncode == 0 - ), f"Unexpected error while deploying stack:\nstdout:{stdout.decode()}\n\n{stderr.decode()}" - print(f"{stdout}") - print("<-- osparc-dask-gateway stack stopped.") - - -async def test_deployment( - dask_gateway_stack_deployed_services, - dask_gateway_entrypoint: str, - faker: Faker, - dask_gateway_password: str, -): - gateway = dask_gateway.Gateway( - address=dask_gateway_entrypoint, - auth=dask_gateway.BasicAuth(faker.pystr(), dask_gateway_password), - ) - - with gateway.new_cluster() as cluster: - _NUM_WORKERS = 2 - cluster.scale( - _NUM_WORKERS - ) # when returning we are in the process of creating the workers - - # now wait until we get the workers - workers = None - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - print( - f"--> Waiting to have {_NUM_WORKERS} running," - f" attempt {attempt.retry_state.attempt_number}...", - ) - assert "workers" in cluster.scheduler_info - assert len(cluster.scheduler_info["workers"]) == _NUM_WORKERS - workers = deepcopy(cluster.scheduler_info["workers"]) - print( - f"!-- {_NUM_WORKERS} are running," - f" [{json.dumps(attempt.retry_state.retry_object.statistics)}]", - ) - - # now check all this is stable - _SECONDS_STABLE = 6 - for n in range(_SECONDS_STABLE): - # NOTE: the scheduler_info gets auto-udpated by the dask-gateway internals - assert workers == cluster.scheduler_info["workers"] - await asyncio.sleep(1) - print(f"!-- {_NUM_WORKERS} stable for {n} seconds") - - # send some work - def square(x): - return x**2 - - def neg(x): - return -x - - with cluster.get_client() as client: - square_of_2 = client.submit(square, 2) - assert square_of_2.result(timeout=10) == 4 - assert not square_of_2.exception(timeout=10) - - # now send some more stuff just for the fun - A = client.map(square, range(10)) - B = client.map(neg, A) - - total = client.submit(sum, B) - print("computation completed", total.result(timeout=120)) diff --git a/services/osparc-gateway-server/tests/unit/test_osparc.py b/services/osparc-gateway-server/tests/unit/test_osparc.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/services/osparc-gateway-server/tests/unit/test_settings.py b/services/osparc-gateway-server/tests/unit/test_settings.py deleted file mode 100644 index 37adbcd168b..00000000000 --- a/services/osparc-gateway-server/tests/unit/test_settings.py +++ /dev/null @@ -1,20 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -import pytest -from osparc_gateway_server.backend.settings import AppSettings - - -@pytest.fixture -def minimal_config(monkeypatch: pytest.MonkeyPatch): - monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", "atestnetwork") - monkeypatch.setenv("GATEWAY_SERVER_NAME", "atestserver") - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_IMAGE", "test/localpytest:latest") - monkeypatch.setenv( - "COMPUTATIONAL_SIDECAR_VOLUME_NAME", "sidecar_computational_volume_name" - ) - - -def test_app_settings(minimal_config): - settings = AppSettings() - assert settings diff --git a/services/osparc-gateway-server/tests/unit/test_utils.py 
b/services/osparc-gateway-server/tests/unit/test_utils.py deleted file mode 100644 index f512395ee7e..00000000000 --- a/services/osparc-gateway-server/tests/unit/test_utils.py +++ /dev/null @@ -1,460 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=redefined-outer-name - -import asyncio -import socket -from copy import deepcopy -from pathlib import Path -from typing import Any, AsyncIterator, Awaitable, Callable -from unittest import mock - -import aiodocker -import pytest -from dask_gateway_server.backends.db_base import Cluster, JobStatus -from faker import Faker -from osparc_gateway_server.backend.errors import NoHostFoundError -from osparc_gateway_server.backend.settings import AppSettings -from osparc_gateway_server.backend.utils import ( - _DASK_KEY_CERT_PATH_IN_SIDECAR, - DockerSecret, - create_or_update_secret, - create_service_config, - delete_secrets, - get_cluster_information, - get_network_id, - get_next_empty_node_hostname, - is_service_task_running, -) -from pytest_mock.plugin import MockerFixture -from tenacity.asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - - -@pytest.fixture -def minimal_config(monkeypatch): - monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", "atestnetwork") - monkeypatch.setenv("GATEWAY_SERVER_NAME", "atestserver") - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_IMAGE", "test/localpytest:latest") - monkeypatch.setenv( - "COMPUTATIONAL_SIDECAR_VOLUME_NAME", "sidecar_computational_volume_name" - ) - - -@pytest.fixture() -async def create_docker_service( - docker_swarm, async_docker_client: aiodocker.Docker, faker: Faker -) -> AsyncIterator[ - Callable[[dict[str, str], dict[str, Any]], Awaitable[dict[str, Any]]] -]: - created_services = [] - - async def _creator( - labels: dict[str, str], override_task_template: dict[str, Any] - ) -> dict[str, Any]: - task_template = { - "ContainerSpec": { - "Image": "busybox:latest", - "Command": ["sleep", "10000"], - } - } - task_template.update(override_task_template) - service = await async_docker_client.services.create( - task_template=task_template, - name=faker.pystr(), - labels=labels, - ) - assert service - created_services.append(service) - print(f"--> created docker service {service}") - inspected_service = await async_docker_client.services.inspect(service["ID"]) - print(f"--> service inspected returned {inspected_service}") - return inspected_service - - yield _creator - - await asyncio.gather( - *[async_docker_client.services.delete(s["ID"]) for s in created_services] - ) - - -@pytest.fixture -def create_running_service( - async_docker_client: aiodocker.Docker, - create_docker_service: Callable[ - [dict[str, str], dict[str, Any]], Awaitable[dict[str, Any]] - ], -) -> Callable[[dict[str, str]], Awaitable[dict[str, Any]]]: - async def _creator(labels: dict[str, str]) -> dict[str, Any]: - service = await create_docker_service(labels, {}) - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - tasks = await async_docker_client.tasks.list( - filters={"service": f"{service['Spec']['Name']}"} - ) - task_states = [task["Status"]["State"] for task in tasks] - num_running = sum(current == "running" for current in task_states) - print(f"--> service task states {task_states=}") - assert num_running == 1 - print(f"--> service {service['Spec']['Name']} is running now") - return service - raise AssertionError(f"service {service=} could not start") - - 
return _creator - - -@pytest.fixture -def mocked_logger(mocker: MockerFixture) -> mock.MagicMock: - return mocker.MagicMock() - - -async def test_is_task_running( - docker_swarm, - minimal_config, - async_docker_client: aiodocker.Docker, - create_running_service: Callable[[dict[str, str]], Awaitable[dict[str, Any]]], - mocked_logger: mock.MagicMock, -): - service = await create_running_service({}) - # this service exists and run - assert ( - await is_service_task_running( - async_docker_client, service["Spec"]["Name"], mocked_logger - ) - == True - ) - - # check unknown service raises error - with pytest.raises(aiodocker.DockerError): - await is_service_task_running( - async_docker_client, "unknown_service", mocked_logger - ) - - -async def test_get_network_id( - docker_swarm, - async_docker_client: aiodocker.Docker, - docker_network: Callable[..., Awaitable[dict[str, Any]]], - mocked_logger: mock.MagicMock, -): - # wrong name shall raise - with pytest.raises(ValueError): - await get_network_id(async_docker_client, "a_fake_network_name", mocked_logger) - # create 1 bridge network, shall raise when looking for it - bridge_network = await docker_network(**{"Driver": "bridge"}) - with pytest.raises(ValueError): - await get_network_id(async_docker_client, bridge_network["Name"], mocked_logger) - # create 1 overlay network - overlay_network = await docker_network() - network_id = await get_network_id( - async_docker_client, overlay_network["Name"], mocked_logger - ) - assert network_id == overlay_network["Id"] - - # create a second overlay network with the same name, shall raise on creation, so not possible - with pytest.raises(aiodocker.exceptions.DockerError): - await docker_network(**{"Name": overlay_network["Name"]}) - assert ( - True - ), "If it is possible to have 2 networks with the same name, this must be handled" - - -@pytest.fixture -async def fake_cluster(faker: Faker) -> Cluster: - return Cluster(id=faker.uuid4(), name=faker.pystr(), status=JobStatus.CREATED) - - -@pytest.fixture -async def docker_secret_cleaner( - async_docker_client: aiodocker.Docker, fake_cluster: Cluster -) -> AsyncIterator: - yield - await delete_secrets(async_docker_client, fake_cluster) - - -async def test_create_service_config( - docker_swarm, - async_docker_client: aiodocker.Docker, - minimal_config: None, - faker: Faker, - fake_cluster: Cluster, - docker_secret_cleaner, -): - # let's create some fake service config - settings = AppSettings() # type: ignore - service_env = faker.pydict() - service_name = faker.name() - network_id = faker.uuid4() - cmd = faker.pystr() - fake_labels = faker.pydict() - fake_placement = {"Constraints": [f"node.hostname=={faker.hostname()}"]} - - # create a second one - secrets = [ - await create_or_update_secret( - async_docker_client, - faker.file_path(), - fake_cluster, - secret_data=faker.text(), - ) - for n in range(3) - ] - - assert len(await async_docker_client.secrets.list()) == 3 - - # we shall have some env that tells the service where the secret is located - expected_service_env = deepcopy(service_env) - for s in secrets: - fake_env_key = faker.pystr() - service_env[fake_env_key] = s.secret_file_name - expected_service_env[ - fake_env_key - ] = f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}" - - service_parameters = create_service_config( - settings=settings, - service_env=service_env, - service_name=service_name, - network_id=network_id, - service_secrets=secrets, - cmd=cmd, - labels=fake_labels, - placement=fake_placement, - ) - assert 
service_parameters - assert service_parameters["name"] == service_name - assert network_id in service_parameters["networks"] - - for env_key, env_value in expected_service_env.items(): - assert env_key in service_parameters["task_template"]["ContainerSpec"]["Env"] - assert ( - service_parameters["task_template"]["ContainerSpec"]["Env"][env_key] - == env_value - ) - assert service_parameters["task_template"]["ContainerSpec"]["Command"] == cmd - assert service_parameters["labels"] == fake_labels - assert len(service_parameters["task_template"]["ContainerSpec"]["Secrets"]) == 3 - for service_secret, original_secret in zip( - service_parameters["task_template"]["ContainerSpec"]["Secrets"], secrets - ): - assert service_secret["SecretName"] == original_secret.secret_name - assert service_secret["SecretID"] == original_secret.secret_id - assert ( - service_secret["File"]["Name"] - == f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(original_secret.secret_file_name).name}" - ) - assert service_parameters["task_template"]["Placement"] == fake_placement - - -@pytest.fixture -def fake_secret_file(tmp_path) -> Path: - fake_secret_file = Path(tmp_path / "fake_file") - fake_secret_file.write_text("Hello I am a secret file") - assert fake_secret_file.exists() - return fake_secret_file - - -async def test_create_or_update_docker_secrets_with_invalid_call_raises( - docker_swarm, - async_docker_client: aiodocker.Docker, - fake_cluster: Cluster, - faker: Faker, - docker_secret_cleaner, -): - with pytest.raises(ValueError): - await create_or_update_secret( - async_docker_client, - faker.file_path(), - fake_cluster, - ) - - -async def test_create_or_update_docker_secrets( - docker_swarm, - async_docker_client: aiodocker.Docker, - fake_secret_file: Path, - fake_cluster: Cluster, - faker: Faker, - docker_secret_cleaner, -): - list_of_secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(list_of_secrets) == 0 - file_original_size = fake_secret_file.stat().st_size - # check secret creation - secret_target_file_name = faker.file_path() - created_secret: DockerSecret = await create_or_update_secret( - async_docker_client, - secret_target_file_name, - fake_cluster, - file_path=fake_secret_file, - ) - list_of_secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(list_of_secrets) == 1 - secret = list_of_secrets[0] - assert created_secret.secret_id == secret["ID"] - inspected_secret = await async_docker_client.secrets.inspect(secret["ID"]) - - assert created_secret.secret_name == inspected_secret["Spec"]["Name"] - assert "cluster_id" in inspected_secret["Spec"]["Labels"] - assert inspected_secret["Spec"]["Labels"]["cluster_id"] == fake_cluster.id - assert "cluster_name" in inspected_secret["Spec"]["Labels"] - assert inspected_secret["Spec"]["Labels"]["cluster_name"] == fake_cluster.name - - # check update of secret - fake_secret_file.write_text("some additional stuff in the file") - assert fake_secret_file.stat().st_size != file_original_size - - updated_secret: DockerSecret = await create_or_update_secret( - async_docker_client, - secret_target_file_name, - fake_cluster, - file_path=fake_secret_file, - ) - assert updated_secret.secret_id != created_secret.secret_id - secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(secrets) == 1 - updated_secret = secrets[0] - assert updated_secret != created_secret - - # create a second one - 
secret_target_file_name2 = faker.file_path() - created_secret: DockerSecret = await create_or_update_secret( - async_docker_client, - secret_target_file_name2, - fake_cluster, - secret_data=faker.text(), - ) - secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(secrets) == 2 - - # test deletion - await delete_secrets(async_docker_client, fake_cluster) - secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(secrets) == 0 - - -async def test_get_cluster_information( - docker_swarm, - async_docker_client: aiodocker.Docker, -): - cluster_information = await get_cluster_information(async_docker_client) - assert cluster_information - - # in testing we do have 1 machine, that is... this very host - assert len(cluster_information) == 1 - assert socket.gethostname() in cluster_information - - -@pytest.fixture() -def fake_docker_nodes(faker: Faker) -> list[dict[str, Any]]: - return [ - {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}}, - {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}}, - {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}}, - ] - - -@pytest.fixture() -def mocked_docker_nodes(mocker: MockerFixture, fake_docker_nodes): - mocked_aiodocker_nodes = mocker.patch( - "osparc_gateway_server.backend.utils.aiodocker.nodes.DockerSwarmNodes.list", - autospec=True, - return_value=fake_docker_nodes, - ) - - -async def test_get_empty_node_hostname_rotates_host_names( - fake_docker_nodes: list[dict[str, Any]], - mocked_docker_nodes, - docker_swarm, - async_docker_client: aiodocker.Docker, - fake_cluster: Cluster, -): - available_hostnames = [ - node["Description"]["Hostname"] for node in fake_docker_nodes - ] - num_nodes = len(fake_docker_nodes) - for n in range(num_nodes): - hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster) - assert hostname in available_hostnames - available_hostnames.pop(available_hostnames.index(hostname)) - # let's do it a second time, since it should again go over all the hosts - available_hostnames = [ - node["Description"]["Hostname"] for node in fake_docker_nodes - ] - for n in range(num_nodes): - hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster) - assert hostname in available_hostnames - available_hostnames.pop(available_hostnames.index(hostname)) - - -async def test_get_empty_node_hostname_correctly_checks_services_labels( - docker_swarm: None, - async_docker_client: aiodocker.Docker, - fake_cluster: Cluster, - create_running_service: Callable[[dict[str, str]], Awaitable[dict[str, Any]]], -): - hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster) - assert socket.gethostname() == hostname - - # only services with the required labels shall be used to find if a service is already on a machine - invalid_labels = [ - # no labels - {}, - # only one of the required label - { - "cluster_id": fake_cluster.id, - }, - # only one of the required label - {"type": "worker"}, - ] - await asyncio.gather(*[create_running_service(l) for l in invalid_labels]) - # these services have not the correct labels, so the host is still available - hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster) - assert socket.gethostname() == hostname - - -async def test_get_empty_node_hostname_raises_no_host_found_if_a_service_is_already_running( - docker_swarm: None, - 
async_docker_client: aiodocker.Docker, - fake_cluster: Cluster, - create_running_service: Callable[[dict[str, str]], Awaitable[dict[str, Any]]], -): - # now create a service with the required labels - required_labels = {"cluster_id": fake_cluster.id, "type": "worker"} - await create_running_service(required_labels) - with pytest.raises(NoHostFoundError): - await get_next_empty_node_hostname(async_docker_client, fake_cluster) - - -async def test_get_empty_node_hostname_returns_constraint_if_available( - docker_swarm: None, - async_docker_client: aiodocker.Docker, - fake_cluster: Cluster, - create_docker_service: Callable[ - [dict[str, str], dict[str, Any]], Awaitable[dict[str, Any]] - ], -): - # now create a service with the required labels but that is pending - required_labels = {"cluster_id": fake_cluster.id, "type": "worker"} - await create_docker_service( - required_labels, - { - "Placement": {"Constraints": ["node.hostname==pytest"]}, - "Resources": {"Reservations": {"NanoCPUs": int(500 * 10e9)}}, - }, - ) - await get_next_empty_node_hostname(async_docker_client, fake_cluster) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py index 49ce9523cfe..dacf0ff08b5 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py @@ -4,16 +4,11 @@ # import logging -from collections.abc import AsyncGenerator, Callable -from typing import Annotated -from fastapi import Depends from fastapi.requests import Request from servicelib.fastapi.dependencies import get_app, get_reverse_url_mapper from sqlalchemy.ext.asyncio import AsyncEngine -from ...services.modules.db.repositories._base import BaseRepository - logger = logging.getLogger(__name__) @@ -23,15 +18,6 @@ def get_resource_tracker_db_engine(request: Request) -> AsyncEngine: return engine -def get_repository(repo_type: type[BaseRepository]) -> Callable: - async def _get_repo( - engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], - ) -> AsyncGenerator[BaseRepository, None]: - yield repo_type(db_engine=engine) - - return _get_repo - - assert get_reverse_url_mapper # nosec assert get_app # nosec diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_resource_tracker.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_resource_tracker.py index d7e9a5ca74d..5a382782f9d 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_resource_tracker.py +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_resource_tracker.py @@ -29,9 +29,6 @@ from ...core.settings import ApplicationSettings from ...services import pricing_plans, pricing_units, service_runs -from ...services.modules.db.repositories.resource_tracker import ( - ResourceTrackerRepository, -) from ...services.modules.s3 import get_s3_client router = RPCRouter() @@ -56,7 +53,7 @@ async def get_service_run_page( return await service_runs.list_service_runs( user_id=user_id, product_name=product_name, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, limit=limit, offset=offset, wallet_id=wallet_id, @@ -87,7 +84,7 @@ async def export_service_runs( 
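On the resource-usage-tracker side, dropping get_repository means handlers now receive the AsyncEngine directly through get_resource_tracker_db_engine and forward it to module-level db functions. A schematic sketch of that wiring, using hypothetical names (get_db_engine, example_handler, the /example route) that only mirror the shape shown in this diff:

from typing import Annotated

from fastapi import Depends, FastAPI
from fastapi.requests import Request
from sqlalchemy.ext.asyncio import AsyncEngine

app = FastAPI()


def get_db_engine(request: Request) -> AsyncEngine:
    # same shape as get_resource_tracker_db_engine: assumes the engine was
    # stored on app.state during application setup
    engine: AsyncEngine = request.app.state.engine
    return engine


@app.get("/example")  # hypothetical route, for illustration only
async def example_handler(
    db_engine: Annotated[AsyncEngine, Depends(get_db_engine)],
) -> dict[str, str]:
    # the handler forwards the engine to a module-level accessor instead of
    # constructing a repository object, e.g.:
    #   await credit_transactions_db.sum_credit_transactions_by_product_and_wallet(
    #       db_engine, product_name=product_name, wallet_id=wallet_id
    #   )
    return {"status": "ok"}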
s3_region=s3_settings.S3_REGION, user_id=user_id, product_name=product_name, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, wallet_id=wallet_id, access_all_wallet_usage=access_all_wallet_usage, order_by=order_by, @@ -111,7 +108,7 @@ async def get_osparc_credits_aggregated_usages_page( return await service_runs.get_osparc_credits_aggregated_usages_page( user_id=user_id, product_name=product_name, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, aggregated_by=aggregated_by, time_period=time_period, limit=limit, @@ -134,7 +131,7 @@ async def get_pricing_plan( return await pricing_plans.get_pricing_plan( product_name=product_name, pricing_plan_id=pricing_plan_id, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) @@ -146,7 +143,7 @@ async def list_pricing_plans( ) -> list[PricingPlanGet]: return await pricing_plans.list_pricing_plans_by_product( product_name=product_name, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) @@ -158,7 +155,7 @@ async def create_pricing_plan( ) -> PricingPlanGet: return await pricing_plans.create_pricing_plan( data=data, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) @@ -172,7 +169,7 @@ async def update_pricing_plan( return await pricing_plans.update_pricing_plan( product_name=product_name, data=data, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) @@ -191,7 +188,7 @@ async def get_pricing_unit( product_name=product_name, pricing_plan_id=pricing_plan_id, pricing_unit_id=pricing_unit_id, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) @@ -205,7 +202,7 @@ async def create_pricing_unit( return await pricing_units.create_pricing_unit( product_name=product_name, data=data, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) @@ -219,7 +216,7 @@ async def update_pricing_unit( return await pricing_units.update_pricing_unit( product_name=product_name, data=data, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) @@ -238,7 +235,7 @@ async def list_connected_services_to_pricing_plan_by_pricing_plan( ] = await pricing_plans.list_connected_services_to_pricing_plan_by_pricing_plan( product_name=product_name, pricing_plan_id=pricing_plan_id, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) return output @@ -257,5 +254,5 @@ async def connect_service_to_pricing_plan( pricing_plan_id=pricing_plan_id, service_key=service_key, service_version=service_version, - resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine), + db_engine=app.state.engine, ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py index 256b737d479..fba9332502e 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py +++ 
b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py @@ -10,11 +10,12 @@ ServiceRunStatus, ) from pydantic import NonNegativeInt, PositiveInt +from sqlalchemy.ext.asyncio import AsyncEngine from ..core.settings import ApplicationSettings from ..models.credit_transactions import CreditTransactionCreditsAndStatusUpdate from ..models.service_runs import ServiceRunStoppedAtUpdate -from .modules.db.repositories.resource_tracker import ResourceTrackerRepository +from .modules.db import credit_transactions_db, service_runs_db from .utils import compute_service_run_credit_costs, make_negative _logger = logging.getLogger(__name__) @@ -23,7 +24,7 @@ async def _check_service_heartbeat( - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, base_start_timestamp: datetime, resource_usage_tracker_missed_heartbeat_interval: timedelta, resource_usage_tracker_missed_heartbeat_counter_fail: NonNegativeInt, @@ -55,7 +56,7 @@ async def _check_service_heartbeat( missed_heartbeat_counter, ) await _close_unhealthy_service( - resource_tracker_repo, service_run_id, base_start_timestamp + db_engine, service_run_id, base_start_timestamp ) else: _logger.warning( @@ -63,13 +64,16 @@ async def _check_service_heartbeat( service_run_id, missed_heartbeat_counter, ) - await resource_tracker_repo.update_service_missed_heartbeat_counter( - service_run_id, last_heartbeat_at, missed_heartbeat_counter + await service_runs_db.update_service_missed_heartbeat_counter( + db_engine, + service_run_id=service_run_id, + last_heartbeat_at=last_heartbeat_at, + missed_heartbeat_counter=missed_heartbeat_counter, ) async def _close_unhealthy_service( - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, service_run_id: ServiceRunId, base_start_timestamp: datetime, ): @@ -80,8 +84,8 @@ async def _close_unhealthy_service( service_run_status=ServiceRunStatus.ERROR, service_run_status_msg="Service missed more heartbeats. 
It's considered unhealthy.", ) - running_service = await resource_tracker_repo.update_service_run_stopped_at( - update_service_run_stopped_at + running_service = await service_runs_db.update_service_run_stopped_at( + db_engine, data=update_service_run_stopped_at ) if running_service is None: @@ -108,8 +112,8 @@ async def _close_unhealthy_service( else CreditTransactionStatus.BILLED ), ) - await resource_tracker_repo.update_credit_transaction_credits_and_status( - update_credit_transaction + await credit_transactions_db.update_credit_transaction_credits_and_status( + db_engine, data=update_credit_transaction ) @@ -118,19 +122,18 @@ async def periodic_check_of_running_services_task(app: FastAPI) -> None: # This check runs across all products app_settings: ApplicationSettings = app.state.settings - resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository( - db_engine=app.state.engine - ) + _db_engine = app.state.engine base_start_timestamp = datetime.now(tz=timezone.utc) # Get all current running services (across all products) - total_count: PositiveInt = ( - await resource_tracker_repo.total_service_runs_with_running_status_across_all_products() + total_count: PositiveInt = await service_runs_db.total_service_runs_with_running_status_across_all_products( + _db_engine ) for offset in range(0, total_count, _BATCH_SIZE): - batch_check_services = await resource_tracker_repo.list_service_runs_with_running_status_across_all_products( + batch_check_services = await service_runs_db.list_service_runs_with_running_status_across_all_products( + _db_engine, offset=offset, limit=_BATCH_SIZE, ) @@ -138,7 +141,7 @@ async def periodic_check_of_running_services_task(app: FastAPI) -> None: await asyncio.gather( *( _check_service_heartbeat( - resource_tracker_repo=resource_tracker_repo, + db_engine=_db_engine, base_start_timestamp=base_start_timestamp, resource_usage_tracker_missed_heartbeat_interval=app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC, resource_usage_tracker_missed_heartbeat_counter_fail=app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL, diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py index 0d4362e9748..c58eb76be8a 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py @@ -13,19 +13,18 @@ ) from models_library.wallets import WalletID from servicelib.rabbitmq import RabbitMQClient +from sqlalchemy.ext.asyncio import AsyncEngine -from ..api.rest.dependencies import get_repository +from ..api.rest.dependencies import get_resource_tracker_db_engine from ..models.credit_transactions import CreditTransactionCreate -from .modules.db.repositories.resource_tracker import ResourceTrackerRepository +from .modules.db import credit_transactions_db from .modules.rabbitmq import get_rabbitmq_client_from_request from .utils import sum_credit_transactions_and_publish_to_rabbitmq async def create_credit_transaction( credit_transaction_create_body: CreditTransactionCreateBody, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], rabbitmq_client: Annotated[ 
RabbitMQClient, Depends(get_rabbitmq_client_from_request) ], @@ -47,12 +46,12 @@ async def create_credit_transaction( created_at=credit_transaction_create_body.created_at, last_heartbeat_at=credit_transaction_create_body.created_at, ) - transaction_id = await resource_tracker_repo.create_credit_transaction( - transaction_create + transaction_id = await credit_transactions_db.create_credit_transaction( + db_engine, data=transaction_create ) await sum_credit_transactions_and_publish_to_rabbitmq( - resource_tracker_repo, + db_engine, rabbitmq_client, credit_transaction_create_body.product_name, credit_transaction_create_body.wallet_id, @@ -64,10 +63,8 @@ async def create_credit_transaction( async def sum_credit_transactions_by_product_and_wallet( product_name: ProductName, wallet_id: WalletID, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> WalletTotalCredits: - return await resource_tracker_repo.sum_credit_transactions_by_product_and_wallet( - product_name, wallet_id + return await credit_transactions_db.sum_credit_transactions_by_product_and_wallet( + db_engine, product_name=product_name, wallet_id=wallet_id ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py new file mode 100644 index 00000000000..76a8e9f1dfe --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py @@ -0,0 +1,162 @@ +import logging +from decimal import Decimal +from typing import cast + +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + WalletTotalCredits, +) +from models_library.products import ProductName +from models_library.resource_tracker import CreditTransactionId, CreditTransactionStatus +from models_library.wallets import WalletID +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.utils_repos import transaction_context +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ....exceptions.errors import CreditTransactionNotCreatedDBError +from ....models.credit_transactions import ( + CreditTransactionCreate, + CreditTransactionCreditsAndStatusUpdate, + CreditTransactionCreditsUpdate, +) + +_logger = logging.getLogger(__name__) + + +async def create_credit_transaction( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: CreditTransactionCreate +) -> CreditTransactionId: + async with transaction_context(engine, connection) as conn: + insert_stmt = ( + resource_tracker_credit_transactions.insert() + .values( + product_name=data.product_name, + wallet_id=data.wallet_id, + wallet_name=data.wallet_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, + pricing_unit_cost_id=data.pricing_unit_cost_id, + user_id=data.user_id, + user_email=data.user_email, + osparc_credits=data.osparc_credits, + transaction_status=data.transaction_status, + transaction_classification=data.transaction_classification, + service_run_id=data.service_run_id, + payment_transaction_id=data.payment_transaction_id, + created=data.created_at, + 
last_heartbeat_at=data.last_heartbeat_at, + modified=sa.func.now(), + ) + .returning(resource_tracker_credit_transactions.c.transaction_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise CreditTransactionNotCreatedDBError(data=data) + return cast(CreditTransactionId, row[0]) + + +async def update_credit_transaction_credits( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: CreditTransactionCreditsUpdate +) -> CreditTransactionId | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_credit_transactions.update() + .values( + modified=sa.func.now(), + osparc_credits=data.osparc_credits, + last_heartbeat_at=data.last_heartbeat_at, + ) + .where( + ( + resource_tracker_credit_transactions.c.service_run_id + == data.service_run_id + ) + & ( + resource_tracker_credit_transactions.c.transaction_status + == CreditTransactionStatus.PENDING + ) + & ( + resource_tracker_credit_transactions.c.last_heartbeat_at + <= data.last_heartbeat_at + ) + ) + .returning(resource_tracker_credit_transactions.c.service_run_id) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return cast(CreditTransactionId | None, row[0]) + + +async def update_credit_transaction_credits_and_status( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: CreditTransactionCreditsAndStatusUpdate +) -> CreditTransactionId | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_credit_transactions.update() + .values( + modified=sa.func.now(), + osparc_credits=data.osparc_credits, + transaction_status=data.transaction_status, + ) + .where( + ( + resource_tracker_credit_transactions.c.service_run_id + == data.service_run_id + ) + & ( + resource_tracker_credit_transactions.c.transaction_status + == CreditTransactionStatus.PENDING + ) + ) + .returning(resource_tracker_credit_transactions.c.service_run_id) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return cast(CreditTransactionId | None, row[0]) + + +async def sum_credit_transactions_by_product_and_wallet( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + wallet_id: WalletID +) -> WalletTotalCredits: + async with transaction_context(engine, connection) as conn: + sum_stmt = sa.select( + sa.func.sum(resource_tracker_credit_transactions.c.osparc_credits) + ).where( + (resource_tracker_credit_transactions.c.product_name == product_name) + & (resource_tracker_credit_transactions.c.wallet_id == wallet_id) + & ( + resource_tracker_credit_transactions.c.transaction_status.in_( + [ + CreditTransactionStatus.BILLED, + CreditTransactionStatus.PENDING, + ] + ) + ) + ) + result = await conn.execute(sum_stmt) + row = result.first() + if row is None or row[0] is None: + return WalletTotalCredits( + wallet_id=wallet_id, available_osparc_credits=Decimal(0) + ) + return WalletTotalCredits(wallet_id=wallet_id, available_osparc_credits=row[0]) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py new file mode 100644 index 00000000000..ea6376cc15b --- /dev/null +++ 
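Every function in the new credit_transactions_db module accepts an optional AsyncConnection next to the engine and wraps its statements in transaction_context(engine, connection). A hedged sketch of how a caller could group several of these calls in one transaction, assuming transaction_context reuses a connection when one is passed (its implementation in simcore_postgres_database.utils_repos is not shown here); the helper below is illustrative only:

from simcore_postgres_database.utils_repos import transaction_context
from sqlalchemy.ext.asyncio import AsyncEngine

from simcore_service_resource_usage_tracker.models.credit_transactions import (
    CreditTransactionCreate,
    CreditTransactionCreditsAndStatusUpdate,
)
from simcore_service_resource_usage_tracker.services.modules.db import (
    credit_transactions_db,
)


async def _create_and_close_in_one_transaction(
    engine: AsyncEngine,
    create: CreditTransactionCreate,
    close: CreditTransactionCreditsAndStatusUpdate,
) -> None:
    # Both statements commit or roll back together because they share `conn`.
    async with transaction_context(engine) as conn:
        await credit_transactions_db.create_credit_transaction(
            engine, conn, data=create
        )
        await credit_transactions_db.update_credit_transaction_credits_and_status(
            engine, conn, data=close
        )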
b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py @@ -0,0 +1,668 @@ +import logging + +import sqlalchemy as sa +from models_library.products import ProductName +from models_library.resource_tracker import ( + PricingPlanCreate, + PricingPlanId, + PricingPlanUpdate, + PricingUnitCostId, + PricingUnitId, + PricingUnitWithCostCreate, + PricingUnitWithCostUpdate, +) +from models_library.services import ServiceKey, ServiceVersion +from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import ( + resource_tracker_pricing_plan_to_service, +) +from simcore_postgres_database.models.resource_tracker_pricing_plans import ( + resource_tracker_pricing_plans, +) +from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import ( + resource_tracker_pricing_unit_costs, +) +from simcore_postgres_database.models.resource_tracker_pricing_units import ( + resource_tracker_pricing_units, +) +from simcore_postgres_database.utils_repos import transaction_context +from sqlalchemy.dialects.postgresql import ARRAY, INTEGER +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ....exceptions.errors import ( + PricingPlanAndPricingUnitCombinationDoesNotExistsDBError, + PricingPlanDoesNotExistsDBError, + PricingPlanNotCreatedDBError, + PricingPlanToServiceNotCreatedDBError, + PricingUnitCostDoesNotExistsDBError, + PricingUnitCostNotCreatedDBError, + PricingUnitNotCreatedDBError, +) +from ....models.pricing_plans import ( + PricingPlansDB, + PricingPlansWithServiceDefaultPlanDB, + PricingPlanToServiceDB, +) +from ....models.pricing_unit_costs import PricingUnitCostsDB +from ....models.pricing_units import PricingUnitsDB + +_logger = logging.getLogger(__name__) + + +################################# +# Pricing plans +################################# + + +async def list_active_service_pricing_plans_by_product_and_service( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> list[PricingPlansWithServiceDefaultPlanDB]: + # NOTE: consilidate with utils_services_environmnets.py + def _version(column_or_value): + # converts version value string to array[integer] that can be compared + return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER)) + + async with transaction_context(engine, connection) as conn: + # Firstly find the correct service version + query = ( + sa.select( + resource_tracker_pricing_plan_to_service.c.service_key, + resource_tracker_pricing_plan_to_service.c.service_version, + ) + .select_from( + resource_tracker_pricing_plan_to_service.join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id + == resource_tracker_pricing_plans.c.pricing_plan_id + ), + ) + ) + .where( + ( + _version(resource_tracker_pricing_plan_to_service.c.service_version) + <= _version(service_version) + ) + & ( + resource_tracker_pricing_plan_to_service.c.service_key + == service_key + ) + & (resource_tracker_pricing_plans.c.product_name == product_name) + & (resource_tracker_pricing_plans.c.is_active.is_(True)) + ) + .order_by( + _version( + resource_tracker_pricing_plan_to_service.c.service_version + ).desc() + ) + .limit(1) + ) + result = await conn.execute(query) + row = result.first() + if row is None: + return [] + latest_service_key, latest_service_version = row + # Now choose all pricing plans connected to this service + query = 
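The _version helper above makes PostgreSQL compare service versions numerically by casting the dotted string to an integer array; plain string comparison would order 1.10.0 before 1.9.0. A small Python illustration of the same ordering rule:

# Mirrors what the ARRAY(INTEGER) cast achieves on the database side.
def _version_key(version: str) -> tuple[int, ...]:
    return tuple(int(part) for part in version.split("."))


assert "1.10.0" < "1.9.0"  # lexicographic comparison: wrong order
assert _version_key("1.10.0") > _version_key("1.9.0")  # numeric: correct order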
( + sa.select( + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + resource_tracker_pricing_plan_to_service.c.service_default_plan, + ) + .select_from( + resource_tracker_pricing_plan_to_service.join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id + == resource_tracker_pricing_plans.c.pricing_plan_id + ), + ) + ) + .where( + ( + _version(resource_tracker_pricing_plan_to_service.c.service_version) + == _version(latest_service_version) + ) + & ( + resource_tracker_pricing_plan_to_service.c.service_key + == latest_service_key + ) + & (resource_tracker_pricing_plans.c.product_name == product_name) + & (resource_tracker_pricing_plans.c.is_active.is_(True)) + ) + .order_by(resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc()) + ) + result = await conn.execute(query) + + return [ + PricingPlansWithServiceDefaultPlanDB.model_validate(row) + for row in result.fetchall() + ] + + +async def get_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, +) -> PricingPlansDB: + async with transaction_context(engine, connection) as conn: + select_stmt = sa.select( + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + ).where( + (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) + & (resource_tracker_pricing_plans.c.product_name == product_name) + ) + result = await conn.execute(select_stmt) + row = result.first() + if row is None: + raise PricingPlanDoesNotExistsDBError(pricing_plan_id=pricing_plan_id) + return PricingPlansDB.model_validate(row) + + +async def list_pricing_plans_by_product( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, +) -> list[PricingPlansDB]: + async with transaction_context(engine, connection) as conn: + select_stmt = sa.select( + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + ).where(resource_tracker_pricing_plans.c.product_name == product_name) + result = await conn.execute(select_stmt) + + return [PricingPlansDB.model_validate(row) for row in result.fetchall()] + + +async def create_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: PricingPlanCreate, +) -> PricingPlansDB: + async with transaction_context(engine, connection) as conn: + insert_stmt = ( + resource_tracker_pricing_plans.insert() + .values( + product_name=data.product_name, + display_name=data.display_name, + description=data.description, + classification=data.classification, + is_active=True, + created=sa.func.now(), + modified=sa.func.now(), + 
pricing_plan_key=data.pricing_plan_key, + ) + .returning( + *[ + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + ] + ) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingPlanNotCreatedDBError(data=data) + return PricingPlansDB.model_validate(row) + + +async def update_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + data: PricingPlanUpdate, +) -> PricingPlansDB | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_pricing_plans.update() + .values( + display_name=data.display_name, + description=data.description, + is_active=data.is_active, + modified=sa.func.now(), + ) + .where( + ( + resource_tracker_pricing_plans.c.pricing_plan_id + == data.pricing_plan_id + ) + & (resource_tracker_pricing_plans.c.product_name == product_name) + ) + .returning( + *[ + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + ] + ) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return PricingPlansDB.model_validate(row) + + +################################# +# Pricing plan to service +################################# + + +async def list_connected_services_to_pricing_plan_by_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, +) -> list[PricingPlanToServiceDB]: + async with transaction_context(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id, + resource_tracker_pricing_plan_to_service.c.service_key, + resource_tracker_pricing_plan_to_service.c.service_version, + resource_tracker_pricing_plan_to_service.c.created, + ) + .select_from( + resource_tracker_pricing_plan_to_service.join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id + == resource_tracker_pricing_plans.c.pricing_plan_id + ), + ) + ) + .where( + (resource_tracker_pricing_plans.c.product_name == product_name) + & (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) + ) + .order_by(resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc()) + ) + result = await conn.execute(query) + + return [PricingPlanToServiceDB.model_validate(row) for row in result.fetchall()] + + +async def upsert_service_to_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> PricingPlanToServiceDB: + async with transaction_context(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id, + resource_tracker_pricing_plan_to_service.c.service_key, + resource_tracker_pricing_plan_to_service.c.service_version, + 
resource_tracker_pricing_plan_to_service.c.created, + ) + .select_from( + resource_tracker_pricing_plan_to_service.join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id + == resource_tracker_pricing_plans.c.pricing_plan_id + ), + ) + ) + .where( + (resource_tracker_pricing_plans.c.product_name == product_name) + & (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) + & ( + resource_tracker_pricing_plan_to_service.c.service_key + == service_key + ) + & ( + resource_tracker_pricing_plan_to_service.c.service_version + == service_version + ) + ) + ) + result = await conn.execute(query) + row = result.first() + + if row is not None: + delete_stmt = resource_tracker_pricing_plan_to_service.delete().where( + (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) + & ( + resource_tracker_pricing_plan_to_service.c.service_key + == service_key + ) + & ( + resource_tracker_pricing_plan_to_service.c.service_version + == service_version + ) + ) + await conn.execute(delete_stmt) + + insert_stmt = ( + resource_tracker_pricing_plan_to_service.insert() + .values( + pricing_plan_id=pricing_plan_id, + service_key=service_key, + service_version=service_version, + created=sa.func.now(), + modified=sa.func.now(), + service_default_plan=True, + ) + .returning( + *[ + resource_tracker_pricing_plan_to_service.c.pricing_plan_id, + resource_tracker_pricing_plan_to_service.c.service_key, + resource_tracker_pricing_plan_to_service.c.service_version, + resource_tracker_pricing_plan_to_service.c.created, + ] + ) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingPlanToServiceNotCreatedDBError( + data=f"pricing_plan_id {pricing_plan_id}, service_key {service_key}, service_version {service_version}" + ) + return PricingPlanToServiceDB.model_validate(row) + + +################################# +# Pricing units +################################# + + +def _pricing_units_select_stmt(): + return sa.select( + resource_tracker_pricing_units.c.pricing_unit_id, + resource_tracker_pricing_units.c.pricing_plan_id, + resource_tracker_pricing_units.c.unit_name, + resource_tracker_pricing_units.c.unit_extra_info, + resource_tracker_pricing_units.c.default, + resource_tracker_pricing_units.c.specific_info, + resource_tracker_pricing_units.c.created, + resource_tracker_pricing_units.c.modified, + resource_tracker_pricing_unit_costs.c.cost_per_unit.label( + "current_cost_per_unit" + ), + resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id.label( + "current_cost_per_unit_id" + ), + ) + + +async def list_pricing_units_by_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + pricing_plan_id: PricingPlanId, +) -> list[PricingUnitsDB]: + async with transaction_context(engine, connection) as conn: + query = ( + _pricing_units_select_stmt() + .select_from( + resource_tracker_pricing_units.join( + resource_tracker_pricing_unit_costs, + ( + ( + resource_tracker_pricing_units.c.pricing_plan_id + == resource_tracker_pricing_unit_costs.c.pricing_plan_id + ) + & ( + resource_tracker_pricing_units.c.pricing_unit_id + == resource_tracker_pricing_unit_costs.c.pricing_unit_id + ) + ), + ) + ) + .where( + (resource_tracker_pricing_units.c.pricing_plan_id == pricing_plan_id) + & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None)) + ) + .order_by(resource_tracker_pricing_unit_costs.c.cost_per_unit.asc()) + ) + result = await conn.execute(query) + + return 
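upsert_service_to_pricing_plan above implements the upsert as select, then delete, then insert inside a single transaction_context, which works without any unique constraint on the table. If a unique constraint on (service_key, service_version) existed, a single-statement ON CONFLICT form would be possible; the sketch below is only a rough alternative under that assumption, is not what the PR does, and also skips the product_name check that the join enforces:

import sqlalchemy as sa
from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import (
    resource_tracker_pricing_plan_to_service,
)
from sqlalchemy.dialects.postgresql import insert as pg_insert


def _upsert_stmt_sketch(pricing_plan_id, service_key, service_version):
    insert_stmt = pg_insert(resource_tracker_pricing_plan_to_service).values(
        pricing_plan_id=pricing_plan_id,
        service_key=service_key,
        service_version=service_version,
        created=sa.func.now(),
        modified=sa.func.now(),
        service_default_plan=True,
    )
    return insert_stmt.on_conflict_do_update(
        index_elements=[
            resource_tracker_pricing_plan_to_service.c.service_key,
            resource_tracker_pricing_plan_to_service.c.service_version,
        ],
        set_={
            "pricing_plan_id": insert_stmt.excluded.pricing_plan_id,
            "modified": sa.func.now(),
        },
    )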
[PricingUnitsDB.model_validate(row) for row in result.fetchall()] + + +async def get_valid_pricing_unit( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + pricing_unit_id: PricingUnitId, +) -> PricingUnitsDB: + async with transaction_context(engine, connection) as conn: + query = ( + _pricing_units_select_stmt() + .select_from( + resource_tracker_pricing_units.join( + resource_tracker_pricing_unit_costs, + ( + ( + resource_tracker_pricing_units.c.pricing_plan_id + == resource_tracker_pricing_unit_costs.c.pricing_plan_id + ) + & ( + resource_tracker_pricing_units.c.pricing_unit_id + == resource_tracker_pricing_unit_costs.c.pricing_unit_id + ) + ), + ).join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plans.c.pricing_plan_id + == resource_tracker_pricing_units.c.pricing_plan_id + ), + ) + ) + .where( + (resource_tracker_pricing_units.c.pricing_plan_id == pricing_plan_id) + & (resource_tracker_pricing_units.c.pricing_unit_id == pricing_unit_id) + & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None)) + & (resource_tracker_pricing_plans.c.product_name == product_name) + ) + ) + result = await conn.execute(query) + + row = result.first() + if row is None: + raise PricingPlanAndPricingUnitCombinationDoesNotExistsDBError( + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + product_name=product_name, + ) + return PricingUnitsDB.model_validate(row) + + +async def create_pricing_unit_with_cost( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: PricingUnitWithCostCreate, + pricing_plan_key: str, +) -> tuple[PricingUnitId, PricingUnitCostId]: + async with transaction_context(engine, connection) as conn: + # pricing units table + insert_stmt = ( + resource_tracker_pricing_units.insert() + .values( + pricing_plan_id=data.pricing_plan_id, + unit_name=data.unit_name, + unit_extra_info=data.unit_extra_info.model_dump(), + default=data.default, + specific_info=data.specific_info.model_dump(), + created=sa.func.now(), + modified=sa.func.now(), + ) + .returning(resource_tracker_pricing_units.c.pricing_unit_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingUnitNotCreatedDBError(data=data) + _pricing_unit_id = row[0] + + # pricing unit cost table + insert_stmt = ( + resource_tracker_pricing_unit_costs.insert() + .values( + pricing_plan_id=data.pricing_plan_id, + pricing_plan_key=pricing_plan_key, + pricing_unit_id=_pricing_unit_id, + pricing_unit_name=data.unit_name, + cost_per_unit=data.cost_per_unit, + valid_from=sa.func.now(), + valid_to=None, + created=sa.func.now(), + comment=data.comment, + modified=sa.func.now(), + ) + .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingUnitCostNotCreatedDBError(data=data) + _pricing_unit_cost_id = row[0] + + return (_pricing_unit_id, _pricing_unit_cost_id) + + +async def update_pricing_unit_with_cost( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: PricingUnitWithCostUpdate, + pricing_plan_key: str, +) -> None: + async with transaction_context(engine, connection) as conn: + # pricing units table + update_stmt = ( + resource_tracker_pricing_units.update() + .values( + unit_name=data.unit_name, + unit_extra_info=data.unit_extra_info.model_dump(), + default=data.default, + 
specific_info=data.specific_info.model_dump(), + modified=sa.func.now(), + ) + .where( + resource_tracker_pricing_units.c.pricing_unit_id == data.pricing_unit_id + ) + .returning(resource_tracker_pricing_units.c.pricing_unit_id) + ) + await conn.execute(update_stmt) + + # If price change, then we update pricing unit cost table + if data.pricing_unit_cost_update: + # Firstly we close previous price + update_stmt = ( + resource_tracker_pricing_unit_costs.update() + .values( + valid_to=sa.func.now(), # <-- Closing previous price + modified=sa.func.now(), + ) + .where( + resource_tracker_pricing_unit_costs.c.pricing_unit_id + == data.pricing_unit_id + ) + .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_id) + ) + result = await conn.execute(update_stmt) + + # Then we create a new price + insert_stmt = ( + resource_tracker_pricing_unit_costs.insert() + .values( + pricing_plan_id=data.pricing_plan_id, + pricing_plan_key=pricing_plan_key, + pricing_unit_id=data.pricing_unit_id, + pricing_unit_name=data.unit_name, + cost_per_unit=data.pricing_unit_cost_update.cost_per_unit, + valid_from=sa.func.now(), + valid_to=None, # <-- New price is valid + created=sa.func.now(), + comment=data.pricing_unit_cost_update.comment, + modified=sa.func.now(), + ) + .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingUnitCostNotCreatedDBError(data=data) + + +################################# +# Pricing unit-costs +################################# + + +async def get_pricing_unit_cost_by_id( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + pricing_unit_cost_id: PricingUnitCostId, +) -> PricingUnitCostsDB: + async with transaction_context(engine, connection) as conn: + query = sa.select( + resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id, + resource_tracker_pricing_unit_costs.c.pricing_plan_id, + resource_tracker_pricing_unit_costs.c.pricing_plan_key, + resource_tracker_pricing_unit_costs.c.pricing_unit_id, + resource_tracker_pricing_unit_costs.c.pricing_unit_name, + resource_tracker_pricing_unit_costs.c.cost_per_unit, + resource_tracker_pricing_unit_costs.c.valid_from, + resource_tracker_pricing_unit_costs.c.valid_to, + resource_tracker_pricing_unit_costs.c.created, + resource_tracker_pricing_unit_costs.c.comment, + resource_tracker_pricing_unit_costs.c.modified, + ).where( + resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id + == pricing_unit_cost_id + ) + result = await conn.execute(query) + + row = result.first() + if row is None: + raise PricingUnitCostDoesNotExistsDBError( + pricing_unit_cost_id=pricing_unit_cost_id + ) + return PricingUnitCostsDB.model_validate(row) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/__init__.py deleted file mode 100644 index 93da4003de3..00000000000 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from ._base import BaseRepository - -__all__: tuple[str, ...] 
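update_pricing_unit_with_cost above keeps a full price history: the current cost is the single row with valid_to IS NULL, and a price change closes that row and inserts a new one. A hedged sketch of a query that resolves the cost valid at an arbitrary timestamp, assuming the valid_from/valid_to columns form half-open intervals as maintained by that function; this helper is not part of the PR:

from datetime import datetime

import sqlalchemy as sa
from models_library.resource_tracker import PricingUnitId
from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import (
    resource_tracker_pricing_unit_costs,
)


def _cost_valid_at_stmt(pricing_unit_id: PricingUnitId, at: datetime):
    # The open interval (valid_to IS NULL) is the price currently in force.
    return sa.select(resource_tracker_pricing_unit_costs.c.cost_per_unit).where(
        (resource_tracker_pricing_unit_costs.c.pricing_unit_id == pricing_unit_id)
        & (resource_tracker_pricing_unit_costs.c.valid_from <= at)
        & (
            (resource_tracker_pricing_unit_costs.c.valid_to.is_(None))
            | (resource_tracker_pricing_unit_costs.c.valid_to > at)
        )
    )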
= ("BaseRepository",) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/_base.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/_base.py deleted file mode 100644 index 4a20b37c735..00000000000 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/_base.py +++ /dev/null @@ -1,12 +0,0 @@ -from dataclasses import dataclass - -from sqlalchemy.ext.asyncio import AsyncEngine - - -@dataclass -class BaseRepository: - """ - Repositories are pulled at every request - """ - - db_engine: AsyncEngine diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/resource_tracker.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/resource_tracker.py deleted file mode 100644 index 46439f26e38..00000000000 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/resource_tracker.py +++ /dev/null @@ -1,1382 +0,0 @@ -import logging -from datetime import datetime -from decimal import Decimal -from typing import cast - -import sqlalchemy as sa -from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( - WalletTotalCredits, -) -from models_library.api_schemas_storage import S3BucketName -from models_library.products import ProductName -from models_library.resource_tracker import ( - CreditClassification, - CreditTransactionId, - CreditTransactionStatus, - PricingPlanCreate, - PricingPlanId, - PricingPlanUpdate, - PricingUnitCostId, - PricingUnitId, - PricingUnitWithCostCreate, - PricingUnitWithCostUpdate, - ServiceRunId, - ServiceRunStatus, -) -from models_library.rest_ordering import OrderBy, OrderDirection -from models_library.services import ServiceKey, ServiceVersion -from models_library.users import UserID -from models_library.wallets import WalletID -from pydantic import PositiveInt -from simcore_postgres_database.models.projects_tags import projects_tags -from simcore_postgres_database.models.resource_tracker_credit_transactions import ( - resource_tracker_credit_transactions, -) -from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import ( - resource_tracker_pricing_plan_to_service, -) -from simcore_postgres_database.models.resource_tracker_pricing_plans import ( - resource_tracker_pricing_plans, -) -from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import ( - resource_tracker_pricing_unit_costs, -) -from simcore_postgres_database.models.resource_tracker_pricing_units import ( - resource_tracker_pricing_units, -) -from simcore_postgres_database.models.resource_tracker_service_runs import ( - resource_tracker_service_runs, -) -from simcore_postgres_database.models.tags import tags -from sqlalchemy.dialects.postgresql import ARRAY, INTEGER - -from .....exceptions.errors import ( - CreditTransactionNotCreatedDBError, - PricingPlanAndPricingUnitCombinationDoesNotExistsDBError, - PricingPlanDoesNotExistsDBError, - PricingPlanNotCreatedDBError, - PricingPlanToServiceNotCreatedDBError, - PricingUnitCostDoesNotExistsDBError, - PricingUnitCostNotCreatedDBError, - PricingUnitNotCreatedDBError, - ServiceRunNotCreatedDBError, -) -from .....models.credit_transactions import ( - CreditTransactionCreate, - CreditTransactionCreditsAndStatusUpdate, - 
CreditTransactionCreditsUpdate, -) -from .....models.pricing_plans import ( - PricingPlansDB, - PricingPlansWithServiceDefaultPlanDB, - PricingPlanToServiceDB, -) -from .....models.pricing_unit_costs import PricingUnitCostsDB -from .....models.pricing_units import PricingUnitsDB -from .....models.service_runs import ( - OsparcCreditsAggregatedByServiceKeyDB, - ServiceRunCreate, - ServiceRunDB, - ServiceRunForCheckDB, - ServiceRunLastHeartbeatUpdate, - ServiceRunStoppedAtUpdate, - ServiceRunWithCreditsDB, -) -from ._base import BaseRepository - -_logger = logging.getLogger(__name__) - - -class ResourceTrackerRepository( - BaseRepository -): # pylint: disable=too-many-public-methods - ############### - # Service Run - ############### - - async def create_service_run(self, data: ServiceRunCreate) -> ServiceRunId: - async with self.db_engine.begin() as conn: - insert_stmt = ( - resource_tracker_service_runs.insert() - .values( - product_name=data.product_name, - service_run_id=data.service_run_id, - wallet_id=data.wallet_id, - wallet_name=data.wallet_name, - pricing_plan_id=data.pricing_plan_id, - pricing_unit_id=data.pricing_unit_id, - pricing_unit_cost_id=data.pricing_unit_cost_id, - pricing_unit_cost=data.pricing_unit_cost, - simcore_user_agent=data.simcore_user_agent, - user_id=data.user_id, - user_email=data.user_email, - project_id=f"{data.project_id}", - project_name=data.project_name, - node_id=f"{data.node_id}", - node_name=data.node_name, - parent_project_id=f"{data.parent_project_id}", - root_parent_project_id=f"{data.root_parent_project_id}", - root_parent_project_name=data.root_parent_project_name, - parent_node_id=f"{data.parent_node_id}", - root_parent_node_id=f"{data.root_parent_node_id}", - service_key=data.service_key, - service_version=data.service_version, - service_type=data.service_type, - service_resources=data.service_resources, - service_additional_metadata=data.service_additional_metadata, - started_at=data.started_at, - stopped_at=None, - service_run_status=ServiceRunStatus.RUNNING, - modified=sa.func.now(), - last_heartbeat_at=data.last_heartbeat_at, - ) - .returning(resource_tracker_service_runs.c.service_run_id) - ) - result = await conn.execute(insert_stmt) - row = result.first() - if row is None: - raise ServiceRunNotCreatedDBError(data=data) - return cast(ServiceRunId, row[0]) - - async def update_service_run_last_heartbeat( - self, data: ServiceRunLastHeartbeatUpdate - ) -> ServiceRunDB | None: - async with self.db_engine.begin() as conn: - update_stmt = ( - resource_tracker_service_runs.update() - .values( - modified=sa.func.now(), - last_heartbeat_at=data.last_heartbeat_at, - missed_heartbeat_counter=0, - ) - .where( - ( - resource_tracker_service_runs.c.service_run_id - == data.service_run_id - ) - & ( - resource_tracker_service_runs.c.service_run_status - == ServiceRunStatus.RUNNING - ) - & ( - resource_tracker_service_runs.c.last_heartbeat_at - <= data.last_heartbeat_at - ) - ) - .returning(sa.literal_column("*")) - ) - result = await conn.execute(update_stmt) - row = result.first() - if row is None: - return None - return ServiceRunDB.model_validate(row) - - async def update_service_run_stopped_at( - self, data: ServiceRunStoppedAtUpdate - ) -> ServiceRunDB | None: - async with self.db_engine.begin() as conn: - update_stmt = ( - resource_tracker_service_runs.update() - .values( - modified=sa.func.now(), - stopped_at=data.stopped_at, - service_run_status=data.service_run_status, - service_run_status_msg=data.service_run_status_msg, - ) - .where( - ( - 
resource_tracker_service_runs.c.service_run_id - == data.service_run_id - ) - & ( - resource_tracker_service_runs.c.service_run_status - == ServiceRunStatus.RUNNING - ) - ) - .returning(sa.literal_column("*")) - ) - result = await conn.execute(update_stmt) - row = result.first() - if row is None: - return None - return ServiceRunDB.model_validate(row) - - async def get_service_run_by_id( - self, service_run_id: ServiceRunId - ) -> ServiceRunDB | None: - async with self.db_engine.begin() as conn: - stmt = sa.select(resource_tracker_service_runs).where( - resource_tracker_service_runs.c.service_run_id == service_run_id - ) - result = await conn.execute(stmt) - row = result.first() - if row is None: - return None - return ServiceRunDB.model_validate(row) - - _project_tags_subquery = ( - sa.select( - projects_tags.c.project_uuid_for_rut, - sa.func.array_agg(tags.c.name).label("project_tags"), - ) - .select_from(projects_tags.join(tags, projects_tags.c.tag_id == tags.c.id)) - .group_by(projects_tags.c.project_uuid_for_rut) - ).subquery("project_tags_subquery") - - async def list_service_runs_by_product_and_user_and_wallet( - self, - product_name: ProductName, - *, - user_id: UserID | None, - wallet_id: WalletID | None, - offset: int, - limit: int, - service_run_status: ServiceRunStatus | None = None, - started_from: datetime | None = None, - started_until: datetime | None = None, - order_by: OrderBy | None = None, - ) -> list[ServiceRunWithCreditsDB]: - async with self.db_engine.begin() as conn: - query = ( - sa.select( - resource_tracker_service_runs.c.product_name, - resource_tracker_service_runs.c.service_run_id, - resource_tracker_service_runs.c.wallet_id, - resource_tracker_service_runs.c.wallet_name, - resource_tracker_service_runs.c.pricing_plan_id, - resource_tracker_service_runs.c.pricing_unit_id, - resource_tracker_service_runs.c.pricing_unit_cost_id, - resource_tracker_service_runs.c.pricing_unit_cost, - resource_tracker_service_runs.c.user_id, - resource_tracker_service_runs.c.user_email, - resource_tracker_service_runs.c.project_id, - resource_tracker_service_runs.c.project_name, - resource_tracker_service_runs.c.node_id, - resource_tracker_service_runs.c.node_name, - resource_tracker_service_runs.c.parent_project_id, - resource_tracker_service_runs.c.root_parent_project_id, - resource_tracker_service_runs.c.root_parent_project_name, - resource_tracker_service_runs.c.parent_node_id, - resource_tracker_service_runs.c.root_parent_node_id, - resource_tracker_service_runs.c.service_key, - resource_tracker_service_runs.c.service_version, - resource_tracker_service_runs.c.service_type, - resource_tracker_service_runs.c.service_resources, - resource_tracker_service_runs.c.started_at, - resource_tracker_service_runs.c.stopped_at, - resource_tracker_service_runs.c.service_run_status, - resource_tracker_service_runs.c.modified, - resource_tracker_service_runs.c.last_heartbeat_at, - resource_tracker_service_runs.c.service_run_status_msg, - resource_tracker_service_runs.c.missed_heartbeat_counter, - resource_tracker_credit_transactions.c.osparc_credits, - resource_tracker_credit_transactions.c.transaction_status, - sa.func.coalesce( - self._project_tags_subquery.c.project_tags, - sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)), - ).label("project_tags"), - ) - .select_from( - resource_tracker_service_runs.join( - resource_tracker_credit_transactions, - ( - resource_tracker_service_runs.c.product_name - == resource_tracker_credit_transactions.c.product_name - ) - & ( - 
resource_tracker_service_runs.c.service_run_id - == resource_tracker_credit_transactions.c.service_run_id - ), - isouter=True, - ).join( - self._project_tags_subquery, - resource_tracker_service_runs.c.project_id - == self._project_tags_subquery.c.project_uuid_for_rut, - isouter=True, - ) - ) - .where(resource_tracker_service_runs.c.product_name == product_name) - .offset(offset) - .limit(limit) - ) - - if user_id: - query = query.where(resource_tracker_service_runs.c.user_id == user_id) - if wallet_id: - query = query.where( - resource_tracker_service_runs.c.wallet_id == wallet_id - ) - if service_run_status: - query = query.where( - resource_tracker_service_runs.c.service_run_status - == service_run_status - ) - if started_from: - query = query.where( - sa.func.DATE(resource_tracker_service_runs.c.started_at) - >= started_from.date() - ) - if started_until: - query = query.where( - sa.func.DATE(resource_tracker_service_runs.c.started_at) - <= started_until.date() - ) - - if order_by: - if order_by.direction == OrderDirection.ASC: - query = query.order_by(sa.asc(order_by.field)) - else: - query = query.order_by(sa.desc(order_by.field)) - else: - # Default ordering - query = query.order_by( - resource_tracker_service_runs.c.started_at.desc() - ) - - result = await conn.execute(query) - - return [ - ServiceRunWithCreditsDB.model_validate(row) for row in result.fetchall() - ] - - async def get_osparc_credits_aggregated_by_service( - self, - product_name: ProductName, - *, - user_id: UserID | None, - wallet_id: WalletID, - offset: int, - limit: int, - started_from: datetime | None = None, - started_until: datetime | None = None, - ) -> tuple[int, list[OsparcCreditsAggregatedByServiceKeyDB]]: - async with self.db_engine.begin() as conn: - base_query = ( - sa.select( - resource_tracker_service_runs.c.service_key, - sa.func.SUM( - resource_tracker_credit_transactions.c.osparc_credits - ).label("osparc_credits"), - sa.func.SUM( - sa.func.round( - ( - sa.func.extract( - "epoch", - resource_tracker_service_runs.c.stopped_at, - ) - - sa.func.extract( - "epoch", - resource_tracker_service_runs.c.started_at, - ) - ) - / 3600, - 2, - ) - ).label("running_time_in_hours"), - ) - .select_from( - resource_tracker_service_runs.join( - resource_tracker_credit_transactions, - ( - resource_tracker_service_runs.c.product_name - == resource_tracker_credit_transactions.c.product_name - ) - & ( - resource_tracker_service_runs.c.service_run_id - == resource_tracker_credit_transactions.c.service_run_id - ), - isouter=True, - ) - ) - .where( - (resource_tracker_service_runs.c.product_name == product_name) - & ( - resource_tracker_credit_transactions.c.transaction_status - == CreditTransactionStatus.BILLED - ) - & ( - resource_tracker_credit_transactions.c.transaction_classification - == CreditClassification.DEDUCT_SERVICE_RUN - ) - & (resource_tracker_credit_transactions.c.wallet_id == wallet_id) - ) - .group_by(resource_tracker_service_runs.c.service_key) - ) - - if user_id: - base_query = base_query.where( - resource_tracker_service_runs.c.user_id == user_id - ) - if started_from: - base_query = base_query.where( - sa.func.DATE(resource_tracker_service_runs.c.started_at) - >= started_from.date() - ) - if started_until: - base_query = base_query.where( - sa.func.DATE(resource_tracker_service_runs.c.started_at) - <= started_until.date() - ) - - subquery = base_query.subquery() - count_query = sa.select(sa.func.count()).select_from(subquery) - count_result = await conn.execute(count_query) - - # Default ordering and 
pagination - list_query = ( - base_query.order_by(resource_tracker_service_runs.c.service_key.asc()) - .offset(offset) - .limit(limit) - ) - list_result = await conn.execute(list_query) - - return ( - cast(int, count_result.scalar()), - [ - OsparcCreditsAggregatedByServiceKeyDB.model_validate(row) - for row in list_result.fetchall() - ], - ) - - async def export_service_runs_table_to_s3( - self, - product_name: ProductName, - s3_bucket_name: S3BucketName, - s3_key: str, - s3_region: str, - *, - user_id: UserID | None, - wallet_id: WalletID | None, - started_from: datetime | None = None, - started_until: datetime | None = None, - order_by: OrderBy | None = None, - ): - async with self.db_engine.begin() as conn: - query = ( - sa.select( - resource_tracker_service_runs.c.product_name, - resource_tracker_service_runs.c.service_run_id, - resource_tracker_service_runs.c.wallet_name, - resource_tracker_service_runs.c.user_email, - resource_tracker_service_runs.c.root_parent_project_name.label( - "project_name" - ), - resource_tracker_service_runs.c.node_name, - resource_tracker_service_runs.c.service_key, - resource_tracker_service_runs.c.service_version, - resource_tracker_service_runs.c.service_type, - resource_tracker_service_runs.c.started_at, - resource_tracker_service_runs.c.stopped_at, - resource_tracker_credit_transactions.c.osparc_credits, - resource_tracker_credit_transactions.c.transaction_status, - sa.func.coalesce( - self._project_tags_subquery.c.project_tags, - sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)), - ).label("project_tags"), - ) - .select_from( - resource_tracker_service_runs.join( - resource_tracker_credit_transactions, - resource_tracker_service_runs.c.service_run_id - == resource_tracker_credit_transactions.c.service_run_id, - isouter=True, - ).join( - self._project_tags_subquery, - resource_tracker_service_runs.c.project_id - == self._project_tags_subquery.c.project_uuid_for_rut, - isouter=True, - ) - ) - .where(resource_tracker_service_runs.c.product_name == product_name) - ) - - if user_id: - query = query.where(resource_tracker_service_runs.c.user_id == user_id) - if wallet_id: - query = query.where( - resource_tracker_service_runs.c.wallet_id == wallet_id - ) - if started_from: - query = query.where( - sa.func.DATE(resource_tracker_service_runs.c.started_at) - >= started_from.date() - ) - if started_until: - query = query.where( - sa.func.DATE(resource_tracker_service_runs.c.started_at) - <= started_until.date() - ) - - if order_by: - if order_by.direction == OrderDirection.ASC: - query = query.order_by(sa.asc(order_by.field)) - else: - query = query.order_by(sa.desc(order_by.field)) - else: - # Default ordering - query = query.order_by( - resource_tracker_service_runs.c.started_at.desc() - ) - - compiled_query = ( - str(query.compile(compile_kwargs={"literal_binds": True})) - .replace("\n", "") - .replace("'", "''") - ) - - result = await conn.execute( - sa.DDL( - f""" - SELECT * from aws_s3.query_export_to_s3('{compiled_query}', - aws_commons.create_s3_uri('{s3_bucket_name}', '{s3_key}', '{s3_region}'), 'format csv, HEADER true'); - """ # noqa: S608 - ) - ) - row = result.first() - assert row - _logger.info( - "Rows uploaded %s, Files uploaded %s, Bytes uploaded %s", - row[0], - row[1], - row[2], - ) - - async def total_service_runs_by_product_and_user_and_wallet( - self, - product_name: ProductName, - *, - user_id: UserID | None, - wallet_id: WalletID | None, - service_run_status: ServiceRunStatus | None = None, - started_from: datetime | None = None, - 
started_until: datetime | None = None, - ) -> PositiveInt: - async with self.db_engine.begin() as conn: - query = ( - sa.select(sa.func.count()) - .select_from(resource_tracker_service_runs) - .where(resource_tracker_service_runs.c.product_name == product_name) - ) - - if user_id: - query = query.where(resource_tracker_service_runs.c.user_id == user_id) - if wallet_id: - query = query.where( - resource_tracker_service_runs.c.wallet_id == wallet_id - ) - if started_from: - query = query.where( - sa.func.DATE(resource_tracker_service_runs.c.started_at) - >= started_from.date() - ) - if started_until: - query = query.where( - sa.func.DATE(resource_tracker_service_runs.c.started_at) - <= started_until.date() - ) - if service_run_status: - query = query.where( - resource_tracker_service_runs.c.service_run_status - == service_run_status - ) - - result = await conn.execute(query) - row = result.first() - return cast(PositiveInt, row[0]) if row else 0 - - ### For Background check purpose: - - async def list_service_runs_with_running_status_across_all_products( - self, - *, - offset: int, - limit: int, - ) -> list[ServiceRunForCheckDB]: - async with self.db_engine.begin() as conn: - query = ( - sa.select( - resource_tracker_service_runs.c.service_run_id, - resource_tracker_service_runs.c.last_heartbeat_at, - resource_tracker_service_runs.c.missed_heartbeat_counter, - resource_tracker_service_runs.c.modified, - ) - .where( - resource_tracker_service_runs.c.service_run_status - == ServiceRunStatus.RUNNING - ) - .order_by(resource_tracker_service_runs.c.started_at.desc()) # NOTE: - .offset(offset) - .limit(limit) - ) - result = await conn.execute(query) - - return [ServiceRunForCheckDB.model_validate(row) for row in result.fetchall()] - - async def total_service_runs_with_running_status_across_all_products( - self, - ) -> PositiveInt: - async with self.db_engine.begin() as conn: - query = ( - sa.select(sa.func.count()) - .select_from(resource_tracker_service_runs) - .where( - resource_tracker_service_runs.c.service_run_status - == ServiceRunStatus.RUNNING - ) - ) - result = await conn.execute(query) - row = result.first() - return cast(PositiveInt, row[0]) if row else 0 - - async def update_service_missed_heartbeat_counter( - self, - service_run_id: ServiceRunId, - last_heartbeat_at: datetime, - missed_heartbeat_counter: int, - ) -> ServiceRunDB | None: - async with self.db_engine.begin() as conn: - update_stmt = ( - resource_tracker_service_runs.update() - .values( - modified=sa.func.now(), - missed_heartbeat_counter=missed_heartbeat_counter, - ) - .where( - (resource_tracker_service_runs.c.service_run_id == service_run_id) - & ( - resource_tracker_service_runs.c.service_run_status - == ServiceRunStatus.RUNNING - ) - & ( - resource_tracker_service_runs.c.last_heartbeat_at - == last_heartbeat_at - ) - ) - .returning(sa.literal_column("*")) - ) - - result = await conn.execute(update_stmt) - row = result.first() - if row is None: - return None - return ServiceRunDB.model_validate(row) - - ################################# - # Credit transactions - ################################# - - async def create_credit_transaction( - self, data: CreditTransactionCreate - ) -> CreditTransactionId: - async with self.db_engine.begin() as conn: - insert_stmt = ( - resource_tracker_credit_transactions.insert() - .values( - product_name=data.product_name, - wallet_id=data.wallet_id, - wallet_name=data.wallet_name, - pricing_plan_id=data.pricing_plan_id, - pricing_unit_id=data.pricing_unit_id, - 
pricing_unit_cost_id=data.pricing_unit_cost_id, - user_id=data.user_id, - user_email=data.user_email, - osparc_credits=data.osparc_credits, - transaction_status=data.transaction_status, - transaction_classification=data.transaction_classification, - service_run_id=data.service_run_id, - payment_transaction_id=data.payment_transaction_id, - created=data.created_at, - last_heartbeat_at=data.last_heartbeat_at, - modified=sa.func.now(), - ) - .returning(resource_tracker_credit_transactions.c.transaction_id) - ) - result = await conn.execute(insert_stmt) - row = result.first() - if row is None: - raise CreditTransactionNotCreatedDBError(data=data) - return cast(CreditTransactionId, row[0]) - - async def update_credit_transaction_credits( - self, data: CreditTransactionCreditsUpdate - ) -> CreditTransactionId | None: - async with self.db_engine.begin() as conn: - update_stmt = ( - resource_tracker_credit_transactions.update() - .values( - modified=sa.func.now(), - osparc_credits=data.osparc_credits, - last_heartbeat_at=data.last_heartbeat_at, - ) - .where( - ( - resource_tracker_credit_transactions.c.service_run_id - == data.service_run_id - ) - & ( - resource_tracker_credit_transactions.c.transaction_status - == CreditTransactionStatus.PENDING - ) - & ( - resource_tracker_credit_transactions.c.last_heartbeat_at - <= data.last_heartbeat_at - ) - ) - .returning(resource_tracker_credit_transactions.c.service_run_id) - ) - result = await conn.execute(update_stmt) - row = result.first() - if row is None: - return None - return cast(CreditTransactionId | None, row[0]) - - async def update_credit_transaction_credits_and_status( - self, data: CreditTransactionCreditsAndStatusUpdate - ) -> CreditTransactionId | None: - async with self.db_engine.begin() as conn: - update_stmt = ( - resource_tracker_credit_transactions.update() - .values( - modified=sa.func.now(), - osparc_credits=data.osparc_credits, - transaction_status=data.transaction_status, - ) - .where( - ( - resource_tracker_credit_transactions.c.service_run_id - == data.service_run_id - ) - & ( - resource_tracker_credit_transactions.c.transaction_status - == CreditTransactionStatus.PENDING - ) - ) - .returning(resource_tracker_credit_transactions.c.service_run_id) - ) - result = await conn.execute(update_stmt) - row = result.first() - if row is None: - return None - return cast(CreditTransactionId | None, row[0]) - - async def sum_credit_transactions_by_product_and_wallet( - self, product_name: ProductName, wallet_id: WalletID - ) -> WalletTotalCredits: - async with self.db_engine.begin() as conn: - sum_stmt = sa.select( - sa.func.sum(resource_tracker_credit_transactions.c.osparc_credits) - ).where( - (resource_tracker_credit_transactions.c.product_name == product_name) - & (resource_tracker_credit_transactions.c.wallet_id == wallet_id) - & ( - resource_tracker_credit_transactions.c.transaction_status.in_( - [ - CreditTransactionStatus.BILLED, - CreditTransactionStatus.PENDING, - ] - ) - ) - ) - result = await conn.execute(sum_stmt) - row = result.first() - if row is None or row[0] is None: - return WalletTotalCredits( - wallet_id=wallet_id, available_osparc_credits=Decimal(0) - ) - return WalletTotalCredits(wallet_id=wallet_id, available_osparc_credits=row[0]) - - ################################# - # Pricing plans - ################################# - - async def list_active_service_pricing_plans_by_product_and_service( - self, - product_name: ProductName, - service_key: ServiceKey, - service_version: ServiceVersion, - ) -> 
list[PricingPlansWithServiceDefaultPlanDB]: - # NOTE: consilidate with utils_services_environmnets.py - def _version(column_or_value): - # converts version value string to array[integer] that can be compared - return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER)) - - async with self.db_engine.begin() as conn: - # Firstly find the correct service version - query = ( - sa.select( - resource_tracker_pricing_plan_to_service.c.service_key, - resource_tracker_pricing_plan_to_service.c.service_version, - ) - .select_from( - resource_tracker_pricing_plan_to_service.join( - resource_tracker_pricing_plans, - ( - resource_tracker_pricing_plan_to_service.c.pricing_plan_id - == resource_tracker_pricing_plans.c.pricing_plan_id - ), - ) - ) - .where( - ( - _version( - resource_tracker_pricing_plan_to_service.c.service_version - ) - <= _version(service_version) - ) - & ( - resource_tracker_pricing_plan_to_service.c.service_key - == service_key - ) - & (resource_tracker_pricing_plans.c.product_name == product_name) - & (resource_tracker_pricing_plans.c.is_active.is_(True)) - ) - .order_by( - _version( - resource_tracker_pricing_plan_to_service.c.service_version - ).desc() - ) - .limit(1) - ) - result = await conn.execute(query) - row = result.first() - if row is None: - return [] - latest_service_key, latest_service_version = row - # Now choose all pricing plans connected to this service - query = ( - sa.select( - resource_tracker_pricing_plans.c.pricing_plan_id, - resource_tracker_pricing_plans.c.display_name, - resource_tracker_pricing_plans.c.description, - resource_tracker_pricing_plans.c.classification, - resource_tracker_pricing_plans.c.is_active, - resource_tracker_pricing_plans.c.created, - resource_tracker_pricing_plans.c.pricing_plan_key, - resource_tracker_pricing_plan_to_service.c.service_default_plan, - ) - .select_from( - resource_tracker_pricing_plan_to_service.join( - resource_tracker_pricing_plans, - ( - resource_tracker_pricing_plan_to_service.c.pricing_plan_id - == resource_tracker_pricing_plans.c.pricing_plan_id - ), - ) - ) - .where( - ( - _version( - resource_tracker_pricing_plan_to_service.c.service_version - ) - == _version(latest_service_version) - ) - & ( - resource_tracker_pricing_plan_to_service.c.service_key - == latest_service_key - ) - & (resource_tracker_pricing_plans.c.product_name == product_name) - & (resource_tracker_pricing_plans.c.is_active.is_(True)) - ) - .order_by( - resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc() - ) - ) - result = await conn.execute(query) - - return [ - PricingPlansWithServiceDefaultPlanDB.model_validate(row) - for row in result.fetchall() - ] - - async def get_pricing_plan( - self, product_name: ProductName, pricing_plan_id: PricingPlanId - ) -> PricingPlansDB: - async with self.db_engine.begin() as conn: - select_stmt = sa.select( - resource_tracker_pricing_plans.c.pricing_plan_id, - resource_tracker_pricing_plans.c.display_name, - resource_tracker_pricing_plans.c.description, - resource_tracker_pricing_plans.c.classification, - resource_tracker_pricing_plans.c.is_active, - resource_tracker_pricing_plans.c.created, - resource_tracker_pricing_plans.c.pricing_plan_key, - ).where( - (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) - & (resource_tracker_pricing_plans.c.product_name == product_name) - ) - result = await conn.execute(select_stmt) - row = result.first() - if row is None: - raise PricingPlanDoesNotExistsDBError(pricing_plan_id=pricing_plan_id) - return 
PricingPlansDB.model_validate(row) - - async def list_pricing_plans_by_product( - self, product_name: ProductName - ) -> list[PricingPlansDB]: - async with self.db_engine.begin() as conn: - select_stmt = sa.select( - resource_tracker_pricing_plans.c.pricing_plan_id, - resource_tracker_pricing_plans.c.display_name, - resource_tracker_pricing_plans.c.description, - resource_tracker_pricing_plans.c.classification, - resource_tracker_pricing_plans.c.is_active, - resource_tracker_pricing_plans.c.created, - resource_tracker_pricing_plans.c.pricing_plan_key, - ).where(resource_tracker_pricing_plans.c.product_name == product_name) - result = await conn.execute(select_stmt) - - return [PricingPlansDB.model_validate(row) for row in result.fetchall()] - - async def create_pricing_plan(self, data: PricingPlanCreate) -> PricingPlansDB: - async with self.db_engine.begin() as conn: - insert_stmt = ( - resource_tracker_pricing_plans.insert() - .values( - product_name=data.product_name, - display_name=data.display_name, - description=data.description, - classification=data.classification, - is_active=True, - created=sa.func.now(), - modified=sa.func.now(), - pricing_plan_key=data.pricing_plan_key, - ) - .returning( - *[ - resource_tracker_pricing_plans.c.pricing_plan_id, - resource_tracker_pricing_plans.c.display_name, - resource_tracker_pricing_plans.c.description, - resource_tracker_pricing_plans.c.classification, - resource_tracker_pricing_plans.c.is_active, - resource_tracker_pricing_plans.c.created, - resource_tracker_pricing_plans.c.pricing_plan_key, - ] - ) - ) - result = await conn.execute(insert_stmt) - row = result.first() - if row is None: - raise PricingPlanNotCreatedDBError(data=data) - return PricingPlansDB.model_validate(row) - - async def update_pricing_plan( - self, product_name: ProductName, data: PricingPlanUpdate - ) -> PricingPlansDB | None: - async with self.db_engine.begin() as conn: - update_stmt = ( - resource_tracker_pricing_plans.update() - .values( - display_name=data.display_name, - description=data.description, - is_active=data.is_active, - modified=sa.func.now(), - ) - .where( - ( - resource_tracker_pricing_plans.c.pricing_plan_id - == data.pricing_plan_id - ) - & (resource_tracker_pricing_plans.c.product_name == product_name) - ) - .returning( - *[ - resource_tracker_pricing_plans.c.pricing_plan_id, - resource_tracker_pricing_plans.c.display_name, - resource_tracker_pricing_plans.c.description, - resource_tracker_pricing_plans.c.classification, - resource_tracker_pricing_plans.c.is_active, - resource_tracker_pricing_plans.c.created, - resource_tracker_pricing_plans.c.pricing_plan_key, - ] - ) - ) - result = await conn.execute(update_stmt) - row = result.first() - if row is None: - return None - return PricingPlansDB.model_validate(row) - - ################################# - # Pricing plan to service - ################################# - - async def list_connected_services_to_pricing_plan_by_pricing_plan( - self, product_name: ProductName, pricing_plan_id: PricingPlanId - ) -> list[PricingPlanToServiceDB]: - async with self.db_engine.begin() as conn: - query = ( - sa.select( - resource_tracker_pricing_plan_to_service.c.pricing_plan_id, - resource_tracker_pricing_plan_to_service.c.service_key, - resource_tracker_pricing_plan_to_service.c.service_version, - resource_tracker_pricing_plan_to_service.c.created, - ) - .select_from( - resource_tracker_pricing_plan_to_service.join( - resource_tracker_pricing_plans, - ( - resource_tracker_pricing_plan_to_service.c.pricing_plan_id - 
== resource_tracker_pricing_plans.c.pricing_plan_id - ), - ) - ) - .where( - (resource_tracker_pricing_plans.c.product_name == product_name) - & ( - resource_tracker_pricing_plans.c.pricing_plan_id - == pricing_plan_id - ) - ) - .order_by( - resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc() - ) - ) - result = await conn.execute(query) - - return [ - PricingPlanToServiceDB.model_validate(row) for row in result.fetchall() - ] - - async def upsert_service_to_pricing_plan( - self, - product_name: ProductName, - pricing_plan_id: PricingPlanId, - service_key: ServiceKey, - service_version: ServiceVersion, - ) -> PricingPlanToServiceDB: - async with self.db_engine.begin() as conn: - query = ( - sa.select( - resource_tracker_pricing_plan_to_service.c.pricing_plan_id, - resource_tracker_pricing_plan_to_service.c.service_key, - resource_tracker_pricing_plan_to_service.c.service_version, - resource_tracker_pricing_plan_to_service.c.created, - ) - .select_from( - resource_tracker_pricing_plan_to_service.join( - resource_tracker_pricing_plans, - ( - resource_tracker_pricing_plan_to_service.c.pricing_plan_id - == resource_tracker_pricing_plans.c.pricing_plan_id - ), - ) - ) - .where( - (resource_tracker_pricing_plans.c.product_name == product_name) - & ( - resource_tracker_pricing_plans.c.pricing_plan_id - == pricing_plan_id - ) - & ( - resource_tracker_pricing_plan_to_service.c.service_key - == service_key - ) - & ( - resource_tracker_pricing_plan_to_service.c.service_version - == service_version - ) - ) - ) - result = await conn.execute(query) - row = result.first() - - if row is not None: - delete_stmt = resource_tracker_pricing_plan_to_service.delete().where( - ( - resource_tracker_pricing_plans.c.pricing_plan_id - == pricing_plan_id - ) - & ( - resource_tracker_pricing_plan_to_service.c.service_key - == service_key - ) - & ( - resource_tracker_pricing_plan_to_service.c.service_version - == service_version - ) - ) - await conn.execute(delete_stmt) - - insert_stmt = ( - resource_tracker_pricing_plan_to_service.insert() - .values( - pricing_plan_id=pricing_plan_id, - service_key=service_key, - service_version=service_version, - created=sa.func.now(), - modified=sa.func.now(), - service_default_plan=True, - ) - .returning( - *[ - resource_tracker_pricing_plan_to_service.c.pricing_plan_id, - resource_tracker_pricing_plan_to_service.c.service_key, - resource_tracker_pricing_plan_to_service.c.service_version, - resource_tracker_pricing_plan_to_service.c.created, - ] - ) - ) - result = await conn.execute(insert_stmt) - row = result.first() - if row is None: - raise PricingPlanToServiceNotCreatedDBError( - data=f"pricing_plan_id {pricing_plan_id}, service_key {service_key}, service_version {service_version}" - ) - return PricingPlanToServiceDB.model_validate(row) - - ################################# - # Pricing units - ################################# - - @staticmethod - def _pricing_units_select_stmt(): - return sa.select( - resource_tracker_pricing_units.c.pricing_unit_id, - resource_tracker_pricing_units.c.pricing_plan_id, - resource_tracker_pricing_units.c.unit_name, - resource_tracker_pricing_units.c.unit_extra_info, - resource_tracker_pricing_units.c.default, - resource_tracker_pricing_units.c.specific_info, - resource_tracker_pricing_units.c.created, - resource_tracker_pricing_units.c.modified, - resource_tracker_pricing_unit_costs.c.cost_per_unit.label( - "current_cost_per_unit" - ), - resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id.label( - "current_cost_per_unit_id" - 
), - ) - - async def list_pricing_units_by_pricing_plan( - self, - pricing_plan_id: PricingPlanId, - ) -> list[PricingUnitsDB]: - async with self.db_engine.begin() as conn: - query = ( - self._pricing_units_select_stmt() - .select_from( - resource_tracker_pricing_units.join( - resource_tracker_pricing_unit_costs, - ( - ( - resource_tracker_pricing_units.c.pricing_plan_id - == resource_tracker_pricing_unit_costs.c.pricing_plan_id - ) - & ( - resource_tracker_pricing_units.c.pricing_unit_id - == resource_tracker_pricing_unit_costs.c.pricing_unit_id - ) - ), - ) - ) - .where( - ( - resource_tracker_pricing_units.c.pricing_plan_id - == pricing_plan_id - ) - & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None)) - ) - .order_by(resource_tracker_pricing_unit_costs.c.cost_per_unit.asc()) - ) - result = await conn.execute(query) - - return [PricingUnitsDB.model_validate(row) for row in result.fetchall()] - - async def get_valid_pricing_unit( - self, - product_name: ProductName, - pricing_plan_id: PricingPlanId, - pricing_unit_id: PricingUnitId, - ) -> PricingUnitsDB: - async with self.db_engine.begin() as conn: - query = ( - self._pricing_units_select_stmt() - .select_from( - resource_tracker_pricing_units.join( - resource_tracker_pricing_unit_costs, - ( - ( - resource_tracker_pricing_units.c.pricing_plan_id - == resource_tracker_pricing_unit_costs.c.pricing_plan_id - ) - & ( - resource_tracker_pricing_units.c.pricing_unit_id - == resource_tracker_pricing_unit_costs.c.pricing_unit_id - ) - ), - ).join( - resource_tracker_pricing_plans, - ( - resource_tracker_pricing_plans.c.pricing_plan_id - == resource_tracker_pricing_units.c.pricing_plan_id - ), - ) - ) - .where( - ( - resource_tracker_pricing_units.c.pricing_plan_id - == pricing_plan_id - ) - & ( - resource_tracker_pricing_units.c.pricing_unit_id - == pricing_unit_id - ) - & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None)) - & (resource_tracker_pricing_plans.c.product_name == product_name) - ) - ) - result = await conn.execute(query) - - row = result.first() - if row is None: - raise PricingPlanAndPricingUnitCombinationDoesNotExistsDBError( - pricing_plan_id=pricing_plan_id, - pricing_unit_id=pricing_unit_id, - product_name=product_name, - ) - return PricingUnitsDB.model_validate(row) - - async def create_pricing_unit_with_cost( - self, data: PricingUnitWithCostCreate, pricing_plan_key: str - ) -> tuple[PricingUnitId, PricingUnitCostId]: - async with self.db_engine.begin() as conn: - # pricing units table - insert_stmt = ( - resource_tracker_pricing_units.insert() - .values( - pricing_plan_id=data.pricing_plan_id, - unit_name=data.unit_name, - unit_extra_info=data.unit_extra_info.model_dump(), - default=data.default, - specific_info=data.specific_info.model_dump(), - created=sa.func.now(), - modified=sa.func.now(), - ) - .returning(resource_tracker_pricing_units.c.pricing_unit_id) - ) - result = await conn.execute(insert_stmt) - row = result.first() - if row is None: - raise PricingUnitNotCreatedDBError(data=data) - _pricing_unit_id = row[0] - - # pricing unit cost table - insert_stmt = ( - resource_tracker_pricing_unit_costs.insert() - .values( - pricing_plan_id=data.pricing_plan_id, - pricing_plan_key=pricing_plan_key, - pricing_unit_id=_pricing_unit_id, - pricing_unit_name=data.unit_name, - cost_per_unit=data.cost_per_unit, - valid_from=sa.func.now(), - valid_to=None, - created=sa.func.now(), - comment=data.comment, - modified=sa.func.now(), - ) - .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id) - ) - 
result = await conn.execute(insert_stmt) - row = result.first() - if row is None: - raise PricingUnitCostNotCreatedDBError(data=data) - _pricing_unit_cost_id = row[0] - - return (_pricing_unit_id, _pricing_unit_cost_id) - - async def update_pricing_unit_with_cost( - self, data: PricingUnitWithCostUpdate, pricing_plan_key: str - ) -> None: - async with self.db_engine.begin() as conn: - # pricing units table - update_stmt = ( - resource_tracker_pricing_units.update() - .values( - unit_name=data.unit_name, - unit_extra_info=data.unit_extra_info.model_dump(), - default=data.default, - specific_info=data.specific_info.model_dump(), - modified=sa.func.now(), - ) - .where( - resource_tracker_pricing_units.c.pricing_unit_id - == data.pricing_unit_id - ) - .returning(resource_tracker_pricing_units.c.pricing_unit_id) - ) - await conn.execute(update_stmt) - - # If price change, then we update pricing unit cost table - if data.pricing_unit_cost_update: - # Firstly we close previous price - update_stmt = ( - resource_tracker_pricing_unit_costs.update() - .values( - valid_to=sa.func.now(), # <-- Closing previous price - modified=sa.func.now(), - ) - .where( - resource_tracker_pricing_unit_costs.c.pricing_unit_id - == data.pricing_unit_id - ) - .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_id) - ) - result = await conn.execute(update_stmt) - - # Then we create a new price - insert_stmt = ( - resource_tracker_pricing_unit_costs.insert() - .values( - pricing_plan_id=data.pricing_plan_id, - pricing_plan_key=pricing_plan_key, - pricing_unit_id=data.pricing_unit_id, - pricing_unit_name=data.unit_name, - cost_per_unit=data.pricing_unit_cost_update.cost_per_unit, - valid_from=sa.func.now(), - valid_to=None, # <-- New price is valid - created=sa.func.now(), - comment=data.pricing_unit_cost_update.comment, - modified=sa.func.now(), - ) - .returning( - resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id - ) - ) - result = await conn.execute(insert_stmt) - row = result.first() - if row is None: - raise PricingUnitCostNotCreatedDBError(data=data) - - ################################# - # Pricing unit-costs - ################################# - - async def get_pricing_unit_cost_by_id( - self, pricing_unit_cost_id: PricingUnitCostId - ) -> PricingUnitCostsDB: - async with self.db_engine.begin() as conn: - query = sa.select( - resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id, - resource_tracker_pricing_unit_costs.c.pricing_plan_id, - resource_tracker_pricing_unit_costs.c.pricing_plan_key, - resource_tracker_pricing_unit_costs.c.pricing_unit_id, - resource_tracker_pricing_unit_costs.c.pricing_unit_name, - resource_tracker_pricing_unit_costs.c.cost_per_unit, - resource_tracker_pricing_unit_costs.c.valid_from, - resource_tracker_pricing_unit_costs.c.valid_to, - resource_tracker_pricing_unit_costs.c.created, - resource_tracker_pricing_unit_costs.c.comment, - resource_tracker_pricing_unit_costs.c.modified, - ).where( - resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id - == pricing_unit_cost_id - ) - result = await conn.execute(query) - - row = result.first() - if row is None: - raise PricingUnitCostDoesNotExistsDBError( - pricing_unit_cost_id=pricing_unit_cost_id - ) - return PricingUnitCostsDB.model_validate(row) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py new file mode 100644 index 
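# A minimal usage sketch (assumptions noted below) of the module-level helpers that the new
# service_runs_db.py introduced after this point exposes: callers pass the AsyncEngine
# directly (e.g. app.state.engine) instead of constructing a ResourceTrackerRepository.
# The wrapper name `log_service_run_status` and the logger wiring are illustrative and not
# part of this diff; the ServiceRunDB field names mirror the resource_tracker_service_runs columns.
import logging

from models_library.resource_tracker import ServiceRunId
from sqlalchemy.ext.asyncio import AsyncEngine

from simcore_service_resource_usage_tracker.services.modules.db import service_runs_db

_logger = logging.getLogger(__name__)


async def log_service_run_status(engine: AsyncEngine, service_run_id: ServiceRunId) -> None:
    # get_service_run_by_id takes the engine positionally and the run id as keyword-only,
    # returning ServiceRunDB | None
    service_run = await service_runs_db.get_service_run_by_id(
        engine, service_run_id=service_run_id
    )
    if service_run is None:
        _logger.warning("service run %s not found", service_run_id)
    else:
        _logger.info(
            "service run %s has status %s",
            service_run.service_run_id,
            service_run.service_run_status,
        )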
00000000000..a4ea563803d --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py @@ -0,0 +1,622 @@ +# pylint: disable=too-many-arguments +import logging +from datetime import datetime +from typing import cast + +import sqlalchemy as sa +from models_library.api_schemas_storage import S3BucketName +from models_library.products import ProductName +from models_library.resource_tracker import ( + CreditClassification, + CreditTransactionStatus, + ServiceRunId, + ServiceRunStatus, +) +from models_library.rest_ordering import OrderBy, OrderDirection +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import PositiveInt +from simcore_postgres_database.models.projects_tags import projects_tags +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) +from simcore_postgres_database.models.tags import tags +from simcore_postgres_database.utils_repos import transaction_context +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ....exceptions.errors import ServiceRunNotCreatedDBError +from ....models.service_runs import ( + OsparcCreditsAggregatedByServiceKeyDB, + ServiceRunCreate, + ServiceRunDB, + ServiceRunForCheckDB, + ServiceRunLastHeartbeatUpdate, + ServiceRunStoppedAtUpdate, + ServiceRunWithCreditsDB, +) + +_logger = logging.getLogger(__name__) + + +async def create_service_run( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: ServiceRunCreate, +) -> ServiceRunId: + async with transaction_context(engine, connection) as conn: + insert_stmt = ( + resource_tracker_service_runs.insert() + .values( + product_name=data.product_name, + service_run_id=data.service_run_id, + wallet_id=data.wallet_id, + wallet_name=data.wallet_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, + pricing_unit_cost_id=data.pricing_unit_cost_id, + pricing_unit_cost=data.pricing_unit_cost, + simcore_user_agent=data.simcore_user_agent, + user_id=data.user_id, + user_email=data.user_email, + project_id=f"{data.project_id}", + project_name=data.project_name, + node_id=f"{data.node_id}", + node_name=data.node_name, + parent_project_id=f"{data.parent_project_id}", + root_parent_project_id=f"{data.root_parent_project_id}", + root_parent_project_name=data.root_parent_project_name, + parent_node_id=f"{data.parent_node_id}", + root_parent_node_id=f"{data.root_parent_node_id}", + service_key=data.service_key, + service_version=data.service_version, + service_type=data.service_type, + service_resources=data.service_resources, + service_additional_metadata=data.service_additional_metadata, + started_at=data.started_at, + stopped_at=None, + service_run_status=ServiceRunStatus.RUNNING, + modified=sa.func.now(), + last_heartbeat_at=data.last_heartbeat_at, + ) + .returning(resource_tracker_service_runs.c.service_run_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise ServiceRunNotCreatedDBError(data=data) + return cast(ServiceRunId, row[0]) + + +async def update_service_run_last_heartbeat( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: ServiceRunLastHeartbeatUpdate, +) -> ServiceRunDB | None: + async with transaction_context(engine, connection) as conn: + update_stmt 
= ( + resource_tracker_service_runs.update() + .values( + modified=sa.func.now(), + last_heartbeat_at=data.last_heartbeat_at, + missed_heartbeat_counter=0, + ) + .where( + (resource_tracker_service_runs.c.service_run_id == data.service_run_id) + & ( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + & ( + resource_tracker_service_runs.c.last_heartbeat_at + <= data.last_heartbeat_at + ) + ) + .returning(sa.literal_column("*")) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return ServiceRunDB.model_validate(row) + + +async def update_service_run_stopped_at( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: ServiceRunStoppedAtUpdate, +) -> ServiceRunDB | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_service_runs.update() + .values( + modified=sa.func.now(), + stopped_at=data.stopped_at, + service_run_status=data.service_run_status, + service_run_status_msg=data.service_run_status_msg, + ) + .where( + (resource_tracker_service_runs.c.service_run_id == data.service_run_id) + & ( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + ) + .returning(sa.literal_column("*")) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return ServiceRunDB.model_validate(row) + + +async def get_service_run_by_id( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + service_run_id: ServiceRunId, +) -> ServiceRunDB | None: + async with transaction_context(engine, connection) as conn: + stmt = sa.select(resource_tracker_service_runs).where( + resource_tracker_service_runs.c.service_run_id == service_run_id + ) + result = await conn.execute(stmt) + row = result.first() + if row is None: + return None + return ServiceRunDB.model_validate(row) + + +_project_tags_subquery = ( + sa.select( + projects_tags.c.project_uuid_for_rut, + sa.func.array_agg(tags.c.name).label("project_tags"), + ) + .select_from(projects_tags.join(tags, projects_tags.c.tag_id == tags.c.id)) + .group_by(projects_tags.c.project_uuid_for_rut) +).subquery("project_tags_subquery") + + +async def list_service_runs_by_product_and_user_and_wallet( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + user_id: UserID | None, + wallet_id: WalletID | None, + offset: int, + limit: int, + service_run_status: ServiceRunStatus | None = None, + started_from: datetime | None = None, + started_until: datetime | None = None, + order_by: OrderBy | None = None, +) -> list[ServiceRunWithCreditsDB]: + async with transaction_context(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_service_runs.c.product_name, + resource_tracker_service_runs.c.service_run_id, + resource_tracker_service_runs.c.wallet_id, + resource_tracker_service_runs.c.wallet_name, + resource_tracker_service_runs.c.pricing_plan_id, + resource_tracker_service_runs.c.pricing_unit_id, + resource_tracker_service_runs.c.pricing_unit_cost_id, + resource_tracker_service_runs.c.pricing_unit_cost, + resource_tracker_service_runs.c.user_id, + resource_tracker_service_runs.c.user_email, + resource_tracker_service_runs.c.project_id, + resource_tracker_service_runs.c.project_name, + resource_tracker_service_runs.c.node_id, + resource_tracker_service_runs.c.node_name, + resource_tracker_service_runs.c.parent_project_id, + 
resource_tracker_service_runs.c.root_parent_project_id, + resource_tracker_service_runs.c.root_parent_project_name, + resource_tracker_service_runs.c.parent_node_id, + resource_tracker_service_runs.c.root_parent_node_id, + resource_tracker_service_runs.c.service_key, + resource_tracker_service_runs.c.service_version, + resource_tracker_service_runs.c.service_type, + resource_tracker_service_runs.c.service_resources, + resource_tracker_service_runs.c.started_at, + resource_tracker_service_runs.c.stopped_at, + resource_tracker_service_runs.c.service_run_status, + resource_tracker_service_runs.c.modified, + resource_tracker_service_runs.c.last_heartbeat_at, + resource_tracker_service_runs.c.service_run_status_msg, + resource_tracker_service_runs.c.missed_heartbeat_counter, + resource_tracker_credit_transactions.c.osparc_credits, + resource_tracker_credit_transactions.c.transaction_status, + sa.func.coalesce( + _project_tags_subquery.c.project_tags, + sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)), + ).label("project_tags"), + ) + .select_from( + resource_tracker_service_runs.join( + resource_tracker_credit_transactions, + ( + resource_tracker_service_runs.c.product_name + == resource_tracker_credit_transactions.c.product_name + ) + & ( + resource_tracker_service_runs.c.service_run_id + == resource_tracker_credit_transactions.c.service_run_id + ), + isouter=True, + ).join( + _project_tags_subquery, + resource_tracker_service_runs.c.project_id + == _project_tags_subquery.c.project_uuid_for_rut, + isouter=True, + ) + ) + .where(resource_tracker_service_runs.c.product_name == product_name) + .offset(offset) + .limit(limit) + ) + + if user_id: + query = query.where(resource_tracker_service_runs.c.user_id == user_id) + if wallet_id: + query = query.where(resource_tracker_service_runs.c.wallet_id == wallet_id) + if service_run_status: + query = query.where( + resource_tracker_service_runs.c.service_run_status == service_run_status + ) + if started_from: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + >= started_from.date() + ) + if started_until: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + <= started_until.date() + ) + + if order_by: + if order_by.direction == OrderDirection.ASC: + query = query.order_by(sa.asc(order_by.field)) + else: + query = query.order_by(sa.desc(order_by.field)) + else: + # Default ordering + query = query.order_by(resource_tracker_service_runs.c.started_at.desc()) + + result = await conn.execute(query) + + return [ServiceRunWithCreditsDB.model_validate(row) for row in result.fetchall()] + + +async def get_osparc_credits_aggregated_by_service( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + user_id: UserID | None, + wallet_id: WalletID, + offset: int, + limit: int, + started_from: datetime | None = None, + started_until: datetime | None = None, +) -> tuple[int, list[OsparcCreditsAggregatedByServiceKeyDB]]: + async with transaction_context(engine, connection) as conn: + base_query = ( + sa.select( + resource_tracker_service_runs.c.service_key, + sa.func.SUM( + resource_tracker_credit_transactions.c.osparc_credits + ).label("osparc_credits"), + sa.func.SUM( + sa.func.round( + ( + sa.func.extract( + "epoch", + resource_tracker_service_runs.c.stopped_at, + ) + - sa.func.extract( + "epoch", + resource_tracker_service_runs.c.started_at, + ) + ) + / 3600, + 2, + ) + ).label("running_time_in_hours"), + ) + .select_from( + 
resource_tracker_service_runs.join( + resource_tracker_credit_transactions, + ( + resource_tracker_service_runs.c.product_name + == resource_tracker_credit_transactions.c.product_name + ) + & ( + resource_tracker_service_runs.c.service_run_id + == resource_tracker_credit_transactions.c.service_run_id + ), + isouter=True, + ) + ) + .where( + (resource_tracker_service_runs.c.product_name == product_name) + & ( + resource_tracker_credit_transactions.c.transaction_status + == CreditTransactionStatus.BILLED + ) + & ( + resource_tracker_credit_transactions.c.transaction_classification + == CreditClassification.DEDUCT_SERVICE_RUN + ) + & (resource_tracker_credit_transactions.c.wallet_id == wallet_id) + ) + .group_by(resource_tracker_service_runs.c.service_key) + ) + + if user_id: + base_query = base_query.where( + resource_tracker_service_runs.c.user_id == user_id + ) + if started_from: + base_query = base_query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + >= started_from.date() + ) + if started_until: + base_query = base_query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + <= started_until.date() + ) + + subquery = base_query.subquery() + count_query = sa.select(sa.func.count()).select_from(subquery) + count_result = await conn.execute(count_query) + + # Default ordering and pagination + list_query = ( + base_query.order_by(resource_tracker_service_runs.c.service_key.asc()) + .offset(offset) + .limit(limit) + ) + list_result = await conn.execute(list_query) + + return ( + cast(int, count_result.scalar()), + [ + OsparcCreditsAggregatedByServiceKeyDB.model_validate(row) + for row in list_result.fetchall() + ], + ) + + +async def export_service_runs_table_to_s3( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + s3_bucket_name: S3BucketName, + s3_key: str, + s3_region: str, + user_id: UserID | None, + wallet_id: WalletID | None, + started_from: datetime | None = None, + started_until: datetime | None = None, + order_by: OrderBy | None = None, +): + async with transaction_context(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_service_runs.c.product_name, + resource_tracker_service_runs.c.service_run_id, + resource_tracker_service_runs.c.wallet_name, + resource_tracker_service_runs.c.user_email, + resource_tracker_service_runs.c.root_parent_project_name.label( + "project_name" + ), + resource_tracker_service_runs.c.node_name, + resource_tracker_service_runs.c.service_key, + resource_tracker_service_runs.c.service_version, + resource_tracker_service_runs.c.service_type, + resource_tracker_service_runs.c.started_at, + resource_tracker_service_runs.c.stopped_at, + resource_tracker_credit_transactions.c.osparc_credits, + resource_tracker_credit_transactions.c.transaction_status, + sa.func.coalesce( + _project_tags_subquery.c.project_tags, + sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)), + ).label("project_tags"), + ) + .select_from( + resource_tracker_service_runs.join( + resource_tracker_credit_transactions, + resource_tracker_service_runs.c.service_run_id + == resource_tracker_credit_transactions.c.service_run_id, + isouter=True, + ).join( + _project_tags_subquery, + resource_tracker_service_runs.c.project_id + == _project_tags_subquery.c.project_uuid_for_rut, + isouter=True, + ) + ) + .where(resource_tracker_service_runs.c.product_name == product_name) + ) + + if user_id: + query = query.where(resource_tracker_service_runs.c.user_id == user_id) + if wallet_id: + query = 
query.where(resource_tracker_service_runs.c.wallet_id == wallet_id) + if started_from: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + >= started_from.date() + ) + if started_until: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + <= started_until.date() + ) + + if order_by: + if order_by.direction == OrderDirection.ASC: + query = query.order_by(sa.asc(order_by.field)) + else: + query = query.order_by(sa.desc(order_by.field)) + else: + # Default ordering + query = query.order_by(resource_tracker_service_runs.c.started_at.desc()) + + compiled_query = ( + str(query.compile(compile_kwargs={"literal_binds": True})) + .replace("\n", "") + .replace("'", "''") + ) + + result = await conn.execute( + sa.DDL( + f""" + SELECT * from aws_s3.query_export_to_s3('{compiled_query}', + aws_commons.create_s3_uri('{s3_bucket_name}', '{s3_key}', '{s3_region}'), 'format csv, HEADER true'); + """ # noqa: S608 + ) + ) + row = result.first() + assert row + _logger.info( + "Rows uploaded %s, Files uploaded %s, Bytes uploaded %s", + row[0], + row[1], + row[2], + ) + + +async def total_service_runs_by_product_and_user_and_wallet( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + user_id: UserID | None, + wallet_id: WalletID | None, + service_run_status: ServiceRunStatus | None = None, + started_from: datetime | None = None, + started_until: datetime | None = None, +) -> PositiveInt: + async with transaction_context(engine, connection) as conn: + query = ( + sa.select(sa.func.count()) + .select_from(resource_tracker_service_runs) + .where(resource_tracker_service_runs.c.product_name == product_name) + ) + + if user_id: + query = query.where(resource_tracker_service_runs.c.user_id == user_id) + if wallet_id: + query = query.where(resource_tracker_service_runs.c.wallet_id == wallet_id) + if started_from: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + >= started_from.date() + ) + if started_until: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + <= started_until.date() + ) + if service_run_status: + query = query.where( + resource_tracker_service_runs.c.service_run_status == service_run_status + ) + + result = await conn.execute(query) + row = result.first() + return cast(PositiveInt, row[0]) if row else 0 + + +### For Background check purpose: + + +async def list_service_runs_with_running_status_across_all_products( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + offset: int, + limit: int, +) -> list[ServiceRunForCheckDB]: + async with transaction_context(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_service_runs.c.service_run_id, + resource_tracker_service_runs.c.last_heartbeat_at, + resource_tracker_service_runs.c.missed_heartbeat_counter, + resource_tracker_service_runs.c.modified, + ) + .where( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + .order_by(resource_tracker_service_runs.c.started_at.desc()) # NOTE: + .offset(offset) + .limit(limit) + ) + result = await conn.execute(query) + + return [ServiceRunForCheckDB.model_validate(row) for row in result.fetchall()] + + +async def total_service_runs_with_running_status_across_all_products( + engine: AsyncEngine, connection: AsyncConnection | None = None +) -> PositiveInt: + async with transaction_context(engine, connection) as conn: + query = ( + sa.select(sa.func.count()) + 
.select_from(resource_tracker_service_runs) + .where( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + ) + result = await conn.execute(query) + row = result.first() + return cast(PositiveInt, row[0]) if row else 0 + + +async def update_service_missed_heartbeat_counter( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + service_run_id: ServiceRunId, + last_heartbeat_at: datetime, + missed_heartbeat_counter: int, +) -> ServiceRunDB | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_service_runs.update() + .values( + modified=sa.func.now(), + missed_heartbeat_counter=missed_heartbeat_counter, + ) + .where( + (resource_tracker_service_runs.c.service_run_id == service_run_id) + & ( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + & ( + resource_tracker_service_runs.c.last_heartbeat_at + == last_heartbeat_at + ) + ) + .returning(sa.literal_column("*")) + ) + + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return ServiceRunDB.model_validate(row) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py index 9c3dc38bef3..ed34c334187 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py @@ -14,12 +14,13 @@ ) from models_library.services import ServiceKey, ServiceVersion from pydantic import TypeAdapter +from sqlalchemy.ext.asyncio import AsyncEngine -from ..api.rest.dependencies import get_repository +from ..api.rest.dependencies import get_resource_tracker_db_engine from ..exceptions.errors import PricingPlanNotFoundForServiceError from ..models.pricing_plans import PricingPlansDB, PricingPlanToServiceDB from ..models.pricing_units import PricingUnitsDB -from .modules.db.repositories.resource_tracker import ResourceTrackerRepository +from .modules.db import pricing_plans_db async def _create_pricing_plan_get( @@ -52,12 +53,15 @@ async def get_service_default_pricing_plan( product_name: ProductName, service_key: ServiceKey, service_version: ServiceVersion, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> PricingPlanGet: - active_service_pricing_plans = await resource_tracker_repo.list_active_service_pricing_plans_by_product_and_service( - product_name, service_key, service_version + active_service_pricing_plans = ( + await pricing_plans_db.list_active_service_pricing_plans_by_product_and_service( + db_engine, + product_name=product_name, + service_key=service_key, + service_version=service_version, + ) ) default_pricing_plan = None @@ -71,10 +75,8 @@ async def get_service_default_pricing_plan( service_key=service_key, service_version=service_version ) - pricing_plan_unit_db = ( - await resource_tracker_repo.list_pricing_units_by_pricing_plan( - pricing_plan_id=default_pricing_plan.pricing_plan_id - ) + pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan( + db_engine, pricing_plan_id=default_pricing_plan.pricing_plan_id ) return await _create_pricing_plan_get(default_pricing_plan, 
pricing_plan_unit_db) @@ -83,14 +85,12 @@ async def get_service_default_pricing_plan( async def list_connected_services_to_pricing_plan_by_pricing_plan( product_name: ProductName, pricing_plan_id: PricingPlanId, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ): output_list: list[ PricingPlanToServiceDB - ] = await resource_tracker_repo.list_connected_services_to_pricing_plan_by_pricing_plan( - product_name=product_name, pricing_plan_id=pricing_plan_id + ] = await pricing_plans_db.list_connected_services_to_pricing_plan_by_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id ) return [ TypeAdapter(PricingPlanToServiceGet).validate_python(item.model_dump()) @@ -103,12 +103,11 @@ async def connect_service_to_pricing_plan( pricing_plan_id: PricingPlanId, service_key: ServiceKey, service_version: ServiceVersion, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> PricingPlanToServiceGet: output: PricingPlanToServiceDB = ( - await resource_tracker_repo.upsert_service_to_pricing_plan( + await pricing_plans_db.upsert_service_to_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id, service_key=service_key, @@ -120,14 +119,12 @@ async def connect_service_to_pricing_plan( async def list_pricing_plans_by_product( product_name: ProductName, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> list[PricingPlanGet]: pricing_plans_list_db: list[ PricingPlansDB - ] = await resource_tracker_repo.list_pricing_plans_by_product( - product_name=product_name + ] = await pricing_plans_db.list_pricing_plans_by_product( + db_engine, product_name=product_name ) return [ PricingPlanGet( @@ -147,32 +144,24 @@ async def list_pricing_plans_by_product( async def get_pricing_plan( product_name: ProductName, pricing_plan_id: PricingPlanId, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> PricingPlanGet: - pricing_plan_db = await resource_tracker_repo.get_pricing_plan( - product_name=product_name, pricing_plan_id=pricing_plan_id + pricing_plan_db = await pricing_plans_db.get_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id ) - pricing_plan_unit_db = ( - await resource_tracker_repo.list_pricing_units_by_pricing_plan( - pricing_plan_id=pricing_plan_db.pricing_plan_id - ) + pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan( + db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id ) return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db) async def create_pricing_plan( data: PricingPlanCreate, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> PricingPlanGet: - pricing_plan_db = await resource_tracker_repo.create_pricing_plan(data=data) - pricing_plan_unit_db = ( - await 
resource_tracker_repo.list_pricing_units_by_pricing_plan( - pricing_plan_id=pricing_plan_db.pricing_plan_id - ) + pricing_plan_db = await pricing_plans_db.create_pricing_plan(db_engine, data=data) + pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan( + db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id ) return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db) @@ -180,24 +169,20 @@ async def create_pricing_plan( async def update_pricing_plan( product_name: ProductName, data: PricingPlanUpdate, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> PricingPlanGet: # Check whether pricing plan exists - pricing_plan_db = await resource_tracker_repo.get_pricing_plan( - product_name=product_name, pricing_plan_id=data.pricing_plan_id + pricing_plan_db = await pricing_plans_db.get_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id ) # Update pricing plan - pricing_plan_updated_db = await resource_tracker_repo.update_pricing_plan( - product_name=product_name, data=data + pricing_plan_updated_db = await pricing_plans_db.update_pricing_plan( + db_engine, product_name=product_name, data=data ) if pricing_plan_updated_db: pricing_plan_db = pricing_plan_updated_db - pricing_plan_unit_db = ( - await resource_tracker_repo.list_pricing_units_by_pricing_plan( - pricing_plan_id=pricing_plan_db.pricing_plan_id - ) + pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan( + db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id ) return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py index f2aee53dd80..0a1e72cad65 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py @@ -11,21 +11,23 @@ PricingUnitWithCostCreate, PricingUnitWithCostUpdate, ) +from sqlalchemy.ext.asyncio import AsyncEngine -from ..api.rest.dependencies import get_repository -from .modules.db.repositories.resource_tracker import ResourceTrackerRepository +from ..api.rest.dependencies import get_resource_tracker_db_engine +from .modules.db import pricing_plans_db async def get_pricing_unit( product_name: ProductName, pricing_plan_id: PricingPlanId, pricing_unit_id: PricingUnitId, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> PricingUnitGet: - pricing_unit = await resource_tracker_repo.get_valid_pricing_unit( - product_name, pricing_plan_id, pricing_unit_id + pricing_unit = await pricing_plans_db.get_valid_pricing_unit( + db_engine, + product_name=product_name, + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, ) return PricingUnitGet( @@ -42,21 +44,22 @@ async def get_pricing_unit( async def create_pricing_unit( product_name: ProductName, data: PricingUnitWithCostCreate, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: 
Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> PricingUnitGet: # Check whether pricing plan exists - pricing_plan_db = await resource_tracker_repo.get_pricing_plan( - product_name=product_name, pricing_plan_id=data.pricing_plan_id + pricing_plan_db = await pricing_plans_db.get_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id ) # Create new pricing unit - pricing_unit_id, _ = await resource_tracker_repo.create_pricing_unit_with_cost( - data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key + pricing_unit_id, _ = await pricing_plans_db.create_pricing_unit_with_cost( + db_engine, data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key ) - pricing_unit = await resource_tracker_repo.get_valid_pricing_unit( - product_name, data.pricing_plan_id, pricing_unit_id + pricing_unit = await pricing_plans_db.get_valid_pricing_unit( + db_engine, + product_name=product_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=pricing_unit_id, ) return PricingUnitGet( pricing_unit_id=pricing_unit.pricing_unit_id, @@ -72,26 +75,30 @@ async def create_pricing_unit( async def update_pricing_unit( product_name: ProductName, data: PricingUnitWithCostUpdate, - resource_tracker_repo: Annotated[ - ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository)) - ], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], ) -> PricingUnitGet: # Check whether pricing unit exists - await resource_tracker_repo.get_valid_pricing_unit( - product_name, data.pricing_plan_id, data.pricing_unit_id + await pricing_plans_db.get_valid_pricing_unit( + db_engine, + product_name=product_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, ) # Get pricing plan - pricing_plan_db = await resource_tracker_repo.get_pricing_plan( - product_name, data.pricing_plan_id + pricing_plan_db = await pricing_plans_db.get_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id ) # Update pricing unit and cost - await resource_tracker_repo.update_pricing_unit_with_cost( - data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key + await pricing_plans_db.update_pricing_unit_with_cost( + db_engine, data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key ) - pricing_unit = await resource_tracker_repo.get_valid_pricing_unit( - product_name, data.pricing_plan_id, data.pricing_unit_id + pricing_unit = await pricing_plans_db.get_valid_pricing_unit( + db_engine, + product_name=product_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, ) return PricingUnitGet( pricing_unit_id=pricing_unit.pricing_unit_id, diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py index 4907c84ecb1..8300ede8283 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py @@ -21,6 +21,7 @@ ) from models_library.services import ServiceType from pydantic import TypeAdapter +from sqlalchemy.ext.asyncio import AsyncEngine from ..models.credit_transactions import ( CreditTransactionCreate, @@ -32,7 +33,7 @@ ServiceRunLastHeartbeatUpdate, ServiceRunStoppedAtUpdate, ) -from 
.modules.db.repositories.resource_tracker import ResourceTrackerRepository +from .modules.db import credit_transactions_db, pricing_plans_db, service_runs_db from .modules.rabbitmq import RabbitMQClient, get_rabbitmq_client from .utils import ( compute_service_run_credit_costs, @@ -53,24 +54,22 @@ async def process_message(app: FastAPI, data: bytes) -> bool: rabbit_message.message_type, rabbit_message.service_run_id, ) - resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository( - db_engine=app.state.engine - ) + _db_engine = app.state.engine rabbitmq_client = get_rabbitmq_client(app) await RABBIT_MSG_TYPE_TO_PROCESS_HANDLER[rabbit_message.message_type]( - resource_tracker_repo, rabbit_message, rabbitmq_client + _db_engine, rabbit_message, rabbitmq_client ) return True async def _process_start_event( - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, msg: RabbitResourceTrackingStartedMessage, rabbitmq_client: RabbitMQClient, ): - service_run_db = await resource_tracker_repo.get_service_run_by_id( - service_run_id=msg.service_run_id + service_run_db = await service_runs_db.get_service_run_by_id( + db_engine, service_run_id=msg.service_run_id ) if service_run_db: # NOTE: After we find out why sometimes RUT recieves multiple start events and fix it, we can change it to log level `error` @@ -90,8 +89,8 @@ async def _process_start_event( ) pricing_unit_cost = None if msg.pricing_unit_cost_id: - pricing_unit_cost_db = await resource_tracker_repo.get_pricing_unit_cost_by_id( - pricing_unit_cost_id=msg.pricing_unit_cost_id + pricing_unit_cost_db = await pricing_plans_db.get_pricing_unit_cost_by_id( + db_engine, pricing_unit_cost_id=msg.pricing_unit_cost_id ) pricing_unit_cost = pricing_unit_cost_db.cost_per_unit @@ -125,7 +124,9 @@ async def _process_start_event( service_run_status=ServiceRunStatus.RUNNING, last_heartbeat_at=msg.created_at, ) - service_run_id = await resource_tracker_repo.create_service_run(create_service_run) + service_run_id = await service_runs_db.create_service_run( + db_engine, data=create_service_run + ) if msg.wallet_id and msg.wallet_name: transaction_create = CreditTransactionCreate( @@ -145,21 +146,23 @@ async def _process_start_event( created_at=msg.created_at, last_heartbeat_at=msg.created_at, ) - await resource_tracker_repo.create_credit_transaction(transaction_create) + await credit_transactions_db.create_credit_transaction( + db_engine, data=transaction_create + ) # Publish wallet total credits to RabbitMQ await sum_credit_transactions_and_publish_to_rabbitmq( - resource_tracker_repo, rabbitmq_client, msg.product_name, msg.wallet_id + db_engine, rabbitmq_client, msg.product_name, msg.wallet_id ) async def _process_heartbeat_event( - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, msg: RabbitResourceTrackingHeartbeatMessage, rabbitmq_client: RabbitMQClient, ): - service_run_db = await resource_tracker_repo.get_service_run_by_id( - service_run_id=msg.service_run_id + service_run_db = await service_runs_db.get_service_run_by_id( + db_engine, service_run_id=msg.service_run_id ) if not service_run_db: _logger.error( @@ -181,8 +184,8 @@ async def _process_heartbeat_event( update_service_run_last_heartbeat = ServiceRunLastHeartbeatUpdate( service_run_id=msg.service_run_id, last_heartbeat_at=msg.created_at ) - running_service = await resource_tracker_repo.update_service_run_last_heartbeat( - update_service_run_last_heartbeat + running_service = await service_runs_db.update_service_run_last_heartbeat( 
+ db_engine, data=update_service_run_last_heartbeat ) if running_service is None: _logger.info("Nothing to update: %s", msg) @@ -201,19 +204,19 @@ async def _process_heartbeat_event( osparc_credits=make_negative(computed_credits), last_heartbeat_at=msg.created_at, ) - await resource_tracker_repo.update_credit_transaction_credits( - update_credit_transaction + await credit_transactions_db.update_credit_transaction_credits( + db_engine, data=update_credit_transaction ) # Publish wallet total credits to RabbitMQ wallet_total_credits = await sum_credit_transactions_and_publish_to_rabbitmq( - resource_tracker_repo, + db_engine, rabbitmq_client, running_service.product_name, running_service.wallet_id, ) if wallet_total_credits.available_osparc_credits < CreditsLimit.OUT_OF_CREDITS: await publish_to_rabbitmq_wallet_credits_limit_reached( - resource_tracker_repo, + db_engine, rabbitmq_client, product_name=running_service.product_name, wallet_id=running_service.wallet_id, @@ -223,12 +226,12 @@ async def _process_heartbeat_event( async def _process_stop_event( - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, msg: RabbitResourceTrackingStoppedMessage, rabbitmq_client: RabbitMQClient, ): - service_run_db = await resource_tracker_repo.get_service_run_by_id( - service_run_id=msg.service_run_id + service_run_db = await service_runs_db.get_service_run_by_id( + db_engine, service_run_id=msg.service_run_id ) if not service_run_db: # NOTE: ANE/MD discussed. When the RUT receives a stop event and has not received before any start or heartbeat event, it probably means that @@ -262,8 +265,8 @@ async def _process_stop_event( service_run_status_msg=_run_status_msg, ) - running_service = await resource_tracker_repo.update_service_run_stopped_at( - update_service_run_stopped_at + running_service = await service_runs_db.update_service_run_stopped_at( + db_engine, data=update_service_run_stopped_at ) if running_service is None: @@ -287,12 +290,12 @@ async def _process_stop_event( else CreditTransactionStatus.NOT_BILLED ), ) - await resource_tracker_repo.update_credit_transaction_credits_and_status( - update_credit_transaction + await credit_transactions_db.update_credit_transaction_credits_and_status( + db_engine, data=update_credit_transaction ) # Publish wallet total credits to RabbitMQ await sum_credit_transactions_and_publish_to_rabbitmq( - resource_tracker_repo, + db_engine, rabbitmq_client, running_service.product_name, running_service.wallet_id, diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py index fff896c8ec0..b4d9127733e 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py @@ -19,9 +19,10 @@ from models_library.users import UserID from models_library.wallets import WalletID from pydantic import AnyUrl, PositiveInt, TypeAdapter +from sqlalchemy.ext.asyncio import AsyncEngine from ..models.service_runs import ServiceRunWithCreditsDB -from .modules.db.repositories.resource_tracker import ResourceTrackerRepository +from .modules.db import service_runs_db _PRESIGNED_LINK_EXPIRATION_SEC = 7200 @@ -29,7 +30,7 @@ async def list_service_runs( user_id: UserID, product_name: ProductName, - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, 
limit: int = 20, offset: int = 0, wallet_id: WalletID | None = None, @@ -45,17 +46,21 @@ async def list_service_runs( # Situation when we want to see all usage of a specific user (ex. for Non billable product) if wallet_id is None and access_all_wallet_usage is False: - total_service_runs: PositiveInt = await resource_tracker_repo.total_service_runs_by_product_and_user_and_wallet( - product_name, - user_id=user_id, - wallet_id=None, - started_from=started_from, - started_until=started_until, + total_service_runs: PositiveInt = ( + await service_runs_db.total_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, + user_id=user_id, + wallet_id=None, + started_from=started_from, + started_until=started_until, + ) ) service_runs_db_model: list[ ServiceRunWithCreditsDB - ] = await resource_tracker_repo.list_service_runs_by_product_and_user_and_wallet( - product_name, + ] = await service_runs_db.list_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, user_id=user_id, wallet_id=None, offset=offset, @@ -66,8 +71,9 @@ async def list_service_runs( ) # Situation when accountant user can see all users usage of the wallet elif wallet_id and access_all_wallet_usage is True: - total_service_runs: PositiveInt = await resource_tracker_repo.total_service_runs_by_product_and_user_and_wallet( # type: ignore[no-redef] - product_name, + total_service_runs: PositiveInt = await service_runs_db.total_service_runs_by_product_and_user_and_wallet( # type: ignore[no-redef] + db_engine, + product_name=product_name, user_id=None, wallet_id=wallet_id, started_from=started_from, @@ -75,8 +81,9 @@ async def list_service_runs( ) service_runs_db_model: list[ # type: ignore[no-redef] ServiceRunWithCreditsDB - ] = await resource_tracker_repo.list_service_runs_by_product_and_user_and_wallet( - product_name, + ] = await service_runs_db.list_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, user_id=None, wallet_id=wallet_id, offset=offset, @@ -87,8 +94,9 @@ async def list_service_runs( ) # Situation when regular user can see only his usage of the wallet elif wallet_id and access_all_wallet_usage is False: - total_service_runs: PositiveInt = await resource_tracker_repo.total_service_runs_by_product_and_user_and_wallet( # type: ignore[no-redef] - product_name, + total_service_runs: PositiveInt = await service_runs_db.total_service_runs_by_product_and_user_and_wallet( # type: ignore[no-redef] + db_engine, + product_name=product_name, user_id=user_id, wallet_id=wallet_id, started_from=started_from, @@ -96,8 +104,9 @@ async def list_service_runs( ) service_runs_db_model: list[ # type: ignore[no-redef] ServiceRunWithCreditsDB - ] = await resource_tracker_repo.list_service_runs_by_product_and_user_and_wallet( - product_name, + ] = await service_runs_db.list_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, user_id=user_id, wallet_id=wallet_id, offset=offset, @@ -147,7 +156,7 @@ async def export_service_runs( s3_region: str, user_id: UserID, product_name: ProductName, - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, wallet_id: WalletID | None = None, access_all_wallet_usage: bool = False, order_by: OrderBy | None = None, @@ -165,7 +174,8 @@ async def export_service_runs( ) # Export CSV to S3 - await resource_tracker_repo.export_service_runs_table_to_s3( + await service_runs_db.export_service_runs_table_to_s3( + db_engine, product_name=product_name, 
s3_bucket_name=s3_bucket_name, s3_key=s3_object_key, @@ -188,7 +198,7 @@ async def export_service_runs( async def get_osparc_credits_aggregated_usages_page( user_id: UserID, product_name: ProductName, - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, aggregated_by: ServicesAggregatedUsagesType, time_period: ServicesAggregatedUsagesTimePeriod, wallet_id: WalletID, @@ -204,7 +214,8 @@ async def get_osparc_credits_aggregated_usages_page( ( count_output_list_db, output_list_db, - ) = await resource_tracker_repo.get_osparc_credits_aggregated_by_service( + ) = await service_runs_db.get_osparc_credits_aggregated_by_service( + db_engine, product_name=product_name, user_id=user_id if access_all_wallet_usage is False else None, wallet_id=wallet_id, diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py index 73aa7416244..6047ac2e904 100644 --- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py @@ -19,8 +19,9 @@ from models_library.wallets import WalletID from pydantic import PositiveInt from servicelib.rabbitmq import RabbitMQClient +from sqlalchemy.ext.asyncio import AsyncEngine -from .modules.db.repositories.resource_tracker import ResourceTrackerRepository +from .modules.db import credit_transactions_db, service_runs_db _logger = logging.getLogger(__name__) @@ -30,15 +31,16 @@ def make_negative(n): async def sum_credit_transactions_and_publish_to_rabbitmq( - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, rabbitmq_client: RabbitMQClient, product_name: ProductName, wallet_id: WalletID, ) -> WalletTotalCredits: wallet_total_credits = ( - await resource_tracker_repo.sum_credit_transactions_by_product_and_wallet( - product_name, - wallet_id, + await credit_transactions_db.sum_credit_transactions_by_product_and_wallet( + db_engine, + product_name=product_name, + wallet_id=wallet_id, ) ) publish_message = WalletCreditsMessage.model_construct( @@ -77,7 +79,7 @@ async def _publish_to_rabbitmq_wallet_credits_limit_reached( async def publish_to_rabbitmq_wallet_credits_limit_reached( - resource_tracker_repo: ResourceTrackerRepository, + db_engine: AsyncEngine, rabbitmq_client: RabbitMQClient, product_name: ProductName, wallet_id: WalletID, @@ -86,8 +88,9 @@ async def publish_to_rabbitmq_wallet_credits_limit_reached( ): # Get all current running services for that wallet total_count: PositiveInt = ( - await resource_tracker_repo.total_service_runs_by_product_and_user_and_wallet( - product_name, + await service_runs_db.total_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, user_id=None, wallet_id=wallet_id, service_run_status=ServiceRunStatus.RUNNING, @@ -95,13 +98,16 @@ async def publish_to_rabbitmq_wallet_credits_limit_reached( ) for offset in range(0, total_count, _BATCH_SIZE): - batch_services = await resource_tracker_repo.list_service_runs_by_product_and_user_and_wallet( - product_name, - user_id=None, - wallet_id=wallet_id, - offset=offset, - limit=_BATCH_SIZE, - service_run_status=ServiceRunStatus.RUNNING, + batch_services = ( + await service_runs_db.list_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, + user_id=None, + wallet_id=wallet_id, + offset=offset, + limit=_BATCH_SIZE, + 
service_run_status=ServiceRunStatus.RUNNING, + ) ) await asyncio.gather( diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_resource_tracker_service_runs__export.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_resource_tracker_service_runs__export.py index 56c9c102df6..44a6ce56016 100644 --- a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_resource_tracker_service_runs__export.py +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_resource_tracker_service_runs__export.py @@ -31,7 +31,7 @@ @pytest.fixture async def mocked_export(mocker: MockerFixture) -> AsyncMock: return mocker.patch( - "simcore_service_resource_usage_tracker.services.service_runs.ResourceTrackerRepository.export_service_runs_table_to_s3", + "simcore_service_resource_usage_tracker.services.service_runs.service_runs_db.export_service_runs_table_to_s3", autospec=True, ) diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py index 35114a3cdf6..8ebe34bbd2d 100644 --- a/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py @@ -23,9 +23,6 @@ from simcore_service_resource_usage_tracker.services.background_task_periodic_heartbeat_check import ( periodic_check_of_running_services_task, ) -from simcore_service_resource_usage_tracker.services.modules.db.repositories.resource_tracker import ( - ResourceTrackerRepository, -) pytest_simcore_core_services_selection = ["postgres", "rabbit"] pytest_simcore_ops_services_selection = [ @@ -132,9 +129,6 @@ async def test_process_event_functions( ): engine = initialized_app.state.engine app_settings: ApplicationSettings = initialized_app.state.settings - resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository( - db_engine=engine - ) for _ in range(app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL): await periodic_check_of_running_services_task(initialized_app) diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py index da321f593f3..57eb9735e68 100644 --- a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py @@ -8,9 +8,6 @@ SimcorePlatformStatus, ) from servicelib.rabbitmq import RabbitMQClient -from simcore_service_resource_usage_tracker.services.modules.db.repositories.resource_tracker import ( - ResourceTrackerRepository, -) from simcore_service_resource_usage_tracker.services.process_message_running_service import ( _process_heartbeat_event, _process_start_event, @@ -43,10 +40,7 @@ async def test_process_event_functions( pricing_unit_id=None, pricing_unit_cost_id=None, ) - resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository( - db_engine=engine - ) - await _process_start_event(resource_tracker_repo, msg, publisher) + await _process_start_event(engine, msg, publisher) output = await assert_service_runs_db_row(postgres_db, msg.service_run_id) assert output.stopped_at is None assert output.service_run_status == "RUNNING" @@ -55,7 +49,7 @@ async def test_process_event_functions( heartbeat_msg = 
RabbitResourceTrackingHeartbeatMessage( service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc) ) - await _process_heartbeat_event(resource_tracker_repo, heartbeat_msg, publisher) + await _process_heartbeat_event(engine, heartbeat_msg, publisher) output = await assert_service_runs_db_row(postgres_db, msg.service_run_id) assert output.stopped_at is None assert output.service_run_status == "RUNNING" @@ -66,7 +60,7 @@ async def test_process_event_functions( created_at=datetime.now(tz=timezone.utc), simcore_platform_status=SimcorePlatformStatus.OK, ) - await _process_stop_event(resource_tracker_repo, stopped_msg, publisher) + await _process_stop_event(engine, stopped_msg, publisher) output = await assert_service_runs_db_row(postgres_db, msg.service_run_id) assert output.stopped_at is not None assert output.service_run_status == "SUCCESS" diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py index 637a2219f94..b29863f0b57 100644 --- a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py @@ -31,9 +31,6 @@ resource_tracker_pricing_units, ) from simcore_postgres_database.models.services import services_meta_data -from simcore_service_resource_usage_tracker.services.modules.db.repositories.resource_tracker import ( - ResourceTrackerRepository, -) from simcore_service_resource_usage_tracker.services.process_message_running_service import ( _process_heartbeat_event, _process_start_event, @@ -207,10 +204,8 @@ async def test_process_event_functions( pricing_unit_id=1, pricing_unit_cost_id=1, ) - resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository( - db_engine=engine - ) - await _process_start_event(resource_tracker_repo, msg, publisher) + + await _process_start_event(engine, msg, publisher) output = await assert_credit_transactions_db_row(postgres_db, msg.service_run_id) assert output.osparc_credits == 0.0 assert output.transaction_status == "PENDING" @@ -222,7 +217,7 @@ async def test_process_event_functions( heartbeat_msg = RabbitResourceTrackingHeartbeatMessage( service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc) ) - await _process_heartbeat_event(resource_tracker_repo, heartbeat_msg, publisher) + await _process_heartbeat_event(engine, heartbeat_msg, publisher) output = await assert_credit_transactions_db_row( postgres_db, msg.service_run_id, modified_at ) @@ -240,7 +235,7 @@ async def test_process_event_functions( created_at=datetime.now(tz=timezone.utc), simcore_platform_status=SimcorePlatformStatus.OK, ) - await _process_stop_event(resource_tracker_repo, stopped_msg, publisher) + await _process_stop_event(engine, stopped_msg, publisher) output = await assert_credit_transactions_db_row( postgres_db, msg.service_run_id, modified_at ) diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py index 5b903cf759d..ccffbc9f42e 100644 --- a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py @@ -31,9 +31,6 @@ 
resource_tracker_pricing_units, ) from simcore_postgres_database.models.services import services_meta_data -from simcore_service_resource_usage_tracker.services.modules.db.repositories.resource_tracker import ( - ResourceTrackerRepository, -) from simcore_service_resource_usage_tracker.services.process_message_running_service import ( _process_heartbeat_event, _process_start_event, @@ -149,10 +146,8 @@ async def test_process_event_functions( pricing_unit_id=1, pricing_unit_cost_id=1, ) - resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository( - db_engine=engine - ) - await _process_start_event(resource_tracker_repo, msg, publisher) + + await _process_start_event(engine, msg, publisher) output = await assert_credit_transactions_db_row(postgres_db, msg.service_run_id) assert output.osparc_credits == 0.0 assert output.transaction_status == "PENDING" @@ -164,7 +159,7 @@ async def test_process_event_functions( heartbeat_msg = RabbitResourceTrackingHeartbeatMessage( service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc) ) - await _process_heartbeat_event(resource_tracker_repo, heartbeat_msg, publisher) + await _process_heartbeat_event(engine, heartbeat_msg, publisher) output = await assert_credit_transactions_db_row( postgres_db, msg.service_run_id, modified_at ) @@ -177,7 +172,7 @@ async def test_process_event_functions( created_at=datetime.now(tz=timezone.utc), simcore_platform_status=SimcorePlatformStatus.OK, ) - await _process_stop_event(resource_tracker_repo, stopped_msg, publisher) + await _process_stop_event(engine, stopped_msg, publisher) output = await assert_credit_transactions_db_row( postgres_db, msg.service_run_id, modified_at ) diff --git a/services/static-webserver/client/source/class/osparc/CookiePolicy.js b/services/static-webserver/client/source/class/osparc/CookiePolicy.js index 7a0327c09f6..378ff902da4 100644 --- a/services/static-webserver/client/source/class/osparc/CookiePolicy.js +++ b/services/static-webserver/client/source/class/osparc/CookiePolicy.js @@ -154,17 +154,15 @@ qx.Class.define("osparc.CookiePolicy", { control = new qx.ui.basic.Label(text).set({ rich : true }); - osparc.store.Support.getLicenseURL() - .then(licenseLink => { - const lbl = control.getValue(); - if (licenseLink) { - const color = qx.theme.manager.Color.getInstance().resolve("text"); - const textLink = `Licensing.`; - control.setValue(lbl + textLink); - } else { - control.setValue(lbl + this.tr("Licensing.")); - } - }); + const licenseLink = osparc.store.Support.getLicenseURL(); + const lbl = control.getValue(); + if (licenseLink) { + const color = qx.theme.manager.Color.getInstance().resolve("text"); + const textLink = `Licensing.`; + control.setValue(lbl + textLink); + } else { + control.setValue(lbl + this.tr("Licensing.")); + } this._add(control, { column: 0, row: 2 diff --git a/services/static-webserver/client/source/class/osparc/auth/Manager.js b/services/static-webserver/client/source/class/osparc/auth/Manager.js index 8f325a0644a..5b5efeef7d0 100644 --- a/services/static-webserver/client/source/class/osparc/auth/Manager.js +++ b/services/static-webserver/client/source/class/osparc/auth/Manager.js @@ -257,15 +257,6 @@ qx.Class.define("osparc.auth.Manager", { this.updateProfile(profile); const role = profile.role.toLowerCase(); osparc.data.Permissions.getInstance().setRole(role); - - this.__fetchStartUpResources(); - }, - - __fetchStartUpResources: function() { - const isDisabled = osparc.utils.DisabledPlugins.isClustersDisabled(); - if (isDisabled 
=== false) { - osparc.data.Resources.get("clusters"); - } }, __logoutUser: function() { diff --git a/services/static-webserver/client/source/class/osparc/cluster/ClusterMiniView.js b/services/static-webserver/client/source/class/osparc/cluster/ClusterMiniView.js deleted file mode 100644 index 271ce645725..00000000000 --- a/services/static-webserver/client/source/class/osparc/cluster/ClusterMiniView.js +++ /dev/null @@ -1,202 +0,0 @@ -/* ************************************************************************ - - osparc - the simcore frontend - - https://osparc.io - - Copyright: - 2022 IT'IS Foundation, https://itis.swiss - - License: - MIT: https://opensource.org/licenses/MIT - - Authors: - * Odei Maiz (odeimaiz) - -************************************************************************ */ - -qx.Class.define("osparc.cluster.ClusterMiniView", { - extend: qx.ui.core.Widget, - - construct: function() { - this.base(arguments); - - const grid = new qx.ui.layout.Grid(2, 2); - this._setLayout(grid); - - this.__listenToClusterDetails(); - - this.set({ - allowGrowY: false, - cursor: "pointer" - }); - this.addListener("tap", () => osparc.cluster.Utils.popUpClustersDetails(this.__clusterId), this); - - const hint = this.__hint = new osparc.ui.hint.Hint(this).set({ - active: false - }); - const showHint = () => hint.show(); - const hideHint = () => hint.exclude(); - this.addListener("mouseover", showHint); - [ - "mouseout", - "tap" - ].forEach(e => this.addListener(e, hideHint)); - }, - - statics: { - GRID_POS: { - CPU: 0, - RAM: 1, - GPU: 2 - } - }, - - members: { - __clusterId: null, - __hint: null, - - setClusterId: function(clusterId) { - const clusters = osparc.cluster.Utils.getInstance(); - if (this.__clusterId !== null) { - clusters.stopFetchingDetails(this.__clusterId); - } - this.__clusterId = clusterId; - if (clusterId !== null) { - clusters.startFetchingDetails(clusterId); - } - }, - - __listenToClusterDetails: function() { - const clusters = osparc.cluster.Utils.getInstance(); - clusters.addListener("clusterDetailsReceived", e => { - const data = e.getData(); - if (this.__clusterId === data.clusterId) { - if ("error" in data) { - this.__detailsCallFailed(); - } else { - const clusterDetails = data.clusterDetails; - this.__updateWorkersDetails(clusterDetails); - } - } - }); - }, - - __showBulb: function(failed) { - this._removeAll(); - - const clusterStatusImage = new qx.ui.basic.Image().set({ - source: "@FontAwesome5Solid/lightbulb/16", - alignY: "middle", - alignX: "center", - paddingLeft: 3, - textColor: failed ? 
"failed-red" : "ready-green" - }); - this._add(clusterStatusImage, { - row: 0, - column: 0 - }); - }, - - __detailsCallFailed: function() { - this.__showBulb(true); - this.__hint.setText(this.tr("Connection failed")); - }, - - __updateWorkersDetails: function(clusterDetails) { - this._removeAll(); - - const workers = clusterDetails.scheduler.workers; - if (Object.keys(workers).length === 0) { - this.__showBulb(false); - this.__hint.setText(this.tr("No workers running at the moment in this cluster")); - return; - } - - const resources = { - cpu: { - metric: "cpu", - usedResource: "CPU", - resource: "CPU", - icon: "@FontAwesome5Solid/microchip/10", - available: 0, - used: 0 - }, - ram: { - metric: "memory", - usedResource: "RAM", - resource: "RAM", - icon: "@MaterialIcons/memory/10", - available: 0, - used: 0 - }, - gpu: { - metric: "gpu", - usedResource: "GPU", - resource: "GPU", - icon: "@FontAwesome5Solid/server/10", - available: 0, - used: 0 - } - }; - Object.keys(resources).forEach(resourceKey => { - const resource = resources[resourceKey]; - osparc.cluster.Utils.accumulateWorkersResources(workers, resource); - }); - this.__updateMiniView(resources); - this.__updateHint(resources); - }, - - __updateMiniView: function(resources) { - Object.keys(resources).forEach((resourceKey, idx) => { - const resourceInfo = resources[resourceKey]; - if (resourceInfo.available === 0) { - return; - } - const relativeUsage = resourceInfo.used / resourceInfo.available; - const icon = new qx.ui.basic.Image(resourceInfo.icon).set({ - textColor: relativeUsage > 0.8 ? "busy-orange" : "text" - }); - this._add(icon, { - row: idx, - column: 0 - }); - const progressBar = new qx.ui.indicator.ProgressBar(resourceInfo.used, resourceInfo.available).set({ - height: 10, - width: 60 - }); - osparc.utils.Utils.hideBorder(progressBar); - // orange > 80% - progressBar.getChildControl("progress").set({ - backgroundColor: relativeUsage > 0.8 ? "busy-orange" : "ready-green" - }); - this._add(progressBar, { - row: idx, - column: 1 - }); - }); - }, - - __updateHint: function(resources) { - let text = ""; - Object.keys(resources).forEach(resourceKey => { - const resourceInfo = resources[resourceKey]; - if (resourceInfo.available === 0) { - return; - } - text += resourceInfo.resource + ": "; - if (resourceKey === "ram") { - text += osparc.utils.Utils.bytesToGB(resourceInfo.used) + "GB / " + osparc.utils.Utils.bytesToGB(resourceInfo.available) + "GB"; - } else { - text += resourceInfo.used + " / " + resourceInfo.available; - } - text += "
"; - }); - this.__hint.setText(text); - } - }, - - destruct: function() { - osparc.cluster.Utils.getInstance().stopFetchingDetails(this.__clusterId); - } -}); diff --git a/services/static-webserver/client/source/class/osparc/cluster/ClusterWorkers.js b/services/static-webserver/client/source/class/osparc/cluster/ClusterWorkers.js deleted file mode 100644 index bcb2638439d..00000000000 --- a/services/static-webserver/client/source/class/osparc/cluster/ClusterWorkers.js +++ /dev/null @@ -1,140 +0,0 @@ -/* ************************************************************************ - - osparc - the simcore frontend - - https://osparc.io - - Copyright: - 2022 IT'IS Foundation, https://itis.swiss - - License: - MIT: https://opensource.org/licenses/MIT - - Authors: - * Odei Maiz (odeimaiz) - -************************************************************************ */ - -qx.Class.define("osparc.cluster.ClusterWorkers", { - extend: qx.ui.core.Widget, - - construct: function() { - this.base(arguments); - - const grid = new qx.ui.layout.Grid(5, 8); - for (let i=0; i { - const worker = clusterDetails.scheduler.workers[workerUrl]; - - const img = new qx.ui.basic.Image().set({ - source: "@FontAwesome5Solid/hdd/24", - toolTipText: worker.name, - textColor: "ready-green", - paddingTop: 50 - }); - this._add(img, { - row, - column: this.self().GRID_POS.ICON - }); - - Object.keys(plots).forEach(plotKey => { - const plotInfo = plots[plotKey]; - const gaugeDatas = osparc.wrapper.Plotly.getDefaultGaugeData(); - const gaugeData = gaugeDatas[0]; - gaugeData.title.text = plotInfo.label.toLocaleString(); - let used = osparc.cluster.Utils.getUsedResourcesAttribute(worker, plotInfo.usedResource); - let available = osparc.cluster.Utils.getAvailableResourcesAttribute(worker, plotInfo.resource); - if (plotKey === "ram") { - used = osparc.utils.Utils.bytesToGB(used); - available = osparc.utils.Utils.bytesToGB(available); - } - if (qx.lang.Type.isNumber(available)) { - // orange > 80% - gaugeData.gauge.steps = [{ - range: [0.8*available, available], - color: qx.theme.manager.Color.getInstance().resolve("busy-orange"), - thickness: 0.5 - }]; - } - if (available === "-") { - gaugeData.value = "-"; - } else { - gaugeData.value = used; - gaugeData.gauge.axis.range[1] = available; - } - const layout = osparc.wrapper.Plotly.getDefaultLayout(); - const plotId = "ClusterDetails_" + plotKey + "-" + row; - const w = parseInt(gridW/Object.keys(plots).length); - const h = parseInt(w*0.75); - // hide plotly toolbar - const config = { - displayModeBar: false - }; - const plot = new osparc.widget.PlotlyWidget(plotId, gaugeDatas, layout, config).set({ - width: w, - height: h - }); - this._add(plot, { - row, - column: plotInfo.column - }); - }); - row++; - }); - } - } -}); diff --git a/services/static-webserver/client/source/class/osparc/cluster/ClustersDetails.js b/services/static-webserver/client/source/class/osparc/cluster/ClustersDetails.js deleted file mode 100644 index 033f8ab2466..00000000000 --- a/services/static-webserver/client/source/class/osparc/cluster/ClustersDetails.js +++ /dev/null @@ -1,109 +0,0 @@ -/* ************************************************************************ - - osparc - the simcore frontend - - https://osparc.io - - Copyright: - 2022 IT'IS Foundation, https://itis.swiss - - License: - MIT: https://opensource.org/licenses/MIT - - Authors: - * Odei Maiz (odeimaiz) - -************************************************************************ */ - -qx.Class.define("osparc.cluster.ClustersDetails", { - extend: 
qx.ui.core.Widget, - - construct: function(selectClusterId) { - this.base(arguments); - - this._setLayout(new qx.ui.layout.VBox(20)); - - if (selectClusterId === undefined) { - selectClusterId = 0; - } - this.__clusterId = selectClusterId; - this.__populateClustersLayout(); - this.__addClusterWorkersLayout(); - this.__startFetchingDetails(); - }, - - members: { - __clustersSelectBox: null, - __clusterId: null, - __clusterWorkers: null, - - __populateClustersLayout: function() { - const clustersLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(10).set({ - alignY: "middle" - })); - - const clustersLabel = new qx.ui.basic.Label(this.tr("Connected clusters")); - clustersLayout.add(clustersLabel); - - const selectBox = this.__clustersSelectBox = new qx.ui.form.SelectBox().set({ - allowGrowX: false - }); - osparc.cluster.Utils.populateClustersSelectBox(selectBox); - selectBox.addListener("changeSelection", e => { - const selection = e.getData(); - if (selection.length) { - const clusterId = selection[0].id; - this.__selectedClusterChanged(clusterId); - } - }, this); - clustersLayout.add(selectBox); - - clustersLayout.add(new qx.ui.core.Spacer(10, null)); - - const clusterStatusLabel = new qx.ui.basic.Label(this.tr("Status:")); - clustersLayout.add(clusterStatusLabel); - - const clusterStatus = this.__clusterStatus = new qx.ui.basic.Image().set({ - source: "@FontAwesome5Solid/lightbulb/16" - }); - clustersLayout.add(clusterStatus); - - this._add(clustersLayout); - - selectBox.getSelectables().forEach(selectable => { - if (selectable.id === this.__clusterId) { - selectBox.setSelection([selectable]); - } - }); - }, - - __addClusterWorkersLayout: function() { - const clusterWorkers = this.__clusterWorkers = new osparc.cluster.ClusterWorkers(); - this._add(clusterWorkers, { - flex: 1 - }); - }, - - __selectedClusterChanged: function(clusterId) { - osparc.cluster.Utils.getInstance().stopFetchingDetails(this.__clusterId); - this.__clusterId = clusterId; - this.__startFetchingDetails(); - }, - - __startFetchingDetails: function() { - const clusters = osparc.cluster.Utils.getInstance(); - clusters.addListener("clusterDetailsReceived", e => { - const data = e.getData(); - if (this.__clusterId === data.clusterId) { - this.__clusterStatus.setTextColor("error" in data ? "failed-red" : "ready-green"); - this.__clusterWorkers.populateWorkersDetails("error" in data ? null : data.clusterDetails); - } - }); - clusters.startFetchingDetails(this.__clusterId); - } - }, - - destruct: function() { - osparc.cluster.Utils.getInstance().stopFetchingDetails(this.__clusterId); - } -}); diff --git a/services/static-webserver/client/source/class/osparc/cluster/Utils.js b/services/static-webserver/client/source/class/osparc/cluster/Utils.js deleted file mode 100644 index 7140bf5cbf1..00000000000 --- a/services/static-webserver/client/source/class/osparc/cluster/Utils.js +++ /dev/null @@ -1,136 +0,0 @@ -/* ************************************************************************ - - osparc - the simcore frontend - - https://osparc.io - - Copyright: - 2022 IT'IS Foundation, https://itis.swiss - - License: - MIT: https://opensource.org/licenses/MIT - - Authors: - * Odei Maiz (odeimaiz) - -************************************************************************ */ - -/** - * Collection of methods for dealing with clusters. 
- * - * *Example* - */ - -qx.Class.define("osparc.cluster.Utils", { - extend: qx.core.Object, - type: "singleton", - - construct: function() { - this.base(arguments); - - this.__clusterIds = []; - }, - - statics: { - popUpClustersDetails: function(clusterId) { - const clusters = new osparc.cluster.ClustersDetails(clusterId); - osparc.ui.window.Window.popUpInWindow(clusters, qx.locale.Manager.tr("Clusters & Workers"), 650, 600); - }, - - getUsedResourcesAttribute: function(worker, attributeKey) { - if (attributeKey in worker["used_resources"]) { - return osparc.utils.Utils.toTwoDecimals(worker["used_resources"][attributeKey]); - } - return "-"; - }, - - getAvailableResourcesAttribute: function(worker, attributeKey) { - if (attributeKey in worker.resources) { - return worker.resources[attributeKey]; - } - return "-"; - }, - - accumulateWorkersResources: function(workers, resource) { - Object.keys(workers).forEach(workerUrl => { - const worker = workers[workerUrl]; - const available = this.getAvailableResourcesAttribute(worker, resource.resource); - if (available === "-") { - return; - } - resource.available += available; - const used = this.getUsedResourcesAttribute(worker, resource.usedResource); - resource.used += used; - }); - }, - - populateClustersSelectBox: function(clustersSelectBox) { - clustersSelectBox.removeAll(); - - const store = osparc.store.Store.getInstance(); - const clusters = store.getClusters(); - if (clusters) { - clusters.forEach(cluster => { - const item = new qx.ui.form.ListItem().set({ - label: cluster["name"], - toolTipText: cluster["type"] + "\n" + cluster["description"], - allowGrowY: false - }); - item.id = cluster["id"]; - clustersSelectBox.add(item); - }); - } - } - }, - - events: { - "clusterDetailsReceived": "qx.event.type.Data" - }, - - members: { - __clusterIds: null, - - __fetchDetails: function(cid) { - const params = { - url: { - cid - } - }; - osparc.data.Resources.get("clusterDetails", params) - .then(clusterDetails => { - this.fireDataEvent("clusterDetailsReceived", { - clusterId: cid, - clusterDetails - }); - }) - .catch(err => { - console.error(err); - this.fireDataEvent("clusterDetailsReceived", { - clusterId: cid, - error: err - }); - }) - .finally(() => { - if (this.__clusterIds.includes(cid)) { - const interval = 10000; - qx.event.Timer.once(() => this.__fetchDetails(cid), this, interval); - } - }); - }, - - startFetchingDetails: function(clusterId) { - const found = this.__clusterIds.includes(clusterId); - this.__clusterIds.push(clusterId); - if (!found) { - this.__fetchDetails(clusterId); - } - }, - - stopFetchingDetails: function(clusterId) { - const idx = this.__clusterIds.indexOf(clusterId); - if (idx > -1) { - this.__clusterIds.splice(idx, 1); - } - } - } -}); diff --git a/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js b/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js index c7ef8f916f2..2eecb230400 100644 --- a/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js +++ b/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js @@ -578,11 +578,6 @@ qx.Class.define("osparc.dashboard.StudyBrowser", { const data = e.getData(); const destWorkspaceId = data["workspaceId"]; const destFolderId = data["folderId"]; - if (destWorkspaceId !== currentWorkspaceId) { - const msg = this.tr("Moving folders to Shared Workspaces are coming soon"); - osparc.FlashMessenger.getInstance().logAs(msg, "WARNING"); - return; - } const moveFolder = () => { 
Promise.all([ this.__moveFolderToWorkspace(folderId, destWorkspaceId), diff --git a/services/static-webserver/client/source/class/osparc/data/Permissions.js b/services/static-webserver/client/source/class/osparc/data/Permissions.js index 5c53b4da76d..0cc74fc9cc0 100644 --- a/services/static-webserver/client/source/class/osparc/data/Permissions.js +++ b/services/static-webserver/client/source/class/osparc/data/Permissions.js @@ -130,7 +130,6 @@ qx.Class.define("osparc.data.Permissions", { "services.all.read", "services.all.reupdate", "services.filePicker.read.all", - "user.clusters.create", "user.wallets.create", "study.everyone.share", "study.snapshot.read", diff --git a/services/static-webserver/client/source/class/osparc/data/Resources.js b/services/static-webserver/client/source/class/osparc/data/Resources.js index d87f6c690bf..4e54df325c3 100644 --- a/services/static-webserver/client/source/class/osparc/data/Resources.js +++ b/services/static-webserver/client/source/class/osparc/data/Resources.js @@ -288,8 +288,8 @@ qx.Class.define("osparc.data.Resources", { url: statics.API + "/projects/{studyId}/folders/{folderId}" }, moveToWorkspace: { - method: "PUT", - url: statics.API + "/projects/{studyId}/workspaces/{workspaceId}" + method: "POST", + url: statics.API + "/projects/{studyId}/workspaces/{workspaceId}:move" }, } }, @@ -342,8 +342,8 @@ qx.Class.define("osparc.data.Resources", { url: statics.API + "/folders/{folderId}" }, moveToWorkspace: { - method: "PUT", - url: statics.API + "/folders/{folderId}/folders/{workspaceId}" + method: "POST", + url: statics.API + "/folders/{folderId}/folders/{workspaceId}:move" }, trash: { method: "POST", @@ -1029,51 +1029,6 @@ qx.Class.define("osparc.data.Resources", { } } }, - /* - * CLUSTERS - */ - "clusters": { - useCache: true, - endpoints: { - get: { - method: "GET", - url: statics.API + "/clusters" - }, - post: { - method: "POST", - url: statics.API + "/clusters" - }, - pingWCredentials: { - method: "POST", - url: statics.API + "/clusters:ping" - }, - getOne: { - method: "GET", - url: statics.API + "/clusters/{cid}" - }, - delete: { - method: "DELETE", - url: statics.API + "/clusters/{cid}" - }, - patch: { - method: "PATCH", - url: statics.API + "/clusters/{cid}" - }, - ping: { - method: "POST", - url: statics.API + "/clusters/{cid}:ping" - } - } - }, - "clusterDetails": { - useCache: false, - endpoints: { - get: { - method: "GET", - url: statics.API + "/clusters/{cid}/details" - } - } - }, /* * CLASSIFIERS * Gets the json object containing sample classifiers diff --git a/services/static-webserver/client/source/class/osparc/desktop/StartStopButtons.js b/services/static-webserver/client/source/class/osparc/desktop/StartStopButtons.js index 15cd77f5288..c1633ddda85 100644 --- a/services/static-webserver/client/source/class/osparc/desktop/StartStopButtons.js +++ b/services/static-webserver/client/source/class/osparc/desktop/StartStopButtons.js @@ -64,31 +64,6 @@ qx.Class.define("osparc.desktop.StartStopButtons", { _createChildControlImpl: function(id) { let control; switch (id) { - case "cluster-layout": - control = new qx.ui.container.Composite(new qx.ui.layout.HBox(5).set({ - alignY: "middle" - })); - this._add(control); - break; - case "cluster-selector": { - control = new qx.ui.form.SelectBox().set({ - maxHeight: 32 - }); - this.getChildControl("cluster-layout").add(control); - const store = osparc.store.Store.getInstance(); - store.addListener("changeClusters", () => this.__populateClustersSelectBox(), this); - break; - } - case 
"cluster-mini-view": - control = new osparc.cluster.ClusterMiniView(); - this.getChildControl("cluster-layout").add(control); - this.getChildControl("cluster-selector").addListener("changeSelection", e => { - const selection = e.getData(); - if (selection.length) { - control.setClusterId(selection[0].id); - } - }, this); - break; case "dynamics-layout": control = new qx.ui.container.Composite(new qx.ui.layout.HBox(5).set({ alignY: "middle" @@ -140,9 +115,6 @@ qx.Class.define("osparc.desktop.StartStopButtons", { }, __buildLayout: function() { - this.getChildControl("cluster-selector"); - this.getChildControl("cluster-mini-view"); - this.getChildControl("start-service-button"); this.getChildControl("stop-service-button"); @@ -209,24 +181,9 @@ qx.Class.define("osparc.desktop.StartStopButtons", { ]; }, - __populateClustersSelectBox: function() { - osparc.cluster.Utils.populateClustersSelectBox(this.getChildControl("cluster-selector")); - const clusters = osparc.store.Store.getInstance().getClusters(); - this.getChildControl("cluster-layout").setVisibility(Object.keys(clusters).length ? "visible" : "excluded"); - }, - - getClusterId: function() { - if (this.getChildControl("cluster-layout").isVisible()) { - return this.getChildControl("cluster-selector").getSelection()[0].id; - } - return null; - }, - __applyStudy: async function(study) { study.getWorkbench().addListener("pipelineChanged", this.__checkButtonsVisible, this); study.addListener("changePipelineRunning", this.__updateRunButtonsStatus, this); - this.__populateClustersSelectBox(); - this.__getComputations(); this.__checkButtonsVisible(); this.__updateRunButtonsStatus(); }, @@ -250,34 +207,5 @@ qx.Class.define("osparc.desktop.StartStopButtons", { this.__setRunning(study.isPipelineRunning()); } }, - - __getComputations: function() { - const studyId = this.getStudy().getUuid(); - const url = "/computations/" + encodeURIComponent(studyId); - const req = new osparc.io.request.ApiRequest(url, "GET"); - req.addListener("success", e => { - const res = e.getTarget().getResponse(); - if (res && res.data && "cluster_id" in res.data) { - const clusterId = res.data["cluster_id"]; - if (clusterId) { - const clustersBox = this.getChildControl("cluster-selector"); - if (clustersBox.isVisible()) { - clustersBox.getSelectables().forEach(selectable => { - if (selectable.id === clusterId) { - clustersBox.setSelection([selectable]); - } - }); - } - } - } - }, this); - req.addListener("fail", e => { - const res = e.getTarget().getResponse(); - if (res && res.error) { - console.error(res.error); - } - }); - req.send(); - } } }); diff --git a/services/static-webserver/client/source/class/osparc/desktop/StudyEditor.js b/services/static-webserver/client/source/class/osparc/desktop/StudyEditor.js index 14474eabd7d..bfd02dc41eb 100644 --- a/services/static-webserver/client/source/class/osparc/desktop/StudyEditor.js +++ b/services/static-webserver/client/source/class/osparc/desktop/StudyEditor.js @@ -619,10 +619,6 @@ qx.Class.define("osparc.desktop.StudyEditor", { "subgraph": partialPipeline, "force_restart": forceRestart }; - const startStopButtonsWB = this.__workbenchView.getStartStopButtons(); - if (startStopButtonsWB.getClusterId() !== null) { - requestData["cluster_id"] = startStopButtonsWB.getClusterId(); - } req.setRequestData(requestData); req.send(); if (partialPipeline.length) { @@ -911,10 +907,6 @@ qx.Class.define("osparc.desktop.StudyEditor", { this.getStudy().stopStudy(); this.__closeStudy(); } - const clusterMiniView = 
this.__workbenchView.getStartStopButtons().getChildControl("cluster-mini-view"); - if (clusterMiniView) { - clusterMiniView.setClusterId(null); - } osparc.utils.Utils.closeHangingWindows(); }, diff --git a/services/static-webserver/client/source/class/osparc/desktop/preferences/Preferences.js b/services/static-webserver/client/source/class/osparc/desktop/preferences/Preferences.js index d04b96ceeea..90616bfd3e4 100644 --- a/services/static-webserver/client/source/class/osparc/desktop/preferences/Preferences.js +++ b/services/static-webserver/client/source/class/osparc/desktop/preferences/Preferences.js @@ -29,9 +29,6 @@ qx.Class.define("osparc.desktop.preferences.Preferences", { if (osparc.data.Permissions.getInstance().canDo("user.tag")) { this.__addTagsPage(); } - if (osparc.product.Utils.showClusters()) { - this.__addClustersPage(); - } }, members: { @@ -63,24 +60,5 @@ qx.Class.define("osparc.desktop.preferences.Preferences", { const page = this.addTab(title, iconSrc, tagsPage); osparc.utils.Utils.setIdToWidget(page.getChildControl("button"), "preferencesTagsTabBtn"); }, - - __addClustersPage: function() { - const title = this.tr("Clusters"); - const iconSrc = "@FontAwesome5Solid/server/24"; - const clustersPage = new osparc.desktop.preferences.pages.ClustersPage(); - const page = this.addTab(title, iconSrc, clustersPage); - const clustersBtn = page.getChildControl("button"); - clustersBtn.exclude(); - const isDisabled = osparc.utils.DisabledPlugins.isClustersDisabled(); - if (isDisabled === false) { - osparc.data.Resources.get("clusters") - .then(clusters => { - if (clusters.length || osparc.data.Permissions.getInstance().canDo("user.clusters.create")) { - clustersBtn.show(); - } - }) - .catch(err => console.error(err)); - } - }, } }); diff --git a/services/static-webserver/client/source/class/osparc/desktop/preferences/pages/ClustersPage.js b/services/static-webserver/client/source/class/osparc/desktop/preferences/pages/ClustersPage.js deleted file mode 100644 index d5d7e6ba6dc..00000000000 --- a/services/static-webserver/client/source/class/osparc/desktop/preferences/pages/ClustersPage.js +++ /dev/null @@ -1,563 +0,0 @@ -/* ************************************************************************ - - osparc - the simcore frontend - - https://osparc.io - - Copyright: - 2021 IT'IS Foundation, https://itis.swiss - - License: - MIT: https://opensource.org/licenses/MIT - - Authors: - * Odei Maiz (odeimaiz) - -************************************************************************ */ - -/** - * Clusters and members in preferences dialog - * - */ - -qx.Class.define("osparc.desktop.preferences.pages.ClustersPage", { - extend: qx.ui.core.Widget, - - construct: function() { - this.base(arguments); - - this._setLayout(new qx.ui.layout.VBox(15)); - - const buttonsLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(10).set({ - alignX: "center" - })); - if (osparc.data.Permissions.getInstance().canDo("user.clusters.create")) { - buttonsLayout.add(this.__getCreateClusterButton()); - } - buttonsLayout.add(this.__getShowClustersDetailsButton()); - this._add(buttonsLayout); - this._add(this.__getClustersSection()); - this._add(this.__getOrgsAndMembersSection(), { - flex: 1 - }); - - this.__reloadClusters(); - }, - - members: { - __currentCluster: null, - __clustersModel: null, - __clustersList: null, - __selectOrgMemberLayout: null, - __organizationsAndMembers: null, - __membersArrayModel: null, - - __getCreateClusterButton: function() { - const createClusterBtn = new 
qx.ui.form.Button().set({ - appearance: "strong-button", - label: this.tr("New Cluster"), - icon: "@FontAwesome5Solid/plus/14", - allowGrowX: false - }); - createClusterBtn.addListener("execute", function() { - const newCluster = true; - const clusterEditor = new osparc.editor.ClusterEditor(newCluster); - const title = this.tr("Cluster Details Editor"); - const win = osparc.ui.window.Window.popUpInWindow(clusterEditor, title, 400, 260); - clusterEditor.addListener("createCluster", () => { - this.__createCluster(win, clusterEditor.getChildControl("create"), clusterEditor); - }); - clusterEditor.addListener("cancel", () => win.close()); - }, this); - return createClusterBtn; - }, - - __getShowClustersDetailsButton: function() { - const createClusterBtn = new qx.ui.form.Button().set({ - label: this.tr("Show Resources"), - icon: "@FontAwesome5Solid/info/14", - allowGrowX: false - }); - createClusterBtn.addListener("execute", () => osparc.cluster.Utils.popUpClustersDetails(), this); - return createClusterBtn; - }, - - __getClustersSection: function() { - const box = osparc.ui.window.TabbedView.createSectionBox(this.tr("Clusters")); - box.add(this.__getClustersList()); - box.setContentPadding(0); - return box; - }, - - __getClustersList: function() { - const clustersList = this.__clustersList = new qx.ui.form.List().set({ - decorator: "no-border", - spacing: 3, - height: 150, - width: 150 - }); - clustersList.addListener("changeSelection", e => { - this.__clusterSelected(e.getData()); - }, this); - - const clustersModel = this.__clustersModel = new qx.data.Array(); - const clustersCtrl = new qx.data.controller.List(clustersModel, clustersList, "name"); - clustersCtrl.setDelegate({ - createItem: () => new osparc.ui.list.ClusterListItem(), - bindItem: (ctrl, item, id) => { - ctrl.bindProperty("id", "model", null, item, id); - ctrl.bindProperty("id", "key", null, item, id); - ctrl.bindProperty("thumbnail", "thumbnail", null, item, id); - ctrl.bindProperty("name", "title", null, item, id); - ctrl.bindProperty("endpoint", "endpoint", null, item, id); - ctrl.bindProperty("description", "subtitle", null, item, id); - ctrl.bindProperty("accessRights", "members", null, item, id); - }, - configureItem: item => { - item.addListener("openEditCluster", e => { - const clusterId = e.getData(); - this.__openEditCluster(clusterId); - }); - - item.addListener("deleteCluster", e => { - const clusterId = e.getData(); - this.__deleteCluster(clusterId); - }); - } - }); - - return clustersList; - }, - - __getOrgsAndMembersSection: function() { - const box = osparc.ui.window.TabbedView.createSectionBox(this.tr("Organization and Members")); - box.add(this.__getOrgMembersFilter()); - box.add(this.__getMembersList(), { - flex: 1 - }); - box.setContentPadding(0); - return box; - }, - - __getOrgMembersFilter: function() { - const vBox = this.__selectOrgMemberLayout = new qx.ui.container.Composite(new qx.ui.layout.VBox()); - vBox.exclude(); - - const label = new qx.ui.basic.Label(this.tr("Select from the following list")).set({ - paddingLeft: 5 - }); - vBox.add(label); - - const hBox = new qx.ui.container.Composite(new qx.ui.layout.HBox(5).set({ - alignY: "middle" - })); - vBox.add(hBox); - - const organizationsAndMembers = this.__organizationsAndMembers = new osparc.filter.OrganizationsAndMembers("orgAndMembClusters"); - hBox.add(organizationsAndMembers, { - flex: 1 - }); - - const addCollaboratorBtn = new qx.ui.form.Button(this.tr("Add")).set({ - appearance: "strong-button", - allowGrowY: false, - enabled: false - }); - 
addCollaboratorBtn.addListener("execute", () => { - this.__addMembers(this.__organizationsAndMembers.getSelectedGIDs()); - }, this); - qx.event.message.Bus.getInstance().subscribe("OrgAndMembClustersFilter", () => { - const anySelected = Boolean(this.__organizationsAndMembers.getSelectedGIDs().length); - addCollaboratorBtn.setEnabled(anySelected); - }, this); - - hBox.add(addCollaboratorBtn); - - return vBox; - }, - - __getMembersList: function() { - const membersUIList = new qx.ui.form.List().set({ - decorator: "no-border", - spacing: 3, - width: 150, - backgroundColor: "background-main-2" - }); - - const membersArrayModel = this.__membersArrayModel = new qx.data.Array(); - const membersCtrl = new qx.data.controller.List(membersArrayModel, membersUIList, "name"); - membersCtrl.setDelegate({ - createItem: () => new osparc.ui.list.MemberListItem(), - bindItem: (ctrl, item, id) => { - ctrl.bindProperty("id", "model", null, item, id); - ctrl.bindProperty("id", "key", null, item, id); - ctrl.bindProperty("thumbnail", "thumbnail", null, item, id); - ctrl.bindProperty("name", "title", null, item, id); - ctrl.bindProperty("login", "subtitleMD", null, item, id); - ctrl.bindProperty("accessRights", "accessRights", null, item, id); - ctrl.bindProperty("showOptions", "showOptions", null, item, id); - }, - configureItem: item => { - item.getChildControl("thumbnail").getContentElement() - .setStyles({ - "border-radius": "16px" - }); - item.addListener("promoteToManager", e => { - const clusterMember = e.getData(); - this.__promoteToManager(clusterMember); - }); - item.addListener("removeMember", e => { - const clusterMember = e.getData(); - this.__deleteMember(clusterMember); - }); - } - }); - - return membersUIList; - }, - - __clusterSelected: function(data) { - this.__selectOrgMemberLayout.exclude(); - if (data && data.length) { - this.__currentCluster = data[0]; - } else { - this.__currentCluster = null; - } - this.__reloadClusterMembers(); - }, - - __reloadClusters: function(reloadMembers = false) { - let reloadClusterKey = null; - if (reloadMembers) { - reloadClusterKey = this.__currentCluster.getKey(); - } - - const clustersModel = this.__clustersModel; - clustersModel.removeAll(); - - const isDisabled = osparc.utils.DisabledPlugins.isClustersDisabled(); - if (isDisabled === false) { - osparc.data.Resources.get("clusters") - .then(clusters => { - clusters.forEach(cluster => clustersModel.append(qx.data.marshal.Json.createModel(cluster))); - if (reloadClusterKey) { - const selectables = this.__clustersList.getSelectables(); - selectables.forEach(selectable => { - if (selectable.getKey() === reloadClusterKey) { - this.__currentCluster = selectable; - this.__reloadClusterMembers(); - } - }); - } - }) - .catch(err => console.error(err)); - } - }, - - __reloadClusterMembers: function() { - const membersArrayModel = this.__membersArrayModel; - membersArrayModel.removeAll(); - - const clusterModel = this.__currentCluster; - if (clusterModel === null) { - return; - } - - const clusterMembers = clusterModel.getMembersList(); - - const groupsStore = osparc.store.Groups.getInstance(); - const myGid = groupsStore.getMyGroupId(); - const membersModel = clusterModel.getMembers(); - const getter = "get"+String(myGid); - const canWrite = membersModel[getter] ? 
membersModel[getter]().getWrite() : false; - if (canWrite) { - this.__selectOrgMemberLayout.show(); - const memberKeys = []; - clusterMembers.forEach(clusterMember => memberKeys.push(clusterMember["gid"])); - this.__organizationsAndMembers.reloadVisibleCollaborators(memberKeys); - } - - const potentialCollaborators = osparc.store.Groups.getInstance().getPotentialCollaborators(); - clusterMembers.forEach(clusterMember => { - const gid = clusterMember.getGroupId(); - if (gid in potentialCollaborators) { - const collaborator = potentialCollaborators[gid]; - const collabObj = {}; - if (collaborator["collabType"] === 1) { - // group - collabObj["thumbnail"] = collaborator.getThumbnail() || "@FontAwesome5Solid/users/24"; - collabObj["login"] = collaborator.getDescription(); - } else if (collaborator["collabType"] === 2) { - // user - collabObj["thumbnail"] = collaborator.getThumbnail() || "@FontAwesome5Solid/user/24"; - collabObj["login"] = collaborator.getLogin(); - } - if (Object.keys(collabObj).length) { - collabObj["id"] = collaborator.getGroupId(); - collabObj["name"] = collaborator.getLabel(); - collabObj["accessRights"] = clusterMember.getAccessRights(); - collabObj["showOptions"] = canWrite; - membersArrayModel.append(qx.data.marshal.Json.createModel(collabObj)); - } - } - }); - }, - - __openEditCluster: function(clusterId) { - let cluster = null; - this.__clustersModel.forEach(clusterModel => { - if (clusterModel.getId() === parseInt(clusterId)) { - cluster = clusterModel; - } - }); - if (cluster === null) { - return; - } - - const newCluster = false; - const clusterEditor = new osparc.editor.ClusterEditor(newCluster); - cluster.bind("id", clusterEditor, "cid"); - cluster.bind("name", clusterEditor, "label"); - cluster.bind("endpoint", clusterEditor, "endpoint"); - clusterEditor.setSimpleAuthenticationUsername(cluster.getAuthentication().getUsername()); - clusterEditor.setSimpleAuthenticationPassword(cluster.getAuthentication().getPassword()); - cluster.bind("description", clusterEditor, "description"); - const title = this.tr("Cluster Details Editor"); - const win = osparc.ui.window.Window.popUpInWindow(clusterEditor, title, 400, 260); - clusterEditor.addListener("updateCluster", () => { - this.__updateCluster(win, clusterEditor.getChildControl("save"), clusterEditor); - }); - clusterEditor.addListener("cancel", () => win.close()); - }, - - __deleteCluster: function(clusterId) { - let cluster = null; - this.__clustersModel.forEach(clusterModel => { - if (clusterModel.getId() === parseInt(clusterId)) { - cluster = clusterModel; - } - }); - if (cluster === null) { - return; - } - - const name = cluster.getName(); - const msg = this.tr("Are you sure you want to delete ") + name + "?"; - const win = new osparc.ui.window.Confirmation(msg).set({ - caption: this.tr("Delete Cluster"), - confirmText: this.tr("Delete"), - confirmAction: "delete" - }); - win.center(); - win.open(); - win.addListener("close", () => { - if (win.getConfirmed()) { - const params = { - url: { - "cid": clusterId - } - }; - osparc.data.Resources.fetch("clusters", "delete", params) - .then(() => { - osparc.store.Store.getInstance().reset("clusters"); - this.__reloadClusters(); - }) - .catch(err => { - osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong deleting ") + name, "ERROR"); - console.error(err); - }) - .finally(() => { - win.close(); - }); - } - }, this); - }, - - __createCluster: function(win, button, clusterEditor) { - const clusterKey = clusterEditor.getCid(); - const name = 
clusterEditor.getLabel(); - const endpoint = clusterEditor.getEndpoint(); - const simpleAuthenticationUsername = clusterEditor.getSimpleAuthenticationUsername(); - const simpleAuthenticationPassword = clusterEditor.getSimpleAuthenticationPassword(); - const description = clusterEditor.getDescription(); - const params = { - url: { - "cid": clusterKey - }, - data: { - "name": name, - "endpoint": endpoint, - "authentication": { - "type": "simple", - "username": simpleAuthenticationUsername, - "password": simpleAuthenticationPassword - }, - "description": description, - "type": "AWS" - } - }; - osparc.data.Resources.fetch("clusters", "post", params) - .then(() => { - osparc.FlashMessenger.getInstance().logAs(name + this.tr(" successfully created")); - button.setFetching(false); - osparc.store.Store.getInstance().reset("clusters"); - this.__reloadClusters(); - }) - .catch(err => { - osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong creating ") + name, "ERROR"); - button.setFetching(false); - console.error(err); - }) - .finally(() => { - win.close(); - }); - }, - - __updateCluster: function(win, button, clusterEditor) { - const clusterId = clusterEditor.getCid(); - const name = clusterEditor.getLabel(); - const endpoint = clusterEditor.getEndpoint(); - const authenticationType = "simple"; - const simpleAuthenticationUsername = clusterEditor.getSimpleAuthenticationUsername(); - const simpleAuthenticationPassword = clusterEditor.getSimpleAuthenticationPassword(); - const description = clusterEditor.getDescription(); - const params = { - url: { - "cid": clusterId - }, - data: { - "name": name, - "endpoint": endpoint, - "authentication": { - "type": authenticationType, - "username": simpleAuthenticationUsername, - "password": simpleAuthenticationPassword - }, - "description": description, - "type": "AWS" - } - }; - osparc.data.Resources.fetch("clusters", "patch", params) - .then(() => { - osparc.FlashMessenger.getInstance().logAs(name + this.tr(" successfully edited")); - button.setFetching(false); - win.close(); - osparc.store.Store.getInstance().reset("clusters"); - this.__reloadClusters(); - }) - .catch(err => { - osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong editing ") + name, "ERROR"); - button.setFetching(false); - console.error(err); - }); - }, - - __addMembers: function(gids) { - if (this.__currentCluster === null) { - return; - } - - const accessRights = JSON.parse(qx.util.Serializer.toJson(this.__currentCluster.getMembers())); - gids.forEach(gid => { - if (gid in accessRights) { - return; - } - - accessRights[gid] = { - "read": true, - "write": false, - "delete": false - }; - }); - - const params = { - url: { - "cid": this.__currentCluster.getKey() - }, - data: { - "accessRights": accessRights - } - }; - osparc.data.Resources.fetch("clusters", "patch", params) - .then(() => { - osparc.FlashMessenger.getInstance().logAs(this.tr("Cluster successfully shared")); - osparc.store.Store.getInstance().reset("clusters"); - this.__reloadClusters(true); - }) - .catch(err => { - osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong sharing the Cluster"), "ERROR"); - console.error(err); - }); - }, - - __promoteToManager: function(clusterMember) { - if (this.__currentCluster === null) { - return; - } - - const accessRights = JSON.parse(qx.util.Serializer.toJson(this.__currentCluster.getMembers())); - if (!(clusterMember["key"] in accessRights)) { - return; - } - - accessRights[clusterMember["key"]] = { - "read": true, - "write": true, - 
"delete": false - }; - const params = { - url: { - "cid": this.__currentCluster.getKey() - }, - data: { - "accessRights": accessRights - } - }; - osparc.data.Resources.fetch("clusters", "patch", params) - .then(() => { - osparc.FlashMessenger.getInstance().logAs(clusterMember["name"] + this.tr(" successfully promoted")); - osparc.store.Store.getInstance().reset("clusters"); - this.__reloadClusters(true); - }) - .catch(err => { - osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong promoting ") + clusterMember["name"], "ERROR"); - console.error(err); - }); - }, - - __deleteMember: function(clusterMember) { - if (this.__currentCluster === null) { - return; - } - - const accessRights = JSON.parse(qx.util.Serializer.toJson(this.__currentCluster.getMembers())); - if (!(clusterMember["key"] in accessRights)) { - return; - } - - accessRights[clusterMember["key"]] = { - "read": false, - "write": false, - "delete": false - }; - const params = { - url: { - "cid": this.__currentCluster.getKey() - }, - data: { - "accessRights": accessRights - } - }; - osparc.data.Resources.fetch("clusters", "patch", params) - .then(() => { - osparc.FlashMessenger.getInstance().logAs(clusterMember["name"] + this.tr(" successfully removed")); - osparc.store.Store.getInstance().reset("clusters"); - this.__reloadClusters(true); - }) - .catch(err => { - osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong removing ") + clusterMember["name"], "ERROR"); - console.error(err); - }); - } - } -}); diff --git a/services/static-webserver/client/source/class/osparc/editor/ClusterEditor.js b/services/static-webserver/client/source/class/osparc/editor/ClusterEditor.js deleted file mode 100644 index 8418d2494d9..00000000000 --- a/services/static-webserver/client/source/class/osparc/editor/ClusterEditor.js +++ /dev/null @@ -1,262 +0,0 @@ -/* ************************************************************************ - - osparc - the simcore frontend - - https://osparc.io - - Copyright: - 2021 IT'IS Foundation, https://itis.swiss - - License: - MIT: https://opensource.org/licenses/MIT - - Authors: - * Odei Maiz (odeimaiz) - -************************************************************************ */ - -qx.Class.define("osparc.editor.ClusterEditor", { - extend: qx.ui.core.Widget, - - construct: function(newCluster = true) { - this.base(arguments); - this._setLayout(new qx.ui.layout.VBox(8)); - - this.__newCluster = newCluster; - - const manager = this.__validator = new qx.ui.form.validation.Manager(); - const title = this.getChildControl("title"); - title.setRequired(true); - manager.add(title); - const endpoint = this.getChildControl("endpoint"); - endpoint.setRequired(true); - manager.add(endpoint); - const username = this.getChildControl("simpleAuthenticationUsername"); - username.setRequired(true); - manager.add(username); - const pass = this.getChildControl("simpleAuthenticationPassword"); - pass.setRequired(true); - manager.add(pass); - this._createChildControlImpl("description"); - this._createChildControlImpl("test-layout"); - newCluster ? 
this._createChildControlImpl("create") : this._createChildControlImpl("save"); - }, - - properties: { - cid: { - check: "Number", - init: 0, - nullable: false, - event: "changeCid" - }, - - label: { - check: "String", - init: "", - nullable: false, - event: "changeLabel" - }, - - endpoint: { - check: "String", - init: "", - nullable: false, - event: "changeEndpoint" - }, - - simpleAuthenticationUsername: { - check: "String", - init: "", - nullable: false, - event: "changeSimpleAuthenticationUsername" - }, - - simpleAuthenticationPassword: { - check: "String", - init: "", - nullable: false, - event: "changeSimpleAuthenticationPassword" - }, - - description: { - check: "String", - init: "", - nullable: false, - event: "changeDescription" - } - }, - - events: { - "createCluster": "qx.event.type.Event", - "updateCluster": "qx.event.type.Event", - "cancel": "qx.event.type.Event" - }, - - members: { - __validator: null, - __newCluster: null, - - _createChildControlImpl: function(id) { - let control; - switch (id) { - case "title": - control = new qx.ui.form.TextField().set({ - font: "text-14", - backgroundColor: "background-main", - placeholder: this.tr("Title") - }); - this.bind("label", control, "value"); - control.bind("value", this, "label"); - this._add(control); - break; - case "endpointLayout": - control = new qx.ui.container.Composite(new qx.ui.layout.HBox(5)); - this._add(control); - break; - case "endpoint": { - const endpointLayout = this.getChildControl("endpointLayout"); - control = new qx.ui.form.TextField().set({ - font: "text-14", - backgroundColor: "background-main", - placeholder: this.tr("Endpoint") - }); - this.bind("endpoint", control, "value"); - control.bind("value", this, "endpoint"); - control.setRequired(true); - endpointLayout.add(control, { - flex: 1 - }); - break; - } - case "simpleAuthenticationUsername": { - const endpointLayout = this.getChildControl("endpointLayout"); - control = new qx.ui.form.TextField().set({ - font: "text-14", - backgroundColor: "background-main", - placeholder: this.tr("Username") - }); - control.getContentElement().setAttribute("autocomplete", "off"); - this.bind("simpleAuthenticationUsername", control, "value"); - control.bind("value", this, "simpleAuthenticationUsername"); - control.setRequired(true); - endpointLayout.add(control); - break; - } - case "simpleAuthenticationPassword": { - const endpointLayout = this.getChildControl("endpointLayout"); - control = new osparc.ui.form.PasswordField().set({ - font: "text-14", - backgroundColor: "background-main", - placeholder: this.tr("Password") - }); - control.getContentElement().setAttribute("autocomplete", "off"); - this.bind("simpleAuthenticationPassword", control, "value"); - control.bind("value", this, "simpleAuthenticationPassword"); - control.setRequired(true); - endpointLayout.add(control); - break; - } - case "description": - control = new qx.ui.form.TextArea().set({ - font: "text-14", - placeholder: this.tr("Description"), - autoSize: true, - minHeight: 70, - maxHeight: 140 - }); - this.bind("description", control, "value"); - control.bind("value", this, "description"); - this._add(control); - break; - case "test-layout": { - control = this.__getTestLayout(); - this._add(control); - break; - } - case "buttonsLayout": { - control = new qx.ui.container.Composite(new qx.ui.layout.HBox(8).set({ - alignX: "right" - })); - const cancelButton = new qx.ui.form.Button(this.tr("Cancel")).set({ - appearance: "form-button-text" - }); - cancelButton.addListener("execute", () => 
this.fireEvent("cancel"), this); - control.add(cancelButton); - this._add(control); - break; - } - case "create": { - const buttons = this.getChildControl("buttonsLayout"); - control = new osparc.ui.form.FetchButton(this.tr("Create")).set({ - appearance: "form-button" - }); - control.addListener("execute", () => { - if (this.__validator.validate()) { - control.setFetching(true); - this.fireEvent("createCluster"); - } - }, this); - buttons.add(control); - break; - } - case "save": { - const buttons = this.getChildControl("buttonsLayout"); - control = new osparc.ui.form.FetchButton(this.tr("Save")).set({ - appearance: "form-button" - }); - control.addListener("execute", () => { - if (this.__validator.validate()) { - control.setFetching(true); - this.fireEvent("updateCluster"); - } - }, this); - buttons.add(control); - break; - } - } - - return control || this.base(arguments, id); - }, - - __getTestLayout: function() { - const testLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(8)); - const testButton = new osparc.ui.form.FetchButton(this.tr("Test")); - testLayout.add(testButton); - - const testResult = new qx.ui.basic.Image("@FontAwesome5Solid/lightbulb/16"); - testLayout.add(testResult); - - testButton.addListener("execute", () => { - if (this.__validator.validate()) { - testButton.setFetching(true); - const endpoint = this.__newCluster ? "pingWCredentials" : "ping"; - const params = {}; - if (this.__newCluster) { - params["data"] = { - "endpoint": this.getEndpoint(), - "authentication": { - "type": "simple", - "username": this.getSimpleAuthenticationUsername(), - "password": this.getSimpleAuthenticationPassword() - } - }; - } else { - params["url"] = { - cid: this.getCid() - }; - } - osparc.data.Resources.fetch("clusters", endpoint, params) - .then(() => testResult.setTextColor("ready-green")) - .catch(err => { - testResult.setTextColor("failed-red"); - const msg = err.message || this.tr("Test failed"); - osparc.FlashMessenger.getInstance().logAs(msg, "Error"); - }) - .finally(() => testButton.setFetching(false)); - } - }, this); - - return testLayout; - } - } -}); diff --git a/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js b/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js index dc029edbba9..160ab65ae29 100644 --- a/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js +++ b/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js @@ -91,18 +91,6 @@ qx.Class.define("osparc.navigation.UserMenu", { control.addListener("execute", () => osparc.desktop.organizations.OrganizationsWindow.openWindow(), this); this.add(control); break; - case "clusters": - control = new qx.ui.menu.Button(this.tr("Clusters")); - control.exclude(); - if (osparc.product.Utils.showClusters()) { - const isDisabled = osparc.utils.DisabledPlugins.isClustersDisabled(); - if (isDisabled === false) { - control.show(); - } - } - control.addListener("execute", () => osparc.cluster.Utils.popUpClustersDetails(), this); - this.add(control); - break; case "market": control = new qx.ui.menu.Button(this.tr("Market")); control.addListener("execute", () => osparc.vipMarket.MarketWindow.openWindow()); @@ -127,8 +115,8 @@ qx.Class.define("osparc.navigation.UserMenu", { case "license": control = new qx.ui.menu.Button(this.tr("License")); osparc.utils.Utils.setIdToWidget(control, "userMenuLicenseBtn"); - osparc.store.Support.getLicenseURL() - .then(licenseURL => control.addListener("execute", () => window.open(licenseURL))); + 
const licenseURL = osparc.store.Support.getLicenseURL(); + control.addListener("execute", () => window.open(licenseURL)); this.add(control); break; case "tip-lite-button": @@ -175,7 +163,6 @@ qx.Class.define("osparc.navigation.UserMenu", { } this.getChildControl("preferences"); this.getChildControl("organizations"); - this.getChildControl("clusters"); } this.addSeparator(); @@ -231,7 +218,6 @@ qx.Class.define("osparc.navigation.UserMenu", { } this.getChildControl("preferences"); this.getChildControl("organizations"); - this.getChildControl("clusters"); } this.addSeparator(); @@ -257,7 +243,7 @@ qx.Class.define("osparc.navigation.UserMenu", { } this.getChildControl("about"); - if (!osparc.product.Utils.isProduct("osparc")) { + if (osparc.product.Utils.showAboutProduct()) { this.getChildControl("about-product"); } this.getChildControl("license"); diff --git a/services/static-webserver/client/source/class/osparc/product/AboutProduct.js b/services/static-webserver/client/source/class/osparc/product/AboutProduct.js index 97e18eeadfd..c0760d01082 100644 --- a/services/static-webserver/client/source/class/osparc/product/AboutProduct.js +++ b/services/static-webserver/client/source/class/osparc/product/AboutProduct.js @@ -57,6 +57,10 @@ qx.Class.define("osparc.product.AboutProduct", { case "s4llite": this.__buildS4LLiteLayout(); break; + case "tis": + case "tiplite": + this.__buildTIPLayout(); + break; default: { const noInfoText = this.tr("Information not available"); const noInfoLabel = osparc.product.quickStart.Utils.createLabel(noInfoText); @@ -67,43 +71,43 @@ qx.Class.define("osparc.product.AboutProduct", { }, __buildS4LLayout: function() { - osparc.store.Support.getLicenseURL() - .then(licenseUrl => { - const text = this.tr(` - sim4life.io is a native implementation of the most advanced simulation platform, Sim4Life, in the cloud. \ - The platform empowers users to simulate, analyze, and predict complex, multifaceted, and dynamic biological interactions within the full anatomical complexity of the human body. \ - It provides the ability to set up and run complex simulations directly within any browser, utilizing cloud technology. -
<br><br>
- sim4life.io makes use of technologies developed by our research partner for the o2S2PARC platform, the IT’IS Foundation, and co-funded by the U.S. National Institutes of Health’s SPARC initiative.\ -
<br><br>
- For more information about Sim4Life, please visit ${osparc.utils.Utils.createHTMLLink("sim4life.swiss", "https://sim4life.swiss/")}. -
<br><br>
- To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}. - `); - - const label = osparc.product.quickStart.Utils.createLabel(text); - this.add(label); - }); + const licenseUrl = osparc.store.Support.getLicenseURL(); + const text = this.tr(` + sim4life.io is a native implementation of the most advanced simulation platform, Sim4Life, in the cloud. \ + The platform empowers users to simulate, analyze, and predict complex, multifaceted, and dynamic biological interactions within the full anatomical complexity of the human body. \ + It provides the ability to set up and run complex simulations directly within any browser, utilizing cloud technology. +

+ sim4life.io makes use of technologies developed by our research partner for the o2S2PARC platform, the IT’IS Foundation, and co-funded by the U.S. National Institutes of Health’s SPARC initiative.\ +

+ For more information about Sim4Life, please visit ${osparc.utils.Utils.createHTMLLink("sim4life.swiss", "https://sim4life.swiss/")}. +

+ To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}. +

+ Send us an email ${this.__getMailTo()} + `); + + const label = osparc.product.quickStart.Utils.createLabel(text); + this.add(label); }, __buildS4LAcademicLayout: function() { - osparc.store.Support.getLicenseURL() - .then(licenseUrl => { - const text = this.tr(` - sim4life.science is a native implementation of the most advanced simulation platform, Sim4Life, in the cloud. \ - The platform empowers users to simulate, analyze, and predict complex, multifaceted, and dynamic biological interactions within the full anatomical complexity of the human body. \ - It provides the ability to set up and run complex simulations directly within any browser, utilizing cloud technology. -

- sim4life.science makes use of technologies developed by our research partner for the o2S2PARC platform, the IT’IS Foundation, and co-funded by the U.S. National Institutes of Health’s SPARC initiative.\ -

- For more information about Sim4Life, please visit ${osparc.utils.Utils.createHTMLLink("sim4life.swiss", "href='https://sim4life.swiss/")}. -

- To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}. - `); - - const label = osparc.product.quickStart.Utils.createLabel(text); - this.add(label); - }); + const licenseUrl = osparc.store.Support.getLicenseURL(); + const text = this.tr(` + sim4life.science is a native implementation of the most advanced simulation platform, Sim4Life, in the cloud. \ + The platform empowers users to simulate, analyze, and predict complex, multifaceted, and dynamic biological interactions within the full anatomical complexity of the human body. \ + It provides the ability to set up and run complex simulations directly within any browser, utilizing cloud technology. +

+ sim4life.science makes use of technologies developed by our research partner for the o2S2PARC platform, the IT’IS Foundation, and co-funded by the U.S. National Institutes of Health’s SPARC initiative.\ +

+ For more information about Sim4Life, please visit ${osparc.utils.Utils.createHTMLLink("sim4life.swiss", "https://sim4life.swiss/")}. +

+ To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}. +

+ Send us an email ${this.__getMailTo()} + `); + + const label = osparc.product.quickStart.Utils.createLabel(text); + this.add(label); }, __buildS4LLiteLayout: function() { @@ -117,10 +121,13 @@ qx.Class.define("osparc.product.AboutProduct", { const moreInfoUrl = "https://zmt.swiss/"; const moreInfoText = `For more information about Sim4Life.lite, visit ${osparc.utils.Utils.createHTMLLink("our website", moreInfoUrl)}.`; + const emailText = `Send us an email ${this.__getMailTo()}`; + [ introText, licenseText, - moreInfoText + moreInfoText, + emailText, ].forEach(text => { const label = osparc.product.quickStart.Utils.createLabel(text); this.add(label); @@ -129,6 +136,35 @@ qx.Class.define("osparc.product.AboutProduct", { this.__addCopyright(); }, + __buildTIPLayout: function() { + const licenseUrl = osparc.store.Support.getLicenseURL(); + const text = this.tr(` + TIP (TI Planning Tool) is an innovative online platform designed to optimize targeted neurostimulation protocols using \ + temporal interference (TI) stimulation. Developed by IT'IS Foundation, TIP simplifies the complex process of planning deep \ + brain stimulation. +

+ Powered by o2S2PARC technology, TIP utilizes sophisticated electromagnetic simulations, detailed anatomical head models, \ + and automated optimization to generate comprehensive reports with quantitative and visual information. This tool is \ + invaluable for neuroscientists and brain stimulation experts, especially those with limited computational modeling experience, \ + enabling them to create effective and safe stimulation protocols for their research. \ +

+ For more information about TIP, please visit ${osparc.utils.Utils.createHTMLLink("itis.swiss", "https://itis.swiss/tools-and-systems/ti-planning/overview")}. +

+ To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}. +

+ Send us an email ${this.__getMailTo()} + `); + + const label = osparc.product.quickStart.Utils.createLabel(text); + this.add(label); + }, + + __getMailTo: function() { + const supportEmail = osparc.store.VendorInfo.getInstance().getSupportEmail(); + const productName = osparc.store.StaticInfo.getInstance().getDisplayName(); + return osparc.store.Support.mailToText(supportEmail, "Support " + productName, false); + }, + __addCopyright: function() { const copyrightLink = new osparc.ui.basic.LinkLabel().set({ font: "link-label-14" @@ -141,6 +177,6 @@ qx.Class.define("osparc.product.AboutProduct", { }); } this.add(copyrightLink); - } + }, } }); diff --git a/services/static-webserver/client/source/class/osparc/product/Utils.js b/services/static-webserver/client/source/class/osparc/product/Utils.js index 45d3b7de661..31501afeb34 100644 --- a/services/static-webserver/client/source/class/osparc/product/Utils.js +++ b/services/static-webserver/client/source/class/osparc/product/Utils.js @@ -225,7 +225,12 @@ qx.Class.define("osparc.product.Utils", { }, showAboutProduct: function() { - return (this.isS4LProduct() || this.isProduct("s4llite")); + return ( + this.isS4LProduct() || + this.isProduct("s4llite") || + this.isProduct("tis") || + this.isProduct("tiplite") + ); }, showPreferencesTokens: function() { @@ -242,13 +247,6 @@ qx.Class.define("osparc.product.Utils", { return true; }, - showClusters: function() { - if (this.isProduct("s4llite") || this.isProduct("tis") || this.isProduct("tiplite")) { - return false; - } - return true; - }, - showDisableServiceAutoStart: function() { if (this.isProduct("s4llite")) { return false; diff --git a/services/static-webserver/client/source/class/osparc/product/quickStart/s4l/Welcome.js b/services/static-webserver/client/source/class/osparc/product/quickStart/s4l/Welcome.js index bec6916504e..518416f1373 100644 --- a/services/static-webserver/client/source/class/osparc/product/quickStart/s4l/Welcome.js +++ b/services/static-webserver/client/source/class/osparc/product/quickStart/s4l/Welcome.js @@ -125,12 +125,12 @@ qx.Class.define("osparc.product.quickStart.s4l.Welcome", { textAlign: "center", rich : true }); - osparc.store.Support.getLicenseURL() - .then(licenseUrl => { - const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl); - licenseLink.setValue(link); - licenseLink.show(); - }); + const licenseUrl = osparc.store.Support.getLicenseURL(); + if (licenseUrl) { + const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl); + licenseLink.setValue(link); + licenseLink.show(); + } footerItems.push(licenseLink); const dontShowCB = osparc.product.quickStart.Utils.createDontShowAgain("s4lDontShowQuickStart"); diff --git a/services/static-webserver/client/source/class/osparc/product/quickStart/s4lacad/Welcome.js b/services/static-webserver/client/source/class/osparc/product/quickStart/s4lacad/Welcome.js index ed8a78dbdb6..c81b9813d51 100644 --- a/services/static-webserver/client/source/class/osparc/product/quickStart/s4lacad/Welcome.js +++ b/services/static-webserver/client/source/class/osparc/product/quickStart/s4lacad/Welcome.js @@ -125,12 +125,12 @@ qx.Class.define("osparc.product.quickStart.s4lacad.Welcome", { textAlign: "center", rich : true }); - osparc.store.Support.getLicenseURL() - .then(licenseUrl => { - const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl); - licenseLink.setValue(link); - licenseLink.show(); - }); + const licenseUrl = osparc.store.Support.getLicenseURL(); + if (licenseUrl) { + const link = 
osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl); + licenseLink.setValue(link); + licenseLink.show(); + } footerItems.push(licenseLink); const dontShowCB = osparc.product.quickStart.Utils.createDontShowAgain("s4lDontShowQuickStart"); diff --git a/services/static-webserver/client/source/class/osparc/product/quickStart/s4llite/Slides.js b/services/static-webserver/client/source/class/osparc/product/quickStart/s4llite/Slides.js index 3d3bcf6e048..d7726632407 100644 --- a/services/static-webserver/client/source/class/osparc/product/quickStart/s4llite/Slides.js +++ b/services/static-webserver/client/source/class/osparc/product/quickStart/s4llite/Slides.js @@ -53,12 +53,12 @@ qx.Class.define("osparc.product.quickStart.s4llite.Slides", { textAlign: "center", rich : true }); - osparc.store.Support.getLicenseURL() - .then(licenseUrl => { - const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl); - licenseLink.setValue(link); - licenseLink.show(); - }); + const licenseUrl = osparc.store.Support.getLicenseURL(); + if (licenseUrl) { + const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl); + licenseLink.setValue(link); + licenseLink.show(); + } footerItems.push(licenseLink); const dontShowCB = osparc.product.quickStart.Utils.createDontShowAgain("s4lliteDontShowQuickStart"); diff --git a/services/static-webserver/client/source/class/osparc/store/Store.js b/services/static-webserver/client/source/class/osparc/store/Store.js index 7b94b336852..cd82975131d 100644 --- a/services/static-webserver/client/source/class/osparc/store/Store.js +++ b/services/static-webserver/client/source/class/osparc/store/Store.js @@ -174,11 +174,6 @@ qx.Class.define("osparc.store.Store", { check: "Array", init: [] }, - clusters: { - check: "Array", - init: [], - event: "changeClusters" - }, services: { check: "Array", init: [] diff --git a/services/static-webserver/client/source/class/osparc/store/Support.js b/services/static-webserver/client/source/class/osparc/store/Support.js index e79de4d1a27..1352ef2eac9 100644 --- a/services/static-webserver/client/source/class/osparc/store/Support.js +++ b/services/static-webserver/client/source/class/osparc/store/Support.js @@ -4,18 +4,15 @@ qx.Class.define("osparc.store.Support", { statics: { getLicenseURL: function() { - return new Promise(resolve => { - const vendor = osparc.store.VendorInfo.getInstance().getVendor(); - if (vendor) { - if ("license_url" in vendor) { - resolve(vendor["license_url"]); - } else if ("url" in vendor) { - resolve(vendor["url"]); - } else { - resolve(""); - } + const vendor = osparc.store.VendorInfo.getInstance().getVendor(); + if (vendor) { + if ("license_url" in vendor) { + return vendor["license_url"]; + } else if ("url" in vendor) { + return vendor["url"]; } - }); + } + return ""; }, getManuals: function() { @@ -135,9 +132,12 @@ qx.Class.define("osparc.store.Support", { }); }, - mailToText: function(email, subject) { + mailToText: function(email, subject, centered = true) { const color = qx.theme.manager.Color.getInstance().resolve("text"); - const textLink = `
<a href="mailto:${email}?subject=${subject}" style='color: ${color}'>${email}</a>
`; + let textLink = `<a href="mailto:${email}?subject=${subject}" style='color: ${color}'>${email}</a>`; + if (centered) { + textLink = `
<center>${textLink}</center>
` + } return textLink; }, diff --git a/services/static-webserver/client/source/class/osparc/ui/list/ClusterListItem.js b/services/static-webserver/client/source/class/osparc/ui/list/ClusterListItem.js deleted file mode 100644 index ff24bb6f6b0..00000000000 --- a/services/static-webserver/client/source/class/osparc/ui/list/ClusterListItem.js +++ /dev/null @@ -1,150 +0,0 @@ -/* ************************************************************************ - - osparc - the simcore frontend - - https://osparc.io - - Copyright: - 2021 IT'IS Foundation, https://itis.swiss - - License: - MIT: https://opensource.org/licenses/MIT - - Authors: - * Odei Maiz (odeimaiz) - -************************************************************************ */ - -qx.Class.define("osparc.ui.list.ClusterListItem", { - extend: osparc.ui.list.ListItem, - - construct: function() { - this.base(arguments); - }, - - properties: { - members: { - check: "Object", - nullable: false, - apply: "__applyMembers", - event: "changeMembers" - }, - - accessRights: { - check: "Object", - nullable: false, - apply: "__applyAccessRights", - event: "changeAccessRights" - }, - - endpoint: { - check: "String", - nullable: false, - event: "changeEndpoint" - }, - - simpleAuthenticationUsername: { - check: "String", - init: "", - nullable: false, - event: "changeSimpleAuthenticationUsername" - }, - - simpleAuthenticationPassword: { - check: "String", - init: "", - nullable: false, - event: "changeSimpleAuthenticationPassword" - } - }, - - events: { - "openEditCluster": "qx.event.type.Data", - "deleteCluster": "qx.event.type.Data" - }, - - members: { - _createChildControlImpl: function(id) { - let control; - switch (id) { - case "options": { - const iconSize = 25; - control = new qx.ui.form.MenuButton().set({ - maxWidth: iconSize, - maxHeight: iconSize, - alignX: "center", - alignY: "middle", - icon: "@FontAwesome5Solid/ellipsis-v/"+(iconSize-11), - focusable: false - }); - this._add(control, { - row: 0, - column: 3, - rowSpan: 2 - }); - break; - } - } - - return control || this.base(arguments, id); - }, - - __applyMembers: function(members) { - if (members === null) { - return; - } - - const nMembers = this.getMembersList().length + this.tr(" members"); - this.setContact(nMembers); - - const myGid = osparc.auth.Data.getInstance().getGroupId(); - if ("get"+myGid in members) { - this.setAccessRights(members.get(myGid)); - } - }, - - getMembersList: function() { - const membersList = []; - const members = this.getMembers(); - const memberGids = members.basename.split("|"); - memberGids.forEach(memberGid => { - const member = members.get(memberGid); - member.gid = memberGid; - membersList.push(member); - }); - return membersList; - }, - - __applyAccessRights: function(accessRights) { - if (accessRights === null) { - return; - } - - if (accessRights.getDelete()) { - const optionsMenu = this.getChildControl("options"); - const menu = this.__getOptionsMenu(); - optionsMenu.setMenu(menu); - } - }, - - __getOptionsMenu: function() { - const menu = new qx.ui.menu.Menu().set({ - position: "bottom-right" - }); - - const editClusterButton = new qx.ui.menu.Button(this.tr("Edit details")); - editClusterButton.addListener("execute", () => { - this.fireDataEvent("openEditCluster", this.getKey()); - }); - menu.add(editClusterButton); - - const deleteClusterButton = new qx.ui.menu.Button(this.tr("Delete")); - deleteClusterButton.addListener("execute", () => { - this.fireDataEvent("deleteCluster", this.getKey()); - }); - menu.add(deleteClusterButton); - - return menu; 
- } - } -}); diff --git a/services/static-webserver/client/source/class/osparc/utils/DisabledPlugins.js b/services/static-webserver/client/source/class/osparc/utils/DisabledPlugins.js index e9e955b1ba9..2b5d9a65995 100644 --- a/services/static-webserver/client/source/class/osparc/utils/DisabledPlugins.js +++ b/services/static-webserver/client/source/class/osparc/utils/DisabledPlugins.js @@ -28,7 +28,6 @@ qx.Class.define("osparc.utils.DisabledPlugins", { SCICRUNCH: "WEBSERVER_SCICRUNCH", VERSION_CONTROL: "WEBSERVER_VERSION_CONTROL", META_MODELING: "WEBSERVER_META_MODELING", - CLUSTERS: "WEBSERVER_CLUSTERS", FOLDERS: "WEBSERVER_FOLDERS", isFoldersEnabled: function() { @@ -53,10 +52,6 @@ qx.Class.define("osparc.utils.DisabledPlugins", { return this.__isPluginDisabled(this.META_MODELING); }, - isClustersDisabled: function() { - return this.__isPluginDisabled(this.CLUSTERS); - }, - __isPluginDisabled: function(key) { const statics = osparc.store.Store.getInstance().get("statics"); if (statics) { diff --git a/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml b/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml index 9cca4bafd06..49278e0f128 100644 --- a/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml +++ b/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml @@ -2347,155 +2347,6 @@ paths: application/json: schema: $ref: '#/components/schemas/Envelope_CatalogServiceGet_' - /v0/clusters: - get: - tags: - - clusters - summary: List Clusters - operationId: list_clusters - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_list_ClusterGet__' - post: - tags: - - clusters - summary: Create Cluster - operationId: create_cluster - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ClusterCreate' - required: true - responses: - '201': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_ClusterGet_' - /v0/clusters:ping: - post: - tags: - - clusters - summary: Ping Cluster - description: Test connectivity with cluster - operationId: ping_cluster - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ClusterPing' - required: true - responses: - '204': - description: Successful Response - /v0/clusters/{cluster_id}: - get: - tags: - - clusters - summary: Get Cluster - operationId: get_cluster - parameters: - - name: cluster_id - in: path - required: true - schema: - type: integer - minimum: 0 - title: Cluster Id - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_ClusterGet_' - patch: - tags: - - clusters - summary: Update Cluster - operationId: update_cluster - parameters: - - name: cluster_id - in: path - required: true - schema: - type: integer - minimum: 0 - title: Cluster Id - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ClusterPatch' - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_ClusterGet_' - delete: - tags: - - clusters - summary: Delete Cluster - operationId: delete_cluster - parameters: - - name: cluster_id - in: path - required: true - schema: - type: integer - minimum: 0 - title: Cluster Id - responses: - '204': - description: Successful Response - /v0/clusters/{cluster_id}/details: - 
get: - tags: - - clusters - summary: Get Cluster Details - operationId: get_cluster_details - parameters: - - name: cluster_id - in: path - required: true - schema: - type: integer - minimum: 0 - title: Cluster Id - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_ClusterDetails_' - /v0/clusters/{cluster_id}:ping: - post: - tags: - - clusters - summary: Ping Cluster Cluster Id - description: Tests connectivity with cluster - operationId: ping_cluster_cluster_id - parameters: - - name: cluster_id - in: path - required: true - schema: - type: integer - minimum: 0 - title: Cluster Id - responses: - '204': - description: Successful Response /v0/computations/{project_id}: get: tags: @@ -2517,7 +2368,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Envelope_ComputationTaskGet_' + $ref: '#/components/schemas/Envelope_ComputationGet_' /v0/computations/{project_id}:start: post: tags: @@ -2944,6 +2795,59 @@ paths: schema: $ref: '#/components/schemas/EnvelopedError' description: Service Unavailable + /v0/folders/{folder_id}/workspaces/{workspace_id}:move: + post: + tags: + - folders + - workspaces + summary: Move folder to the workspace + operationId: move_folder_to_workspace + parameters: + - name: folder_id + in: path + required: true + schema: + type: integer + exclusiveMinimum: true + title: Folder Id + minimum: 0 + - name: workspace_id + in: path + required: true + schema: + anyOf: + - type: integer + exclusiveMinimum: true + minimum: 0 + - type: 'null' + title: Workspace Id + responses: + '204': + description: Successful Response + '404': + content: + application/json: + schema: + $ref: '#/components/schemas/EnvelopedError' + description: Not Found + '403': + content: + application/json: + schema: + $ref: '#/components/schemas/EnvelopedError' + description: Forbidden + '409': + content: + application/json: + schema: + $ref: '#/components/schemas/EnvelopedError' + description: Conflict + '503': + content: + application/json: + schema: + $ref: '#/components/schemas/EnvelopedError' + description: Service Unavailable /v0/tasks: get: tags: @@ -4540,7 +4444,7 @@ paths: '403': description: ProjectInvalidRightsError '404': - description: ProjectNotFoundError, UserDefaultWalletNotFoundError + description: UserDefaultWalletNotFoundError, ProjectNotFoundError '409': description: ProjectTooManyProjectOpenedError '422': @@ -4706,13 +4610,13 @@ paths: application/json: schema: $ref: '#/components/schemas/Envelope_WalletGet_' - /v0/projects/{project_id}/workspaces/{workspace_id}: - put: + /v0/projects/{project_id}/workspaces/{workspace_id}:move: + post: tags: - projects - workspaces summary: Move project to the workspace - operationId: replace_project_workspace + operationId: move_project_to_workspace parameters: - name: project_id in: path @@ -7189,268 +7093,6 @@ components: required: - tag title: CheckpointNew - ClusterAccessRights: - properties: - read: - type: boolean - title: Read - description: allows to run pipelines on that cluster - write: - type: boolean - title: Write - description: allows to modify the cluster - delete: - type: boolean - title: Delete - description: allows to delete a cluster - additionalProperties: false - type: object - required: - - read - - write - - delete - title: ClusterAccessRights - ClusterCreate: - properties: - name: - type: string - title: Name - description: The human readable name of the cluster - description: - anyOf: - - type: string - - type: 'null' - 
title: Description - type: - $ref: '#/components/schemas/ClusterTypeInModel' - owner: - anyOf: - - type: integer - exclusiveMinimum: true - minimum: 0 - - type: 'null' - title: Owner - thumbnail: - anyOf: - - type: string - maxLength: 2083 - minLength: 1 - format: uri - - type: 'null' - title: Thumbnail - description: url to the image describing this cluster - endpoint: - type: string - minLength: 1 - format: uri - title: Endpoint - authentication: - oneOf: - - $ref: '#/components/schemas/SimpleAuthentication' - - $ref: '#/components/schemas/KerberosAuthentication' - - $ref: '#/components/schemas/JupyterHubTokenAuthentication' - title: Authentication - discriminator: - propertyName: type - mapping: - jupyterhub: '#/components/schemas/JupyterHubTokenAuthentication' - kerberos: '#/components/schemas/KerberosAuthentication' - simple: '#/components/schemas/SimpleAuthentication' - accessRights: - additionalProperties: - $ref: '#/components/schemas/ClusterAccessRights' - type: object - title: Accessrights - type: object - required: - - name - - type - - endpoint - - authentication - title: ClusterCreate - ClusterDetails: - properties: - scheduler: - $ref: '#/components/schemas/Scheduler' - description: This contains dask scheduler information given by the underlying - dask library - dashboardLink: - type: string - minLength: 1 - format: uri - title: Dashboardlink - description: Link to this scheduler's dashboard - type: object - required: - - scheduler - - dashboardLink - title: ClusterDetails - ClusterGet: - properties: - name: - type: string - title: Name - description: The human readable name of the cluster - description: - anyOf: - - type: string - - type: 'null' - title: Description - type: - $ref: '#/components/schemas/ClusterTypeInModel' - owner: - type: integer - exclusiveMinimum: true - title: Owner - minimum: 0 - thumbnail: - anyOf: - - type: string - maxLength: 2083 - minLength: 1 - format: uri - - type: 'null' - title: Thumbnail - description: url to the image describing this cluster - endpoint: - type: string - minLength: 1 - format: uri - title: Endpoint - authentication: - oneOf: - - $ref: '#/components/schemas/SimpleAuthentication' - - $ref: '#/components/schemas/KerberosAuthentication' - - $ref: '#/components/schemas/JupyterHubTokenAuthentication' - - $ref: '#/components/schemas/NoAuthentication' - - $ref: '#/components/schemas/TLSAuthentication' - title: Authentication - description: Dask gateway authentication - discriminator: - propertyName: type - mapping: - jupyterhub: '#/components/schemas/JupyterHubTokenAuthentication' - kerberos: '#/components/schemas/KerberosAuthentication' - none: '#/components/schemas/NoAuthentication' - simple: '#/components/schemas/SimpleAuthentication' - tls: '#/components/schemas/TLSAuthentication' - accessRights: - additionalProperties: - $ref: '#/components/schemas/ClusterAccessRights' - type: object - title: Accessrights - default: {} - id: - type: integer - minimum: 0 - title: Id - description: The cluster ID - type: object - required: - - name - - type - - owner - - endpoint - - authentication - - id - title: ClusterGet - ClusterPatch: - properties: - name: - anyOf: - - type: string - - type: 'null' - title: Name - description: - anyOf: - - type: string - - type: 'null' - title: Description - type: - anyOf: - - $ref: '#/components/schemas/ClusterTypeInModel' - - type: 'null' - owner: - anyOf: - - type: integer - exclusiveMinimum: true - minimum: 0 - - type: 'null' - title: Owner - thumbnail: - anyOf: - - type: string - maxLength: 2083 - 
minLength: 1 - format: uri - - type: 'null' - title: Thumbnail - endpoint: - anyOf: - - type: string - minLength: 1 - format: uri - - type: 'null' - title: Endpoint - authentication: - anyOf: - - oneOf: - - $ref: '#/components/schemas/SimpleAuthentication' - - $ref: '#/components/schemas/KerberosAuthentication' - - $ref: '#/components/schemas/JupyterHubTokenAuthentication' - discriminator: - propertyName: type - mapping: - jupyterhub: '#/components/schemas/JupyterHubTokenAuthentication' - kerberos: '#/components/schemas/KerberosAuthentication' - simple: '#/components/schemas/SimpleAuthentication' - - type: 'null' - title: Authentication - accessRights: - anyOf: - - additionalProperties: - $ref: '#/components/schemas/ClusterAccessRights' - type: object - - type: 'null' - title: Accessrights - type: object - title: ClusterPatch - ClusterPing: - properties: - endpoint: - type: string - minLength: 1 - format: uri - title: Endpoint - authentication: - oneOf: - - $ref: '#/components/schemas/SimpleAuthentication' - - $ref: '#/components/schemas/KerberosAuthentication' - - $ref: '#/components/schemas/JupyterHubTokenAuthentication' - - $ref: '#/components/schemas/NoAuthentication' - - $ref: '#/components/schemas/TLSAuthentication' - title: Authentication - description: Dask gateway authentication - discriminator: - propertyName: type - mapping: - jupyterhub: '#/components/schemas/JupyterHubTokenAuthentication' - kerberos: '#/components/schemas/KerberosAuthentication' - none: '#/components/schemas/NoAuthentication' - simple: '#/components/schemas/SimpleAuthentication' - tls: '#/components/schemas/TLSAuthentication' - type: object - required: - - endpoint - - authentication - title: ClusterPing - ClusterTypeInModel: - type: string - enum: - - AWS - - ON_PREMISE - - ON_DEMAND - title: ClusterTypeInModel CodePageParams: properties: message: @@ -7499,17 +7141,89 @@ components: required: - version title: CompatibleService + ComputationGet: + properties: + id: + type: string + format: uuid + title: Id + description: the id of the computation task + state: + $ref: '#/components/schemas/RunningState' + description: the state of the computational task + result: + anyOf: + - type: string + - type: 'null' + title: Result + description: the result of the computational task + pipeline_details: + $ref: '#/components/schemas/PipelineDetails' + description: the details of the generated pipeline + iteration: + anyOf: + - type: integer + exclusiveMinimum: true + minimum: 0 + - type: 'null' + title: Iteration + description: the iteration id of the computation task (none if no task ran + yet) + started: + anyOf: + - type: string + format: date-time + - type: 'null' + title: Started + description: the timestamp when the computation was started or None if not + started yet + stopped: + anyOf: + - type: string + format: date-time + - type: 'null' + title: Stopped + description: the timestamp when the computation was stopped or None if not + started nor stopped yet + submitted: + anyOf: + - type: string + format: date-time + - type: 'null' + title: Submitted + description: task last modification timestamp or None if the there is no + task + url: + type: string + minLength: 1 + format: uri + title: Url + description: the link where to get the status of the task + stop_url: + anyOf: + - type: string + minLength: 1 + format: uri + - type: 'null' + title: Stop Url + description: the link where to stop the task + type: object + required: + - id + - state + - pipeline_details + - iteration + - started + - stopped + - submitted + 
- url + title: ComputationGet ComputationStart: properties: force_restart: type: boolean title: Force Restart default: false - cluster_id: - type: integer - minimum: 0 - title: Cluster Id - default: 0 subgraph: items: type: string @@ -7519,18 +7233,6 @@ components: default: [] type: object title: ComputationStart - ComputationTaskGet: - properties: - cluster_id: - anyOf: - - type: integer - minimum: 0 - - type: 'null' - title: Cluster Id - type: object - required: - - cluster_id - title: ComputationTaskGet ConnectServiceToPricingPlanBodyParams: properties: serviceKey: @@ -7704,13 +7406,6 @@ components: example: dataset_id: N:id-aaaa display_name: simcore-testing - DictModel_str_Annotated_float__Gt__: - additionalProperties: - type: number - exclusiveMinimum: true - minimum: 0.0 - type: object - title: DictModel[str, Annotated[float, Gt]] DownloadLink: properties: downloadLink: @@ -7807,11 +7502,11 @@ components: title: Error type: object title: Envelope[CheckpointApiModel] - Envelope_ClusterDetails_: + Envelope_ComputationGet_: properties: data: anyOf: - - $ref: '#/components/schemas/ClusterDetails' + - $ref: '#/components/schemas/ComputationGet' - type: 'null' error: anyOf: @@ -7819,33 +7514,7 @@ components: - type: 'null' title: Error type: object - title: Envelope[ClusterDetails] - Envelope_ClusterGet_: - properties: - data: - anyOf: - - $ref: '#/components/schemas/ClusterGet' - - type: 'null' - error: - anyOf: - - {} - - type: 'null' - title: Error - type: object - title: Envelope[ClusterGet] - Envelope_ComputationTaskGet_: - properties: - data: - anyOf: - - $ref: '#/components/schemas/ComputationTaskGet' - - type: 'null' - error: - anyOf: - - {} - - type: 'null' - title: Error - type: object - title: Envelope[ComputationTaskGet] + title: Envelope[ComputationGet] Envelope_FileMetaDataGet_: properties: data: @@ -8750,22 +8419,6 @@ components: title: Error type: object title: Envelope[list[Announcement]] - Envelope_list_ClusterGet__: - properties: - data: - anyOf: - - items: - $ref: '#/components/schemas/ClusterGet' - type: array - - type: 'null' - title: Data - error: - anyOf: - - {} - - type: 'null' - title: Error - type: object - title: Envelope[list[ClusterGet]] Envelope_list_DatasetMetaData__: properties: data: @@ -10225,35 +9878,6 @@ components: additionalProperties: false type: object title: InvitationInfo - JupyterHubTokenAuthentication: - properties: - type: - type: string - enum: - - jupyterhub - const: jupyterhub - title: Type - default: jupyterhub - api_token: - type: string - title: Api Token - additionalProperties: false - type: object - required: - - api_token - title: JupyterHubTokenAuthentication - KerberosAuthentication: - properties: - type: - type: string - enum: - - kerberos - const: kerberos - title: Type - default: kerberos - additionalProperties: false - type: object - title: KerberosAuthentication Limits: properties: cpus: @@ -10452,18 +10076,6 @@ components: description: Some foundation gid: '16' label: Blue Fundation - NoAuthentication: - properties: - type: - type: string - enum: - - none - const: none - title: Type - default: none - additionalProperties: false - type: object - title: NoAuthentication Node-Input: properties: key: @@ -11465,6 +11077,39 @@ components: - phone - code title: PhoneConfirmationBody + PipelineDetails: + properties: + adjacency_list: + additionalProperties: + items: + type: string + format: uuid + type: array + type: object + title: Adjacency List + description: 'The adjacency list of the current pipeline in terms of {NodeID: + 
[successor NodeID]}' + progress: + anyOf: + - type: number + maximum: 1.0 + minimum: 0.0 + - type: 'null' + title: Progress + description: the progress of the pipeline (None if there are no computational + tasks) + node_states: + additionalProperties: + $ref: '#/components/schemas/NodeState' + type: object + title: Node States + description: The states of each of the computational nodes in the pipeline + type: object + required: + - adjacency_list + - progress + - node_states + title: PipelineDetails PortLink: properties: nodeUuid: @@ -12877,23 +12522,6 @@ components: SEE StateType for task state' - Scheduler: - properties: - status: - type: string - title: Status - description: The running status of the scheduler - workers: - anyOf: - - additionalProperties: - $ref: '#/components/schemas/Worker' - type: object - - type: 'null' - title: Workers - type: object - required: - - status - title: Scheduler SelectBox: properties: structure: @@ -13419,29 +13047,6 @@ components: - path title: SimCoreFileLink description: I/O port type to hold a link to a file in simcore S3 storage - SimpleAuthentication: - properties: - type: - type: string - enum: - - simple - const: simple - title: Type - default: simple - username: - type: string - title: Username - password: - type: string - format: password - title: Password - writeOnly: true - additionalProperties: false - type: object - required: - - username - - password - title: SimpleAuthentication Slideshow: properties: position: @@ -13610,34 +13215,6 @@ components: additionalProperties: true type: object title: StudyUI - TLSAuthentication: - properties: - type: - type: string - enum: - - tls - const: tls - title: Type - default: tls - tls_ca_file: - type: string - format: path - title: Tls Ca File - tls_client_cert: - type: string - format: path - title: Tls Client Cert - tls_client_key: - type: string - format: path - title: Tls Client Key - additionalProperties: false - type: object - required: - - tls_ca_file - - tls_client_cert - - tls_client_key - title: TLSAuthentication TableSynchronisation: properties: dry_run: @@ -13802,22 +13379,6 @@ components: title: Priority type: object title: TagUpdate - TaskCounts: - properties: - error: - type: integer - title: Error - default: 0 - memory: - type: integer - title: Memory - default: 0 - executing: - type: integer - title: Executing - default: 0 - type: object - title: TaskCounts TaskGet: properties: task_id: @@ -14167,12 +13728,6 @@ components: - number - e_tag title: UploadedPart - UsedResources: - additionalProperties: - type: number - minimum: 0.0 - type: object - title: UsedResources UserNotification: properties: user_id: @@ -14663,58 +14218,6 @@ components: - url - checkpoint_url title: WorkbenchViewApiModel - Worker: - properties: - id: - type: string - title: Id - name: - type: string - title: Name - resources: - $ref: '#/components/schemas/DictModel_str_Annotated_float__Gt__' - used_resources: - $ref: '#/components/schemas/UsedResources' - memory_limit: - type: integer - minimum: 0 - title: Memory Limit - metrics: - $ref: '#/components/schemas/WorkerMetrics' - type: object - required: - - id - - name - - resources - - used_resources - - memory_limit - - metrics - title: Worker - WorkerMetrics: - properties: - cpu: - type: number - title: Cpu - description: consumed % of cpus - memory: - type: integer - minimum: 0 - title: Memory - description: consumed memory - num_fds: - type: integer - title: Num Fds - description: consumed file descriptors - task_counts: - $ref: 
'#/components/schemas/TaskCounts' - description: task details - type: object - required: - - cpu - - memory - - num_fds - - task_counts - title: WorkerMetrics WorkspaceCreateBodyParams: properties: name: diff --git a/services/web/server/src/simcore_service_webserver/application.py b/services/web/server/src/simcore_service_webserver/application.py index 9e6e4f393d6..79477051ddb 100644 --- a/services/web/server/src/simcore_service_webserver/application.py +++ b/services/web/server/src/simcore_service_webserver/application.py @@ -1,6 +1,7 @@ """ Main application """ + import logging from pprint import pformat from typing import Any @@ -14,7 +15,6 @@ from .api_keys.plugin import setup_api_keys from .application_settings import get_application_settings, setup_settings from .catalog.plugin import setup_catalog -from .clusters.plugin import setup_clusters from .db.plugin import setup_db from .db_listener.plugin import setup_db_listener from .diagnostics.plugin import setup_diagnostics, setup_profiling_middleware @@ -147,7 +147,6 @@ def create_application() -> web.Application: setup_publications(app) setup_studies_dispatcher(app) setup_exporter(app) - setup_clusters(app) # NOTE: *last* events app.on_startup.append(_welcome_banner) diff --git a/services/web/server/src/simcore_service_webserver/application_settings.py b/services/web/server/src/simcore_service_webserver/application_settings.py index e5aa008377a..ed4e519141b 100644 --- a/services/web/server/src/simcore_service_webserver/application_settings.py +++ b/services/web/server/src/simcore_service_webserver/application_settings.py @@ -268,7 +268,6 @@ class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): # These plugins only require (for the moment) an entry to toggle between enabled/disabled WEBSERVER_ANNOUNCEMENTS: bool = False WEBSERVER_API_KEYS: bool = True - WEBSERVER_CLUSTERS: bool = False WEBSERVER_DB_LISTENER: bool = True WEBSERVER_FOLDERS: bool = True WEBSERVER_GROUPS: bool = True @@ -370,7 +369,6 @@ def _get_disabled_public_plugins(self) -> list[str]: # TODO: more reliable definition of a "plugin" and whether it can be advertised or not # (extra var? e.g. Field( ... 
, x_advertise_plugin=True)) public_plugin_candidates: Final = { - "WEBSERVER_CLUSTERS", "WEBSERVER_EXPORTER", "WEBSERVER_FOLDERS", "WEBSERVER_META_MODELING", diff --git a/services/web/server/src/simcore_service_webserver/application_settings_utils.py b/services/web/server/src/simcore_service_webserver/application_settings_utils.py index 9843e84afdd..162a927e0ad 100644 --- a/services/web/server/src/simcore_service_webserver/application_settings_utils.py +++ b/services/web/server/src/simcore_service_webserver/application_settings_utils.py @@ -157,7 +157,6 @@ def convert_to_app_config(app_settings: ApplicationSettings) -> dict[str, Any]: app_settings.WEBSERVER_ACTIVITY, "PROMETHEUS_VTAG", None ), }, - "clusters": {"enabled": app_settings.WEBSERVER_CLUSTERS}, "computation": {"enabled": app_settings.is_enabled("WEBSERVER_NOTIFICATIONS")}, "diagnostics": {"enabled": app_settings.is_enabled("WEBSERVER_DIAGNOSTICS")}, "director-v2": {"enabled": app_settings.is_enabled("WEBSERVER_DIRECTOR_V2")}, @@ -310,7 +309,6 @@ def _set_if_disabled(field_name, section): _set_if_disabled("WEBSERVER_STATICWEB", section) for settings_name in ( - "WEBSERVER_CLUSTERS", "WEBSERVER_GARBAGE_COLLECTOR", "WEBSERVER_GROUPS", "WEBSERVER_META_MODELING", diff --git a/services/web/server/src/simcore_service_webserver/clusters/__init__.py b/services/web/server/src/simcore_service_webserver/clusters/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/services/web/server/src/simcore_service_webserver/clusters/_handlers.py b/services/web/server/src/simcore_service_webserver/clusters/_handlers.py deleted file mode 100644 index 0df3dd792a2..00000000000 --- a/services/web/server/src/simcore_service_webserver/clusters/_handlers.py +++ /dev/null @@ -1,200 +0,0 @@ -import functools -import logging - -from aiohttp import web -from models_library.api_schemas_webserver.clusters import ( - ClusterCreate, - ClusterDetails, - ClusterGet, - ClusterPatch, - ClusterPathParams, - ClusterPing, -) -from pydantic import TypeAdapter -from servicelib.aiohttp import status -from servicelib.aiohttp.requests_validation import ( - parse_request_body_as, - parse_request_path_parameters_as, -) -from servicelib.aiohttp.typing_extension import Handler - -from .._meta import api_version_prefix -from ..director_v2 import api as director_v2_api -from ..director_v2.exceptions import ( - ClusterAccessForbidden, - ClusterNotFoundError, - ClusterPingError, - DirectorServiceError, -) -from ..login.decorators import login_required -from ..models import RequestContext -from ..security.decorators import permission_required -from ..utils_aiohttp import envelope_json_response - -_logger = logging.getLogger(__name__) - - -def _handle_cluster_exceptions(handler: Handler): - # maps API exceptions to HTTP errors - @functools.wraps(handler) - async def wrapper(request: web.Request) -> web.StreamResponse: - try: - return await handler(request) - - except ClusterPingError as exc: - raise web.HTTPUnprocessableEntity(reason=f"{exc}") from exc - - except ClusterNotFoundError as exc: - raise web.HTTPNotFound(reason=f"{exc}") from exc - - except ClusterAccessForbidden as exc: - raise web.HTTPForbidden(reason=f"{exc}") from exc - - except DirectorServiceError as exc: - raise web.HTTPServiceUnavailable(reason=f"{exc}") from exc - - return wrapper - - -# -# API handlers -# - -routes = web.RouteTableDef() - - -@routes.post(f"/{api_version_prefix}/clusters", name="create_cluster") -@login_required -@permission_required("clusters.create") 
-@_handle_cluster_exceptions -async def create_cluster(request: web.Request) -> web.Response: - req_ctx = RequestContext.model_validate(request) - new_cluster = await parse_request_body_as(ClusterCreate, request) - - created_cluster = await director_v2_api.create_cluster( - app=request.app, - user_id=req_ctx.user_id, - new_cluster=new_cluster, - ) - return envelope_json_response(created_cluster, web.HTTPCreated) - - -@routes.get(f"/{api_version_prefix}/clusters", name="list_clusters") -@login_required -@permission_required("clusters.read") -@_handle_cluster_exceptions -async def list_clusters(request: web.Request) -> web.Response: - req_ctx = RequestContext.model_validate(request) - - clusters = await director_v2_api.list_clusters( - app=request.app, - user_id=req_ctx.user_id, - ) - assert TypeAdapter(list[ClusterGet]).validate_python(clusters) is not None # nosec - return envelope_json_response(clusters) - - -@routes.get(f"/{api_version_prefix}/clusters/{{cluster_id}}", name="get_cluster") -@login_required -@permission_required("clusters.read") -@_handle_cluster_exceptions -async def get_cluster(request: web.Request) -> web.Response: - req_ctx = RequestContext.model_validate(request) - path_params = parse_request_path_parameters_as(ClusterPathParams, request) - - cluster = await director_v2_api.get_cluster( - app=request.app, - user_id=req_ctx.user_id, - cluster_id=path_params.cluster_id, - ) - assert ClusterGet.model_validate(cluster) is not None # nosec - return envelope_json_response(cluster) - - -@routes.patch(f"/{api_version_prefix}/clusters/{{cluster_id}}", name="update_cluster") -@login_required -@permission_required("clusters.write") -@_handle_cluster_exceptions -async def update_cluster(request: web.Request) -> web.Response: - req_ctx = RequestContext.model_validate(request) - path_params = parse_request_path_parameters_as(ClusterPathParams, request) - cluster_patch = await parse_request_body_as(ClusterPatch, request) - - updated_cluster = await director_v2_api.update_cluster( - app=request.app, - user_id=req_ctx.user_id, - cluster_id=path_params.cluster_id, - cluster_patch=cluster_patch, - ) - - assert ClusterGet.model_validate(updated_cluster) is not None # nosec - return envelope_json_response(updated_cluster) - - -@routes.delete(f"/{api_version_prefix}/clusters/{{cluster_id}}", name="delete_cluster") -@login_required -@permission_required("clusters.delete") -@_handle_cluster_exceptions -async def delete_cluster(request: web.Request) -> web.Response: - req_ctx = RequestContext.model_validate(request) - path_params = parse_request_path_parameters_as(ClusterPathParams, request) - - await director_v2_api.delete_cluster( - app=request.app, - user_id=req_ctx.user_id, - cluster_id=path_params.cluster_id, - ) - return web.json_response(status=status.HTTP_204_NO_CONTENT) - - -@routes.get( - f"/{api_version_prefix}/clusters/{{cluster_id}}/details", - name="get_cluster_details", -) -@login_required -@permission_required("clusters.read") -@_handle_cluster_exceptions -async def get_cluster_details(request: web.Request) -> web.Response: - req_ctx = RequestContext.model_validate(request) - path_params = parse_request_path_parameters_as(ClusterPathParams, request) - - cluster_details = await director_v2_api.get_cluster_details( - app=request.app, - user_id=req_ctx.user_id, - cluster_id=path_params.cluster_id, - ) - assert ClusterDetails.model_validate(cluster_details) is not None # nosec - return envelope_json_response(cluster_details) - - 
-@routes.post(f"/{api_version_prefix}/clusters:ping", name="ping_cluster") -@login_required -@permission_required("clusters.read") -@_handle_cluster_exceptions -async def ping_cluster(request: web.Request) -> web.Response: - cluster_ping = await parse_request_body_as(ClusterPing, request) - - await director_v2_api.ping_cluster( - app=request.app, - cluster_ping=cluster_ping, - ) - return web.json_response(status=status.HTTP_204_NO_CONTENT) - - -@routes.post( - f"/{api_version_prefix}/clusters/{{cluster_id}}:ping", - name="ping_cluster_cluster_id", -) -@login_required -@permission_required("clusters.read") -@_handle_cluster_exceptions -async def ping_cluster_cluster_id(request: web.Request) -> web.Response: - req_ctx = RequestContext.model_validate(request) - path_params = parse_request_path_parameters_as(ClusterPathParams, request) - - await director_v2_api.ping_specific_cluster( - app=request.app, - user_id=req_ctx.user_id, - cluster_id=path_params.cluster_id, - ) - return web.json_response(status=status.HTTP_204_NO_CONTENT) diff --git a/services/web/server/src/simcore_service_webserver/clusters/plugin.py b/services/web/server/src/simcore_service_webserver/clusters/plugin.py deleted file mode 100644 index 59a406b731b..00000000000 --- a/services/web/server/src/simcore_service_webserver/clusters/plugin.py +++ /dev/null @@ -1,34 +0,0 @@ -""" clusters app module setup - - Allows a user to manage clusters depending on user group(s) access rights: - - create, modify, delete clusters - - monitor clusters - - send computational jobs to clusters - -""" -import logging - -from aiohttp import web -from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup - -from .._constants import APP_SETTINGS_KEY -from ..director_v2 import plugin as director_v2 -from . import _handlers - -_logger = logging.getLogger(__name__) - - -@app_module_setup( - "simcore_service_webserver.clusters", - ModuleCategory.ADDON, - settings_name="WEBSERVER_CLUSTERS", - logger=_logger, -) -def setup_clusters(app: web.Application): - director_v2.setup_director_v2(app) - assert app[APP_SETTINGS_KEY].WEBSERVER_CLUSTERS # nosec - - app.add_routes(_handlers.routes) - - -__all__: tuple[str, ...] 
= ("setup_clusters",) diff --git a/services/web/server/src/simcore_service_webserver/director_v2/_core_computations.py b/services/web/server/src/simcore_service_webserver/director_v2/_core_computations.py index c034f93a660..7785f7936d2 100644 --- a/services/web/server/src/simcore_service_webserver/director_v2/_core_computations.py +++ b/services/web/server/src/simcore_service_webserver/director_v2/_core_computations.py @@ -9,24 +9,14 @@ from uuid import UUID from aiohttp import web -from common_library.serialization import model_dump_with_secrets -from models_library.api_schemas_directorv2.clusters import ( - ClusterCreate, - ClusterDetails, - ClusterGet, - ClusterPatch, - ClusterPing, -) from models_library.api_schemas_directorv2.comp_tasks import ( TasksOutputs, TasksSelection, ) -from models_library.clusters import ClusterID from models_library.projects import ProjectID from models_library.projects_pipeline import ComputationTask from models_library.users import UserID from models_library.utils.fastapi_encoders import jsonable_encoder -from pydantic import TypeAdapter from pydantic.types import PositiveInt from servicelib.aiohttp import status from servicelib.logging_utils import log_decorator @@ -34,14 +24,7 @@ from ..products.api import get_product from ._api_utils import get_wallet_info from ._core_base import DataType, request_director_v2 -from .exceptions import ( - ClusterAccessForbidden, - ClusterDefinedPingError, - ClusterNotFoundError, - ClusterPingError, - ComputationNotFoundError, - DirectorServiceError, -) +from .exceptions import ComputationNotFoundError, DirectorServiceError from .settings import DirectorV2Settings, get_plugin_settings _logger = logging.getLogger(__name__) @@ -229,200 +212,6 @@ async def delete_pipeline( ) -# -# CLUSTER RESOURCE ---------------------- -# - - -@log_decorator(logger=_logger) -async def create_cluster( - app: web.Application, user_id: UserID, new_cluster: ClusterCreate -) -> DataType: - settings: DirectorV2Settings = get_plugin_settings(app) - cluster = await request_director_v2( - app, - "POST", - url=(settings.base_url / "clusters").update_query(user_id=int(user_id)), - expected_status=web.HTTPCreated, - data=model_dump_with_secrets( - new_cluster, show_secrets=True, by_alias=True, exclude_unset=True - ), - ) - assert isinstance(cluster, dict) # nosec - assert ClusterGet.model_validate(cluster) is not None # nosec - return cluster - - -async def list_clusters(app: web.Application, user_id: UserID) -> list[DataType]: - settings: DirectorV2Settings = get_plugin_settings(app) - clusters = await request_director_v2( - app, - "GET", - url=(settings.base_url / "clusters").update_query(user_id=int(user_id)), - expected_status=web.HTTPOk, - ) - - assert isinstance(clusters, list) # nosec - assert TypeAdapter(list[ClusterGet]).validate_python(clusters) is not None # nosec - return clusters - - -async def get_cluster( - app: web.Application, user_id: UserID, cluster_id: ClusterID -) -> DataType: - settings: DirectorV2Settings = get_plugin_settings(app) - cluster = await request_director_v2( - app, - "GET", - url=(settings.base_url / f"clusters/{cluster_id}").update_query( - user_id=int(user_id) - ), - expected_status=web.HTTPOk, - on_error={ - status.HTTP_404_NOT_FOUND: ( - ClusterNotFoundError, - {"cluster_id": cluster_id}, - ), - status.HTTP_403_FORBIDDEN: ( - ClusterAccessForbidden, - {"cluster_id": cluster_id}, - ), - }, - ) - - assert isinstance(cluster, dict) # nosec - assert ClusterGet.model_validate(cluster) is not None # nosec - return 
cluster - - -async def get_cluster_details( - app: web.Application, user_id: UserID, cluster_id: ClusterID -) -> DataType: - settings: DirectorV2Settings = get_plugin_settings(app) - - cluster = await request_director_v2( - app, - "GET", - url=(settings.base_url / f"clusters/{cluster_id}/details").update_query( - user_id=int(user_id) - ), - expected_status=web.HTTPOk, - on_error={ - status.HTTP_404_NOT_FOUND: ( - ClusterNotFoundError, - {"cluster_id": cluster_id}, - ), - status.HTTP_403_FORBIDDEN: ( - ClusterAccessForbidden, - {"cluster_id": cluster_id}, - ), - }, - ) - assert isinstance(cluster, dict) # nosec - assert ClusterDetails.model_validate(cluster) is not None # nosec - return cluster - - -async def update_cluster( - app: web.Application, - user_id: UserID, - cluster_id: ClusterID, - cluster_patch: ClusterPatch, -) -> DataType: - settings: DirectorV2Settings = get_plugin_settings(app) - cluster = await request_director_v2( - app, - "PATCH", - url=(settings.base_url / f"clusters/{cluster_id}").update_query( - user_id=int(user_id) - ), - expected_status=web.HTTPOk, - data=model_dump_with_secrets( - cluster_patch, show_secrets=True, by_alias=True, exclude_none=True - ), - on_error={ - status.HTTP_404_NOT_FOUND: ( - ClusterNotFoundError, - {"cluster_id": cluster_id}, - ), - status.HTTP_403_FORBIDDEN: ( - ClusterAccessForbidden, - {"cluster_id": cluster_id}, - ), - }, - ) - - assert isinstance(cluster, dict) # nosec - assert ClusterGet.model_validate(cluster) is not None # nosec - return cluster - - -async def delete_cluster( - app: web.Application, user_id: UserID, cluster_id: ClusterID -) -> None: - settings: DirectorV2Settings = get_plugin_settings(app) - await request_director_v2( - app, - "DELETE", - url=(settings.base_url / f"clusters/{cluster_id}").update_query( - user_id=int(user_id) - ), - expected_status=web.HTTPNoContent, - on_error={ - status.HTTP_404_NOT_FOUND: ( - ClusterNotFoundError, - {"cluster_id": cluster_id}, - ), - status.HTTP_403_FORBIDDEN: ( - ClusterAccessForbidden, - {"cluster_id": cluster_id}, - ), - }, - ) - - -async def ping_cluster(app: web.Application, cluster_ping: ClusterPing) -> None: - settings: DirectorV2Settings = get_plugin_settings(app) - await request_director_v2( - app, - "POST", - url=settings.base_url / "clusters:ping", - expected_status=web.HTTPNoContent, - data=model_dump_with_secrets( - cluster_ping, - show_secrets=True, - by_alias=True, - exclude_unset=True, - ), - on_error={ - status.HTTP_422_UNPROCESSABLE_ENTITY: ( - ClusterPingError, - {"endpoint": f"{cluster_ping.endpoint}"}, - ) - }, - ) - - -async def ping_specific_cluster( - app: web.Application, user_id: UserID, cluster_id: ClusterID -) -> None: - settings: DirectorV2Settings = get_plugin_settings(app) - await request_director_v2( - app, - "POST", - url=(settings.base_url / f"clusters/{cluster_id}:ping").update_query( - user_id=int(user_id) - ), - expected_status=web.HTTPNoContent, - on_error={ - status.HTTP_422_UNPROCESSABLE_ENTITY: ( - ClusterDefinedPingError, - {"cluster_id": f"{cluster_id}"}, - ) - }, - ) - - # # COMPUTATIONS TASKS RESOURCE ---------------------- # diff --git a/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py b/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py index 1a999b35c0e..aa3914ee6df 100644 --- a/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py +++ b/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py @@ -4,12 +4,10 @@ from aiohttp import web from 
common_library.json_serialization import json_dumps +from models_library.api_schemas_directorv2.comp_tasks import ComputationGet from models_library.api_schemas_webserver.computations import ComputationStart -from models_library.clusters import ClusterID from models_library.projects import ProjectID -from models_library.users import UserID from pydantic import BaseModel, Field, TypeAdapter, ValidationError -from pydantic.types import NonNegativeInt from servicelib.aiohttp import status from servicelib.aiohttp.rest_responses import create_http_error, exception_to_response from servicelib.aiohttp.web_exceptions_extension import get_http_error_class_or_none @@ -69,7 +67,6 @@ async def start_computation(request: web.Request) -> web.Response: subgraph: set[str] = set() force_restart: bool = False # NOTE: deprecate this entry - cluster_id: NonNegativeInt = 0 if request.can_read_body: body = await request.json() @@ -79,7 +76,6 @@ async def start_computation(request: web.Request) -> web.Response: subgraph = body.get("subgraph", []) force_restart = bool(body.get("force_restart", force_restart)) - cluster_id = body.get("cluster_id") simcore_user_agent = request.headers.get( X_SIMCORE_USER_AGENT, UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE @@ -106,9 +102,6 @@ async def start_computation(request: web.Request) -> web.Response: "start_pipeline": True, "subgraph": list(subgraph), # sets are not natively json serializable "force_restart": force_restart, - "cluster_id": ( - None if group_properties.use_on_demand_clusters else cluster_id - ), "simcore_user_agent": simcore_user_agent, "use_on_demand_clusters": group_properties.use_on_demand_clusters, "wallet_info": wallet_info, @@ -212,10 +205,6 @@ async def stop_computation(request: web.Request) -> web.Response: ) -class ComputationTaskGet(BaseModel): - cluster_id: ClusterID | None - - @routes.get(f"/{VTAG}/computations/{{project_id}}", name="get_computation") @login_required @permission_required("services.pipeline.*") @@ -225,7 +214,7 @@ async def get_computation(request: web.Request) -> web.Response: run_policy = get_project_run_policy(request.app) assert run_policy # nosec - user_id = UserID(request[RQT_USERID_KEY]) + user_id = request[RQT_USERID_KEY] project_id = ProjectID(request.match_info["project_id"]) try: @@ -233,7 +222,7 @@ async def get_computation(request: web.Request) -> web.Response: request, project_id ) _logger.debug("Project %s will get %d variants", project_id, len(project_ids)) - list_computation_tasks = TypeAdapter(list[ComputationTaskGet]).validate_python( + list_computation_tasks = TypeAdapter(list[ComputationGet]).validate_python( await asyncio.gather( *[ computations.get(project_id=pid, user_id=user_id) @@ -242,12 +231,7 @@ async def get_computation(request: web.Request) -> web.Response: ), ) assert len(list_computation_tasks) == len(project_ids) # nosec - # NOTE: until changed all the versions of a meta project shall use the same cluster - # this should fail the day that changes - assert all( - c.cluster_id == list_computation_tasks[0].cluster_id - for c in list_computation_tasks - ) + return web.json_response( data={"data": list_computation_tasks[0].model_dump(by_alias=True)}, dumps=json_dumps, diff --git a/services/web/server/src/simcore_service_webserver/director_v2/_models.py b/services/web/server/src/simcore_service_webserver/director_v2/_models.py deleted file mode 100644 index 966229c4221..00000000000 --- a/services/web/server/src/simcore_service_webserver/director_v2/_models.py +++ /dev/null @@ -1,88 +0,0 @@ -from 
models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_USER_RIGHTS, - BaseCluster, - ClusterAccessRights, - ClusterTypeInModel, - ExternalClusterAuthentication, -) -from models_library.users import GroupID -from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field, field_validator -from pydantic.networks import AnyUrl, HttpUrl -from simcore_postgres_database.models.clusters import ClusterType - - -class ClusterPing(BaseModel): - endpoint: AnyHttpUrl - authentication: ExternalClusterAuthentication - - -_DEFAULT_THUMBNAILS = { - f"{ClusterTypeInModel.AWS}": "https://upload.wikimedia.org/wikipedia/commons/thumb/9/93/Amazon_Web_Services_Logo.svg/250px-Amazon_Web_Services_Logo.svg.png", - f"{ClusterTypeInModel.ON_PREMISE}": "https://upload.wikimedia.org/wikipedia/commons/thumb/a/ac/Crystal_Clear_app_network_local.png/120px-Crystal_Clear_app_network_local.png", -} - - -class ClusterCreate(BaseCluster): - owner: GroupID | None # type: ignore[assignment] - authentication: ExternalClusterAuthentication - access_rights: dict[GroupID, ClusterAccessRights] = Field( - alias="accessRights", default_factory=dict - ) - - @field_validator("thumbnail", mode="before") - @classmethod - def set_default_thumbnail_if_empty(cls, v, values): - if v is None and ( - cluster_type := values.get("type", f"{ClusterTypeInModel.ON_PREMISE}") - ): - return _DEFAULT_THUMBNAILS[f"{cluster_type}"] - return v - - model_config = ConfigDict( - json_schema_extra={ - "examples": [ - { - "name": "My awesome cluster", - "type": f"{ClusterType.ON_PREMISE}", # can use also values from equivalent enum - "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - }, - { - "name": "My AWS cluster", - "description": "a AWS cluster administered by me", - "type": f"{ClusterType.AWS}", - "owner": 154, - "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - "access_rights": { - 154: CLUSTER_ADMIN_RIGHTS.model_dump(), # type:ignore[dict-item] - 12: CLUSTER_MANAGER_RIGHTS.model_dump(), # type:ignore[dict-item] - 7899: CLUSTER_USER_RIGHTS.model_dump(), # type:ignore[dict-item] - }, - }, - ] - } - ) - - -class ClusterPatch(BaseCluster): - name: str | None # type: ignore[assignment] - description: str | None - type: ClusterType | None # type: ignore[assignment] - owner: GroupID | None # type: ignore[assignment] - thumbnail: HttpUrl | None - endpoint: AnyUrl | None # type: ignore[assignment] - authentication: ExternalClusterAuthentication | None # type: ignore[assignment] - access_rights: dict[GroupID, ClusterAccessRights] | None = Field( # type: ignore[assignment] - alias="accessRights" - ) diff --git a/services/web/server/src/simcore_service_webserver/director_v2/api.py b/services/web/server/src/simcore_service_webserver/director_v2/api.py index 4d1efd822f6..2de6b49e4a2 100644 --- a/services/web/server/src/simcore_service_webserver/director_v2/api.py +++ b/services/web/server/src/simcore_service_webserver/director_v2/api.py @@ -3,27 +3,18 @@ PLEASE avoid importing from any other module to access this plugin's functionality """ - from ._abc import ( AbstractProjectRunPolicy, get_project_run_policy, set_project_run_policy, ) from ._core_computations import ( - create_cluster, create_or_update_pipeline, - delete_cluster, delete_pipeline, get_batch_tasks_outputs, - get_cluster, - 
get_cluster_details, get_computation_task, is_pipeline_running, - list_clusters, - ping_cluster, - ping_specific_cluster, stop_pipeline, - update_cluster, ) from ._core_dynamic_services import ( get_project_inactivity, @@ -34,40 +25,26 @@ update_dynamic_service_networks_in_project, ) from ._core_utils import is_healthy -from .exceptions import ( - ClusterAccessForbidden, - ClusterNotFoundError, - DirectorServiceError, -) +from .exceptions import DirectorServiceError # director-v2 module internal API __all__: tuple[str, ...] = ( "AbstractProjectRunPolicy", - "ClusterAccessForbidden", - "ClusterNotFoundError", - "create_cluster", "create_or_update_pipeline", - "delete_cluster", "delete_pipeline", "DirectorServiceError", "get_batch_tasks_outputs", - "get_cluster_details", - "get_cluster", "get_computation_task", "get_project_inactivity", "get_project_run_policy", "is_healthy", "is_pipeline_running", - "list_clusters", "list_dynamic_services", - "ping_cluster", - "ping_specific_cluster", "request_retrieve_dyn_service", "restart_dynamic_service", "retrieve", "set_project_run_policy", "stop_pipeline", - "update_cluster", "update_dynamic_service_networks_in_project", ) # nopycln: file diff --git a/services/web/server/src/simcore_service_webserver/director_v2/exceptions.py b/services/web/server/src/simcore_service_webserver/director_v2/exceptions.py index 8fe3a2a0478..2301815f754 100644 --- a/services/web/server/src/simcore_service_webserver/director_v2/exceptions.py +++ b/services/web/server/src/simcore_service_webserver/director_v2/exceptions.py @@ -20,29 +20,5 @@ class ComputationNotFoundError(DirectorServiceError): msg_template = "Computation '{project_id}' not found" -class ClusterNotFoundError(DirectorServiceError): - """Cluster was not found in director-v2""" - - msg_template = "Cluster '{cluster_id}' not found" - - -class ClusterAccessForbidden(DirectorServiceError): - """Cluster access is forbidden""" - - msg_template = "Cluster '{cluster_id}' access forbidden!" - - -class ClusterPingError(DirectorServiceError): - """Cluster ping failed""" - - msg_template = "Connection to cluster in '{endpoint}' failed, received '{reason}'" - - -class ClusterDefinedPingError(DirectorServiceError): - """Cluster ping failed""" - - msg_template = "Connection to cluster '{cluster_id}' failed, received '{reason}'" - - class ServiceWaitingForManualIntervention(DirectorServiceError): msg_template = "Service '{service_uuid}' is waiting for user manual intervention" diff --git a/services/web/server/src/simcore_service_webserver/folders/_folders_db.py b/services/web/server/src/simcore_service_webserver/folders/_folders_db.py index 7e3a54d0bb5..88bb3987de4 100644 --- a/services/web/server/src/simcore_service_webserver/folders/_folders_db.py +++ b/services/web/server/src/simcore_service_webserver/folders/_folders_db.py @@ -6,7 +6,7 @@ import logging from datetime import datetime -from typing import Any, Final, cast +from typing import Final, cast import sqlalchemy as sa from aiohttp import web @@ -33,6 +33,7 @@ from simcore_postgres_database.utils_workspaces_sql import ( create_my_workspace_access_rights_subquery, ) +from simcore_service_webserver.utils import UnSet, as_dict_exclude_unset from sqlalchemy import func from sqlalchemy.ext.asyncio import AsyncConnection from sqlalchemy.orm import aliased @@ -43,18 +44,9 @@ _logger = logging.getLogger(__name__) - -class UnSet: - ... 
- - _unset: Final = UnSet() -def as_dict_exclude_unset(**params) -> dict[str, Any]: - return {k: v for k, v in params.items() if not isinstance(v, UnSet)} - - _SELECTION_ARGS = ( folders_v2.c.folder_id, folders_v2.c.name, @@ -324,6 +316,8 @@ async def update( parent_folder_id: FolderID | None | UnSet = _unset, trashed_at: datetime | None | UnSet = _unset, trashed_explicitly: bool | UnSet = _unset, + workspace_id: WorkspaceID | None | UnSet = _unset, + user_id: UserID | None | UnSet = _unset, ) -> FolderDB: """ Batch/single patch of folder/s @@ -334,6 +328,8 @@ async def update( parent_folder_id=parent_folder_id, trashed_at=trashed_at, trashed_explicitly=trashed_explicitly, + workspace_id=workspace_id, + user_id=user_id, ) query = ( @@ -467,6 +463,60 @@ async def get_projects_recursively_only_if_user_is_owner( return [ProjectID(row[0]) async for row in result] +async def get_all_folders_and_projects_ids_recursively( + app: web.Application, + connection: AsyncConnection | None = None, + *, + folder_id: FolderID, + private_workspace_user_id_or_none: UserID | None, + product_name: ProductName, +) -> tuple[list[FolderID], list[ProjectID]]: + """ + The purpose of this function is to retrieve all projects within the provided folder ID. + """ + + async with pass_or_acquire_connection(get_asyncpg_engine(app), connection) as conn: + + # Step 1: Define the base case for the recursive CTE + base_query = select( + folders_v2.c.folder_id, folders_v2.c.parent_folder_id + ).where( + (folders_v2.c.folder_id == folder_id) # <-- specified folder id + & (folders_v2.c.product_name == product_name) + ) + folder_hierarchy_cte = base_query.cte(name="folder_hierarchy", recursive=True) + + # Step 2: Define the recursive case + folder_alias = aliased(folders_v2) + recursive_query = select( + folder_alias.c.folder_id, folder_alias.c.parent_folder_id + ).select_from( + folder_alias.join( + folder_hierarchy_cte, + folder_alias.c.parent_folder_id == folder_hierarchy_cte.c.folder_id, + ) + ) + + # Step 3: Combine base and recursive cases into a CTE + folder_hierarchy_cte = folder_hierarchy_cte.union_all(recursive_query) + + # Step 4: Execute the query to get all descendants + final_query = select(folder_hierarchy_cte) + result = await conn.stream(final_query) + # list of tuples [(folder_id, parent_folder_id), ...] ex. 
[(1, None), (2, 1)] + folder_ids = [item.folder_id async for item in result] + + query = select(projects_to_folders.c.project_uuid).where( + (projects_to_folders.c.folder_id.in_(folder_ids)) + & (projects_to_folders.c.user_id == private_workspace_user_id_or_none) + ) + + result = await conn.stream(query) + project_ids = [ProjectID(row.project_uuid) async for row in result] + + return folder_ids, project_ids + + async def get_folders_recursively( app: web.Application, connection: AsyncConnection | None = None, diff --git a/services/web/server/src/simcore_service_webserver/folders/_models.py b/services/web/server/src/simcore_service_webserver/folders/_models.py index 9cac8a2f1a1..553d43bd64c 100644 --- a/services/web/server/src/simcore_service_webserver/folders/_models.py +++ b/services/web/server/src/simcore_service_webserver/folders/_models.py @@ -18,10 +18,9 @@ null_or_none_str_to_none_validator, ) from models_library.workspaces import WorkspaceID -from pydantic import BeforeValidator, ConfigDict, Field -from servicelib.request_keys import RQT_USERID_KEY +from pydantic import BaseModel, BeforeValidator, ConfigDict, Field -from .._constants import RQ_PRODUCT_KEY +from .._constants import RQ_PRODUCT_KEY, RQT_USERID_KEY _logger = logging.getLogger(__name__) @@ -88,3 +87,12 @@ class FolderSearchQueryParams( class FolderTrashQueryParams(RemoveQueryParams): ... + + +class _FolderWorkspacesPathParams(BaseModel): + folder_id: FolderID + workspace_id: Annotated[ + WorkspaceID | None, BeforeValidator(null_or_none_str_to_none_validator) + ] = Field(default=None) + + model_config = ConfigDict(extra="forbid") diff --git a/services/web/server/src/simcore_service_webserver/folders/_workspaces_api.py b/services/web/server/src/simcore_service_webserver/folders/_workspaces_api.py new file mode 100644 index 00000000000..115ff2c8d8e --- /dev/null +++ b/services/web/server/src/simcore_service_webserver/folders/_workspaces_api.py @@ -0,0 +1,138 @@ +import logging + +from aiohttp import web +from models_library.folders import FolderID +from models_library.products import ProductName +from models_library.users import UserID +from models_library.workspaces import WorkspaceID +from simcore_postgres_database.utils_repos import transaction_context + +from ..db.plugin import get_asyncpg_engine +from ..projects import _folders_db as project_to_folders_db +from ..projects import _groups_db as project_groups_db +from ..projects import _projects_db as projects_db +from ..projects._access_rights_api import check_user_project_permission +from ..users.api import get_user +from ..workspaces.api import check_user_workspace_access +from . import _folders_db + +_logger = logging.getLogger(__name__) + + +async def move_folder_into_workspace( + app: web.Application, + *, + user_id: UserID, + folder_id: FolderID, + workspace_id: WorkspaceID | None, + product_name: ProductName, +) -> None: + # 1. User needs to have delete permission on source folder + folder_db = await _folders_db.get( + app, folder_id=folder_id, product_name=product_name + ) + workspace_is_private = True + if folder_db.workspace_id: + await check_user_workspace_access( + app, + user_id=user_id, + workspace_id=folder_db.workspace_id, + product_name=product_name, + permission="delete", + ) + workspace_is_private = False + + # 2. 
User needs to have write permission on destination workspace + if workspace_id is not None: + await check_user_workspace_access( + app, + user_id=user_id, + workspace_id=workspace_id, + product_name=product_name, + permission="write", + ) + + # 3. User needs to have delete permission on all the projects inside source folder + ( + folder_ids, + project_ids, + ) = await _folders_db.get_all_folders_and_projects_ids_recursively( + app, + connection=None, + folder_id=folder_id, + private_workspace_user_id_or_none=user_id if workspace_is_private else None, + product_name=product_name, + ) + # NOTE: Not the most effective, can be improved + for project_id in project_ids: + await check_user_project_permission( + app, + project_id=project_id, + user_id=user_id, + product_name=product_name, + permission="delete", + ) + + # ⬆️ Here we have already guaranties that user has all the right permissions to do this operation ⬆️ + + async with transaction_context(get_asyncpg_engine(app)) as conn: + # 4. Update workspace ID on the project resource + for project_id in project_ids: + await projects_db.patch_project( + app=app, + connection=conn, + project_uuid=project_id, + new_partial_project_data={"workspace_id": workspace_id}, + ) + + # 5. BATCH update of folders with workspace_id + await _folders_db.update( + app, + connection=conn, + folders_id_or_ids=set(folder_ids), + product_name=product_name, + workspace_id=workspace_id, # <-- Updating workspace_id + user_id=user_id if workspace_id is None else None, # <-- Updating user_id + ) + + # 6. Update source folder parent folder ID with NULL (it will appear in the root directory) + await _folders_db.update( + app, + connection=conn, + folders_id_or_ids=folder_id, + product_name=product_name, + parent_folder_id=None, # <-- Updating parent folder ID + ) + + # 7. Remove all records of project to folders that are not in the folders that we are moving + # (ex. If we are moving from private workspace, the same project can be in different folders for different users) + await project_to_folders_db.delete_all_project_to_folder_by_project_ids_not_in_folder_ids( + app, + connection=conn, + project_id_or_ids=set(project_ids), + not_in_folder_ids=set(folder_ids), + ) + + # 8. Update the user id field for the remaining folders + await project_to_folders_db.update_project_to_folder( + app, + connection=conn, + folders_id_or_ids=set(folder_ids), + user_id=user_id if workspace_id is None else None, # <-- Updating user_id + ) + + # 9. 
Remove all project permissions, leave only the user who moved the project + user = await get_user(app, user_id=user_id) + for project_id in project_ids: + await project_groups_db.delete_all_project_groups( + app, connection=conn, project_id=project_id + ) + await project_groups_db.update_or_insert_project_group( + app, + connection=conn, + project_id=project_id, + group_id=user["primary_gid"], + read=True, + write=True, + delete=True, + ) diff --git a/services/web/server/src/simcore_service_webserver/folders/_workspaces_handlers.py b/services/web/server/src/simcore_service_webserver/folders/_workspaces_handlers.py new file mode 100644 index 00000000000..faa505ecd31 --- /dev/null +++ b/services/web/server/src/simcore_service_webserver/folders/_workspaces_handlers.py @@ -0,0 +1,38 @@ +import logging + +from aiohttp import web +from servicelib.aiohttp import status +from servicelib.aiohttp.requests_validation import parse_request_path_parameters_as + +from .._meta import api_version_prefix as VTAG +from ..login.decorators import login_required +from ..security.decorators import permission_required +from . import _workspaces_api +from ._exceptions_handlers import handle_plugin_requests_exceptions +from ._models import FoldersRequestContext, _FolderWorkspacesPathParams + +_logger = logging.getLogger(__name__) + + +routes = web.RouteTableDef() + + +@routes.post( + f"/{VTAG}/folders/{{folder_id}}/workspaces/{{workspace_id}}:move", + name="move_folder_to_workspace", +) +@login_required +@permission_required("folder.update") +@handle_plugin_requests_exceptions +async def move_folder_to_workspace(request: web.Request): + req_ctx = FoldersRequestContext.model_validate(request) + path_params = parse_request_path_parameters_as(_FolderWorkspacesPathParams, request) + + await _workspaces_api.move_folder_into_workspace( + app=request.app, + user_id=req_ctx.user_id, + folder_id=path_params.folder_id, + workspace_id=path_params.workspace_id, + product_name=req_ctx.product_name, + ) + return web.json_response(status=status.HTTP_204_NO_CONTENT) diff --git a/services/web/server/src/simcore_service_webserver/folders/plugin.py b/services/web/server/src/simcore_service_webserver/folders/plugin.py index 8ddef03ec1f..2601962e52f 100644 --- a/services/web/server/src/simcore_service_webserver/folders/plugin.py +++ b/services/web/server/src/simcore_service_webserver/folders/plugin.py @@ -7,7 +7,7 @@ from servicelib.aiohttp.application_keys import APP_SETTINGS_KEY from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup -from . import _folders_handlers, _trash_handlers +from . 
import _folders_handlers, _trash_handlers, _workspaces_handlers _logger = logging.getLogger(__name__) @@ -25,3 +25,4 @@ def setup_folders(app: web.Application): # routes app.router.add_routes(_folders_handlers.routes) app.router.add_routes(_trash_handlers.routes) + app.router.add_routes(_workspaces_handlers.routes) diff --git a/services/web/server/src/simcore_service_webserver/projects/_folders_db.py b/services/web/server/src/simcore_service_webserver/projects/_folders_db.py index 59ea8ebe282..e655cc17bf5 100644 --- a/services/web/server/src/simcore_service_webserver/projects/_folders_db.py +++ b/services/web/server/src/simcore_service_webserver/projects/_folders_db.py @@ -6,6 +6,7 @@ import logging from datetime import datetime +from typing import Final from aiohttp import web from models_library.folders import FolderID @@ -13,15 +14,17 @@ from models_library.users import UserID from pydantic import BaseModel from simcore_postgres_database.models.projects_to_folders import projects_to_folders +from simcore_postgres_database.utils_repos import transaction_context +from simcore_service_webserver.utils import UnSet, as_dict_exclude_unset from sqlalchemy import func, literal_column +from sqlalchemy.ext.asyncio import AsyncConnection from sqlalchemy.sql import select -from ..db.plugin import get_database_engine +from ..db.plugin import get_asyncpg_engine, get_database_engine _logger = logging.getLogger(__name__) - -_logger = logging.getLogger(__name__) +_unset: Final = UnSet() ### Models @@ -100,13 +103,79 @@ async def delete_project_to_folder( ) +### AsyncPg + + async def delete_all_project_to_folder_by_project_id( app: web.Application, + connection: AsyncConnection | None = None, + *, project_id: ProjectID, ) -> None: - async with get_database_engine(app).acquire() as conn: - await conn.execute( + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + await conn.stream( projects_to_folders.delete().where( projects_to_folders.c.project_uuid == f"{project_id}" ) ) + + +async def update_project_to_folder( + app: web.Application, + connection: AsyncConnection | None = None, + *, + folders_id_or_ids: FolderID | set[FolderID], + # updatable columns + user_id: UserID | None | UnSet = _unset, +) -> None: + """ + Batch/single patch of project to folders + """ + # NOTE: exclude unset can also be done using a pydantic model and dict(exclude_unset=True) + updated = as_dict_exclude_unset( + user_id=user_id, + ) + + query = projects_to_folders.update().values(modified=func.now(), **updated) + + if isinstance(folders_id_or_ids, set): + # batch-update + query = query.where( + projects_to_folders.c.folder_id.in_(list(folders_id_or_ids)) + ) + else: + # single-update + query = query.where(projects_to_folders.c.folder_id == folders_id_or_ids) + + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + await conn.stream(query) + + +async def delete_all_project_to_folder_by_project_ids_not_in_folder_ids( + app: web.Application, + connection: AsyncConnection | None = None, + *, + project_id_or_ids: ProjectID | set[ProjectID], + not_in_folder_ids: set[FolderID], +) -> None: + query = projects_to_folders.delete() + + if isinstance(project_id_or_ids, set): + # batch-delete + query = query.where( + projects_to_folders.c.project_uuid.in_( + [f"{project_id}" for project_id in project_id_or_ids] + ) + ) + else: + # single-delete + query = query.where( + projects_to_folders.c.project_uuid == f"{project_id_or_ids}" + ) + + query = query.where( + 
projects_to_folders.c.folder_id.not_in(not_in_folder_ids) # <-- NOT IN! + ) + + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + await conn.stream(query) diff --git a/services/web/server/src/simcore_service_webserver/projects/_groups_api.py b/services/web/server/src/simcore_service_webserver/projects/_groups_api.py index 7ae45f0f90c..b32a6d15fa1 100644 --- a/services/web/server/src/simcore_service_webserver/projects/_groups_api.py +++ b/services/web/server/src/simcore_service_webserver/projects/_groups_api.py @@ -80,7 +80,8 @@ async def list_project_groups_by_user_and_project( ] = await projects_groups_db.list_project_groups(app=app, project_id=project_id) project_groups_api: list[ProjectGroupGet] = [ - ProjectGroupGet.model_validate(group.model_dump()) for group in project_groups_db + ProjectGroupGet.model_validate(group.model_dump()) + for group in project_groups_db ] return project_groups_api diff --git a/services/web/server/src/simcore_service_webserver/projects/_groups_db.py b/services/web/server/src/simcore_service_webserver/projects/_groups_db.py index 5b963b90cdb..4355f0c9d92 100644 --- a/services/web/server/src/simcore_service_webserver/projects/_groups_db.py +++ b/services/web/server/src/simcore_service_webserver/projects/_groups_db.py @@ -3,19 +3,22 @@ - Adds a layer to the postgres API with a focus on the projects comments """ + import logging from datetime import datetime from aiohttp import web from models_library.projects import ProjectID from models_library.users import GroupID -from pydantic import BaseModel, TypeAdapter +from pydantic import BaseModel, ConfigDict, TypeAdapter from simcore_postgres_database.models.project_to_groups import project_to_groups +from simcore_postgres_database.utils_repos import transaction_context from sqlalchemy import func, literal_column from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.ext.asyncio import AsyncConnection from sqlalchemy.sql import select -from ..db.plugin import get_database_engine +from ..db.plugin import get_asyncpg_engine from .exceptions import ProjectGroupNotFoundError _logger = logging.getLogger(__name__) @@ -31,39 +34,46 @@ class ProjectGroupGetDB(BaseModel): created: datetime modified: datetime + model_config = ConfigDict(from_attributes=True) + ## DB API async def create_project_group( app: web.Application, + connection: AsyncConnection | None = None, + *, project_id: ProjectID, group_id: GroupID, - *, read: bool, write: bool, delete: bool, ) -> ProjectGroupGetDB: - async with get_database_engine(app).acquire() as conn: - result = await conn.execute( - project_to_groups.insert() - .values( - project_uuid=f"{project_id}", - gid=group_id, - read=read, - write=write, - delete=delete, - created=func.now(), - modified=func.now(), - ) - .returning(literal_column("*")) + query = ( + project_to_groups.insert() + .values( + project_uuid=f"{project_id}", + gid=group_id, + read=read, + write=write, + delete=delete, + created=func.now(), + modified=func.now(), ) + .returning(literal_column("*")) + ) + + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + result = await conn.stream(query) row = await result.first() return ProjectGroupGetDB.model_validate(row) async def list_project_groups( app: web.Application, + connection: AsyncConnection | None = None, + *, project_id: ProjectID, ) -> list[ProjectGroupGetDB]: stmt = ( @@ -79,14 +89,16 @@ async def list_project_groups( .where(project_to_groups.c.project_uuid == f"{project_id}") ) - async 
with get_database_engine(app).acquire() as conn: - result = await conn.execute(stmt) - rows = await result.fetchall() or [] + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + result = await conn.stream(stmt) + rows = await result.all() or [] return TypeAdapter(list[ProjectGroupGetDB]).validate_python(rows) async def get_project_group( app: web.Application, + connection: AsyncConnection | None = None, + *, project_id: ProjectID, group_id: GroupID, ) -> ProjectGroupGetDB: @@ -106,8 +118,8 @@ async def get_project_group( ) ) - async with get_database_engine(app).acquire() as conn: - result = await conn.execute(stmt) + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + result = await conn.stream(stmt) row = await result.first() if row is None: raise ProjectGroupNotFoundError( @@ -118,27 +130,31 @@ async def get_project_group( async def replace_project_group( app: web.Application, + connection: AsyncConnection | None = None, + *, project_id: ProjectID, group_id: GroupID, - *, read: bool, write: bool, delete: bool, ) -> ProjectGroupGetDB: - async with get_database_engine(app).acquire() as conn: - result = await conn.execute( - project_to_groups.update() - .values( - read=read, - write=write, - delete=delete, - ) - .where( - (project_to_groups.c.project_uuid == f"{project_id}") - & (project_to_groups.c.gid == group_id) - ) - .returning(literal_column("*")) + + query = ( + project_to_groups.update() + .values( + read=read, + write=write, + delete=delete, + ) + .where( + (project_to_groups.c.project_uuid == f"{project_id}") + & (project_to_groups.c.gid == group_id) ) + .returning(literal_column("*")) + ) + + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + result = await conn.stream(query) row = await result.first() if row is None: raise ProjectGroupNotFoundError( @@ -149,14 +165,15 @@ async def replace_project_group( async def update_or_insert_project_group( app: web.Application, + connection: AsyncConnection | None = None, + *, project_id: ProjectID, group_id: GroupID, - *, read: bool, write: bool, delete: bool, ) -> None: - async with get_database_engine(app).acquire() as conn: + async with transaction_context(get_asyncpg_engine(app), connection) as conn: insert_stmt = pg_insert(project_to_groups).values( project_uuid=f"{project_id}", gid=group_id, @@ -175,16 +192,18 @@ async def update_or_insert_project_group( "modified": func.now(), }, ) - await conn.execute(on_update_stmt) + await conn.stream(on_update_stmt) async def delete_project_group( app: web.Application, + connection: AsyncConnection | None = None, + *, project_id: ProjectID, group_id: GroupID, ) -> None: - async with get_database_engine(app).acquire() as conn: - await conn.execute( + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + await conn.stream( project_to_groups.delete().where( (project_to_groups.c.project_uuid == f"{project_id}") & (project_to_groups.c.gid == group_id) @@ -194,10 +213,12 @@ async def delete_project_group( async def delete_all_project_groups( app: web.Application, + connection: AsyncConnection | None = None, + *, project_id: ProjectID, ) -> None: - async with get_database_engine(app).acquire() as conn: - await conn.execute( + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + await conn.stream( project_to_groups.delete().where( project_to_groups.c.project_uuid == f"{project_id}" ) diff --git a/services/web/server/src/simcore_service_webserver/projects/_projects_db.py 
b/services/web/server/src/simcore_service_webserver/projects/_projects_db.py new file mode 100644 index 00000000000..3c94e9e7cdc --- /dev/null +++ b/services/web/server/src/simcore_service_webserver/projects/_projects_db.py @@ -0,0 +1,59 @@ +import logging + +import sqlalchemy as sa +from aiohttp import web +from models_library.projects import ProjectID +from simcore_postgres_database.utils_repos import transaction_context +from simcore_postgres_database.webserver_models import projects +from sqlalchemy.ext.asyncio import AsyncConnection + +from ..db.plugin import get_asyncpg_engine +from .exceptions import ProjectNotFoundError +from .models import ProjectDB + +_logger = logging.getLogger(__name__) + + +# NOTE: MD: I intentionally didn't include the workbench. There is a special interface +# for the workbench, and at some point, this column should be removed from the table. +# The same holds true for access_rights/ui/classifiers/quality, but we have decided to proceed step by step. +_SELECTION_PROJECT_DB_ARGS = [ # noqa: RUF012 + projects.c.id, + projects.c.type, + projects.c.uuid, + projects.c.name, + projects.c.description, + projects.c.thumbnail, + projects.c.prj_owner, + projects.c.creation_date, + projects.c.last_change_date, + projects.c.ui, + projects.c.classifiers, + projects.c.dev, + projects.c.quality, + projects.c.published, + projects.c.hidden, + projects.c.workspace_id, + projects.c.trashed_at, +] + + +async def patch_project( + app: web.Application, + connection: AsyncConnection | None = None, + *, + project_uuid: ProjectID, + new_partial_project_data: dict, +) -> ProjectDB: + + async with transaction_context(get_asyncpg_engine(app), connection) as conn: + result = await conn.stream( + projects.update() + .values(last_change_date=sa.func.now(), **new_partial_project_data) + .where(projects.c.uuid == f"{project_uuid}") + .returning(*_SELECTION_PROJECT_DB_ARGS) + ) + row = await result.first() + if row is None: + raise ProjectNotFoundError(project_uuid=project_uuid) + return ProjectDB.model_validate(row) diff --git a/services/web/server/src/simcore_service_webserver/projects/_workspaces_api.py b/services/web/server/src/simcore_service_webserver/projects/_workspaces_api.py index 105decdd3ac..1462168fa52 100644 --- a/services/web/server/src/simcore_service_webserver/projects/_workspaces_api.py +++ b/services/web/server/src/simcore_service_webserver/projects/_workspaces_api.py @@ -5,13 +5,15 @@ from models_library.projects import ProjectID from models_library.users import UserID from models_library.workspaces import WorkspaceID +from simcore_postgres_database.utils_repos import transaction_context +from ..db.plugin import get_asyncpg_engine from ..projects._access_rights_api import get_user_project_access_rights from ..users.api import get_user from ..workspaces.api import check_user_workspace_access from . import _folders_db as project_to_folders_db from . import _groups_db as project_groups_db -from .db import APP_PROJECT_DBAPI, ProjectDBAPI +from . import _projects_db from .exceptions import ProjectInvalidRightsError _logger = logging.getLogger(__name__) @@ -25,8 +27,6 @@ async def move_project_into_workspace( workspace_id: WorkspaceID | None, product_name: ProductName, ) -> None: - project_api: ProjectDBAPI = app[APP_PROJECT_DBAPI] - # 1. 
User needs to have delete permission on project project_access_rights = await get_user_project_access_rights( app, project_id=project_id, user_id=user_id, product_name=product_name @@ -44,26 +44,33 @@ async def move_project_into_workspace( permission="write", ) - # 3. Delete project to folders (for everybody) - await project_to_folders_db.delete_all_project_to_folder_by_project_id( - app, - project_id=project_id, - ) + async with transaction_context(get_asyncpg_engine(app)) as conn: + # 3. Delete project to folders (for everybody) + await project_to_folders_db.delete_all_project_to_folder_by_project_id( + app, + connection=conn, + project_id=project_id, + ) - # 4. Update workspace ID on the project resource - await project_api.patch_project( - project_uuid=project_id, - new_partial_project_data={"workspace_id": workspace_id}, - ) + # 4. Update workspace ID on the project resource + await _projects_db.patch_project( + app=app, + connection=conn, + project_uuid=project_id, + new_partial_project_data={"workspace_id": workspace_id}, + ) - # 5. Remove all project permissions, leave only the user who moved the project - user = await get_user(app, user_id=user_id) - await project_groups_db.delete_all_project_groups(app, project_id=project_id) - await project_groups_db.update_or_insert_project_group( - app, - project_id=project_id, - group_id=user["primary_gid"], - read=True, - write=True, - delete=True, - ) + # 5. Remove all project permissions, leave only the user who moved the project + user = await get_user(app, user_id=user_id) + await project_groups_db.delete_all_project_groups( + app, connection=conn, project_id=project_id + ) + await project_groups_db.update_or_insert_project_group( + app, + connection=conn, + project_id=project_id, + group_id=user["primary_gid"], + read=True, + write=True, + delete=True, + ) diff --git a/services/web/server/src/simcore_service_webserver/projects/_workspaces_handlers.py b/services/web/server/src/simcore_service_webserver/projects/_workspaces_handlers.py index ff881b418af..ef3d20b3c5a 100644 --- a/services/web/server/src/simcore_service_webserver/projects/_workspaces_handlers.py +++ b/services/web/server/src/simcore_service_webserver/projects/_workspaces_handlers.py @@ -51,19 +51,21 @@ async def wrapper(request: web.Request) -> web.StreamResponse: class _ProjectWorkspacesPathParams(BaseModel): project_id: ProjectID - workspace_id: Annotated[WorkspaceID | None, BeforeValidator(null_or_none_str_to_none_validator)] = Field(default=None) + workspace_id: Annotated[ + WorkspaceID | None, BeforeValidator(null_or_none_str_to_none_validator) + ] = Field(default=None) model_config = ConfigDict(extra="forbid") -@routes.put( - f"/{VTAG}/projects/{{project_id}}/workspaces/{{workspace_id}}", - name="replace_project_workspace", +@routes.post( + f"/{VTAG}/projects/{{project_id}}/workspaces/{{workspace_id}}:move", + name="move_project_to_workspace", ) @login_required @permission_required("project.workspaces.*") @_handle_projects_workspaces_exceptions -async def replace_project_workspace(request: web.Request): +async def move_project_to_workspace(request: web.Request): req_ctx = RequestContext.model_validate(request) path_params = parse_request_path_parameters_as( _ProjectWorkspacesPathParams, request diff --git a/services/web/server/src/simcore_service_webserver/projects/db.py b/services/web/server/src/simcore_service_webserver/projects/db.py index cdaed691e71..b0fc7c5551a 100644 --- a/services/web/server/src/simcore_service_webserver/projects/db.py +++ 
b/services/web/server/src/simcore_service_webserver/projects/db.py @@ -85,6 +85,7 @@ patch_workbench, update_workbench, ) +from ._projects_db import _SELECTION_PROJECT_DB_ARGS from .exceptions import ( ProjectDeleteError, ProjectInvalidRightsError, @@ -676,33 +677,10 @@ async def get_project( project_type, ) - # NOTE: MD: I intentionally didn't include the workbench. There is a special interface - # for the workbench, and at some point, this column should be removed from the table. - # The same holds true for access_rights/ui/classifiers/quality, but we have decided to proceed step by step. - _SELECTION_PROJECT_DB_ARGS = [ # noqa: RUF012 - projects.c.id, - projects.c.type, - projects.c.uuid, - projects.c.name, - projects.c.description, - projects.c.thumbnail, - projects.c.prj_owner, - projects.c.creation_date, - projects.c.last_change_date, - projects.c.ui, - projects.c.classifiers, - projects.c.dev, - projects.c.quality, - projects.c.published, - projects.c.hidden, - projects.c.workspace_id, - projects.c.trashed_at, - ] - async def get_project_db(self, project_uuid: ProjectID) -> ProjectDB: async with self.engine.acquire() as conn: result = await conn.execute( - sa.select(*self._SELECTION_PROJECT_DB_ARGS).where( + sa.select(*_SELECTION_PROJECT_DB_ARGS).where( projects.c.uuid == f"{project_uuid}" ) ) @@ -716,9 +694,7 @@ async def get_user_specific_project_data_db( ) -> UserSpecificProjectDataDB: async with self.engine.acquire() as conn: result = await conn.execute( - sa.select( - *self._SELECTION_PROJECT_DB_ARGS, projects_to_folders.c.folder_id - ) + sa.select(*_SELECTION_PROJECT_DB_ARGS, projects_to_folders.c.folder_id) .select_from( projects.join( projects_to_folders, @@ -865,21 +841,6 @@ async def replace_project( msg = "linter unhappy without this" raise RuntimeError(msg) - async def patch_project( - self, project_uuid: ProjectID, new_partial_project_data: dict - ) -> ProjectDB: - async with self.engine.acquire() as conn: - result = await conn.execute( - projects.update() - .values(last_change_date=sa.func.now(), **new_partial_project_data) - .where(projects.c.uuid == f"{project_uuid}") - .returning(*self._SELECTION_PROJECT_DB_ARGS) - ) - row = await result.fetchone() - if row is None: - raise ProjectNotFoundError(project_uuid=project_uuid) - return ProjectDB.model_validate(row) - async def get_project_product(self, project_uuid: ProjectID) -> ProductName: async with self.engine.acquire() as conn: result = await conn.execute( diff --git a/services/web/server/src/simcore_service_webserver/projects/projects_api.py b/services/web/server/src/simcore_service_webserver/projects/projects_api.py index 6876c63718d..cf9445985c6 100644 --- a/services/web/server/src/simcore_service_webserver/projects/projects_api.py +++ b/services/web/server/src/simcore_service_webserver/projects/projects_api.py @@ -120,7 +120,7 @@ from ..wallets import api as wallets_api from ..wallets.errors import WalletNotEnoughCreditsError from ..workspaces import _workspaces_db as workspaces_db -from . import _crud_api_delete, _nodes_api +from . 
import _crud_api_delete, _nodes_api, _projects_db from ._access_rights_api import ( check_user_project_permission, has_user_project_access_rights, @@ -253,8 +253,8 @@ async def patch_project( project_patch: ProjectPatch | ProjectPatchExtended, product_name: ProductName, ): - _project_patch_exclude_unset: dict[str, Any] = jsonable_encoder( - project_patch, exclude_unset=True, by_alias=False + _project_patch_exclude_unset = project_patch.model_dump( + exclude_unset=True, by_alias=False ) db: ProjectDBAPI = app[APP_PROJECT_DBAPI] @@ -289,7 +289,8 @@ async def patch_project( raise ProjectOwnerNotFoundInTheProjectAccessRightsError # 4. Patch the project - await db.patch_project( + await _projects_db.patch_project( + app=app, project_uuid=project_uuid, new_partial_project_data=_project_patch_exclude_unset, ) diff --git a/services/web/server/src/simcore_service_webserver/utils.py b/services/web/server/src/simcore_service_webserver/utils.py index c6eade6345d..1f73ac06e0a 100644 --- a/services/web/server/src/simcore_service_webserver/utils.py +++ b/services/web/server/src/simcore_service_webserver/utils.py @@ -194,3 +194,17 @@ def compute_sha1_on_small_dataset(d: Any) -> SHA1Str: # SEE options in https://github.com/ijl/orjson#option data_bytes = orjson.dumps(d, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS) return SHA1Str(hashlib.sha1(data_bytes).hexdigest()) # nosec # NOSONAR + + +# ----------------------------------------------- +# +# UNSET +# + + +class UnSet: + ... + + +def as_dict_exclude_unset(**params) -> dict[str, Any]: + return {k: v for k, v in params.items() if not isinstance(v, UnSet)} diff --git a/services/web/server/tests/conftest.py b/services/web/server/tests/conftest.py index 7085050f331..f215368ad1d 100644 --- a/services/web/server/tests/conftest.py +++ b/services/web/server/tests/conftest.py @@ -358,7 +358,7 @@ async def _creator( for group_id, permissions in _access_rights.items(): await update_or_insert_project_group( client.app, - data["uuid"], + project_id=data["uuid"], group_id=int(group_id), read=permissions["read"], write=permissions["write"], diff --git a/services/web/server/tests/integration/01/test_garbage_collection.py b/services/web/server/tests/integration/01/test_garbage_collection.py index c52977d7115..d3aee60764d 100644 --- a/services/web/server/tests/integration/01/test_garbage_collection.py +++ b/services/web/server/tests/integration/01/test_garbage_collection.py @@ -237,7 +237,7 @@ async def new_project( for group_id, permissions in access_rights.items(): await update_or_insert_project_group( client.app, - project["uuid"], + project_id=project["uuid"], group_id=int(group_id), read=permissions["read"], write=permissions["write"], diff --git a/services/web/server/tests/unit/isolated/test_application_settings.py b/services/web/server/tests/unit/isolated/test_application_settings.py index afedd1f0149..da7fbf2f34f 100644 --- a/services/web/server/tests/unit/isolated/test_application_settings.py +++ b/services/web/server/tests/unit/isolated/test_application_settings.py @@ -61,7 +61,7 @@ def test_settings_to_client_statics(app_settings: ApplicationSettings): # special alias assert statics["stackName"] == "master-simcore" - assert statics["pluginsDisabled"] == ["WEBSERVER_CLUSTERS"] + assert statics["pluginsDisabled"] == [] def test_settings_to_client_statics_plugins( @@ -100,13 +100,13 @@ def test_settings_to_client_statics_plugins( assert statics["vcsReleaseTag"] assert TypeAdapter(HttpUrl).validate_python(statics["vcsReleaseUrl"]) - assert 
set(statics["pluginsDisabled"]) == (disable_plugins | {"WEBSERVER_CLUSTERS"}) + assert set(statics["pluginsDisabled"]) == (disable_plugins) @pytest.mark.parametrize("is_dev_feature_enabled", [True, False]) @pytest.mark.parametrize( "plugin_name", - ["WEBSERVER_META_MODELING", "WEBSERVER_VERSION_CONTROL"] + ["WEBSERVER_META_MODELING", "WEBSERVER_VERSION_CONTROL"], # NOTE: this is the list in _enable_only_if_dev_features_allowed ) def test_disabled_plugins_settings_to_client_statics( diff --git a/services/web/server/tests/unit/with_dbs/01/clusters/conftest.py b/services/web/server/tests/unit/with_dbs/01/clusters/conftest.py deleted file mode 100644 index afc0a5aada7..00000000000 --- a/services/web/server/tests/unit/with_dbs/01/clusters/conftest.py +++ /dev/null @@ -1,10 +0,0 @@ -import pytest -from pytest_simcore.helpers.typing_env import EnvVarsDict - - -@pytest.fixture -def enable_webserver_clusters_feature( - app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch -) -> EnvVarsDict: - monkeypatch.setenv("WEBSERVER_CLUSTERS", "1") - return app_environment | {"WEBSERVER_CLUSTERS": "1"} diff --git a/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_handlers.py b/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_handlers.py deleted file mode 100644 index e75aee0866f..00000000000 --- a/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_handlers.py +++ /dev/null @@ -1,531 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=no-value-for-parameter -# pylint:disable=too-many-arguments -# pylint:disable=too-many-statements - - -import json -import random -from http import HTTPStatus -from typing import Any - -import hypothesis -import hypothesis.provisional -import pytest -from aiohttp.test_utils import TestClient -from faker import Faker -from hypothesis import strategies as st -from models_library.api_schemas_webserver.clusters import ( - ClusterCreate, - ClusterPatch, - ClusterPing, -) -from models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - Cluster, - ClusterTypeInModel, - SimpleAuthentication, -) -from pydantic import HttpUrl, TypeAdapter -from pydantic_core import Url -from pytest_mock import MockerFixture -from pytest_simcore.helpers.assert_checks import assert_status -from pytest_simcore.helpers.webserver_parametrizations import ( # nopycln: import - ExpectedResponse, - standard_role_response, -) -from servicelib.aiohttp import status -from simcore_postgres_database.models.clusters import ClusterType -from simcore_postgres_database.models.users import UserRole -from simcore_service_webserver.director_v2.exceptions import ( - ClusterAccessForbidden, - ClusterNotFoundError, - ClusterPingError, - DirectorServiceError, -) - - -@st.composite -def http_url_strategy(draw): - return TypeAdapter(HttpUrl).validate_python(draw(hypothesis.provisional.urls())) - - -@st.composite -def cluster_patch_strategy(draw): - return ClusterPatch( - name=draw(st.text()), - description=draw(st.text()), - owner=draw(st.integers(min_value=1)), - type=draw(st.sampled_from(ClusterTypeInModel)), - thumbnail=draw(http_url_strategy()), - endpoint=draw(http_url_strategy()), - authentication=None, - accessRights={}, - ) - - -st.register_type_strategy(ClusterPatch, cluster_patch_strategy()) -st.register_type_strategy(Url, http_url_strategy()) - - -@pytest.fixture -def mocked_director_v2_api(mocker: MockerFixture): - mocked_director_v2_api = mocker.patch( - 
"simcore_service_webserver.clusters._handlers.director_v2_api", autospec=True - ) - - mocked_director_v2_api.create_cluster.return_value = random.choice( - Cluster.model_config["json_schema_extra"]["examples"] - ) - mocked_director_v2_api.list_clusters.return_value = [] - mocked_director_v2_api.get_cluster.return_value = random.choice( - Cluster.model_config["json_schema_extra"]["examples"] - ) - mocked_director_v2_api.get_cluster_details.return_value = { - "scheduler": {"status": "running"}, - "dashboardLink": "https://link.to.dashboard", - } - mocked_director_v2_api.update_cluster.return_value = random.choice( - Cluster.model_config["json_schema_extra"]["examples"] - ) - mocked_director_v2_api.delete_cluster.return_value = None - mocked_director_v2_api.ping_cluster.return_value = None - mocked_director_v2_api.ping_specific_cluster.return_value = None - - -@pytest.fixture -def mocked_director_v2_with_error( - mocker: MockerFixture, faker: Faker, director_v2_error: type[DirectorServiceError] -): - mocked_director_v2_api = mocker.patch( - "simcore_service_webserver.clusters._handlers.director_v2_api", autospec=True - ) - error = director_v2_error( - status=status.HTTP_503_SERVICE_UNAVAILABLE, - reason="no director-v2", - url=faker.uri(), - cluster_id=faker.pyint(min_value=1), - endpoint=faker.uri(), - ) - mocked_director_v2_api.create_cluster.side_effect = error - mocked_director_v2_api.list_clusters.side_effect = error - mocked_director_v2_api.get_cluster.side_effect = error - mocked_director_v2_api.get_cluster_details.side_effect = error - mocked_director_v2_api.update_cluster.side_effect = error - mocked_director_v2_api.delete_cluster.side_effect = error - mocked_director_v2_api.ping_cluster.side_effect = error - mocked_director_v2_api.ping_specific_cluster.side_effect = error - - -@pytest.fixture() -def cluster_create(faker: Faker) -> ClusterCreate: - instance = ClusterCreate( - name=faker.name(), - endpoint=faker.uri(), - type=random.choice(list(ClusterType)), - owner=faker.pyint(), - authentication=SimpleAuthentication( - username=faker.user_name(), password=faker.password() - ), - ) - assert instance - return instance - - -@pytest.mark.parametrize(*standard_role_response(), ids=str) -async def test_create_cluster( - enable_webserver_clusters_feature: None, - mocked_director_v2_api, - client: TestClient, - logged_user: dict[str, Any], - faker: Faker, - cluster_create: ClusterCreate, - user_role: UserRole, - expected: ExpectedResponse, -): - cluster_create.access_rights[logged_user["id"]] = CLUSTER_ADMIN_RIGHTS - print(f"--> creating {cluster_create=!r}") - # check we can create a cluster - assert client.app - url = client.app.router["create_cluster"].url_for() - rsp = await client.post( - f"{url}", - json=json.loads(cluster_create.model_dump_json(by_alias=True)), - ) - data, error = await assert_status( - rsp, - ( - expected.forbidden if user_role == UserRole.USER else expected.created - ), # only accessible for TESTER - ) - if error: - # we are done here - return - - created_cluster = Cluster.model_validate(data) - assert created_cluster - - -@pytest.mark.parametrize(*standard_role_response(), ids=str) -async def test_list_clusters( - enable_webserver_clusters_feature: None, - mocked_director_v2_api, - client: TestClient, - logged_user: dict[str, Any], - expected: ExpectedResponse, -): - # check empty clusters - assert client.app - url = client.app.router["list_clusters"].url_for() - rsp = await client.get(f"{url}") - data, error = await assert_status(rsp, expected.ok) - if not 
error: - assert isinstance(data, list) - - -@pytest.mark.parametrize(*standard_role_response(), ids=str) -async def test_get_cluster( - enable_webserver_clusters_feature: None, - mocked_director_v2_api, - client: TestClient, - logged_user: dict[str, Any], - user_role: UserRole, - expected: ExpectedResponse, -): - # check not found - assert client.app - url = client.app.router["get_cluster"].url_for(cluster_id=f"{25}") - rsp = await client.get(f"{url}") - data, error = await assert_status(rsp, expected.ok) - if not error: - assert isinstance(data, dict) - - -@pytest.mark.parametrize(*standard_role_response(), ids=str) -async def test_get_cluster_details( - enable_webserver_clusters_feature: None, - mocked_director_v2_api, - client: TestClient, - logged_user: dict[str, Any], - user_role: UserRole, - expected: ExpectedResponse, -): - # check not found - assert client.app - url = client.app.router["get_cluster_details"].url_for(cluster_id=f"{25}") - rsp = await client.get(f"{url}") - data, error = await assert_status(rsp, expected.ok) - if not error: - assert isinstance(data, dict) - - -@pytest.mark.parametrize(*standard_role_response(), ids=str) -@hypothesis.given(cluster_patch=st.from_type(ClusterPatch)) -@hypothesis.settings( - # hypothesis does not play well with fixtures, hence the warning - # it will create several tests but not replay the fixtures - suppress_health_check=[ - hypothesis.HealthCheck.function_scoped_fixture, - hypothesis.HealthCheck.too_slow, - ], - deadline=None, -) -async def test_update_cluster( - enable_webserver_clusters_feature: None, - mocked_director_v2_api, - client: TestClient, - logged_user: dict[str, Any], - cluster_patch: ClusterPatch, - expected: ExpectedResponse, -): - print(f"--> updating {cluster_patch=!r}") - _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True} - assert client.app - url = client.app.router["update_cluster"].url_for(cluster_id=f"{25}") - rsp = await client.patch( - f"{url}", - json=json.loads(cluster_patch.model_dump_json(**_PATCH_EXPORT)), - ) - data, error = await assert_status(rsp, expected.ok) - if not error: - assert isinstance(data, dict) - - -@pytest.mark.parametrize(*standard_role_response(), ids=str) -async def test_delete_cluster( - enable_webserver_clusters_feature: None, - mocked_director_v2_api, - client: TestClient, - logged_user: dict[str, Any], - expected: ExpectedResponse, -): - assert client.app - url = client.app.router["delete_cluster"].url_for(cluster_id=f"{25}") - rsp = await client.delete(f"{url}") - data, error = await assert_status(rsp, expected.no_content) - if not error: - assert data is None - - -@pytest.mark.parametrize(*standard_role_response(), ids=str) -@hypothesis.given(cluster_ping=st.from_type(ClusterPing)) -@hypothesis.settings( - # hypothesis does not play well with fixtures, hence the warning - # it will create several tests but not replay the fixtures - suppress_health_check=[ - hypothesis.HealthCheck.function_scoped_fixture, - hypothesis.HealthCheck.too_slow, - ], - deadline=None, -) -async def test_ping_cluster( - enable_webserver_clusters_feature: None, - mocked_director_v2_api, - client: TestClient, - logged_user: dict[str, Any], - expected: ExpectedResponse, - cluster_ping: ClusterPing, -): - print(f"--> pinging {cluster_ping=!r}") - assert client.app - url = client.app.router["ping_cluster"].url_for() - rsp = await client.post( - f"{url}", json=json.loads(cluster_ping.model_dump_json(by_alias=True)) - ) - data, error = await assert_status(rsp, expected.no_content) - if 
not error: - assert data is None - - -@pytest.mark.parametrize(*standard_role_response(), ids=str) -async def test_ping_specific_cluster( - enable_webserver_clusters_feature: None, - mocked_director_v2_api, - client: TestClient, - logged_user: dict[str, Any], - faker: Faker, - expected: ExpectedResponse, -): - assert client.app - url = client.app.router["ping_cluster_cluster_id"].url_for( - cluster_id=f"{faker.pyint(min_value=1)}" - ) - rsp = await client.post(f"{url}") - data, error = await assert_status(rsp, expected.no_content) - if not error: - assert data is None - - -@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str) -@pytest.mark.parametrize( - "director_v2_error, expected_http_error", - [ - (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE), - ], -) -async def test_create_cluster_with_error( - enable_webserver_clusters_feature: None, - mocked_director_v2_with_error, - client: TestClient, - logged_user: dict[str, Any], - faker: Faker, - cluster_create: ClusterCreate, - expected_http_error: HTTPStatus, -): - cluster_create.access_rights[logged_user["id"]] = CLUSTER_ADMIN_RIGHTS - print(f"--> creating {cluster_create=!r}") - # check we can create a cluster - assert client.app - url = client.app.router["create_cluster"].url_for() - rsp = await client.post( - f"{url}", - json=json.loads(cluster_create.model_dump_json(by_alias=True)), - ) - data, error = await assert_status(rsp, expected_http_error) - assert not data - assert error - - -@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str) -@pytest.mark.parametrize( - "director_v2_error, expected_http_error", - [ - (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE), - ], -) -async def test_list_clusters_with_error( - enable_webserver_clusters_feature: None, - mocked_director_v2_with_error, - client: TestClient, - logged_user: dict[str, Any], - expected_http_error: HTTPStatus, -): - # check empty clusters - assert client.app - url = client.app.router["list_clusters"].url_for() - rsp = await client.get(f"{url}") - data, error = await assert_status(rsp, expected_http_error) - assert not data - assert error - - -@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str) -@pytest.mark.parametrize( - "director_v2_error, expected_http_error", - [ - (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE), - (ClusterNotFoundError, status.HTTP_404_NOT_FOUND), - (ClusterAccessForbidden, status.HTTP_403_FORBIDDEN), - ], -) -async def test_get_cluster_with_error( - enable_webserver_clusters_feature: None, - mocked_director_v2_with_error, - client: TestClient, - logged_user: dict[str, Any], - expected_http_error: HTTPStatus, -): - # check empty clusters - assert client.app - url = client.app.router["get_cluster"].url_for(cluster_id=f"{25}") - rsp = await client.get(f"{url}") - data, error = await assert_status(rsp, expected_http_error) - assert not data - assert error - - -@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str) -@pytest.mark.parametrize( - "director_v2_error, expected_http_error", - [ - (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE), - (ClusterNotFoundError, status.HTTP_404_NOT_FOUND), - (ClusterAccessForbidden, status.HTTP_403_FORBIDDEN), - ], -) -async def test_get_cluster_details_with_error( - enable_webserver_clusters_feature: None, - mocked_director_v2_with_error, - client: TestClient, - logged_user: dict[str, Any], - expected_http_error: HTTPStatus, -): - # check not found - assert client.app - url = 
client.app.router["get_cluster_details"].url_for(cluster_id=f"{25}") - rsp = await client.get(f"{url}") - data, error = await assert_status(rsp, expected_http_error) - assert not data - assert error - - -@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str) -@pytest.mark.parametrize( - "director_v2_error, expected_http_error", - [ - (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE), - (ClusterNotFoundError, status.HTTP_404_NOT_FOUND), - (ClusterAccessForbidden, status.HTTP_403_FORBIDDEN), - ], -) -async def test_update_cluster_with_error( - enable_webserver_clusters_feature: None, - mocked_director_v2_with_error, - client: TestClient, - logged_user: dict[str, Any], - expected_http_error: HTTPStatus, -): - _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True} - assert client.app - url = client.app.router["update_cluster"].url_for(cluster_id=f"{25}") - rsp = await client.patch( - f"{url}", - json=json.loads(ClusterPatch().model_dump_json(**_PATCH_EXPORT)), - ) - data, error = await assert_status(rsp, expected_http_error) - assert not data - assert error - - -@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str) -@pytest.mark.parametrize( - "director_v2_error, expected_http_error", - [ - (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE), - (ClusterNotFoundError, status.HTTP_404_NOT_FOUND), - (ClusterAccessForbidden, status.HTTP_403_FORBIDDEN), - ], -) -async def test_delete_cluster_with_error( - enable_webserver_clusters_feature: None, - mocked_director_v2_with_error, - client: TestClient, - logged_user: dict[str, Any], - expected_http_error: HTTPStatus, -): - assert client.app - url = client.app.router["delete_cluster"].url_for(cluster_id=f"{25}") - rsp = await client.delete(f"{url}") - data, error = await assert_status(rsp, expected_http_error) - assert not data - assert error - - -@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str) -@pytest.mark.parametrize( - "director_v2_error, expected_http_error", - [ - (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE), - (ClusterPingError, status.HTTP_422_UNPROCESSABLE_ENTITY), - ], -) -async def test_ping_cluster_with_error( - enable_webserver_clusters_feature: None, - mocked_director_v2_with_error, - client: TestClient, - logged_user: dict[str, Any], - faker: Faker, - expected_http_error, -): - cluster_ping = ClusterPing( - endpoint=faker.uri(), - authentication=SimpleAuthentication( - username=faker.user_name(), password=faker.password() - ), - ) - assert client.app - url = client.app.router["ping_cluster"].url_for() - rsp = await client.post( - f"{url}", json=json.loads(cluster_ping.model_dump_json(by_alias=True)) - ) - data, error = await assert_status(rsp, expected_http_error) - assert not data - assert error - - -@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str) -@pytest.mark.parametrize( - "director_v2_error, expected_http_error", - [ - (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE), - (ClusterPingError, status.HTTP_422_UNPROCESSABLE_ENTITY), - ], -) -async def test_ping_specific_cluster_with_error( - enable_webserver_clusters_feature: None, - mocked_director_v2_with_error, - client: TestClient, - logged_user: dict[str, Any], - faker: Faker, - expected_http_error, -): - assert client.app - url = client.app.router["ping_cluster_cluster_id"].url_for( - cluster_id=f"{faker.pyint(min_value=1)}" - ) - rsp = await client.post(f"{url}") - data, error = await assert_status(rsp, expected_http_error) - assert not data - assert error 
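[Editorial aside, not part of the patch] The folder/project update helpers added earlier in this diff rely on the UnSet sentinel that the patch moves into simcore_service_webserver/utils.py and reuses from folders/_folders_db.py and projects/_folders_db.py. Below is a minimal, self-contained sketch of that pattern, showing why isinstance-based filtering lets the new batch update() helpers distinguish "argument not provided" from an explicit None (used, for example, to NULL workspace_id when a folder moves back to a private workspace). The build_update_values helper and the literal ids are hypothetical and serve illustration only; the real code paths are the ones shown in the diff.

# Sketch of the UnSet / as_dict_exclude_unset mechanism used by the new update helpers.
from typing import Any, Final


class UnSet:
    ...


def as_dict_exclude_unset(**params) -> dict[str, Any]:
    # keep explicit None values, drop only parameters left at the sentinel default
    return {k: v for k, v in params.items() if not isinstance(v, UnSet)}


_unset: Final = UnSet()


def build_update_values(
    *,
    parent_folder_id: int | None | UnSet = _unset,
    workspace_id: int | None | UnSet = _unset,
    user_id: int | None | UnSet = _unset,
) -> dict[str, Any]:
    # only the keyword arguments the caller actually passed survive
    return as_dict_exclude_unset(
        parent_folder_id=parent_folder_id,
        workspace_id=workspace_id,
        user_id=user_id,
    )


# an explicit None survives (it maps to SQL NULL), omitted kwargs disappear entirely
assert build_update_values(workspace_id=None, user_id=42) == {
    "workspace_id": None,
    "user_id": 42,
}
assert build_update_values(parent_folder_id=3) == {"parent_folder_id": 3}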
diff --git a/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_plugin_setup.py b/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_plugin_setup.py deleted file mode 100644 index c9731dcc0bd..00000000000 --- a/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_plugin_setup.py +++ /dev/null @@ -1,26 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -from aiohttp.test_utils import TestClient -from servicelib.aiohttp.application_keys import APP_SETTINGS_KEY -from simcore_service_webserver.application_settings import ApplicationSettings - - -def test_module_setup_defaults_to_false(client: TestClient): - assert client.app - settings: ApplicationSettings = client.app[APP_SETTINGS_KEY] - - assert settings.WEBSERVER_CLUSTERS - assert "list_clusters" in client.app.router - - -def test_module_setup_can_be_properly_enabled( - enable_webserver_clusters_feature: None, - client: TestClient, -): - assert client.app - settings: ApplicationSettings = client.app[APP_SETTINGS_KEY] - - assert settings.WEBSERVER_CLUSTERS - assert "list_clusters" in client.app.router diff --git a/services/web/server/tests/unit/with_dbs/01/test_director_v2.py b/services/web/server/tests/unit/with_dbs/01/test_director_v2.py index 93956089fb9..f18bc9e1754 100644 --- a/services/web/server/tests/unit/with_dbs/01/test_director_v2.py +++ b/services/web/server/tests/unit/with_dbs/01/test_director_v2.py @@ -6,14 +6,6 @@ import pytest from aioresponses import aioresponses from faker import Faker -from hypothesis import HealthCheck, given, settings -from hypothesis import strategies as st -from models_library.api_schemas_webserver.clusters import ( - ClusterCreate, - ClusterPatch, - ClusterPing, -) -from models_library.clusters import ClusterID from models_library.projects import ProjectID from models_library.projects_pipeline import ComputationTask from models_library.projects_state import RunningState @@ -38,11 +30,6 @@ def project_id(faker: Faker) -> ProjectID: return ProjectID(faker.uuid4()) -@pytest.fixture -def cluster_id(faker: Faker) -> ClusterID: - return ClusterID(faker.pyint(min_value=0)) - - async def test_create_pipeline( mocked_director_v2, client, @@ -74,70 +61,3 @@ async def test_delete_pipeline( mocked_director_v2, client, user_id: UserID, project_id: ProjectID ): await api.delete_pipeline(client.app, user_id, project_id) - - -@settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) -@given(cluster_create=st.builds(ClusterCreate)) -async def test_create_cluster( - mocked_director_v2, client, user_id: UserID, cluster_create -): - created_cluster = await api.create_cluster( - client.app, user_id=user_id, new_cluster=cluster_create - ) - assert created_cluster is not None - assert isinstance(created_cluster, dict) - assert "id" in created_cluster - - -async def test_list_clusters(mocked_director_v2, client, user_id: UserID): - list_of_clusters = await api.list_clusters(client.app, user_id=user_id) - assert isinstance(list_of_clusters, list) - assert len(list_of_clusters) > 0 - - -async def test_get_cluster( - mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID -): - cluster = await api.get_cluster(client.app, user_id=user_id, cluster_id=cluster_id) - assert isinstance(cluster, dict) - assert cluster["id"] == cluster_id - - -async def test_get_cluster_details( - mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID -): - cluster_details = await api.get_cluster_details( - 
client.app, user_id=user_id, cluster_id=cluster_id - ) - assert isinstance(cluster_details, dict) - - -@settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) -@given(cluster_patch=st.from_type(ClusterPatch)) -async def test_update_cluster( - mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID, cluster_patch -): - print(f"--> updating cluster with {cluster_patch=}") - updated_cluster = await api.update_cluster( - client.app, user_id=user_id, cluster_id=cluster_id, cluster_patch=cluster_patch - ) - assert isinstance(updated_cluster, dict) - assert updated_cluster["id"] == cluster_id - - -async def test_delete_cluster( - mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID -): - await api.delete_cluster(client.app, user_id=user_id, cluster_id=cluster_id) - - -@settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) -@given(cluster_ping=st.builds(ClusterPing)) -async def test_ping_cluster(mocked_director_v2, client, cluster_ping: ClusterPing): - await api.ping_cluster(client.app, cluster_ping=cluster_ping) - - -async def test_ping_specific_cluster( - mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID -): - await api.ping_specific_cluster(client.app, user_id=user_id, cluster_id=cluster_id) diff --git a/services/web/server/tests/unit/with_dbs/01/test_director_v2_handlers.py b/services/web/server/tests/unit/with_dbs/01/test_director_v2_handlers.py index 8cbcfbdf739..e2c9b7e03c1 100644 --- a/services/web/server/tests/unit/with_dbs/01/test_director_v2_handlers.py +++ b/services/web/server/tests/unit/with_dbs/01/test_director_v2_handlers.py @@ -111,9 +111,11 @@ async def test_stop_computation( rsp = await client.post(f"{url}") await assert_status( rsp, - status.HTTP_204_NO_CONTENT - if user_role == UserRole.GUEST - else expected.no_content, + ( + status.HTTP_204_NO_CONTENT + if user_role == UserRole.GUEST + else expected.no_content + ), ) diff --git a/services/web/server/tests/unit/with_dbs/03/test_project_db.py b/services/web/server/tests/unit/with_dbs/03/test_project_db.py index fadfe561267..1d73a0e88c4 100644 --- a/services/web/server/tests/unit/with_dbs/03/test_project_db.py +++ b/services/web/server/tests/unit/with_dbs/03/test_project_db.py @@ -201,7 +201,7 @@ async def _inserter(prj: dict[str, Any], **overrides) -> dict[str, Any]: for group_id, permissions in _access_rights.items(): await update_or_insert_project_group( client.app, - new_project["uuid"], + project_id=new_project["uuid"], group_id=int(group_id), read=permissions["read"], write=permissions["write"], diff --git a/services/web/server/tests/unit/with_dbs/04/workspaces/conftest.py b/services/web/server/tests/unit/with_dbs/04/workspaces/conftest.py index 744b30da23b..fa008269aaf 100644 --- a/services/web/server/tests/unit/with_dbs/04/workspaces/conftest.py +++ b/services/web/server/tests/unit/with_dbs/04/workspaces/conftest.py @@ -5,6 +5,7 @@ import pytest import sqlalchemy as sa +from simcore_postgres_database.models.projects import projects from simcore_postgres_database.models.workspaces import workspaces @@ -13,3 +14,4 @@ def workspaces_clean_db(postgres_db: sa.engine.Engine) -> Iterator[None]: with postgres_db.connect() as con: yield con.execute(workspaces.delete()) + con.execute(projects.delete()) diff --git a/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_folders_between_workspaces.py b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_folders_between_workspaces.py new file mode 100644 index 
00000000000..ea7105a3338 --- /dev/null +++ b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_folders_between_workspaces.py @@ -0,0 +1,274 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments +# pylint: disable=too-many-statements + + +from copy import deepcopy +from http.client import NO_CONTENT + +import pytest +from aiohttp.test_utils import TestClient +from pytest_mock import MockerFixture +from pytest_simcore.helpers.assert_checks import assert_status +from pytest_simcore.helpers.webserver_login import UserInfoDict +from pytest_simcore.helpers.webserver_projects import create_project +from servicelib.aiohttp import status +from simcore_service_webserver.db.models import UserRole +from simcore_service_webserver.db.plugin import setup_db +from simcore_service_webserver.projects.models import ProjectDict + + +@pytest.fixture +def user_role() -> UserRole: + return UserRole.USER + + +@pytest.fixture +def mock_catalog_api_get_services_for_user_in_product(mocker: MockerFixture): + mocker.patch( + "simcore_service_webserver.projects._crud_api_read.get_services_for_user_in_product", + spec=True, + return_value=[], + ) + mocker.patch( + "simcore_service_webserver.projects._crud_handlers.get_services_for_user_in_product", + spec=True, + return_value=[], + ) + mocker.patch( + "simcore_service_webserver.projects._crud_handlers.project_uses_available_services", + spec=True, + return_value=True, + ) + + +@pytest.fixture +async def moving_folder_id( + client: TestClient, + logged_user: UserInfoDict, + fake_project: ProjectDict, +) -> str: + assert client.app + setup_db(client.app) + + ### Project creation + + # Create 2 projects + project_data = deepcopy(fake_project) + first_project = await create_project( + client.app, + params_override=project_data, + user_id=logged_user["id"], + product_name="osparc", + ) + second_project = await create_project( + client.app, + params_override=project_data, + user_id=logged_user["id"], + product_name="osparc", + ) + + ### Folder creation + + # Create folder + url = client.app.router["create_folder"].url_for() + resp = await client.post( + f"{url}", + json={ + "name": "Original user folder", + }, + ) + first_folder, _ = await assert_status(resp, status.HTTP_201_CREATED) + + # Create sub folder of previous folder + url = client.app.router["create_folder"].url_for() + resp = await client.post( + f"{url}", + json={ + "name": "Second user folder", + "parentFolderId": f"{first_folder['folderId']}", + }, + ) + second_folder, _ = await assert_status(resp, status.HTTP_201_CREATED) + + # Create sub sub folder of previous sub folder + url = client.app.router["create_folder"].url_for() + resp = await client.post( + f"{url}", + json={ + "name": "Third user folder", + "parentFolderId": f"{second_folder['folderId']}", + }, + ) + third_folder, _ = await assert_status(resp, status.HTTP_201_CREATED) + + ### Move projects to subfolder + # add first project to the folder + url = client.app.router["replace_project_folder"].url_for( + folder_id=f"{second_folder['folderId']}", project_id=f"{first_project['uuid']}" + ) + resp = await client.put(f"{url}") + await assert_status(resp, status.HTTP_204_NO_CONTENT) + # add second project to the folder + url = client.app.router["replace_project_folder"].url_for( + folder_id=f"{second_folder['folderId']}", project_id=f"{second_project['uuid']}" + ) + resp = await client.put(f"{url}") + await assert_status(resp, 
status.HTTP_204_NO_CONTENT) + + ## Double check whether everything is setup OK + url = ( + client.app.router["list_projects"] + .url_for() + .with_query({"folder_id": f"{second_folder['folderId']}"}) + ) + resp = await client.get(f"{url}") + data, _ = await assert_status(resp, status.HTTP_200_OK) + assert len(data) == 2 + + url = ( + client.app.router["list_projects"] + .url_for() + .with_query({"folder_id": f"{first_folder['folderId']}"}) + ) + resp = await client.get(f"{url}") + data, _ = await assert_status(resp, status.HTTP_200_OK) + assert len(data) == 0 + + url = client.app.router["list_projects"].url_for().with_query({"folder_id": "null"}) + resp = await client.get(f"{url}") + data, _ = await assert_status(resp, status.HTTP_200_OK) + assert len(data) == 0 + + url = client.app.router["list_folders"].url_for().with_query({"folder_id": "null"}) + resp = await client.get(f"{url}") + data, _ = await assert_status(resp, status.HTTP_200_OK) + assert len(data) == 1 + + url = ( + client.app.router["list_folders"] + .url_for() + .with_query({"folder_id": f"{first_folder['folderId']}"}) + ) + resp = await client.get(f"{url}") + data, _ = await assert_status(resp, status.HTTP_200_OK) + assert len(data) == 1 + + return f"{second_folder['folderId']}" + + +async def _move_folder_to_workspace_and_assert( + client: TestClient, folder_id: str, workspace_id: str +): + assert client.app + + # MOVE + url = client.app.router["move_folder_to_workspace"].url_for( + folder_id=folder_id, + workspace_id=workspace_id, + ) + resp = await client.post(f"{url}") + await assert_status(resp, NO_CONTENT) + + # ASSERT + url = ( + client.app.router["list_projects"] + .url_for() + .with_query( + { + "folder_id": folder_id, + "workspace_id": workspace_id, + } + ) + ) + resp = await client.get(f"{url}") + data, _ = await assert_status(resp, status.HTTP_200_OK) + assert len(data) == 2 + + url = ( + client.app.router["list_folders"] + .url_for() + .with_query( + { + "folder_id": folder_id, + "workspace_id": workspace_id, + } + ) + ) + resp = await client.get(f"{url}") + data, _ = await assert_status(resp, status.HTTP_200_OK) + assert len(data) == 1 + + +async def test_moving_between_private_and_shared_workspaces( + client: TestClient, + logged_user: UserInfoDict, + mock_catalog_api_get_services_for_user_in_product: MockerFixture, + fake_project: ProjectDict, + moving_folder_id: str, + workspaces_clean_db: None, +): + assert client.app + + # We will test these scenarios of moving folders: + # 1. Private workspace -> Shared workspace + # 2. Shared workspace A -> Shared workspace B + # 3. Shared workspace A -> Shared workspace A (Corner case - This endpoint is not used like this) + # 4. Shared workspace -> Private workspace + # 5. Private workspace -> Private workspace (Corner case - This endpoint is not used like this) + + # create a new workspace + url = client.app.router["create_workspace"].url_for() + resp = await client.post( + f"{url}", + json={ + "name": "A", + "description": "A", + "thumbnail": None, + }, + ) + shared_workspace_A, _ = await assert_status(resp, status.HTTP_201_CREATED) + + # 1. 
Private workspace -> Shared workspace A + await _move_folder_to_workspace_and_assert( + client, + folder_id=moving_folder_id, + workspace_id=f"{shared_workspace_A['workspaceId']}", + ) + + # create a new workspace + url = client.app.router["create_workspace"].url_for() + resp = await client.post( + f"{url}", + json={ + "name": "B", + "description": "B", + "thumbnail": None, + }, + ) + shared_workspace_B, _ = await assert_status(resp, status.HTTP_201_CREATED) + # 2. Shared workspace A -> Shared workspace B + await _move_folder_to_workspace_and_assert( + client, + folder_id=moving_folder_id, + workspace_id=f"{shared_workspace_B['workspaceId']}", + ) + + # 3. (Corner case) Shared workspace B -> Shared workspace B + await _move_folder_to_workspace_and_assert( + client, + folder_id=moving_folder_id, + workspace_id=f"{shared_workspace_B['workspaceId']}", + ) + + # 4. Shared workspace -> Private workspace + await _move_folder_to_workspace_and_assert( + client, folder_id=moving_folder_id, workspace_id="null" + ) + + # 5. (Corner case) Private workspace -> Private workspace + await _move_folder_to_workspace_and_assert( + client, folder_id=moving_folder_id, workspace_id="null" + ) diff --git a/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_projects_between_workspaces.py b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_projects_between_workspaces.py index 21b16ea9738..a81c76012a0 100644 --- a/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_projects_between_workspaces.py +++ b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_projects_between_workspaces.py @@ -55,10 +55,10 @@ async def test_moving_between_workspaces_user_role_permissions( workspaces_clean_db: None, ): # Move project from workspace to your private workspace - base_url = client.app.router["replace_project_workspace"].url_for( + base_url = client.app.router["move_project_to_workspace"].url_for( project_id=fake_project["uuid"], workspace_id="null" ) - resp = await client.put(f"{base_url}") + resp = await client.post(f"{base_url}") await assert_status(resp, expected.no_content) @@ -103,10 +103,10 @@ async def test_moving_between_private_and_shared_workspaces( assert data["workspaceId"] == added_workspace["workspaceId"] # <-- Workspace ID # Move project from workspace to your private workspace - base_url = client.app.router["replace_project_workspace"].url_for( + base_url = client.app.router["move_project_to_workspace"].url_for( project_id=project["uuid"], workspace_id="null" ) - resp = await client.put(f"{base_url}") + resp = await client.post(f"{base_url}") await assert_status(resp, status.HTTP_204_NO_CONTENT) # Get project in workspace @@ -116,10 +116,10 @@ async def test_moving_between_private_and_shared_workspaces( assert data["workspaceId"] is None # <-- Workspace ID is None # Move project from your private workspace to shared workspace - base_url = client.app.router["replace_project_workspace"].url_for( + base_url = client.app.router["move_project_to_workspace"].url_for( project_id=project["uuid"], workspace_id=f"{added_workspace['workspaceId']}" ) - resp = await client.put(f"{base_url}") + resp = await client.post(f"{base_url}") await assert_status(resp, status.HTTP_204_NO_CONTENT) # Get project in workspace @@ -182,10 +182,10 @@ async def test_moving_between_shared_and_shared_workspaces( assert data["workspaceId"] == added_workspace["workspaceId"] # <-- Workspace ID # Move project from workspace to your private 
workspace - base_url = client.app.router["replace_project_workspace"].url_for( + base_url = client.app.router["move_project_to_workspace"].url_for( project_id=project["uuid"], workspace_id=f"{second_workspace['workspaceId']}" ) - resp = await client.put(f"{base_url}") + resp = await client.post(f"{base_url}") await assert_status(resp, status.HTTP_204_NO_CONTENT) # Get project in workspace @@ -262,10 +262,10 @@ async def test_moving_between_workspaces_check_removed_from_folder( assert data["workspaceId"] == added_workspace["workspaceId"] # <-- Workspace ID # Move project from workspace to your private workspace - base_url = client.app.router["replace_project_workspace"].url_for( + base_url = client.app.router["move_project_to_workspace"].url_for( project_id=project["uuid"], workspace_id="none" ) - resp = await client.put(f"{base_url}") + resp = await client.post(f"{base_url}") await assert_status(resp, status.HTTP_204_NO_CONTENT) # Get project in workspace diff --git a/tests/environment-setup/test_used_docker_compose.py b/tests/environment-setup/test_used_docker_compose.py index 93d07ba9b66..c083c79b206 100644 --- a/tests/environment-setup/test_used_docker_compose.py +++ b/tests/environment-setup/test_used_docker_compose.py @@ -76,7 +76,7 @@ def ensure_env_file(env_devel_file: Path) -> Iterable[Path]: def _skip_not_useful_docker_composes(p) -> bool: - result = "osparc-gateway-server" not in f"{p}" and "manual" not in f"{p}" + result = "manual" not in f"{p}" result &= "tests/performance" not in f"{p}" return result
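With the osparc-gateway-server compose files gone, the predicate above only has to exclude the manually-managed and performance-test compose files. For context, here is a minimal sketch of how such a predicate is typically applied when collecting compose files to check; the `compose_files_to_check` helper and the glob pattern are illustrative and are not the test module's actual collection code.

```python
from pathlib import Path


def _skip_not_useful_docker_composes(p) -> bool:
    # keep a compose file unless it is manually managed or part of the
    # performance-test setup (the osparc-gateway-server exclusion is gone)
    result = "manual" not in f"{p}"
    result &= "tests/performance" not in f"{p}"
    return result


def compose_files_to_check(repo_root: Path) -> list[Path]:
    # illustrative helper: gather every docker-compose*.yml under the repo
    # and drop the ones the predicate filters out
    candidates = sorted(repo_root.rglob("docker-compose*.yml"))
    return [p for p in candidates if _skip_not_useful_docker_composes(p)]
```

Called from the repository root, e.g. `compose_files_to_check(Path("."))`, this would yield the same set of compose files the simplified filter lets through.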