diff --git a/packages/models-library/src/models_library/projects_pipeline.py b/packages/models-library/src/models_library/projects_pipeline.py
index 975d4726b4e4..6b8f16d58d39 100644
--- a/packages/models-library/src/models_library/projects_pipeline.py
+++ b/packages/models-library/src/models_library/projects_pipeline.py
@@ -4,7 +4,6 @@
 import arrow
 from pydantic import BaseModel, ConfigDict, Field, PositiveInt
 
-from .clusters import ClusterID
 from .projects_nodes import NodeState
 from .projects_nodes_io import NodeID
 from .projects_state import RunningState
@@ -40,10 +39,6 @@ class ComputationTask(BaseModel):
         ...,
         description="the iteration id of the computation task (none if no task ran yet)",
     )
-    cluster_id: ClusterID | None = Field(
-        ...,
-        description="the cluster on which the computaional task runs/ran (none if no task ran yet)",
-    )
     started: datetime.datetime | None = Field(
         ...,
         description="the timestamp when the computation was started or None if not started yet",
diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
index 379dcec4dbda..f0b6e635ac7c 100644
--- a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
+++ b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
@@ -28,9 +28,8 @@
     ComputationGet,
     ComputationStop,
 )
-from models_library.clusters import DEFAULT_CLUSTER_ID
 from models_library.projects import ProjectAtDB, ProjectID
-from models_library.projects_nodes_io import NodeID, NodeIDStr
+from models_library.projects_nodes_io import NodeID
 from models_library.projects_state import RunningState
 from models_library.services import ServiceKeyVersion
 from models_library.users import UserID
@@ -155,7 +154,7 @@ async def _get_project_node_names(
     project_uuid: ProjectID, node_id: NodeID
 ) -> tuple[str, str]:
     prj = await project_repo.get_project(project_uuid)
-    node_id_str = NodeIDStr(f"{node_id}")
+    node_id_str = f"{node_id}"
     if node_id_str not in prj.workbench:
         _logger.error(
             "%s not found in %s. it is an ancestor of %s. Please check!",
@@ -228,7 +227,6 @@ async def _try_start_pipeline(
         app,
         user_id=computation.user_id,
         project_id=computation.project_id,
-        cluster_id=DEFAULT_CLUSTER_ID,
         run_metadata=RunMetadataDict(
             node_id_names_map={
                 NodeID(node_idstr): node_data.label
@@ -391,7 +389,6 @@ async def create_computation(  # noqa: PLR0913 # pylint: disable=too-many-positi
             else None
         ),
         iteration=last_run.iteration if last_run else None,
-        cluster_id=last_run.cluster_id if last_run else None,
         result=None,
         started=compute_pipeline_started_timestamp(
             minimal_computational_dag, comp_tasks
@@ -498,7 +495,6 @@ async def get_computation(
             else None
         ),
         iteration=last_run.iteration if last_run else None,
-        cluster_id=last_run.cluster_id if last_run else None,
         result=None,
         started=compute_pipeline_started_timestamp(pipeline_dag, all_tasks),
         stopped=compute_pipeline_stopped_timestamp(pipeline_dag, all_tasks),
@@ -573,7 +569,6 @@ async def stop_computation(
         url=TypeAdapter(AnyHttpUrl).validate_python(f"{request.url}"),
         stop_url=None,
         iteration=last_run.iteration if last_run else None,
-        cluster_id=last_run.cluster_id if last_run else None,
         result=None,
         started=compute_pipeline_started_timestamp(pipeline_dag, tasks),
         stopped=compute_pipeline_stopped_timestamp(pipeline_dag, tasks),
diff --git a/services/director-v2/src/simcore_service_director_v2/core/application.py b/services/director-v2/src/simcore_service_director_v2/core/application.py
index 43a9dcc4e031..4b62b4ce73cb 100644
--- a/services/director-v2/src/simcore_service_director_v2/core/application.py
+++ b/services/director-v2/src/simcore_service_director_v2/core/application.py
@@ -35,7 +35,6 @@
 )
 from ..modules.osparc_variables import substitutions
 from .errors import (
-    ClusterAccessForbiddenError,
     ClusterNotFoundError,
     PipelineNotFoundError,
     ProjectNetworkNotFoundError,
@@ -75,12 +74,6 @@ def _set_exception_handlers(app: FastAPI):
             status.HTTP_404_NOT_FOUND, ClusterNotFoundError
         ),
     )
-    app.add_exception_handler(
-        ClusterAccessForbiddenError,
-        make_http_error_handler_for_exception(
-            status.HTTP_403_FORBIDDEN, ClusterAccessForbiddenError
-        ),
-    )
 
     # SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy
     app.add_exception_handler(
diff --git a/services/director-v2/src/simcore_service_director_v2/core/errors.py b/services/director-v2/src/simcore_service_director_v2/core/errors.py
index 01c58ad6206c..1dd0243bc4d4 100644
--- a/services/director-v2/src/simcore_service_director_v2/core/errors.py
+++ b/services/director-v2/src/simcore_service_director_v2/core/errors.py
@@ -165,14 +165,6 @@ class ClusterNotFoundError(ComputationalSchedulerError):
     msg_template = "The cluster '{cluster_id}' not found"
 
 
-class ClusterAccessForbiddenError(ComputationalSchedulerError):
-    msg_template = "Insufficient rights to access cluster '{cluster_id}'"
-
-
-class ClusterInvalidOperationError(ComputationalSchedulerError):
-    msg_template = "Invalid operation on cluster '{cluster_id}'"
-
-
 #
 # SCHEDULER/CLIENT ERRORS
 #
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
index 281c9fc46303..6abadf9139e9 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
@@ -4,7 +4,7 @@
 import networkx as nx
 from aiopg.sa import Engine
 from fastapi import FastAPI
-from models_library.clusters import ClusterID
+from models_library.clusters import DEFAULT_CLUSTER_ID
 from models_library.projects import ProjectID
 from models_library.users import UserID
 from servicelib.background_task import start_periodic_task, stop_periodic_task
@@ -36,7 +36,6 @@ async def run_new_pipeline(
     *,
     user_id: UserID,
     project_id: ProjectID,
-    cluster_id: ClusterID,
     run_metadata: RunMetadataDict,
     use_on_demand_clusters: bool,
 ) -> None:
@@ -56,7 +55,7 @@ async def run_new_pipeline(
     new_run = await CompRunsRepository.instance(db_engine).create(
         user_id=user_id,
         project_id=project_id,
-        cluster_id=cluster_id,
+        cluster_id=DEFAULT_CLUSTER_ID,
         metadata=run_metadata,
         use_on_demand_clusters=use_on_demand_clusters,
     )
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
index 7609f6e956e0..d9559b6c75eb 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
@@ -30,7 +30,6 @@
 from dask_task_models_library.container_tasks.protocol import TaskOwner
 from faker import Faker
 from fastapi.applications import FastAPI
-from models_library.clusters import DEFAULT_CLUSTER_ID
 from models_library.projects import ProjectAtDB, ProjectID
 from models_library.projects_nodes_io import NodeID
 from models_library.projects_state import RunningState
@@ -169,7 +168,6 @@ async def _assert_start_pipeline(
         app,
         user_id=published_project.project.prj_owner,
         project_id=published_project.project.uuid,
-        cluster_id=DEFAULT_CLUSTER_ID,
         run_metadata=run_metadata,
         use_on_demand_clusters=False,
     )
@@ -253,7 +251,6 @@ async def _return_tasks_pending(job_ids: list[str]) -> list[DaskClientTaskState]
         mock.call(
             user_id=published_project.project.prj_owner,
             project_id=published_project.project.uuid,
-            cluster_id=DEFAULT_CLUSTER_ID,
             tasks={f"{p.node_id}": p.image},
             callback=mock.ANY,
             metadata=mock.ANY,
@@ -651,7 +648,6 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
     mocked_dask_client.send_computation_tasks.assert_called_once_with(
         user_id=published_project.project.prj_owner,
         project_id=published_project.project.uuid,
-        cluster_id=DEFAULT_CLUSTER_ID,
         tasks={
             f"{next_pending_task.node_id}": next_pending_task.image,
         },
@@ -1115,7 +1111,6 @@ async def test_broken_pipeline_configuration_is_not_scheduled_and_aborted(
         initialized_app,
         user_id=user["id"],
         project_id=sleepers_project.uuid,
-        cluster_id=DEFAULT_CLUSTER_ID,
         run_metadata=run_metadata,
         use_on_demand_clusters=False,
     )
@@ -1241,7 +1236,6 @@ async def test_handling_of_disconnected_scheduler_dask(
         initialized_app,
         user_id=published_project.project.prj_owner,
         project_id=published_project.project.uuid,
-        cluster_id=DEFAULT_CLUSTER_ID,
         run_metadata=run_metadata,
         use_on_demand_clusters=False,
     )
@@ -1749,7 +1743,6 @@ async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits(
         initialized_app,
         user_id=published_project.project.prj_owner,
         project_id=published_project.project.uuid,
-        cluster_id=DEFAULT_CLUSTER_ID,
         run_metadata=run_metadata,
         use_on_demand_clusters=True,
     )
@@ -1854,7 +1847,6 @@ async def test_pipeline_with_on_demand_cluster_with_no_clusters_keeper_fails(
         initialized_app,
         user_id=published_project.project.prj_owner,
         project_id=published_project.project.uuid,
-        cluster_id=DEFAULT_CLUSTER_ID,
         run_metadata=run_metadata,
         use_on_demand_clusters=True,
     )
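Note for reviewers: after this patch, callers no longer choose a cluster. `run_new_pipeline` drops its `cluster_id` parameter, and the scheduler manager pins every new run to `DEFAULT_CLUSTER_ID` when it creates the row via `CompRunsRepository.create` (see the `_manager.py` hunks). Below is a minimal caller sketch under that assumption; the helper name is hypothetical and not part of the patch, and `run_metadata` is typed loosely here rather than importing `RunMetadataDict`:

```python
from fastapi import FastAPI
from models_library.projects import ProjectID
from models_library.users import UserID

from simcore_service_director_v2.modules.comp_scheduler._manager import (
    run_new_pipeline,
)


async def start_pipeline(  # hypothetical helper, for illustration only
    app: FastAPI,
    user_id: UserID,
    project_id: ProjectID,
    run_metadata: dict,  # a RunMetadataDict in the real code
) -> None:
    # cluster_id is gone from the public signature; the manager now calls
    # CompRunsRepository.create(..., cluster_id=DEFAULT_CLUSTER_ID) internally.
    await run_new_pipeline(
        app,
        user_id=user_id,
        project_id=project_id,
        run_metadata=run_metadata,
        use_on_demand_clusters=False,
    )
```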