From 42476f340fe167c140e4708cd12066f27c1dc0c0 Mon Sep 17 00:00:00 2001
From: Odei Maiz <33152403+odeimaiz@users.noreply.github.com>
Date: Fri, 29 Nov 2024 14:15:47 +0100
Subject: [PATCH 01/16] =?UTF-8?q?=E2=9C=A8=20[Frontend]=20Trash=20bin=20(#?=
=?UTF-8?q?6590)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../source/class/osparc/dashboard/CardBase.js | 14 +-
.../osparc/dashboard/ContextBreadcrumbs.js | 13 +-
.../osparc/dashboard/FolderButtonItem.js | 36 +-
.../class/osparc/dashboard/GridButtonItem.js | 3 +-
.../class/osparc/dashboard/ListButtonItem.js | 3 +-
.../osparc/dashboard/ResourceBrowserBase.js | 22 +-
.../dashboard/ResourceContainerManager.js | 46 +-
.../class/osparc/dashboard/ResourceFilter.js | 83 ++++
.../class/osparc/dashboard/ServiceBrowser.js | 6 +-
.../class/osparc/dashboard/StudyBrowser.js | 460 +++++++++++++++---
.../osparc/dashboard/StudyBrowserHeader.js | 204 ++++----
.../class/osparc/dashboard/TemplateBrowser.js | 6 +-
.../osparc/dashboard/WorkspaceButtonItem.js | 81 ++-
.../dashboard/WorkspacesAndFoldersTree.js | 2 +-
.../dashboard/WorkspacesAndFoldersTreeItem.js | 1 +
.../source/class/osparc/data/Resources.js | 74 ++-
.../source/class/osparc/data/model/Folder.js | 2 +-
.../class/osparc/data/model/IframeHandler.js | 2 +-
.../class/osparc/data/model/Workspace.js | 16 +-
.../source/class/osparc/store/Folders.js | 58 +++
.../source/class/osparc/store/StaticInfo.js | 10 +
.../client/source/class/osparc/store/Store.js | 39 +-
.../source/class/osparc/store/Workspaces.js | 118 ++++-
23 files changed, 1062 insertions(+), 237 deletions(-)
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/CardBase.js b/services/static-webserver/client/source/class/osparc/dashboard/CardBase.js
index acb3fe35386..3bcd200c2ee 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/CardBase.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/CardBase.js
@@ -395,6 +395,14 @@ qx.Class.define("osparc.dashboard.CardBase", {
return this.getResourceType() === resourceType;
},
+ isItemNotClickable: function() {
+ const studyBrowserContext = osparc.store.Store.getInstance().getStudyBrowserContext();
+ return (
+ this.getBlocked() === true || // It could be blocked by IN_USE or UNKNOWN_SERVICE
+ (this.isResourceType("study") && (studyBrowserContext === "trash")) // It could be a trashed study
+ );
+ },
+
__applyResourceData: function(resourceData) {
let uuid = null;
let owner = null;
@@ -776,9 +784,9 @@ qx.Class.define("osparc.dashboard.CardBase", {
if (moveToButton) {
moveToButton.setEnabled(osparc.study.Utils.canMoveTo(resourceData));
}
- const deleteButton = menuButtons.find(menuBtn => "deleteButton" in menuBtn);
- if (deleteButton) {
- deleteButton.setEnabled(osparc.study.Utils.canBeDeleted(resourceData));
+ const trashButton = menuButtons.find(menuBtn => "trashButton" in menuBtn);
+ if (trashButton) {
+ trashButton.setEnabled(osparc.study.Utils.canBeDeleted(resourceData));
}
}
},
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/ContextBreadcrumbs.js b/services/static-webserver/client/source/class/osparc/dashboard/ContextBreadcrumbs.js
index dba858ffad8..4358bc68848 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/ContextBreadcrumbs.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/ContextBreadcrumbs.js
@@ -24,6 +24,8 @@ qx.Class.define("osparc.dashboard.ContextBreadcrumbs", {
this._setLayout(new qx.ui.layout.HBox(5).set({
alignY: "middle"
}));
+
+ osparc.store.Store.getInstance().addListener("changeStudyBrowserContext", () => this.__rebuild(), this);
},
events: {
@@ -31,14 +33,6 @@ qx.Class.define("osparc.dashboard.ContextBreadcrumbs", {
},
properties: {
- currentContext: {
- check: ["studiesAndFolders", "workspaces", "search"],
- nullable: false,
- init: "studiesAndFolders",
- event: "changeCurrentContext",
- apply: "__rebuild"
- },
-
currentWorkspaceId: {
check: "Number",
nullable: true,
@@ -60,7 +54,8 @@ qx.Class.define("osparc.dashboard.ContextBreadcrumbs", {
__rebuild: function() {
this._removeAll();
- if (this.getCurrentContext() !== "studiesAndFolders") {
+ const currentContext = osparc.store.Store.getInstance().getStudyBrowserContext();
+ if (currentContext !== "studiesAndFolders") {
return;
}
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/FolderButtonItem.js b/services/static-webserver/client/source/class/osparc/dashboard/FolderButtonItem.js
index 2932a774c92..ac919b73579 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/FolderButtonItem.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/FolderButtonItem.js
@@ -46,6 +46,8 @@ qx.Class.define("osparc.dashboard.FolderButtonItem", {
"folderSelected": "qx.event.type.Data",
"folderUpdated": "qx.event.type.Data",
"moveFolderToRequested": "qx.event.type.Data",
+ "trashFolderRequested": "qx.event.type.Data",
+ "untrashFolderRequested": "qx.event.type.Data",
"deleteFolderRequested": "qx.event.type.Data",
"changeContext": "qx.event.type.Data",
},
@@ -85,7 +87,7 @@ qx.Class.define("osparc.dashboard.FolderButtonItem", {
check: "Date",
nullable: true,
apply: "__applyLastModified"
- }
+ },
},
members: {
@@ -219,6 +221,16 @@ qx.Class.define("osparc.dashboard.FolderButtonItem", {
menu.addSeparator();
+ const trashButton = new qx.ui.menu.Button(this.tr("Trash"), "@FontAwesome5Solid/trash/12");
+ trashButton.addListener("execute", () => this.__trashFolderRequested(), this);
+ menu.add(trashButton);
+ } else if (studyBrowserContext === "trash") {
+ const restoreButton = new qx.ui.menu.Button(this.tr("Restore"), "@MaterialIcons/restore_from_trash/16");
+ restoreButton.addListener("execute", () => this.fireDataEvent("untrashFolderRequested", this.getFolder()), this);
+ menu.add(restoreButton);
+
+ menu.addSeparator();
+
const deleteButton = new qx.ui.menu.Button(this.tr("Delete"), "@FontAwesome5Solid/trash/12");
osparc.utils.Utils.setIdToWidget(deleteButton, "deleteFolderMenuItem");
deleteButton.addListener("execute", () => this.__deleteFolderRequested(), this);
@@ -229,7 +241,9 @@ qx.Class.define("osparc.dashboard.FolderButtonItem", {
},
__itemSelected: function(newVal) {
- if (newVal) {
+ const studyBrowserContext = osparc.store.Store.getInstance().getStudyBrowserContext();
+ // do not allow selecting a trashed folder
+ if (studyBrowserContext !== "trash" && newVal) {
this.fireDataEvent("folderSelected", this.getFolderId());
}
this.setValue(false);
@@ -262,6 +276,24 @@ qx.Class.define("osparc.dashboard.FolderButtonItem", {
folderEditor.addListener("cancel", () => win.close());
},
+ __trashFolderRequested: function() {
+ const trashDays = osparc.store.StaticInfo.getInstance().getTrashRetentionDays();
+ let msg = this.tr("Are you sure you want to move the Folder and all its content to the trash?");
+ msg += "<br>" + this.tr("It will be permanently deleted after ") + trashDays + " days.";
+ const confirmationWin = new osparc.ui.window.Confirmation(msg).set({
+ caption: this.tr("Move to Trash"),
+ confirmText: this.tr("Move to Trash"),
+ confirmAction: "delete"
+ });
+ confirmationWin.center();
+ confirmationWin.open();
+ confirmationWin.addListener("close", () => {
+ if (confirmationWin.getConfirmed()) {
+ this.fireDataEvent("trashFolderRequested", this.getFolderId());
+ }
+ }, this);
+ },
+
__deleteFolderRequested: function() {
const msg = this.tr("Are you sure you want to delete") + " " + this.getTitle() + "?";
const confirmationWin = new osparc.ui.window.Confirmation(msg).set({
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/GridButtonItem.js b/services/static-webserver/client/source/class/osparc/dashboard/GridButtonItem.js
index 828a0c74ba7..e9019262342 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/GridButtonItem.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/GridButtonItem.js
@@ -188,8 +188,7 @@ qx.Class.define("osparc.dashboard.GridButtonItem", {
},
__itemSelected: function() {
- // It could be blocked by IN_USE or UNKNOWN_SERVICE
- if (this.getBlocked() === true) {
+ if (this.isItemNotClickable()) {
this.setValue(false);
return;
}
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/ListButtonItem.js b/services/static-webserver/client/source/class/osparc/dashboard/ListButtonItem.js
index 71f59b970df..9c433550185 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/ListButtonItem.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/ListButtonItem.js
@@ -274,8 +274,7 @@ qx.Class.define("osparc.dashboard.ListButtonItem", {
},
__itemSelected: function() {
- // It could be blocked by IN_USE or UNKNOWN_SERVICE
- if (this.getBlocked() === true) {
+ if (this.isItemNotClickable()) {
this.setValue(false);
return;
}
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/ResourceBrowserBase.js b/services/static-webserver/client/source/class/osparc/dashboard/ResourceBrowserBase.js
index ca565c756f3..c007ca05f7e 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/ResourceBrowserBase.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/ResourceBrowserBase.js
@@ -244,7 +244,7 @@ qx.Class.define("osparc.dashboard.ResourceBrowserBase", {
this._addToLayout(searchBarFilter);
},
- _createResourcesLayout: function() {
+ _createResourcesLayout: function(flatListId) {
const toolbar = this._toolbar = new qx.ui.toolbar.ToolBar().set({
backgroundColor: "transparent",
spacing: 10,
@@ -255,7 +255,13 @@ qx.Class.define("osparc.dashboard.ResourceBrowserBase", {
this.__viewModeLayout = new qx.ui.toolbar.Part();
- const resourcesContainer = this._resourcesContainer = new osparc.dashboard.ResourceContainerManager();
+ const resourcesContainer = this._resourcesContainer = new osparc.dashboard.ResourceContainerManager(this._resourceType);
+ if (flatListId) {
+ const list = this._resourcesContainer.getFlatList();
+ if (list) {
+ osparc.utils.Utils.setIdToWidget(list, flatListId);
+ }
+ }
if (this._resourceType === "study") {
const viewMode = osparc.utils.Utils.localCache.getLocalStorageItem("studiesViewMode");
if (viewMode) {
@@ -270,6 +276,8 @@ qx.Class.define("osparc.dashboard.ResourceBrowserBase", {
resourcesContainer.addListener("emptyStudyClicked", e => this._deleteResourceRequested(e.getData()));
resourcesContainer.addListener("folderUpdated", e => this._folderUpdated(e.getData()));
resourcesContainer.addListener("moveFolderToRequested", e => this._moveFolderToRequested(e.getData()));
+ resourcesContainer.addListener("trashFolderRequested", e => this._trashFolderRequested(e.getData()));
+ resourcesContainer.addListener("untrashFolderRequested", e => this._untrashFolderRequested(e.getData()));
resourcesContainer.addListener("deleteFolderRequested", e => this._deleteFolderRequested(e.getData()));
resourcesContainer.addListener("folderSelected", e => {
const folderId = e.getData();
@@ -288,6 +296,8 @@ qx.Class.define("osparc.dashboard.ResourceBrowserBase", {
this._changeContext(context, workspaceId, folderId);
}, this);
resourcesContainer.addListener("workspaceUpdated", e => this._workspaceUpdated(e.getData()));
+ resourcesContainer.addListener("trashWorkspaceRequested", e => this._trashWorkspaceRequested(e.getData()));
+ resourcesContainer.addListener("untrashWorkspaceRequested", e => this._untrashWorkspaceRequested(e.getData()));
resourcesContainer.addListener("deleteWorkspaceRequested", e => this._deleteWorkspaceRequested(e.getData()));
this._addToLayout(resourcesContainer);
@@ -502,6 +512,14 @@ qx.Class.define("osparc.dashboard.ResourceBrowserBase", {
throw new Error("Abstract method called!");
},
+ _trashFolderRequested: function(folderId) {
+ throw new Error("Abstract method called!");
+ },
+
+ _untrashFolderRequested: function(folder) {
+ throw new Error("Abstract method called!");
+ },
+
_deleteFolderRequested: function(folderId) {
throw new Error("Abstract method called!");
},
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/ResourceContainerManager.js b/services/static-webserver/client/source/class/osparc/dashboard/ResourceContainerManager.js
index ba1485f024f..b33d5015231 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/ResourceContainerManager.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/ResourceContainerManager.js
@@ -18,7 +18,7 @@
qx.Class.define("osparc.dashboard.ResourceContainerManager", {
extend: qx.ui.core.Widget,
- construct: function() {
+ construct: function(resourceType) {
this.base(arguments);
this._setLayout(new qx.ui.layout.VBox(15));
@@ -27,19 +27,20 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
paddingBottom: 60
});
- this.__foldersList = [];
this.__workspacesList = [];
+ this.__foldersList = [];
this.__resourcesList = [];
this.__groupedContainersList = [];
+ if (resourceType === "study") {
+ const workspacesContainer = this.__workspacesContainer = new osparc.dashboard.ToggleButtonContainer();
+ this._add(workspacesContainer);
+ workspacesContainer.setVisibility(osparc.utils.DisabledPlugins.isFoldersEnabled() ? "visible" : "excluded");
- const workspacesContainer = this.__workspacesContainer = new osparc.dashboard.ToggleButtonContainer();
- workspacesContainer.setVisibility(osparc.utils.DisabledPlugins.isFoldersEnabled() ? "visible" : "excluded");
-
-
- const foldersContainer = this.__foldersContainer = new osparc.dashboard.ToggleButtonContainer();
- this._add(foldersContainer);
- foldersContainer.setVisibility(osparc.utils.DisabledPlugins.isFoldersEnabled() ? "visible" : "excluded");
+ const foldersContainer = this.__foldersContainer = new osparc.dashboard.ToggleButtonContainer();
+ this._add(foldersContainer);
+ foldersContainer.setVisibility(osparc.utils.DisabledPlugins.isFoldersEnabled() ? "visible" : "excluded");
+ }
const nonGroupedContainer = this.__nonGroupedContainer = this.__createFlatList();
this._add(nonGroupedContainer);
@@ -75,9 +76,13 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
"folderSelected": "qx.event.type.Data",
"folderUpdated": "qx.event.type.Data",
"moveFolderToRequested": "qx.event.type.Data",
+ "trashFolderRequested": "qx.event.type.Data",
+ "untrashFolderRequested": "qx.event.type.Data",
"deleteFolderRequested": "qx.event.type.Data",
"workspaceSelected": "qx.event.type.Data",
"workspaceUpdated": "qx.event.type.Data",
+ "trashWorkspaceRequested": "qx.event.type.Data",
+ "untrashWorkspaceRequested": "qx.event.type.Data",
"deleteWorkspaceRequested": "qx.event.type.Data",
"changeContext": "qx.event.type.Data",
},
@@ -260,9 +265,13 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
},
__cleanAll: function() {
- if (this.__workspacesContainer) {
- this.__workspacesContainer.removeAll();
+ if (this._getChildren().includes(this.__nonGroupedContainer)) {
+ this._remove(this.__nonGroupedContainer);
+ }
+ if (this._getChildren().includes(this.__groupedContainers)) {
+ this._remove(this.__groupedContainers);
}
+
if (this.__nonGroupedContainer) {
this.__nonGroupedContainer.removeAll();
this.__nonGroupedContainer = null;
@@ -274,7 +283,6 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
groupedContainer.getContentContainer().removeAll();
});
this.__groupedContainersList = [];
- this._removeAll();
},
__addFoldersContainer: function() {
@@ -299,9 +307,6 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
__rebuildLayout: function(resourceType) {
this.__cleanAll();
- if (osparc.utils.DisabledPlugins.isFoldersEnabled()) {
- this.__addFoldersContainer();
- }
if (this.getGroupBy()) {
const noGroupContainer = this.__createGroupContainer("no-group", "No Group", "transparent");
this.__groupedContainers.add(noGroupContainer);
@@ -361,8 +366,9 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
},
reloadWorkspaces: function() {
- this.__cleanAll();
- this._add(this.__workspacesContainer);
+ if (this.__workspacesContainer) {
+ this.__workspacesContainer.removeAll();
+ }
let workspacesCards = [];
this.__workspacesList.forEach(workspaceData => workspacesCards.push(this.__workspaceToCard(workspaceData)));
return workspacesCards;
@@ -383,6 +389,8 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
[
"workspaceSelected",
"workspaceUpdated",
+ "trashWorkspaceRequested",
+ "untrashWorkspaceRequested",
"deleteWorkspaceRequested",
].forEach(eName => card.addListener(eName, e => this.fireDataEvent(eName, e.getData())));
return card;
@@ -419,6 +427,8 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
"folderSelected",
"folderUpdated",
"moveFolderToRequested",
+ "trashFolderRequested",
+ "untrashFolderRequested",
"deleteFolderRequested",
"changeContext",
].forEach(eName => card.addListener(eName, e => this.fireDataEvent(eName, e.getData())));
@@ -494,7 +504,7 @@ qx.Class.define("osparc.dashboard.ResourceContainerManager", {
} else {
groupContainer.exclude();
}
- this._add(groupContainer);
+ this.__groupedContainers.add(groupContainer);
this.__moveNoGroupToLast();
}
const card = this.__createCard(resourceData);
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/ResourceFilter.js b/services/static-webserver/client/source/class/osparc/dashboard/ResourceFilter.js
index 0c452e3e33a..dcda538841c 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/ResourceFilter.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/ResourceFilter.js
@@ -34,6 +34,7 @@ qx.Class.define("osparc.dashboard.ResourceFilter", {
},
events: {
+ "trashContext": "qx.event.type.Event",
"changeSharedWith": "qx.event.type.Data",
"changeSelectedTags": "qx.event.type.Data",
"changeServiceType": "qx.event.type.Data"
@@ -42,6 +43,7 @@ qx.Class.define("osparc.dashboard.ResourceFilter", {
members: {
__resourceType: null,
__workspacesAndFoldersTree: null,
+ __trashButton: null,
__sharedWithButtons: null,
__tagButtons: null,
__serviceTypeButtons: null,
@@ -49,6 +51,7 @@ qx.Class.define("osparc.dashboard.ResourceFilter", {
__buildLayout: function() {
if (this.__resourceType === "study" && osparc.utils.DisabledPlugins.isFoldersEnabled()) {
this._add(this.__createWorkspacesAndFoldersTree());
+ this._add(this.__createTrashBin());
} else {
this._add(this.__createSharedWithFilterLayout());
}
@@ -62,6 +65,16 @@ qx.Class.define("osparc.dashboard.ResourceFilter", {
}
},
+ contextChanged: function(context, workspaceId, folderId) {
+ this.__workspacesAndFoldersTree.set({
+ currentWorkspaceId: workspaceId,
+ currentFolderId: folderId,
+ });
+ this.__workspacesAndFoldersTree.contextChanged(context);
+
+ this.__trashButton.setValue(context === "trash");
+ },
+
/* WORKSPACES AND FOLDERS */
__createWorkspacesAndFoldersTree: function() {
const workspacesAndFoldersTree = this.__workspacesAndFoldersTree = new osparc.dashboard.WorkspacesAndFoldersTree();
@@ -85,6 +98,76 @@ qx.Class.define("osparc.dashboard.ResourceFilter", {
},
/* /WORKSPACES AND FOLDERS */
+ /* TRASH BIN */
+ __createTrashBin: function() {
+ const trashButton = this.__trashButton = new qx.ui.toolbar.RadioButton().set({
+ value: false,
+ appearance: "filter-toggle-button",
+ label: this.tr("Trash"),
+ icon: "@FontAwesome5Solid/trash/18",
+ });
+ trashButton.addListener("changeValue", e => {
+ const trashEnabled = e.getData();
+ if (trashEnabled) {
+ this.fireEvent("trashContext");
+ }
+ });
+ this.evaluateTrashEmpty();
+ return trashButton;
+ },
+
+ evaluateTrashEmpty: function() {
+ const studiesParams = {
+ url: {
+ offset: 0,
+ limit: 1, // just one
+ orderBy: JSON.stringify({
+ field: "last_change_date",
+ direction: "desc"
+ }),
+ }
+ };
+ const foldersParams = {
+ url: {
+ offset: 0,
+ limit: 1, // just one
+ orderBy: JSON.stringify({
+ field: "modified_at",
+ direction: "desc"
+ }),
+ }
+ };
+ const workspacesParams = {
+ url: {
+ offset: 0,
+ limit: 1, // just one
+ orderBy: JSON.stringify({
+ field: "modified_at",
+ direction: "desc"
+ }),
+ }
+ };
+ Promise.all([
+ osparc.data.Resources.fetch("studies", "getPageTrashed", studiesParams),
+ osparc.data.Resources.fetch("folders", "getPageTrashed", foldersParams),
+ osparc.data.Resources.fetch("workspaces", "getPageTrashed", workspacesParams),
+ ])
+ .then(values => {
+ const nTrashedStudies = values[0].length;
+ const nTrashedFolders = values[1].length;
+ const nTrashedWorkspaces = values[2].length;
+ this.setTrashEmpty((nTrashedStudies+nTrashedFolders+nTrashedWorkspaces) === 0);
+ })
+ .catch(err => console.error(err));
+ },
+
+ setTrashEmpty: function(isEmpty) {
+ this.__trashButton.set({
+ textColor: isEmpty ? "text" : "danger-red"
+ });
+ },
+ /* /TRASH BIN */
+
/* SHARED WITH */
__createSharedWithFilterLayout: function() {
const sharedWithLayout = new qx.ui.container.Composite(new qx.ui.layout.VBox(5));
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/ServiceBrowser.js b/services/static-webserver/client/source/class/osparc/dashboard/ServiceBrowser.js
index 15278743e54..5fbaa4ebaf7 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/ServiceBrowser.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/ServiceBrowser.js
@@ -129,11 +129,7 @@ qx.Class.define("osparc.dashboard.ServiceBrowser", {
// LAYOUT //
_createLayout: function() {
this._createSearchBar();
- this._createResourcesLayout();
- const list = this._resourcesContainer.getFlatList();
- if (list) {
- osparc.utils.Utils.setIdToWidget(list, "servicesList");
- }
+ this._createResourcesLayout("servicesList");
this.__addNewServiceButtons();
this._toolbar.add(new qx.ui.core.Spacer(), {
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js b/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js
index 882848dd295..c7ef8f916f2 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js
@@ -42,6 +42,9 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
construct: function() {
this._resourceType = "study";
this.base(arguments);
+
+ const store = osparc.store.Store.getInstance();
+ this.bind("currentContext", store, "studyBrowserContext");
},
events: {
@@ -50,7 +53,7 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
properties: {
currentContext: {
- check: ["studiesAndFolders", "workspaces", "search"],
+ check: ["studiesAndFolders", "workspaces", "search", "trash"],
nullable: false,
init: "studiesAndFolders",
event: "changeCurrentContext"
@@ -96,6 +99,7 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
__workspacesList: null,
__foldersList: null,
__loadingFolders: null,
+ __loadingWorkspaces: null,
// overridden
initResources: function() {
@@ -160,10 +164,48 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
},
__reloadWorkspaces: function() {
+ if (
+ !osparc.auth.Manager.getInstance().isLoggedIn() ||
+ !osparc.utils.DisabledPlugins.isFoldersEnabled() ||
+ this.getCurrentContext() === "studiesAndFolders" ||
+ this.getCurrentContext() === "search" || // not yet implemented for workspaces
+ this.__loadingWorkspaces
+ ) {
+ return;
+ }
+
+ let request = null;
+ switch (this.getCurrentContext()) {
+ case "search": {
+ const filterData = this._searchBarFilter.getFilterData();
+ const text = filterData.text ? encodeURIComponent(filterData.text) : "";
+ request = osparc.store.Workspaces.getInstance().searchWorkspaces(text);
+ break;
+ }
+ case "workspaces": {
+ request = osparc.store.Workspaces.getInstance().fetchWorkspaces();
+ break;
+ }
+ case "trash":
+ request = osparc.store.Workspaces.getInstance().fetchAllTrashedWorkspaces();
+ break;
+ }
+
+ this.__loadingWorkspaces = true;
this.__setWorkspacesToList([]);
- osparc.store.Workspaces.getInstance().fetchWorkspaces()
+ request
.then(workspaces => {
this.__setWorkspacesToList(workspaces);
+ if (this.getCurrentContext() === "trash") {
+ if (workspaces.length) {
+ this.__header.getChildControl("empty-trash-button").show();
+ }
+ }
+ })
+ .catch(console.error)
+ .finally(() => {
+ this.__addNewWorkspaceButton();
+ this.__loadingWorkspaces = null;
});
},
@@ -177,7 +219,6 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
return;
}
- this.__loadingFolders = true;
let request = null;
switch (this.getCurrentContext()) {
case "search": {
@@ -192,14 +233,27 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
request = osparc.store.Folders.getInstance().fetchFolders(folderId, workspaceId, this.getOrderBy());
break;
}
+ case "trash":
+ request = osparc.store.Folders.getInstance().fetchAllTrashedFolders(this.getOrderBy());
+ break;
}
+
+ this.__loadingFolders = true;
this.__setFoldersToList([]);
request
.then(folders => {
this.__setFoldersToList(folders);
+ if (this.getCurrentContext() === "trash") {
+ if (folders.length) {
+ this.__header.getChildControl("empty-trash-button").show();
+ }
+ }
})
.catch(console.error)
- .finally(() => this.__loadingFolders = null);
+ .finally(() => {
+ this.__addNewFolderButton();
+ this.__loadingFolders = null;
+ });
},
__reloadStudies: function() {
@@ -257,6 +311,12 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
this._resourcesContainer.getFlatList().nextRequest = resp["_links"]["next"];
}
+ if (this.getCurrentContext() === "trash") {
+ if (this._resourcesList.length) {
+ this.__header.getChildControl("empty-trash-button").show();
+ }
+ }
+
// Show Quick Start if there are no studies in the root folder of the personal workspace
const quickStartInfo = osparc.product.quickStart.Utils.getQuickStart();
if (quickStartInfo) {
@@ -267,6 +327,7 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
const nStudies = "_meta" in resp ? resp["_meta"]["total"] : 0;
if (
nStudies === 0 &&
+ this.getCurrentContext() === "studiesAndFolders" &&
this.getCurrentWorkspaceId() === null &&
this.getCurrentFolderId() === null
) {
@@ -354,9 +415,6 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
const cards = this._resourcesContainer.reloadCards("studies");
this.__configureStudyCards(cards);
- // they were removed in the above reloadCards
- this.__reloadFolders();
-
this.__addNewStudyButtons();
const loadMoreBtn = this.__createLoadMoreButton();
@@ -382,6 +440,12 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
__reloadWorkspaceCards: function() {
this._resourcesContainer.setWorkspacesToList(this.__workspacesList);
this._resourcesContainer.reloadWorkspaces();
+ },
+
+ __addNewWorkspaceButton: function() {
+ if (this.getCurrentContext() !== "workspaces") {
+ return;
+ }
const newWorkspaceCard = new osparc.dashboard.WorkspaceButtonNew();
newWorkspaceCard.setCardKey("new-workspace");
@@ -401,13 +465,44 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
},
_workspaceUpdated: function() {
- this.__reloadWorkspaceCards();
+ this.__reloadWorkspaces();
+ },
+
+ _trashWorkspaceRequested: function(workspaceId) {
+ osparc.store.Workspaces.getInstance().trashWorkspace(workspaceId)
+ .then(() => {
+ this.__reloadWorkspaces();
+ const msg = this.tr("Successfully moved to Trash");
+ osparc.FlashMessenger.getInstance().logAs(msg, "INFO");
+ this._resourceFilter.setTrashEmpty(false);
+ })
+ .catch(err => {
+ console.error(err);
+ osparc.FlashMessenger.getInstance().logAs(err, "ERROR");
+ });
+ },
+
+ _untrashWorkspaceRequested: function(workspace) {
+ osparc.store.Workspaces.getInstance().untrashWorkspace(workspace)
+ .then(() => {
+ this.__reloadWorkspaces();
+ const msg = this.tr("Successfully restored");
+ osparc.FlashMessenger.getInstance().logAs(msg, "INFO");
+ this._resourceFilter.evaluateTrashEmpty();
+ })
+ .catch(err => {
+ console.error(err);
+ osparc.FlashMessenger.getInstance().logAs(err, "ERROR");
+ });
},
_deleteWorkspaceRequested: function(workspaceId) {
osparc.store.Workspaces.getInstance().deleteWorkspace(workspaceId)
.then(() => {
this.__reloadWorkspaces();
+ const msg = this.tr("Successfully deleted");
+ osparc.FlashMessenger.getInstance().logAs(msg, "INFO");
+ this._resourceFilter.evaluateTrashEmpty();
})
.catch(err => {
console.error(err);
@@ -420,8 +515,6 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
__reloadFolderCards: function() {
this._resourcesContainer.setFoldersToList(this.__foldersList);
this._resourcesContainer.reloadFolders();
-
- this.__addNewFolderButton();
},
__addNewFolderButton: function() {
@@ -544,9 +637,42 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
.catch(err => console.error(err));
},
+ _trashFolderRequested: function(folderId) {
+ osparc.store.Folders.getInstance().trashFolder(folderId, this.getCurrentWorkspaceId())
+ .then(() => {
+ this.__reloadFolders();
+ const msg = this.tr("Successfully moved to Trash");
+ osparc.FlashMessenger.getInstance().logAs(msg, "INFO");
+ this._resourceFilter.setTrashEmpty(false);
+ })
+ .catch(err => {
+ console.error(err);
+ osparc.FlashMessenger.getInstance().logAs(err, "ERROR");
+ })
+ },
+
+ _untrashFolderRequested: function(folder) {
+ osparc.store.Folders.getInstance().untrashFolder(folder)
+ .then(() => {
+ this.__reloadFolders();
+ const msg = this.tr("Successfully restored");
+ osparc.FlashMessenger.getInstance().logAs(msg, "INFO");
+ this._resourceFilter.evaluateTrashEmpty();
+ })
+ .catch(err => {
+ console.error(err);
+ osparc.FlashMessenger.getInstance().logAs(err, "ERROR");
+ })
+ },
+
_deleteFolderRequested: function(folderId) {
osparc.store.Folders.getInstance().deleteFolder(folderId, this.getCurrentWorkspaceId())
- .then(() => this.__reloadFolders())
+ .then(() => {
+ this.__reloadFolders();
+ const msg = this.tr("Successfully deleted");
+ osparc.FlashMessenger.getInstance().logAs(msg, "INFO");
+ this._resourceFilter.evaluateTrashEmpty();
+ })
.catch(err => console.error(err));
},
// /FOLDERS
@@ -555,7 +681,7 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
cards.forEach(card => {
card.setMultiSelectionMode(this.getMultiSelection());
card.addListener("tap", e => {
- if (card.getBlocked() === true) {
+ if (card.isItemNotClickable()) {
card.setValue(false);
} else {
this.__itemClicked(card, e.getNativeEvent().shiftKey);
@@ -634,6 +760,11 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
// loose equality: will do a Number to String conversion if necessary
sameContext &= key in reqParams && reqParams[key] == value;
});
+ // both ways
+ Object.entries(reqParams).forEach(([key, value]) => {
+ // loose equality: will do a Number to String conversion if necessary
+ sameContext &= key in currentParams && currentParams[key] == value;
+ });
return !sameContext;
},
@@ -711,6 +842,9 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
let request = null;
switch (this.getCurrentContext()) {
+ case "trash":
+ request = osparc.data.Resources.fetch("studies", "getPageTrashed", params, options);
+ break;
case "search":
request = osparc.data.Resources.fetch("studies", "getPageSearch", params, options);
break;
@@ -864,15 +998,11 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
if (osparc.utils.DisabledPlugins.isFoldersEnabled()) {
const header = this.__header = new osparc.dashboard.StudyBrowserHeader();
+ this.__header.addListener("emptyTrashRequested", () => this.__emptyTrash(), this);
this._addToLayout(header);
}
- this._createResourcesLayout();
-
- const list = this._resourcesContainer.getFlatList();
- if (list) {
- osparc.utils.Utils.setIdToWidget(list, "studiesList");
- }
+ this._createResourcesLayout("studiesList");
const importStudyButton = this.__createImportButton();
const isDisabled = osparc.utils.DisabledPlugins.isImportDisabled();
@@ -882,10 +1012,13 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
const selectStudiesButton = this.__createSelectButton();
this._toolbar.add(selectStudiesButton);
- const studiesMoveButton = this.__createMoveStudiesButton(false);
+ const studiesMoveButton = this.__createMoveStudiesButton();
this._toolbar.add(studiesMoveButton);
- const studiesDeleteButton = this.__createDeleteButton(false);
+ const studiesTrashButton = this.__createTrashStudiesButton();
+ this._toolbar.add(studiesTrashButton);
+
+ const studiesDeleteButton = this.__createDeleteStudiesButton();
this._toolbar.add(studiesDeleteButton);
this._toolbar.add(new qx.ui.core.Spacer(), {
@@ -924,8 +1057,13 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
label: selection.length > 1 ? this.tr("Move selected")+" ("+selection.length+")" : this.tr("Move")
});
+ studiesTrashButton.set({
+ visibility: selection.length && currentContext === "studiesAndFolders" ? "visible" : "excluded",
+ label: selection.length > 1 ? this.tr("Trash selected")+" ("+selection.length+")" : this.tr("Trash")
+ });
+
studiesDeleteButton.set({
- visibility: selection.length ? "visible" : "excluded",
+ visibility: selection.length && currentContext === "trash" ? "visible" : "excluded",
label: selection.length > 1 ? this.tr("Delete selected")+" ("+selection.length+")" : this.tr("Delete")
});
});
@@ -956,6 +1094,10 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
}
}, this);
+ this._resourceFilter.addListener("trashContext", () => {
+ this._changeContext("trash");
+ });
+
this._searchBarFilter.addListener("filterChanged", e => {
const filterData = e.getData();
if (filterData.text) {
@@ -987,41 +1129,56 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
currentWorkspaceId: workspaceId,
currentFolderId: folderId,
});
- this._loadingResourcesBtn.setFetching(false);
this.resetSelection();
this.setMultiSelection(false);
- this.invalidateStudies();
- this._resourcesContainer.setResourcesToList([]);
+
+ // reset lists
+ this.__setWorkspacesToList([]);
+ this.__setFoldersToList([]);
+ this._resourcesList = [];
+ this._resourcesContainer.setResourcesToList(this._resourcesList);
+ this._resourcesContainer.reloadCards("studies");
this._toolbar.show();
- if (context === "search") {
- this.__reloadFolders();
- this.__reloadStudies();
- } else if (context === "workspaces") {
- this._toolbar.hide();
- this._searchBarFilter.resetFilters();
- this.__reloadWorkspaces();
- } else if (context === "studiesAndFolders") {
- this._searchBarFilter.resetFilters();
- this.__reloadFolders();
- this.__reloadStudies();
+ switch (this.getCurrentContext()) {
+ case "studiesAndFolders":
+ this._searchBarFilter.resetFilters();
+ this.__reloadFolders();
+ this._loadingResourcesBtn.setFetching(false);
+ this.invalidateStudies();
+ this.__reloadStudies();
+ break;
+ case "workspaces":
+ this._toolbar.exclude();
+ this._searchBarFilter.resetFilters();
+ this.__reloadWorkspaces();
+ break;
+ case "search":
+ this.__reloadWorkspaces();
+ this.__reloadFolders();
+ this._loadingResourcesBtn.setFetching(false);
+ this.invalidateStudies();
+ this.__reloadStudies();
+ break;
+ case "trash":
+ this._searchBarFilter.resetFilters();
+ this.__reloadWorkspaces();
+ this.__reloadFolders();
+ this._loadingResourcesBtn.setFetching(false);
+ this.invalidateStudies();
+ this.__reloadStudies();
+ break;
}
// notify header
const header = this.__header;
header.set({
- currentContext: context,
currentWorkspaceId: workspaceId,
currentFolderId: folderId,
});
- // notify workspacesAndFoldersTree
- const workspacesAndFoldersTree = this._resourceFilter.getWorkspacesAndFoldersTree();
- workspacesAndFoldersTree.set({
- currentWorkspaceId: workspaceId,
- currentFolderId: folderId,
- });
- workspacesAndFoldersTree.contextChanged(context);
+ // notify Filters on the left
+ this._resourceFilter.contextChanged(context, workspaceId, folderId);
}
},
@@ -1129,7 +1286,32 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
return moveStudiesButton;
},
- __createDeleteButton: function() {
+ __createTrashStudiesButton: function() {
+ const trashButton = new qx.ui.form.Button(this.tr("Trash"), "@FontAwesome5Solid/trash/14").set({
+ appearance: "danger-button",
+ visibility: "excluded"
+ });
+ osparc.utils.Utils.setIdToWidget(trashButton, "deleteStudiesBtn");
+ trashButton.addListener("execute", () => {
+ const selection = this._resourcesContainer.getSelection();
+ const preferencesSettings = osparc.Preferences.getInstance();
+ if (preferencesSettings.getConfirmDeleteStudy()) {
+ const win = this.__createConfirmTrashWindow(selection.map(button => button.getTitle()));
+ win.center();
+ win.open();
+ win.addListener("close", () => {
+ if (win.getConfirmed()) {
+ this.__trashStudies(selection.map(button => this.__getStudyData(button.getUuid(), false)), false);
+ }
+ }, this);
+ } else {
+ this.__trashStudies(selection.map(button => this.__getStudyData(button.getUuid(), false)), false);
+ }
+ }, this);
+ return trashButton;
+ },
+
+ __createDeleteStudiesButton: function() {
const deleteButton = new qx.ui.form.Button(this.tr("Delete"), "@FontAwesome5Solid/trash/14").set({
appearance: "danger-button",
visibility: "excluded"
@@ -1139,7 +1321,7 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
const selection = this._resourcesContainer.getSelection();
const preferencesSettings = osparc.Preferences.getInstance();
if (preferencesSettings.getConfirmDeleteStudy()) {
- const win = this.__createConfirmWindow(selection.map(button => button.getTitle()));
+ const win = this.__createConfirmDeleteWindow(selection.map(button => button.getTitle()));
win.center();
win.open();
win.addListener("close", () => {
@@ -1154,6 +1336,20 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
return deleteButton;
},
+ __emptyTrash: function() {
+ const win = this.__createConfirmEmptyTrashWindow();
+ win.center();
+ win.open();
+ win.addListener("close", () => {
+ if (win.getConfirmed()) {
+ osparc.data.Resources.fetch("trash", "delete")
+ .then(() => {
+ this.__resetStudiesList();
+ });
+ }
+ }, this);
+ },
+
__createSelectButton: function() {
const selectButton = new qx.ui.form.ToggleButton().set({
appearance: "form-button-outlined",
@@ -1297,9 +1493,25 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
const menu = card.getMenu();
const studyData = card.getResourceData();
+ const trashed = Boolean(studyData["trashedAt"]);
const writeAccess = osparc.data.model.Study.canIWrite(studyData["accessRights"]);
const deleteAccess = osparc.data.model.Study.canIDelete(studyData["accessRights"]);
+ if (this.getCurrentContext() === "trash") {
+ if (trashed) {
+ if (writeAccess) {
+ const untrashButton = this.__getUntrashStudyMenuButton(studyData);
+ menu.add(untrashButton);
+ }
+ if (deleteAccess) {
+ const deleteButton = this.__getDeleteStudyMenuButton(studyData, false);
+ menu.addSeparator();
+ menu.add(deleteButton);
+ }
+ }
+ return;
+ }
+
const openButton = this._getOpenMenuButton(studyData);
if (openButton) {
menu.add(openButton);
@@ -1363,11 +1575,14 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
}
if (deleteAccess) {
+ menu.addSeparator();
+ const trashButton = this.__getTrashStudyMenuButton(studyData, false);
+ menu.add(trashButton);
+ } else if (this.__deleteOrRemoveMe(studyData) === "remove") {
+ // if I'm a collaborator, let me remove myself from the study; for me that acts as a Delete
+ menu.addSeparator();
const deleteButton = this.__getDeleteStudyMenuButton(studyData, false);
- if (deleteButton) {
- menu.addSeparator();
- menu.add(deleteButton);
- }
+ menu.add(deleteButton);
}
card.evaluateMenuButtons();
@@ -1549,13 +1764,33 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
},
_deleteResourceRequested: function(studyId) {
- this.__deleteStudyRequested(this.__getStudyData(studyId));
+ if (this.getCurrentContext() === "trash") {
+ this.__deleteStudyRequested(this.__getStudyData(studyId));
+ } else {
+ this.__trashStudyRequested(this.__getStudyData(studyId));
+ }
+ },
+
+ __trashStudyRequested: function(studyData) {
+ const preferencesSettings = osparc.Preferences.getInstance();
+ if (preferencesSettings.getConfirmDeleteStudy()) {
+ const win = this.__createConfirmTrashWindow([studyData.name]);
+ win.center();
+ win.open();
+ win.addListener("close", () => {
+ if (win.getConfirmed()) {
+ this.__trashStudy(studyData);
+ }
+ }, this);
+ } else {
+ this.__trashStudy(studyData);
+ }
},
__deleteStudyRequested: function(studyData) {
const preferencesSettings = osparc.Preferences.getInstance();
if (preferencesSettings.getConfirmDeleteStudy()) {
- const win = this.__createConfirmWindow([studyData.name]);
+ const win = this.__deleteOrRemoveMe(studyData) === "remove" ? this.__createConfirmRemoveForMeWindow(studyData.name) : this.__createConfirmDeleteWindow([studyData.name]);
win.center();
win.open();
win.addListener("close", () => {
@@ -1568,6 +1803,27 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
}
},
+ __getTrashStudyMenuButton: function(studyData) {
+ const trashButton = new qx.ui.menu.Button(this.tr("Trash"), "@FontAwesome5Solid/trash/12");
+ trashButton["trashButton"] = true;
+ trashButton.set({
+ appearance: "menu-button"
+ });
+ osparc.utils.Utils.setIdToWidget(trashButton, "studyItemMenuDelete");
+ trashButton.addListener("execute", () => this.__trashStudyRequested(studyData), this);
+ return trashButton;
+ },
+
+ __getUntrashStudyMenuButton: function(studyData) {
+ const restoreButton = new qx.ui.menu.Button(this.tr("Restore"), "@MaterialIcons/restore_from_trash/16");
+ restoreButton["untrashButton"] = true;
+ restoreButton.set({
+ appearance: "menu-button"
+ });
+ restoreButton.addListener("execute", () => this.__untrashStudy(studyData), this);
+ return restoreButton;
+ },
+
__getDeleteStudyMenuButton: function(studyData) {
const deleteButton = new qx.ui.menu.Button(this.tr("Delete"), "@FontAwesome5Solid/trash/12");
deleteButton["deleteButton"] = true;
@@ -1730,17 +1986,60 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
req.send(body);
},
- __doDeleteStudy: function(studyData) {
+ __untrashStudy: function(studyData) {
+ osparc.store.Store.getInstance().untrashStudy(studyData.uuid)
+ .then(() => {
+ this.__removeFromStudyList(studyData.uuid);
+ const msg = this.tr("Successfully restored");
+ osparc.FlashMessenger.getInstance().logAs(msg, "INFO");
+ this._resourceFilter.evaluateTrashEmpty();
+ })
+ .catch(err => {
+ console.error(err);
+ osparc.FlashMessenger.getInstance().logAs(err, "ERROR");
+ })
+ .finally(() => this.resetSelection());
+ },
+
+ __trashStudy: function(studyData) {
+ osparc.store.Store.getInstance().trashStudy(studyData.uuid)
+ .then(() => {
+ this.__removeFromStudyList(studyData.uuid);
+ const msg = this.tr("Successfully moved to Trash");
+ osparc.FlashMessenger.getInstance().logAs(msg, "INFO");
+ this._resourceFilter.setTrashEmpty(false);
+ })
+ .catch(err => {
+ console.error(err);
+ osparc.FlashMessenger.getInstance().logAs(err, "ERROR");
+ })
+ .finally(() => this.resetSelection());
+ },
+
+ __trashStudies: function(studiesData) {
+ studiesData.forEach(studyData => this.__trashStudy(studyData));
+ },
+
+ __deleteOrRemoveMe: function(studyData) {
+ const deleteAccess = osparc.data.model.Study.canIDelete(studyData["accessRights"]);
const myGid = osparc.auth.Data.getInstance().getGroupId();
const collabGids = Object.keys(studyData["accessRights"]);
const amICollaborator = collabGids.indexOf(myGid) > -1;
+ return (!deleteAccess && collabGids.length > 1 && amICollaborator) ? "remove" : "delete";
+ },
+ __removeMeFromCollaborators: function(studyData) {
+ const arCopy = osparc.utils.Utils.deepCloneObject(studyData["accessRights"]);
+ // remove me from collaborators
+ const myGid = osparc.auth.Data.getInstance().getGroupId();
+ delete arCopy[myGid];
+ return osparc.info.StudyUtils.patchStudyData(studyData, "accessRights", arCopy);
+ },
+
+ __doDeleteStudy: function(studyData) {
let operationPromise = null;
- if (collabGids.length > 1 && amICollaborator) {
- const arCopy = osparc.utils.Utils.deepCloneObject(studyData["accessRights"]);
- // remove collaborator
- delete arCopy[myGid];
- operationPromise = osparc.info.StudyUtils.patchStudyData(studyData, "accessRights", arCopy);
+ if (this.__deleteOrRemoveMe(studyData) === "remove") {
+ operationPromise = this.__removeMeFromCollaborators(studyData);
} else {
// delete study
operationPromise = osparc.store.Store.getInstance().deleteStudy(studyData.uuid);
@@ -1758,10 +2057,41 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
studiesData.forEach(studyData => this.__doDeleteStudy(studyData));
},
- __createConfirmWindow: function(studyNames) {
- const rUSure = this.tr("Are you sure you want to delete");
+ __createConfirmTrashWindow: function(studyNames) {
+ let msg = this.tr("Are you sure you want to move");
+ if (studyNames.length > 1) {
+ const studiesText = osparc.product.Utils.getStudyAlias({plural: true});
+ msg += ` ${studyNames.length} ${studiesText} `
+ } else {
+ msg += ` '${studyNames[0]}' `;
+ }
+ msg += this.tr("to the Trash?");
+ const trashDays = osparc.store.StaticInfo.getInstance().getTrashRetentionDays();
+ msg += "<br>" + (studyNames.length > 1 ? "They" : "It") + this.tr(` will be permanently deleted after ${trashDays} days.`);
+ const confirmationWin = new osparc.ui.window.Confirmation(msg).set({
+ caption: this.tr("Move to Trash"),
+ confirmText: this.tr("Move to Trash"),
+ confirmAction: "delete"
+ });
+ osparc.utils.Utils.setIdToWidget(confirmationWin.getConfirmButton(), "confirmDeleteStudyBtn");
+ return confirmationWin;
+ },
+
+ __createConfirmRemoveForMeWindow: function(studyName) {
+ const msg = `'${studyName}' ` + this.tr("will be removed from your list. Collaborators will still have access.");
+ const confirmationWin = new osparc.ui.window.Confirmation(msg).set({
+ caption: this.tr("Remove"),
+ confirmText: this.tr("Remove"),
+ confirmAction: "delete"
+ });
+ osparc.utils.Utils.setIdToWidget(confirmationWin.getConfirmButton(), "confirmDeleteStudyBtn");
+ return confirmationWin;
+ },
+
+ __createConfirmDeleteWindow: function(studyNames) {
+ let msg = this.tr("Are you sure you want to delete");
const studyAlias = osparc.product.Utils.getStudyAlias({plural: studyNames.length > 1});
- const msg = rUSure + (studyNames.length > 1 ? ` ${studyNames.length} ${studyAlias}?` : ` ${studyNames[0]}?`)
+ msg += (studyNames.length > 1 ? ` ${studyNames.length} ${studyAlias}?` : ` ${studyNames[0]}?`);
const confirmationWin = new osparc.ui.window.Confirmation(msg).set({
caption: this.tr("Delete") + " " + studyAlias,
confirmText: this.tr("Delete"),
@@ -1771,6 +2101,16 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
return confirmationWin;
},
+ __createConfirmEmptyTrashWindow: function() {
+ const msg = this.tr("Items in the bin will be permanently deleted");
+ const confirmationWin = new osparc.ui.window.Confirmation(msg).set({
+ caption: this.tr("Delete"),
+ confirmText: this.tr("Delete forever"),
+ confirmAction: "delete"
+ });
+ return confirmationWin;
+ },
+
// TASKS //
__tasksReceived: function(tasks) {
tasks.forEach(taskData => this._taskDataReceived(taskData));
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowserHeader.js b/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowserHeader.js
index 87a6a366b58..cb2e130cfcf 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowserHeader.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowserHeader.js
@@ -16,7 +16,7 @@
************************************************************************ */
/**
- * Widget used for displaying a Workspace information
+ * Widget used for displaying a Study Browser's context information
*
*/
@@ -35,27 +35,20 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
alignY: "middle",
});
- this.__spacers = [];
-
this.initCurrentWorkspaceId();
this.initCurrentFolderId();
+
+ osparc.store.Store.getInstance().addListener("changeStudyBrowserContext", () => this.__buildLayout(), this);
},
events: {
"locationChanged": "qx.event.type.Data",
"workspaceUpdated": "qx.event.type.Data",
- "deleteWorkspaceRequested": "qx.event.type.Data"
+ "deleteWorkspaceRequested": "qx.event.type.Data",
+ "emptyTrashRequested": "qx.event.type.Event",
},
properties: {
- currentContext: {
- check: ["studiesAndFolders", "workspaces", "search"],
- nullable: false,
- init: "studiesAndFolders",
- event: "changeCurrentContext",
- apply: "__buildLayout"
- },
-
currentWorkspaceId: {
check: "Number",
nullable: true,
@@ -76,7 +69,7 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
nullable: false,
init: {},
event: "changeAccessRights",
- apply: "__applyAccessRights"
+ apply: "__updateShareInfo"
},
myAccessRights: {
@@ -89,7 +82,17 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
},
statics: {
- HEIGHT: 36
+ HEIGHT: 36,
+ POS: {
+ ICON: 0,
+ TITLE: 1,
+ BREADCRUMBS: 2,
+ EDIT_BUTTON: 3,
+ SHARE_LAYOUT: 4,
+ ROLE_LAYOUT: 5,
+ DESCRIPTION: 2,
+ EMPTY_TRASH_BUTTON: 3,
+ }
},
members: {
@@ -106,26 +109,25 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
allowGrowY: true,
decorator: "rounded",
});
- this._add(control);
+ this._addAt(control, this.self().POS.ICON);
break;
case "title":
control = new qx.ui.basic.Label().set({
font: "text-16",
alignY: "middle",
});
- this._add(control);
+ this._addAt(control, this.self().POS.TITLE);
break;
case "breadcrumbs":
control = new osparc.dashboard.ContextBreadcrumbs();
this.bind("currentWorkspaceId", control, "currentWorkspaceId");
this.bind("currentFolderId", control, "currentFolderId");
- this.bind("currentContext", control, "currentContext");
control.bind("currentWorkspaceId", this, "currentWorkspaceId");
control.bind("currentFolderId", this, "currentFolderId");
control.addListener("locationChanged", e => {
this.fireDataEvent("locationChanged", e.getData())
});
- this._add(control);
+ this._addAt(control, this.self().POS.BREADCRUMBS);
break;
case "edit-button":
control = new qx.ui.form.MenuButton().set({
@@ -142,14 +144,15 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
control.getContentElement().setStyles({
"border-radius": `${22 / 2}px`
});
- this._add(control);
+ this._addAt(control, this.self().POS.EDIT_BUTTON);
break;
case "share-layout":
- this.__addSpacer();
control = new qx.ui.container.Composite(new qx.ui.layout.HBox(10).set({
- alignY: "middle"
- }));
- this._add(control);
+ alignY: "middle",
+ })).set({
+ marginLeft: 10,
+ });
+ this._addAt(control, this.self().POS.SHARE_LAYOUT);
break;
case "share-text": {
control = new qx.ui.basic.Label().set({
@@ -160,11 +163,12 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
break;
}
case "role-layout":
- this.__addSpacer();
control = new qx.ui.container.Composite(new qx.ui.layout.HBox(5).set({
- alignY: "middle"
- }));
- this._add(control);
+ alignY: "middle",
+ })).set({
+ marginLeft: 10,
+ });
+ this._addAt(control, this.self().POS.ROLE_LAYOUT);
break;
case "role-text": {
control = new qx.ui.basic.Label().set({
@@ -180,6 +184,24 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
layout.addAt(control, 1);
break;
}
+ case "description": {
+ control = new qx.ui.basic.Label().set({
+ font: "text-14",
+ alignY: "middle",
+ });
+ this._addAt(control, this.self().POS.DESCRIPTION);
+ break;
+ }
+ case "empty-trash-button": {
+ control = new qx.ui.form.Button(this.tr("Empty Trash"), "@FontAwesome5Solid/trash/14").set({
+ appearance: "danger-button",
+ allowGrowY: false,
+ alignY: "middle",
+ });
+ control.addListener("execute", () => this.fireEvent("emptyTrashRequested"));
+ this._addAt(control, this.self().POS.EMPTY_TRASH_BUTTON);
+ break;
+ }
}
return control || this.base(arguments, id);
@@ -198,66 +220,82 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
__buildLayout: function() {
this.getChildControl("icon");
const title = this.getChildControl("title");
- title.resetCursor();
- title.removeListener("tap", this.__titleTapped, this);
- this.getChildControl("breadcrumbs");
- this.getChildControl("edit-button").exclude();
- this.resetAccessRights();
- this.resetMyAccessRights();
-
- const currentContext = this.getCurrentContext();
- if (currentContext === "search") {
- this.__setIcon("@FontAwesome5Solid/search/24");
- title.set({
- value: this.tr("Search results"),
- });
- } else if (currentContext === "workspaces") {
- this.__setIcon(osparc.store.Workspaces.iconPath(32));
- title.set({
- value: this.tr("Shared Workspaces"),
- })
- } else if (currentContext === "studiesAndFolders") {
- const workspaceId = this.getCurrentWorkspaceId();
- title.setCursor("pointer");
- title.addListener("tap", this.__titleTapped, this);
- const workspace = osparc.store.Workspaces.getInstance().getWorkspace(workspaceId);
- if (workspace) {
- const thumbnail = workspace.getThumbnail();
- this.__setIcon(thumbnail ? thumbnail : osparc.store.Workspaces.iconPath(32));
- workspace.bind("name", title, "value");
- workspace.bind("accessRights", this, "accessRights");
- workspace.bind("myAccessRights", this, "myAccessRights");
- } else {
- this.__setIcon("@FontAwesome5Solid/home/30");
- title.setValue(this.tr("My Workspace"));
- }
- }
- },
- __addSpacer: function() {
- const spacer = new qx.ui.basic.Label("-").set({
- font: "text-16",
- alignY: "middle",
+ const locationBreadcrumbs = this.getChildControl("breadcrumbs").set({
+ visibility: "excluded"
+ });
+ const editWorkspace = this.getChildControl("edit-button").set({
+ visibility: "excluded"
+ });
+ const shareWorkspaceLayout = this.getChildControl("share-layout").set({
+ visibility: "excluded"
+ });
+ const roleWorkspaceLayout = this.getChildControl("role-layout").set({
+ visibility: "excluded"
+ });
+
+ const description = this.getChildControl("description").set({
+ visibility: "excluded"
});
- this.__spacers.push(spacer);
- this._add(spacer);
+ // the study browser will take care of making it visible
+ this.getChildControl("empty-trash-button").set({
+ visibility: "excluded"
+ });
+
+ const currentContext = osparc.store.Store.getInstance().getStudyBrowserContext();
+ switch (currentContext) {
+ case "studiesAndFolders": {
+ const workspaceId = this.getCurrentWorkspaceId();
+ title.setCursor("pointer");
+ title.addListener("tap", this.__titleTapped, this);
+ locationBreadcrumbs.show();
+ const workspace = osparc.store.Workspaces.getInstance().getWorkspace(workspaceId);
+ if (workspace) {
+ const thumbnail = workspace.getThumbnail();
+ this.__setIcon(thumbnail ? thumbnail : osparc.store.Workspaces.iconPath(32));
+ workspace.bind("name", title, "value");
+ editWorkspace.show();
+ shareWorkspaceLayout.show();
+ roleWorkspaceLayout.show();
+ workspace.bind("accessRights", this, "accessRights");
+ workspace.bind("myAccessRights", this, "myAccessRights");
+ } else {
+ this.__setIcon("@FontAwesome5Solid/home/30");
+ title.setValue(this.tr("My Workspace"));
+ }
+ break;
+ }
+ case "workspaces":
+ this.__setIcon(osparc.store.Workspaces.iconPath(32));
+ title.setValue(this.tr("Shared Workspaces"));
+ break;
+ case "search":
+ this.__setIcon("@FontAwesome5Solid/search/24");
+ title.setValue(this.tr("Search results"));
+ break;
+ case "trash": {
+ this.__setIcon("@FontAwesome5Solid/trash/20");
+ title.setValue(this.tr("Trash"));
+ const trashDays = osparc.store.StaticInfo.getInstance().getTrashRetentionDays();
+ description.set({
+ value: this.tr(`Items in the bin will be permanently deleted after ${trashDays} days.`),
+ visibility: "visible",
+ });
+ break;
+ }
+ }
},
- __resetIcon: function() {
+ __setIcon: function(source) {
+ // reset icon first
const icon = this.getChildControl("icon");
const image = icon.getChildControl("image");
image.resetSource();
icon.getContentElement().setStyles({
"background-image": "none"
});
- },
- __setIcon: function(source) {
- this.__resetIcon();
-
- const icon = this.getChildControl("icon");
if (source.includes("@")) {
- const image = icon.getChildControl("image");
image.set({
source
});
@@ -272,10 +310,6 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
}
},
- __showSpacers: function(show) {
- this.__spacers.forEach(spacer => spacer.setVisibility(show ? "visible" : "excluded"));
- },
-
__getShareIcon: function() {
// reset previous
const layout = this.getChildControl("share-layout");
@@ -292,7 +326,7 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
return shareIcon;
},
- __applyAccessRights: function(accessRights) {
+ __updateShareInfo: function(accessRights) {
const shareIcon = this.__getShareIcon();
const shareText = this.getChildControl("share-text");
if (accessRights && Object.keys(accessRights).length) {
@@ -300,11 +334,9 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
shareText.setValue(Object.keys(accessRights).length + " members");
shareIcon.show();
shareText.show();
- this.__showSpacers(true);
} else {
shareIcon.exclude();
shareText.exclude();
- this.__showSpacers(false);
}
},
@@ -312,7 +344,8 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
const editButton = this.getChildControl("edit-button");
const roleText = this.getChildControl("role-text");
const roleIcon = this.getChildControl("role-icon");
- if (value && Object.keys(value).length) {
+ const currentContext = osparc.store.Store.getInstance().getStudyBrowserContext();
+ if (currentContext === "studiesAndFolders" && value && Object.keys(value).length) {
editButton.setVisibility(value["delete"] ? "visible" : "excluded");
const menu = new qx.ui.menu.Menu().set({
position: "bottom-right"
@@ -328,12 +361,10 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
roleText.setValue(osparc.data.Roles.WORKSPACE[val].label);
roleText.show();
roleIcon.show();
- this.__showSpacers(true);
} else {
editButton.exclude();
roleText.exclude();
roleIcon.exclude();
- this.__showSpacers(false);
}
},
@@ -346,6 +377,7 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
win.close();
this.__buildLayout();
}, this);
+ workspaceEditor.addListener("cancel", () => win.close());
},
__openShareWith: function() {
@@ -355,7 +387,7 @@ qx.Class.define("osparc.dashboard.StudyBrowserHeader", {
const win = osparc.ui.window.Window.popUpInWindow(permissionsView, title, 500, 400);
permissionsView.addListener("updateAccessRights", () => {
win.close();
- this.__applyAccessRights(workspace.getAccessRights());
+ this.__updateShareInfo(workspace.getAccessRights());
}, this);
},
}
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/TemplateBrowser.js b/services/static-webserver/client/source/class/osparc/dashboard/TemplateBrowser.js
index 3ff37cd8412..0b6fc8ccd26 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/TemplateBrowser.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/TemplateBrowser.js
@@ -246,11 +246,7 @@ qx.Class.define("osparc.dashboard.TemplateBrowser", {
// LAYOUT //
_createLayout: function() {
this._createSearchBar();
- this._createResourcesLayout();
- const list = this._resourcesContainer.getFlatList();
- if (list) {
- osparc.utils.Utils.setIdToWidget(list, "templatesList");
- }
+ this._createResourcesLayout("templatesList");
const updateAllButton = this.__createUpdateAllButton();
if (updateAllButton) {
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/WorkspaceButtonItem.js b/services/static-webserver/client/source/class/osparc/dashboard/WorkspaceButtonItem.js
index 187be3598e2..eb777ca5dd7 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/WorkspaceButtonItem.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/WorkspaceButtonItem.js
@@ -45,7 +45,9 @@ qx.Class.define("osparc.dashboard.WorkspaceButtonItem", {
events: {
"workspaceSelected": "qx.event.type.Data",
"workspaceUpdated": "qx.event.type.Data",
- "deleteWorkspaceRequested": "qx.event.type.Data"
+ "trashWorkspaceRequested": "qx.event.type.Data",
+ "untrashWorkspaceRequested": "qx.event.type.Data",
+ "deleteWorkspaceRequested": "qx.event.type.Data",
},
properties: {
@@ -183,31 +185,46 @@ qx.Class.define("osparc.dashboard.WorkspaceButtonItem", {
position: "bottom-right"
});
- const editButton = new qx.ui.menu.Button(this.tr("Edit..."), "@FontAwesome5Solid/pencil-alt/12");
- editButton.addListener("execute", () => {
- const workspace = this.getWorkspace();
- const workspaceEditor = new osparc.editor.WorkspaceEditor(workspace);
- const title = this.tr("Edit Workspace");
- const win = osparc.ui.window.Window.popUpInWindow(workspaceEditor, title, 300, 150);
- workspaceEditor.addListener("workspaceUpdated", () => {
- win.close();
- this.fireDataEvent("workspaceUpdated", workspace.getWorkspaceId());
+ const studyBrowserContext = osparc.store.Store.getInstance().getStudyBrowserContext();
+ if (
+ studyBrowserContext === "search" ||
+ studyBrowserContext === "workspaces"
+ ) {
+ const editButton = new qx.ui.menu.Button(this.tr("Edit..."), "@FontAwesome5Solid/pencil-alt/12");
+ editButton.addListener("execute", () => {
+ const workspace = this.getWorkspace();
+ const workspaceEditor = new osparc.editor.WorkspaceEditor(workspace);
+ const title = this.tr("Edit Workspace");
+ const win = osparc.ui.window.Window.popUpInWindow(workspaceEditor, title, 300, 150);
+ workspaceEditor.addListener("workspaceUpdated", () => {
+ win.close();
+ this.fireDataEvent("workspaceUpdated", workspace.getWorkspaceId());
+ });
+ workspaceEditor.addListener("cancel", () => win.close());
});
- workspaceEditor.addListener("cancel", () => win.close());
- });
- menu.add(editButton);
+ menu.add(editButton);
+
+ const shareButton = new qx.ui.menu.Button(this.tr("Share..."), "@FontAwesome5Solid/share-alt/12");
+ shareButton.addListener("execute", () => this.__openShareWith(), this);
+ menu.add(shareButton);
- const shareButton = new qx.ui.menu.Button(this.tr("Share..."), "@FontAwesome5Solid/share-alt/12");
- shareButton.addListener("execute", () => this.__openShareWith(), this);
- menu.add(shareButton);
+ menu.addSeparator();
- menu.addSeparator();
+ const trashButton = new qx.ui.menu.Button(this.tr("Trash"), "@FontAwesome5Solid/trash/12");
+ trashButton.addListener("execute", () => this.__trashWorkspaceRequested(), this);
+ menu.add(trashButton);
+ } else if (studyBrowserContext === "trash") {
+ const restoreButton = new qx.ui.menu.Button(this.tr("Restore"), "@MaterialIcons/restore_from_trash/16");
+ restoreButton.addListener("execute", () => this.fireDataEvent("untrashWorkspaceRequested", this.getWorkspace()), this);
+ menu.add(restoreButton);
- const deleteButton = new qx.ui.menu.Button(this.tr("Delete"), "@FontAwesome5Solid/trash/12");
- osparc.utils.Utils.setIdToWidget(deleteButton, "deleteWorkspaceMenuItem");
- deleteButton.addListener("execute", () => this.__deleteWorkspaceRequested(), this);
- menu.add(deleteButton);
+ menu.addSeparator();
+ const deleteButton = new qx.ui.menu.Button(this.tr("Delete"), "@FontAwesome5Solid/trash/12");
+ osparc.utils.Utils.setIdToWidget(deleteButton, "deleteWorkspaceMenuItem");
+ deleteButton.addListener("execute", () => this.__deleteWorkspaceRequested(), this);
+ menu.add(deleteButton);
+ }
menuButton.setMenu(menu);
}
},
@@ -237,7 +254,9 @@ qx.Class.define("osparc.dashboard.WorkspaceButtonItem", {
},
__itemSelected: function(newVal) {
- if (newVal) {
+ const studyBrowserContext = osparc.store.Store.getInstance().getStudyBrowserContext();
+ // workspaces are not selectable while browsing the trash
+ if (studyBrowserContext !== "trash" && newVal) {
this.fireDataEvent("workspaceSelected", this.getWorkspaceId());
}
this.setValue(false);
@@ -250,6 +269,24 @@ qx.Class.define("osparc.dashboard.WorkspaceButtonItem", {
permissionsView.addListener("updateAccessRights", () => this.__applyAccessRights(this.getWorkspace().getAccessRights()), this);
},
+ __trashWorkspaceRequested: function() {
+ const trashDays = osparc.store.StaticInfo.getInstance().getTrashRetentionDays();
+ let msg = this.tr("Are you sure you want to move the Workspace and all its content to the trash?");
+ msg += "
" + this.tr("It will be permanently deleted after ") + trashDays + " days.";
+ const confirmationWin = new osparc.ui.window.Confirmation(msg).set({
+ caption: this.tr("Move to Trash"),
+ confirmText: this.tr("Move to Trash"),
+ confirmAction: "delete"
+ });
+ confirmationWin.center();
+ confirmationWin.open();
+ confirmationWin.addListener("close", () => {
+ if (confirmationWin.getConfirmed()) {
+ this.fireDataEvent("trashWorkspaceRequested", this.getWorkspaceId());
+ }
+ }, this);
+ },
+
__deleteWorkspaceRequested: function() {
let msg = this.tr("Are you sure you want to delete") + " " + this.getTitle() + "?";
msg += "
" + this.tr("All the content of the workspace will be deleted.");
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/WorkspacesAndFoldersTree.js b/services/static-webserver/client/source/class/osparc/dashboard/WorkspacesAndFoldersTree.js
index 533c90c4643..da5a21797f1 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/WorkspacesAndFoldersTree.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/WorkspacesAndFoldersTree.js
@@ -46,7 +46,7 @@ qx.Class.define("osparc.dashboard.WorkspacesAndFoldersTree", {
this.__initTree();
// preselect "My Workspace"
- this.contextChanged(null, null);
+ this.contextChanged("studiesAndFolders");
osparc.store.Folders.getInstance().addListener("folderAdded", e => {
const folder = e.getData();
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/WorkspacesAndFoldersTreeItem.js b/services/static-webserver/client/source/class/osparc/dashboard/WorkspacesAndFoldersTreeItem.js
index 13793622cb7..75f120a86c5 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/WorkspacesAndFoldersTreeItem.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/WorkspacesAndFoldersTreeItem.js
@@ -24,6 +24,7 @@ qx.Class.define("osparc.dashboard.WorkspacesAndFoldersTreeItem", {
this.set({
indent: 12, // defaults to 19,
decorator: "rounded",
+ padding: 2,
maxWidth: osparc.dashboard.ResourceBrowserBase.SIDE_SPACER_WIDTH - 12,
});
diff --git a/services/static-webserver/client/source/class/osparc/data/Resources.js b/services/static-webserver/client/source/class/osparc/data/Resources.js
index 8ff5eb822ba..ad1b51cc840 100644
--- a/services/static-webserver/client/source/class/osparc/data/Resources.js
+++ b/services/static-webserver/client/source/class/osparc/data/Resources.js
@@ -119,6 +119,16 @@ qx.Class.define("osparc.data.Resources", {
method: "GET",
url: statics.API + "/projects?type=user"
},
+ getOne: {
+ useCache: false,
+ method: "GET",
+ url: statics.API + "/projects/{studyId}"
+ },
+ getActive: {
+ useCache: false,
+ method: "GET",
+ url: statics.API + "/projects/active?client_session_id={tabId}"
+ },
getPage: {
useCache: false,
method: "GET",
@@ -129,15 +139,10 @@ qx.Class.define("osparc.data.Resources", {
method: "GET",
url: statics.API + "/projects:search?offset={offset}&limit={limit}&text={text}&tag_ids={tagIds}&order_by={orderBy}"
},
- getOne: {
- useCache: false,
- method: "GET",
- url: statics.API + "/projects/{studyId}"
- },
- getActive: {
+ getPageTrashed: {
useCache: false,
method: "GET",
- url: statics.API + "/projects/active?client_session_id={tabId}"
+ url: statics.API + "/projects?filters={%22trashed%22:%22true%22}&offset={offset}&limit={limit}&order_by={orderBy}"
},
postToTemplate: {
method: "POST",
@@ -187,6 +192,14 @@ qx.Class.define("osparc.data.Resources", {
method: "PATCH",
url: statics.API + "/projects/{studyId}"
},
+ trash: {
+ method: "POST",
+ url: statics.API + "/projects/{studyId}:trash"
+ },
+ untrash: {
+ method: "POST",
+ url: statics.API + "/projects/{studyId}:untrash"
+ },
delete: {
method: "DELETE",
url: statics.API + "/projects/{studyId}"
@@ -301,14 +314,19 @@ qx.Class.define("osparc.data.Resources", {
method: "GET",
url: statics.API + "/folders?workspace_id={workspaceId}&folder_id={folderId}&offset={offset}&limit={limit}&order_by={orderBy}"
},
+ getOne: {
+ method: "GET",
+ url: statics.API + "/folders/{folderId}"
+ },
getPageSearch: {
useCache: false,
method: "GET",
url: statics.API + "/folders:search?offset={offset}&limit={limit}&text={text}&order_by={orderBy}"
},
- getOne: {
+ getPageTrashed: {
+ useCache: false,
method: "GET",
- url: statics.API + "/folders/{folderId}"
+ url: statics.API + "/folders?filters={%22trashed%22:%22true%22}&offset={offset}&limit={limit}&order_by={orderBy}"
},
post: {
method: "POST",
@@ -326,6 +344,14 @@ qx.Class.define("osparc.data.Resources", {
method: "PUT",
url: statics.API + "/folders/{folderId}/folders/{workspaceId}"
},
+ trash: {
+ method: "POST",
+ url: statics.API + "/folders/{folderId}:trash"
+ },
+ untrash: {
+ method: "POST",
+ url: statics.API + "/folders/{folderId}:untrash"
+ },
}
},
"workspaces": {
@@ -338,6 +364,16 @@ qx.Class.define("osparc.data.Resources", {
method: "GET",
url: statics.API + "/workspaces/{workspaceId}"
},
+ getPageSearch: {
+ useCache: false,
+ method: "GET",
+ url: statics.API + "/workspaces:search?offset={offset}&limit={limit}&text={text}&order_by={orderBy}"
+ },
+ getPageTrashed: {
+ useCache: false,
+ method: "GET",
+ url: statics.API + "/workspaces?filters={%22trashed%22:%22true%22}&offset={offset}&limit={limit}&order_by={orderBy}"
+ },
post: {
method: "POST",
url: statics.API + "/workspaces"
@@ -350,6 +386,14 @@ qx.Class.define("osparc.data.Resources", {
method: "DELETE",
url: statics.API + "/workspaces/{workspaceId}"
},
+ trash: {
+ method: "POST",
+ url: statics.API + "/workspaces/{workspaceId}:trash"
+ },
+ untrash: {
+ method: "POST",
+ url: statics.API + "/workspaces/{workspaceId}:untrash"
+ },
postAccessRights: {
method: "POST",
url: statics.API + "/workspaces/{workspaceId}/groups/{groupId}"
@@ -402,6 +446,18 @@ qx.Class.define("osparc.data.Resources", {
}
},
+ /*
+ * TRASH
+ */
+ "trash": {
+ endpoints: {
+ delete: {
+ method: "DELETE",
+ url: statics.API + "/trash"
+ }
+ }
+ },
+
/*
* SNAPSHOTS
*/
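
For orientation, the sketch below (not part of the patch) exercises the new trash endpoints declared above from a plain HTTP client. The base URL, the project id and the absence of authentication are assumptions for illustration only; the paths mirror the entries added to `osparc.data.Resources`.

```python
# Hypothetical client-side walk-through of the trash/untrash REST endpoints added above.
# Assumptions: a local web-server exposing the API under BASE_URL and a valid session
# (authentication/login cookie is omitted for brevity).
import asyncio

import aiohttp

BASE_URL = "http://localhost:9081/v0"  # assumption: local osparc web-server API prefix


async def trash_then_restore(project_id: str) -> None:
    async with aiohttp.ClientSession() as session:
        # move a project to the trash
        async with session.post(f"{BASE_URL}/projects/{project_id}:trash") as resp:
            resp.raise_for_status()

        # list trashed projects (same filter used by the frontend's getPageTrashed)
        async with session.get(
            f"{BASE_URL}/projects",
            params={"filters": '{"trashed":"true"}', "offset": "0", "limit": "20"},
        ) as resp:
            resp.raise_for_status()
            print(await resp.json())

        # restore it
        async with session.post(f"{BASE_URL}/projects/{project_id}:untrash") as resp:
            resp.raise_for_status()


if __name__ == "__main__":
    asyncio.run(trash_then_restore("00000000-0000-0000-0000-000000000000"))
```
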
diff --git a/services/static-webserver/client/source/class/osparc/data/model/Folder.js b/services/static-webserver/client/source/class/osparc/data/model/Folder.js
index b8b9eb03b21..6a255a45dc5 100644
--- a/services/static-webserver/client/source/class/osparc/data/model/Folder.js
+++ b/services/static-webserver/client/source/class/osparc/data/model/Folder.js
@@ -37,7 +37,7 @@ qx.Class.define("osparc.data.model.Folder", {
owner: folderData.owner,
createdAt: new Date(folderData.createdAt),
lastModified: new Date(folderData.modifiedAt),
- trashedAt: folderData.trashedAt ? new Date(folderData.trashedAt) : this.getTrashedAt(),
+ trashedAt: folderData.trashedAt ? new Date(folderData.trashedAt) : null,
});
},
diff --git a/services/static-webserver/client/source/class/osparc/data/model/IframeHandler.js b/services/static-webserver/client/source/class/osparc/data/model/IframeHandler.js
index fa037642af4..583b31f979e 100644
--- a/services/static-webserver/client/source/class/osparc/data/model/IframeHandler.js
+++ b/services/static-webserver/client/source/class/osparc/data/model/IframeHandler.js
@@ -300,7 +300,7 @@ qx.Class.define("osparc.data.model.IframeHandler", {
if (response.status < 400) {
this.__serviceReadyIn(srvUrl);
} else {
- console.log(`Connecting: ${srvUrl} is not reachable. Status: ${response.status}`);
+ console.error(`Connecting: ${srvUrl} is not reachable. Status: ${response.status}`);
retry();
}
})
diff --git a/services/static-webserver/client/source/class/osparc/data/model/Workspace.js b/services/static-webserver/client/source/class/osparc/data/model/Workspace.js
index 56023d1eb4e..65592fb1789 100644
--- a/services/static-webserver/client/source/class/osparc/data/model/Workspace.js
+++ b/services/static-webserver/client/source/class/osparc/data/model/Workspace.js
@@ -37,6 +37,8 @@ qx.Class.define("osparc.data.model.Workspace", {
accessRights: workspaceData.accessRights,
createdAt: new Date(workspaceData.createdAt),
modifiedAt: new Date(workspaceData.modifiedAt),
+ trashedAt: workspaceData.trashedAt ? new Date(workspaceData.trashedAt) : null,
+ trashedBy: workspaceData.trashedBy,
});
},
@@ -95,7 +97,19 @@ qx.Class.define("osparc.data.model.Workspace", {
nullable: true,
init: null,
event: "changeModifiedAt"
- }
+ },
+
+ trashedAt: {
+ check: "Date",
+ nullable: true,
+ init: null,
+ },
+
+ trashedBy: {
+ check: "Number",
+ nullable: true,
+ init: null,
+ },
},
statics: {
diff --git a/services/static-webserver/client/source/class/osparc/store/Folders.js b/services/static-webserver/client/source/class/osparc/store/Folders.js
index d6e83d8fb23..4206cb212dd 100644
--- a/services/static-webserver/client/source/class/osparc/store/Folders.js
+++ b/services/static-webserver/client/source/class/osparc/store/Folders.js
@@ -78,6 +78,33 @@ qx.Class.define("osparc.store.Folders", {
});
},
+ fetchAllTrashedFolders: function(orderBy = {
+ field: "modified_at",
+ direction: "desc"
+ }) {
+ if (osparc.auth.Data.getInstance().isGuest()) {
+ return new Promise(resolve => {
+ resolve([]);
+ });
+ }
+
+ const curatedOrderBy = this.self().curateOrderBy(orderBy);
+ const params = {
+ url: {
+ orderBy: JSON.stringify(curatedOrderBy),
+ }
+ };
+ return osparc.data.Resources.getInstance().getAllPages("folders", params, "getPageTrashed")
+ .then(trashedFoldersData => {
+ const folders = [];
+ trashedFoldersData.forEach(folderData => {
+ const folder = this.__addToCache(folderData);
+ folders.push(folder);
+ });
+ return folders;
+ });
+ },
+
searchFolders: function(
text,
orderBy = {
@@ -126,6 +153,37 @@ qx.Class.define("osparc.store.Folders", {
});
},
+ trashFolder: function(folderId, workspaceId) {
+ const params = {
+ "url": {
+ folderId
+ }
+ };
+ return osparc.data.Resources.getInstance().fetch("folders", "trash", params)
+ .then(() => {
+ const folder = this.getFolder(folderId);
+ if (folder) {
+ this.__deleteFromCache(folderId, workspaceId);
+ this.fireDataEvent("folderRemoved", folder);
+ }
+ })
+ .catch(console.error);
+ },
+
+ untrashFolder: function(folder) {
+ const params = {
+ "url": {
+ folderId: folder.getFolderId(),
+ }
+ };
+ return osparc.data.Resources.getInstance().fetch("folders", "untrash", params)
+ .then(() => {
+ this.foldersCached.unshift(folder);
+ this.fireDataEvent("folderAdded", folder);
+ })
+ .catch(console.error);
+ },
+
deleteFolder: function(folderId, workspaceId) {
const params = {
"url": {
diff --git a/services/static-webserver/client/source/class/osparc/store/StaticInfo.js b/services/static-webserver/client/source/class/osparc/store/StaticInfo.js
index 1681a801cf4..2ac96fd58b0 100644
--- a/services/static-webserver/client/source/class/osparc/store/StaticInfo.js
+++ b/services/static-webserver/client/source/class/osparc/store/StaticInfo.js
@@ -69,6 +69,16 @@ qx.Class.define("osparc.store.StaticInfo", {
return null;
},
+ getTrashRetentionDays: function() {
+ const staticKey = "webserverProjects";
+ const wsStaticData = this.getValue(staticKey);
+ const key = "PROJECTS_TRASH_RETENTION_DAYS";
+ if (key in wsStaticData) {
+ return wsStaticData[key];
+ }
+ return "unknown";
+ },
+
getAccountDeletionRetentionDays: function() {
const staticKey = "webserverLogin";
const wsStaticData = this.getValue(staticKey);
diff --git a/services/static-webserver/client/source/class/osparc/store/Store.js b/services/static-webserver/client/source/class/osparc/store/Store.js
index d2c2d24104e..7b94b336852 100644
--- a/services/static-webserver/client/source/class/osparc/store/Store.js
+++ b/services/static-webserver/client/source/class/osparc/store/Store.js
@@ -67,7 +67,7 @@ qx.Class.define("osparc.store.Store", {
nullable: true
},
studyBrowserContext: {
- check: ["studiesAndFolders", "workspaces", "search"],
+ check: ["studiesAndFolders", "workspaces", "search", "trash"],
init: "studiesAndFolders",
nullable: false,
event: "changeStudyBrowserContext",
@@ -413,6 +413,43 @@ qx.Class.define("osparc.store.Store", {
}
},
+ trashStudy: function(studyId) {
+ const params = {
+ url: {
+ studyId
+ }
+ };
+ return new Promise((resolve, reject) => {
+ osparc.data.Resources.fetch("studies", "trash", params)
+ .then(() => {
+ this.remove("studies", "uuid", studyId);
+ resolve();
+ })
+ .catch(err => {
+ console.error(err);
+ reject(err);
+ });
+ });
+ },
+
+ untrashStudy: function(studyId) {
+ const params = {
+ url: {
+ studyId
+ }
+ };
+ return new Promise((resolve, reject) => {
+ osparc.data.Resources.fetch("studies", "untrash", params)
+ .then(() => {
+ resolve();
+ })
+ .catch(err => {
+ console.error(err);
+ reject(err);
+ });
+ });
+ },
+
getTemplate: function(templateId) {
const templates = this.getTemplates();
return templates.find(template => template["uuid"] === templateId);
diff --git a/services/static-webserver/client/source/class/osparc/store/Workspaces.js b/services/static-webserver/client/source/class/osparc/store/Workspaces.js
index 253ac714a1d..924312639de 100644
--- a/services/static-webserver/client/source/class/osparc/store/Workspaces.js
+++ b/services/static-webserver/client/source/class/osparc/store/Workspaces.js
@@ -46,6 +46,15 @@ qx.Class.define("osparc.store.Workspaces", {
thumbnail,
};
},
+
+ curateOrderBy: function(orderBy) {
+ const curatedOrderBy = osparc.utils.Utils.deepCloneObject(orderBy);
+ if (curatedOrderBy.field !== "name") {
+ // only "modified_at" and "name" supported
+ curatedOrderBy.field = "modified_at";
+ }
+ return curatedOrderBy;
+ },
},
members: {
@@ -61,26 +70,105 @@ qx.Class.define("osparc.store.Workspaces", {
return osparc.data.Resources.getInstance().getAllPages("workspaces")
.then(workspacesData => {
workspacesData.forEach(workspaceData => {
- const workspace = new osparc.data.model.Workspace(workspaceData);
- this.__addToCache(workspace);
+ this.__addToCache(workspaceData);
});
return this.workspacesCached;
});
},
+ fetchAllTrashedWorkspaces: function(orderBy = {
+ field: "modified_at",
+ direction: "desc"
+ }) {
+ if (osparc.auth.Data.getInstance().isGuest()) {
+ return new Promise(resolve => {
+ resolve([]);
+ });
+ }
+
+ const curatedOrderBy = this.self().curateOrderBy(orderBy);
+ const params = {
+ url: {
+ orderBy: JSON.stringify(curatedOrderBy),
+ }
+ };
+ return osparc.data.Resources.getInstance().getAllPages("workspaces", params, "getPageTrashed")
+ .then(trashedWorkspacesData => {
+ const workspaces = [];
+ trashedWorkspacesData.forEach(workspaceData => {
+ const workspace = this.__addToCache(workspaceData);
+ workspaces.push(workspace);
+ });
+ return workspaces;
+ });
+ },
+
+ searchWorkspaces: function(text) {
+ if (osparc.auth.Data.getInstance().isGuest()) {
+ return new Promise(resolve => {
+ resolve([]);
+ });
+ }
+
+ const params = {
+ url: {
+ text,
+ }
+ };
+ return osparc.data.Resources.getInstance().getAllPages("workspaces", params, "getPageSearch")
+ .then(workspacesData => {
+ const workspaces = [];
+ workspacesData.forEach(workspaceData => {
+ const workspace = this.__addToCache(workspaceData);
+ workspaces.push(workspace);
+ });
+ return workspaces;
+ });
+ },
+
postWorkspace: function(newWorkspaceData) {
const params = {
data: newWorkspaceData
};
return osparc.data.Resources.getInstance().fetch("workspaces", "post", params)
.then(workspaceData => {
- const newWorkspace = new osparc.data.model.Workspace(workspaceData);
- this.__addToCache(newWorkspace);
+ const newWorkspace = this.__addToCache(workspaceData);
this.fireDataEvent("workspaceAdded", newWorkspace);
return newWorkspace;
});
},
+ trashWorkspace: function(workspaceId) {
+ const params = {
+ "url": {
+ workspaceId
+ }
+ };
+ return osparc.data.Resources.getInstance().fetch("workspaces", "trash", params)
+ .then(() => {
+ const workspace = this.getWorkspace(workspaceId);
+ if (workspace) {
+ this.__deleteFromCache(workspaceId);
+ this.fireDataEvent("workspaceRemoved", workspace);
+ }
+ })
+ .catch(console.error);
+ },
+
+ untrashWorkspace: function(workspace) {
+ const params = {
+ "url": {
+ workspaceId: workspace.getWorkspaceId(),
+ }
+ };
+ return osparc.data.Resources.getInstance().fetch("workspaces", "untrash", params)
+ .then(() => {
+ this.workspacesCached.unshift(workspace);
+ this.fireDataEvent("workspaceAdded", workspace);
+ })
+ .catch(console.error);
+ },
+
deleteWorkspace: function(workspaceId) {
const params = {
"url": {
@@ -201,11 +289,27 @@ qx.Class.define("osparc.store.Workspaces", {
return this.workspacesCached;
},
- __addToCache: function(workspace) {
- const found = this.workspacesCached.find(w => w.getWorkspaceId() === workspace.getWorkspaceId());
- if (!found) {
+ __addToCache: function(workspaceData) {
+ let workspace = this.workspacesCached.find(w => w.getWorkspaceId() === workspaceData["workspaceId"]);
+ if (workspace) {
+ const props = Object.keys(qx.util.PropertyUtil.getProperties(osparc.data.model.Workspace));
+ // update the cached workspace with the incoming data
+ Object.keys(workspaceData).forEach(key => {
+ if (key === "createdAt") {
+ workspace.set("createdAt", new Date(workspaceData["createdAt"]));
+ } else if (key === "modifiedAt") {
+ workspace.set("modifiedAt", new Date(workspaceData["modifiedAt"]));
+ } else if (key === "trashedAt") {
+ workspace.set("trashedAt", new Date(workspaceData["trashedAt"]));
+ } else if (props.includes(key)) {
+ workspace.set(key, workspaceData[key]);
+ }
+ });
+ } else {
+ workspace = new osparc.data.model.Workspace(workspaceData);
this.workspacesCached.unshift(workspace);
}
+ return workspace;
},
__deleteFromCache: function(workspaceId) {
From 3dded08a8e931fd7ba78c44990bbb2a817f0900b Mon Sep 17 00:00:00 2001
From: Pedro Crespo-Valero <32402063+pcrespov@users.noreply.github.com>
Date: Mon, 2 Dec 2024 00:44:29 +0100
Subject: [PATCH 02/16] =?UTF-8?q?=F0=9F=8E=A8=20web-server:=20exception=20?=
=?UTF-8?q?handling=20framework=20(#6655)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../exception_handling/__init__.py | 12 ++
.../exception_handling/_base.py | 172 +++++++++++++++
.../exception_handling/_factory.py | 134 ++++++++++++
.../exceptions_handlers.py | 90 --------
.../folders/_exceptions_handlers.py | 18 +-
.../projects/_trash_handlers.py | 16 +-
.../workspaces/_exceptions_handlers.py | 17 +-
.../unit/isolated/test_exception_handling.py | 198 ++++++++++++++++++
.../isolated/test_exception_handling_base.py | 126 +++++++++++
.../test_exception_handling_factory.py | 154 ++++++++++++++
.../unit/isolated/test_exceptions_handlers.py | 117 -----------
.../tests/unit/with_dbs/03/test_trash.py | 1 -
12 files changed, 816 insertions(+), 239 deletions(-)
create mode 100644 services/web/server/src/simcore_service_webserver/exception_handling/__init__.py
create mode 100644 services/web/server/src/simcore_service_webserver/exception_handling/_base.py
create mode 100644 services/web/server/src/simcore_service_webserver/exception_handling/_factory.py
delete mode 100644 services/web/server/src/simcore_service_webserver/exceptions_handlers.py
create mode 100644 services/web/server/tests/unit/isolated/test_exception_handling.py
create mode 100644 services/web/server/tests/unit/isolated/test_exception_handling_base.py
create mode 100644 services/web/server/tests/unit/isolated/test_exception_handling_factory.py
delete mode 100644 services/web/server/tests/unit/isolated/test_exceptions_handlers.py
diff --git a/services/web/server/src/simcore_service_webserver/exception_handling/__init__.py b/services/web/server/src/simcore_service_webserver/exception_handling/__init__.py
new file mode 100644
index 00000000000..a2f31a08861
--- /dev/null
+++ b/services/web/server/src/simcore_service_webserver/exception_handling/__init__.py
@@ -0,0 +1,12 @@
+from ._base import ExceptionHandlersMap, exception_handling_decorator
+from ._factory import ExceptionToHttpErrorMap, HttpErrorInfo, to_exceptions_handlers_map
+
+__all__: tuple[str, ...] = (
+ "ExceptionHandlersMap",
+ "ExceptionToHttpErrorMap",
+ "HttpErrorInfo",
+ "exception_handling_decorator",
+ "to_exceptions_handlers_map",
+)
+
+# nopycln: file
diff --git a/services/web/server/src/simcore_service_webserver/exception_handling/_base.py b/services/web/server/src/simcore_service_webserver/exception_handling/_base.py
new file mode 100644
index 00000000000..0c9c123bbfb
--- /dev/null
+++ b/services/web/server/src/simcore_service_webserver/exception_handling/_base.py
@@ -0,0 +1,172 @@
+import functools
+import logging
+from collections.abc import Callable, Iterable
+from contextlib import AbstractAsyncContextManager
+from types import TracebackType
+from typing import Protocol, TypeAlias
+
+from aiohttp import web
+from servicelib.aiohttp.typing_extension import Handler as WebHandler
+from servicelib.aiohttp.typing_extension import Middleware as WebMiddleware
+
+_logger = logging.getLogger(__name__)
+
+
+class AiohttpExceptionHandler(Protocol):
+ __name__: str
+
+ async def __call__(
+ self,
+ request: web.Request,
+ exception: Exception,
+ ) -> web.StreamResponse:
+ """
+ Callback that handles an exception produced during a request and transforms it into a response
+
+ Arguments:
+ request -- current request
+ exception -- exception raised in web handler during this request
+ """
+
+
+ExceptionHandlersMap: TypeAlias = dict[type[Exception], AiohttpExceptionHandler]
+
+
+def _sort_exceptions_by_specificity(
+ exceptions: Iterable[type[Exception]], *, concrete_first: bool = True
+) -> list[type[Exception]]:
+ """
+ Keyword Arguments:
+ concrete_first -- If True, concrete subclasses precede their superclass (default: {True}).
+ """
+ return sorted(
+ exceptions,
+ key=lambda exc: sum(issubclass(e, exc) for e in exceptions if e is not exc),
+ reverse=not concrete_first,
+ )
+
+
+class ExceptionHandlingContextManager(AbstractAsyncContextManager):
+ """
+ A dynamic try-except context manager for handling exceptions in web handlers.
+ Maps exception types to corresponding handlers, allowing structured error management, i.e.
+ essentially something like
+ ```
+ try:
+
+ resp = await handler(request)
+
+ except exc_type1 as exc1:
+ resp = await exc_handler1(request)
+ except exc_type2 as exc2:
+ resp = await exc_handler2(request)
+ # etc
+
+ ```
+ and `exception_handlers_map` defines the mapping of exception types (`exc_type*`) to their handlers (`exc_handler*`).
+ """
+
+ def __init__(
+ self,
+ exception_handlers_map: ExceptionHandlersMap,
+ *,
+ request: web.Request,
+ ):
+ self._exc_handlers_map = exception_handlers_map
+ self._exc_types_by_specificity = _sort_exceptions_by_specificity(
+ list(self._exc_handlers_map.keys()), concrete_first=True
+ )
+ self._request: web.Request = request
+ self._response: web.StreamResponse | None = None
+
+ def _get_exc_handler_or_none(
+ self, exc_type: type[Exception], exc_value: Exception
+ ) -> AiohttpExceptionHandler | None:
+ exc_handler = self._exc_handlers_map.get(exc_type)
+ if not exc_handler and (
+ base_exc_type := next(
+ (
+ _type
+ for _type in self._exc_types_by_specificity
+ if isinstance(exc_value, _type)
+ ),
+ None,
+ )
+ ):
+ exc_handler = self._exc_handlers_map[base_exc_type]
+ return exc_handler
+
+ async def __aenter__(self):
+ self._response = None
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> bool:
+ if (
+ exc_value is not None
+ and exc_type is not None
+ and isinstance(exc_value, Exception)
+ and issubclass(exc_type, Exception)
+ and (exc_handler := self._get_exc_handler_or_none(exc_type, exc_value))
+ ):
+ self._response = await exc_handler(
+ request=self._request, exception=exc_value
+ )
+ return True # suppress
+ return False # reraise
+
+ def get_response_or_none(self) -> web.StreamResponse | None:
+ """
+ Returns the response generated by the exception handler, if an exception was handled. Otherwise None
+ """
+ return self._response
+
+
+def exception_handling_decorator(
+ exception_handlers_map: dict[type[Exception], AiohttpExceptionHandler]
+) -> Callable[[WebHandler], WebHandler]:
+ """Creates a decorator to manage exceptions raised in a given route handler.
+ Ensures consistent exception management across decorated handlers.
+
+ SEE examples test_exception_handling
+ """
+
+ def _decorator(handler: WebHandler):
+ @functools.wraps(handler)
+ async def _wrapper(request: web.Request) -> web.StreamResponse:
+ cm = ExceptionHandlingContextManager(
+ exception_handlers_map, request=request
+ )
+ async with cm:
+ return await handler(request)
+
+ # If an exception was handled, return the exception handler's return value
+ response = cm.get_response_or_none()
+ assert response is not None # nosec
+ return response
+
+ return _wrapper
+
+ return _decorator
+
+
+def exception_handling_middleware(
+ exception_handlers_map: dict[type[Exception], AiohttpExceptionHandler]
+) -> WebMiddleware:
+ """Constructs middleware to handle exceptions raised across app routes
+
+ SEE examples test_exception_handling
+ """
+ _handle_excs = exception_handling_decorator(
+ exception_handlers_map=exception_handlers_map
+ )
+
+ @web.middleware
+ async def middleware_handler(request: web.Request, handler: WebHandler):
+ return await _handle_excs(handler)(request)
+
+ return middleware_handler
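
The following is a minimal sketch, under the assumption that `simcore_service_webserver` and `aiohttp` are importable, of how `exception_handling_decorator` is meant to be used with a hand-written handler map. `MyConflictError`, the 409 mapping and the mocked request are illustrative only, not part of the patch.

```python
# Sketch: wiring a custom exception handler into an aiohttp route handler via the
# exception_handling_decorator introduced in this patch.
import asyncio

from aiohttp import web
from aiohttp.test_utils import make_mocked_request
from simcore_service_webserver.exception_handling import exception_handling_decorator


class MyConflictError(Exception):  # hypothetical domain error
    ...


async def _conflict_to_409(request: web.Request, exception: Exception) -> web.Response:
    # converts MyConflictError into a 409 response instead of bubbling up as a 500
    return web.json_response({"error": {"message": f"{exception}"}}, status=409)


@exception_handling_decorator({MyConflictError: _conflict_to_409})
async def my_route(request: web.Request) -> web.Response:
    raise MyConflictError("resource is busy")


async def _demo() -> None:
    # emulates the request/response workflow without starting a server
    resp = await my_route(make_mocked_request("GET", "/demo"))
    assert resp.status == 409  # the exception was handled and turned into a response


if __name__ == "__main__":
    asyncio.run(_demo())
```
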
diff --git a/services/web/server/src/simcore_service_webserver/exception_handling/_factory.py b/services/web/server/src/simcore_service_webserver/exception_handling/_factory.py
new file mode 100644
index 00000000000..baae399f76b
--- /dev/null
+++ b/services/web/server/src/simcore_service_webserver/exception_handling/_factory.py
@@ -0,0 +1,134 @@
+import logging
+from typing import NamedTuple, TypeAlias
+
+from aiohttp import web
+from common_library.error_codes import create_error_code
+from common_library.json_serialization import json_dumps
+from models_library.rest_error import ErrorGet
+from servicelib.aiohttp.web_exceptions_extension import get_all_aiohttp_http_exceptions
+from servicelib.logging_errors import create_troubleshotting_log_kwargs
+from servicelib.status_codes_utils import is_5xx_server_error, is_error
+
+from ._base import AiohttpExceptionHandler, ExceptionHandlersMap
+
+_logger = logging.getLogger(__name__)
+
+
+_STATUS_CODE_TO_HTTP_ERRORS: dict[
+ int, type[web.HTTPError]
+] = get_all_aiohttp_http_exceptions(web.HTTPError)
+
+
+class _DefaultDict(dict):
+ def __missing__(self, key):
+ return f"'{key}=?'"
+
+
+class HttpErrorInfo(NamedTuple):
+ """Info provided to auto-create HTTPError"""
+
+ status_code: int
+ msg_template: str # sets HTTPError.reason
+
+
+ExceptionToHttpErrorMap: TypeAlias = dict[type[Exception], HttpErrorInfo]
+
+
+def create_error_response(error: ErrorGet, status_code: int) -> web.Response:
+ assert is_error(status_code), f"{status_code=} must be an error [{error=}]" # nosec
+
+ return web.json_response(
+ data={"error": error.model_dump(exclude_unset=True, mode="json")},
+ dumps=json_dumps,
+ reason=error.message,
+ status=status_code,
+ )
+
+
+def create_exception_handler_from_http_info(
+ status_code: int,
+ msg_template: str,
+) -> AiohttpExceptionHandler:
+ """
+ Custom exception-handler factory
+
+ Creates an `AiohttpExceptionHandler` that maps a specific exception type to a given http status code error
+
+ The returned handler formats `msg_template` with the exception's attributes (safe formatting, i.e.
+ missing fields do not raise) and responds with an error carrying the mapped status code and message.
+ Server errors (5xx) additionally log troubleshooting information with the request context.
+
+ Arguments:
+ status_code: the http status code to associate at the web-api interface to this error
+ msg_template: a template string used to build the error message shown to the user
+
+ Returns:
+ A web-api exception handler
+ """
+ assert is_error( # nosec
+ status_code
+ ), f"{status_code=} must be an error [{msg_template=}]"
+
+ async def _exception_handler(
+ request: web.Request,
+ exception: BaseException,
+ ) -> web.Response:
+
+ # safe formatting, i.e. does not raise
+ user_msg = msg_template.format_map(
+ _DefaultDict(getattr(exception, "__dict__", {}))
+ )
+
+ error = ErrorGet.model_construct(message=user_msg)
+
+ if is_5xx_server_error(status_code):
+ oec = create_error_code(exception)
+ _logger.exception(
+ **create_troubleshotting_log_kwargs(
+ user_msg,
+ error=exception,
+ error_code=oec,
+ error_context={
+ "request": request,
+ "request.remote": f"{request.remote}",
+ "request.method": f"{request.method}",
+ "request.path": f"{request.path}",
+ },
+ )
+ )
+ error = ErrorGet.model_construct(message=user_msg, support_id=oec)
+
+ return create_error_response(error, status_code=status_code)
+
+ return _exception_handler
+
+
+def to_exceptions_handlers_map(
+ exc_to_http_error_map: ExceptionToHttpErrorMap,
+) -> ExceptionHandlersMap:
+ """Data adapter to convert ExceptionToHttpErrorMap ot ExceptionHandlersMap, i.e.
+ - from { exc_type: (status, msg), ... }
+ - to { exc_type: callable, ... }
+ """
+ exc_handlers_map: ExceptionHandlersMap = {
+ exc_type: create_exception_handler_from_http_info(
+ status_code=info.status_code, msg_template=info.msg_template
+ )
+ for exc_type, info in exc_to_http_error_map.items()
+ }
+
+ return exc_handlers_map
+
+
+def create_http_error_exception_handlers_map() -> ExceptionHandlersMap:
+ """
+ Auto create handlers for **all** web.HTTPError
+ """
+ exc_handlers_map: ExceptionHandlersMap = {
+ exc_type: create_exception_handler_from_http_info(
+ status_code=code, msg_template="{reason}"
+ )
+ for code, exc_type in _STATUS_CODE_TO_HTTP_ERRORS.items()
+ }
+ return exc_handlers_map
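
A minimal sketch (not part of the patch) of the intended plugin-side pattern: domain exceptions are declared once in an `ExceptionToHttpErrorMap` and converted into a single decorator via `to_exceptions_handlers_map`. `MyNotFoundError` and its message template are hypothetical.

```python
# Sketch: declaring an exception-to-HTTP mapping for a plugin's rest handlers,
# mirroring the pattern used below in folders/_exceptions_handlers.py.
from servicelib.aiohttp import status
from simcore_service_webserver.exception_handling import (
    ExceptionToHttpErrorMap,
    HttpErrorInfo,
    exception_handling_decorator,
    to_exceptions_handlers_map,
)


class MyNotFoundError(Exception):  # hypothetical domain error
    def __init__(self, name: str):
        super().__init__(f"{name} not found")
        self.name = name  # exposed to msg_template via the exception's __dict__


_TO_HTTP_ERROR_MAP: ExceptionToHttpErrorMap = {
    MyNotFoundError: HttpErrorInfo(
        status.HTTP_404_NOT_FOUND, "Item '{name}' could not be found"
    ),
}

# a single decorator applied to every route handler of the plugin
handle_plugin_requests_exceptions = exception_handling_decorator(
    to_exceptions_handlers_map(_TO_HTTP_ERROR_MAP)
)
```
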
diff --git a/services/web/server/src/simcore_service_webserver/exceptions_handlers.py b/services/web/server/src/simcore_service_webserver/exceptions_handlers.py
deleted file mode 100644
index 7e1ae0bd3e0..00000000000
--- a/services/web/server/src/simcore_service_webserver/exceptions_handlers.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import functools
-import logging
-from collections.abc import Iterable
-from typing import NamedTuple, TypeAlias
-
-from aiohttp import web
-from servicelib.aiohttp.typing_extension import Handler
-from servicelib.aiohttp.web_exceptions_extension import get_http_error_class_or_none
-from servicelib.logging_errors import create_troubleshotting_log_kwargs
-from servicelib.status_codes_utils import is_5xx_server_error
-
-_logger = logging.getLogger(__name__)
-
-
-class HttpErrorInfo(NamedTuple):
- status_code: int
- msg_template: str
-
-
-ExceptionToHttpErrorMap: TypeAlias = dict[type[BaseException], HttpErrorInfo]
-
-
-class _DefaultDict(dict):
- def __missing__(self, key):
- return f"'{key}=?'"
-
-
-def _sort_exceptions_by_specificity(
- exceptions: Iterable[type[BaseException]], *, concrete_first: bool = True
-) -> list[type[BaseException]]:
- return sorted(
- exceptions,
- key=lambda exc: sum(issubclass(e, exc) for e in exceptions if e is not exc),
- reverse=not concrete_first,
- )
-
-
-def create_exception_handlers_decorator(
- exceptions_catch: type[BaseException] | tuple[type[BaseException], ...],
- exc_to_status_map: ExceptionToHttpErrorMap,
-):
- mapped_classes: tuple[type[BaseException], ...] = tuple(
- _sort_exceptions_by_specificity(exc_to_status_map.keys())
- )
-
- assert all( # nosec
- issubclass(cls, exceptions_catch) for cls in mapped_classes
- ), f"Every {mapped_classes=} must inherit by one or more of {exceptions_catch=}"
-
- def _decorator(handler: Handler):
- @functools.wraps(handler)
- async def _wrapper(request: web.Request) -> web.StreamResponse:
- try:
- return await handler(request)
-
- except exceptions_catch as exc:
- if exc_cls := next(
- (cls for cls in mapped_classes if isinstance(exc, cls)), None
- ):
- http_error_info = exc_to_status_map[exc_cls]
-
- # safe formatting, i.e. does not raise
- user_msg = http_error_info.msg_template.format_map(
- _DefaultDict(getattr(exc, "__dict__", {}))
- )
-
- http_error_cls = get_http_error_class_or_none(
- http_error_info.status_code
- )
- assert http_error_cls # nosec
-
- if is_5xx_server_error(http_error_info.status_code):
- _logger.exception(
- **create_troubleshotting_log_kwargs(
- user_msg,
- error=exc,
- error_context={
- "request": request,
- "request.remote": f"{request.remote}",
- "request.method": f"{request.method}",
- "request.path": f"{request.path}",
- },
- )
- )
- raise http_error_cls(reason=user_msg) from exc
- raise # reraise
-
- return _wrapper
-
- return _decorator
diff --git a/services/web/server/src/simcore_service_webserver/folders/_exceptions_handlers.py b/services/web/server/src/simcore_service_webserver/folders/_exceptions_handlers.py
index c611809decd..5d98db3647d 100644
--- a/services/web/server/src/simcore_service_webserver/folders/_exceptions_handlers.py
+++ b/services/web/server/src/simcore_service_webserver/folders/_exceptions_handlers.py
@@ -2,21 +2,17 @@
from servicelib.aiohttp import status
-from ..exceptions_handlers import (
+from ..exception_handling import (
ExceptionToHttpErrorMap,
HttpErrorInfo,
- create_exception_handlers_decorator,
-)
-from ..projects.exceptions import (
- BaseProjectError,
- ProjectRunningConflictError,
- ProjectStoppingError,
+ exception_handling_decorator,
+ to_exceptions_handlers_map,
)
+from ..projects.exceptions import ProjectRunningConflictError, ProjectStoppingError
from ..workspaces.errors import (
WorkspaceAccessForbiddenError,
WorkspaceFolderInconsistencyError,
WorkspaceNotFoundError,
- WorkspacesValueError,
)
from .errors import (
FolderAccessForbiddenError,
@@ -69,7 +65,7 @@
}
-handle_plugin_requests_exceptions = create_exception_handlers_decorator(
- exceptions_catch=(BaseProjectError, FoldersValueError, WorkspacesValueError),
- exc_to_status_map=_TO_HTTP_ERROR_MAP,
+handle_plugin_requests_exceptions = exception_handling_decorator(
+ to_exceptions_handlers_map(_TO_HTTP_ERROR_MAP)
)
+# i.e. a single decorator that dispatches each exception in _TO_HTTP_ERROR_MAP to its handler
diff --git a/services/web/server/src/simcore_service_webserver/projects/_trash_handlers.py b/services/web/server/src/simcore_service_webserver/projects/_trash_handlers.py
index ced4d0442bd..963b81c4900 100644
--- a/services/web/server/src/simcore_service_webserver/projects/_trash_handlers.py
+++ b/services/web/server/src/simcore_service_webserver/projects/_trash_handlers.py
@@ -8,10 +8,11 @@
)
from .._meta import API_VTAG as VTAG
-from ..exceptions_handlers import (
+from ..exception_handling import (
ExceptionToHttpErrorMap,
HttpErrorInfo,
- create_exception_handlers_decorator,
+ exception_handling_decorator,
+ to_exceptions_handlers_map,
)
from ..login.decorators import get_user_id, login_required
from ..products.api import get_product_name
@@ -19,11 +20,7 @@
from ..security.decorators import permission_required
from . import _trash_api
from ._common_models import RemoveQueryParams
-from .exceptions import (
- ProjectRunningConflictError,
- ProjectStoppingError,
- ProjectTrashError,
-)
+from .exceptions import ProjectRunningConflictError, ProjectStoppingError
_logger = logging.getLogger(__name__)
@@ -44,10 +41,11 @@
}
-_handle_exceptions = create_exception_handlers_decorator(
- exceptions_catch=ProjectTrashError, exc_to_status_map=_TO_HTTP_ERROR_MAP
+_handle_exceptions = exception_handling_decorator(
+ to_exceptions_handlers_map(_TO_HTTP_ERROR_MAP)
)
+
#
# ROUTES
#
diff --git a/services/web/server/src/simcore_service_webserver/workspaces/_exceptions_handlers.py b/services/web/server/src/simcore_service_webserver/workspaces/_exceptions_handlers.py
index f6470f461f7..1bb16355b80 100644
--- a/services/web/server/src/simcore_service_webserver/workspaces/_exceptions_handlers.py
+++ b/services/web/server/src/simcore_service_webserver/workspaces/_exceptions_handlers.py
@@ -2,21 +2,17 @@
from servicelib.aiohttp import status
-from ..exceptions_handlers import (
+from ..exception_handling import (
ExceptionToHttpErrorMap,
HttpErrorInfo,
- create_exception_handlers_decorator,
-)
-from ..projects.exceptions import (
- BaseProjectError,
- ProjectRunningConflictError,
- ProjectStoppingError,
+ exception_handling_decorator,
+ to_exceptions_handlers_map,
)
+from ..projects.exceptions import ProjectRunningConflictError, ProjectStoppingError
from .errors import (
WorkspaceAccessForbiddenError,
WorkspaceGroupNotFoundError,
WorkspaceNotFoundError,
- WorkspacesValueError,
)
_logger = logging.getLogger(__name__)
@@ -47,7 +43,6 @@
}
-handle_plugin_requests_exceptions = create_exception_handlers_decorator(
- exceptions_catch=(BaseProjectError, WorkspacesValueError),
- exc_to_status_map=_TO_HTTP_ERROR_MAP,
+handle_plugin_requests_exceptions = exception_handling_decorator(
+ to_exceptions_handlers_map(_TO_HTTP_ERROR_MAP)
)
diff --git a/services/web/server/tests/unit/isolated/test_exception_handling.py b/services/web/server/tests/unit/isolated/test_exception_handling.py
new file mode 100644
index 00000000000..775fe452a21
--- /dev/null
+++ b/services/web/server/tests/unit/isolated/test_exception_handling.py
@@ -0,0 +1,198 @@
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=too-many-statements
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+from collections.abc import Callable
+
+import pytest
+from aiohttp import web
+from aiohttp.test_utils import TestClient
+from models_library.rest_error import ErrorGet
+from servicelib.aiohttp import status
+from servicelib.mimetype_constants import MIMETYPE_APPLICATION_JSON, MIMETYPE_TEXT_PLAIN
+from simcore_service_webserver.exception_handling import (
+ ExceptionHandlersMap,
+ HttpErrorInfo,
+ exception_handling_decorator,
+ to_exceptions_handlers_map,
+)
+from simcore_service_webserver.exception_handling._base import (
+ exception_handling_middleware,
+)
+from simcore_service_webserver.exception_handling._factory import (
+ create_http_error_exception_handlers_map,
+)
+
+
+@pytest.fixture
+def exception_handlers_map(build_method: str) -> ExceptionHandlersMap:
+ """
+ Two different ways to build the exception_handlers_map
+ """
+ exception_handlers_map: ExceptionHandlersMap = {}
+
+ if build_method == "function":
+
+ async def _value_error_as_422_func(
+ request: web.Request, exception: BaseException
+ ) -> web.Response:
+ # custom exception handler
+ return web.json_response(
+ reason=f"{build_method=}", status=status.HTTP_422_UNPROCESSABLE_ENTITY
+ )
+
+ exception_handlers_map = {
+ ValueError: _value_error_as_422_func,
+ }
+
+ elif build_method == "http_map":
+ exception_handlers_map = to_exceptions_handlers_map(
+ {
+ ValueError: HttpErrorInfo(
+ status.HTTP_422_UNPROCESSABLE_ENTITY, f"{build_method=}"
+ )
+ }
+ )
+ else:
+ pytest.fail(f"Undefined {build_method=}")
+
+ return exception_handlers_map
+
+
+@pytest.mark.parametrize("build_method", ["function", "http_map"])
+async def test_handling_exceptions_decorating_a_route(
+ aiohttp_client: Callable,
+ exception_handlers_map: ExceptionHandlersMap,
+ build_method: str,
+):
+
+ # 1. create decorator
+ exc_handling = exception_handling_decorator(exception_handlers_map)
+
+ # adding new routes
+ routes = web.RouteTableDef()
+
+ @routes.post("/{what}")
+ @exc_handling # < ----- 2. using decorator
+ async def _handler(request: web.Request):
+ what = request.match_info["what"]
+ match what:
+ case "ValueError":
+ raise ValueError # handled
+ case "IndexError":
+ raise IndexError # not-handled
+ case "HTTPConflict":
+ raise web.HTTPConflict # not-handled
+ case "HTTPOk":
+ # non-error responses should NOT be raised,
+ # SEE https://github.com/ITISFoundation/osparc-simcore/pull/6829
+ # but if one is raised anyway ...
+ raise web.HTTPOk # not-handled
+
+ return web.Response(text=what)
+
+ app = web.Application()
+ app.add_routes(routes)
+
+ # 3. testing from the client side
+ client: TestClient = await aiohttp_client(app)
+
+ # success
+ resp = await client.post("/ok")
+ assert resp.status == status.HTTP_200_OK
+
+ # handled non-HTTPException exception
+ resp = await client.post("/ValueError")
+ assert resp.status == status.HTTP_422_UNPROCESSABLE_ENTITY
+ if build_method == "http_map":
+ body = await resp.json()
+ error = ErrorGet.model_validate(body["error"])
+ assert error.message == f"{build_method=}"
+
+ # unhandled non-HTTPException
+ resp = await client.post("/IndexError")
+ assert resp.status == status.HTTP_500_INTERNAL_SERVER_ERROR
+
+ # unhandled HTTPError
+ resp = await client.post("/HTTPConflict")
+ assert resp.status == status.HTTP_409_CONFLICT
+
+ # unhandled HTTPSuccess
+ resp = await client.post("/HTTPOk")
+ assert resp.status == status.HTTP_200_OK
+
+
+@pytest.mark.parametrize("build_method", ["function", "http_map"])
+async def test_handling_exceptions_with_middleware(
+ aiohttp_client: Callable,
+ exception_handlers_map: ExceptionHandlersMap,
+ build_method: str,
+):
+ routes = web.RouteTableDef()
+
+ @routes.post("/{what}") # NO decorantor now
+ async def _handler(request: web.Request):
+ match request.match_info["what"]:
+ case "ValueError":
+ raise ValueError # handled
+ return web.Response()
+
+ app = web.Application()
+ app.add_routes(routes)
+
+ # 1. create & install middleware
+ exc_handling = exception_handling_middleware(exception_handlers_map)
+ app.middlewares.append(exc_handling)
+
+ # 2. testing from the client side
+ client: TestClient = await aiohttp_client(app)
+
+ # success
+ resp = await client.post("/ok")
+ assert resp.status == status.HTTP_200_OK
+
+ # handled non-HTTPException exception
+ resp = await client.post("/ValueError")
+ assert resp.status == status.HTTP_422_UNPROCESSABLE_ENTITY
+ if build_method == "http_map":
+ body = await resp.json()
+ error = ErrorGet.model_validate(body["error"])
+ assert error.message == f"{build_method=}"
+
+
+@pytest.mark.parametrize("with_middleware", [True, False])
+async def test_raising_aiohttp_http_errors(
+ aiohttp_client: Callable, with_middleware: bool
+):
+ routes = web.RouteTableDef()
+
+ @routes.post("/raise-http-error")
+ async def _handler1(request: web.Request):
+ # 1. raises aiohttp.web_exceptions.HttpError
+ raise web.HTTPConflict
+
+ app = web.Application()
+ app.add_routes(routes)
+
+ # 2. create & install middleware handlers for ALL http (optional)
+ if with_middleware:
+ exc_handling = exception_handling_middleware(
+ exception_handlers_map=create_http_error_exception_handlers_map()
+ )
+ app.middlewares.append(exc_handling)
+
+ # 3. testing from the client side
+ client: TestClient = await aiohttp_client(app)
+
+ resp = await client.post("/raise-http-error")
+ assert resp.status == status.HTTP_409_CONFLICT
+
+ if with_middleware:
+ assert resp.content_type == MIMETYPE_APPLICATION_JSON
+ ErrorGet.model_construct((await resp.json())["error"])
+ else:
+ # default
+ assert resp.content_type == MIMETYPE_TEXT_PLAIN
diff --git a/services/web/server/tests/unit/isolated/test_exception_handling_base.py b/services/web/server/tests/unit/isolated/test_exception_handling_base.py
new file mode 100644
index 00000000000..b9c3bc87f9d
--- /dev/null
+++ b/services/web/server/tests/unit/isolated/test_exception_handling_base.py
@@ -0,0 +1,126 @@
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=too-many-statements
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+
+import pytest
+from aiohttp import web
+from aiohttp.test_utils import make_mocked_request
+from simcore_service_webserver.errors import WebServerBaseError
+from simcore_service_webserver.exception_handling._base import (
+ AiohttpExceptionHandler,
+ ExceptionHandlingContextManager,
+ _sort_exceptions_by_specificity,
+ exception_handling_decorator,
+)
+
+# Some custom errors in my service
+
+
+class BaseError(WebServerBaseError):
+ ...
+
+
+class OneError(BaseError):
+ ...
+
+
+class OtherError(BaseError):
+ ...
+
+
+def test_sort_concrete_first():
+ assert _sort_exceptions_by_specificity([Exception, BaseError]) == [
+ BaseError,
+ Exception,
+ ]
+
+ assert _sort_exceptions_by_specificity(
+ [Exception, BaseError], concrete_first=False
+ ) == [
+ Exception,
+ BaseError,
+ ]
+
+
+def test_sort_exceptions_by_specificity():
+
+ got_exceptions_cls = _sort_exceptions_by_specificity(
+ [
+ Exception,
+ OtherError,
+ OneError,
+ BaseError,
+ ValueError,
+ ArithmeticError,
+ ZeroDivisionError,
+ ]
+ )
+
+ for from_, exc in enumerate(got_exceptions_cls, start=1):
+ for exc_after in got_exceptions_cls[from_:]:
+ assert not issubclass(exc_after, exc), f"{got_exceptions_cls=}"
+
+
+async def test__handled_exception_context_manager():
+
+ expected_request = make_mocked_request("GET", "/foo")
+ expected_response = web.json_response({"error": {"msg": "Foo"}})
+
+ # define exception-handler function
+ async def _base_exc_handler(request, exception):
+ assert request == expected_request
+ assert isinstance(exception, BaseError)
+ assert not isinstance(exception, OtherError)
+ return expected_response
+
+ async def _concrete_exc_handler(request, exception):
+ assert request == expected_request
+ assert isinstance(exception, OtherError)
+ return expected_response
+
+ exception_handlers_map: dict[type[BaseException], AiohttpExceptionHandler] = {
+ BaseError: _base_exc_handler,
+ OtherError: _concrete_exc_handler,
+ }
+
+ # handles any BaseError returning a response
+ cm = ExceptionHandlingContextManager(
+ exception_handlers_map, request=expected_request
+ )
+ async with cm:
+ raise OneError
+ assert cm.get_response_or_none() == expected_response
+
+ async with cm:
+ raise OtherError
+ assert cm.get_response_or_none() == expected_response
+
+ # reraises
+ with pytest.raises(ArithmeticError):
+ async with cm:
+ raise ArithmeticError
+
+
+@pytest.mark.parametrize("exception_cls", [OneError, OtherError])
+async def test_async_try_except_decorator(exception_cls: type[Exception]):
+ expected_request = make_mocked_request("GET", "/foo")
+ expected_exception = exception_cls()
+ expected_response = web.Response(reason=f"suppressed {exception_cls}")
+
+ # creates exception handler
+ async def _suppress_all(request: web.Request, exception):
+ assert exception == expected_exception
+ assert request == expected_request
+ return expected_response
+
+ @exception_handling_decorator({BaseError: _suppress_all})
+ async def _rest_handler(request: web.Request) -> web.Response:
+ raise expected_exception
+
+ # emulates request/response workflow
+ resp = await _rest_handler(expected_request)
+ assert resp == expected_response
diff --git a/services/web/server/tests/unit/isolated/test_exception_handling_factory.py b/services/web/server/tests/unit/isolated/test_exception_handling_factory.py
new file mode 100644
index 00000000000..e87ef0b53c3
--- /dev/null
+++ b/services/web/server/tests/unit/isolated/test_exception_handling_factory.py
@@ -0,0 +1,154 @@
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=too-many-statements
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+
+import logging
+
+import pytest
+from aiohttp import web
+from aiohttp.test_utils import make_mocked_request
+from servicelib.aiohttp import status
+from servicelib.mimetype_constants import MIMETYPE_APPLICATION_JSON
+from simcore_service_webserver.errors import WebServerBaseError
+from simcore_service_webserver.exception_handling._base import (
+ ExceptionHandlingContextManager,
+ exception_handling_decorator,
+)
+from simcore_service_webserver.exception_handling._factory import (
+ ExceptionToHttpErrorMap,
+ HttpErrorInfo,
+ create_exception_handler_from_http_info,
+ to_exceptions_handlers_map,
+)
+
+# Some custom errors in my service
+
+
+class BaseError(WebServerBaseError):
+ ...
+
+
+class OneError(BaseError):
+ ...
+
+
+class OtherError(BaseError):
+ ...
+
+
+@pytest.fixture
+def fake_request() -> web.Request:
+ return make_mocked_request("GET", "/foo")
+
+
+async def test_factory__create_exception_handler_from_http_error(
+ fake_request: web.Request,
+):
+ one_error_to_404 = create_exception_handler_from_http_info(
+ status_code=status.HTTP_404_NOT_FOUND,
+ msg_template="one error message for the user: {code} {value}",
+ )
+
+ # calling exception handler
+ caught = OneError()
+ response = await one_error_to_404(fake_request, caught)
+ assert response.status == status.HTTP_404_NOT_FOUND
+ assert response.text is not None
+ assert "one error message" in response.reason
+ assert response.content_type == MIMETYPE_APPLICATION_JSON
+
+
+async def test_handling_different_exceptions_with_context(
+ fake_request: web.Request,
+ caplog: pytest.LogCaptureFixture,
+):
+ exc_to_http_error_map: ExceptionToHttpErrorMap = {
+ OneError: HttpErrorInfo(status.HTTP_400_BAD_REQUEST, "Error {code} to 400"),
+ OtherError: HttpErrorInfo(status.HTTP_500_INTERNAL_SERVER_ERROR, "{code}"),
+ }
+ cm = ExceptionHandlingContextManager(
+ to_exceptions_handlers_map(exc_to_http_error_map), request=fake_request
+ )
+
+ with caplog.at_level(logging.ERROR):
+ # handles as 4XX
+ async with cm:
+ raise OneError
+
+ response = cm.get_response_or_none()
+ assert response is not None
+ assert response.status == status.HTTP_400_BAD_REQUEST
+ assert response.reason == exc_to_http_error_map[OneError].msg_template.format(
+ code="WebServerBaseError.BaseError.OneError"
+ )
+ assert not caplog.records
+
+ # unhandled -> reraises
+ err = RuntimeError()
+ with pytest.raises(RuntimeError) as err_info:
+ async with cm:
+ raise err
+
+ assert cm.get_response_or_none() is None
+ assert err_info.value == err
+
+ # handles as 5XX and logs
+ async with cm:
+ raise OtherError
+
+ response = cm.get_response_or_none()
+ assert response is not None
+ assert response.status == status.HTTP_500_INTERNAL_SERVER_ERROR
+ assert response.reason == exc_to_http_error_map[OtherError].msg_template.format(
+ code="WebServerBaseError.BaseError.OtherError"
+ )
+ assert caplog.records, "Expected 5XX troubleshooting logged as error"
+ assert caplog.records[0].levelno == logging.ERROR
+
+
+async def test_handling_different_exceptions_with_decorator(
+ fake_request: web.Request,
+ caplog: pytest.LogCaptureFixture,
+):
+ exc_to_http_error_map: ExceptionToHttpErrorMap = {
+ OneError: HttpErrorInfo(status.HTTP_503_SERVICE_UNAVAILABLE, "{code}"),
+ }
+
+ exc_handling_decorator = exception_handling_decorator(
+ to_exceptions_handlers_map(exc_to_http_error_map)
+ )
+
+ @exc_handling_decorator
+ async def _rest_handler(request: web.Request) -> web.Response:
+ if request.query.get("raise") == "OneError":
+ raise OneError
+ if request.query.get("raise") == "ArithmeticError":
+ raise ArithmeticError
+ return web.json_response(reason="all good")
+
+ with caplog.at_level(logging.ERROR):
+
+ # emulates successful call
+ resp = await _rest_handler(make_mocked_request("GET", "/foo"))
+ assert resp.status == status.HTTP_200_OK
+ assert resp.reason == "all good"
+
+ assert not caplog.records
+
+ # reraised
+ with pytest.raises(ArithmeticError):
+ await _rest_handler(
+ make_mocked_request("GET", "/foo?raise=ArithmeticError")
+ )
+
+ assert not caplog.records
+
+ # handles as 5XX and logs
+ resp = await _rest_handler(make_mocked_request("GET", "/foo?raise=OneError"))
+ assert resp.status == status.HTTP_503_SERVICE_UNAVAILABLE
+ assert caplog.records, "Expected 5XX troubleshooting logged as error"
+ assert caplog.records[0].levelno == logging.ERROR
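
For reference, the decorator form exercised above is how a plugin's REST handlers would typically opt into this machinery. A minimal sketch, reusing only names already imported in this test (the handler itself is hypothetical):

    _handle_errors = exception_handling_decorator(
        to_exceptions_handlers_map(
            {OneError: HttpErrorInfo(status.HTTP_404_NOT_FOUND, "Not found: {code}")}
        )
    )

    @_handle_errors
    async def get_item(request: web.Request) -> web.Response:
        # raising OneError here is converted into a 404 JSON response;
        # any unmapped exception propagates to the outer error middleware
        raise OneError
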
diff --git a/services/web/server/tests/unit/isolated/test_exceptions_handlers.py b/services/web/server/tests/unit/isolated/test_exceptions_handlers.py
deleted file mode 100644
index 27cde72283b..00000000000
--- a/services/web/server/tests/unit/isolated/test_exceptions_handlers.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# pylint: disable=protected-access
-# pylint: disable=redefined-outer-name
-# pylint: disable=too-many-arguments
-# pylint: disable=too-many-statements
-# pylint: disable=unused-argument
-# pylint: disable=unused-variable
-
-
-import logging
-
-import pytest
-from aiohttp import web
-from aiohttp.test_utils import make_mocked_request
-from servicelib.aiohttp import status
-from simcore_service_webserver.errors import WebServerBaseError
-from simcore_service_webserver.exceptions_handlers import (
- HttpErrorInfo,
- _sort_exceptions_by_specificity,
- create_exception_handlers_decorator,
-)
-
-
-class BasePluginError(WebServerBaseError):
- ...
-
-
-class OneError(BasePluginError):
- ...
-
-
-class OtherError(BasePluginError):
- ...
-
-
-def test_sort_concrete_first():
- assert _sort_exceptions_by_specificity([Exception, BasePluginError]) == [
- BasePluginError,
- Exception,
- ]
-
- assert _sort_exceptions_by_specificity(
- [Exception, BasePluginError], concrete_first=False
- ) == [
- Exception,
- BasePluginError,
- ]
-
-
-def test_sort_exceptions_by_specificity():
-
- got_exceptions_cls = _sort_exceptions_by_specificity(
- [
- Exception,
- OtherError,
- OneError,
- BasePluginError,
- ValueError,
- ArithmeticError,
- ZeroDivisionError,
- ]
- )
-
- for from_, exc in enumerate(got_exceptions_cls, start=1):
- for exc_after in got_exceptions_cls[from_:]:
- assert not issubclass(exc_after, exc), f"{got_exceptions_cls=}"
-
-
-async def test_exception_handlers_decorator(
- caplog: pytest.LogCaptureFixture,
-):
-
- _handle_exceptions = create_exception_handlers_decorator(
- exceptions_catch=BasePluginError,
- exc_to_status_map={
- OneError: HttpErrorInfo(
- status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
- msg_template="This is one error for front-end",
- )
- },
- )
-
- @_handle_exceptions
- async def _rest_handler(request: web.Request) -> web.Response:
- if request.query.get("raise") == "OneError":
- raise OneError
- if request.query.get("raise") == "ArithmeticError":
- raise ArithmeticError
-
- return web.Response(reason="all good")
-
- with caplog.at_level(logging.ERROR):
-
- # emulates successful call
- resp = await _rest_handler(make_mocked_request("GET", "/foo"))
- assert resp.status == status.HTTP_200_OK
- assert resp.reason == "all good"
-
- assert not caplog.records
-
- # this will be passed and catched by the outermost error middleware
- with pytest.raises(ArithmeticError):
- await _rest_handler(
- make_mocked_request("GET", "/foo?raise=ArithmeticError")
- )
-
- assert not caplog.records
-
- # this is a 5XX will be converted to response but is logged as error as well
- with pytest.raises(web.HTTPException) as exc_info:
- await _rest_handler(make_mocked_request("GET", "/foo?raise=OneError"))
-
- resp = exc_info.value
- assert resp.status == status.HTTP_503_SERVICE_UNAVAILABLE
- assert "front-end" in resp.reason
-
- assert caplog.records
- assert caplog.records[0].levelno == logging.ERROR
diff --git a/services/web/server/tests/unit/with_dbs/03/test_trash.py b/services/web/server/tests/unit/with_dbs/03/test_trash.py
index 2489ea6107c..76f4aefb46b 100644
--- a/services/web/server/tests/unit/with_dbs/03/test_trash.py
+++ b/services/web/server/tests/unit/with_dbs/03/test_trash.py
@@ -133,7 +133,6 @@ async def test_trash_projects( # noqa: PLR0915
could_not_trash = is_project_running and not force
if could_not_trash:
- assert error["status"] == status.HTTP_409_CONFLICT
assert "Current study is in use" in error["message"]
# GET
From 994c575e709a3f79136125281b5de3ea746abf54 Mon Sep 17 00:00:00 2001
From: Giancarlo Romeo
Date: Mon, 2 Dec 2024 10:22:47 +0100
Subject: [PATCH 03/16] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20Remove=20deprecated?=
=?UTF-8?q?=20`from=5Form`=20method=20(Pydantic=20v2)=20(#6869)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../utils_projects_metadata.py | 6 +++---
.../utils_projects_nodes.py | 8 ++++----
.../src/pytest_simcore/db_entries_mocks.py | 2 +-
.../db/repositories/groups.py | 2 +-
.../db/repositories/services.py | 16 ++++++++--------
5 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_projects_metadata.py b/packages/postgres-database/src/simcore_postgres_database/utils_projects_metadata.py
index c8aa9962d43..149bb50b6a1 100644
--- a/packages/postgres-database/src/simcore_postgres_database/utils_projects_metadata.py
+++ b/packages/postgres-database/src/simcore_postgres_database/utils_projects_metadata.py
@@ -93,7 +93,7 @@ async def get(connection: SAConnection, project_uuid: uuid.UUID) -> ProjectMetad
row: RowProxy | None = await result.first()
if row is None:
raise DBProjectNotFoundError(project_uuid=project_uuid)
- return ProjectMetadata.from_orm(row)
+ return ProjectMetadata.model_validate(row)
def _check_valid_ancestors_combination(
@@ -202,7 +202,7 @@ async def set_project_ancestors(
result: ResultProxy = await connection.execute(upsert_stmt)
row: RowProxy | None = await result.first()
assert row # nosec
- return ProjectMetadata.from_orm(row)
+ return ProjectMetadata.model_validate(row)
except ForeignKeyViolation as err:
assert err.pgerror is not None # nosec # noqa: PT017
@@ -234,7 +234,7 @@ async def set_project_custom_metadata(
result: ResultProxy = await connection.execute(upsert_stmt)
row: RowProxy | None = await result.first()
assert row # nosec
- return ProjectMetadata.from_orm(row)
+ return ProjectMetadata.model_validate(row)
except ForeignKeyViolation as err:
raise DBProjectNotFoundError(project_uuid=project_uuid) from err
diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py b/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py
index cb47141b1ab..42b40c778dc 100644
--- a/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py
+++ b/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py
@@ -102,7 +102,7 @@ async def add(
assert result # nosec
rows = await result.fetchall()
assert rows is not None # nosec
- return [ProjectNode.from_orm(r) for r in rows]
+ return [ProjectNode.model_validate(r) for r in rows]
except ForeignKeyViolation as exc:
# this happens when the project does not exist, as we first check the node exists
raise ProjectNodesProjectNotFoundError(
@@ -128,7 +128,7 @@ async def list(self, connection: SAConnection) -> list[ProjectNode]:
assert result # nosec
rows = await result.fetchall()
assert rows is not None # nosec
- return [ProjectNode.from_orm(row) for row in rows]
+ return [ProjectNode.model_validate(row) for row in rows]
async def get(self, connection: SAConnection, *, node_id: uuid.UUID) -> ProjectNode:
"""get a node in the current project
@@ -154,7 +154,7 @@ async def get(self, connection: SAConnection, *, node_id: uuid.UUID) -> ProjectN
project_uuid=self.project_uuid, node_id=node_id
)
assert row # nosec
- return ProjectNode.from_orm(row)
+ return ProjectNode.model_validate(row)
async def update(
self, connection: SAConnection, *, node_id: uuid.UUID, **values
@@ -184,7 +184,7 @@ async def update(
project_uuid=self.project_uuid, node_id=node_id
)
assert row # nosec
- return ProjectNode.from_orm(row)
+ return ProjectNode.model_validate(row)
async def delete(self, connection: SAConnection, *, node_id: uuid.UUID) -> None:
"""delete a node in the current project
diff --git a/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py b/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py
index af77a9d28b5..67e8ec1722d 100644
--- a/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py
+++ b/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py
@@ -93,7 +93,7 @@ async def creator(
.returning(sa.literal_column("*"))
)
- inserted_project = ProjectAtDB.from_orm(await result.first())
+ inserted_project = ProjectAtDB.model_validate(await result.first())
project_nodes_repo = ProjectNodesRepo(project_uuid=project_uuid)
# NOTE: currently no resources is passed until it becomes necessary
default_node_config = {"required_resources": {}}
diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/groups.py b/services/catalog/src/simcore_service_catalog/db/repositories/groups.py
index 8a1540b3f1a..d7061947a10 100644
--- a/services/catalog/src/simcore_service_catalog/db/repositories/groups.py
+++ b/services/catalog/src/simcore_service_catalog/db/repositories/groups.py
@@ -37,7 +37,7 @@ async def get_everyone_group(self) -> GroupAtDB:
raise UninitializedGroupError(
group=GroupType.EVERYONE, repo_cls=GroupsRepository
)
- return GroupAtDB.from_orm(row)
+ return GroupAtDB.model_validate(row)
async def get_user_gid_from_email(
self, user_email: LowerCaseEmailStr
diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/services.py b/services/catalog/src/simcore_service_catalog/db/repositories/services.py
index bae22e11597..e848fb9b164 100644
--- a/services/catalog/src/simcore_service_catalog/db/repositories/services.py
+++ b/services/catalog/src/simcore_service_catalog/db/repositories/services.py
@@ -83,7 +83,7 @@ async def list_services(
async with self.db_engine.connect() as conn:
return [
- ServiceMetaDataAtDB.from_orm(row)
+ ServiceMetaDataAtDB.model_validate(row)
async for row in await conn.stream(
list_services_stmt(
gids=gids,
@@ -134,7 +134,7 @@ async def list_service_releases(
async with self.db_engine.connect() as conn:
releases = [
- ServiceMetaDataAtDB.from_orm(row)
+ ServiceMetaDataAtDB.model_validate(row)
async for row in await conn.stream(query)
]
@@ -163,7 +163,7 @@ async def get_latest_release(self, key: str) -> ServiceMetaDataAtDB | None:
result = await conn.execute(query)
row = result.first()
if row:
- return ServiceMetaDataAtDB.from_orm(row)
+ return ServiceMetaDataAtDB.model_validate(row)
return None # mypy
async def get_service(
@@ -208,7 +208,7 @@ async def get_service(
result = await conn.execute(query)
row = result.first()
if row:
- return ServiceMetaDataAtDB.from_orm(row)
+ return ServiceMetaDataAtDB.model_validate(row)
return None # mypy
async def create_or_update_service(
@@ -234,7 +234,7 @@ async def create_or_update_service(
)
row = result.first()
assert row # nosec
- created_service = ServiceMetaDataAtDB.from_orm(row)
+ created_service = ServiceMetaDataAtDB.model_validate(row)
for access_rights in new_service_access_rights:
insert_stmt = pg_insert(services_access_rights).values(
@@ -468,7 +468,7 @@ async def get_service_access_rights(
async with self.db_engine.connect() as conn:
return [
- ServiceAccessRightsAtDB.from_orm(row)
+ ServiceAccessRightsAtDB.model_validate(row)
async for row in await conn.stream(query)
]
@@ -494,7 +494,7 @@ async def list_services_access_rights(
async with self.db_engine.connect() as conn:
async for row in await conn.stream(query):
service_to_access_rights[(row.key, row.version)].append(
- ServiceAccessRightsAtDB.from_orm(row)
+ ServiceAccessRightsAtDB.model_validate(row)
)
return service_to_access_rights
@@ -585,7 +585,7 @@ async def get_service_specifications(
try:
_logger.debug("found following %s", f"{row=}")
# validate the specs first
- db_service_spec = ServiceSpecificationsAtDB.from_orm(row)
+ db_service_spec = ServiceSpecificationsAtDB.model_validate(row)
db_spec_version = packaging.version.parse(
db_service_spec.service_version
)
From a2f9058df1ac025fc7018cd1a6bbee9d3b9e18f7 Mon Sep 17 00:00:00 2001
From: Sylvain <35365065+sanderegg@users.noreply.github.com>
Date: Mon, 2 Dec 2024 11:36:01 +0100
Subject: [PATCH 04/16] =?UTF-8?q?=F0=9F=8E=A8Computational=20backend:=20DV?=
=?UTF-8?q?-2=20computational=20scheduler=20becomes=20replicable=20(?=
=?UTF-8?q?=F0=9F=97=83=EF=B8=8F=F0=9F=9A=A8)=20(#6736)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.coveragerc | 9 +-
.../src/models_library/projects_nodes_io.py | 2 +-
.../7ad64e963e0f_add_timezone_comp_tasks.py | 68 +
...6d8aa2_added_distributed_comp_scheduler.py | 33 +
.../e05bdc5b3c7b_add_timezone_comp_runs.py | 87 +
.../models/comp_runs.py | 37 +-
.../models/comp_tasks.py | 13 +-
.../src/pytest_simcore/rabbit_service.py | 23 +-
.../src/servicelib/exception_utils.py | 38 +-
.../src/servicelib/rabbitmq/_models.py | 6 +-
.../tests/test_exception_utils.py | 59 +-
.../api/dependencies/scheduler.py | 8 +-
.../api/routes/computations.py | 38 +-
.../core/errors.py | 4 +
.../core/settings.py | 12 +-
.../models/comp_pipelines.py | 4 +-
.../models/comp_runs.py | 16 +-
.../modules/comp_scheduler/__init__.py | 26 +-
.../modules/comp_scheduler/_constants.py | 7 +
.../modules/comp_scheduler/_manager.py | 173 ++
.../modules/comp_scheduler/_models.py | 19 +
.../modules/comp_scheduler/_publisher.py | 32 +
...{_base_scheduler.py => _scheduler_base.py} | 347 ++--
...{_dask_scheduler.py => _scheduler_dask.py} | 9 +-
.../comp_scheduler/_scheduler_factory.py | 19 +-
.../comp_scheduler/_utils.py} | 44 +-
.../modules/comp_scheduler/_worker.py | 94 ++
.../modules/db/__init__.py | 12 +-
.../modules/db/repositories/comp_runs.py | 142 +-
.../db/repositories/comp_tasks/_utils.py | 30 +-
services/director-v2/tests/conftest.py | 17 +-
services/director-v2/tests/unit/_helpers.py | 77 +-
services/director-v2/tests/unit/conftest.py | 5 +
.../tests/unit/test_utils_comp_scheduler.py | 2 +-
.../unit/with_dbs/comp_scheduler/conftest.py | 69 +
.../test_db_repositories_comp_runs.py | 525 ++++++
.../with_dbs/comp_scheduler/test_manager.py | 371 +++++
.../test_scheduler_dask.py} | 1452 ++++++++++-------
.../with_dbs/comp_scheduler/test_worker.py | 135 ++
.../tests/unit/with_dbs/conftest.py | 172 +-
.../unit/with_dbs/test_api_route_clusters.py | 40 +-
.../test_api_route_clusters_details.py | 6 +-
.../with_dbs/test_api_route_computations.py | 41 +-
.../test_api_route_computations_tasks.py | 14 +-
.../unit/with_dbs/test_utils_rabbitmq.py | 12 +-
45 files changed, 3224 insertions(+), 1125 deletions(-)
create mode 100644 packages/postgres-database/src/simcore_postgres_database/migration/versions/7ad64e963e0f_add_timezone_comp_tasks.py
create mode 100644 packages/postgres-database/src/simcore_postgres_database/migration/versions/b7f23f6d8aa2_added_distributed_comp_scheduler.py
create mode 100644 packages/postgres-database/src/simcore_postgres_database/migration/versions/e05bdc5b3c7b_add_timezone_comp_runs.py
create mode 100644 services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_constants.py
create mode 100644 services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
create mode 100644 services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_models.py
create mode 100644 services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_publisher.py
rename services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/{_base_scheduler.py => _scheduler_base.py} (75%)
rename services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/{_dask_scheduler.py => _scheduler_dask.py} (98%)
rename services/director-v2/src/simcore_service_director_v2/{utils/comp_scheduler.py => modules/comp_scheduler/_utils.py} (61%)
create mode 100644 services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_worker.py
create mode 100644 services/director-v2/tests/unit/with_dbs/comp_scheduler/conftest.py
create mode 100644 services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py
create mode 100644 services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py
rename services/director-v2/tests/unit/with_dbs/{test_modules_comp_scheduler_dask_scheduler.py => comp_scheduler/test_scheduler_dask.py} (55%)
create mode 100644 services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py
diff --git a/.coveragerc b/.coveragerc
index fb3d7c12624..ebf1465b0fb 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -7,25 +7,20 @@ parallel = True
[report]
# Regexes for lines to exclude from consideration
-exclude_lines =
- # Have to re-enable the standard pragma
- pragma: no cover
-
+exclude_also =
# Don't complain about missing debug-only code:
def __repr__
if self\.debug
-
# Don't complain if tests don't hit defensive assertion code:
raise AssertionError
raise NotImplementedError
-
# Don't complain if non-runnable code isn't run:
if 0:
if __name__ == .__main__.:
if __name__ == __main__.:
+ class .*\bProtocol\):
# Don't complain about abstract methods, they aren't run:
@(abc\.)?abstract(((class|static)?method)|property)
-
# Don't complain about type checking
if TYPE_CHECKING:
diff --git a/packages/models-library/src/models_library/projects_nodes_io.py b/packages/models-library/src/models_library/projects_nodes_io.py
index 3a79b6acf00..4d4637ac362 100644
--- a/packages/models-library/src/models_library/projects_nodes_io.py
+++ b/packages/models-library/src/models_library/projects_nodes_io.py
@@ -34,7 +34,7 @@
UUIDStr: TypeAlias = Annotated[str, StringConstraints(pattern=UUID_RE)]
-NodeIDStr = UUIDStr
+NodeIDStr: TypeAlias = UUIDStr
LocationID = int
LocationName = str
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/7ad64e963e0f_add_timezone_comp_tasks.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7ad64e963e0f_add_timezone_comp_tasks.py
new file mode 100644
index 00000000000..fe56f4c548f
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7ad64e963e0f_add_timezone_comp_tasks.py
@@ -0,0 +1,68 @@
+"""add_timezone_comp_tasks
+
+Revision ID: 7ad64e963e0f
+Revises: b7f23f6d8aa2
+Create Date: 2024-11-27 22:28:51.898433+00:00
+
+"""
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = "7ad64e963e0f"
+down_revision = "b7f23f6d8aa2"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column(
+ "comp_tasks",
+ "submit",
+ existing_type=postgresql.TIMESTAMP(),
+ type_=sa.DateTime(timezone=True),
+ existing_nullable=True,
+ )
+ op.alter_column(
+ "comp_tasks",
+ "start",
+ existing_type=postgresql.TIMESTAMP(),
+ type_=sa.DateTime(timezone=True),
+ existing_nullable=True,
+ )
+ op.alter_column(
+ "comp_tasks",
+ "end",
+ existing_type=postgresql.TIMESTAMP(),
+ type_=sa.DateTime(timezone=True),
+ existing_nullable=True,
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column(
+ "comp_tasks",
+ "end",
+ existing_type=sa.DateTime(timezone=True),
+ type_=postgresql.TIMESTAMP(),
+ existing_nullable=True,
+ )
+ op.alter_column(
+ "comp_tasks",
+ "start",
+ existing_type=sa.DateTime(timezone=True),
+ type_=postgresql.TIMESTAMP(),
+ existing_nullable=True,
+ )
+ op.alter_column(
+ "comp_tasks",
+ "submit",
+ existing_type=sa.DateTime(timezone=True),
+ type_=postgresql.TIMESTAMP(),
+ existing_nullable=True,
+ )
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/b7f23f6d8aa2_added_distributed_comp_scheduler.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b7f23f6d8aa2_added_distributed_comp_scheduler.py
new file mode 100644
index 00000000000..b1e5bc9f30c
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b7f23f6d8aa2_added_distributed_comp_scheduler.py
@@ -0,0 +1,33 @@
+"""added_distributed_comp_scheduler
+
+Revision ID: b7f23f6d8aa2
+Revises: c9db8bf5091e
+Create Date: 2024-11-26 17:06:27.053774+00:00
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "b7f23f6d8aa2"
+down_revision = "c9db8bf5091e"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column(
+ "comp_runs", sa.Column("scheduled", sa.DateTime(timezone=True), nullable=True)
+ )
+ op.add_column(
+ "comp_runs", sa.Column("processed", sa.DateTime(timezone=True), nullable=True)
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column("comp_runs", "processed")
+ op.drop_column("comp_runs", "scheduled")
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e05bdc5b3c7b_add_timezone_comp_runs.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e05bdc5b3c7b_add_timezone_comp_runs.py
new file mode 100644
index 00000000000..3d3d6c6896a
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e05bdc5b3c7b_add_timezone_comp_runs.py
@@ -0,0 +1,87 @@
+"""add_timezone_comp_runs
+
+Revision ID: e05bdc5b3c7b
+Revises: 7ad64e963e0f
+Create Date: 2024-11-27 22:51:21.112336+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = "e05bdc5b3c7b"
+down_revision = "7ad64e963e0f"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column(
+ "comp_runs",
+ "created",
+ existing_type=postgresql.TIMESTAMP(),
+ type_=sa.DateTime(timezone=True),
+ existing_nullable=False,
+ existing_server_default="now()",
+ )
+ op.alter_column(
+ "comp_runs",
+ "modified",
+ existing_type=postgresql.TIMESTAMP(),
+ type_=sa.DateTime(timezone=True),
+ existing_nullable=False,
+ existing_server_default="now()",
+ )
+ op.alter_column(
+ "comp_runs",
+ "started",
+ existing_type=postgresql.TIMESTAMP(),
+ type_=sa.DateTime(timezone=True),
+ existing_nullable=True,
+ )
+ op.alter_column(
+ "comp_runs",
+ "ended",
+ existing_type=postgresql.TIMESTAMP(),
+ type_=sa.DateTime(timezone=True),
+ existing_nullable=True,
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column(
+ "comp_runs",
+ "ended",
+ existing_type=sa.DateTime(timezone=True),
+ type_=postgresql.TIMESTAMP(),
+ existing_nullable=True,
+ )
+ op.alter_column(
+ "comp_runs",
+ "started",
+ existing_type=sa.DateTime(timezone=True),
+ type_=postgresql.TIMESTAMP(),
+ existing_nullable=True,
+ )
+ op.alter_column(
+ "comp_runs",
+ "modified",
+ existing_type=sa.DateTime(timezone=True),
+ type_=postgresql.TIMESTAMP(),
+ existing_nullable=False,
+ existing_server_default="now()",
+ )
+ op.alter_column(
+ "comp_runs",
+ "created",
+ existing_type=sa.DateTime(timezone=True),
+ type_=postgresql.TIMESTAMP(),
+ existing_nullable=False,
+ existing_server_default="now()",
+ )
+ # ### end Alembic commands ###
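
Assuming the standard Alembic setup shipped with simcore_postgres_database (the alembic.ini path below is an assumption), the three new revisions can be applied and rolled back programmatically:

    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")
    command.upgrade(cfg, "e05bdc5b3c7b")    # apply up to the timezone-aware comp_runs revision
    command.downgrade(cfg, "c9db8bf5091e")  # roll back all three revisions introduced here
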
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py b/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py
index 3975cb91eee..d92227c07e2 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py
@@ -1,11 +1,11 @@
""" Computational Runs Table
"""
+
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
-from sqlalchemy.sql import func
-from ._common import RefActions
+from ._common import RefActions, column_created_datetime, column_modified_datetime
from .base import metadata
from .comp_pipeline import StateType
@@ -72,31 +72,18 @@
doc="The result of the run entry",
),
# dag node id and class
- sa.Column(
- "created",
- sa.DateTime(),
- nullable=False,
- server_default=func.now(),
- doc="When the run entry was created",
- ),
- sa.Column(
- "modified",
- sa.DateTime(),
- nullable=False,
- server_default=func.now(),
- onupdate=func.now(), # this will auto-update on modification
- doc="When the run entry was last modified",
- ),
+ column_created_datetime(timezone=True),
+ column_modified_datetime(timezone=True),
# utc timestamps for submission/start/end
sa.Column(
"started",
- sa.DateTime,
+ sa.DateTime(timezone=True),
nullable=True,
doc="When the run was started",
),
sa.Column(
"ended",
- sa.DateTime,
+ sa.DateTime(timezone=True),
nullable=True,
doc="When the run was finished",
),
@@ -106,6 +93,18 @@
nullable=True,
doc="If filled, when cancellation was requested",
),
+ sa.Column(
+ "scheduled",
+ sa.DateTime(timezone=True),
+ nullable=True,
+ doc="last time the pipeline was scheduled to be processed",
+ ),
+ sa.Column(
+ "processed",
+ sa.DateTime(timezone=True),
+ nullable=True,
+ doc="last time the pipeline was actually processed",
+ ),
sa.Column("metadata", JSONB, nullable=True, doc="the run optional metadata"),
sa.Column(
"use_on_demand_clusters",
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/comp_tasks.py b/packages/postgres-database/src/simcore_postgres_database/models/comp_tasks.py
index 60bfc3f95c3..af5dc451cc3 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/comp_tasks.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/comp_tasks.py
@@ -1,6 +1,7 @@
""" Computational Tasks Table
"""
+
import enum
import sqlalchemy as sa
@@ -77,9 +78,15 @@ class NodeClass(enum.Enum):
doc="current progress of the task if available",
),
# utc timestamps for submission/start/end
- sa.Column("submit", sa.DateTime, doc="UTC timestamp for task submission"),
- sa.Column("start", sa.DateTime, doc="UTC timestamp when task started"),
- sa.Column("end", sa.DateTime, doc="UTC timestamp for task completion"),
+ sa.Column(
+ "submit", sa.DateTime(timezone=True), doc="UTC timestamp for task submission"
+ ),
+ sa.Column(
+ "start", sa.DateTime(timezone=True), doc="UTC timestamp when task started"
+ ),
+ sa.Column(
+ "end", sa.DateTime(timezone=True), doc="UTC timestamp for task completion"
+ ),
sa.Column(
"last_heartbeat",
sa.DateTime(timezone=True),
diff --git a/packages/pytest-simcore/src/pytest_simcore/rabbit_service.py b/packages/pytest-simcore/src/pytest_simcore/rabbit_service.py
index d8dc38feb09..91873a69d08 100644
--- a/packages/pytest-simcore/src/pytest_simcore/rabbit_service.py
+++ b/packages/pytest-simcore/src/pytest_simcore/rabbit_service.py
@@ -6,11 +6,12 @@
import asyncio
import logging
from collections.abc import AsyncIterator, Awaitable, Callable
+from contextlib import suppress
import aio_pika
import pytest
import tenacity
-from servicelib.rabbitmq import RabbitMQClient, RabbitMQRPCClient
+from servicelib.rabbitmq import QueueName, RabbitMQClient, RabbitMQRPCClient
from settings_library.rabbit import RabbitSettings
from tenacity.before_sleep import before_sleep_log
from tenacity.stop import stop_after_attempt
@@ -131,3 +132,23 @@ async def _creator(client_name: str, *, heartbeat: int = 60) -> RabbitMQRPCClien
yield _creator
# cleanup, properly close the clients
await asyncio.gather(*(client.close() for client in created_clients))
+
+
+@pytest.fixture
+async def ensure_parametrized_queue_is_empty(
+ create_rabbitmq_client: Callable[[str], RabbitMQClient], queue_name: QueueName
+) -> AsyncIterator[None]:
+ rabbitmq_client = create_rabbitmq_client("pytest-purger")
+
+ async def _queue_messages_purger() -> None:
+ with suppress(aio_pika.exceptions.ChannelClosed):
+ assert rabbitmq_client._channel_pool # noqa: SLF001
+ async with rabbitmq_client._channel_pool.acquire() as channel: # noqa: SLF001
+ assert isinstance(channel, aio_pika.RobustChannel)
+ queue = await channel.get_queue(queue_name)
+ await queue.purge()
+
+ await _queue_messages_purger()
+ yield
+ # cleanup
+ await _queue_messages_purger()
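
The new fixture expects a `queue_name` fixture (or parameter) to be provided by the test module that uses it. A minimal sketch of such a test (the queue name below is purely illustrative):

    import pytest
    from servicelib.rabbitmq import QueueName

    @pytest.fixture
    def queue_name() -> QueueName:
        return "pytest-example-queue"  # hypothetical; must match the queue the test publishes to

    async def test_consumer(ensure_parametrized_queue_is_empty: None):
        ...  # the queue is purged both before and after this test runs
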
diff --git a/packages/service-library/src/servicelib/exception_utils.py b/packages/service-library/src/servicelib/exception_utils.py
index 4f44d673838..2de33fd98e6 100644
--- a/packages/service-library/src/servicelib/exception_utils.py
+++ b/packages/service-library/src/servicelib/exception_utils.py
@@ -1,6 +1,9 @@
+import inspect
import logging
+from collections.abc import Callable
from datetime import datetime
-from typing import Final
+from functools import wraps
+from typing import Any, Final, ParamSpec, TypeVar
from pydantic import BaseModel, Field, NonNegativeFloat, PrivateAttr
@@ -65,3 +68,36 @@ def else_reset(self) -> None:
"""error no longer occurs reset tracking"""
self._first_exception_skip = None
self._failure_counter = 0
+
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+F = TypeVar("F", bound=Callable[..., Any])
+
+
+def silence_exceptions(exceptions: tuple[type[BaseException], ...]) -> Callable[[F], F]:
+ def _decorator(func_or_coro: F) -> F:
+
+ if inspect.iscoroutinefunction(func_or_coro):
+
+ @wraps(func_or_coro)
+ async def _async_wrapper(*args, **kwargs) -> Any:
+ try:
+ assert inspect.iscoroutinefunction(func_or_coro) # nosec
+ return await func_or_coro(*args, **kwargs)
+ except exceptions:
+ return None
+
+ return _async_wrapper # type: ignore[return-value] # decorators typing is hard
+
+ @wraps(func_or_coro)
+ def _sync_wrapper(*args, **kwargs) -> Any:
+ try:
+ return func_or_coro(*args, **kwargs)
+ except exceptions:
+ return None
+
+ return _sync_wrapper # type: ignore[return-value] # decorators typing is hard
+
+ return _decorator
diff --git a/packages/service-library/src/servicelib/rabbitmq/_models.py b/packages/service-library/src/servicelib/rabbitmq/_models.py
index d713edfdc1d..cd674e526ff 100644
--- a/packages/service-library/src/servicelib/rabbitmq/_models.py
+++ b/packages/service-library/src/servicelib/rabbitmq/_models.py
@@ -18,11 +18,9 @@
class RabbitMessage(Protocol):
- def body(self) -> bytes:
- ...
+ def body(self) -> bytes: ...
- def routing_key(self) -> str | None:
- ...
+ def routing_key(self) -> str | None: ...
class RPCNamespacedMethodName(ConstrainedStr):
diff --git a/packages/service-library/tests/test_exception_utils.py b/packages/service-library/tests/test_exception_utils.py
index 299855e8241..a884d3dafb1 100644
--- a/packages/service-library/tests/test_exception_utils.py
+++ b/packages/service-library/tests/test_exception_utils.py
@@ -4,7 +4,7 @@
import pytest
from pydantic import PositiveFloat, PositiveInt
-from servicelib.exception_utils import DelayedExceptionHandler
+from servicelib.exception_utils import DelayedExceptionHandler, silence_exceptions
TOLERANCE: Final[PositiveFloat] = 0.1
SLEEP_FOR: Final[PositiveFloat] = TOLERANCE * 0.1
@@ -49,3 +49,60 @@ def test_workflow_passes() -> None:
def test_workflow_raises() -> None:
with pytest.raises(TargetException):
workflow(stop_raising_after=ITERATIONS + 1)
+
+
+# Define some custom exceptions for testing
+class CustomError(Exception):
+ pass
+
+
+class AnotherCustomError(Exception):
+ pass
+
+
+@silence_exceptions((CustomError,))
+def sync_function(*, raise_error: bool, raise_another_error: bool) -> str:
+ if raise_error:
+ raise CustomError
+ if raise_another_error:
+ raise AnotherCustomError
+ return "Success"
+
+
+@silence_exceptions((CustomError,))
+async def async_function(*, raise_error: bool, raise_another_error: bool) -> str:
+ if raise_error:
+ raise CustomError
+ if raise_another_error:
+ raise AnotherCustomError
+ return "Success"
+
+
+def test_sync_function_no_exception():
+ result = sync_function(raise_error=False, raise_another_error=False)
+ assert result == "Success"
+
+
+def test_sync_function_with_exception_is_silenced():
+ result = sync_function(raise_error=True, raise_another_error=False)
+ assert result is None
+
+
+async def test_async_function_no_exception():
+ result = await async_function(raise_error=False, raise_another_error=False)
+ assert result == "Success"
+
+
+async def test_async_function_with_exception_is_silenced():
+ result = await async_function(raise_error=True, raise_another_error=False)
+ assert result is None
+
+
+def test_sync_function_with_different_exception():
+ with pytest.raises(AnotherCustomError):
+ sync_function(raise_error=False, raise_another_error=True)
+
+
+async def test_async_function_with_different_exception():
+ with pytest.raises(AnotherCustomError):
+ await async_function(raise_error=False, raise_another_error=True)
diff --git a/services/director-v2/src/simcore_service_director_v2/api/dependencies/scheduler.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/scheduler.py
index aa01af1f34b..e480d204d3b 100644
--- a/services/director-v2/src/simcore_service_director_v2/api/dependencies/scheduler.py
+++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/scheduler.py
@@ -1,17 +1,11 @@
from typing import Annotated
-from fastapi import Depends, FastAPI, Request
+from fastapi import Depends, FastAPI
from ...core.settings import ComputationalBackendSettings
-from ...modules.comp_scheduler import BaseCompScheduler
from . import get_app
-def get_scheduler(request: Request) -> BaseCompScheduler:
- scheduler: BaseCompScheduler = request.app.state.scheduler
- return scheduler
-
-
def get_scheduler_settings(
app: Annotated[FastAPI, Depends(get_app)]
) -> ComputationalBackendSettings:
diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
index 251e35fa638..f25fdf32ece 100644
--- a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
+++ b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
@@ -21,7 +21,7 @@
from typing import Annotated, Any, Final
import networkx as nx
-from fastapi import APIRouter, Depends, HTTPException
+from fastapi import APIRouter, Depends, FastAPI, HTTPException
from models_library.api_schemas_directorv2.comp_tasks import (
ComputationCreate,
ComputationDelete,
@@ -63,7 +63,7 @@
from ...models.comp_runs import CompRunsAtDB, ProjectMetadataDict, RunMetadataDict
from ...models.comp_tasks import CompTaskAtDB
from ...modules.catalog import CatalogClient
-from ...modules.comp_scheduler import BaseCompScheduler
+from ...modules.comp_scheduler import run_new_pipeline, stop_pipeline
from ...modules.db.repositories.clusters import ClustersRepository
from ...modules.db.repositories.comp_pipelines import CompPipelinesRepository
from ...modules.db.repositories.comp_runs import CompRunsRepository
@@ -89,7 +89,6 @@
from ..dependencies.director_v0 import get_director_v0_client
from ..dependencies.rabbitmq import rabbitmq_rpc_client
from ..dependencies.rut_client import get_rut_client
-from ..dependencies.scheduler import get_scheduler
from .computations_tasks import analyze_pipeline
_PIPELINE_ABORT_TIMEOUT_S: Final[int] = 10
@@ -212,12 +211,12 @@ async def _get_project_node_names(
async def _try_start_pipeline(
+ app: FastAPI,
*,
project_repo: ProjectsRepository,
computation: ComputationCreate,
complete_dag: nx.DiGraph,
minimal_dag: nx.DiGraph,
- scheduler: BaseCompScheduler,
project: ProjectAtDB,
users_repo: UsersRepository,
projects_metadata_repo: ProjectsMetadataRepository,
@@ -242,11 +241,12 @@ async def _try_start_pipeline(
wallet_id = computation.wallet_info.wallet_id
wallet_name = computation.wallet_info.wallet_name
- await scheduler.run_new_pipeline(
- computation.user_id,
- computation.project_id,
- computation.cluster_id or DEFAULT_CLUSTER_ID,
- RunMetadataDict(
+ await run_new_pipeline(
+ app,
+ user_id=computation.user_id,
+ project_id=computation.project_id,
+ cluster_id=computation.cluster_id or DEFAULT_CLUSTER_ID,
+ run_metadata=RunMetadataDict(
node_id_names_map={
NodeID(node_idstr): node_data.label
for node_idstr, node_data in project.workbench.items()
@@ -313,7 +313,6 @@ async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positi
ProjectsMetadataRepository, Depends(get_repository(ProjectsMetadataRepository))
],
director_client: Annotated[DirectorV0Client, Depends(get_director_v0_client)],
- scheduler: Annotated[BaseCompScheduler, Depends(get_scheduler)],
catalog_client: Annotated[CatalogClient, Depends(get_catalog_client)],
rut_client: Annotated[ResourceUsageTrackerClient, Depends(get_rut_client)],
rpc_client: Annotated[RabbitMQRPCClient, Depends(rabbitmq_rpc_client)],
@@ -370,11 +369,11 @@ async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positi
if computation.start_pipeline:
await _try_start_pipeline(
+ request.app,
project_repo=project_repo,
computation=computation,
complete_dag=complete_dag,
minimal_dag=minimal_computational_dag,
- scheduler=scheduler,
project=project,
users_repo=users_repo,
projects_metadata_repo=projects_metadata_repo,
@@ -549,7 +548,6 @@ async def stop_computation(
comp_runs_repo: Annotated[
CompRunsRepository, Depends(get_repository(CompRunsRepository))
],
- scheduler: Annotated[BaseCompScheduler, Depends(get_scheduler)],
) -> ComputationGet:
_logger.debug(
"User %s stopping computation for project %s",
@@ -575,7 +573,9 @@ async def stop_computation(
pipeline_state = utils.get_pipeline_state_from_task_states(filtered_tasks)
if utils.is_pipeline_running(pipeline_state):
- await scheduler.stop_pipeline(computation_stop.user_id, project_id)
+ await stop_pipeline(
+ request.app, user_id=computation_stop.user_id, project_id=project_id
+ )
# get run details if any
last_run: CompRunsAtDB | None = None
@@ -615,6 +615,7 @@ async def stop_computation(
async def delete_computation(
computation_stop: ComputationDelete,
project_id: ProjectID,
+ request: Request,
project_repo: Annotated[
ProjectsRepository, Depends(get_repository(ProjectsRepository))
],
@@ -624,7 +625,6 @@ async def delete_computation(
comp_tasks_repo: Annotated[
CompTasksRepository, Depends(get_repository(CompTasksRepository))
],
- scheduler: Annotated[BaseCompScheduler, Depends(get_scheduler)],
) -> None:
try:
# get the project
@@ -642,7 +642,9 @@ async def delete_computation(
)
# abort the pipeline first
try:
- await scheduler.stop_pipeline(computation_stop.user_id, project_id)
+ await stop_pipeline(
+ request.app, user_id=computation_stop.user_id, project_id=project_id
+ )
except ComputationalSchedulerError as e:
_logger.warning(
"Project %s could not be stopped properly.\n reason: %s",
@@ -663,9 +665,9 @@ def return_last_value(retry_state: Any) -> Any:
before_sleep=before_sleep_log(_logger, logging.INFO),
)
async def check_pipeline_stopped() -> bool:
- comp_tasks: list[
- CompTaskAtDB
- ] = await comp_tasks_repo.list_computational_tasks(project_id)
+ comp_tasks: list[CompTaskAtDB] = (
+ await comp_tasks_repo.list_computational_tasks(project_id)
+ )
pipeline_state = utils.get_pipeline_state_from_task_states(
comp_tasks,
)
diff --git a/services/director-v2/src/simcore_service_director_v2/core/errors.py b/services/director-v2/src/simcore_service_director_v2/core/errors.py
index 18a5b674ed2..492e75bdeab 100644
--- a/services/director-v2/src/simcore_service_director_v2/core/errors.py
+++ b/services/director-v2/src/simcore_service_director_v2/core/errors.py
@@ -35,6 +35,10 @@ class ConfigurationError(DirectorError):
msg_template: str = "Application misconfiguration: {msg}"
+class UserNotFoundError(DirectorError):
+ msg_template: str = "user {user_id} not found"
+
+
class ProjectNotFoundError(DirectorError):
msg_template: str = "project {project_id} not found"
diff --git a/services/director-v2/src/simcore_service_director_v2/core/settings.py b/services/director-v2/src/simcore_service_director_v2/core/settings.py
index 0ccdce64de1..fe0af49fc5c 100644
--- a/services/director-v2/src/simcore_service_director_v2/core/settings.py
+++ b/services/director-v2/src/simcore_service_director_v2/core/settings.py
@@ -4,9 +4,10 @@
import datetime
from functools import cached_property
-from typing import Annotated
+from typing import Annotated, cast
from common_library.pydantic_validators import validate_numeric_string_as_timedelta
+from fastapi import FastAPI
from models_library.basic_types import LogLevel, PortInt, VersionTag
from models_library.clusters import (
DEFAULT_CLUSTER_ID,
@@ -21,6 +22,7 @@
AnyUrl,
Field,
NonNegativeInt,
+ PositiveInt,
field_validator,
)
from servicelib.logging_utils_filtering import LoggerName, MessageSubstring
@@ -72,6 +74,10 @@ class ComputationalBackendSettings(BaseCustomSettings):
COMPUTATIONAL_BACKEND_ENABLED: bool = Field(
default=True,
)
+ COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY: PositiveInt = Field(
+ default=50,
+ description="defines how many pipelines the application can schedule concurrently",
+ )
COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED: bool = Field(
default=True,
)
@@ -263,3 +269,7 @@ def _validate_loglevel(cls, value: str) -> str:
_validate_service_tracking_heartbeat = validate_numeric_string_as_timedelta(
"SERVICE_TRACKING_HEARTBEAT"
)
+
+
+def get_application_settings(app: FastAPI) -> AppSettings:
+ return cast(AppSettings, app.state.settings)
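
The new concurrency field behaves like any other settings field, so it can presumably be tuned per deployment through the environment; a minimal sketch of reading it back in application code (the AppSettings attribute path is an assumption, not shown in this patch):

    # e.g. COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY=100 in the service environment
    settings = get_application_settings(app)
    backend: ComputationalBackendSettings = settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND  # assumed field name
    limit = backend.COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY
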
diff --git a/services/director-v2/src/simcore_service_director_v2/models/comp_pipelines.py b/services/director-v2/src/simcore_service_director_v2/models/comp_pipelines.py
index 5de823d826b..63017ee62e7 100644
--- a/services/director-v2/src/simcore_service_director_v2/models/comp_pipelines.py
+++ b/services/director-v2/src/simcore_service_director_v2/models/comp_pipelines.py
@@ -17,7 +17,7 @@ class CompPipelineAtDB(BaseModel):
@field_validator("state", mode="before")
@classmethod
- def convert_state_from_state_type_enum_if_needed(cls, v):
+ def _convert_state_from_state_type_enum_if_needed(cls, v):
if isinstance(v, str):
# try to convert to a StateType, if it fails the validations will continue
# and pydantic will try to convert it to a RunninState later on
@@ -29,7 +29,7 @@ def convert_state_from_state_type_enum_if_needed(cls, v):
@field_validator("dag_adjacency_list", mode="before")
@classmethod
- def auto_convert_dag(cls, v):
+ def _auto_convert_dag(cls, v):
# this enforcement is here because the serialization using json is not happy with non str Dict keys, also comparison gets funny if the lists are having sometimes UUIDs or str.
# NOTE: this might not be necessary anymore once we have something fully defined
return {str(key): [str(n) for n in value] for key, value in v.items()}
diff --git a/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py
index 62270380293..f3fedc6a9f9 100644
--- a/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py
+++ b/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py
@@ -1,5 +1,6 @@
import datetime
from contextlib import suppress
+from typing import TypeAlias
from models_library.clusters import DEFAULT_CLUSTER_ID, ClusterID
from models_library.projects import ProjectID
@@ -37,12 +38,15 @@ class RunMetadataDict(TypedDict, total=False):
project_metadata: ProjectMetadataDict
+Iteration: TypeAlias = PositiveInt
+
+
class CompRunsAtDB(BaseModel):
run_id: PositiveInt
project_uuid: ProjectID
user_id: UserID
cluster_id: ClusterID | None
- iteration: PositiveInt
+ iteration: Iteration
result: RunningState
created: datetime.datetime
modified: datetime.datetime
@@ -51,6 +55,8 @@ class CompRunsAtDB(BaseModel):
cancelled: datetime.datetime | None
metadata: RunMetadataDict = RunMetadataDict()
use_on_demand_clusters: bool
+ scheduled: datetime.datetime | None
+ processed: datetime.datetime | None
@field_validator("result", mode="before")
@classmethod
@@ -103,6 +109,8 @@ def convert_null_to_empty_metadata(cls, v):
"modified": "2021-03-01T13:07:34.191610",
"cancelled": None,
"use_on_demand_clusters": False,
+ "scheduled": None,
+ "processed": None,
},
{
"run_id": 432,
@@ -117,6 +125,8 @@ def convert_null_to_empty_metadata(cls, v):
"modified": "2021-03-01T13:07:34.191610",
"cancelled": None,
"use_on_demand_clusters": False,
+ "scheduled": None,
+ "processed": None,
},
{
"run_id": 43243,
@@ -138,6 +148,8 @@ def convert_null_to_empty_metadata(cls, v):
"some-other-metadata-which-is-an-array": [1, 3, 4],
},
"use_on_demand_clusters": False,
+ "scheduled": None,
+ "processed": None,
},
{
"run_id": 43243,
@@ -153,6 +165,8 @@ def convert_null_to_empty_metadata(cls, v):
"cancelled": None,
"metadata": None,
"use_on_demand_clusters": False,
+ "scheduled": None,
+ "processed": None,
},
]
},
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/__init__.py
index 2b29acf16c9..cf3370f4da8 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/__init__.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/__init__.py
@@ -1,12 +1,13 @@
import logging
from collections.abc import Callable, Coroutine
-from typing import Any, cast
+from typing import Any
from fastapi import FastAPI
from servicelib.logging_utils import log_context
-from . import _scheduler_factory
-from ._base_scheduler import BaseCompScheduler
+from ._constants import MODULE_NAME_SCHEDULER
+from ._manager import run_new_pipeline, setup_manager, shutdown_manager, stop_pipeline
+from ._worker import setup_worker, shutdown_worker
_logger = logging.getLogger(__name__)
@@ -14,24 +15,25 @@
def on_app_startup(app: FastAPI) -> Callable[[], Coroutine[Any, Any, None]]:
async def start_scheduler() -> None:
with log_context(
- _logger, level=logging.INFO, msg="starting computational scheduler"
+ _logger, level=logging.INFO, msg=f"starting {MODULE_NAME_SCHEDULER}"
):
- app.state.scheduler = await _scheduler_factory.create_from_db(app)
+ await setup_worker(app)
+ await setup_manager(app)
return start_scheduler
def on_app_shutdown(app: FastAPI) -> Callable[[], Coroutine[Any, Any, None]]:
async def stop_scheduler() -> None:
- await get_scheduler(app).shutdown()
+ with log_context(
+ _logger, level=logging.INFO, msg=f"stopping {MODULE_NAME_SCHEDULER}"
+ ):
+ await shutdown_manager(app)
+ await shutdown_worker(app)
return stop_scheduler
-def get_scheduler(app: FastAPI) -> BaseCompScheduler:
- return cast(BaseCompScheduler, app.state.scheduler)
-
-
def setup(app: FastAPI):
app.add_event_handler("startup", on_app_startup(app))
app.add_event_handler("shutdown", on_app_shutdown(app))
@@ -39,6 +41,6 @@ def setup(app: FastAPI):
__all__: tuple[str, ...] = (
"setup",
- "BaseCompScheduler",
- "get_scheduler",
+ "run_new_pipeline",
+ "stop_pipeline",
)
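
With the module-level getter gone, callers no longer reach into `app.state.scheduler`; wiring the scheduler into an application stays a single call, roughly:

    from fastapi import FastAPI
    from simcore_service_director_v2.modules import comp_scheduler

    app = FastAPI()
    comp_scheduler.setup(app)  # registers startup/shutdown of the manager and the worker
    # pipelines are then driven through comp_scheduler.run_new_pipeline / stop_pipeline
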
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_constants.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_constants.py
new file mode 100644
index 00000000000..45efe93f0b0
--- /dev/null
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_constants.py
@@ -0,0 +1,7 @@
+import datetime
+from typing import Final
+
+MODULE_NAME_SCHEDULER: Final[str] = "computational-distributed-scheduler"
+MODULE_NAME_WORKER: Final[str] = "computational-distributed-worker"
+SCHEDULER_INTERVAL: Final[datetime.timedelta] = datetime.timedelta(seconds=5)
+MAX_CONCURRENT_PIPELINE_SCHEDULING: Final[int] = 10
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
new file mode 100644
index 00000000000..281c9fc4630
--- /dev/null
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
@@ -0,0 +1,173 @@
+import logging
+from typing import Final
+
+import networkx as nx
+from aiopg.sa import Engine
+from fastapi import FastAPI
+from models_library.clusters import ClusterID
+from models_library.projects import ProjectID
+from models_library.users import UserID
+from servicelib.background_task import start_periodic_task, stop_periodic_task
+from servicelib.exception_utils import silence_exceptions
+from servicelib.logging_utils import log_context
+from servicelib.redis import CouldNotAcquireLockError
+from servicelib.redis_utils import exclusive
+from servicelib.utils import limited_gather
+
+from ...models.comp_runs import RunMetadataDict
+from ...utils.rabbitmq import publish_project_log
+from ..db import get_db_engine
+from ..db.repositories.comp_pipelines import CompPipelinesRepository
+from ..db.repositories.comp_runs import CompRunsRepository
+from ..rabbitmq import get_rabbitmq_client
+from ._constants import (
+ MAX_CONCURRENT_PIPELINE_SCHEDULING,
+ MODULE_NAME_SCHEDULER,
+ SCHEDULER_INTERVAL,
+)
+from ._publisher import request_pipeline_scheduling
+from ._utils import SCHEDULED_STATES, get_redis_client_from_app, get_redis_lock_key
+
+_logger = logging.getLogger(__name__)
+
+
+async def run_new_pipeline(
+ app: FastAPI,
+ *,
+ user_id: UserID,
+ project_id: ProjectID,
+ cluster_id: ClusterID,
+ run_metadata: RunMetadataDict,
+ use_on_demand_clusters: bool,
+) -> None:
+ """Sets a new pipeline to be scheduled on the computational resources.
+ Passing cluster_id=0 will use the default cluster. Passing an existing ID will instruct
+ the scheduler to run the tasks on the defined cluster"""
+ # ensure the pipeline exists and is populated with something
+ db_engine = get_db_engine(app)
+ dag = await _get_pipeline_dag(project_id, db_engine)
+ if not dag:
+ _logger.warning(
+ "project %s has no computational dag defined. not scheduled for a run.",
+ f"{project_id=}",
+ )
+ return
+
+ new_run = await CompRunsRepository.instance(db_engine).create(
+ user_id=user_id,
+ project_id=project_id,
+ cluster_id=cluster_id,
+ metadata=run_metadata,
+ use_on_demand_clusters=use_on_demand_clusters,
+ )
+
+ rabbitmq_client = get_rabbitmq_client(app)
+ await request_pipeline_scheduling(
+ rabbitmq_client,
+ db_engine,
+ user_id=new_run.user_id,
+ project_id=new_run.project_uuid,
+ iteration=new_run.iteration,
+ )
+ await publish_project_log(
+ rabbitmq_client,
+ user_id,
+ project_id,
+ log=f"Project pipeline scheduled using {'on-demand clusters' if use_on_demand_clusters else 'pre-defined clusters'}, starting soon...",
+ log_level=logging.INFO,
+ )
+
+
+async def stop_pipeline(
+ app: FastAPI,
+ *,
+ user_id: UserID,
+ project_id: ProjectID,
+ iteration: int | None = None,
+) -> None:
+ db_engine = get_db_engine(app)
+ comp_run = await CompRunsRepository.instance(db_engine).get(
+ user_id, project_id, iteration
+ )
+
+ # mark the scheduled pipeline for stopping
+ updated_comp_run = await CompRunsRepository.instance(
+ db_engine
+ ).mark_for_cancellation(
+ user_id=user_id, project_id=project_id, iteration=comp_run.iteration
+ )
+ if updated_comp_run:
+ # ensure the scheduler starts right away
+ rabbitmq_client = get_rabbitmq_client(app)
+ await request_pipeline_scheduling(
+ rabbitmq_client,
+ db_engine,
+ user_id=updated_comp_run.user_id,
+ project_id=updated_comp_run.project_uuid,
+ iteration=updated_comp_run.iteration,
+ )
+
+
+async def _get_pipeline_dag(project_id: ProjectID, db_engine: Engine) -> nx.DiGraph:
+ comp_pipeline_repo = CompPipelinesRepository.instance(db_engine)
+ pipeline_at_db = await comp_pipeline_repo.get_pipeline(project_id)
+ return pipeline_at_db.get_graph()
+
+
+_LOST_TASKS_FACTOR: Final[int] = 10
+
+
+@exclusive(
+ get_redis_client_from_app,
+ lock_key=get_redis_lock_key(MODULE_NAME_SCHEDULER, unique_lock_key_builder=None),
+)
+async def schedule_all_pipelines(app: FastAPI) -> None:
+ with log_context(_logger, logging.DEBUG, msg="scheduling pipelines"):
+ db_engine = get_db_engine(app)
+ runs_to_schedule = await CompRunsRepository.instance(db_engine).list(
+ filter_by_state=SCHEDULED_STATES,
+ never_scheduled=True,
+ processed_since=SCHEDULER_INTERVAL,
+ )
+ possibly_lost_scheduled_pipelines = await CompRunsRepository.instance(
+ db_engine
+ ).list(
+ filter_by_state=SCHEDULED_STATES,
+ scheduled_since=SCHEDULER_INTERVAL * _LOST_TASKS_FACTOR,
+ )
+ if possibly_lost_scheduled_pipelines:
+ _logger.error(
+ "found %d lost pipelines, they will be re-scheduled now",
+ len(possibly_lost_scheduled_pipelines),
+ )
+
+ rabbitmq_client = get_rabbitmq_client(app)
+ with log_context(_logger, logging.DEBUG, msg="distributing pipelines"):
+ await limited_gather(
+ *(
+ request_pipeline_scheduling(
+ rabbitmq_client,
+ db_engine,
+ user_id=run.user_id,
+ project_id=run.project_uuid,
+ iteration=run.iteration,
+ )
+ for run in runs_to_schedule + possibly_lost_scheduled_pipelines
+ ),
+ limit=MAX_CONCURRENT_PIPELINE_SCHEDULING,
+ )
+ if runs_to_schedule:
+ _logger.debug("distributed %d pipelines", len(runs_to_schedule))
+
+
+async def setup_manager(app: FastAPI) -> None:
+ app.state.scheduler_manager = start_periodic_task(
+ silence_exceptions((CouldNotAcquireLockError,))(schedule_all_pipelines),
+ interval=SCHEDULER_INTERVAL,
+ task_name=MODULE_NAME_SCHEDULER,
+ app=app,
+ )
+
+
+async def shutdown_manager(app: FastAPI) -> None:
+ await stop_periodic_task(app.state.scheduler_manager)
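
Since `schedule_all_pipelines` is guarded by `exclusive(...)`, only one replica executes a scheduling round at a time; wrapping it with `silence_exceptions((CouldNotAcquireLockError,))` in `setup_manager` makes the replicas that lose the Redis lock skip the round instead of raising. Roughly:

    guarded = silence_exceptions((CouldNotAcquireLockError,))(schedule_all_pipelines)

    # while replica A holds the Redis lock and schedules,
    # replica B's periodic call simply returns None instead of raising
    await guarded(app)
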
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_models.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_models.py
new file mode 100644
index 00000000000..28dca04dc53
--- /dev/null
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_models.py
@@ -0,0 +1,19 @@
+from typing import Literal
+
+from models_library.projects import ProjectID
+from models_library.rabbitmq_messages import RabbitMessageBase
+from models_library.users import UserID
+
+from ...models.comp_runs import Iteration
+
+
+class SchedulePipelineRabbitMessage(RabbitMessageBase):
+ channel_name: Literal[
+ "simcore.services.director-v2.scheduling"
+ ] = "simcore.services.director-v2.scheduling"
+ user_id: UserID
+ project_id: ProjectID
+ iteration: Iteration
+
+ def routing_key(self) -> str | None: # pylint: disable=no-self-use # abstract
+ return None
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_publisher.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_publisher.py
new file mode 100644
index 00000000000..1c7ea23ac43
--- /dev/null
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_publisher.py
@@ -0,0 +1,32 @@
+from aiopg.sa import Engine
+from models_library.projects import ProjectID
+from models_library.users import UserID
+from servicelib.rabbitmq import RabbitMQClient
+
+from ...models.comp_runs import Iteration
+from ..db.repositories.comp_runs import CompRunsRepository
+from ._models import SchedulePipelineRabbitMessage
+
+
+async def request_pipeline_scheduling(
+ rabbitmq_client: RabbitMQClient,
+ db_engine: Engine,
+ *,
+ user_id: UserID,
+ project_id: ProjectID,
+ iteration: Iteration
+) -> None:
+ # NOTE: we should use the transaction and the asyncpg engine here to ensure 100% consistency
+ # https://github.com/ITISFoundation/osparc-simcore/issues/6818
+ # async with transaction_context(get_asyncpg_engine(app)) as connection:
+ await rabbitmq_client.publish(
+ SchedulePipelineRabbitMessage.get_channel_name(),
+ SchedulePipelineRabbitMessage(
+ user_id=user_id,
+ project_id=project_id,
+ iteration=iteration,
+ ),
+ )
+ await CompRunsRepository.instance(db_engine).mark_for_scheduling(
+ user_id=user_id, project_id=project_id, iteration=iteration
+ )
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_base_scheduler.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
similarity index 75%
rename from services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_base_scheduler.py
rename to services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
index 2d663aec9a1..a16821d0fba 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_base_scheduler.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
@@ -12,32 +12,26 @@
"""
import asyncio
-import contextlib
import datetime
-import functools
import logging
from abc import ABC, abstractmethod
from collections.abc import Callable
-from dataclasses import dataclass, field
-from typing import Final, TypeAlias
+from dataclasses import dataclass
+from typing import Final
import arrow
import networkx as nx
from aiopg.sa.engine import Engine
-from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID, NodeIDStr
from models_library.projects_state import RunningState
-from models_library.services import ServiceKey, ServiceType, ServiceVersion
+from models_library.services import ServiceType
from models_library.users import UserID
from networkx.classes.reportviews import InDegreeView
-from pydantic import PositiveInt
-from servicelib.background_task import start_periodic_task, stop_periodic_task
from servicelib.common_headers import UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE
-from servicelib.logging_utils import log_context
+from servicelib.logging_utils import log_catch, log_context
from servicelib.rabbitmq import RabbitMQClient, RabbitMQRPCClient
-from servicelib.redis import CouldNotAcquireLockError, RedisClientSDK
-from servicelib.redis_utils import exclusive
+from servicelib.redis import RedisClientSDK
from ...constants import UNDEFINED_STR_METADATA
from ...core.errors import (
@@ -45,7 +39,6 @@
ComputationalBackendNotConnectedError,
ComputationalBackendOnDemandNotReadyError,
ComputationalSchedulerChangedError,
- ComputationalSchedulerError,
DaskClientAcquisisitonError,
InvalidPipelineError,
PipelineNotFoundError,
@@ -53,19 +46,8 @@
)
from ...core.settings import ComputationalBackendSettings
from ...models.comp_pipelines import CompPipelineAtDB
-from ...models.comp_runs import CompRunsAtDB, RunMetadataDict
+from ...models.comp_runs import CompRunsAtDB, Iteration, RunMetadataDict
from ...models.comp_tasks import CompTaskAtDB
-from ...utils.comp_scheduler import (
- COMPLETED_STATES,
- PROCESSING_STATES,
- RUNNING_STATES,
- SCHEDULED_STATES,
- TASK_TO_START_STATES,
- WAITING_FOR_START_STATES,
- Iteration,
- create_service_resources_from_task,
- get_resource_tracking_run_id,
-)
from ...utils.computations import get_pipeline_state_from_task_states
from ...utils.rabbitmq import (
publish_project_log,
@@ -76,6 +58,16 @@
from ..db.repositories.comp_pipelines import CompPipelinesRepository
from ..db.repositories.comp_runs import CompRunsRepository
from ..db.repositories.comp_tasks import CompTasksRepository
+from ._publisher import request_pipeline_scheduling
+from ._utils import (
+ COMPLETED_STATES,
+ PROCESSING_STATES,
+ RUNNING_STATES,
+ TASK_TO_START_STATES,
+ WAITING_FOR_START_STATES,
+ create_service_resources_from_task,
+ get_resource_tracking_run_id,
+)
_logger = logging.getLogger(__name__)
@@ -83,13 +75,36 @@
_Previous = CompTaskAtDB
_Current = CompTaskAtDB
_MAX_WAITING_FOR_CLUSTER_TIMEOUT_IN_MIN: Final[int] = 10
-_SCHEDULER_INTERVAL: Final[datetime.timedelta] = datetime.timedelta(seconds=5)
-_TASK_NAME_TEMPLATE: Final[
- str
-] = "computational-scheduler-{user_id}:{project_id}:{iteration}"
-PipelineSchedulingTask: TypeAlias = asyncio.Task
-PipelineSchedulingWakeUpEvent: TypeAlias = asyncio.Event
+
+def _auto_schedule_callback(
+ loop: asyncio.AbstractEventLoop,
+ db_engine: Engine,
+ rabbit_mq_client: RabbitMQClient,
+ *,
+ user_id: UserID,
+ project_id: ProjectID,
+ iteration: Iteration,
+) -> Callable[[], None]:
+ """this function is called via Dask-backend from a separate thread.
+ Therefore the need to use run_coroutine_threadsafe to request a new
+ pipeline scheduling"""
+
+ def _cb() -> None:
+ async def _async_cb() -> None:
+ await request_pipeline_scheduling(
+ rabbit_mq_client,
+ db_engine,
+ user_id=user_id,
+ project_id=project_id,
+ iteration=iteration,
+ )
+
+ future = asyncio.run_coroutine_threadsafe(_async_cb(), loop)
+ with log_catch(_logger, reraise=False):
+ future.result(timeout=10)
+
+ return _cb
@dataclass(frozen=True, slots=True)
@@ -141,15 +156,6 @@ async def _triage_changed_tasks(
)
-@dataclass(kw_only=True)
-class ScheduledPipelineParams:
- scheduler_task: asyncio.Task
- scheduler_waker: asyncio.Event
-
- def wake_up(self) -> None:
- self.scheduler_waker.set()
-
-
@dataclass
class BaseCompScheduler(ABC):
db_engine: Engine
@@ -159,169 +165,6 @@ class BaseCompScheduler(ABC):
service_runtime_heartbeat_interval: datetime.timedelta
redis_client: RedisClientSDK
- # NOTE: this is a trick to be able to inheritate from the class
- _scheduled_pipelines: dict[
- tuple[UserID, ProjectID, Iteration], ScheduledPipelineParams
- ] = field(default_factory=dict, init=False)
-
- def __post_init__(self) -> None:
- self._scheduled_pipelines = {}
-
- async def restore_scheduling_from_db(self) -> None:
- # get currently scheduled runs
- comp_runs = await CompRunsRepository.instance(self.db_engine).list(
- filter_by_state=SCHEDULED_STATES
- )
-
- for run in comp_runs:
- task, wake_up_event = self._start_scheduling(
- run.user_id, run.project_uuid, run.iteration
- )
- self._scheduled_pipelines |= {
- (
- run.user_id,
- run.project_uuid,
- run.iteration,
- ): ScheduledPipelineParams(
- scheduler_task=task, scheduler_waker=wake_up_event
- )
- }
-
- async def run_new_pipeline(
- self,
- user_id: UserID,
- project_id: ProjectID,
- cluster_id: ClusterID,
- run_metadata: RunMetadataDict,
- *,
- use_on_demand_clusters: bool,
- ) -> None:
- """Sets a new pipeline to be scheduled on the computational resources.
- Passing cluster_id=0 will use the default cluster. Passing an existing ID will instruct
- the scheduler to run the tasks on the defined cluster"""
- # ensure the pipeline exists and is populated with something
- dag = await self._get_pipeline_dag(project_id)
- if not dag:
- _logger.warning(
- "project %s has no computational dag defined. not scheduled for a run.",
- f"{project_id=}",
- )
- return
-
- runs_repo = CompRunsRepository.instance(self.db_engine)
- new_run = await runs_repo.create(
- user_id=user_id,
- project_id=project_id,
- cluster_id=cluster_id,
- metadata=run_metadata,
- use_on_demand_clusters=use_on_demand_clusters,
- )
- task, wake_up_event = self._start_scheduling(
- user_id, project_id, new_run.iteration
- )
- self._scheduled_pipelines[
- (user_id, project_id, new_run.iteration)
- ] = ScheduledPipelineParams(scheduler_task=task, scheduler_waker=wake_up_event)
- await publish_project_log(
- self.rabbitmq_client,
- user_id,
- project_id,
- log=f"Project pipeline scheduled using {'on-demand clusters' if use_on_demand_clusters else 'pre-defined clusters'}, starting soon...",
- log_level=logging.INFO,
- )
-
- async def stop_pipeline(
- self, user_id: UserID, project_id: ProjectID, iteration: int | None = None
- ) -> None:
- if iteration is None:
- # if no iteration given find the latest one in the list
- possible_iterations = {
- it
- for u_id, p_id, it in self._scheduled_pipelines
- if u_id == user_id and p_id == project_id
- }
- if not possible_iterations:
- msg = f"There are no pipeline scheduled for {user_id}:{project_id}"
- raise ComputationalSchedulerError(msg=msg)
- current_max_iteration = max(possible_iterations)
- selected_iteration = current_max_iteration
- else:
- selected_iteration = iteration
-
- # mark the scheduled pipeline for stopping
- updated_comp_run = await CompRunsRepository.instance(
- self.db_engine
- ).mark_for_cancellation(
- user_id=user_id, project_id=project_id, iteration=selected_iteration
- )
- if updated_comp_run:
- assert updated_comp_run.cancelled is not None # nosec
- # ensure the scheduler starts right away
- self._scheduled_pipelines[
- (user_id, project_id, selected_iteration)
- ].wake_up()
-
- async def shutdown(self) -> None:
- # cancel all current scheduling processes
- await asyncio.gather(
- *(
- stop_periodic_task(p.scheduler_task, timeout=3)
- for p in self._scheduled_pipelines.values()
- if p.scheduler_task
- ),
- return_exceptions=True,
- )
-
- def _get_last_iteration(self, user_id: UserID, project_id: ProjectID) -> Iteration:
- # if no iteration given find the latest one in the list
- possible_iterations = {
- it
- for u_id, p_id, it in self._scheduled_pipelines
- if u_id == user_id and p_id == project_id
- }
- if not possible_iterations:
- msg = f"There are no pipeline scheduled for {user_id}:{project_id}"
- raise ComputationalSchedulerError(msg=msg)
- return max(possible_iterations)
-
- def _start_scheduling(
- self,
- user_id: UserID,
- project_id: ProjectID,
- iteration: Iteration,
- ) -> tuple[PipelineSchedulingTask, PipelineSchedulingWakeUpEvent]:
- async def _exclusive_safe_schedule_pipeline(
- *,
- user_id: UserID,
- project_id: ProjectID,
- iteration: Iteration,
- wake_up_callback: Callable[[], None],
- ) -> None:
- with contextlib.suppress(CouldNotAcquireLockError):
- await self._schedule_pipeline(
- user_id=user_id,
- project_id=project_id,
- iteration=iteration,
- wake_up_callback=wake_up_callback,
- )
-
- pipeline_wake_up_event = asyncio.Event()
- pipeline_task = start_periodic_task(
- functools.partial(
- _exclusive_safe_schedule_pipeline,
- user_id=user_id,
- project_id=project_id,
- iteration=iteration,
- wake_up_callback=pipeline_wake_up_event.set,
- ),
- interval=_SCHEDULER_INTERVAL,
- task_name=_TASK_NAME_TEMPLATE.format(
- user_id=user_id, project_id=project_id, iteration=iteration
- ),
- early_wake_up_event=pipeline_wake_up_event,
- )
- return pipeline_task, pipeline_wake_up_event
-
async def _get_pipeline_dag(self, project_id: ProjectID) -> nx.DiGraph:
comp_pipeline_repo = CompPipelinesRepository.instance(self.db_engine)
pipeline_at_db: CompPipelineAtDB = await comp_pipeline_repo.get_pipeline(
@@ -336,7 +179,7 @@ async def _get_pipeline_tasks(
) -> dict[NodeIDStr, CompTaskAtDB]:
comp_tasks_repo = CompTasksRepository.instance(self.db_engine)
pipeline_comp_tasks: dict[NodeIDStr, CompTaskAtDB] = {
- NodeIDStr(f"{t.node_id}"): t
+ f"{t.node_id}": t
for t in await comp_tasks_repo.list_computational_tasks(project_id)
if (f"{t.node_id}" in list(pipeline_dag.nodes()))
}
@@ -352,7 +195,7 @@ async def _update_run_result_from_tasks(
self,
user_id: UserID,
project_id: ProjectID,
- iteration: PositiveInt,
+ iteration: Iteration,
pipeline_tasks: dict[NodeIDStr, CompTaskAtDB],
) -> RunningState:
pipeline_state_from_tasks: RunningState = get_pipeline_state_from_task_states(
@@ -372,7 +215,7 @@ async def _set_run_result(
self,
user_id: UserID,
project_id: ProjectID,
- iteration: PositiveInt,
+ iteration: Iteration,
run_result: RunningState,
) -> None:
comp_runs_repo = CompRunsRepository.instance(self.db_engine)
@@ -384,6 +227,18 @@ async def _set_run_result(
final_state=(run_result in COMPLETED_STATES),
)
+ async def _set_schedule_done(
+ self,
+ user_id: UserID,
+ project_id: ProjectID,
+ iteration: Iteration,
+ ) -> None:
+ await CompRunsRepository.instance(self.db_engine).mark_as_processed(
+ user_id=user_id,
+ project_id=project_id,
+ iteration=iteration,
+ )
+
async def _set_states_following_failed_to_aborted(
self, project_id: ProjectID, dag: nx.DiGraph
) -> dict[NodeIDStr, CompTaskAtDB]:
@@ -394,9 +249,9 @@ async def _set_states_following_failed_to_aborted(
for task in tasks.values():
if task.state == RunningState.FAILED:
node_ids_to_set_as_aborted.update(nx.bfs_tree(dag, f"{task.node_id}"))
- node_ids_to_set_as_aborted.remove(NodeIDStr(f"{task.node_id}"))
+ node_ids_to_set_as_aborted.remove(f"{task.node_id}")
for node_id in node_ids_to_set_as_aborted:
- tasks[NodeIDStr(f"{node_id}")].state = RunningState.ABORTED
+ tasks[f"{node_id}"].state = RunningState.ABORTED
if node_ids_to_set_as_aborted:
# update the current states back in DB
comp_tasks_repo = CompTasksRepository.instance(self.db_engine)
@@ -544,8 +399,8 @@ async def _process_started_tasks(
root_parent_node_id=run_metadata.get("project_metadata", {}).get(
"root_parent_node_id"
),
- service_key=ServiceKey(t.image.name),
- service_version=ServiceVersion(t.image.tag),
+ service_key=t.image.name,
+ service_version=t.image.tag,
service_type=ServiceType.COMPUTATIONAL,
service_resources=create_service_resources_from_task(t),
service_additional_metadata={},
@@ -654,19 +509,19 @@ async def _start_tasks(
comp_run: CompRunsAtDB,
wake_up_callback: Callable[[], None],
) -> None:
- ...
+ """start tasks in the 3rd party backend"""
@abstractmethod
async def _get_tasks_status(
self, user_id: UserID, tasks: list[CompTaskAtDB], comp_run: CompRunsAtDB
) -> list[RunningState]:
- ...
+ """returns tasks status from the 3rd party backend"""
@abstractmethod
async def _stop_tasks(
self, user_id: UserID, tasks: list[CompTaskAtDB], comp_run: CompRunsAtDB
) -> None:
- ...
+ """stop tasks in the 3rd party backend"""
@abstractmethod
async def _process_completed_tasks(
@@ -676,32 +531,20 @@ async def _process_completed_tasks(
iteration: Iteration,
comp_run: CompRunsAtDB,
) -> None:
- ...
-
- @staticmethod
- def _build_exclusive_lock_key(*args, **kwargs) -> str:
- assert args # nosec
- return f"{kwargs['user_id']}:{kwargs['project_id']}:{kwargs['iteration']}"
-
- @staticmethod
- def _redis_client_getter(*args, **kwargs) -> RedisClientSDK:
- assert kwargs # nosec
- zelf = args[0]
- assert isinstance(zelf, BaseCompScheduler) # nosec
- return zelf.redis_client
-
- @exclusive(
- redis=_redis_client_getter,
- lock_key=_build_exclusive_lock_key,
- )
- async def _schedule_pipeline(
+ """process tasks from the 3rd party backend"""
+
+ async def apply(
self,
*,
user_id: UserID,
project_id: ProjectID,
- iteration: PositiveInt,
- wake_up_callback: Callable[[], None],
+ iteration: Iteration,
) -> None:
+ """schedules a pipeline for a given user, project and iteration.
+
+ Arguments:
+ wake_up_callback -- a callback function that is called in a separate thread everytime a pipeline node is completed
+ """
with log_context(
_logger,
level=logging.INFO,
@@ -734,7 +577,14 @@ async def _schedule_pipeline(
comp_tasks=comp_tasks,
dag=dag,
comp_run=comp_run,
- wake_up_callback=wake_up_callback,
+ wake_up_callback=_auto_schedule_callback(
+ asyncio.get_running_loop(),
+ self.db_engine,
+ self.rabbitmq_client,
+ user_id=user_id,
+ project_id=project_id,
+ iteration=iteration,
+ ),
)
# 4. timeout if waiting for cluster has been there for more than X minutes
comp_tasks = await self._timeout_if_waiting_for_cluster_too_long(
@@ -753,17 +603,11 @@ async def _schedule_pipeline(
# 7. Are we done scheduling that pipeline?
if not dag.nodes() or pipeline_result in COMPLETED_STATES:
# there is nothing left, the run is completed, we're done here
- self._scheduled_pipelines.pop(
- (user_id, project_id, iteration), None
- )
_logger.info(
"pipeline %s scheduling completed with result %s",
f"{project_id=}",
f"{pipeline_result=}",
)
- current_task = asyncio.current_task()
- assert current_task is not None # nosec
- current_task.cancel()
except PipelineNotFoundError:
_logger.warning(
"pipeline %s does not exist in comp_pipeline table, it will be removed from scheduler",
@@ -772,7 +616,6 @@ async def _schedule_pipeline(
await self._set_run_result(
user_id, project_id, iteration, RunningState.ABORTED
)
- self._scheduled_pipelines.pop((user_id, project_id, iteration), None)
except InvalidPipelineError as exc:
_logger.warning(
"pipeline %s appears to be misconfigured, it will be removed from scheduler. Please check pipeline:\n%s",
@@ -782,7 +625,6 @@ async def _schedule_pipeline(
await self._set_run_result(
user_id, project_id, iteration, RunningState.ABORTED
)
- self._scheduled_pipelines.pop((user_id, project_id, iteration), None)
except (DaskClientAcquisisitonError, ClustersKeeperNotAvailableError):
_logger.exception(
"Unexpected error while connecting with computational backend, aborting pipeline"
@@ -799,9 +641,10 @@ async def _schedule_pipeline(
await self._set_run_result(
user_id, project_id, iteration, RunningState.FAILED
)
- self._scheduled_pipelines.pop((user_id, project_id, iteration), None)
except ComputationalBackendNotConnectedError:
_logger.exception("Computational backend is not connected!")
+ finally:
+ await self._set_schedule_done(user_id, project_id, iteration)
async def _schedule_tasks_to_stop(
self,
@@ -846,9 +689,9 @@ async def _schedule_tasks_to_start( # noqa: C901
# get the tasks to start
tasks_ready_to_start: dict[NodeID, CompTaskAtDB] = {
- node_id: comp_tasks[NodeIDStr(f"{node_id}")]
+ node_id: comp_tasks[f"{node_id}"]
for node_id in next_task_node_ids
- if comp_tasks[NodeIDStr(f"{node_id}")].state in TASK_TO_START_STATES
+ if comp_tasks[f"{node_id}"].state in TASK_TO_START_STATES
}
if not tasks_ready_to_start:
@@ -879,9 +722,7 @@ async def _schedule_tasks_to_start( # noqa: C901
RunningState.WAITING_FOR_CLUSTER,
)
for task in tasks_ready_to_start:
- comp_tasks[
- NodeIDStr(f"{task}")
- ].state = RunningState.WAITING_FOR_CLUSTER
+ comp_tasks[f"{task}"].state = RunningState.WAITING_FOR_CLUSTER
except ComputationalBackendOnDemandNotReadyError as exc:
_logger.info(
@@ -903,9 +744,7 @@ async def _schedule_tasks_to_start( # noqa: C901
RunningState.WAITING_FOR_CLUSTER,
)
for task in tasks_ready_to_start:
- comp_tasks[
- NodeIDStr(f"{task}")
- ].state = RunningState.WAITING_FOR_CLUSTER
+ comp_tasks[f"{task}"].state = RunningState.WAITING_FOR_CLUSTER
except ClustersKeeperNotAvailableError:
_logger.exception("Unexpected error while starting tasks:")
await publish_project_log(
@@ -926,7 +765,7 @@ async def _schedule_tasks_to_start( # noqa: C901
optional_stopped=arrow.utcnow().datetime,
)
for task in tasks_ready_to_start:
- comp_tasks[NodeIDStr(f"{task}")].state = RunningState.FAILED
+ comp_tasks[f"{task}"].state = RunningState.FAILED
raise
except TaskSchedulingError as exc:
_logger.exception(
@@ -944,7 +783,7 @@ async def _schedule_tasks_to_start( # noqa: C901
optional_progress=1.0,
optional_stopped=arrow.utcnow().datetime,
)
- comp_tasks[NodeIDStr(f"{exc.node_id}")].state = RunningState.FAILED
+ comp_tasks[f"{exc.node_id}"].state = RunningState.FAILED
except Exception:
_logger.exception(
"Unexpected error for %s with %s on %s happened when scheduling %s:",
@@ -963,7 +802,7 @@ async def _schedule_tasks_to_start( # noqa: C901
optional_stopped=arrow.utcnow().datetime,
)
for task in tasks_ready_to_start:
- comp_tasks[NodeIDStr(f"{task}")].state = RunningState.FAILED
+ comp_tasks[f"{task}"].state = RunningState.FAILED
raise
return comp_tasks
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_dask_scheduler.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py
similarity index 98%
rename from services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_dask_scheduler.py
rename to services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py
index 2fdf7acd2e9..adc67853686 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_dask_scheduler.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py
@@ -1,10 +1,9 @@
import asyncio
import contextlib
import logging
-from collections.abc import AsyncIterator
+from collections.abc import AsyncIterator, Callable
from contextlib import asynccontextmanager
from dataclasses import dataclass
-from typing import Callable
import arrow
from dask_task_models_library.container_tasks.errors import TaskCancelledError
@@ -28,10 +27,9 @@
ComputationalBackendOnDemandNotReadyError,
TaskSchedulingError,
)
-from ...models.comp_runs import CompRunsAtDB, RunMetadataDict
+from ...models.comp_runs import CompRunsAtDB, Iteration, RunMetadataDict
from ...models.comp_tasks import CompTaskAtDB
from ...models.dask_subsystem import DaskClientTaskState
-from ...utils.comp_scheduler import Iteration, get_resource_tracking_run_id
from ...utils.dask import (
clean_task_output_and_log_files_if_invalid,
parse_dask_job_id,
@@ -50,7 +48,8 @@
from ..db.repositories.clusters import ClustersRepository
from ..db.repositories.comp_runs import CompRunsRepository
from ..db.repositories.comp_tasks import CompTasksRepository
-from ._base_scheduler import BaseCompScheduler
+from ._scheduler_base import BaseCompScheduler
+from ._utils import get_resource_tracking_run_id
_logger = logging.getLogger(__name__)
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_factory.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_factory.py
index 524dfc7e8ad..edda456f303 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_factory.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_factory.py
@@ -4,35 +4,28 @@
from servicelib.logging_utils import log_context
from settings_library.redis import RedisDatabase
-from ...core.errors import ConfigurationError
from ...core.settings import AppSettings
from ..dask_clients_pool import DaskClientsPool
+from ..db import get_db_engine
from ..rabbitmq import get_rabbitmq_client, get_rabbitmq_rpc_client
from ..redis import get_redis_client_manager
-from ._base_scheduler import BaseCompScheduler
-from ._dask_scheduler import DaskScheduler
+from ._scheduler_base import BaseCompScheduler
+from ._scheduler_dask import DaskScheduler
_logger = logging.getLogger(__name__)
-async def create_from_db(app: FastAPI) -> BaseCompScheduler:
- if not hasattr(app.state, "engine"):
- msg = "Database connection is missing. Please check application configuration."
- raise ConfigurationError(msg=msg)
- db_engine = app.state.engine
-
+def create_scheduler(app: FastAPI) -> BaseCompScheduler:
with log_context(
_logger, logging.INFO, msg="Creating Dask-based computational scheduler"
):
app_settings: AppSettings = app.state.settings
- scheduler = DaskScheduler(
+ return DaskScheduler(
settings=app_settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND,
dask_clients_pool=DaskClientsPool.instance(app),
rabbitmq_client=get_rabbitmq_client(app),
rabbitmq_rpc_client=get_rabbitmq_rpc_client(app),
redis_client=get_redis_client_manager(app).client(RedisDatabase.LOCKS),
- db_engine=db_engine,
+ db_engine=get_db_engine(app),
service_runtime_heartbeat_interval=app_settings.SERVICE_TRACKING_HEARTBEAT,
)
- await scheduler.restore_scheduling_from_db()
- return scheduler
diff --git a/services/director-v2/src/simcore_service_director_v2/utils/comp_scheduler.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py
similarity index 61%
rename from services/director-v2/src/simcore_service_director_v2/utils/comp_scheduler.py
rename to services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py
index 15f3481da10..0458b159811 100644
--- a/services/director-v2/src/simcore_service_director_v2/utils/comp_scheduler.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py
@@ -1,5 +1,6 @@
-from typing import TypeAlias
+from typing import Callable
+from fastapi import FastAPI
from models_library.docker import DockerGenericTag
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
@@ -10,9 +11,12 @@
ServiceResourcesDictHelpers,
)
from models_library.users import UserID
-from pydantic import PositiveInt
+from servicelib.redis import RedisClientSDK
+from settings_library.redis import RedisDatabase
+from simcore_service_director_v2.modules.redis import get_redis_client_manager
-from ..models.comp_tasks import CompTaskAtDB
+from ...models.comp_runs import Iteration
+from ...models.comp_tasks import CompTaskAtDB
SCHEDULED_STATES: set[RunningState] = {
RunningState.PUBLISHED,
@@ -51,9 +55,6 @@
}
-Iteration: TypeAlias = PositiveInt
-
-
def get_resource_tracking_run_id(
user_id: UserID, project_id: ProjectID, node_id: NodeID, iteration: Iteration
) -> str:
@@ -73,3 +74,34 @@ def create_service_resources_from_task(task: CompTaskAtDB) -> ServiceResourcesDi
},
[task.image.boot_mode],
)
+
+
+def _get_app_from_args(*args, **kwargs) -> FastAPI:
+ assert kwargs is not None # nosec
+ if args:
+ app = args[0]
+ else:
+ assert "app" in kwargs # nosec
+ app = kwargs["app"]
+ assert isinstance(app, FastAPI) # nosec
+ return app
+
+
+def get_redis_client_from_app(*args, **kwargs) -> RedisClientSDK:
+ app = _get_app_from_args(*args, **kwargs)
+ return get_redis_client_manager(app).client(RedisDatabase.LOCKS)
+
+
+def get_redis_lock_key(
+ suffix: str, *, unique_lock_key_builder: Callable[..., str] | None
+) -> Callable[..., str]:
+ def _(*args, **kwargs) -> str:
+ app = _get_app_from_args(*args, **kwargs)
+ unique_lock_part = (
+ unique_lock_key_builder(*args, **kwargs) if unique_lock_key_builder else ""
+ )
+ if unique_lock_part:
+ unique_lock_part = f"-{unique_lock_part}"
+ return f"{app.title}-{suffix}{unique_lock_part}"
+
+ return _
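
get_redis_lock_key builds the lock-key callable that the exclusive decorator evaluates with the same arguments as the decorated coroutine: the app title, then a suffix, then an optional per-call unique part. A minimal sketch of the resulting key; the suffix and ids below are illustrative, not the real MODULE_NAME_* constants:

    from fastapi import FastAPI

    from simcore_service_director_v2.modules.comp_scheduler._utils import (
        get_redis_lock_key,
    )

    app = FastAPI(title="simcore-service-director-v2")
    lock_key = get_redis_lock_key(
        "scheduler-worker",
        unique_lock_key_builder=lambda _app, user_id, project_id, iteration: f"{user_id}:{project_id}:{iteration}",
    )

    # the exclusive decorator calls this with the decorated coroutine's own arguments
    assert (
        lock_key(app, user_id=1, project_id="some-project-uuid", iteration=2)
        == "simcore-service-director-v2-scheduler-worker-1:some-project-uuid:2"
    )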
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_worker.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_worker.py
new file mode 100644
index 00000000000..397b68db0c9
--- /dev/null
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_worker.py
@@ -0,0 +1,94 @@
+import asyncio
+import contextlib
+import functools
+import logging
+from typing import cast
+
+from fastapi import FastAPI
+from models_library.projects import ProjectID
+from models_library.users import UserID
+from servicelib.logging_utils import log_context
+from servicelib.redis import CouldNotAcquireLockError
+from servicelib.redis_utils import exclusive
+
+from ...core.settings import get_application_settings
+from ...models.comp_runs import Iteration
+from ..rabbitmq import get_rabbitmq_client
+from ._constants import MODULE_NAME_WORKER
+from ._models import SchedulePipelineRabbitMessage
+from ._scheduler_base import BaseCompScheduler
+from ._scheduler_factory import create_scheduler
+from ._utils import get_redis_client_from_app, get_redis_lock_key
+
+_logger = logging.getLogger(__name__)
+
+
+def _get_scheduler_worker(app: FastAPI) -> BaseCompScheduler:
+ return cast(BaseCompScheduler, app.state.scheduler_worker)
+
+
+def _unique_key_builder(
+ _app, user_id: UserID, project_id: ProjectID, iteration: Iteration
+) -> str:
+ return f"{user_id}:{project_id}:{iteration}"
+
+
+@exclusive(
+ redis=get_redis_client_from_app,
+ lock_key=get_redis_lock_key(
+ MODULE_NAME_WORKER, unique_lock_key_builder=_unique_key_builder
+ ),
+)
+async def _exclusively_schedule_pipeline(
+ app: FastAPI, *, user_id: UserID, project_id: ProjectID, iteration: Iteration
+) -> None:
+ await _get_scheduler_worker(app).apply(
+ user_id=user_id,
+ project_id=project_id,
+ iteration=iteration,
+ )
+
+
+async def _handle_apply_distributed_schedule(app: FastAPI, data: bytes) -> bool:
+
+ with log_context(_logger, logging.DEBUG, msg="handling scheduling"):
+ to_schedule_pipeline = SchedulePipelineRabbitMessage.model_validate_json(data)
+ with contextlib.suppress(CouldNotAcquireLockError):
+ await _exclusively_schedule_pipeline(
+ app,
+ user_id=to_schedule_pipeline.user_id,
+ project_id=to_schedule_pipeline.project_id,
+ iteration=to_schedule_pipeline.iteration,
+ )
+ return True
+
+
+async def setup_worker(app: FastAPI) -> None:
+ app_settings = get_application_settings(app)
+ rabbitmq_client = get_rabbitmq_client(app)
+ app.state.scheduler_worker_consumers = await asyncio.gather(
+ *(
+ rabbitmq_client.subscribe(
+ SchedulePipelineRabbitMessage.get_channel_name(),
+ functools.partial(_handle_apply_distributed_schedule, app),
+ exclusive_queue=False,
+ )
+ for _ in range(
+ app_settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY
+ )
+ )
+ )
+
+ app.state.scheduler_worker = create_scheduler(app)
+
+
+async def shutdown_worker(app: FastAPI) -> None:
+ assert app.state.scheduler_worker # nosec
+ rabbitmq_client = get_rabbitmq_client(app)
+ await asyncio.gather(
+ *(
+ rabbitmq_client.unsubscribe_consumer(*consumer)
+ for consumer in app.state.scheduler_worker_consumers
+ ),
+ return_exceptions=False,
+ )
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/db/__init__.py
index 7a5826d1aa3..a112ae63d46 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/__init__.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/db/__init__.py
@@ -1,3 +1,6 @@
+from typing import cast
+
+from aiopg.sa import Engine
from fastapi import FastAPI
from settings_library.postgres import PostgresSettings
@@ -22,4 +25,11 @@ async def on_shutdown() -> None:
app.add_event_handler("shutdown", on_shutdown)
-__all__: tuple[str, ...] = ("get_asyncpg_engine",)
+def get_db_engine(app: FastAPI) -> Engine:
+ return cast(Engine, app.state.engine)
+
+
+__all__: tuple[str, ...] = (
+ "get_asyncpg_engine",
+ "get_db_engine",
+)
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
index 9ce28bcda8d..b746407a8aa 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
@@ -1,7 +1,6 @@
import datetime
import logging
-from collections import deque
-from typing import Any
+from typing import Any, Final
import arrow
import sqlalchemy as sa
@@ -17,7 +16,13 @@
from sqlalchemy.sql.elements import literal_column
from sqlalchemy.sql.expression import desc
-from ....core.errors import ClusterNotFoundError, ComputationalRunNotFoundError
+from ....core.errors import (
+ ClusterNotFoundError,
+ ComputationalRunNotFoundError,
+ DirectorError,
+ ProjectNotFoundError,
+ UserNotFoundError,
+)
from ....models.comp_runs import CompRunsAtDB, RunMetadataDict
from ....utils.db import RUNNING_STATE_TO_DB
from ..tables import comp_runs
@@ -25,6 +30,24 @@
logger = logging.getLogger(__name__)
+_POSTGRES_FK_COLUMN_TO_ERROR_MAP: Final[
+ dict[sa.Column, tuple[type[DirectorError], tuple[str, ...]]]
+] = {
+ comp_runs.c.user_id: (UserNotFoundError, ("users", "user_id")),
+ comp_runs.c.project_uuid: (
+ ProjectNotFoundError,
+ ("projects", "project_id"),
+ ),
+ comp_runs.c.cluster_id: (
+ ClusterNotFoundError,
+ ("clusters", "cluster_id"),
+ ),
+}
+_DEFAULT_FK_CONSTRAINT_TO_ERROR: Final[tuple[type[DirectorError], tuple]] = (
+ DirectorError,
+ (),
+)
+
class CompRunsRepository(BaseRepository):
async def get(
@@ -55,24 +78,74 @@ async def get(
return CompRunsAtDB.model_validate(row)
async def list(
- self, filter_by_state: set[RunningState] | None = None
+ self,
+ *,
+ filter_by_state: set[RunningState] | None = None,
+ never_scheduled: bool = False,
+ processed_since: datetime.timedelta | None = None,
+ scheduled_since: datetime.timedelta | None = None,
) -> list[CompRunsAtDB]:
- if not filter_by_state:
- filter_by_state = set()
- runs_in_db: deque[CompRunsAtDB] = deque()
+ """lists the computational runs:
+ filter_by_state AND (never_scheduled OR processed_since OR scheduled_since)
+
+
+ Keyword Arguments:
+ filter_by_state -- will return only the runs with result in filter_by_state (default: {None})
+ never_scheduled -- will return the runs which were never scheduled (default: {False})
+ processed_since -- will return the runs which were processed since X, which are not re-scheduled since then (default: {None})
+ scheduled_since -- will return the runs which were scheduled since X, which are not processed since then (default: {None})
+ """
+
+ conditions = []
+ if filter_by_state:
+ conditions.append(
+ or_(
+ *[
+ comp_runs.c.result == RUNNING_STATE_TO_DB[s]
+ for s in filter_by_state
+ ]
+ )
+ )
+
+ scheduling_or_conditions = []
+ if never_scheduled:
+ scheduling_or_conditions.append(comp_runs.c.scheduled.is_(None))
+ if scheduled_since is not None:
+ # a scheduled run is a run that has been scheduled but not processed yet
+            # i.e. the processing timepoint is either null or before the scheduling timepoint
+ scheduled_cutoff = arrow.utcnow().datetime - scheduled_since
+ scheduling_filter = (
+ comp_runs.c.scheduled.is_not(None)
+ & (
+ comp_runs.c.processed.is_(None)
+ | (comp_runs.c.scheduled > comp_runs.c.processed)
+ )
+ & (comp_runs.c.scheduled <= scheduled_cutoff)
+ )
+ scheduling_or_conditions.append(scheduling_filter)
+
+ if processed_since is not None:
+ # a processed run is a run that has been scheduled and processed
+ # and the processing timepoint is after the scheduling timepoint
+ processed_cutoff = arrow.utcnow().datetime - processed_since
+ processed_filter = (
+ comp_runs.c.processed.is_not(None)
+ & (comp_runs.c.processed > comp_runs.c.scheduled)
+ & (comp_runs.c.processed <= processed_cutoff)
+ )
+
+ scheduling_or_conditions.append(processed_filter)
+
+ if scheduling_or_conditions:
+ conditions.append(sa.or_(*scheduling_or_conditions))
+
async with self.db_engine.acquire() as conn:
- async for row in conn.execute(
- sa.select(comp_runs).where(
- or_(
- *[
- comp_runs.c.result == RUNNING_STATE_TO_DB[s]
- for s in filter_by_state
- ]
- )
+ return [
+ CompRunsAtDB.model_validate(row)
+ async for row in conn.execute(
+ sa.select(comp_runs).where(sa.and_(*conditions))
)
- ):
- runs_in_db.append(CompRunsAtDB.model_validate(row))
- return list(runs_in_db)
+ ]
async def create(
self,
@@ -117,7 +190,17 @@ async def create(
row = await result.first()
return CompRunsAtDB.model_validate(row)
except ForeignKeyViolation as exc:
- raise ClusterNotFoundError(cluster_id=cluster_id) from exc
+ assert exc.diag.constraint_name # nosec # noqa: PT017
+ for foreign_key in comp_runs.foreign_keys:
+ if exc.diag.constraint_name == foreign_key.name:
+ assert foreign_key.parent is not None # nosec
+ exc_type, exc_keys = _POSTGRES_FK_COLUMN_TO_ERROR_MAP[
+ foreign_key.parent
+ ]
+ raise exc_type(
+ **{f"{k}": locals().get(k) for k in exc_keys}
+ ) from exc
+ raise DirectorError from exc
async def update(
self, user_id: UserID, project_id: ProjectID, iteration: PositiveInt, **values
@@ -164,3 +247,24 @@ async def mark_for_cancellation(
iteration,
cancelled=arrow.utcnow().datetime,
)
+
+ async def mark_for_scheduling(
+ self, *, user_id: UserID, project_id: ProjectID, iteration: PositiveInt
+ ) -> CompRunsAtDB | None:
+ return await self.update(
+ user_id,
+ project_id,
+ iteration,
+ scheduled=arrow.utcnow().datetime,
+ processed=None,
+ )
+
+ async def mark_as_processed(
+ self, *, user_id: UserID, project_id: ProjectID, iteration: PositiveInt
+ ) -> CompRunsAtDB | None:
+ return await self.update(
+ user_id,
+ project_id,
+ iteration,
+ processed=arrow.utcnow().datetime,
+ )
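
Together with the extended list() filters, the two new markers implement the distributed-scheduling handshake: the manager stamps scheduled when it distributes a run, and a worker stamps processed once it finishes a scheduling round. A minimal sketch of how these helpers could be combined, assuming the manager selects runs roughly with these filters (in the real flow the scheduled stamp is set via request_pipeline_scheduling, which also publishes the message):

    from aiopg.sa import Engine

    from simcore_service_director_v2.modules.comp_scheduler._constants import (
        SCHEDULER_INTERVAL,
    )
    from simcore_service_director_v2.modules.db.repositories.comp_runs import (
        CompRunsRepository,
    )


    async def _distribute_once(db_engine: Engine) -> None:
        repo = CompRunsRepository.instance(db_engine)
        # runs never distributed, runs whose last round is older than the interval,
        # and runs distributed but apparently stuck without being processed
        runs_to_schedule = await repo.list(
            never_scheduled=True,
            processed_since=SCHEDULER_INTERVAL,
            scheduled_since=SCHEDULER_INTERVAL,
        )
        for run in runs_to_schedule:
            # sets scheduled=now and resets processed, so the run is not re-distributed right away
            await repo.mark_for_scheduling(
                user_id=run.user_id, project_id=run.project_uuid, iteration=run.iteration
            )
        # ...a worker later runs one scheduling pass and finally calls mark_as_processed()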
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
index 637e0c7faf6..51082b698f1 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
@@ -55,9 +55,9 @@
from .....models.comp_tasks import CompTaskAtDB, Image, NodeSchema
from .....models.pricing import PricingInfo
from .....modules.resource_usage_tracker_client import ResourceUsageTrackerClient
-from .....utils.comp_scheduler import COMPLETED_STATES
from .....utils.computations import to_node_class
from ....catalog import CatalogClient
+from ....comp_scheduler._utils import COMPLETED_STATES
from ....director_v0 import DirectorV0Client
from ...tables import NodeClass
@@ -146,12 +146,12 @@ async def _get_node_infos(
None,
)
- result: tuple[
- ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels
- ] = await asyncio.gather(
- _get_service_details(catalog_client, user_id, product_name, node),
- director_client.get_service_extras(node.key, node.version),
- director_client.get_service_labels(node),
+ result: tuple[ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels] = (
+ await asyncio.gather(
+ _get_service_details(catalog_client, user_id, product_name, node),
+ director_client.get_service_extras(node.key, node.version),
+ director_client.get_service_labels(node),
+ )
)
return result
@@ -247,9 +247,9 @@ async def _get_pricing_and_hardware_infos(
return pricing_info, hardware_info
-_RAM_SAFE_MARGIN_RATIO: Final[
- float
-] = 0.1 # NOTE: machines always have less available RAM than advertised
+_RAM_SAFE_MARGIN_RATIO: Final[float] = (
+ 0.1 # NOTE: machines always have less available RAM than advertised
+)
_CPUS_SAFE_MARGIN: Final[float] = 0.1
@@ -267,11 +267,11 @@ async def _update_project_node_resources_from_hardware_info(
if not hardware_info.aws_ec2_instances:
return
try:
- unordered_list_ec2_instance_types: list[
- EC2InstanceTypeGet
- ] = await get_instance_type_details(
- rabbitmq_rpc_client,
- instance_type_names=set(hardware_info.aws_ec2_instances),
+ unordered_list_ec2_instance_types: list[EC2InstanceTypeGet] = (
+ await get_instance_type_details(
+ rabbitmq_rpc_client,
+ instance_type_names=set(hardware_info.aws_ec2_instances),
+ )
)
assert unordered_list_ec2_instance_types # nosec
diff --git a/services/director-v2/tests/conftest.py b/services/director-v2/tests/conftest.py
index fcc0db6dbf1..72b94ec3262 100644
--- a/services/director-v2/tests/conftest.py
+++ b/services/director-v2/tests/conftest.py
@@ -197,23 +197,24 @@ def mock_env(
@pytest.fixture()
-async def client(mock_env: EnvVarsDict) -> AsyncIterator[TestClient]:
+async def initialized_app(mock_env: EnvVarsDict) -> AsyncIterable[FastAPI]:
settings = AppSettings.create_from_envs()
app = init_app(settings)
print("Application settings\n", settings.model_dump_json(indent=2))
- # NOTE: this way we ensure the events are run in the application
- # since it starts the app on a test server
- with TestClient(app, raise_server_exceptions=True) as test_client:
- yield test_client
+ async with LifespanManager(app):
+ yield app
@pytest.fixture()
-async def initialized_app(mock_env: EnvVarsDict) -> AsyncIterable[FastAPI]:
+async def client(mock_env: EnvVarsDict) -> AsyncIterator[TestClient]:
+ # NOTE: this way we ensure the events are run in the application
+ # since it starts the app on a test server
settings = AppSettings.create_from_envs()
app = init_app(settings)
+    # NOTE: we cannot use the initialized_app fixture here since the TestClient itself runs the app's startup/shutdown events
print("Application settings\n", settings.model_dump_json(indent=2))
- async with LifespanManager(app):
- yield app
+ with TestClient(app, raise_server_exceptions=True) as test_client:
+ yield test_client
@pytest.fixture()
diff --git a/services/director-v2/tests/unit/_helpers.py b/services/director-v2/tests/unit/_helpers.py
index 779d6cdd117..45632d0454a 100644
--- a/services/director-v2/tests/unit/_helpers.py
+++ b/services/director-v2/tests/unit/_helpers.py
@@ -1,38 +1,33 @@
from dataclasses import dataclass
-from typing import Any
+from typing import Any, Callable
import aiopg
import aiopg.sa
-from models_library.projects import ProjectAtDB
+import sqlalchemy as sa
+from models_library.projects import ProjectAtDB, ProjectID
from models_library.projects_nodes_io import NodeID
-from simcore_postgres_database.models.comp_pipeline import StateType
+from models_library.projects_state import RunningState
+from pydantic import TypeAdapter
+from simcore_postgres_database.models.comp_runs import comp_runs
from simcore_postgres_database.models.comp_tasks import comp_tasks
from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB
from simcore_service_director_v2.models.comp_runs import CompRunsAtDB
from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB
+from sqlalchemy.ext.asyncio import AsyncEngine
@dataclass
class PublishedProject:
+ user: dict[str, Any]
project: ProjectAtDB
pipeline: CompPipelineAtDB
tasks: list[CompTaskAtDB]
-@dataclass
+@dataclass(kw_only=True)
class RunningProject(PublishedProject):
runs: CompRunsAtDB
-
-
-async def set_comp_task_state(
- aiopg_engine: aiopg.sa.engine.Engine, node_id: str, state: StateType
-) -> None:
- async with aiopg_engine.acquire() as conn:
- await conn.execute(
- comp_tasks.update()
- .where(comp_tasks.c.node_id == node_id)
- .values(state=state)
- )
+ task_to_callback_mapping: dict[NodeID, Callable[[], None]]
async def set_comp_task_outputs(
@@ -61,3 +56,55 @@ async def set_comp_task_inputs(
.where(comp_tasks.c.node_id == f"{node_id}")
.values(inputs=inputs, schema={"outputs": {}, "inputs": inputs_schema})
)
+
+
+async def assert_comp_runs(
+ sqlalchemy_async_engine: AsyncEngine,
+ *,
+ expected_total: int,
+ expected_state: RunningState | None = None,
+ where_statement: Any | None = None,
+) -> list[CompRunsAtDB]:
+ async with sqlalchemy_async_engine.connect() as conn:
+ query = sa.select(comp_runs)
+ if where_statement is not None:
+ query = query.where(where_statement)
+ list_of_comp_runs = [
+ CompRunsAtDB.model_validate(row) for row in await conn.execute(query)
+ ]
+ assert len(list_of_comp_runs) == expected_total
+ if list_of_comp_runs and expected_state:
+ assert all(
+ r.result is expected_state for r in list_of_comp_runs
+ ), f"expected state '{expected_state}', got {[r.result for r in list_of_comp_runs]}"
+ return list_of_comp_runs
+
+
+async def assert_comp_runs_empty(sqlalchemy_async_engine: AsyncEngine) -> None:
+ await assert_comp_runs(sqlalchemy_async_engine, expected_total=0)
+
+
+async def assert_comp_tasks(
+ sqlalchemy_async_engine: AsyncEngine,
+ *,
+ project_uuid: ProjectID,
+ task_ids: list[NodeID],
+ expected_state: RunningState,
+ expected_progress: float | None,
+) -> list[CompTaskAtDB]:
+    # check that the comp_tasks table is correctly updated for the given tasks
+ async with sqlalchemy_async_engine.connect() as conn:
+ result = await conn.execute(
+ comp_tasks.select().where(
+ (comp_tasks.c.project_id == f"{project_uuid}")
+ & (comp_tasks.c.node_id.in_([f"{n}" for n in task_ids]))
+            )  # selects all the requested tasks of that project
+ )
+ tasks = TypeAdapter(list[CompTaskAtDB]).validate_python(result.fetchall())
+ assert all(
+ t.state == expected_state for t in tasks
+ ), f"expected state: {expected_state}, found: {[t.state for t in tasks]}"
+ assert all(
+ t.progress == expected_progress for t in tasks
+ ), f"{expected_progress=}, found: {[t.progress for t in tasks]}"
+ return tasks
diff --git a/services/director-v2/tests/unit/conftest.py b/services/director-v2/tests/unit/conftest.py
index 1375795f0cb..cdf0751fab4 100644
--- a/services/director-v2/tests/unit/conftest.py
+++ b/services/director-v2/tests/unit/conftest.py
@@ -184,6 +184,11 @@ def fake_s3_settings(faker: Faker) -> S3Settings:
)
+@pytest.fixture
+def fake_s3_envs(fake_s3_settings: S3Settings) -> EnvVarsDict:
+ return fake_s3_settings.model_dump()
+
+
@pytest.fixture
def mocked_storage_service_api(
fake_s3_settings: S3Settings,
diff --git a/services/director-v2/tests/unit/test_utils_comp_scheduler.py b/services/director-v2/tests/unit/test_utils_comp_scheduler.py
index dfb7c0326b1..05c899a5e40 100644
--- a/services/director-v2/tests/unit/test_utils_comp_scheduler.py
+++ b/services/director-v2/tests/unit/test_utils_comp_scheduler.py
@@ -10,7 +10,7 @@
from models_library.projects_state import RunningState
from models_library.users import UserID
from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB
-from simcore_service_director_v2.utils.comp_scheduler import (
+from simcore_service_director_v2.modules.comp_scheduler._utils import (
COMPLETED_STATES,
SCHEDULED_STATES,
TASK_TO_START_STATES,
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/conftest.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/conftest.py
new file mode 100644
index 00000000000..8f1c2898222
--- /dev/null
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/conftest.py
@@ -0,0 +1,69 @@
+# pylint:disable=unused-variable
+# pylint:disable=unused-argument
+# pylint:disable=redefined-outer-name
+# pylint:disable=no-value-for-parameter
+# pylint:disable=protected-access
+# pylint:disable=too-many-arguments
+# pylint:disable=no-name-in-module
+# pylint: disable=too-many-statements
+
+
+from unittest import mock
+
+import pytest
+import sqlalchemy as sa
+from pytest_mock.plugin import MockerFixture
+from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
+from pytest_simcore.helpers.typing_env import EnvVarsDict
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
+
+
+@pytest.fixture
+def mock_env(
+ mock_env: EnvVarsDict,
+ monkeypatch: pytest.MonkeyPatch,
+ fake_s3_envs: EnvVarsDict,
+ postgres_db: sa.engine.Engine,
+ postgres_host_config: dict[str, str],
+ rabbit_service: RabbitSettings,
+ redis_service: RedisSettings,
+) -> EnvVarsDict:
+ return mock_env | setenvs_from_dict(
+ monkeypatch,
+ {k: f"{v}" for k, v in fake_s3_envs.items()}
+ | {
+ "COMPUTATIONAL_BACKEND_ENABLED": True,
+ "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": True,
+ },
+ )
+
+
+@pytest.fixture
+def with_disabled_auto_scheduling(mocker: MockerFixture) -> mock.Mock:
+ mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler.shutdown_manager",
+ )
+ return mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler.setup_manager",
+ )
+
+
+@pytest.fixture
+def with_disabled_scheduler_worker(mocker: MockerFixture) -> mock.Mock:
+ mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler.shutdown_worker",
+ autospec=True,
+ )
+ return mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler.setup_worker",
+ autospec=True,
+ )
+
+
+@pytest.fixture
+def with_disabled_scheduler_publisher(mocker: MockerFixture) -> mock.Mock:
+ return mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler._manager.request_pipeline_scheduling",
+ autospec=True,
+ )
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py
new file mode 100644
index 00000000000..ba903d1b069
--- /dev/null
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py
@@ -0,0 +1,525 @@
+# pylint: disable=no-value-for-parameter
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+import asyncio
+import datetime
+import random
+from collections.abc import Awaitable, Callable
+from typing import cast
+
+import arrow
+import pytest
+from _helpers import PublishedProject
+from faker import Faker
+from models_library.clusters import DEFAULT_CLUSTER_ID, Cluster
+from models_library.projects import ProjectID
+from models_library.projects_state import RunningState
+from models_library.users import UserID
+from simcore_service_director_v2.core.errors import (
+ ClusterNotFoundError,
+ ComputationalRunNotFoundError,
+ ProjectNotFoundError,
+ UserNotFoundError,
+)
+from simcore_service_director_v2.models.comp_runs import CompRunsAtDB, RunMetadataDict
+from simcore_service_director_v2.modules.comp_scheduler._constants import (
+ SCHEDULER_INTERVAL,
+)
+from simcore_service_director_v2.modules.db.repositories.comp_runs import (
+ CompRunsRepository,
+)
+
+pytest_simcore_core_services_selection = [
+ "postgres",
+]
+pytest_simcore_ops_services_selection = [
+ "adminer",
+]
+
+
+@pytest.fixture
+def fake_user_id(faker: Faker) -> UserID:
+ return faker.pyint(min_value=1)
+
+
+@pytest.fixture
+def fake_project_id(faker: Faker) -> ProjectID:
+ return ProjectID(f"{faker.uuid4(cast_to=None)}")
+
+
+async def test_get(
+ aiopg_engine,
+ fake_user_id: UserID,
+ fake_project_id: ProjectID,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+ create_comp_run: Callable[..., Awaitable[CompRunsAtDB]],
+):
+ with pytest.raises(ComputationalRunNotFoundError):
+ await CompRunsRepository(aiopg_engine).get(fake_user_id, fake_project_id)
+
+ published_project = await publish_project()
+ assert published_project.project.prj_owner
+ # there is still no comp run created
+ with pytest.raises(ComputationalRunNotFoundError):
+ await CompRunsRepository(aiopg_engine).get(
+ published_project.project.prj_owner, published_project.project.uuid
+ )
+
+ await create_comp_run(published_project.user, published_project.project)
+ await CompRunsRepository(aiopg_engine).get(
+ published_project.project.prj_owner, published_project.project.uuid
+ )
+
+
+async def test_list(
+ aiopg_engine,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+ run_metadata: RunMetadataDict,
+ faker: Faker,
+):
+ assert await CompRunsRepository(aiopg_engine).list() == []
+
+ published_project = await publish_project()
+ assert await CompRunsRepository(aiopg_engine).list() == []
+
+ created = await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ assert await CompRunsRepository(aiopg_engine).list() == [created]
+
+ created = [created] + await asyncio.gather(
+ *(
+ CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=created.iteration + n + 1,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ for n in range(50)
+ )
+ )
+ assert sorted(
+ await CompRunsRepository(aiopg_engine).list(), key=lambda x: x.iteration
+ ) == sorted(created, key=lambda x: x.iteration)
+
+ # test with filter of state
+ any_state_but_published = {
+ s for s in RunningState if s is not RunningState.PUBLISHED
+ }
+ assert (
+ await CompRunsRepository(aiopg_engine).list(
+ filter_by_state=any_state_but_published
+ )
+ == []
+ )
+ assert sorted(
+ await CompRunsRepository(aiopg_engine).list(
+ filter_by_state={RunningState.PUBLISHED}
+ ),
+ key=lambda x: x.iteration,
+ ) == sorted(created, key=lambda x: x.iteration)
+
+    # test the never_scheduled filter: initially all runs are unscheduled, then mark a bunch for scheduling
+ assert sorted(
+ await CompRunsRepository(aiopg_engine).list(never_scheduled=True),
+ key=lambda x: x.iteration,
+ ) == sorted(created, key=lambda x: x.iteration)
+ comp_runs_marked_for_scheduling = random.sample(created, k=25)
+ await asyncio.gather(
+ *(
+ CompRunsRepository(aiopg_engine).mark_for_scheduling(
+ user_id=comp_run.user_id,
+ project_id=comp_run.project_uuid,
+ iteration=comp_run.iteration,
+ )
+ for comp_run in comp_runs_marked_for_scheduling
+ )
+ )
+ # filter them away
+ created = [r for r in created if r not in comp_runs_marked_for_scheduling]
+ assert sorted(
+ await CompRunsRepository(aiopg_engine).list(never_scheduled=True),
+ key=lambda x: x.iteration,
+ ) == sorted(created, key=lambda x: x.iteration)
+
+ # now mark a few of them as processed
+ comp_runs_marked_as_processed = random.sample(comp_runs_marked_for_scheduling, k=11)
+ await asyncio.gather(
+ *(
+ CompRunsRepository(aiopg_engine).mark_as_processed(
+ user_id=comp_run.user_id,
+ project_id=comp_run.project_uuid,
+ iteration=comp_run.iteration,
+ )
+ for comp_run in comp_runs_marked_as_processed
+ )
+ )
+ # filter them away
+ comp_runs_marked_for_scheduling = [
+ r
+ for r in comp_runs_marked_for_scheduling
+ if r not in comp_runs_marked_as_processed
+ ]
+ # since they were just marked as processed now, we will get nothing
+ assert (
+ sorted(
+ await CompRunsRepository(aiopg_engine).list(
+ never_scheduled=False, processed_since=SCHEDULER_INTERVAL
+ ),
+ key=lambda x: x.iteration,
+ )
+ == []
+ )
+    # now we artificially set the scheduled/processed times back in the past by ~2x the scheduler interval
+ # these are correctly processed ones, so we should get them back
+ fake_scheduled_time = arrow.utcnow().datetime - 2 * SCHEDULER_INTERVAL
+ fake_processed_time = fake_scheduled_time + 0.5 * SCHEDULER_INTERVAL
+ comp_runs_marked_as_processed = (
+ cast( # NOTE: the cast here is ok since gather will raise if there is an error
+ list[CompRunsAtDB],
+ await asyncio.gather(
+ *(
+ CompRunsRepository(aiopg_engine).update(
+ user_id=comp_run.user_id,
+ project_id=comp_run.project_uuid,
+ iteration=comp_run.iteration,
+ scheduled=fake_scheduled_time,
+ processed=fake_processed_time,
+ )
+ for comp_run in comp_runs_marked_as_processed
+ )
+ ),
+ )
+ )
+ # now we should get them
+ assert sorted(
+ await CompRunsRepository(aiopg_engine).list(
+ never_scheduled=False, processed_since=SCHEDULER_INTERVAL
+ ),
+ key=lambda x: x.iteration,
+ ) == sorted(comp_runs_marked_as_processed, key=lambda x: x.iteration)
+
+    # now some of them are still waiting for processing or were never processed (i.e. processed time is either null or before scheduled time)
+ comp_runs_waiting_for_processing_or_never_processed = random.sample(
+ comp_runs_marked_as_processed, k=6
+ )
+ comp_runs_marked_as_processed = [
+ r
+ for r in comp_runs_marked_as_processed
+ if r not in comp_runs_waiting_for_processing_or_never_processed
+ ]
+ # now we artificially change the processed time to be before the scheduled time
+ comp_runs_waiting_for_processing_or_never_processed = cast(
+ list[CompRunsAtDB],
+ await asyncio.gather(
+ *(
+ CompRunsRepository(aiopg_engine).update(
+ user_id=comp_run.user_id,
+ project_id=comp_run.project_uuid,
+ iteration=comp_run.iteration,
+ scheduled=fake_processed_time, # NOTE: we invert here the timings
+ processed=random.choice([fake_scheduled_time, None]), # noqa: S311
+ )
+ for comp_run in comp_runs_waiting_for_processing_or_never_processed
+ )
+ ),
+ )
+ # so the processed ones shall remain
+ assert sorted(
+ await CompRunsRepository(aiopg_engine).list(
+ never_scheduled=False, processed_since=SCHEDULER_INTERVAL
+ ),
+ key=lambda x: x.iteration,
+ ) == sorted(comp_runs_marked_as_processed, key=lambda x: x.iteration)
+ # the ones waiting for scheduling now
+ assert sorted(
+ await CompRunsRepository(aiopg_engine).list(
+ never_scheduled=False, scheduled_since=SCHEDULER_INTERVAL
+ ),
+ key=lambda x: x.iteration,
+ ) == sorted(
+ comp_runs_waiting_for_processing_or_never_processed, key=lambda x: x.iteration
+ )
+
+
+async def test_create(
+ aiopg_engine,
+ fake_user_id: UserID,
+ fake_project_id: ProjectID,
+ run_metadata: RunMetadataDict,
+ faker: Faker,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+ create_cluster: Callable[..., Awaitable[Cluster]],
+):
+ with pytest.raises(ProjectNotFoundError):
+ await CompRunsRepository(aiopg_engine).create(
+ user_id=fake_user_id,
+ project_id=fake_project_id,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ published_project = await publish_project()
+ with pytest.raises(UserNotFoundError):
+ await CompRunsRepository(aiopg_engine).create(
+ user_id=fake_user_id,
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+
+ created = await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ got = await CompRunsRepository(aiopg_engine).get(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ )
+ assert created == got
+
+    # creating a second one auto-increments the iteration
+ created = await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ assert created != got
+ assert created.iteration == got.iteration + 1
+
+ # getting without specifying the iteration returns the latest
+ got = await CompRunsRepository(aiopg_engine).get(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ )
+ assert created == got
+
+ with pytest.raises(ClusterNotFoundError):
+ await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=faker.pyint(min_value=1),
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ cluster = await create_cluster(published_project.user)
+ await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=cluster.id,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+
+
+async def test_update(
+ aiopg_engine,
+ fake_user_id: UserID,
+ fake_project_id: ProjectID,
+ run_metadata: RunMetadataDict,
+ faker: Faker,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+):
+ # this updates nothing but also does not complain
+ updated = await CompRunsRepository(aiopg_engine).update(
+ fake_user_id, fake_project_id, faker.pyint(min_value=1)
+ )
+ assert updated is None
+ # now let's create a valid one
+ published_project = await publish_project()
+ created = await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+
+ got = await CompRunsRepository(aiopg_engine).get(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ )
+ assert created == got
+
+ updated = await CompRunsRepository(aiopg_engine).update(
+ created.user_id,
+ created.project_uuid,
+ created.iteration,
+ scheduled=datetime.datetime.now(datetime.UTC),
+ )
+ assert updated is not None
+ assert created != updated
+ assert created.scheduled is None
+ assert updated.scheduled is not None
+
+
+async def test_set_run_result(
+ aiopg_engine,
+ run_metadata: RunMetadataDict,
+ faker: Faker,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+):
+ published_project = await publish_project()
+ created = await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ got = await CompRunsRepository(aiopg_engine).get(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ )
+ assert created == got
+ assert created.result is not RunningState.PENDING
+ assert created.ended is None
+
+ updated = await CompRunsRepository(aiopg_engine).set_run_result(
+ user_id=created.user_id,
+ project_id=created.project_uuid,
+ iteration=created.iteration,
+ result_state=RunningState.PENDING,
+ final_state=False,
+ )
+ assert updated
+ assert updated != created
+ assert updated.result is RunningState.PENDING
+ assert updated.ended is None
+
+ final_updated = await CompRunsRepository(aiopg_engine).set_run_result(
+ user_id=created.user_id,
+ project_id=created.project_uuid,
+ iteration=created.iteration,
+ result_state=RunningState.ABORTED,
+ final_state=True,
+ )
+ assert final_updated
+ assert final_updated != updated
+ assert final_updated.result is RunningState.ABORTED
+ assert final_updated.ended is not None
+
+
+async def test_mark_for_cancellation(
+ aiopg_engine,
+ run_metadata: RunMetadataDict,
+ faker: Faker,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+):
+ published_project = await publish_project()
+ created = await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ got = await CompRunsRepository(aiopg_engine).get(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ )
+ assert created == got
+ assert created.cancelled is None
+
+ updated = await CompRunsRepository(aiopg_engine).mark_for_cancellation(
+ user_id=created.user_id,
+ project_id=created.project_uuid,
+ iteration=created.iteration,
+ )
+ assert updated
+ assert updated != created
+ assert updated.cancelled is not None
+
+
+async def test_mark_for_scheduling(
+ aiopg_engine,
+ run_metadata: RunMetadataDict,
+ faker: Faker,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+):
+ published_project = await publish_project()
+ created = await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ got = await CompRunsRepository(aiopg_engine).get(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ )
+ assert created == got
+ assert created.scheduled is None
+ assert created.processed is None
+
+ updated = await CompRunsRepository(aiopg_engine).mark_for_scheduling(
+ user_id=created.user_id,
+ project_id=created.project_uuid,
+ iteration=created.iteration,
+ )
+ assert updated
+ assert updated != created
+ assert updated.scheduled is not None
+ assert updated.processed is None
+
+
+async def test_mark_scheduling_done(
+ aiopg_engine,
+ run_metadata: RunMetadataDict,
+ faker: Faker,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+):
+ published_project = await publish_project()
+ created = await CompRunsRepository(aiopg_engine).create(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ iteration=None,
+ metadata=run_metadata,
+ use_on_demand_clusters=faker.pybool(),
+ )
+ got = await CompRunsRepository(aiopg_engine).get(
+ user_id=published_project.user["id"],
+ project_id=published_project.project.uuid,
+ )
+ assert created == got
+ assert created.scheduled is None
+ assert created.processed is None
+
+ updated = await CompRunsRepository(aiopg_engine).mark_as_processed(
+ user_id=created.user_id,
+ project_id=created.project_uuid,
+ iteration=created.iteration,
+ )
+ assert updated
+ assert updated != created
+ assert updated.scheduled is None
+ assert updated.processed is not None
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py
new file mode 100644
index 00000000000..ac5bbbcc942
--- /dev/null
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py
@@ -0,0 +1,371 @@
+# pylint:disable=unused-variable
+# pylint:disable=unused-argument
+# pylint:disable=redefined-outer-name
+# pylint:disable=no-value-for-parameter
+# pylint:disable=protected-access
+# pylint:disable=too-many-arguments
+# pylint:disable=no-name-in-module
+# pylint: disable=too-many-statements
+
+
+import asyncio
+import datetime
+import logging
+from collections.abc import AsyncIterator, Awaitable, Callable
+from typing import Any
+from unittest import mock
+
+import pytest
+from _helpers import PublishedProject, assert_comp_runs, assert_comp_runs_empty
+from fastapi import FastAPI
+from models_library.clusters import DEFAULT_CLUSTER_ID
+from models_library.projects import ProjectAtDB
+from models_library.projects_state import RunningState
+from pytest_mock.plugin import MockerFixture
+from servicelib.rabbitmq._client import RabbitMQClient
+from servicelib.redis import CouldNotAcquireLockError
+from servicelib.utils import limited_gather
+from simcore_service_director_v2.core.errors import PipelineNotFoundError
+from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB
+from simcore_service_director_v2.models.comp_runs import RunMetadataDict
+from simcore_service_director_v2.modules.comp_scheduler._manager import (
+ _LOST_TASKS_FACTOR,
+ SCHEDULER_INTERVAL,
+ run_new_pipeline,
+ schedule_all_pipelines,
+ stop_pipeline,
+)
+from simcore_service_director_v2.modules.comp_scheduler._models import (
+ SchedulePipelineRabbitMessage,
+)
+from simcore_service_director_v2.modules.db.repositories.comp_runs import (
+ CompRunsRepository,
+)
+from sqlalchemy.ext.asyncio import AsyncEngine
+
+pytest_simcore_core_services_selection = ["postgres", "rabbit", "redis"]
+pytest_simcore_ops_services_selection = ["adminer", "redis-commander"]
+
+
+@pytest.fixture
+async def scheduler_rabbit_client_parser(
+ create_rabbitmq_client: Callable[[str], RabbitMQClient], mocker: MockerFixture
+) -> AsyncIterator[mock.AsyncMock]:
+ client = create_rabbitmq_client("scheduling_pytest_consumer")
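+    # NOTE: the AsyncMock below is registered as the queue consumer, so tests can
+    # assert exactly which SchedulePipelineRabbitMessage payloads the manager published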
+ mock = mocker.AsyncMock(return_value=True)
+ queue_name, _ = await client.subscribe(
+ SchedulePipelineRabbitMessage.get_channel_name(), mock, exclusive_queue=False
+ )
+ yield mock
+ await client.unsubscribe(queue_name)
+
+
+@pytest.fixture
+def with_fast_scheduling(mocker: MockerFixture) -> None:
+ from simcore_service_director_v2.modules.comp_scheduler import _manager
+
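+    # shrink SCHEDULER_INTERVAL to 10ms so the manager's periodic task fires
+    # almost immediately during the test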
+ mocker.patch.object(
+ _manager, "SCHEDULER_INTERVAL", datetime.timedelta(seconds=0.01)
+ )
+
+
+@pytest.fixture
+def mocked_schedule_all_pipelines(mocker: MockerFixture) -> mock.Mock:
+ return mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler._manager.schedule_all_pipelines",
+ autospec=True,
+ )
+
+
+async def test_manager_starts_and_auto_schedules_pipelines(
+ with_fast_scheduling: None,
+ with_disabled_scheduler_worker: mock.Mock,
+ mocked_schedule_all_pipelines: mock.Mock,
+ initialized_app: FastAPI,
+ sqlalchemy_async_engine: AsyncEngine,
+):
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
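+    # the manager's background task is expected to have invoked schedule_all_pipelines
+    # at least once shortly after application startup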
+ mocked_schedule_all_pipelines.assert_called()
+
+
+async def test_schedule_all_pipelines_empty_db(
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_worker: mock.Mock,
+ initialized_app: FastAPI,
+ scheduler_rabbit_client_parser: mock.AsyncMock,
+ sqlalchemy_async_engine: AsyncEngine,
+):
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
+
+ await schedule_all_pipelines(initialized_app)
+
+ # check nothing was distributed
+ scheduler_rabbit_client_parser.assert_not_called()
+
+ # check comp_runs is still empty
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
+
+
+async def test_schedule_all_pipelines_concurrently_runs_exclusively_and_raises(
+ with_disabled_auto_scheduling: mock.Mock,
+ initialized_app: FastAPI,
+ mocker: MockerFixture,
+):
+ CONCURRENCY = 5
+    # NOTE: this ensures there is no flakiness: empty scheduling is very fast,
+    # so we slow down the limited_gather function
+ original_function = limited_gather
+
+ async def slow_limited_gather(*args, **kwargs):
+ result = await original_function(*args, **kwargs)
+        await asyncio.sleep(3)  # to ensure flakiness does not occur
+ return result
+
+ mock_function = mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler._manager.limited_gather",
+ autospec=True,
+ side_effect=slow_limited_gather,
+ )
+
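+    # fire several schedule_all_pipelines calls concurrently: the exclusive lock
+    # should let only one of them run, the others should raise CouldNotAcquireLockError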
+ results = await asyncio.gather(
+ *(schedule_all_pipelines(initialized_app) for _ in range(CONCURRENCY)),
+ return_exceptions=True,
+ )
+
+ assert results.count(None) == 1, f"Only one task should have run: {results}"
+ for r in results:
+ if r:
+ assert isinstance(r, CouldNotAcquireLockError)
+ mock_function.assert_called_once()
+
+
+async def test_schedule_all_pipelines(
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_worker: mock.Mock,
+ initialized_app: FastAPI,
+ published_project: PublishedProject,
+ sqlalchemy_async_engine: AsyncEngine,
+ aiopg_engine,
+ run_metadata: RunMetadataDict,
+ scheduler_rabbit_client_parser: mock.AsyncMock,
+):
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
+ assert published_project.project.prj_owner
+ # now we schedule a pipeline
+ await run_new_pipeline(
+ initialized_app,
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ run_metadata=run_metadata,
+ use_on_demand_clusters=False,
+ )
+    # this directly schedules a new pipeline
+ scheduler_rabbit_client_parser.assert_called_once_with(
+ SchedulePipelineRabbitMessage(
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ iteration=1,
+ ).body()
+ )
+ scheduler_rabbit_client_parser.reset_mock()
+ comp_run = (await assert_comp_runs(sqlalchemy_async_engine, expected_total=1))[0]
+ assert comp_run.project_uuid == published_project.project.uuid
+ assert comp_run.user_id == published_project.project.prj_owner
+ assert comp_run.iteration == 1
+ assert comp_run.cancelled is None
+ assert comp_run.cluster_id == DEFAULT_CLUSTER_ID
+ assert comp_run.metadata == run_metadata
+ assert comp_run.result is RunningState.PUBLISHED
+ assert comp_run.scheduled is not None
+ assert comp_run.processed is None
+ start_schedule_time = comp_run.scheduled
+ start_modified_time = comp_run.modified
+
+    # this will not schedule the pipeline again since it was already scheduled
+ await schedule_all_pipelines(initialized_app)
+ scheduler_rabbit_client_parser.assert_not_called()
+ comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1)
+ comp_run = comp_runs[0]
+ assert comp_run.scheduled
+ assert comp_run.scheduled == start_schedule_time, "scheduled time changed!"
+ assert comp_run.cancelled is None
+ assert comp_run.modified == start_modified_time
+
+    # to simulate that the worker did its job, we set the timestamps in the past
+ await CompRunsRepository(aiopg_engine).update(
+ user_id=comp_run.user_id,
+ project_id=comp_run.project_uuid,
+ iteration=comp_run.iteration,
+ scheduled=comp_run.scheduled - 1.5 * SCHEDULER_INTERVAL,
+ processed=comp_run.scheduled - 1.1 * SCHEDULER_INTERVAL,
+ )
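+    # both timestamps are now older than SCHEDULER_INTERVAL, i.e. the run looks due again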
+
+    # scheduling again now behaves as if the scheduler interval had passed
+    # and triggers a new schedule
+ await schedule_all_pipelines(initialized_app)
+ scheduler_rabbit_client_parser.assert_called_once_with(
+ SchedulePipelineRabbitMessage(
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ iteration=1,
+ ).body()
+ )
+ scheduler_rabbit_client_parser.reset_mock()
+ comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1)
+ comp_run = comp_runs[0]
+ assert comp_run.scheduled is not None
+ assert comp_run.scheduled > start_schedule_time
+ last_schedule_time = comp_run.scheduled
+ assert comp_run.cancelled is None
+ assert comp_run.modified > start_modified_time
+
+ # now we stop the pipeline, which should instantly trigger a schedule
+ await stop_pipeline(
+ initialized_app,
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ )
+ await schedule_all_pipelines(initialized_app)
+ scheduler_rabbit_client_parser.assert_called_once_with(
+ SchedulePipelineRabbitMessage(
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ iteration=1,
+ ).body()
+ )
+ scheduler_rabbit_client_parser.reset_mock()
+ comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1)
+ comp_run = comp_runs[0]
+ assert comp_run.scheduled is not None
+ assert comp_run.scheduled > last_schedule_time
+ assert comp_run.cancelled is not None
+
+
+async def test_schedule_all_pipelines_logs_error_if_it_finds_old_pipelines(
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_worker: mock.Mock,
+ initialized_app: FastAPI,
+ published_project: PublishedProject,
+ sqlalchemy_async_engine: AsyncEngine,
+ aiopg_engine,
+ run_metadata: RunMetadataDict,
+ scheduler_rabbit_client_parser: mock.AsyncMock,
+ caplog: pytest.LogCaptureFixture,
+):
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
+ assert published_project.project.prj_owner
+ # now we schedule a pipeline
+ await run_new_pipeline(
+ initialized_app,
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ run_metadata=run_metadata,
+ use_on_demand_clusters=False,
+ )
+    # this directly schedules a new pipeline
+ scheduler_rabbit_client_parser.assert_called_once_with(
+ SchedulePipelineRabbitMessage(
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ iteration=1,
+ ).body()
+ )
+ scheduler_rabbit_client_parser.reset_mock()
+ comp_run = (await assert_comp_runs(sqlalchemy_async_engine, expected_total=1))[0]
+ assert comp_run.project_uuid == published_project.project.uuid
+ assert comp_run.user_id == published_project.project.prj_owner
+ assert comp_run.iteration == 1
+ assert comp_run.cancelled is None
+ assert comp_run.cluster_id == DEFAULT_CLUSTER_ID
+ assert comp_run.metadata == run_metadata
+ assert comp_run.result is RunningState.PUBLISHED
+ assert comp_run.scheduled is not None
+ start_schedule_time = comp_run.scheduled
+ start_modified_time = comp_run.modified
+
+    # this will not schedule the pipeline again since it was already scheduled
+ await schedule_all_pipelines(initialized_app)
+ scheduler_rabbit_client_parser.assert_not_called()
+ comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1)
+ comp_run = comp_runs[0]
+ assert comp_run.scheduled == start_schedule_time, "scheduled time changed!"
+ assert comp_run.cancelled is None
+ assert comp_run.modified == start_modified_time
+
+ # now we artificially set the last_schedule time well in the past
+ await CompRunsRepository(aiopg_engine).update(
+ comp_run.user_id,
+ comp_run.project_uuid,
+ comp_run.iteration,
+ scheduled=datetime.datetime.now(tz=datetime.UTC)
+ - SCHEDULER_INTERVAL * (_LOST_TASKS_FACTOR + 1),
+ )
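+    # the run now appears scheduled but never processed for more than
+    # _LOST_TASKS_FACTOR scheduler intervals, which the manager reports as lost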
+ with caplog.at_level(logging.ERROR):
+ await schedule_all_pipelines(initialized_app)
+ assert (
+ "found 1 lost pipelines, they will be re-scheduled now" in caplog.messages
+ )
+ scheduler_rabbit_client_parser.assert_called_once_with(
+ SchedulePipelineRabbitMessage(
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ iteration=1,
+ ).body()
+ )
+ scheduler_rabbit_client_parser.reset_mock()
+ comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1)
+ comp_run = comp_runs[0]
+ assert comp_run.scheduled is not None
+ assert comp_run.scheduled > start_schedule_time
+ assert comp_run.cancelled is None
+ assert comp_run.modified > start_modified_time
+
+
+async def test_empty_pipeline_is_not_scheduled(
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_worker: mock.Mock,
+ initialized_app: FastAPI,
+ registered_user: Callable[..., dict[str, Any]],
+ project: Callable[..., Awaitable[ProjectAtDB]],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ run_metadata: RunMetadataDict,
+ sqlalchemy_async_engine: AsyncEngine,
+ scheduler_rabbit_client_parser: mock.AsyncMock,
+ caplog: pytest.LogCaptureFixture,
+):
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
+ user = registered_user()
+ empty_project = await project(user)
+
+ # the project is not in the comp_pipeline, therefore scheduling it should fail
+ with pytest.raises(PipelineNotFoundError):
+ await run_new_pipeline(
+ initialized_app,
+ user_id=user["id"],
+ project_id=empty_project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ run_metadata=run_metadata,
+ use_on_demand_clusters=False,
+ )
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
+ scheduler_rabbit_client_parser.assert_not_called()
+
+ # create the empty pipeline now
+ await create_pipeline(project_id=f"{empty_project.uuid}")
+
+    # creating a run with an empty pipeline is useless; check that the scheduler does not kick in
+ with caplog.at_level(logging.WARNING):
+ await run_new_pipeline(
+ initialized_app,
+ user_id=user["id"],
+ project_id=empty_project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ run_metadata=run_metadata,
+ use_on_demand_clusters=False,
+ )
+ assert len(caplog.records) == 1
+ assert "no computational dag defined" in caplog.records[0].message
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
+ scheduler_rabbit_client_parser.assert_not_called()
diff --git a/services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
similarity index 55%
rename from services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py
rename to services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
index 6f016f297c0..f6a041b934e 100644
--- a/services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
@@ -16,12 +16,14 @@
from typing import Any, cast
from unittest import mock
-import aiopg
-import aiopg.sa
-import httpx
import pytest
-from _helpers import PublishedProject, RunningProject
-from dask.distributed import SpecCluster
+from _helpers import (
+ PublishedProject,
+ RunningProject,
+ assert_comp_runs,
+ assert_comp_runs_empty,
+ assert_comp_tasks,
+)
from dask_task_models_library.container_tasks.errors import TaskCancelledError
from dask_task_models_library.container_tasks.events import TaskProgressEvent
from dask_task_models_library.container_tasks.io import TaskOutputData
@@ -43,14 +45,9 @@
from models_library.users import UserID
from pydantic import TypeAdapter
from pytest_mock.plugin import MockerFixture
-from pytest_simcore.helpers.typing_env import EnvVarsDict
from servicelib.rabbitmq import RabbitMQClient
-from servicelib.redis import CouldNotAcquireLockError
-from settings_library.rabbit import RabbitSettings
-from settings_library.redis import RedisSettings
from simcore_postgres_database.models.comp_runs import comp_runs
-from simcore_postgres_database.models.comp_tasks import NodeClass, comp_tasks
-from simcore_service_director_v2.core.application import init_app
+from simcore_postgres_database.models.comp_tasks import NodeClass
from simcore_service_director_v2.core.errors import (
ClustersKeeperNotAvailableError,
ComputationalBackendNotConnectedError,
@@ -59,28 +56,32 @@
ComputationalBackendTaskResultsNotReadyError,
ComputationalSchedulerChangedError,
ComputationalSchedulerError,
- ConfigurationError,
- PipelineNotFoundError,
)
-from simcore_service_director_v2.core.settings import AppSettings
from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB
from simcore_service_director_v2.models.comp_runs import CompRunsAtDB, RunMetadataDict
from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB, Image
from simcore_service_director_v2.models.dask_subsystem import DaskClientTaskState
-from simcore_service_director_v2.modules.comp_scheduler import (
+from simcore_service_director_v2.modules.comp_scheduler._manager import (
+ run_new_pipeline,
+ stop_pipeline,
+)
+from simcore_service_director_v2.modules.comp_scheduler._scheduler_base import (
BaseCompScheduler,
- get_scheduler,
)
-from simcore_service_director_v2.modules.comp_scheduler._dask_scheduler import (
+from simcore_service_director_v2.modules.comp_scheduler._scheduler_dask import (
DaskScheduler,
)
+from simcore_service_director_v2.modules.comp_scheduler._utils import COMPLETED_STATES
+from simcore_service_director_v2.modules.comp_scheduler._worker import (
+ _get_scheduler_worker,
+)
from simcore_service_director_v2.modules.dask_client import (
DaskJobID,
PublishedComputationTask,
)
-from simcore_service_director_v2.utils.comp_scheduler import COMPLETED_STATES
from simcore_service_director_v2.utils.dask_client_utils import TaskHandlers
-from starlette.testclient import TestClient
+from sqlalchemy import and_
+from sqlalchemy.ext.asyncio import AsyncEngine
from tenacity.asyncio import AsyncRetrying
from tenacity.retry import retry_if_exception_type
from tenacity.stop import stop_after_delay
@@ -113,119 +114,8 @@ def _assert_dask_client_correctly_initialized(
)
-async def _assert_comp_run_db(
- aiopg_engine: aiopg.sa.engine.Engine,
- pub_project: PublishedProject,
- expected_state: RunningState,
-) -> None:
- # check the database is correctly updated, the run is published
- async with aiopg_engine.acquire() as conn:
- result = await conn.execute(
- comp_runs.select().where(
- (comp_runs.c.user_id == pub_project.project.prj_owner)
- & (comp_runs.c.project_uuid == f"{pub_project.project.uuid}")
- ) # there is only one entry
- )
- run_entry = CompRunsAtDB.model_validate(await result.first())
- assert (
- run_entry.result == expected_state
- ), f"comp_runs: expected state '{expected_state}, found '{run_entry.result}'"
-
-
-async def _assert_comp_tasks_db(
- aiopg_engine: aiopg.sa.engine.Engine,
- project_uuid: ProjectID,
- task_ids: list[NodeID],
- *,
- expected_state: RunningState,
- expected_progress: float | None,
-) -> None:
- # check the database is correctly updated, the run is published
- async with aiopg_engine.acquire() as conn:
- result = await conn.execute(
- comp_tasks.select().where(
- (comp_tasks.c.project_id == f"{project_uuid}")
- & (comp_tasks.c.node_id.in_([f"{n}" for n in task_ids]))
- ) # there is only one entry
- )
- tasks = TypeAdapter(list[CompTaskAtDB]).validate_python(await result.fetchall())
- assert all(
- t.state == expected_state for t in tasks
- ), f"expected state: {expected_state}, found: {[t.state for t in tasks]}"
- assert all(
- t.progress == expected_progress for t in tasks
- ), f"{expected_progress=}, found: {[t.progress for t in tasks]}"
-
-
-async def schedule_all_pipelines(scheduler: BaseCompScheduler) -> None:
- # NOTE: we take a copy of the pipelines, as this could change quickly if there are
- # misconfigured pipelines that would be removed from the scheduler
- # NOTE: we simulate multiple dv-2 replicas by running several times
- # the same pipeline scheduling
- local_pipelines = deepcopy(scheduler._scheduled_pipelines) # noqa: SLF001
- results = await asyncio.gather(
- *(
- scheduler._schedule_pipeline( # noqa: SLF001
- user_id=user_id,
- project_id=project_id,
- iteration=iteration,
- wake_up_callback=params.scheduler_waker.set,
- )
- for _ in range(3)
- for (
- user_id,
- project_id,
- iteration,
- ), params in local_pipelines.items()
- ),
- return_exceptions=True,
- )
- # we should have exceptions 2/3 of the time
- could_not_acquire_lock_count = sum(
- isinstance(r, CouldNotAcquireLockError) for r in results
- )
- total_results_count = len(results)
-
- # Check if 2/3 of the results are CouldNotAcquireLockError
- # checks that scheduling is done exclusively
- assert could_not_acquire_lock_count == (2 / 3) * total_results_count
-
-
-@pytest.fixture
-def minimal_dask_scheduler_config(
- mock_env: EnvVarsDict,
- postgres_host_config: dict[str, str],
- monkeypatch: pytest.MonkeyPatch,
- rabbit_service: RabbitSettings,
- redis_service: RedisSettings,
- faker: Faker,
-) -> None:
- """set a minimal configuration for testing the dask connection only"""
- monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false")
- monkeypatch.setenv("DIRECTOR_V0_ENABLED", "0")
- monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1")
- monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "1")
- monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO")
- monkeypatch.setenv("S3_ENDPOINT", faker.url())
- monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr())
- monkeypatch.setenv("S3_REGION", faker.pystr())
- monkeypatch.setenv("S3_SECRET_KEY", faker.pystr())
- monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr())
-
-
-@pytest.fixture
-def scheduler(
- minimal_dask_scheduler_config: None,
- aiopg_engine: aiopg.sa.engine.Engine,
- minimal_app: FastAPI,
-) -> BaseCompScheduler:
- scheduler = get_scheduler(minimal_app)
- assert scheduler is not None
- return scheduler
-
-
@pytest.fixture
-def mocked_dask_client(mocker: MockerFixture) -> mock.MagicMock:
+def mocked_dask_client(mocker: MockerFixture) -> mock.Mock:
mocked_dask_client = mocker.patch(
"simcore_service_director_v2.modules.dask_clients_pool.DaskClient",
autospec=True,
@@ -237,246 +127,80 @@ def mocked_dask_client(mocker: MockerFixture) -> mock.MagicMock:
@pytest.fixture
def mocked_parse_output_data_fct(mocker: MockerFixture) -> mock.Mock:
return mocker.patch(
- "simcore_service_director_v2.modules.comp_scheduler._dask_scheduler.parse_output_data",
+ "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.parse_output_data",
autospec=True,
)
@pytest.fixture
-def mocked_clean_task_output_fct(mocker: MockerFixture) -> mock.MagicMock:
+def mocked_clean_task_output_fct(mocker: MockerFixture) -> mock.Mock:
return mocker.patch(
- "simcore_service_director_v2.modules.comp_scheduler._dask_scheduler.clean_task_output_and_log_files_if_invalid",
+ "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.clean_task_output_and_log_files_if_invalid",
return_value=None,
autospec=True,
)
@pytest.fixture
-def with_disabled_auto_scheduling(mocker: MockerFixture) -> mock.MagicMock:
-    """disables the scheduler task, note that it needs to be triggered manually then"""
-
- def _fake_starter(
- self: BaseCompScheduler,
- *args,
- **kwargs,
- ):
- scheduler_task = mocker.MagicMock()
- scheduler_task_wake_up_event = mocker.MagicMock()
- return scheduler_task, scheduler_task_wake_up_event
-
+def mocked_clean_task_output_and_log_files_if_invalid(
+ mocker: MockerFixture,
+) -> mock.Mock:
return mocker.patch(
- "simcore_service_director_v2.modules.comp_scheduler._base_scheduler.BaseCompScheduler._start_scheduling",
+ "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.clean_task_output_and_log_files_if_invalid",
autospec=True,
- side_effect=_fake_starter,
)
@pytest.fixture
-async def minimal_app(async_client: httpx.AsyncClient) -> FastAPI:
- # must use the minimal app from from the `async_client``
- # the`client` uses starlette's TestClient which spawns
- # a new thread on which it creates a new loop
- # causing issues downstream with coroutines not
- # being created on the same loop
- return async_client._transport.app # type: ignore # noqa: SLF001
-
-
-@pytest.fixture
-def mocked_clean_task_output_and_log_files_if_invalid(mocker: MockerFixture) -> None:
- mocker.patch(
- "simcore_service_director_v2.modules.comp_scheduler._dask_scheduler.clean_task_output_and_log_files_if_invalid",
- autospec=True,
- )
-
-
-async def test_scheduler_gracefully_starts_and_stops(
- minimal_dask_scheduler_config: None,
- aiopg_engine: aiopg.sa.engine.Engine,
- dask_spec_local_cluster: SpecCluster,
- minimal_app: FastAPI,
-):
- # check it started correctly
- assert get_scheduler(minimal_app) is not None
-
-
-@pytest.mark.parametrize(
- "missing_dependency",
- [
- "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED",
- ],
-)
-def test_scheduler_raises_exception_for_missing_dependencies(
- minimal_dask_scheduler_config: None,
- aiopg_engine: aiopg.sa.engine.Engine,
- dask_spec_local_cluster: SpecCluster,
- monkeypatch: pytest.MonkeyPatch,
- missing_dependency: str,
-):
- # disable the dependency
- monkeypatch.setenv(missing_dependency, "0")
- # create the client
- settings = AppSettings.create_from_envs()
- app = init_app(settings)
-
- with pytest.raises(ConfigurationError), TestClient(
- app, raise_server_exceptions=True
- ) as _:
- pass
-
-
-async def test_empty_pipeline_is_not_scheduled(
- with_disabled_auto_scheduling: None,
- scheduler: BaseCompScheduler,
- registered_user: Callable[..., dict[str, Any]],
- project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
- aiopg_engine: aiopg.sa.engine.Engine,
- run_metadata: RunMetadataDict,
-):
- user = registered_user()
- empty_project = await project(user)
-
- # the project is not in the comp_pipeline, therefore scheduling it should fail
- with pytest.raises(PipelineNotFoundError):
- await scheduler.run_new_pipeline(
- user_id=user["id"],
- project_id=empty_project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
- run_metadata=run_metadata,
- use_on_demand_clusters=False,
- )
- # create the empty pipeline now
- pipeline(project_id=f"{empty_project.uuid}")
-
- # creating a run with an empty pipeline is useless, check the scheduler is not kicking in
- await scheduler.run_new_pipeline(
- user_id=user["id"],
- project_id=empty_project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
- run_metadata=run_metadata,
- use_on_demand_clusters=False,
- )
- assert len(scheduler._scheduled_pipelines) == 0 # noqa: SLF001
- # check the database is empty
- async with aiopg_engine.acquire() as conn:
- result = await conn.scalar(
- comp_runs.select().where(
- (comp_runs.c.user_id == user["id"])
- & (comp_runs.c.project_uuid == f"{empty_project.uuid}")
- ) # there is only one entry
- )
- assert result is None
-
-
-async def test_misconfigured_pipeline_is_not_scheduled(
- with_disabled_auto_scheduling: None,
- scheduler: BaseCompScheduler,
- registered_user: Callable[..., dict[str, Any]],
- project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
- fake_workbench_without_outputs: dict[str, Any],
- fake_workbench_adjacency: dict[str, Any],
- aiopg_engine: aiopg.sa.engine.Engine,
- run_metadata: RunMetadataDict,
-):
- """A pipeline which comp_tasks are missing should not be scheduled.
- It shall be aborted and shown as such in the comp_runs db"""
- user = registered_user()
- sleepers_project = await project(user, workbench=fake_workbench_without_outputs)
- pipeline(
- project_id=f"{sleepers_project.uuid}",
- dag_adjacency_list=fake_workbench_adjacency,
- )
- # check the pipeline is correctly added to the scheduled pipelines
- await scheduler.run_new_pipeline(
- user_id=user["id"],
- project_id=sleepers_project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
- run_metadata=run_metadata,
- use_on_demand_clusters=False,
- )
- assert len(scheduler._scheduled_pipelines) == 1 # noqa: SLF001
- for (
- u_id,
- p_id,
- it,
- ) in scheduler._scheduled_pipelines: # noqa: SLF001
- assert u_id == user["id"]
- assert p_id == sleepers_project.uuid
- assert it > 0
- # check the database was properly updated
- async with aiopg_engine.acquire() as conn:
- result = await conn.execute(
- comp_runs.select().where(
- (comp_runs.c.user_id == user["id"])
- & (comp_runs.c.project_uuid == f"{sleepers_project.uuid}")
- ) # there is only one entry
- )
- run_entry = CompRunsAtDB.model_validate(await result.first())
- assert run_entry.result == RunningState.PUBLISHED
- # let the scheduler kick in
- await schedule_all_pipelines(scheduler)
- # check the scheduled pipelines is again empty since it's misconfigured
- assert len(scheduler._scheduled_pipelines) == 0 # noqa: SLF001
- # check the database entry is correctly updated
- async with aiopg_engine.acquire() as conn:
- result = await conn.execute(
- comp_runs.select().where(
- (comp_runs.c.user_id == user["id"])
- & (comp_runs.c.project_uuid == f"{sleepers_project.uuid}")
- ) # there is only one entry
- )
- run_entry = CompRunsAtDB.model_validate(await result.first())
- assert run_entry.result == RunningState.ABORTED
- assert run_entry.metadata == run_metadata
+def scheduler_api(initialized_app: FastAPI) -> BaseCompScheduler:
+ return _get_scheduler_worker(initialized_app)
async def _assert_start_pipeline(
- aiopg_engine,
+ app: FastAPI,
+ *,
+ sqlalchemy_async_engine: AsyncEngine,
published_project: PublishedProject,
- scheduler: BaseCompScheduler,
run_metadata: RunMetadataDict,
-) -> list[CompTaskAtDB]:
+) -> tuple[CompRunsAtDB, list[CompTaskAtDB]]:
exp_published_tasks = deepcopy(published_project.tasks)
assert published_project.project.prj_owner
- await scheduler.run_new_pipeline(
+ await run_new_pipeline(
+ app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
- assert (
- len(scheduler._scheduled_pipelines) == 1 # noqa: SLF001
- ), "the pipeline is not scheduled!"
- for (
- u_id,
- p_id,
- it,
- ) in scheduler._scheduled_pipelines: # noqa: SLF001
- assert u_id == published_project.project.prj_owner
- assert p_id == published_project.project.uuid
- assert it > 0
# check the database is correctly updated, the run is published
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.PUBLISHED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in exp_published_tasks],
+ runs = await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.PUBLISHED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in exp_published_tasks],
expected_state=RunningState.PUBLISHED,
expected_progress=None,
)
- return exp_published_tasks
+ return runs[0], exp_published_tasks
-async def _assert_schedule_pipeline_PENDING( # noqa: N802
- aiopg_engine,
+async def _assert_publish_in_dask_backend(
+ sqlalchemy_async_engine: AsyncEngine,
published_project: PublishedProject,
published_tasks: list[CompTaskAtDB],
mocked_dask_client: mock.MagicMock,
scheduler: BaseCompScheduler,
-) -> list[CompTaskAtDB]:
+) -> tuple[list[CompTaskAtDB], dict[NodeID, Callable[[], None]]]:
expected_pending_tasks = [
published_tasks[1],
published_tasks[3],
@@ -488,26 +212,42 @@ async def _return_tasks_pending(job_ids: list[str]) -> list[DaskClientTaskState]
return [DaskClientTaskState.PENDING for job_id in job_ids]
mocked_dask_client.get_tasks_status.side_effect = _return_tasks_pending
- await schedule_all_pipelines(scheduler)
+ assert published_project.project.prj_owner
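+    # a single scheduler pass should publish the first batch of runnable tasks
+    # to the (mocked) dask backend and mark them PENDING in the database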
+ await scheduler.apply(
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ iteration=1,
+ )
_assert_dask_client_correctly_initialized(mocked_dask_client, scheduler)
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.PUBLISHED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in expected_pending_tasks],
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.PUBLISHED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_pending_tasks],
expected_state=RunningState.PENDING,
expected_progress=None,
)
# the other tasks are still waiting in published state
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in published_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in published_tasks],
expected_state=RunningState.PUBLISHED,
expected_progress=None, # since we bypass the API entrypoint this is correct
)
# tasks were send to the backend
assert published_project.project.prj_owner is not None
+ assert isinstance(mocked_dask_client.send_computation_tasks, mock.Mock)
+ assert isinstance(mocked_dask_client.get_tasks_status, mock.Mock)
+ assert isinstance(mocked_dask_client.get_task_result, mock.Mock)
mocked_dask_client.send_computation_tasks.assert_has_calls(
calls=[
mock.call(
@@ -523,23 +263,39 @@ async def _return_tasks_pending(job_ids: list[str]) -> list[DaskClientTaskState]
],
any_order=True,
)
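+    # capture the callback passed to send_computation_tasks for each pending task,
+    # so tests can invoke it later if they need to simulate the backend waking up the scheduler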
+ task_to_callback_mapping = {
+ task.node_id: mocked_dask_client.send_computation_tasks.call_args_list[
+ i
+ ].kwargs["callback"]
+ for i, task in enumerate(expected_pending_tasks)
+ }
mocked_dask_client.send_computation_tasks.reset_mock()
mocked_dask_client.get_tasks_status.assert_not_called()
mocked_dask_client.get_task_result.assert_not_called()
# there is a second run of the scheduler to move comp_runs to pending, the rest does not change
- await schedule_all_pipelines(scheduler)
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.PENDING)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in expected_pending_tasks],
+ await scheduler.apply(
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ iteration=1,
+ )
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.PENDING,
+ where_statement=(comp_runs.c.user_id == published_project.project.prj_owner)
+ & (comp_runs.c.project_uuid == f"{published_project.project.uuid}"),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_pending_tasks],
expected_state=RunningState.PENDING,
expected_progress=None,
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in published_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in published_tasks],
expected_state=RunningState.PUBLISHED,
expected_progress=None,
)
@@ -549,7 +305,7 @@ async def _return_tasks_pending(job_ids: list[str]) -> list[DaskClientTaskState]
)
mocked_dask_client.get_tasks_status.reset_mock()
mocked_dask_client.get_task_result.assert_not_called()
- return expected_pending_tasks
+ return expected_pending_tasks, task_to_callback_mapping
@pytest.fixture
@@ -606,9 +362,9 @@ async def _assert_message_received(
return parsed_messages
-def _mock_send_computation_tasks(
+def _with_mock_send_computation_tasks(
tasks: list[CompTaskAtDB], mocked_dask_client: mock.MagicMock
-) -> None:
+) -> mock.Mock:
node_id_to_job_id_map = {task.node_id: task.job_id for task in tasks}
async def _send_computation_tasks(
@@ -625,6 +381,7 @@ async def _send_computation_tasks(
] # type: ignore
mocked_dask_client.send_computation_tasks.side_effect = _send_computation_tasks
+ return mocked_dask_client.send_computation_tasks
async def _trigger_progress_event(
@@ -653,35 +410,45 @@ async def _trigger_progress_event(
@pytest.mark.acceptance_test()
async def test_proper_pipeline_is_scheduled( # noqa: PLR0915
- with_disabled_auto_scheduling: None,
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
+ initialized_app: FastAPI,
mocked_dask_client: mock.MagicMock,
- scheduler: BaseCompScheduler,
- aiopg_engine: aiopg.sa.engine.Engine,
+ scheduler_api: BaseCompScheduler,
+ sqlalchemy_async_engine: AsyncEngine,
published_project: PublishedProject,
mocked_parse_output_data_fct: mock.Mock,
- mocked_clean_task_output_and_log_files_if_invalid: None,
+ mocked_clean_task_output_and_log_files_if_invalid: mock.Mock,
instrumentation_rabbit_client_parser: mock.AsyncMock,
resource_tracking_rabbit_client_parser: mock.AsyncMock,
run_metadata: RunMetadataDict,
):
- _mock_send_computation_tasks(published_project.tasks, mocked_dask_client)
-
- expected_published_tasks = await _assert_start_pipeline(
- aiopg_engine, published_project, scheduler, run_metadata
+ with_disabled_auto_scheduling.assert_called_once()
+ _with_mock_send_computation_tasks(published_project.tasks, mocked_dask_client)
+
+ #
+ # Initiate new pipeline run
+ #
+ run_in_db, expected_published_tasks = await _assert_start_pipeline(
+ initialized_app,
+ sqlalchemy_async_engine=sqlalchemy_async_engine,
+ published_project=published_project,
+ run_metadata=run_metadata,
)
+ with_disabled_scheduler_publisher.assert_called()
# -------------------------------------------------------------------------------
- # 1. first run will move comp_tasks to PENDING so the worker can take them
- expected_pending_tasks = await _assert_schedule_pipeline_PENDING(
- aiopg_engine,
+ # 1. first run will move comp_tasks to PENDING so the dask-worker can take them
+ expected_pending_tasks, _ = await _assert_publish_in_dask_backend(
+ sqlalchemy_async_engine,
published_project,
expected_published_tasks,
mocked_dask_client,
- scheduler,
+ scheduler_api,
)
# -------------------------------------------------------------------------------
- # 2.1. the worker might be taking the task, until we get a progress we do not know
+    # 2.1. the dask-worker might be taking the task; until we get a progress event we do not know
# whether it effectively started or it is still queued in the worker process
exp_started_task = expected_pending_tasks[0]
expected_pending_tasks.remove(exp_started_task)
@@ -697,28 +464,32 @@ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
]
mocked_dask_client.get_tasks_status.side_effect = _return_1st_task_running
-
- await schedule_all_pipelines(scheduler)
-
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.PENDING)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [exp_started_task.node_id],
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
expected_state=RunningState.PENDING,
- expected_progress=None,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in expected_pending_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[exp_started_task.node_id]
+ + [p.node_id for p in expected_pending_tasks],
expected_state=RunningState.PENDING,
expected_progress=None,
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in expected_published_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_published_tasks],
expected_state=RunningState.PUBLISHED,
expected_progress=None, # since we bypass the API entrypoint this is correct
)
@@ -730,41 +501,52 @@ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
mocked_dask_client.get_task_result.assert_not_called()
# -------------------------------------------------------------------------------
- # 3. the "worker" starts processing a task
- # here we trigger a progress from the worker
+    # 3. the dask-worker starts processing a task; here we simulate a progress event
assert exp_started_task.job_id
assert exp_started_task.project_id
assert exp_started_task.node_id
assert published_project.project.prj_owner
await _trigger_progress_event(
- scheduler,
+ scheduler_api,
job_id=exp_started_task.job_id,
user_id=published_project.project.prj_owner,
project_id=exp_started_task.project_id,
node_id=exp_started_task.node_id,
)
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
# comp_run, the comp_task switch to STARTED
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.STARTED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [exp_started_task.node_id],
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.STARTED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[exp_started_task.node_id],
expected_state=RunningState.STARTED,
expected_progress=0,
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in expected_pending_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_pending_tasks],
expected_state=RunningState.PENDING,
expected_progress=None,
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in expected_published_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_published_tasks],
expected_state=RunningState.PUBLISHED,
expected_progress=None,
)
@@ -774,6 +556,7 @@ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
)
mocked_dask_client.get_tasks_status.reset_mock()
mocked_dask_client.get_task_result.assert_not_called()
+ # check the metrics are properly published
messages = await _assert_message_received(
instrumentation_rabbit_client_parser,
1,
@@ -782,9 +565,7 @@ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
assert messages[0].metrics == "service_started"
assert messages[0].service_uuid == exp_started_task.node_id
- def _parser(x) -> RabbitResourceTrackingMessages:
- return TypeAdapter(RabbitResourceTrackingMessages).validate_json(x)
-
+ # check the RUT messages are properly published
messages = await _assert_message_received(
resource_tracking_rabbit_client_parser,
1,
@@ -793,7 +574,7 @@ def _parser(x) -> RabbitResourceTrackingMessages:
assert messages[0].node_id == exp_started_task.node_id
# -------------------------------------------------------------------------------
- # 4. the "worker" completed the task successfully
+ # 4. the dask-worker completed the task successfully
async def _return_1st_task_success(job_ids: list[str]) -> list[DaskClientTaskState]:
return [
(
@@ -810,15 +591,28 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
return TaskOutputData.model_validate({"out_1": None, "out_2": 45})
mocked_dask_client.get_task_result.side_effect = _return_random_task_result
- await schedule_all_pipelines(scheduler)
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.STARTED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [exp_started_task.node_id],
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.STARTED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[exp_started_task.node_id],
expected_state=RunningState.SUCCESS,
expected_progress=1,
)
+ # check metrics are published
messages = await _assert_message_received(
instrumentation_rabbit_client_parser,
1,
@@ -826,6 +620,7 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
)
assert messages[0].metrics == "service_stopped"
assert messages[0].service_uuid == exp_started_task.node_id
+ # check RUT messages are published
messages = await _assert_message_received(
resource_tracking_rabbit_client_parser,
1,
@@ -835,17 +630,17 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
completed_tasks = [exp_started_task]
next_pending_task = published_project.tasks[2]
expected_pending_tasks.append(next_pending_task)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [p.node_id for p in expected_pending_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_pending_tasks],
expected_state=RunningState.PENDING,
expected_progress=None,
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[
p.node_id
for p in published_project.tasks
if p not in expected_pending_tasks + completed_tasks
@@ -884,7 +679,7 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
mocked_parse_output_data_fct.reset_mock()
# -------------------------------------------------------------------------------
- # 6. the "worker" starts processing a task
+ # 6. the dask-worker starts processing a task
exp_started_task = next_pending_task
async def _return_2nd_task_running(job_ids: list[str]) -> list[DaskClientTaskState]:
@@ -901,18 +696,30 @@ async def _return_2nd_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
# trigger the scheduler, run state should keep to STARTED, task should be as well
assert exp_started_task.job_id
await _trigger_progress_event(
- scheduler,
+ scheduler_api,
job_id=exp_started_task.job_id,
user_id=published_project.project.prj_owner,
project_id=exp_started_task.project_id,
node_id=exp_started_task.node_id,
)
- await schedule_all_pipelines(scheduler)
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.STARTED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [exp_started_task.node_id],
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.STARTED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[exp_started_task.node_id],
expected_state=RunningState.STARTED,
expected_progress=0,
)
@@ -951,12 +758,27 @@ async def _return_2nd_task_failed(job_ids: list[str]) -> list[DaskClientTaskStat
mocked_dask_client.get_tasks_status.side_effect = _return_2nd_task_failed
mocked_dask_client.get_task_result.side_effect = None
- await schedule_all_pipelines(scheduler)
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.STARTED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [exp_started_task.node_id],
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+ mocked_clean_task_output_and_log_files_if_invalid.assert_called_once()
+ mocked_clean_task_output_and_log_files_if_invalid.reset_mock()
+
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.STARTED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[exp_started_task.node_id],
expected_state=RunningState.FAILED,
expected_progress=1,
)
@@ -1000,13 +822,26 @@ async def _return_3rd_task_success(job_ids: list[str]) -> list[DaskClientTaskSta
mocked_dask_client.get_task_result.side_effect = _return_random_task_result
# trigger the scheduler, it should switch to FAILED, as we are done
- await schedule_all_pipelines(scheduler)
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.FAILED)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+ mocked_clean_task_output_and_log_files_if_invalid.assert_not_called()
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.FAILED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [exp_started_task.node_id],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[exp_started_task.node_id],
expected_state=RunningState.SUCCESS,
expected_progress=1,
)
@@ -1020,7 +855,11 @@ async def _return_3rd_task_success(job_ids: list[str]) -> list[DaskClientTaskSta
2,
InstrumentationRabbitMessage.model_validate_json,
)
+
# NOTE: the service was fast and went directly to success
+ def _parser(x) -> RabbitResourceTrackingMessages:
+ return TypeAdapter(RabbitResourceTrackingMessages).validate_json(x)
+
assert messages[0].metrics == "service_started"
assert messages[0].service_uuid == exp_started_task.node_id
assert messages[1].metrics == "service_stopped"
@@ -1033,32 +872,309 @@ async def _return_3rd_task_success(job_ids: list[str]) -> list[DaskClientTaskSta
assert isinstance(messages[0], RabbitResourceTrackingStartedMessage)
assert isinstance(messages[1], RabbitResourceTrackingStoppedMessage)
- # the scheduled pipeline shall be removed
- assert scheduler._scheduled_pipelines == {} # noqa: SLF001
+
+@pytest.fixture
+async def with_started_project(
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
+ initialized_app: FastAPI,
+ sqlalchemy_async_engine: AsyncEngine,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+ mocked_dask_client: mock.Mock,
+ run_metadata: RunMetadataDict,
+ scheduler_api: BaseCompScheduler,
+ instrumentation_rabbit_client_parser: mock.AsyncMock,
+ resource_tracking_rabbit_client_parser: mock.AsyncMock,
+) -> RunningProject:
+ with_disabled_auto_scheduling.assert_called_once()
+ published_project = await publish_project()
+ #
+ # 1. Initiate new pipeline run
+ #
+ run_in_db, expected_published_tasks = await _assert_start_pipeline(
+ initialized_app,
+ sqlalchemy_async_engine=sqlalchemy_async_engine,
+ published_project=published_project,
+ run_metadata=run_metadata,
+ )
+ with_disabled_scheduler_publisher.assert_called_once()
+
+ #
+    # 2. This runs the scheduler until the project is scheduled and its tasks are published in the dask back-end
+ #
+ (
+ expected_pending_tasks,
+ task_to_callback_mapping,
+ ) = await _assert_publish_in_dask_backend(
+ sqlalchemy_async_engine,
+ published_project,
+ expected_published_tasks,
+ mocked_dask_client,
+ scheduler_api,
+ )
+
+ #
+    # The dask-worker can take a job when it is PENDING, but the dask scheduler does
+    # not distinguish between PENDING and STARTED
+ #
+ exp_started_task = expected_pending_tasks[0]
+ expected_pending_tasks.remove(exp_started_task)
+
+ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskState]:
+ return [
+ (
+ DaskClientTaskState.PENDING_OR_STARTED
+ if job_id == exp_started_task.job_id
+ else DaskClientTaskState.PENDING
+ )
+ for job_id in job_ids
+ ]
+
+ assert isinstance(mocked_dask_client.get_tasks_status, mock.Mock)
+ assert isinstance(mocked_dask_client.send_computation_tasks, mock.Mock)
+ assert isinstance(mocked_dask_client.get_task_result, mock.Mock)
+ mocked_dask_client.get_tasks_status.side_effect = _return_1st_task_running
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.PENDING,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[exp_started_task.node_id]
+ + [p.node_id for p in expected_pending_tasks],
+ expected_state=RunningState.PENDING,
+ expected_progress=None,
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_published_tasks],
+ expected_state=RunningState.PUBLISHED,
+ expected_progress=None, # since we bypass the API entrypoint this is correct
+ )
+ mocked_dask_client.send_computation_tasks.assert_not_called()
+ mocked_dask_client.get_tasks_status.assert_called_once_with(
+ [p.job_id for p in (exp_started_task, *expected_pending_tasks)],
+ )
+ mocked_dask_client.get_tasks_status.reset_mock()
+ mocked_dask_client.get_task_result.assert_not_called()
+
+ # -------------------------------------------------------------------------------
+    # 4. the dask-worker starts processing a task; here we simulate a progress event
+ assert exp_started_task.job_id
+ assert exp_started_task.project_id
+ assert exp_started_task.node_id
+ assert published_project.project.prj_owner
+ await _trigger_progress_event(
+ scheduler_api,
+ job_id=exp_started_task.job_id,
+ user_id=published_project.project.prj_owner,
+ project_id=exp_started_task.project_id,
+ node_id=exp_started_task.node_id,
+ )
+
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+    # the comp_run and the comp_task switch to STARTED
+ run_in_db = (
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.STARTED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ )[0]
+ tasks_in_db = await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[exp_started_task.node_id],
+ expected_state=RunningState.STARTED,
+ expected_progress=0,
+ )
+ tasks_in_db += await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_pending_tasks],
+ expected_state=RunningState.PENDING,
+ expected_progress=None,
+ )
+ tasks_in_db += await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[p.node_id for p in expected_published_tasks],
+ expected_state=RunningState.PUBLISHED,
+ expected_progress=None,
+ )
+ mocked_dask_client.send_computation_tasks.assert_not_called()
+ mocked_dask_client.get_tasks_status.assert_called_once_with(
+ [p.job_id for p in (exp_started_task, *expected_pending_tasks)],
+ )
+ mocked_dask_client.get_tasks_status.reset_mock()
+ mocked_dask_client.get_task_result.assert_not_called()
+ # check the metrics are properly published
+ messages = await _assert_message_received(
+ instrumentation_rabbit_client_parser,
+ 1,
+ InstrumentationRabbitMessage.model_validate_json,
+ )
+ assert messages[0].metrics == "service_started"
+ assert messages[0].service_uuid == exp_started_task.node_id
+
+ # check the RUT messages are properly published
+ messages = await _assert_message_received(
+ resource_tracking_rabbit_client_parser,
+ 1,
+ RabbitResourceTrackingStartedMessage.model_validate_json,
+ )
+ assert messages[0].node_id == exp_started_task.node_id
+
+ return RunningProject(
+ published_project.user,
+ published_project.project,
+ published_project.pipeline,
+ tasks_in_db,
+ runs=run_in_db,
+ task_to_callback_mapping=task_to_callback_mapping,
+ )
+
+
+@pytest.fixture
+def mocked_worker_publisher(mocker: MockerFixture) -> mock.Mock:
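+    # intercepts the request the scheduler emits to ask for a new scheduling round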
+ return mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler._scheduler_base.request_pipeline_scheduling",
+ autospec=True,
+ )
+
+
+async def test_completed_task_triggers_new_scheduling_task(
+ mocked_worker_publisher: mock.Mock,
+ with_started_project: RunningProject,
+):
+ """When a pipeline job completes, the Dask backend provides a callback
+ that runs in a separate thread. We use that callback to ask the
+ director-v2 computational scheduler manager to ask for a new schedule
+ After fiddling in distributed source code, here is a similar way to trigger that callback
+ """
+ completed_node_id = with_started_project.tasks[0].node_id
+ callback = with_started_project.task_to_callback_mapping[completed_node_id]
+ await asyncio.to_thread(callback)
+
+ mocked_worker_publisher.assert_called_once_with(
+ mock.ANY,
+ mock.ANY,
+ user_id=with_started_project.runs.user_id,
+ project_id=with_started_project.runs.project_uuid,
+ iteration=with_started_project.runs.iteration,
+ )
+
+
+async def test_broken_pipeline_configuration_is_not_scheduled_and_aborted(
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
+ initialized_app: FastAPI,
+ scheduler_api: BaseCompScheduler,
+ registered_user: Callable[..., dict[str, Any]],
+ project: Callable[..., Awaitable[ProjectAtDB]],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ fake_workbench_without_outputs: dict[str, Any],
+ fake_workbench_adjacency: dict[str, Any],
+ sqlalchemy_async_engine: AsyncEngine,
+ run_metadata: RunMetadataDict,
+):
+ """A pipeline which comp_tasks are missing should not be scheduled.
+ It shall be aborted and shown as such in the comp_runs db"""
+ user = registered_user()
+ sleepers_project = await project(user, workbench=fake_workbench_without_outputs)
+ await create_pipeline(
+ project_id=f"{sleepers_project.uuid}",
+ dag_adjacency_list=fake_workbench_adjacency,
+ )
+ await assert_comp_runs_empty(sqlalchemy_async_engine)
+
+ #
+ # Initiate new pipeline scheduling
+ #
+ await run_new_pipeline(
+ initialized_app,
+ user_id=user["id"],
+ project_id=sleepers_project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ run_metadata=run_metadata,
+ use_on_demand_clusters=False,
+ )
+ with_disabled_scheduler_publisher.assert_called_once()
+    # we shall have a new comp_runs row with the new pipeline job
+ run_entry = (
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.PUBLISHED,
+ where_statement=(comp_runs.c.user_id == user["id"])
+ & (comp_runs.c.project_uuid == f"{sleepers_project.uuid}"),
+ )
+ )[0]
+
+ #
+    # Trigger scheduling manually. Since the pipeline is broken, it shall be aborted
+ #
+ await scheduler_api.apply(
+ user_id=run_entry.user_id,
+ project_id=run_entry.project_uuid,
+ iteration=run_entry.iteration,
+ )
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.ABORTED,
+ where_statement=(comp_runs.c.user_id == user["id"])
+ & (comp_runs.c.project_uuid == f"{sleepers_project.uuid}"),
+ )
async def test_task_progress_triggers(
- with_disabled_auto_scheduling: None,
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
+ initialized_app: FastAPI,
mocked_dask_client: mock.MagicMock,
- scheduler: BaseCompScheduler,
- aiopg_engine: aiopg.sa.engine.Engine,
+ scheduler_api: BaseCompScheduler,
+ sqlalchemy_async_engine: AsyncEngine,
published_project: PublishedProject,
- mocked_parse_output_data_fct: None,
- mocked_clean_task_output_and_log_files_if_invalid: None,
+ mocked_parse_output_data_fct: mock.Mock,
+ mocked_clean_task_output_and_log_files_if_invalid: mock.Mock,
run_metadata: RunMetadataDict,
):
- _mock_send_computation_tasks(published_project.tasks, mocked_dask_client)
- expected_published_tasks = await _assert_start_pipeline(
- aiopg_engine, published_project, scheduler, run_metadata
+ _with_mock_send_computation_tasks(published_project.tasks, mocked_dask_client)
+ _run_in_db, expected_published_tasks = await _assert_start_pipeline(
+ initialized_app,
+ sqlalchemy_async_engine=sqlalchemy_async_engine,
+ published_project=published_project,
+ run_metadata=run_metadata,
)
# -------------------------------------------------------------------------------
- # 1. first run will move comp_tasks to PENDING so the worker can take them
- expected_pending_tasks = await _assert_schedule_pipeline_PENDING(
- aiopg_engine,
+ # 1. first run will move comp_tasks to PENDING so the dask-worker can take them
+ expected_pending_tasks, _ = await _assert_publish_in_dask_backend(
+ sqlalchemy_async_engine,
published_project,
expected_published_tasks,
mocked_dask_client,
- scheduler,
+ scheduler_api,
)
# send some progress
@@ -1078,13 +1194,13 @@ async def test_task_progress_triggers(
),
)
await cast( # noqa: SLF001
- DaskScheduler, scheduler
+ DaskScheduler, scheduler_api
)._task_progress_change_handler(progress_event.model_dump_json())
# NOTE: not sure whether it should switch to STARTED.. it would make sense
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [started_task.node_id],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[started_task.node_id],
expected_state=RunningState.STARTED,
expected_progress=min(max(0, progress), 1),
)
@@ -1100,11 +1216,13 @@ async def test_task_progress_triggers(
),
],
)
-async def test_handling_of_disconnected_dask_scheduler(
- with_disabled_auto_scheduling: None,
+async def test_handling_of_disconnected_scheduler_dask(
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
+ initialized_app: FastAPI,
mocked_dask_client: mock.MagicMock,
- scheduler: BaseCompScheduler,
- aiopg_engine: aiopg.sa.engine.Engine,
+ scheduler_api: BaseCompScheduler,
+ sqlalchemy_async_engine: AsyncEngine,
mocker: MockerFixture,
published_project: PublishedProject,
backend_error: ComputationalSchedulerError,
@@ -1112,14 +1230,15 @@ async def test_handling_of_disconnected_dask_scheduler(
):
# this will create a non connected backend issue that will trigger re-connection
mocked_dask_client_send_task = mocker.patch(
- "simcore_service_director_v2.modules.comp_scheduler._dask_scheduler.DaskClient.send_computation_tasks",
+ "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.DaskClient.send_computation_tasks",
side_effect=backend_error,
)
assert mocked_dask_client_send_task
# running the pipeline will now raise and the tasks are set back to PUBLISHED
assert published_project.project.prj_owner
- await scheduler.run_new_pipeline(
+ await run_new_pipeline(
+ initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
cluster_id=DEFAULT_CLUSTER_ID,
@@ -1129,28 +1248,42 @@ async def test_handling_of_disconnected_dask_scheduler(
# since there is no cluster, there is no dask-scheduler,
# the tasks shall all still be in PUBLISHED state now
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.PUBLISHED)
+ runs_in_db = await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.PUBLISHED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ run_in_db = runs_in_db[0]
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [t.node_id for t in published_project.tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[t.node_id for t in published_project.tasks],
expected_state=RunningState.PUBLISHED,
expected_progress=None,
)
# on the next iteration of the pipeline it will try to re-connect
# now try to abort the tasks since we are wondering what is happening, this should auto-trigger the scheduler
- await scheduler.stop_pipeline(
+ await stop_pipeline(
+ initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
)
# we ensure the scheduler was run
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
# after this step the tasks are marked as ABORTED
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[
t.node_id
for t in published_project.tasks
if t.node_class == NodeClass.COMPUTATIONAL
@@ -1159,9 +1292,21 @@ async def test_handling_of_disconnected_dask_scheduler(
expected_progress=1,
)
# then we have another scheduler run
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
# now the run should be ABORTED
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.ABORTED)
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.ABORTED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
@dataclass(frozen=True, kw_only=True)
@@ -1175,9 +1320,6 @@ class RebootState:
expected_run_state: RunningState
-@pytest.mark.skip(
- reason="awaiting refactor in https://github.com/ITISFoundation/osparc-simcore/pull/6736"
-)
@pytest.mark.parametrize(
"reboot_state",
[
@@ -1245,14 +1387,15 @@ class RebootState:
),
],
)
-async def test_handling_scheduling_after_reboot(
- with_disabled_auto_scheduling: None,
+async def test_handling_scheduled_tasks_after_director_reboots(
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
mocked_dask_client: mock.MagicMock,
- aiopg_engine: aiopg.sa.engine.Engine,
+ sqlalchemy_async_engine: AsyncEngine,
running_project: RunningProject,
- scheduler: BaseCompScheduler,
- mocked_parse_output_data_fct: mock.MagicMock,
- mocked_clean_task_output_fct: mock.MagicMock,
+ scheduler_api: BaseCompScheduler,
+ mocked_parse_output_data_fct: mock.Mock,
+ mocked_clean_task_output_fct: mock.Mock,
reboot_state: RebootState,
):
"""After the dask client is rebooted, or that the director-v2 reboots the dv-2 internal scheduler
@@ -1270,8 +1413,12 @@ async def mocked_get_task_result(_job_id: str) -> TaskOutputData:
return reboot_state.task_result
mocked_dask_client.get_task_result.side_effect = mocked_get_task_result
-
- await schedule_all_pipelines(scheduler)
+ assert running_project.project.prj_owner
+ await scheduler_api.apply(
+ user_id=running_project.project.prj_owner,
+ project_id=running_project.project.uuid,
+ iteration=1,
+ )
# the status will be called once for all RUNNING tasks
mocked_dask_client.get_tasks_status.assert_called_once()
if reboot_state.expected_run_state in COMPLETED_STATES:
@@ -1303,10 +1450,10 @@ async def mocked_get_task_result(_job_id: str) -> TaskOutputData:
else:
mocked_clean_task_output_fct.assert_not_called()
- await _assert_comp_tasks_db(
- aiopg_engine,
- running_project.project.uuid,
- [
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=running_project.project.uuid,
+ task_ids=[
running_project.tasks[1].node_id,
running_project.tasks[2].node_id,
running_project.tasks[3].node_id,
@@ -1314,40 +1461,58 @@ async def mocked_get_task_result(_job_id: str) -> TaskOutputData:
expected_state=reboot_state.expected_task_state_group1,
expected_progress=reboot_state.expected_task_progress_group1,
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- running_project.project.uuid,
- [running_project.tasks[4].node_id],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=running_project.project.uuid,
+ task_ids=[running_project.tasks[4].node_id],
expected_state=reboot_state.expected_task_state_group2,
expected_progress=reboot_state.expected_task_progress_group2,
)
assert running_project.project.prj_owner
- await _assert_comp_run_db(
- aiopg_engine, running_project, reboot_state.expected_run_state
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=reboot_state.expected_run_state,
+ where_statement=and_(
+ comp_runs.c.user_id == running_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{running_project.project.uuid}",
+ ),
)
async def test_handling_cancellation_of_jobs_after_reboot(
- with_disabled_auto_scheduling: None,
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
mocked_dask_client: mock.MagicMock,
- aiopg_engine: aiopg.sa.engine.Engine,
+ sqlalchemy_async_engine: AsyncEngine,
running_project_mark_for_cancellation: RunningProject,
- scheduler: BaseCompScheduler,
- mocked_parse_output_data_fct: mock.MagicMock,
- mocked_clean_task_output_fct: mock.MagicMock,
+ scheduler_api: BaseCompScheduler,
+ mocked_parse_output_data_fct: mock.Mock,
+ mocked_clean_task_output_fct: mock.Mock,
):
"""A running pipeline was cancelled by a user and the DV-2 was restarted BEFORE
It could actually cancel the task. On reboot the DV-2 shall recover
and actually cancel the pipeline properly"""
# check initial status
- await _assert_comp_run_db(
- aiopg_engine, running_project_mark_for_cancellation, RunningState.STARTED
- )
- await _assert_comp_tasks_db(
- aiopg_engine,
- running_project_mark_for_cancellation.project.uuid,
- [t.node_id for t in running_project_mark_for_cancellation.tasks],
+ run_in_db = (
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.STARTED,
+ where_statement=and_(
+ comp_runs.c.user_id
+ == running_project_mark_for_cancellation.project.prj_owner,
+ comp_runs.c.project_uuid
+ == f"{running_project_mark_for_cancellation.project.uuid}",
+ ),
+ )
+ )[0]
+
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=running_project_mark_for_cancellation.project.uuid,
+ task_ids=[t.node_id for t in running_project_mark_for_cancellation.tasks],
expected_state=RunningState.STARTED,
expected_progress=0,
)
@@ -1358,7 +1523,11 @@ async def mocked_get_tasks_status(job_ids: list[str]) -> list[DaskClientTaskStat
mocked_dask_client.get_tasks_status.side_effect = mocked_get_tasks_status
# Running the scheduler, should actually cancel the run now
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
mocked_dask_client.abort_computation_task.assert_called()
assert mocked_dask_client.abort_computation_task.call_count == len(
[
@@ -1368,10 +1537,10 @@ async def mocked_get_tasks_status(job_ids: list[str]) -> list[DaskClientTaskStat
]
)
# in the DB they are still running, they will be stopped in the next iteration
- await _assert_comp_tasks_db(
- aiopg_engine,
- running_project_mark_for_cancellation.project.uuid,
- [
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=running_project_mark_for_cancellation.project.uuid,
+ task_ids=[
t.node_id
for t in running_project_mark_for_cancellation.tasks
if t.node_class == NodeClass.COMPUTATIONAL
@@ -1379,8 +1548,16 @@ async def mocked_get_tasks_status(job_ids: list[str]) -> list[DaskClientTaskStat
expected_state=RunningState.STARTED,
expected_progress=0,
)
- await _assert_comp_run_db(
- aiopg_engine, running_project_mark_for_cancellation, RunningState.STARTED
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.STARTED,
+ where_statement=and_(
+ comp_runs.c.user_id
+ == running_project_mark_for_cancellation.project.prj_owner,
+ comp_runs.c.project_uuid
+ == f"{running_project_mark_for_cancellation.project.uuid}",
+ ),
)
# the backend shall now report the tasks as aborted
@@ -1395,12 +1572,16 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
raise TaskCancelledError
mocked_dask_client.get_task_result.side_effect = _return_random_task_result
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
# now should be stopped
- await _assert_comp_tasks_db(
- aiopg_engine,
- running_project_mark_for_cancellation.project.uuid,
- [
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=running_project_mark_for_cancellation.project.uuid,
+ task_ids=[
t.node_id
for t in running_project_mark_for_cancellation.tasks
if t.node_class == NodeClass.COMPUTATIONAL
@@ -1408,8 +1589,16 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
expected_state=RunningState.ABORTED,
expected_progress=1,
)
- await _assert_comp_run_db(
- aiopg_engine, running_project_mark_for_cancellation, RunningState.ABORTED
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.ABORTED,
+ where_statement=and_(
+ comp_runs.c.user_id
+ == running_project_mark_for_cancellation.project.prj_owner,
+ comp_runs.c.project_uuid
+ == f"{running_project_mark_for_cancellation.project.uuid}",
+ ),
)
mocked_clean_task_output_fct.assert_called()
@@ -1422,27 +1611,32 @@ def with_fast_service_heartbeat_s(monkeypatch: pytest.MonkeyPatch) -> int:
async def test_running_pipeline_triggers_heartbeat(
- with_disabled_auto_scheduling: None,
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
with_fast_service_heartbeat_s: int,
+ initialized_app: FastAPI,
mocked_dask_client: mock.MagicMock,
- scheduler: BaseCompScheduler,
- aiopg_engine: aiopg.sa.engine.Engine,
+ scheduler_api: BaseCompScheduler,
+ sqlalchemy_async_engine: AsyncEngine,
published_project: PublishedProject,
resource_tracking_rabbit_client_parser: mock.AsyncMock,
run_metadata: RunMetadataDict,
):
- _mock_send_computation_tasks(published_project.tasks, mocked_dask_client)
- expected_published_tasks = await _assert_start_pipeline(
- aiopg_engine, published_project, scheduler, run_metadata
+ _with_mock_send_computation_tasks(published_project.tasks, mocked_dask_client)
+ run_in_db, expected_published_tasks = await _assert_start_pipeline(
+ initialized_app,
+ sqlalchemy_async_engine=sqlalchemy_async_engine,
+ published_project=published_project,
+ run_metadata=run_metadata,
)
# -------------------------------------------------------------------------------
- # 1. first run will move comp_tasks to PENDING so the worker can take them
- expected_pending_tasks = await _assert_schedule_pipeline_PENDING(
- aiopg_engine,
+ # 1. first run will move comp_tasks to PENDING so the dask-worker can take them
+ expected_pending_tasks, _ = await _assert_publish_in_dask_backend(
+ sqlalchemy_async_engine,
published_project,
expected_published_tasks,
mocked_dask_client,
- scheduler,
+ scheduler_api,
)
# -------------------------------------------------------------------------------
# 2. the "worker" starts processing a task
@@ -1463,13 +1657,17 @@ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
assert exp_started_task.job_id
assert published_project.project.prj_owner
await _trigger_progress_event(
- scheduler,
+ scheduler_api,
job_id=exp_started_task.job_id,
user_id=published_project.project.prj_owner,
project_id=exp_started_task.project_id,
node_id=exp_started_task.node_id,
)
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
messages = await _assert_message_received(
resource_tracking_rabbit_client_parser,
@@ -1481,8 +1679,16 @@ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
# -------------------------------------------------------------------------------
# 3. wait a bit and run again we should get another heartbeat, but only one!
await asyncio.sleep(with_fast_service_heartbeat_s + 1)
- await schedule_all_pipelines(scheduler)
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
messages = await _assert_message_received(
resource_tracking_rabbit_client_parser,
1,
@@ -1493,8 +1699,16 @@ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
# -------------------------------------------------------------------------------
# 4. wait a bit and run again we should get another heartbeat, but only one!
await asyncio.sleep(with_fast_service_heartbeat_s + 1)
- await schedule_all_pipelines(scheduler)
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
messages = await _assert_message_received(
resource_tracking_rabbit_client_parser,
1,
@@ -1506,15 +1720,17 @@ async def _return_1st_task_running(job_ids: list[str]) -> list[DaskClientTaskSta
@pytest.fixture
async def mocked_get_or_create_cluster(mocker: MockerFixture) -> mock.Mock:
return mocker.patch(
- "simcore_service_director_v2.modules.comp_scheduler._dask_scheduler.get_or_create_on_demand_cluster",
+ "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.get_or_create_on_demand_cluster",
autospec=True,
)
async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits(
- with_disabled_auto_scheduling: None,
- scheduler: BaseCompScheduler,
- aiopg_engine: aiopg.sa.engine.Engine,
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
+ initialized_app: FastAPI,
+ scheduler_api: BaseCompScheduler,
+ sqlalchemy_async_engine: AsyncEngine,
published_project: PublishedProject,
run_metadata: RunMetadataDict,
mocked_get_or_create_cluster: mock.Mock,
@@ -1527,7 +1743,8 @@ async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits(
)
# running the pipeline will trigger a call to the clusters-keeper
assert published_project.project.prj_owner
- await scheduler.run_new_pipeline(
+ await run_new_pipeline(
+ initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
cluster_id=DEFAULT_CLUSTER_ID,
@@ -1536,11 +1753,21 @@ async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits(
)
# we ask to use an on-demand cluster, therefore the tasks are published first
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.PUBLISHED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [t.node_id for t in published_project.tasks],
+ run_in_db = (
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.PUBLISHED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ )[0]
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[t.node_id for t in published_project.tasks],
expected_state=RunningState.PUBLISHED,
expected_progress=None,
)
@@ -1550,32 +1777,52 @@ async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits(
published_project.tasks[1],
published_project.tasks[3],
]
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
mocked_get_or_create_cluster.assert_called()
assert mocked_get_or_create_cluster.call_count == 1
mocked_get_or_create_cluster.reset_mock()
- await _assert_comp_run_db(
- aiopg_engine, published_project, RunningState.WAITING_FOR_CLUSTER
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.WAITING_FOR_CLUSTER,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [t.node_id for t in expected_waiting_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[t.node_id for t in expected_waiting_tasks],
expected_state=RunningState.WAITING_FOR_CLUSTER,
expected_progress=None,
)
# again will trigger the same response
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
mocked_get_or_create_cluster.assert_called()
assert mocked_get_or_create_cluster.call_count == 1
mocked_get_or_create_cluster.reset_mock()
- await _assert_comp_run_db(
- aiopg_engine, published_project, RunningState.WAITING_FOR_CLUSTER
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.WAITING_FOR_CLUSTER,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [t.node_id for t in expected_waiting_tasks],
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[t.node_id for t in expected_waiting_tasks],
expected_state=RunningState.WAITING_FOR_CLUSTER,
expected_progress=None,
)
@@ -1586,18 +1833,23 @@ async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits(
[ClustersKeeperNotAvailableError],
)
async def test_pipeline_with_on_demand_cluster_with_no_clusters_keeper_fails(
- with_disabled_auto_scheduling: None,
- scheduler: BaseCompScheduler,
- aiopg_engine: aiopg.sa.engine.Engine,
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
+ initialized_app: FastAPI,
+ scheduler_api: BaseCompScheduler,
+ sqlalchemy_async_engine: AsyncEngine,
published_project: PublishedProject,
run_metadata: RunMetadataDict,
mocked_get_or_create_cluster: mock.Mock,
get_or_create_exception: Exception,
):
+    # NOTE: this needs to change, see https://github.com/ITISFoundation/osparc-simcore/issues/6817
+
mocked_get_or_create_cluster.side_effect = get_or_create_exception
# running the pipeline will trigger a call to the clusters-keeper
assert published_project.project.prj_owner
- await scheduler.run_new_pipeline(
+ await run_new_pipeline(
+ initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
cluster_id=DEFAULT_CLUSTER_ID,
@@ -1606,11 +1858,21 @@ async def test_pipeline_with_on_demand_cluster_with_no_clusters_keeper_fails(
)
# we ask to use an on-demand cluster, therefore the tasks are published first
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.PUBLISHED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [t.node_id for t in published_project.tasks],
+ run_in_db = (
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.PUBLISHED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ )[0]
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[t.node_id for t in published_project.tasks],
expected_state=RunningState.PUBLISHED,
expected_progress=None,
)
@@ -1619,26 +1881,50 @@ async def test_pipeline_with_on_demand_cluster_with_no_clusters_keeper_fails(
published_project.tasks[1],
published_project.tasks[3],
]
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
mocked_get_or_create_cluster.assert_called()
assert mocked_get_or_create_cluster.call_count == 1
mocked_get_or_create_cluster.reset_mock()
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.FAILED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [t.node_id for t in expected_failed_tasks],
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.FAILED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[t.node_id for t in expected_failed_tasks],
expected_state=RunningState.FAILED,
expected_progress=1.0,
)
# again will not re-trigger the call to clusters-keeper
- await schedule_all_pipelines(scheduler)
+ await scheduler_api.apply(
+ user_id=run_in_db.user_id,
+ project_id=run_in_db.project_uuid,
+ iteration=run_in_db.iteration,
+ )
mocked_get_or_create_cluster.assert_not_called()
- await _assert_comp_run_db(aiopg_engine, published_project, RunningState.FAILED)
- await _assert_comp_tasks_db(
- aiopg_engine,
- published_project.project.uuid,
- [t.node_id for t in expected_failed_tasks],
+ await assert_comp_runs(
+ sqlalchemy_async_engine,
+ expected_total=1,
+ expected_state=RunningState.FAILED,
+ where_statement=and_(
+ comp_runs.c.user_id == published_project.project.prj_owner,
+ comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+ ),
+ )
+ await assert_comp_tasks(
+ sqlalchemy_async_engine,
+ project_uuid=published_project.project.uuid,
+ task_ids=[t.node_id for t in expected_failed_tasks],
expected_state=RunningState.FAILED,
expected_progress=1.0,
)
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py
new file mode 100644
index 00000000000..9eb301e0910
--- /dev/null
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py
@@ -0,0 +1,135 @@
+# pylint:disable=unused-variable
+# pylint:disable=unused-argument
+# pylint:disable=redefined-outer-name
+# pylint:disable=no-value-for-parameter
+# pylint:disable=protected-access
+# pylint:disable=too-many-arguments
+# pylint:disable=no-name-in-module
+# pylint: disable=too-many-statements
+
+import asyncio
+from collections.abc import Awaitable, Callable
+from unittest import mock
+
+import pytest
+from _helpers import PublishedProject
+from fastapi import FastAPI
+from models_library.clusters import DEFAULT_CLUSTER_ID
+from pytest_mock import MockerFixture
+from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
+from pytest_simcore.helpers.typing_env import EnvVarsDict
+from simcore_service_director_v2.models.comp_runs import RunMetadataDict
+from simcore_service_director_v2.modules.comp_scheduler._manager import run_new_pipeline
+from simcore_service_director_v2.modules.comp_scheduler._models import (
+ SchedulePipelineRabbitMessage,
+)
+from simcore_service_director_v2.modules.comp_scheduler._worker import (
+ _get_scheduler_worker,
+)
+
+pytest_simcore_core_services_selection = ["postgres", "rabbit", "redis"]
+pytest_simcore_ops_services_selection = ["adminer"]
+
+
+async def test_worker_starts_and_stops(initialized_app: FastAPI):
+ assert _get_scheduler_worker(initialized_app) is not None
+
+
+@pytest.fixture
+def mock_schedule_pipeline(mocker: MockerFixture) -> mock.Mock:
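+    # a stand-in scheduler worker whose schedule_pipeline simply reports success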
+ mock_scheduler_worker = mock.Mock()
+ mock_scheduler_worker.schedule_pipeline = mocker.AsyncMock(return_value=True)
+ return mock_scheduler_worker
+
+
+@pytest.fixture
+def mocked_get_scheduler_worker(
+ mocker: MockerFixture,
+ mock_schedule_pipeline: mock.Mock,
+) -> mock.Mock:
+ # Mock `_get_scheduler_worker` to return our mock scheduler
+ return mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler._worker._get_scheduler_worker",
+ return_value=mock_schedule_pipeline,
+ )
+
+
+async def test_worker_properly_autocalls_scheduler_api(
+ with_disabled_auto_scheduling: mock.Mock,
+ initialized_app: FastAPI,
+ mocked_get_scheduler_worker: mock.Mock,
+ published_project: PublishedProject,
+ run_metadata: RunMetadataDict,
+):
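+    """Starting a new pipeline run shall make the worker pick up the scheduling
+    request and call the scheduler's apply() with the run's identifiers."""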
+ assert published_project.project.prj_owner
+ await run_new_pipeline(
+ initialized_app,
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ run_metadata=run_metadata,
+ use_on_demand_clusters=False,
+ )
+ mocked_get_scheduler_worker.assert_called_once_with(initialized_app)
+ mocked_get_scheduler_worker.return_value.apply.assert_called_once_with(
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ iteration=1,
+ )
+
+
+@pytest.fixture
+async def mocked_scheduler_api(mocker: MockerFixture) -> mock.Mock:
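+    # replaces BaseCompScheduler.apply so scheduling calls can be observed without doing any work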
+ return mocker.patch(
+ "simcore_service_director_v2.modules.comp_scheduler._scheduler_base.BaseCompScheduler.apply"
+ )
+
+
+@pytest.fixture
+def with_scheduling_concurrency(
+ mock_env: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, scheduling_concurrency: int
+) -> EnvVarsDict:
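+    # exposes COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY to control how many
+    # pipelines the worker schedules in parallel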
+ return mock_env | setenvs_from_dict(
+ monkeypatch,
+ {"COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY": f"{scheduling_concurrency}"},
+ )
+
+
+@pytest.mark.parametrize("scheduling_concurrency", [1, 50, 100])
+@pytest.mark.parametrize(
+ "queue_name", [SchedulePipelineRabbitMessage.get_channel_name()]
+)
+async def test_worker_scheduling_parallelism(
+ scheduling_concurrency: int,
+ with_scheduling_concurrency: EnvVarsDict,
+ with_disabled_auto_scheduling: mock.Mock,
+ mocked_scheduler_api: mock.Mock,
+ initialized_app: FastAPI,
+ publish_project: Callable[[], Awaitable[PublishedProject]],
+ run_metadata: RunMetadataDict,
+ ensure_parametrized_queue_is_empty: None,
+):
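+    """The worker shall process as many scheduling requests concurrently as
+    configured via the scheduling concurrency setting."""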
+ with_disabled_auto_scheduling.assert_called_once()
+
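+    # simulate a long-running scheduling call so that concurrent calls overlap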
+ async def _side_effect(*args, **kwargs):
+ await asyncio.sleep(10)
+
+ mocked_scheduler_api.side_effect = _side_effect
+
+ async def _project_pipeline_creation_workflow() -> None:
+ published_project = await publish_project()
+ assert published_project.project.prj_owner
+ await run_new_pipeline(
+ initialized_app,
+ user_id=published_project.project.prj_owner,
+ project_id=published_project.project.uuid,
+ cluster_id=DEFAULT_CLUSTER_ID,
+ run_metadata=run_metadata,
+ use_on_demand_clusters=False,
+ )
+
+ await asyncio.gather(
+ *(_project_pipeline_creation_workflow() for _ in range(scheduling_concurrency))
+ )
+ mocked_scheduler_api.assert_called()
+ assert mocked_scheduler_api.call_count == scheduling_concurrency
diff --git a/services/director-v2/tests/unit/with_dbs/conftest.py b/services/director-v2/tests/unit/with_dbs/conftest.py
index fdb3b7d5a64..ee8259f9f5b 100644
--- a/services/director-v2/tests/unit/with_dbs/conftest.py
+++ b/services/director-v2/tests/unit/with_dbs/conftest.py
@@ -6,7 +6,7 @@
import datetime
-from collections.abc import Awaitable, Callable, Iterator
+from collections.abc import AsyncIterator, Awaitable, Callable
from typing import Any, cast
from uuid import uuid4
@@ -36,23 +36,24 @@
from simcore_service_director_v2.utils.dask import generate_dask_job_id
from simcore_service_director_v2.utils.db import to_clusters_db
from sqlalchemy.dialects.postgresql import insert as pg_insert
+from sqlalchemy.ext.asyncio import AsyncEngine
@pytest.fixture
-def pipeline(
- postgres_db: sa.engine.Engine,
-) -> Iterator[Callable[..., CompPipelineAtDB]]:
+async def create_pipeline(
+ sqlalchemy_async_engine: AsyncEngine,
+) -> AsyncIterator[Callable[..., Awaitable[CompPipelineAtDB]]]:
created_pipeline_ids: list[str] = []
- def creator(**pipeline_kwargs) -> CompPipelineAtDB:
+ async def _(**pipeline_kwargs) -> CompPipelineAtDB:
pipeline_config = {
"project_id": f"{uuid4()}",
"dag_adjacency_list": {},
"state": StateType.NOT_STARTED,
}
pipeline_config.update(**pipeline_kwargs)
- with postgres_db.begin() as conn:
- result = conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ result = await conn.execute(
comp_pipeline.insert()
.values(**pipeline_config)
.returning(sa.literal_column("*"))
@@ -63,11 +64,11 @@ def creator(**pipeline_kwargs) -> CompPipelineAtDB:
created_pipeline_ids.append(f"{new_pipeline.project_id}")
return new_pipeline
- yield creator
+ yield _
# cleanup
- with postgres_db.connect() as conn:
- conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ await conn.execute(
comp_pipeline.delete().where(
comp_pipeline.c.project_id.in_(created_pipeline_ids)
)
@@ -75,12 +76,12 @@ def creator(**pipeline_kwargs) -> CompPipelineAtDB:
@pytest.fixture
-def tasks(
- postgres_db: sa.engine.Engine,
-) -> Iterator[Callable[..., list[CompTaskAtDB]]]:
+async def create_tasks(
+ sqlalchemy_async_engine: AsyncEngine,
+) -> AsyncIterator[Callable[..., Awaitable[list[CompTaskAtDB]]]]:
created_task_ids: list[int] = []
- def creator(
+ async def _(
user: dict[str, Any], project: ProjectAtDB, **overrides_kwargs
) -> list[CompTaskAtDB]:
created_tasks: list[CompTaskAtDB] = []
@@ -122,7 +123,7 @@ def creator(
),
"node_class": to_node_class(node_data.key),
"internal_id": internal_id + 1,
- "submit": datetime.datetime.now(tz=datetime.UTC),
+ "submit": datetime.datetime.now(datetime.UTC),
"job_id": generate_dask_job_id(
service_key=node_data.key,
service_version=node_data.version,
@@ -132,8 +133,8 @@ def creator(
),
}
task_config.update(**overrides_kwargs)
- with postgres_db.connect() as conn:
- result = conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ result = await conn.execute(
comp_tasks.insert()
.values(**task_config)
.returning(sa.literal_column("*"))
@@ -143,11 +144,11 @@ def creator(
created_task_ids.extend([t.task_id for t in created_tasks if t.task_id])
return created_tasks
- yield creator
+ yield _
# cleanup
- with postgres_db.connect() as conn:
- conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ await conn.execute(
comp_tasks.delete().where(comp_tasks.c.task_id.in_(created_task_ids))
)
@@ -186,64 +187,74 @@ def run_metadata(
@pytest.fixture
-def runs(
- postgres_db: sa.engine.Engine, run_metadata: RunMetadataDict
-) -> Iterator[Callable[..., CompRunsAtDB]]:
+async def create_comp_run(
+ sqlalchemy_async_engine: AsyncEngine, run_metadata: RunMetadataDict
+) -> AsyncIterator[Callable[..., Awaitable[CompRunsAtDB]]]:
created_run_ids: list[int] = []
- def creator(
+ async def _(
user: dict[str, Any], project: ProjectAtDB, **run_kwargs
) -> CompRunsAtDB:
run_config = {
"project_uuid": f"{project.uuid}",
- "user_id": f"{user['id']}",
+ "user_id": user["id"],
"iteration": 1,
"result": StateType.NOT_STARTED,
- "metadata": run_metadata,
+ "metadata": jsonable_encoder(run_metadata),
"use_on_demand_clusters": False,
}
run_config.update(**run_kwargs)
- with postgres_db.connect() as conn:
- result = conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ result = await conn.execute(
comp_runs.insert()
- .values(**jsonable_encoder(run_config))
+ .values(**run_config)
.returning(sa.literal_column("*"))
)
new_run = CompRunsAtDB.model_validate(result.first())
created_run_ids.append(new_run.run_id)
return new_run
- yield creator
+ yield _
# cleanup
- with postgres_db.connect() as conn:
- conn.execute(comp_runs.delete().where(comp_runs.c.run_id.in_(created_run_ids)))
+ async with sqlalchemy_async_engine.begin() as conn:
+ await conn.execute(
+ comp_runs.delete().where(comp_runs.c.run_id.in_(created_run_ids))
+ )
@pytest.fixture
-def cluster(
- postgres_db: sa.engine.Engine,
-) -> Iterator[Callable[..., Cluster]]:
+async def create_cluster(
+ sqlalchemy_async_engine: AsyncEngine,
+) -> AsyncIterator[Callable[..., Awaitable[Cluster]]]:
created_cluster_ids: list[str] = []
- def creator(user: dict[str, Any], **cluster_kwargs) -> Cluster:
+ async def _(user: dict[str, Any], **cluster_kwargs) -> Cluster:
+ assert "json_schema_extra" in Cluster.model_config
+ assert isinstance(Cluster.model_config["json_schema_extra"], dict)
+ assert isinstance(Cluster.model_config["json_schema_extra"]["examples"], list)
+ assert isinstance(
+ Cluster.model_config["json_schema_extra"]["examples"][1], dict
+ )
cluster_config = Cluster.model_config["json_schema_extra"]["examples"][1]
cluster_config["owner"] = user["primary_gid"]
cluster_config.update(**cluster_kwargs)
new_cluster = Cluster.model_validate(cluster_config)
assert new_cluster
- with postgres_db.connect() as conn:
+ async with sqlalchemy_async_engine.begin() as conn:
# insert basic cluster
- created_cluster = conn.execute(
- sa.insert(clusters)
- .values(to_clusters_db(new_cluster, only_update=False))
- .returning(sa.literal_column("*"))
+ created_cluster = (
+ await conn.execute(
+ sa.insert(clusters)
+ .values(to_clusters_db(new_cluster, only_update=False))
+ .returning(sa.literal_column("*"))
+ )
).one()
created_cluster_ids.append(created_cluster.id)
if "access_rights" in cluster_kwargs:
for gid, rights in cluster_kwargs["access_rights"].items():
- conn.execute(
+ await conn.execute(
pg_insert(cluster_to_groups)
.values(
cluster_id=created_cluster.id,
@@ -256,7 +267,7 @@ def creator(user: dict[str, Any], **cluster_kwargs) -> Cluster:
)
)
access_rights_in_db = {}
- for row in conn.execute(
+ for row in await conn.execute(
sa.select(
cluster_to_groups.c.gid,
cluster_to_groups.c.read,
@@ -284,44 +295,57 @@ def creator(user: dict[str, Any], **cluster_kwargs) -> Cluster:
thumbnail=None,
)
- yield creator
+ yield _
# cleanup
- with postgres_db.connect() as conn:
- conn.execute(
- # pylint: disable=no-value-for-parameter
+ async with sqlalchemy_async_engine.begin() as conn:
+ await conn.execute(
clusters.delete().where(clusters.c.id.in_(created_cluster_ids))
)
@pytest.fixture
-async def published_project(
+async def publish_project(
registered_user: Callable[..., dict[str, Any]],
project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
- tasks: Callable[..., list[CompTaskAtDB]],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]],
fake_workbench_without_outputs: dict[str, Any],
fake_workbench_adjacency: dict[str, Any],
-) -> PublishedProject:
+) -> Callable[[], Awaitable[PublishedProject]]:
user = registered_user()
- created_project = await project(user, workbench=fake_workbench_without_outputs)
- return PublishedProject(
- project=created_project,
- pipeline=pipeline(
- project_id=f"{created_project.uuid}",
- dag_adjacency_list=fake_workbench_adjacency,
- ),
- tasks=tasks(user=user, project=created_project, state=StateType.PUBLISHED),
- )
+
+ async def _() -> PublishedProject:
+ created_project = await project(user, workbench=fake_workbench_without_outputs)
+ return PublishedProject(
+ user=user,
+ project=created_project,
+ pipeline=await create_pipeline(
+ project_id=f"{created_project.uuid}",
+ dag_adjacency_list=fake_workbench_adjacency,
+ ),
+ tasks=await create_tasks(
+ user=user, project=created_project, state=StateType.PUBLISHED
+ ),
+ )
+
+ return _
+
+
+@pytest.fixture
+async def published_project(
+ publish_project: Callable[[], Awaitable[PublishedProject]]
+) -> PublishedProject:
+ return await publish_project()
@pytest.fixture
async def running_project(
registered_user: Callable[..., dict[str, Any]],
project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
- tasks: Callable[..., list[CompTaskAtDB]],
- runs: Callable[..., CompRunsAtDB],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]],
+ create_comp_run: Callable[..., Awaitable[CompRunsAtDB]],
fake_workbench_without_outputs: dict[str, Any],
fake_workbench_adjacency: dict[str, Any],
) -> RunningProject:
@@ -329,24 +353,26 @@ async def running_project(
created_project = await project(user, workbench=fake_workbench_without_outputs)
now_time = arrow.utcnow().datetime
return RunningProject(
+ user=user,
project=created_project,
- pipeline=pipeline(
+ pipeline=await create_pipeline(
project_id=f"{created_project.uuid}",
dag_adjacency_list=fake_workbench_adjacency,
),
- tasks=tasks(
+ tasks=await create_tasks(
user=user,
project=created_project,
state=StateType.RUNNING,
progress=0.0,
start=now_time,
),
- runs=runs(
+ runs=await create_comp_run(
user=user,
project=created_project,
started=now_time,
result=StateType.RUNNING,
),
+ task_to_callback_mapping={},
)
@@ -354,9 +380,9 @@ async def running_project(
async def running_project_mark_for_cancellation(
registered_user: Callable[..., dict[str, Any]],
project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
- tasks: Callable[..., list[CompTaskAtDB]],
- runs: Callable[..., CompRunsAtDB],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]],
+ create_comp_run: Callable[..., Awaitable[CompRunsAtDB]],
fake_workbench_without_outputs: dict[str, Any],
fake_workbench_adjacency: dict[str, Any],
) -> RunningProject:
@@ -364,25 +390,27 @@ async def running_project_mark_for_cancellation(
created_project = await project(user, workbench=fake_workbench_without_outputs)
now_time = arrow.utcnow().datetime
return RunningProject(
+ user=user,
project=created_project,
- pipeline=pipeline(
+ pipeline=await create_pipeline(
project_id=f"{created_project.uuid}",
dag_adjacency_list=fake_workbench_adjacency,
),
- tasks=tasks(
+ tasks=await create_tasks(
user=user,
project=created_project,
state=StateType.RUNNING,
progress=0.0,
start=now_time,
),
- runs=runs(
+ runs=await create_comp_run(
user=user,
project=created_project,
result=StateType.RUNNING,
started=now_time,
cancelled=now_time + datetime.timedelta(seconds=5),
),
+ task_to_callback_mapping={},
)
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py
index 19ab0ea2df3..9f55e71f935 100644
--- a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py
+++ b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py
@@ -4,7 +4,7 @@
import random
from collections.abc import Callable, Iterator
-from typing import Any
+from typing import Any, Awaitable
import httpx
import pytest
@@ -85,7 +85,7 @@ def clusters_cleaner(postgres_db: sa.engine.Engine) -> Iterator:
async def test_list_clusters(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
async_client: httpx.AsyncClient,
):
user_1 = registered_user()
@@ -106,7 +106,7 @@ async def test_list_clusters(
# let's create some clusters
NUM_CLUSTERS = 111
for n in range(NUM_CLUSTERS):
- cluster(user_1, name=f"pytest cluster{n:04}")
+ await create_cluster(user_1, name=f"pytest cluster{n:04}")
response = await async_client.get(list_clusters_url)
assert response.status_code == status.HTTP_200_OK
@@ -141,7 +141,7 @@ async def test_list_clusters(
(CLUSTER_MANAGER_RIGHTS, "manager rights"),
(CLUSTER_ADMIN_RIGHTS, "admin rights"),
]:
- cluster(
+ await create_cluster(
user_1, # cluster is owned by user_1
name=f"cluster with {name}",
access_rights={
@@ -172,7 +172,7 @@ async def test_list_clusters(
async def test_get_cluster(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
async_client: httpx.AsyncClient,
):
user_1 = registered_user()
@@ -183,7 +183,7 @@ async def test_get_cluster(
assert response.status_code == status.HTTP_404_NOT_FOUND
# let's create some clusters
a_bunch_of_clusters = [
- cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111)
+ await create_cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111)
]
the_cluster = random.choice(a_bunch_of_clusters)
@@ -213,7 +213,7 @@ async def test_get_cluster(
(CLUSTER_MANAGER_RIGHTS, True),
(CLUSTER_ADMIN_RIGHTS, True),
]:
- a_cluster = cluster(
+ a_cluster = await create_cluster(
user_2, # cluster is owned by user_2
access_rights={
user_2["primary_gid"]: CLUSTER_ADMIN_RIGHTS,
@@ -243,7 +243,7 @@ async def test_get_cluster(
async def test_get_another_cluster(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
async_client: httpx.AsyncClient,
cluster_sharing_rights: ClusterAccessRights,
can_use: bool,
@@ -252,7 +252,7 @@ async def test_get_another_cluster(
user_2 = registered_user()
# let's create some clusters
a_bunch_of_clusters = [
- cluster(
+ await create_cluster(
user_1,
name=f"pytest cluster{n:04}",
access_rights={
@@ -349,7 +349,7 @@ async def test_create_cluster(
async def test_update_own_cluster(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
cluster_simple_authentication: Callable,
async_client: httpx.AsyncClient,
faker: Faker,
@@ -366,7 +366,7 @@ async def test_update_own_cluster(
assert response.status_code == status.HTTP_404_NOT_FOUND
# let's create some clusters
a_bunch_of_clusters = [
- cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111)
+ await create_cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111)
]
the_cluster = random.choice(a_bunch_of_clusters)
# get the original one
@@ -471,7 +471,7 @@ async def test_update_own_cluster(
async def test_update_default_cluster_fails(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
cluster_simple_authentication: Callable,
async_client: httpx.AsyncClient,
faker: Faker,
@@ -506,7 +506,7 @@ async def test_update_default_cluster_fails(
async def test_update_another_cluster(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
cluster_simple_authentication: Callable,
async_client: httpx.AsyncClient,
faker: Faker,
@@ -522,7 +522,7 @@ async def test_update_another_cluster(
user_2 = registered_user()
# let's create some clusters
a_bunch_of_clusters = [
- cluster(
+ await create_cluster(
user_1,
name=f"pytest cluster{n:04}",
access_rights={
@@ -603,13 +603,13 @@ async def test_update_another_cluster(
async def test_delete_cluster(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
async_client: httpx.AsyncClient,
):
user_1 = registered_user()
# let's create some clusters
a_bunch_of_clusters = [
- cluster(
+ await create_cluster(
user_1,
name=f"pytest cluster{n:04}",
access_rights={
@@ -647,7 +647,7 @@ async def test_delete_cluster(
async def test_delete_another_cluster(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
cluster_simple_authentication: Callable,
async_client: httpx.AsyncClient,
faker: Faker,
@@ -658,7 +658,7 @@ async def test_delete_another_cluster(
user_2 = registered_user()
# let's create some clusters
a_bunch_of_clusters = [
- cluster(
+ await create_cluster(
user_1,
name=f"pytest cluster{n:04}",
access_rights={
@@ -754,7 +754,7 @@ async def test_ping_cluster(
async def test_ping_specific_cluster(
clusters_config: None,
registered_user: Callable[..., dict],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
async_client: httpx.AsyncClient,
local_dask_gateway_server: DaskGatewayServer,
):
@@ -767,7 +767,7 @@ async def test_ping_specific_cluster(
# let's create some clusters and ping one
a_bunch_of_clusters = [
- cluster(
+ await create_cluster(
user_1,
name=f"pytest cluster{n:04}",
endpoint=local_dask_gateway_server.address,
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py
index 5dd1abaa594..357f3b7647a 100644
--- a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py
+++ b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py
@@ -4,7 +4,7 @@
import json
from collections.abc import Callable
-from typing import Any
+from typing import Any, Awaitable
import httpx
import pytest
@@ -142,14 +142,14 @@ async def test_get_cluster_details(
registered_user: Callable[..., dict[str, Any]],
async_client: httpx.AsyncClient,
local_dask_gateway_server: DaskGatewayServer,
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
dask_gateway_cluster: GatewayCluster,
dask_gateway_cluster_client: DaskClient,
gateway_username: str,
):
user_1 = registered_user()
# define the cluster in the DB
- some_cluster = cluster(
+ some_cluster = await create_cluster(
user_1,
endpoint=local_dask_gateway_server.address,
authentication=SimpleAuthentication(
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py b/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py
index add9c4d77d3..2e75b18c009 100644
--- a/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py
+++ b/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py
@@ -87,7 +87,6 @@ def minimal_configuration(
rabbit_service: RabbitSettings,
redis_service: RedisSettings,
monkeypatch: pytest.MonkeyPatch,
- mocked_rabbit_mq_client: None,
faker: Faker,
):
monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false")
@@ -789,12 +788,12 @@ async def test_start_computation_with_deprecated_services_raises_406(
@pytest.fixture
-def unusable_cluster(
+async def unusable_cluster(
registered_user: Callable[..., dict[str, Any]],
- cluster: Callable[..., Cluster],
+ create_cluster: Callable[..., Awaitable[Cluster]],
) -> ClusterID:
user = registered_user()
- created_cluster = cluster(user)
+ created_cluster = await create_cluster(user)
return created_cluster.id
@@ -865,7 +864,7 @@ async def test_get_computation_from_empty_project(
fake_workbench_adjacency: dict[str, Any],
registered_user: Callable[..., dict[str, Any]],
project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
faker: Faker,
async_client: httpx.AsyncClient,
):
@@ -884,8 +883,8 @@ async def test_get_computation_from_empty_project(
response = await async_client.get(get_computation_url)
assert response.status_code == status.HTTP_404_NOT_FOUND, response.text
# create an empty pipeline
- pipeline(
- project_id=proj.uuid,
+ await create_pipeline(
+ project_id=f"{proj.uuid}",
)
response = await async_client.get(get_computation_url)
assert response.status_code == status.HTTP_200_OK, response.text
@@ -917,8 +916,8 @@ async def test_get_computation_from_not_started_computation_task(
fake_workbench_adjacency: dict[str, Any],
registered_user: Callable[..., dict[str, Any]],
project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
- tasks: Callable[..., list[CompTaskAtDB]],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]],
async_client: httpx.AsyncClient,
):
user = registered_user()
@@ -926,8 +925,8 @@ async def test_get_computation_from_not_started_computation_task(
get_computation_url = httpx.URL(
f"/v2/computations/{proj.uuid}?user_id={user['id']}"
)
- pipeline(
- project_id=proj.uuid,
+ await create_pipeline(
+ project_id=f"{proj.uuid}",
dag_adjacency_list=fake_workbench_adjacency,
)
# create no task this should trigger an exception
@@ -935,7 +934,7 @@ async def test_get_computation_from_not_started_computation_task(
assert response.status_code == status.HTTP_409_CONFLICT, response.text
# now create the expected tasks and the state is good again
- comp_tasks = tasks(user=user, project=proj)
+ comp_tasks = await create_tasks(user=user, project=proj)
response = await async_client.get(get_computation_url)
assert response.status_code == status.HTTP_200_OK, response.text
returned_computation = ComputationGet.model_validate(response.json())
@@ -989,19 +988,23 @@ async def test_get_computation_from_published_computation_task(
fake_workbench_adjacency: dict[str, Any],
registered_user: Callable[..., dict[str, Any]],
project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
- tasks: Callable[..., list[CompTaskAtDB]],
- runs: Callable[..., CompRunsAtDB],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]],
+ create_comp_run: Callable[..., Awaitable[CompRunsAtDB]],
async_client: httpx.AsyncClient,
):
user = registered_user()
proj = await project(user, workbench=fake_workbench_without_outputs)
- pipeline(
- project_id=proj.uuid,
+ await create_pipeline(
+ project_id=f"{proj.uuid}",
dag_adjacency_list=fake_workbench_adjacency,
)
- comp_tasks = tasks(user=user, project=proj, state=StateType.PUBLISHED, progress=0)
- comp_runs = runs(user=user, project=proj, result=StateType.PUBLISHED)
+ comp_tasks = await create_tasks(
+ user=user, project=proj, state=StateType.PUBLISHED, progress=0
+ )
+ comp_runs = await create_comp_run(
+ user=user, project=proj, result=StateType.PUBLISHED
+ )
assert comp_runs
get_computation_url = httpx.URL(
f"/v2/computations/{proj.uuid}?user_id={user['id']}"
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py b/services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py
index 10bd1ba3a2f..845983b99cb 100644
--- a/services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py
+++ b/services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py
@@ -9,7 +9,6 @@
from uuid import uuid4
import httpx
-from pydantic import TypeAdapter
import pytest
from faker import Faker
from fastapi import FastAPI, status
@@ -22,6 +21,7 @@
from models_library.projects import ProjectAtDB, ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.users import UserID
+from pydantic import TypeAdapter
from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
from pytest_simcore.helpers.typing_env import EnvVarsDict
from simcore_service_director_v2.core.settings import AppSettings
@@ -116,21 +116,21 @@ async def project_id(
fake_workbench_adjacency: dict[str, Any],
user: dict[str, Any],
project: Callable[..., Awaitable[ProjectAtDB]],
- pipeline: Callable[..., CompPipelineAtDB],
- tasks: Callable[..., list[CompTaskAtDB]],
-):
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]],
+) -> ProjectID:
"""project uuid of a saved project (w/ tasks up-to-date)"""
# insert project -> db
proj = await project(user, workbench=fake_workbench_without_outputs)
# insert pipeline -> comp_pipeline
- pipeline(
- project_id=proj.uuid,
+ await create_pipeline(
+ project_id=f"{proj.uuid}",
dag_adjacency_list=fake_workbench_adjacency,
)
# insert tasks -> comp_tasks
- comp_tasks = tasks(user=user, project=proj)
+ comp_tasks = await create_tasks(user=user, project=proj)
return proj.uuid
diff --git a/services/director-v2/tests/unit/with_dbs/test_utils_rabbitmq.py b/services/director-v2/tests/unit/with_dbs/test_utils_rabbitmq.py
index a041f70ecc7..8778d17245e 100644
--- a/services/director-v2/tests/unit/with_dbs/test_utils_rabbitmq.py
+++ b/services/director-v2/tests/unit/with_dbs/test_utils_rabbitmq.py
@@ -93,18 +93,18 @@ async def project(
@pytest.fixture
-def tasks(
+async def tasks(
user: dict[str, Any],
project: ProjectAtDB,
fake_workbench_adjacency: dict[str, Any],
- pipeline: Callable[..., CompPipelineAtDB],
- tasks: Callable[..., list[CompTaskAtDB]],
+ create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+ create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]],
) -> list[CompTaskAtDB]:
- pipeline(
- project_id=project.uuid,
+ await create_pipeline(
+ project_id=f"{project.uuid}",
dag_adjacency_list=fake_workbench_adjacency,
)
- comp_tasks = tasks(user, project)
+ comp_tasks = await create_tasks(user, project)
assert len(comp_tasks) > 0
return comp_tasks
From 0c5a0682bf4348fcc63ca9947612c4c44648169b Mon Sep 17 00:00:00 2001
From: Odei Maiz <33152403+odeimaiz@users.noreply.github.com>
Date: Mon, 2 Dec 2024 13:18:09 +0100
Subject: [PATCH 05/16] =?UTF-8?q?=E2=9C=A8=20[Frontend]=20VIP=20Market=20P?=
=?UTF-8?q?oC=20(#6862)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../osparc/dashboard/SortedByMenuButton.js | 1 +
.../source/class/osparc/data/Resources.js | 1 +
.../class/osparc/navigation/UserMenu.js | 15 ++
.../source/class/osparc/product/Utils.js | 8 +
.../vipMarket/AnatomicalModelDetails.js | 206 +++++++++++++++++
.../vipMarket/AnatomicalModelListItem.js | 188 +++++++++++++++
.../source/class/osparc/vipMarket/Market.js | 43 ++++
.../class/osparc/vipMarket/MarketWindow.js | 54 +++++
.../osparc/vipMarket/SortModelsButtons.js | 101 ++++++++
.../class/osparc/vipMarket/VipMarket.js | 215 ++++++++++++++++++
10 files changed, 832 insertions(+)
create mode 100644 services/static-webserver/client/source/class/osparc/vipMarket/AnatomicalModelDetails.js
create mode 100644 services/static-webserver/client/source/class/osparc/vipMarket/AnatomicalModelListItem.js
create mode 100644 services/static-webserver/client/source/class/osparc/vipMarket/Market.js
create mode 100644 services/static-webserver/client/source/class/osparc/vipMarket/MarketWindow.js
create mode 100644 services/static-webserver/client/source/class/osparc/vipMarket/SortModelsButtons.js
create mode 100644 services/static-webserver/client/source/class/osparc/vipMarket/VipMarket.js
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/SortedByMenuButton.js b/services/static-webserver/client/source/class/osparc/dashboard/SortedByMenuButton.js
index 24427e4995b..7bb0bcb8d4a 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/SortedByMenuButton.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/SortedByMenuButton.js
@@ -65,6 +65,7 @@ qx.Class.define("osparc.dashboard.SortedByMenuButton", {
field: "last_change_date",
direction: "desc"
},
+
getSortByOptions: function() {
return [{
id: "name",
diff --git a/services/static-webserver/client/source/class/osparc/data/Resources.js b/services/static-webserver/client/source/class/osparc/data/Resources.js
index ad1b51cc840..d87f6c690bf 100644
--- a/services/static-webserver/client/source/class/osparc/data/Resources.js
+++ b/services/static-webserver/client/source/class/osparc/data/Resources.js
@@ -98,6 +98,7 @@ qx.Class.define("osparc.data.Resources", {
* added by oSPARC as compilation vars
*/
"appSummary": {
+ useCache: false,
endpoints: {
get: {
method: "GET",
diff --git a/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js b/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
index e4798ed1464..dc029edbba9 100644
--- a/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
+++ b/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
@@ -103,6 +103,11 @@ qx.Class.define("osparc.navigation.UserMenu", {
control.addListener("execute", () => osparc.cluster.Utils.popUpClustersDetails(), this);
this.add(control);
break;
+ case "market":
+ control = new qx.ui.menu.Button(this.tr("Market"));
+ control.addListener("execute", () => osparc.vipMarket.MarketWindow.openWindow());
+ this.add(control);
+ break;
case "about":
control = new qx.ui.menu.Button(this.tr("About oSPARC"));
osparc.utils.Utils.setIdToWidget(control, "userMenuAboutBtn");
@@ -178,6 +183,11 @@ qx.Class.define("osparc.navigation.UserMenu", {
this.addSeparator();
this.__addAnnouncements();
+
+ if (osparc.product.Utils.showS4LStore()) {
+ this.getChildControl("market");
+ }
+
this.getChildControl("about");
if (osparc.product.Utils.showAboutProduct()) {
this.getChildControl("about-product");
@@ -241,6 +251,11 @@ qx.Class.define("osparc.navigation.UserMenu", {
this.addSeparator();
this.__addAnnouncements();
+
+ if (osparc.product.Utils.showS4LStore()) {
+ this.getChildControl("market");
+ }
+
this.getChildControl("about");
if (!osparc.product.Utils.isProduct("osparc")) {
this.getChildControl("about-product");
diff --git a/services/static-webserver/client/source/class/osparc/product/Utils.js b/services/static-webserver/client/source/class/osparc/product/Utils.js
index 123d993e01b..45d3b7de661 100644
--- a/services/static-webserver/client/source/class/osparc/product/Utils.js
+++ b/services/static-webserver/client/source/class/osparc/product/Utils.js
@@ -270,6 +270,14 @@ qx.Class.define("osparc.product.Utils", {
return true;
},
+ showS4LStore: function() {
+ const platformName = osparc.store.StaticInfo.getInstance().getPlatformName();
+ if (platformName !== "master") {
+ return false;
+ }
+ return this.isS4LProduct();
+ },
+
getProductThumbUrl: function(asset = "Default.png") {
const base = "https://raw.githubusercontent.com/ZurichMedTech/s4l-assets/main/app/full/project_thumbnails"
let url;
diff --git a/services/static-webserver/client/source/class/osparc/vipMarket/AnatomicalModelDetails.js b/services/static-webserver/client/source/class/osparc/vipMarket/AnatomicalModelDetails.js
new file mode 100644
index 00000000000..40ffda37b27
--- /dev/null
+++ b/services/static-webserver/client/source/class/osparc/vipMarket/AnatomicalModelDetails.js
@@ -0,0 +1,206 @@
+/* ************************************************************************
+
+ osparc - the simcore frontend
+
+ https://osparc.io
+
+ Copyright:
+ 2024 IT'IS Foundation, https://itis.swiss
+
+ License:
+ MIT: https://opensource.org/licenses/MIT
+
+ Authors:
+ * Odei Maiz (odeimaiz)
+
+************************************************************************ */
+
+qx.Class.define("osparc.vipMarket.AnatomicalModelDetails", {
+ extend: qx.ui.core.Widget,
+
+ construct: function() {
+ this.base(arguments);
+
+ const layout = new qx.ui.layout.Grow();
+ this._setLayout(layout);
+
+    this.__populateLayout();
+ },
+
+ events: {
+    "modelLeased": "qx.event.type.Data",
+ },
+
+ properties: {
+ anatomicalModelsData: {
+ check: "Object",
+ init: null,
+ nullable: true,
+      apply: "__populateLayout"
+ },
+ },
+
+ members: {
+    __populateLayout: function() {
+ this._removeAll();
+
+ const anatomicalModelsData = this.getAnatomicalModelsData();
+ if (anatomicalModelsData) {
+        const card = this.__createCard(anatomicalModelsData);
+ this._add(card);
+ } else {
+ const selectModelLabel = new qx.ui.basic.Label().set({
+ value: this.tr("Select a model for more details"),
+ font: "text-16",
+ alignX: "center",
+ alignY: "middle",
+ allowGrowX: true,
+ allowGrowY: true,
+ });
+ this._add(selectModelLabel);
+ }
+ },
+
+    __createCard: function(anatomicalModelsData) {
+ console.log(anatomicalModelsData);
+
+ const cardGrid = new qx.ui.layout.Grid(16, 16);
+ const cardLayout = new qx.ui.container.Composite(cardGrid);
+
+ const description = anatomicalModelsData["Description"];
+ description.split(" - ").forEach((desc, idx) => {
+ const titleLabel = new qx.ui.basic.Label().set({
+ value: desc,
+ font: "text-16",
+ alignX: "center",
+ alignY: "middle",
+ allowGrowX: true,
+ allowGrowY: true,
+ });
+ cardLayout.add(titleLabel, {
+ column: 0,
+ row: idx,
+ colSpan: 2,
+ });
+ });
+
+ const thumbnail = new qx.ui.basic.Image().set({
+ source: anatomicalModelsData["Thumbnail"],
+ alignY: "middle",
+ scale: true,
+ allowGrowX: true,
+ allowGrowY: true,
+ allowShrinkX: true,
+ allowShrinkY: true,
+ maxWidth: 256,
+ maxHeight: 256,
+ });
+ cardLayout.add(thumbnail, {
+ column: 0,
+ row: 2,
+ });
+
+ const features = anatomicalModelsData["Features"];
+ const featuresGrid = new qx.ui.layout.Grid(8, 8);
+ const featuresLayout = new qx.ui.container.Composite(featuresGrid);
+ let idx = 0;
+ [
+ "Name",
+ "Version",
+ "Sex",
+ "Age",
+ "Weight",
+ "Height",
+ "Date",
+ "Ethnicity",
+ "Functionality",
+ ].forEach(key => {
+ if (key.toLowerCase() in features) {
+ const titleLabel = new qx.ui.basic.Label().set({
+ value: key,
+ font: "text-14",
+ alignX: "right",
+ });
+ featuresLayout.add(titleLabel, {
+ column: 0,
+ row: idx,
+ });
+
+ const nameLabel = new qx.ui.basic.Label().set({
+ value: features[key.toLowerCase()],
+ font: "text-14",
+ alignX: "left",
+ });
+ featuresLayout.add(nameLabel, {
+ column: 1,
+ row: idx,
+ });
+
+ idx++;
+ }
+ });
+
+ const doiTitle = new qx.ui.basic.Label().set({
+ value: "DOI",
+ font: "text-14",
+ alignX: "right",
+ marginTop: 16,
+ });
+ featuresLayout.add(doiTitle, {
+ column: 0,
+ row: idx,
+ });
+
+ const doiValue = new qx.ui.basic.Label().set({
+ value: anatomicalModelsData["DOI"] ? anatomicalModelsData["DOI"] : "-",
+ font: "text-14",
+ alignX: "left",
+ marginTop: 16,
+ });
+ featuresLayout.add(doiValue, {
+ column: 1,
+ row: idx,
+ });
+
+ cardLayout.add(featuresLayout, {
+ column: 1,
+ row: 2,
+ });
+
+ const buttonsLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(5));
+ if (anatomicalModelsData["leased"]) {
+        const leasedButton = new qx.ui.form.Button().set({
+          label: this.tr("3 seats leased (27 days left)"),
+ appearance: "strong-button",
+ center: true,
+ enabled: false,
+ });
+        buttonsLayout.add(leasedButton, {
+ flex: 1
+ });
+ }
+ const leaseModelButton = new osparc.ui.form.FetchButton().set({
+        label: this.tr("Lease model (for 2 months)"),
+ appearance: "strong-button",
+ center: true,
+ });
+ leaseModelButton.addListener("execute", () => {
+ leaseModelButton.setFetching(true);
+ setTimeout(() => {
+ leaseModelButton.setFetching(false);
+ this.fireDataEvent("modelLeased", this.getAnatomicalModelsData()["ID"]);
+ }, 2000);
+ });
+ buttonsLayout.add(leaseModelButton, {
+ flex: 1
+ });
+ cardLayout.add(buttonsLayout, {
+ column: 0,
+ row: 3,
+ colSpan: 2,
+ });
+
+ return cardLayout;
+ },
+ }
+});
diff --git a/services/static-webserver/client/source/class/osparc/vipMarket/AnatomicalModelListItem.js b/services/static-webserver/client/source/class/osparc/vipMarket/AnatomicalModelListItem.js
new file mode 100644
index 00000000000..4beac36d736
--- /dev/null
+++ b/services/static-webserver/client/source/class/osparc/vipMarket/AnatomicalModelListItem.js
@@ -0,0 +1,188 @@
+/* ************************************************************************
+
+ osparc - the simcore frontend
+
+ https://osparc.io
+
+ Copyright:
+ 2024 IT'IS Foundation, https://itis.swiss
+
+ License:
+ MIT: https://opensource.org/licenses/MIT
+
+ Authors:
+ * Odei Maiz (odeimaiz)
+
+************************************************************************ */
+
+qx.Class.define("osparc.vipMarket.AnatomicalModelListItem", {
+ extend: qx.ui.core.Widget,
+ implement : [qx.ui.form.IModel, osparc.filter.IFilterable],
+ include : [qx.ui.form.MModelProperty, osparc.filter.MFilterable],
+
+ construct: function() {
+ this.base(arguments);
+
+ const layout = new qx.ui.layout.Grid(5, 5);
+ layout.setColumnWidth(0, 64);
+ layout.setRowFlex(0, 1);
+ layout.setColumnFlex(1, 1);
+ layout.setColumnAlign(0, "center", "middle");
+ layout.setColumnAlign(1, "left", "middle");
+ this._setLayout(layout);
+
+ this.set({
+ padding: 5,
+ height: 48,
+ alignY: "middle",
+ decorator: "rounded",
+ });
+
+ this.addListener("pointerover", this._onPointerOver, this);
+ this.addListener("pointerout", this._onPointerOut, this);
+ },
+
+ events: {
+ /** (Fired by {@link qx.ui.form.List}) */
+ "action" : "qx.event.type.Event"
+ },
+
+ properties: {
+ appearance: {
+ refine: true,
+ init: "selectable"
+ },
+
+ modelId: {
+ check: "Number",
+ init: null,
+ nullable: false,
+      event: "changeModelId",
+ },
+
+ thumbnail: {
+ check: "String",
+ init: null,
+ nullable: true,
+ event: "changeThumbnail",
+ apply: "__applyThumbnail",
+ },
+
+ name: {
+ check: "String",
+ init: null,
+ nullable: false,
+ event: "changeName",
+ apply: "__applyName",
+ },
+
+ date: {
+ check: "Date",
+ init: null,
+ nullable: true,
+ event: "changeDate",
+ },
+
+ leased: {
+ check: "Boolean",
+ init: false,
+ nullable: true,
+ event: "changeLeased",
+ apply: "__applyLeased",
+ },
+ },
+
+ members: { // eslint-disable-line qx-rules/no-refs-in-members
+ // overridden
+ _forwardStates: {
+ focused : true,
+ hovered : true,
+ selected : true,
+ dragover : true
+ },
+
+ _createChildControlImpl: function(id) {
+ let control;
+ switch (id) {
+ case "thumbnail":
+ control = new qx.ui.basic.Image().set({
+ alignY: "middle",
+ scale: true,
+ allowGrowX: true,
+ allowGrowY: true,
+ allowShrinkX: true,
+ allowShrinkY: true,
+ maxWidth: 32,
+ maxHeight: 32
+ });
+ this._add(control, {
+ row: 0,
+ column: 0,
+ rowSpan: 2
+ });
+ break;
+ case "name":
+ control = new qx.ui.basic.Label().set({
+ font: "text-14",
+ alignY: "middle",
+ });
+ this._add(control, {
+ row: 0,
+ column: 1
+ });
+ break;
+ }
+
+ return control || this.base(arguments, id);
+ },
+
+ __applyThumbnail: function(value) {
+ this.getChildControl("thumbnail").setSource(value);
+ },
+
+ __applyName: function(value) {
+ this.getChildControl("name").setValue(value);
+ },
+
+ __applyLeased: function(value) {
+ if (value) {
+ this.setBackgroundColor("strong-main");
+ }
+ },
+
+ _onPointerOver: function() {
+ this.addState("hovered");
+ },
+
+ _onPointerOut : function() {
+ this.removeState("hovered");
+ },
+
+ _filter: function() {
+ this.exclude();
+ },
+
+ _unfilter: function() {
+ this.show();
+ },
+
+ _shouldApplyFilter: function(data) {
+ if (data.text) {
+ const checks = [
+ this.getName(),
+ ];
+ if (checks.filter(check => check && check.toLowerCase().trim().includes(data.text)).length == 0) {
+ return true;
+ }
+ }
+ return false;
+ },
+
+ _shouldReactToFilter: function(data) {
+ if (data.text && data.text.length > 1) {
+ return true;
+ }
+ return false;
+ }
+ }
+});
diff --git a/services/static-webserver/client/source/class/osparc/vipMarket/Market.js b/services/static-webserver/client/source/class/osparc/vipMarket/Market.js
new file mode 100644
index 00000000000..dd6a2250c44
--- /dev/null
+++ b/services/static-webserver/client/source/class/osparc/vipMarket/Market.js
@@ -0,0 +1,43 @@
+/* ************************************************************************
+
+ osparc - the simcore frontend
+
+ https://osparc.io
+
+ Copyright:
+ 2024 IT'IS Foundation, https://itis.swiss
+
+ License:
+ MIT: https://opensource.org/licenses/MIT
+
+ Authors:
+ * Odei Maiz (odeimaiz)
+
+************************************************************************ */
+
+qx.Class.define("osparc.vipMarket.Market", {
+ extend: osparc.ui.window.TabbedView,
+
+ construct: function() {
+ this.base(arguments);
+
+ const miniWallet = osparc.desktop.credits.BillingCenter.createMiniWalletView().set({
+ paddingRight: 10
+ });
+ this.addWidgetOnTopOfTheTabs(miniWallet);
+
+ this.__vipMarketPage = this.__getVipMarketPage();
+ },
+
+ members: {
+ __vipMarketPage: null,
+
+ __getVipMarketPage: function() {
+ const title = this.tr("ViP Models");
+ const iconSrc = "@FontAwesome5Solid/users/22";
+ const vipMarketView = new osparc.vipMarket.VipMarket();
+ const page = this.addTab(title, iconSrc, vipMarketView);
+ return page;
+ },
+ }
+});
diff --git a/services/static-webserver/client/source/class/osparc/vipMarket/MarketWindow.js b/services/static-webserver/client/source/class/osparc/vipMarket/MarketWindow.js
new file mode 100644
index 00000000000..d01207f883f
--- /dev/null
+++ b/services/static-webserver/client/source/class/osparc/vipMarket/MarketWindow.js
@@ -0,0 +1,54 @@
+/* ************************************************************************
+
+ osparc - the simcore frontend
+
+ https://osparc.io
+
+ Copyright:
+ 2024 IT'IS Foundation, https://itis.swiss
+
+ License:
+ MIT: https://opensource.org/licenses/MIT
+
+ Authors:
+ * Odei Maiz (odeimaiz)
+
+************************************************************************ */
+
+qx.Class.define("osparc.vipMarket.MarketWindow", {
+ extend: osparc.ui.window.TabbedWindow,
+
+ construct: function() {
+ this.base(arguments, "store", this.tr("Market"));
+
+
+ osparc.utils.Utils.setIdToWidget(this, "storeWindow");
+
+ const width = 1035;
+ const height = 700;
+ this.set({
+ width,
+ height
+ })
+
+ const vipMarket = this.__vipMarket = new osparc.vipMarket.Market();
+ this._setTabbedView(vipMarket);
+ },
+
+ statics: {
+ openWindow: function() {
+ const storeWindow = new osparc.vipMarket.MarketWindow();
+ storeWindow.center();
+ storeWindow.open();
+ return storeWindow;
+ }
+ },
+
+ members: {
+ __vipMarket: null,
+
+ openVipMarket: function() {
+ return this.__vipMarket.openVipMarket();
+ },
+ }
+});
diff --git a/services/static-webserver/client/source/class/osparc/vipMarket/SortModelsButtons.js b/services/static-webserver/client/source/class/osparc/vipMarket/SortModelsButtons.js
new file mode 100644
index 00000000000..da3ed278f2b
--- /dev/null
+++ b/services/static-webserver/client/source/class/osparc/vipMarket/SortModelsButtons.js
@@ -0,0 +1,101 @@
+/* ************************************************************************
+
+ osparc - the simcore frontend
+
+ https://osparc.io
+
+ Copyright:
+ 2024 IT'IS Foundation, https://itis.swiss
+
+ License:
+ MIT: https://opensource.org/licenses/MIT
+
+ Authors:
+ * Odei Maiz (odeimaiz)
+
+************************************************************************ */
+
+qx.Class.define("osparc.vipMarket.SortModelsButtons", {
+ extend: qx.ui.form.MenuButton,
+
+ construct: function() {
+ this.base(arguments, this.tr("Sort"), "@FontAwesome5Solid/chevron-down/10");
+
+ this.set({
+ iconPosition: "left",
+ marginRight: 8
+ });
+
+ const sortByMenu = new qx.ui.menu.Menu().set({
+ font: "text-14"
+ });
+ this.setMenu(sortByMenu);
+
+ const nameAsc = new qx.ui.menu.Button().set({
+ label: this.tr("Name"),
+ icon: "@FontAwesome5Solid/sort-alpha-down/14"
+ });
+ nameAsc["sortBy"] = "name";
+ nameAsc["orderBy"] = "down";
+ const nameDesc = new qx.ui.menu.Button().set({
+ label: this.tr("Name"),
+ icon: "@FontAwesome5Solid/sort-alpha-up/14"
+ });
+ nameDesc["sortBy"] = "name";
+ nameDesc["orderBy"] = "up";
+
+ const dateDesc = new qx.ui.menu.Button().set({
+ label: this.tr("Date"),
+ icon: "@FontAwesome5Solid/arrow-down/14"
+ });
+ dateDesc["sortBy"] = "date";
+ dateDesc["orderBy"] = "down";
+ const dateAsc = new qx.ui.menu.Button().set({
+ label: this.tr("Date"),
+ icon: "@FontAwesome5Solid/arrow-up/14"
+ });
+ dateAsc["sortBy"] = "date";
+    dateAsc["orderBy"] = "up";
+
+ [
+ nameAsc,
+ nameDesc,
+ dateDesc,
+ dateAsc,
+ ].forEach((btn, idx) => {
+ sortByMenu.add(btn);
+
+ btn.addListener("execute", () => this.__buttonExecuted(btn));
+
+ if (idx === 0) {
+ btn.execute();
+ }
+ });
+ },
+
+ events: {
+ "sortBy": "qx.event.type.Data"
+ },
+
+ statics: {
+ DefaultSorting: {
+ "sort": "name",
+ "order": "down"
+ }
+ },
+
+ members: {
+ __buttonExecuted: function(btn) {
+ this.set({
+ label: btn.getLabel(),
+ icon: btn.getIcon()
+ });
+
+ const data = {
+ "sort": btn["sortBy"],
+ "order": btn["orderBy"]
+ };
+ this.fireDataEvent("sortBy", data);
+ }
+ }
+});
diff --git a/services/static-webserver/client/source/class/osparc/vipMarket/VipMarket.js b/services/static-webserver/client/source/class/osparc/vipMarket/VipMarket.js
new file mode 100644
index 00000000000..ff0af06af15
--- /dev/null
+++ b/services/static-webserver/client/source/class/osparc/vipMarket/VipMarket.js
@@ -0,0 +1,215 @@
+/* ************************************************************************
+
+ osparc - the simcore frontend
+
+ https://osparc.io
+
+ Copyright:
+ 2024 IT'IS Foundation, https://itis.swiss
+
+ License:
+ MIT: https://opensource.org/licenses/MIT
+
+ Authors:
+ * Odei Maiz (odeimaiz)
+
+************************************************************************ */
+
+qx.Class.define("osparc.vipMarket.VipMarket", {
+ extend: qx.ui.core.Widget,
+
+ construct: function() {
+ this.base(arguments);
+
+ this._setLayout(new qx.ui.layout.VBox(10));
+
+ this.__buildLayout();
+ },
+
+ statics: {
+ curateAnatomicalModels: function(anatomicalModelsRaw) {
+ const anatomicalModels = [];
+ const models = anatomicalModelsRaw["availableDownloads"];
+ models.forEach(model => {
+ const curatedModel = {};
+ Object.keys(model).forEach(key => {
+ if (key === "Features") {
+ let featuresRaw = model["Features"];
+ featuresRaw = featuresRaw.substring(1, featuresRaw.length-1); // remove brackets
+ featuresRaw = featuresRaw.split(","); // split the string by commas
+ const features = {};
+ featuresRaw.forEach(pair => { // each pair is "key: value"
+ const keyValue = pair.split(":");
+ features[keyValue[0].trim()] = keyValue[1].trim()
+ });
+ curatedModel["Features"] = features;
+ } else {
+ curatedModel[key] = model[key];
+ }
+ if (key === "ID") {
+ curatedModel["leased"] = [22].includes(model[key]);
+ }
+ });
+ anatomicalModels.push(curatedModel);
+ });
+ return anatomicalModels;
+ },
+ },
+
+ members: {
+ __anatomicalModelsModel: null,
+ __anatomicalModels: null,
+ __sortByButton: null,
+
+ __buildLayout: function() {
+ const toolbarLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(10)).set({
+ alignY: "middle",
+ });
+ this._add(toolbarLayout);
+
+ const sortModelsButtons = this.__sortByButton = new osparc.vipMarket.SortModelsButtons().set({
+ alignY: "bottom",
+ maxHeight: 27,
+ });
+ toolbarLayout.add(sortModelsButtons);
+
+ const filter = new osparc.filter.TextFilter("text", "vipModels").set({
+ alignY: "middle",
+ allowGrowY: false,
+ minWidth: 170,
+ });
+ this.addListener("appear", () => filter.getChildControl("textfield").focus());
+ toolbarLayout.add(filter);
+
+ const modelsLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(10));
+ this._add(modelsLayout, {
+ flex: 1
+ });
+
+ const modelsUIList = new qx.ui.form.List().set({
+ decorator: "no-border",
+ spacing: 5,
+ minWidth: 250,
+ maxWidth: 250
+ });
+ modelsLayout.add(modelsUIList)
+
+ const anatomicalModelsModel = this.__anatomicalModelsModel = new qx.data.Array();
+ const membersCtrl = new qx.data.controller.List(anatomicalModelsModel, modelsUIList, "name");
+ membersCtrl.setDelegate({
+ createItem: () => new osparc.vipMarket.AnatomicalModelListItem(),
+ bindItem: (ctrl, item, id) => {
+ ctrl.bindProperty("id", "modelId", null, item, id);
+ ctrl.bindProperty("thumbnail", "thumbnail", null, item, id);
+ ctrl.bindProperty("name", "name", null, item, id);
+ ctrl.bindProperty("date", "date", null, item, id);
+ ctrl.bindProperty("leased", "leased", null, item, id);
+ },
+ configureItem: item => {
+ item.subscribeToFilterGroup("vipModels");
+ },
+ });
+
+ const loadingModel = {
+ id: 0,
+ thumbnail: "@FontAwesome5Solid/spinner/32",
+ name: this.tr("Loading"),
+ };
+ this.__anatomicalModelsModel.append(qx.data.marshal.Json.createModel(loadingModel));
+
+ const anatomicModelDetails = new osparc.vipMarket.AnatomicalModelDetails().set({
+ padding: 20,
+ });
+ modelsLayout.add(anatomicModelDetails, {
+ flex: 1
+ });
+
+ modelsUIList.addListener("changeSelection", e => {
+ const selection = e.getData();
+ if (selection.length) {
+ const modelId = selection[0].getModelId();
+ const modelFound = this.__anatomicalModels.find(anatomicalModel => anatomicalModel["ID"] === modelId);
+ if (modelFound) {
+ anatomicModelDetails.setAnatomicalModelsData(modelFound);
+ return;
+ }
+ }
+ anatomicModelDetails.setAnatomicalModelsData(null);
+ }, this);
+
+ fetch("https://itis.swiss/PD_DirectDownload/getDownloadableItems/AnatomicalModels", {
+ method:"POST"
+ })
+ .then(resp => resp.json())
+ .then(anatomicalModelsRaw => {
+ this.__anatomicalModels = this.self().curateAnatomicalModels(anatomicalModelsRaw);
+ this.__populateModels();
+
+ anatomicModelDetails.addListener("modelLeased", e => {
+ const modelId = e.getData();
+ const found = this.__anatomicalModels.find(model => model["ID"] === modelId);
+ if (found) {
+ found["leased"] = true;
+ this.__populateModels();
+ anatomicModelDetails.setAnatomicalModelsData(found);
+ };
+ }, this);
+ })
+ .catch(err => console.error(err));
+ },
+
+ __populateModels: function() {
+ const models = [];
+ this.__anatomicalModels.forEach(model => {
+ const anatomicalModel = {};
+ anatomicalModel["id"] = model["ID"];
+ anatomicalModel["thumbnail"] = model["Thumbnail"];
+ anatomicalModel["name"] = model["Features"]["name"] + " " + model["Features"]["version"];
+ anatomicalModel["date"] = new Date(model["Features"]["date"]);
+ anatomicalModel["leased"] = model["leased"];
+ models.push(anatomicalModel);
+ });
+
+ this.__anatomicalModelsModel.removeAll();
+ const sortModel = sortBy => {
+ models.sort((a, b) => {
+ // first criteria
+ if (b["leased"] !== a["leased"]) {
+ // leased first
+ return b["leased"] - a["leased"];
+ }
+ // second criteria
+ if (sortBy) {
+ if (sortBy["sort"] === "name") {
+ if (sortBy["order"] === "down") {
+ // A -> Z
+ return a["name"].localeCompare(b["name"]);
+ } else {
+ return b["name"].localeCompare(a["name"]);
+ }
+ } else if (sortBy["sort"] === "date") {
+ if (sortBy["order"] === "down") {
+ // Now -> Yesterday
+ return b["date"] - a["date"];
+ } else {
+ return a["date"] - b["date"];
+ }
+ }
+ }
+ // default criteria
+ // A -> Z
+ return a["name"].localeCompare(b["name"]);
+ });
+ };
+ sortModel();
+ models.forEach(model => this.__anatomicalModelsModel.append(qx.data.marshal.Json.createModel(model)));
+
+ this.__sortByButton.addListener("sortBy", e => {
+ this.__anatomicalModelsModel.removeAll();
+ const sortBy = e.getData();
+ sortModel(sortBy);
+ models.forEach(model => this.__anatomicalModelsModel.append(qx.data.marshal.Json.createModel(model)));
+ }, this);
+ },
+ }
+});
From 931595e47b0a0f37664e8de98102fe69cbd62751 Mon Sep 17 00:00:00 2001
From: Matus Drobuliak <60785969+matusdrobuliak66@users.noreply.github.com>
Date: Mon, 2 Dec 2024 14:21:48 +0100
Subject: [PATCH 06/16] =?UTF-8?q?=E2=9C=A8=20introduce=20search=20paramete?=
=?UTF-8?q?r=20to=20the=20listing=20workspaces=20(#6872)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Odei Maiz <33152403+odeimaiz@users.noreply.github.com>
---
.../api/v0/openapi.yaml | 2 +-
.../workspaces/_models.py | 12 ++-
.../workspaces/_workspaces_api.py | 2 +
.../workspaces/_workspaces_db.py | 6 ++
.../workspaces/_workspaces_handlers.py | 1 +
.../with_dbs/04/workspaces/test_workspaces.py | 84 ++++++++++++++++++-
6 files changed, 102 insertions(+), 5 deletions(-)
diff --git a/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml b/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
index 33979c6bf3d..9cca4bafd06 100644
--- a/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
+++ b/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
@@ -4540,7 +4540,7 @@ paths:
'403':
description: ProjectInvalidRightsError
'404':
- description: UserDefaultWalletNotFoundError, ProjectNotFoundError
+ description: ProjectNotFoundError, UserDefaultWalletNotFoundError
'409':
description: ProjectTooManyProjectOpenedError
'422':
diff --git a/services/web/server/src/simcore_service_webserver/workspaces/_models.py b/services/web/server/src/simcore_service_webserver/workspaces/_models.py
index 362d68884c7..af35fe4b63f 100644
--- a/services/web/server/src/simcore_service_webserver/workspaces/_models.py
+++ b/services/web/server/src/simcore_service_webserver/workspaces/_models.py
@@ -1,4 +1,5 @@
import logging
+from typing import Annotated
from models_library.basic_types import IDStr
from models_library.rest_base import RequestParameters, StrictRequestParameters
@@ -11,8 +12,9 @@
from models_library.rest_pagination import PageQueryParameters
from models_library.trash import RemoveQueryParams
from models_library.users import GroupID, UserID
+from models_library.utils.common_validators import empty_str_to_none_pre_validator
from models_library.workspaces import WorkspaceID
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, BeforeValidator, ConfigDict, Field
from servicelib.request_keys import RQT_USERID_KEY
from .._constants import RQ_PRODUCT_KEY
@@ -46,6 +48,14 @@ class WorkspacesFilters(Filters):
default=False,
description="Set to true to list trashed, false to list non-trashed (default), None to list all",
)
+ text: Annotated[
+ str | None, BeforeValidator(empty_str_to_none_pre_validator)
+ ] = Field(
+ default=None,
+ description="Multi column full text search",
+ max_length=100,
+ examples=["My Workspace"],
+ )
class WorkspacesListQueryParams(
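
The new `text` filter above coerces empty strings to None before validation; a small sketch of that behaviour, with a local `_empty_str_to_none` standing in for `empty_str_to_none_pre_validator`:

    from typing import Annotated

    from pydantic import BaseModel, BeforeValidator, Field


    def _empty_str_to_none(value):
        # mirror of the pre-validator: treat "" (or whitespace) as "no filter"
        if isinstance(value, str) and value.strip() == "":
            return None
        return value


    class _Filters(BaseModel):
        text: Annotated[str | None, BeforeValidator(_empty_str_to_none)] = Field(
            default=None, max_length=100
        )


    assert _Filters(text="").text is None
    assert _Filters(text="My Workspace").text == "My Workspace"
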
diff --git a/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_api.py b/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_api.py
index 3fd6633bb06..b4881c2816c 100644
--- a/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_api.py
+++ b/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_api.py
@@ -91,6 +91,7 @@ async def list_workspaces(
user_id: UserID,
product_name: ProductName,
filter_trashed: bool | None,
+ filter_by_text: str | None,
offset: NonNegativeInt,
limit: int,
order_by: OrderBy,
@@ -100,6 +101,7 @@ async def list_workspaces(
user_id=user_id,
product_name=product_name,
filter_trashed=filter_trashed,
+ filter_by_text=filter_by_text,
offset=offset,
limit=limit,
order_by=order_by,
diff --git a/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_db.py b/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_db.py
index 7c55e0a9428..3835e82f9e0 100644
--- a/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_db.py
+++ b/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_db.py
@@ -114,6 +114,7 @@ async def list_workspaces_for_user(
user_id: UserID,
product_name: ProductName,
filter_trashed: bool | None,
+ filter_by_text: str | None,
offset: NonNegativeInt,
limit: NonNegativeInt,
order_by: OrderBy,
@@ -140,6 +141,11 @@ async def list_workspaces_for_user(
if filter_trashed
else workspaces.c.trashed.is_(None)
)
+ if filter_by_text is not None:
+ base_query = base_query.where(
+ (workspaces.c.name.ilike(f"%{filter_by_text}%"))
+ | (workspaces.c.description.ilike(f"%{filter_by_text}%"))
+ )
# Select total count from base_query
subquery = base_query.subquery()
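
A simplified sketch of how the optional text filter narrows the listing query with a case-insensitive match on name or description; the table definition below is a stand-in for the real `workspaces` table:

    import sqlalchemy as sa

    metadata = sa.MetaData()
    workspaces = sa.Table(
        "workspaces",
        metadata,
        sa.Column("workspace_id", sa.Integer, primary_key=True),
        sa.Column("name", sa.String),
        sa.Column("description", sa.String),
    )


    def apply_text_filter(base_query, filter_by_text: str | None):
        # skipped entirely when no text filter is given
        if filter_by_text is None:
            return base_query
        pattern = f"%{filter_by_text}%"
        return base_query.where(
            workspaces.c.name.ilike(pattern) | workspaces.c.description.ilike(pattern)
        )


    list_query = apply_text_filter(sa.select(workspaces), "first")
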
diff --git a/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_handlers.py b/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_handlers.py
index 9889e286dda..c1f706f259a 100644
--- a/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_handlers.py
+++ b/services/web/server/src/simcore_service_webserver/workspaces/_workspaces_handlers.py
@@ -77,6 +77,7 @@ async def list_workspaces(request: web.Request):
user_id=req_ctx.user_id,
product_name=req_ctx.product_name,
filter_trashed=query_params.filters.trashed,
+ filter_by_text=query_params.filters.text,
offset=query_params.offset,
limit=query_params.limit,
order_by=OrderBy.model_construct(**query_params.order_by.model_dump()),
diff --git a/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces.py b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces.py
index 5e7d81afd2b..362eca1d82b 100644
--- a/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces.py
+++ b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces.py
@@ -1,3 +1,5 @@
+from collections.abc import AsyncIterator
+
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
@@ -46,6 +48,7 @@ async def test_workspaces_user_role_permissions(
logged_user: UserInfoDict,
user_project: ProjectDict,
expected: ExpectedResponse,
+ workspaces_clean_db: AsyncIterator[None],
):
assert client.app
@@ -60,6 +63,7 @@ async def test_workspaces_workflow(
logged_user: UserInfoDict,
user_project: ProjectDict,
expected: HTTPStatus,
+ workspaces_clean_db: AsyncIterator[None],
):
assert client.app
@@ -139,13 +143,87 @@ async def test_workspaces_workflow(
@pytest.mark.parametrize("user_role,expected", [(UserRole.USER, status.HTTP_200_OK)])
-async def test_project_workspace_movement_full_workflow(
+async def test_list_workspaces_with_text_search(
client: TestClient,
logged_user: UserInfoDict,
user_project: ProjectDict,
expected: HTTPStatus,
+ workspaces_clean_db: AsyncIterator[None],
):
assert client.app
- # NOTE: MD: not yet implemented
- # SEE https://github.com/ITISFoundation/osparc-simcore/issues/6778
+ # list user workspaces
+ url = client.app.router["list_workspaces"].url_for()
+ resp = await client.get(f"{url}")
+ data, _ = await assert_status(resp, status.HTTP_200_OK)
+ assert data == []
+
+ # CREATE a new workspace
+ url = client.app.router["create_workspace"].url_for()
+ resp = await client.post(
+ f"{url}",
+ json={
+ "name": "My first workspace",
+ "description": "Custom description",
+ "thumbnail": None,
+ },
+ )
+ data, _ = await assert_status(resp, status.HTTP_201_CREATED)
+ added_workspace = WorkspaceGet.model_validate(data)
+
+ # CREATE a new workspace
+ url = client.app.router["create_workspace"].url_for()
+ resp = await client.post(
+ f"{url}",
+ json={
+ "name": "My second workspace",
+ "description": "Sharing important projects",
+ "thumbnail": None,
+ },
+ )
+ data, _ = await assert_status(resp, status.HTTP_201_CREATED)
+ added_workspace = WorkspaceGet.model_validate(data)
+
+ # LIST user workspaces
+ url = client.app.router["list_workspaces"].url_for()
+ resp = await client.get(f"{url}")
+ data, _, meta, links = await assert_status(
+ resp, status.HTTP_200_OK, include_meta=True, include_links=True
+ )
+ assert len(data) == 2
+
+ # LIST user workspaces
+ url = (
+ client.app.router["list_workspaces"]
+ .url_for()
+ .with_query({"filters": '{"text": "first"}'})
+ )
+ resp = await client.get(f"{url}")
+ data, _, meta, links = await assert_status(
+ resp, status.HTTP_200_OK, include_meta=True, include_links=True
+ )
+ assert len(data) == 1
+
+ # LIST user workspaces
+ url = (
+ client.app.router["list_workspaces"]
+ .url_for()
+ .with_query({"filters": '{"text": "important"}'})
+ )
+ resp = await client.get(f"{url}")
+ data, _, meta, links = await assert_status(
+ resp, status.HTTP_200_OK, include_meta=True, include_links=True
+ )
+ assert len(data) == 1
+
+ # LIST user workspaces
+ url = (
+ client.app.router["list_workspaces"]
+ .url_for()
+ .with_query({"filters": '{"text": "non-existing"}'})
+ )
+ resp = await client.get(f"{url}")
+ data, _, meta, links = await assert_status(
+ resp, status.HTTP_200_OK, include_meta=True, include_links=True
+ )
+ assert len(data) == 0
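
As the test shows, the filter travels as a JSON-encoded object in the `filters` query parameter. A stdlib-only sketch of how a client builds such a URL (the `/v0/workspaces` path is illustrative):

    import json
    from urllib.parse import urlencode

    params = {"filters": json.dumps({"text": "first"})}
    url = f"/v0/workspaces?{urlencode(params)}"
    # -> /v0/workspaces?filters=%7B%22text%22%3A+%22first%22%7D
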
From dc3575752b3cf5e3e2950fa85687d35e3817fd3a Mon Sep 17 00:00:00 2001
From: Sylvain <35365065+sanderegg@users.noreply.github.com>
Date: Mon, 2 Dec 2024 18:30:32 +0100
Subject: [PATCH 07/16] =?UTF-8?q?=E2=99=BB=EF=B8=8FPydantic=20V2=20and=20S?=
=?UTF-8?q?QLAlchemy=20warning=20fixes=20(#6877)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../core/settings.py | 9 ----
.../test_modules_clusters_management_core.py | 13 ++++--
.../modules/comp_scheduler/_scheduler_base.py | 6 +--
.../modules/db/repositories/comp_runs.py | 4 +-
.../db/repositories/comp_tasks/_utils.py | 46 +++++++++----------
.../test_api_route_computations.py | 10 ++--
.../test_api_route_computations_tasks.py | 0
.../comp_scheduler/test_scheduler_dask.py | 4 +-
.../tests/unit/with_dbs/conftest.py | 6 +--
9 files changed, 43 insertions(+), 55 deletions(-)
rename services/director-v2/tests/unit/with_dbs/{ => comp_scheduler}/test_api_route_computations.py (99%)
rename services/director-v2/tests/unit/with_dbs/{ => comp_scheduler}/test_api_route_computations_tasks.py (100%)
diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py
index 3e85015ee4a..c4f656c68fb 100644
--- a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py
+++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py
@@ -381,15 +381,6 @@ def LOG_LEVEL(self) -> LogLevel: # noqa: N802
def _valid_log_level(cls, value: str) -> str:
return cls.validate_log_level(value)
- @field_validator("SERVICE_TRACKING_HEARTBEAT", mode="before")
- @classmethod
- def _validate_interval(
- cls, value: str | datetime.timedelta
- ) -> int | datetime.timedelta:
- if isinstance(value, str):
- return int(value)
- return value
-
def get_application_settings(app: FastAPI) -> ApplicationSettings:
return cast(ApplicationSettings, app.state.settings)
diff --git a/services/clusters-keeper/tests/unit/test_modules_clusters_management_core.py b/services/clusters-keeper/tests/unit/test_modules_clusters_management_core.py
index 438e69ee72e..d06a6aaeedd 100644
--- a/services/clusters-keeper/tests/unit/test_modules_clusters_management_core.py
+++ b/services/clusters-keeper/tests/unit/test_modules_clusters_management_core.py
@@ -4,6 +4,7 @@
import asyncio
import dataclasses
+import datetime
from collections.abc import Awaitable, Callable
from typing import Final
from unittest.mock import MagicMock
@@ -36,7 +37,9 @@ def wallet_id(faker: Faker, request: pytest.FixtureRequest) -> WalletID | None:
return faker.pyint(min_value=1) if request.param == "with_wallet" else None
-_FAST_TIME_BEFORE_TERMINATION_SECONDS: Final[int] = 10
+_FAST_TIME_BEFORE_TERMINATION_SECONDS: Final[datetime.timedelta] = datetime.timedelta(
+ seconds=10
+)
@pytest.fixture
@@ -149,7 +152,7 @@ async def test_cluster_management_core_properly_removes_unused_instances(
mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock()
# running the cluster management task after the heartbeat came in shall not remove anything
- await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS + 1)
+ await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS.total_seconds() + 1)
await cluster_heartbeat(initialized_app, user_id=user_id, wallet_id=wallet_id)
await check_clusters(initialized_app)
await _assert_cluster_exist_and_state(
@@ -161,7 +164,7 @@ async def test_cluster_management_core_properly_removes_unused_instances(
mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock()
# after waiting the termination time, running the task shall remove the cluster
- await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS + 1)
+ await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS.total_seconds() + 1)
await check_clusters(initialized_app)
await _assert_cluster_exist_and_state(
ec2_client, instances=created_clusters, state="terminated"
@@ -201,7 +204,7 @@ async def test_cluster_management_core_properly_removes_workers_on_shutdown(
ec2_client, instance_ids=worker_instance_ids, state="running"
)
# after waiting the termination time, running the task shall remove the cluster
- await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS + 1)
+ await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS.total_seconds() + 1)
await check_clusters(initialized_app)
await _assert_cluster_exist_and_state(
ec2_client, instances=created_clusters, state="terminated"
@@ -314,7 +317,7 @@ async def test_cluster_management_core_removes_broken_clusters_after_some_delay(
mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock()
# waiting for the termination time will now terminate the cluster
- await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS + 1)
+ await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS.total_seconds() + 1)
await check_clusters(initialized_app)
await _assert_cluster_exist_and_state(
ec2_client, instances=created_clusters, state="terminated"
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
index a16821d0fba..d8fdccc1663 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
@@ -540,11 +540,7 @@ async def apply(
project_id: ProjectID,
iteration: Iteration,
) -> None:
- """schedules a pipeline for a given user, project and iteration.
-
- Arguments:
- wake_up_callback -- a callback function that is called in a separate thread everytime a pipeline node is completed
- """
+ """apply the scheduling of a pipeline for a given user, project and iteration."""
with log_context(
_logger,
level=logging.INFO,
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
index b746407a8aa..13e01a4276f 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
@@ -143,7 +143,9 @@ async def list(
return [
CompRunsAtDB.model_validate(row)
async for row in conn.execute(
- sa.select(comp_runs).where(sa.and_(*conditions))
+ sa.select(comp_runs).where(
+ sa.and_(True, *conditions) # noqa: FBT003
+ )
)
]
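
The comp_runs change seeds `sa.and_()` with a literal True because calling `and_()` with no arguments is deprecated in SQLAlchemy. A minimal sketch of the same pattern (the table is a stand-in; `sa.true()` works equally well as the seed):

    import sqlalchemy as sa

    metadata = sa.MetaData()
    comp_runs = sa.Table("comp_runs", metadata, sa.Column("user_id", sa.Integer))

    conditions = []  # e.g. [comp_runs.c.user_id == 42] when filters are requested
    stmt = sa.select(comp_runs).where(sa.and_(sa.true(), *conditions))
    # with an empty conditions list this still compiles to a valid WHERE clause
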
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
index 51082b698f1..c5fd0819fcd 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
@@ -89,9 +89,7 @@ async def _get_service_details(
node.version,
product_name,
)
- obj: ServiceMetaDataPublished = ServiceMetaDataPublished.model_construct(
- **service_details
- )
+ obj: ServiceMetaDataPublished = ServiceMetaDataPublished(**service_details)
return obj
@@ -105,7 +103,7 @@ def _compute_node_requirements(
node_defined_resources[resource_name] = node_defined_resources.get(
resource_name, 0
) + min(resource_value.limit, resource_value.reservation)
- return NodeRequirements.model_validate(node_defined_resources)
+ return NodeRequirements(**node_defined_resources)
def _compute_node_boot_mode(node_resources: ServiceResourcesDict) -> BootMode:
@@ -146,12 +144,12 @@ async def _get_node_infos(
None,
)
- result: tuple[ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels] = (
- await asyncio.gather(
- _get_service_details(catalog_client, user_id, product_name, node),
- director_client.get_service_extras(node.key, node.version),
- director_client.get_service_labels(node),
- )
+ result: tuple[
+ ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels
+ ] = await asyncio.gather(
+ _get_service_details(catalog_client, user_id, product_name, node),
+ director_client.get_service_extras(node.key, node.version),
+ director_client.get_service_labels(node),
)
return result
@@ -189,7 +187,7 @@ async def _generate_task_image(
data.update(envs=_compute_node_envs(node_labels))
if node_extras and node_extras.container_spec:
data.update(command=node_extras.container_spec.command)
- return Image.model_validate(data)
+ return Image(**data)
async def _get_pricing_and_hardware_infos(
@@ -247,9 +245,9 @@ async def _get_pricing_and_hardware_infos(
return pricing_info, hardware_info
-_RAM_SAFE_MARGIN_RATIO: Final[float] = (
- 0.1 # NOTE: machines always have less available RAM than advertised
-)
+_RAM_SAFE_MARGIN_RATIO: Final[
+ float
+] = 0.1 # NOTE: machines always have less available RAM than advertised
_CPUS_SAFE_MARGIN: Final[float] = 0.1
@@ -267,11 +265,11 @@ async def _update_project_node_resources_from_hardware_info(
if not hardware_info.aws_ec2_instances:
return
try:
- unordered_list_ec2_instance_types: list[EC2InstanceTypeGet] = (
- await get_instance_type_details(
- rabbitmq_rpc_client,
- instance_type_names=set(hardware_info.aws_ec2_instances),
- )
+ unordered_list_ec2_instance_types: list[
+ EC2InstanceTypeGet
+ ] = await get_instance_type_details(
+ rabbitmq_rpc_client,
+ instance_type_names=set(hardware_info.aws_ec2_instances),
)
assert unordered_list_ec2_instance_types # nosec
@@ -347,7 +345,7 @@ async def generate_tasks_list_from_project(
list_comp_tasks = []
unique_service_key_versions: set[ServiceKeyVersion] = {
- ServiceKeyVersion.model_construct(
+ ServiceKeyVersion(
key=node.key, version=node.version
) # the service key version is frozen
for node in project.workbench.values()
@@ -366,9 +364,7 @@ async def generate_tasks_list_from_project(
for internal_id, node_id in enumerate(project.workbench, 1):
node: Node = project.workbench[node_id]
- node_key_version = ServiceKeyVersion.model_construct(
- key=node.key, version=node.version
- )
+ node_key_version = ServiceKeyVersion(key=node.key, version=node.version)
node_details, node_extras, node_labels = key_version_to_node_infos.get(
node_key_version,
(None, None, None),
@@ -434,8 +430,8 @@ async def generate_tasks_list_from_project(
task_db = CompTaskAtDB(
project_id=project.uuid,
node_id=NodeID(node_id),
- schema=NodeSchema.model_validate(
- node_details.model_dump(
+ schema=NodeSchema(
+ **node_details.model_dump(
exclude_unset=True, by_alias=True, include={"inputs", "outputs"}
)
),
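
The swaps from `model_construct()` to plain construction above hinge on one pydantic-v2 difference: `model_construct()` skips validation entirely, while `Model(**data)` validates. A tiny sketch with a hypothetical model:

    from pydantic import BaseModel


    class ServiceImage(BaseModel):  # hypothetical stand-in model
        name: str
        tag: str = "latest"


    validated = ServiceImage(name="simcore/services/comp/sleeper")  # runs validation
    unchecked = ServiceImage.model_construct(name=123)  # no validation: name stays an int
    assert isinstance(unchecked.name, int)
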
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
similarity index 99%
rename from services/director-v2/tests/unit/with_dbs/test_api_route_computations.py
rename to services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
index 2e75b18c009..4381c9311d4 100644
--- a/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
@@ -88,6 +88,8 @@ def minimal_configuration(
redis_service: RedisSettings,
monkeypatch: pytest.MonkeyPatch,
faker: Faker,
+ with_disabled_auto_scheduling: mock.Mock,
+ with_disabled_scheduler_publisher: mock.Mock,
):
monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false")
monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1")
@@ -588,11 +590,7 @@ async def test_create_computation_with_wallet(
@pytest.mark.parametrize(
"default_pricing_plan",
- [
- PricingPlanGet.model_construct(
- **PricingPlanGet.model_config["json_schema_extra"]["examples"][0]
- )
- ],
+ [PricingPlanGet(**PricingPlanGet.model_config["json_schema_extra"]["examples"][0])],
)
async def test_create_computation_with_wallet_with_invalid_pricing_unit_name_raises_422(
minimal_configuration: None,
@@ -631,7 +629,7 @@ async def test_create_computation_with_wallet_with_invalid_pricing_unit_name_rai
@pytest.mark.parametrize(
"default_pricing_plan",
[
- PricingPlanGet.model_construct(
+ PricingPlanGet(
**PricingPlanGet.model_config["json_schema_extra"]["examples"][0] # type: ignore
)
],
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py
similarity index 100%
rename from services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py
rename to services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
index f6a041b934e..7609f6e956e 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
@@ -1606,7 +1606,9 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
@pytest.fixture
def with_fast_service_heartbeat_s(monkeypatch: pytest.MonkeyPatch) -> int:
seconds = 1
- monkeypatch.setenv("SERVICE_TRACKING_HEARTBEAT", f"{seconds}")
+ monkeypatch.setenv(
+ "SERVICE_TRACKING_HEARTBEAT", f"{datetime.timedelta(seconds=seconds)}"
+ )
return seconds
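
For context on the value being set above: formatting a timedelta with an f-string yields the H:MM:SS form, which is what `SERVICE_TRACKING_HEARTBEAT` now receives. A stdlib-only illustration:

    import datetime

    heartbeat = datetime.timedelta(seconds=1)
    assert f"{heartbeat}" == "0:00:01"
    assert heartbeat.total_seconds() == 1.0
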
diff --git a/services/director-v2/tests/unit/with_dbs/conftest.py b/services/director-v2/tests/unit/with_dbs/conftest.py
index ee8259f9f5b..56784acba13 100644
--- a/services/director-v2/tests/unit/with_dbs/conftest.py
+++ b/services/director-v2/tests/unit/with_dbs/conftest.py
@@ -278,9 +278,9 @@ async def _(user: dict[str, Any], **cluster_kwargs) -> Cluster:
.where(clusters.c.id == created_cluster.id)
):
access_rights_in_db[row.gid] = {
- "read": row[cluster_to_groups.c.read],
- "write": row[cluster_to_groups.c.write],
- "delete": row[cluster_to_groups.c.delete],
+ "read": row.read,
+ "write": row.write,
+ "delete": row.delete,
}
return Cluster(
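Aside: the conftest hunk above switches from indexing rows with Column objects to attribute access. A self-contained sketch (SQLite in-memory, simplified table) of the equivalent modern accessors:

import sqlalchemy as sa

metadata = sa.MetaData()
cluster_to_groups = sa.Table(
    "cluster_to_groups", metadata,
    sa.Column("gid", sa.Integer),
    sa.Column("read", sa.Boolean),
    sa.Column("write", sa.Boolean),
    sa.Column("delete", sa.Boolean),
)

engine = sa.create_engine("sqlite://")
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(cluster_to_groups.insert().values(gid=1, read=True, write=False, delete=False))
    row = conn.execute(sa.select(cluster_to_groups)).one()
    # named-tuple style attribute access, as used in the patch ...
    assert row.read and not row.write
    # ... or explicit mapping access when a Column key is preferred
    assert row._mapping[cluster_to_groups.c.read] is True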
From dfd146331799d9b77786f940b0c6e4a09de539e3 Mon Sep 17 00:00:00 2001
From: Matus Drobuliak <60785969+matusdrobuliak66@users.noreply.github.com>
Date: Mon, 2 Dec 2024 19:25:59 +0100
Subject: [PATCH 08/16] ♻️ Maintenance: removing/adding EFS env vars :warning: (#6837)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.env-devel | 2 +-
packages/settings-library/src/settings_library/efs.py | 4 ----
services/docker-compose.yml | 3 +--
services/efs-guardian/tests/conftest.py | 1 -
4 files changed, 2 insertions(+), 8 deletions(-)
diff --git a/.env-devel b/.env-devel
index 32facad8357..7703fa8082a 100644
--- a/.env-devel
+++ b/.env-devel
@@ -90,8 +90,8 @@ EFS_GROUP_NAME=efs-group
EFS_DNS_NAME=fs-xxx.efs.us-east-1.amazonaws.com
EFS_MOUNTED_PATH=/tmp/efs
EFS_PROJECT_SPECIFIC_DATA_DIRECTORY=project-specific-data
-EFS_ONLY_ENABLED_FOR_USERIDS=[]
EFS_GUARDIAN_TRACING={}
+EFS_DEFAULT_USER_SERVICE_SIZE_BYTES=10000
# DATCORE_ADAPTER
DATCORE_ADAPTER_TRACING={}
diff --git a/packages/settings-library/src/settings_library/efs.py b/packages/settings-library/src/settings_library/efs.py
index 34c48f9dca6..d8ad2b7395d 100644
--- a/packages/settings-library/src/settings_library/efs.py
+++ b/packages/settings-library/src/settings_library/efs.py
@@ -14,10 +14,6 @@ class AwsEfsSettings(BaseCustomSettings):
EFS_MOUNTED_PATH: Path = Field(
description="This is the path where EFS is mounted to the EC2 machine",
)
- EFS_ONLY_ENABLED_FOR_USERIDS: list[int] = Field(
- description="This is temporary solution so we can enable it for specific users for testing purpose",
- examples=[[1]],
- )
NFS_PROTOCOL = "4.1"
diff --git a/services/docker-compose.yml b/services/docker-compose.yml
index e31261ca20c..691e544b0c0 100644
--- a/services/docker-compose.yml
+++ b/services/docker-compose.yml
@@ -345,7 +345,6 @@ services:
EFS_DNS_NAME: ${EFS_DNS_NAME}
EFS_MOUNTED_PATH: ${EFS_MOUNTED_PATH}
- EFS_ONLY_ENABLED_FOR_USERIDS: ${EFS_ONLY_ENABLED_FOR_USERIDS}
EFS_PROJECT_SPECIFIC_DATA_DIRECTORY: ${EFS_PROJECT_SPECIFIC_DATA_DIRECTORY}
RABBIT_HOST: ${RABBIT_HOST}
@@ -433,8 +432,8 @@ services:
EFS_GROUP_ID: ${EFS_GROUP_ID}
EFS_GROUP_NAME: ${EFS_GROUP_NAME}
EFS_DNS_NAME: ${EFS_DNS_NAME}
+ EFS_DEFAULT_USER_SERVICE_SIZE_BYTES: ${EFS_DEFAULT_USER_SERVICE_SIZE_BYTES}
EFS_MOUNTED_PATH: ${EFS_MOUNTED_PATH}
- EFS_ONLY_ENABLED_FOR_USERIDS: ${EFS_ONLY_ENABLED_FOR_USERIDS}
EFS_PROJECT_SPECIFIC_DATA_DIRECTORY: ${EFS_PROJECT_SPECIFIC_DATA_DIRECTORY}
EFS_GUARDIAN_TRACING: ${EFS_GUARDIAN_TRACING}
TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT}
diff --git a/services/efs-guardian/tests/conftest.py b/services/efs-guardian/tests/conftest.py
index 260e5a74026..96585f4c87b 100644
--- a/services/efs-guardian/tests/conftest.py
+++ b/services/efs-guardian/tests/conftest.py
@@ -72,7 +72,6 @@ def app_environment(
"EFS_DNS_NAME": "fs-xxx.efs.us-east-1.amazonaws.com",
"EFS_MOUNTED_PATH": "/tmp/efs",
"EFS_PROJECT_SPECIFIC_DATA_DIRECTORY": "project-specific-data",
- "EFS_ONLY_ENABLED_FOR_USERIDS": "[]",
"EFS_GUARDIAN_TRACING": "null",
"SC_USER_ID": "8004",
"SC_USER_NAME": "scu",
From 6a7b07360d3895b3d58f6a27f20e88842e25401c Mon Sep 17 00:00:00 2001
From: Matus Drobuliak <60785969+matusdrobuliak66@users.noreply.github.com>
Date: Tue, 3 Dec 2024 13:24:44 +0100
Subject: [PATCH 09/16] 🐛 fix github ci (#6892)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/ci-testing-deploy.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/ci-testing-deploy.yml b/.github/workflows/ci-testing-deploy.yml
index d1a3ec75078..96091cff042 100644
--- a/.github/workflows/ci-testing-deploy.yml
+++ b/.github/workflows/ci-testing-deploy.yml
@@ -2460,7 +2460,7 @@ jobs:
integration-tests:
# NOTE: this is a github required status check!
- if: ${{ !cancelled() }}
+ if: ${{ always() }}
needs:
[
integration-test-director-v2-01,
@@ -2792,7 +2792,7 @@ jobs:
system-tests:
# NOTE: this is a github required status check!
- if: ${{ !cancelled() }}
+ if: ${{ always() }}
needs:
[
system-test-e2e,
From 59aeb9964941234c040ac1c39eec9a8217db244d Mon Sep 17 00:00:00 2001
From: Matus Drobuliak <60785969+matusdrobuliak66@users.noreply.github.com>
Date: Tue, 3 Dec 2024 14:04:26 +0100
Subject: [PATCH 10/16] 🎨 moving folders to workspaces (#6851)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
api/specs/web-server/_folders.py | 15 +
api/specs/web-server/_projects_workspaces.py | 6 +-
.../api_schemas_webserver/projects.py | 24 +-
.../helpers/webserver_projects.py | 2 +-
.../db/repositories/comp_tasks/_utils.py | 3 +-
.../test_api_route_computations.py | 26 +-
.../class/osparc/dashboard/StudyBrowser.js | 5 -
.../source/class/osparc/data/Resources.js | 8 +-
.../api/v0/openapi.yaml | 59 +++-
.../folders/_folders_db.py | 70 ++++-
.../folders/_models.py | 14 +-
.../folders/_workspaces_api.py | 138 +++++++++
.../folders/_workspaces_handlers.py | 38 +++
.../folders/plugin.py | 3 +-
.../projects/_folders_db.py | 79 ++++-
.../projects/_groups_api.py | 3 +-
.../projects/_groups_db.py | 105 ++++---
.../projects/_projects_db.py | 59 ++++
.../projects/_workspaces_api.py | 55 ++--
.../projects/_workspaces_handlers.py | 12 +-
.../simcore_service_webserver/projects/db.py | 45 +--
.../projects/projects_api.py | 9 +-
.../src/simcore_service_webserver/utils.py | 14 +
services/web/server/tests/conftest.py | 2 +-
.../integration/01/test_garbage_collection.py | 2 +-
.../tests/unit/with_dbs/03/test_project_db.py | 2 +-
.../unit/with_dbs/04/workspaces/conftest.py | 2 +
...aces__moving_folders_between_workspaces.py | 274 ++++++++++++++++++
...ces__moving_projects_between_workspaces.py | 20 +-
29 files changed, 918 insertions(+), 176 deletions(-)
create mode 100644 services/web/server/src/simcore_service_webserver/folders/_workspaces_api.py
create mode 100644 services/web/server/src/simcore_service_webserver/folders/_workspaces_handlers.py
create mode 100644 services/web/server/src/simcore_service_webserver/projects/_projects_db.py
create mode 100644 services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_folders_between_workspaces.py
diff --git a/api/specs/web-server/_folders.py b/api/specs/web-server/_folders.py
index 88a2b19ce9e..2aa77e485d4 100644
--- a/api/specs/web-server/_folders.py
+++ b/api/specs/web-server/_folders.py
@@ -25,6 +25,9 @@
FoldersListQueryParams,
FoldersPathParams,
)
+from simcore_service_webserver.folders._workspaces_handlers import (
+ _FolderWorkspacesPathParams,
+)
router = APIRouter(
prefix=f"/{API_VTAG}",
@@ -97,3 +100,15 @@ async def delete_folder(
_path: Annotated[FoldersPathParams, Depends()],
):
...
+
+
+@router.post(
+ "/folders/{folder_id}/workspaces/{workspace_id}:move",
+ status_code=status.HTTP_204_NO_CONTENT,
+ summary="Move folder to the workspace",
+ tags=["workspaces"],
+)
+async def move_folder_to_workspace(
+ _path: Annotated[_FolderWorkspacesPathParams, Depends()],
+):
+ ...
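Aside: an illustrative client-side sketch of the new custom ":move" action (base URL and port are assumptions; the "null" path segment mirrors the null_or_none_str_to_none_validator applied to workspace_id):

import asyncio
import httpx

async def move_folder_to_workspace(folder_id: int, workspace_id: int | None) -> None:
    async with httpx.AsyncClient(base_url="http://localhost:9081/v0") as client:  # assumed base URL
        wid = "null" if workspace_id is None else f"{workspace_id}"  # "null" targets the private workspace
        resp = await client.post(f"/folders/{folder_id}/workspaces/{wid}:move")
        resp.raise_for_status()  # expect 204 No Content

# asyncio.run(move_folder_to_workspace(2, 1))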
diff --git a/api/specs/web-server/_projects_workspaces.py b/api/specs/web-server/_projects_workspaces.py
index 533d3c72a9b..caaccfca05c 100644
--- a/api/specs/web-server/_projects_workspaces.py
+++ b/api/specs/web-server/_projects_workspaces.py
@@ -23,12 +23,12 @@
)
-@router.put(
- "/projects/{project_id}/workspaces/{workspace_id}",
+@router.post(
+ "/projects/{project_id}/workspaces/{workspace_id}:move",
status_code=status.HTTP_204_NO_CONTENT,
summary="Move project to the workspace",
)
-async def replace_project_workspace(
+async def move_project_to_workspace(
_path: Annotated[_ProjectWorkspacesPathParams, Depends()],
):
...
diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects.py b/packages/models-library/src/models_library/api_schemas_webserver/projects.py
index 7c4116a136c..a918ece3b92 100644
--- a/packages/models-library/src/models_library/api_schemas_webserver/projects.py
+++ b/packages/models-library/src/models_library/api_schemas_webserver/projects.py
@@ -9,8 +9,16 @@
from typing import Annotated, Any, Literal, TypeAlias
from models_library.folders import FolderID
+from models_library.utils._original_fastapi_encoders import jsonable_encoder
from models_library.workspaces import WorkspaceID
-from pydantic import BeforeValidator, ConfigDict, Field, HttpUrl, field_validator
+from pydantic import (
+ BeforeValidator,
+ ConfigDict,
+ Field,
+ HttpUrl,
+ PlainSerializer,
+ field_validator,
+)
from ..api_schemas_long_running_tasks.tasks import TaskGet
from ..basic_types import LongTruncatedStr, ShortTruncatedStr
@@ -130,12 +138,22 @@ class ProjectPatch(InputSchema):
name: ShortTruncatedStr | None = Field(default=None)
description: LongTruncatedStr | None = Field(default=None)
thumbnail: Annotated[
- HttpUrl | None, BeforeValidator(empty_str_to_none_pre_validator)
+ HttpUrl | None,
+ BeforeValidator(empty_str_to_none_pre_validator),
+ PlainSerializer(lambda x: str(x) if x is not None else None),
] = Field(default=None)
access_rights: dict[GroupIDStr, AccessRights] | None = Field(default=None)
classifiers: list[ClassifierID] | None = Field(default=None)
dev: dict | None = Field(default=None)
- ui: StudyUI | None = Field(default=None)
+ ui: Annotated[
+ StudyUI | None,
+ BeforeValidator(empty_str_to_none_pre_validator),
+ PlainSerializer(
+ lambda obj: jsonable_encoder(
+ obj, exclude_unset=True, by_alias=False
+ ) # For the sake of backward compatibility
+ ),
+ ] = Field(default=None)
quality: dict[str, Any] | None = Field(default=None)
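Aside: a minimal sketch (stand-in model) of what the added PlainSerializer on the thumbnail field buys in pydantic v2, where HttpUrl otherwise dumps as a Url object rather than a plain string:

from typing import Annotated
from pydantic import BaseModel, HttpUrl, PlainSerializer

class _Patch(BaseModel):  # stand-in for ProjectPatch, thumbnail field only
    thumbnail: Annotated[
        HttpUrl | None,
        PlainSerializer(lambda x: str(x) if x is not None else None),
    ] = None

# model_dump() now yields a JSON-native str (or None) instead of a Url instance
assert _Patch(thumbnail="https://example.com/a.png").model_dump() == {
    "thumbnail": "https://example.com/a.png"
}
assert _Patch().model_dump() == {"thumbnail": None}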
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py
index 55065daaf76..092ab82d655 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py
@@ -95,7 +95,7 @@ async def create_project(
for group_id, permissions in _access_rights.items():
await update_or_insert_project_group(
app,
- new_project["uuid"],
+ project_id=new_project["uuid"],
group_id=int(group_id),
read=permissions["read"],
write=permissions["write"],
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
index c5fd0819fcd..dd52f50ac82 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
@@ -6,6 +6,7 @@
import aiopg.sa
import arrow
from dask_task_models_library.container_tasks.protocol import ContainerEnvsDict
+from models_library.api_schemas_catalog.services import ServiceGet
from models_library.api_schemas_clusters_keeper.ec2_instances import EC2InstanceTypeGet
from models_library.api_schemas_directorv2.services import (
NodeRequirements,
@@ -89,7 +90,7 @@ async def _get_service_details(
node.version,
product_name,
)
- obj: ServiceMetaDataPublished = ServiceMetaDataPublished(**service_details)
+ obj: ServiceMetaDataPublished = ServiceGet(**service_details)
return obj
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
index 4381c9311d4..6b6084c5895 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
@@ -186,15 +186,29 @@ def _mocked_service_resources(request) -> httpx.Response:
def _mocked_services_details(
request, service_key: str, service_version: str
) -> httpx.Response:
+ assert "json_schema_extra" in ServiceGet.model_config
+ assert isinstance(ServiceGet.model_config["json_schema_extra"], dict)
+ assert isinstance(
+ ServiceGet.model_config["json_schema_extra"]["examples"], list
+ )
+ assert isinstance(
+ ServiceGet.model_config["json_schema_extra"]["examples"][0], dict
+ )
+ data_published = fake_service_details.model_copy(
+ update={
+ "key": urllib.parse.unquote(service_key),
+ "version": service_version,
+ }
+ ).model_dump(by_alias=True)
+ data = {
+ **ServiceGet.model_config["json_schema_extra"]["examples"][0],
+ **data_published,
+ }
+ payload = ServiceGet.model_validate(data)
return httpx.Response(
200,
json=jsonable_encoder(
- fake_service_details.model_copy(
- update={
- "key": urllib.parse.unquote(service_key),
- "version": service_version,
- }
- ),
+ payload,
by_alias=True,
),
)
diff --git a/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js b/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js
index c7ef8f916f2..2eecb230400 100644
--- a/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js
+++ b/services/static-webserver/client/source/class/osparc/dashboard/StudyBrowser.js
@@ -578,11 +578,6 @@ qx.Class.define("osparc.dashboard.StudyBrowser", {
const data = e.getData();
const destWorkspaceId = data["workspaceId"];
const destFolderId = data["folderId"];
- if (destWorkspaceId !== currentWorkspaceId) {
- const msg = this.tr("Moving folders to Shared Workspaces are coming soon");
- osparc.FlashMessenger.getInstance().logAs(msg, "WARNING");
- return;
- }
const moveFolder = () => {
Promise.all([
this.__moveFolderToWorkspace(folderId, destWorkspaceId),
diff --git a/services/static-webserver/client/source/class/osparc/data/Resources.js b/services/static-webserver/client/source/class/osparc/data/Resources.js
index d87f6c690bf..f8b38797c58 100644
--- a/services/static-webserver/client/source/class/osparc/data/Resources.js
+++ b/services/static-webserver/client/source/class/osparc/data/Resources.js
@@ -288,8 +288,8 @@ qx.Class.define("osparc.data.Resources", {
url: statics.API + "/projects/{studyId}/folders/{folderId}"
},
moveToWorkspace: {
- method: "PUT",
- url: statics.API + "/projects/{studyId}/workspaces/{workspaceId}"
+ method: "POST",
+ url: statics.API + "/projects/{studyId}/workspaces/{workspaceId}:move"
},
}
},
@@ -342,8 +342,8 @@ qx.Class.define("osparc.data.Resources", {
url: statics.API + "/folders/{folderId}"
},
moveToWorkspace: {
- method: "PUT",
- url: statics.API + "/folders/{folderId}/folders/{workspaceId}"
+ method: "POST",
+ url: statics.API + "/folders/{folderId}/folders/{workspaceId}:move"
},
trash: {
method: "POST",
diff --git a/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml b/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
index 9cca4bafd06..84951101670 100644
--- a/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
+++ b/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
@@ -2944,6 +2944,59 @@ paths:
schema:
$ref: '#/components/schemas/EnvelopedError'
description: Service Unavailable
+ /v0/folders/{folder_id}/workspaces/{workspace_id}:move:
+ post:
+ tags:
+ - folders
+ - workspaces
+ summary: Move folder to the workspace
+ operationId: move_folder_to_workspace
+ parameters:
+ - name: folder_id
+ in: path
+ required: true
+ schema:
+ type: integer
+ exclusiveMinimum: true
+ title: Folder Id
+ minimum: 0
+ - name: workspace_id
+ in: path
+ required: true
+ schema:
+ anyOf:
+ - type: integer
+ exclusiveMinimum: true
+ minimum: 0
+ - type: 'null'
+ title: Workspace Id
+ responses:
+ '204':
+ description: Successful Response
+ '404':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/EnvelopedError'
+ description: Not Found
+ '403':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/EnvelopedError'
+ description: Forbidden
+ '409':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/EnvelopedError'
+ description: Conflict
+ '503':
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/EnvelopedError'
+ description: Service Unavailable
/v0/tasks:
get:
tags:
@@ -4706,13 +4759,13 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/Envelope_WalletGet_'
- /v0/projects/{project_id}/workspaces/{workspace_id}:
- put:
+ /v0/projects/{project_id}/workspaces/{workspace_id}:move:
+ post:
tags:
- projects
- workspaces
summary: Move project to the workspace
- operationId: replace_project_workspace
+ operationId: move_project_to_workspace
parameters:
- name: project_id
in: path
diff --git a/services/web/server/src/simcore_service_webserver/folders/_folders_db.py b/services/web/server/src/simcore_service_webserver/folders/_folders_db.py
index 7e3a54d0bb5..88bb3987de4 100644
--- a/services/web/server/src/simcore_service_webserver/folders/_folders_db.py
+++ b/services/web/server/src/simcore_service_webserver/folders/_folders_db.py
@@ -6,7 +6,7 @@
import logging
from datetime import datetime
-from typing import Any, Final, cast
+from typing import Final, cast
import sqlalchemy as sa
from aiohttp import web
@@ -33,6 +33,7 @@
from simcore_postgres_database.utils_workspaces_sql import (
create_my_workspace_access_rights_subquery,
)
+from simcore_service_webserver.utils import UnSet, as_dict_exclude_unset
from sqlalchemy import func
from sqlalchemy.ext.asyncio import AsyncConnection
from sqlalchemy.orm import aliased
@@ -43,18 +44,9 @@
_logger = logging.getLogger(__name__)
-
-class UnSet:
- ...
-
-
_unset: Final = UnSet()
-def as_dict_exclude_unset(**params) -> dict[str, Any]:
- return {k: v for k, v in params.items() if not isinstance(v, UnSet)}
-
-
_SELECTION_ARGS = (
folders_v2.c.folder_id,
folders_v2.c.name,
@@ -324,6 +316,8 @@ async def update(
parent_folder_id: FolderID | None | UnSet = _unset,
trashed_at: datetime | None | UnSet = _unset,
trashed_explicitly: bool | UnSet = _unset,
+ workspace_id: WorkspaceID | None | UnSet = _unset,
+ user_id: UserID | None | UnSet = _unset,
) -> FolderDB:
"""
Batch/single patch of folder/s
@@ -334,6 +328,8 @@ async def update(
parent_folder_id=parent_folder_id,
trashed_at=trashed_at,
trashed_explicitly=trashed_explicitly,
+ workspace_id=workspace_id,
+ user_id=user_id,
)
query = (
@@ -467,6 +463,60 @@ async def get_projects_recursively_only_if_user_is_owner(
return [ProjectID(row[0]) async for row in result]
+async def get_all_folders_and_projects_ids_recursively(
+ app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
+ folder_id: FolderID,
+ private_workspace_user_id_or_none: UserID | None,
+ product_name: ProductName,
+) -> tuple[list[FolderID], list[ProjectID]]:
+ """
+ Recursively retrieves the IDs of all folders and projects contained in the provided folder.
+ """
+
+ async with pass_or_acquire_connection(get_asyncpg_engine(app), connection) as conn:
+
+ # Step 1: Define the base case for the recursive CTE
+ base_query = select(
+ folders_v2.c.folder_id, folders_v2.c.parent_folder_id
+ ).where(
+ (folders_v2.c.folder_id == folder_id) # <-- specified folder id
+ & (folders_v2.c.product_name == product_name)
+ )
+ folder_hierarchy_cte = base_query.cte(name="folder_hierarchy", recursive=True)
+
+ # Step 2: Define the recursive case
+ folder_alias = aliased(folders_v2)
+ recursive_query = select(
+ folder_alias.c.folder_id, folder_alias.c.parent_folder_id
+ ).select_from(
+ folder_alias.join(
+ folder_hierarchy_cte,
+ folder_alias.c.parent_folder_id == folder_hierarchy_cte.c.folder_id,
+ )
+ )
+
+ # Step 3: Combine base and recursive cases into a CTE
+ folder_hierarchy_cte = folder_hierarchy_cte.union_all(recursive_query)
+
+ # Step 4: Execute the query to get all descendants
+ final_query = select(folder_hierarchy_cte)
+ result = await conn.stream(final_query)
+ # list of tuples [(folder_id, parent_folder_id), ...] ex. [(1, None), (2, 1)]
+ folder_ids = [item.folder_id async for item in result]
+
+ query = select(projects_to_folders.c.project_uuid).where(
+ (projects_to_folders.c.folder_id.in_(folder_ids))
+ & (projects_to_folders.c.user_id == private_workspace_user_id_or_none)
+ )
+
+ result = await conn.stream(query)
+ project_ids = [ProjectID(row.project_uuid) async for row in result]
+
+ return folder_ids, project_ids
+
+
async def get_folders_recursively(
app: web.Application,
connection: AsyncConnection | None = None,
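Aside: a self-contained sketch (SQLite in-memory, simplified columns) of the recursive-CTE pattern used in get_all_folders_and_projects_ids_recursively above: a base query anchored at the starting folder, union_all'ed with a join on parent_folder_id to walk the whole subtree.

import sqlalchemy as sa

metadata = sa.MetaData()
folders = sa.Table(
    "folders", metadata,
    sa.Column("folder_id", sa.Integer, primary_key=True),
    sa.Column("parent_folder_id", sa.Integer, nullable=True),
)

engine = sa.create_engine("sqlite://")
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(folders.insert(), [
        {"folder_id": 1, "parent_folder_id": None},
        {"folder_id": 2, "parent_folder_id": 1},
        {"folder_id": 3, "parent_folder_id": 2},
    ])

    # Step 1: base case anchored at the folder we start from
    base = sa.select(folders.c.folder_id, folders.c.parent_folder_id).where(folders.c.folder_id == 1)
    hierarchy = base.cte(name="folder_hierarchy", recursive=True)

    # Steps 2 + 3: recursive case joined on parent_folder_id, combined with union_all
    alias = folders.alias()
    hierarchy = hierarchy.union_all(
        sa.select(alias.c.folder_id, alias.c.parent_folder_id).select_from(
            alias.join(hierarchy, alias.c.parent_folder_id == hierarchy.c.folder_id)
        )
    )

    # Step 4: the CTE yields the starting folder and all of its descendants
    folder_ids = [row.folder_id for row in conn.execute(sa.select(hierarchy))]
    assert sorted(folder_ids) == [1, 2, 3]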
diff --git a/services/web/server/src/simcore_service_webserver/folders/_models.py b/services/web/server/src/simcore_service_webserver/folders/_models.py
index 9cac8a2f1a1..553d43bd64c 100644
--- a/services/web/server/src/simcore_service_webserver/folders/_models.py
+++ b/services/web/server/src/simcore_service_webserver/folders/_models.py
@@ -18,10 +18,9 @@
null_or_none_str_to_none_validator,
)
from models_library.workspaces import WorkspaceID
-from pydantic import BeforeValidator, ConfigDict, Field
-from servicelib.request_keys import RQT_USERID_KEY
+from pydantic import BaseModel, BeforeValidator, ConfigDict, Field
-from .._constants import RQ_PRODUCT_KEY
+from .._constants import RQ_PRODUCT_KEY, RQT_USERID_KEY
_logger = logging.getLogger(__name__)
@@ -88,3 +87,12 @@ class FolderSearchQueryParams(
class FolderTrashQueryParams(RemoveQueryParams):
...
+
+
+class _FolderWorkspacesPathParams(BaseModel):
+ folder_id: FolderID
+ workspace_id: Annotated[
+ WorkspaceID | None, BeforeValidator(null_or_none_str_to_none_validator)
+ ] = Field(default=None)
+
+ model_config = ConfigDict(extra="forbid")
diff --git a/services/web/server/src/simcore_service_webserver/folders/_workspaces_api.py b/services/web/server/src/simcore_service_webserver/folders/_workspaces_api.py
new file mode 100644
index 00000000000..115ff2c8d8e
--- /dev/null
+++ b/services/web/server/src/simcore_service_webserver/folders/_workspaces_api.py
@@ -0,0 +1,138 @@
+import logging
+
+from aiohttp import web
+from models_library.folders import FolderID
+from models_library.products import ProductName
+from models_library.users import UserID
+from models_library.workspaces import WorkspaceID
+from simcore_postgres_database.utils_repos import transaction_context
+
+from ..db.plugin import get_asyncpg_engine
+from ..projects import _folders_db as project_to_folders_db
+from ..projects import _groups_db as project_groups_db
+from ..projects import _projects_db as projects_db
+from ..projects._access_rights_api import check_user_project_permission
+from ..users.api import get_user
+from ..workspaces.api import check_user_workspace_access
+from . import _folders_db
+
+_logger = logging.getLogger(__name__)
+
+
+async def move_folder_into_workspace(
+ app: web.Application,
+ *,
+ user_id: UserID,
+ folder_id: FolderID,
+ workspace_id: WorkspaceID | None,
+ product_name: ProductName,
+) -> None:
+ # 1. User needs to have delete permission on source folder
+ folder_db = await _folders_db.get(
+ app, folder_id=folder_id, product_name=product_name
+ )
+ workspace_is_private = True
+ if folder_db.workspace_id:
+ await check_user_workspace_access(
+ app,
+ user_id=user_id,
+ workspace_id=folder_db.workspace_id,
+ product_name=product_name,
+ permission="delete",
+ )
+ workspace_is_private = False
+
+ # 2. User needs to have write permission on destination workspace
+ if workspace_id is not None:
+ await check_user_workspace_access(
+ app,
+ user_id=user_id,
+ workspace_id=workspace_id,
+ product_name=product_name,
+ permission="write",
+ )
+
+ # 3. User needs to have delete permission on all the projects inside source folder
+ (
+ folder_ids,
+ project_ids,
+ ) = await _folders_db.get_all_folders_and_projects_ids_recursively(
+ app,
+ connection=None,
+ folder_id=folder_id,
+ private_workspace_user_id_or_none=user_id if workspace_is_private else None,
+ product_name=product_name,
+ )
+ # NOTE: Not the most efficient approach (one permission check per project); can be improved
+ for project_id in project_ids:
+ await check_user_project_permission(
+ app,
+ project_id=project_id,
+ user_id=user_id,
+ product_name=product_name,
+ permission="delete",
+ )
+
+ # ⬆️ By this point we have already guaranteed that the user has all the required permissions for this operation ⬆️
+
+ async with transaction_context(get_asyncpg_engine(app)) as conn:
+ # 4. Update workspace ID on the project resource
+ for project_id in project_ids:
+ await projects_db.patch_project(
+ app=app,
+ connection=conn,
+ project_uuid=project_id,
+ new_partial_project_data={"workspace_id": workspace_id},
+ )
+
+ # 5. BATCH update of folders with workspace_id
+ await _folders_db.update(
+ app,
+ connection=conn,
+ folders_id_or_ids=set(folder_ids),
+ product_name=product_name,
+ workspace_id=workspace_id, # <-- Updating workspace_id
+ user_id=user_id if workspace_id is None else None, # <-- Updating user_id
+ )
+
+ # 6. Update source folder parent folder ID with NULL (it will appear in the root directory)
+ await _folders_db.update(
+ app,
+ connection=conn,
+ folders_id_or_ids=folder_id,
+ product_name=product_name,
+ parent_folder_id=None, # <-- Updating parent folder ID
+ )
+
+ # 7. Remove all projects-to-folders records that point to folders outside the ones being moved
+ # (e.g. when moving from a private workspace, the same project can sit in different folders for different users)
+ await project_to_folders_db.delete_all_project_to_folder_by_project_ids_not_in_folder_ids(
+ app,
+ connection=conn,
+ project_id_or_ids=set(project_ids),
+ not_in_folder_ids=set(folder_ids),
+ )
+
+ # 8. Update the user id field for the remaining folders
+ await project_to_folders_db.update_project_to_folder(
+ app,
+ connection=conn,
+ folders_id_or_ids=set(folder_ids),
+ user_id=user_id if workspace_id is None else None, # <-- Updating user_id
+ )
+
+ # 9. Remove all project permissions, leave only the user who moved the project
+ user = await get_user(app, user_id=user_id)
+ for project_id in project_ids:
+ await project_groups_db.delete_all_project_groups(
+ app, connection=conn, project_id=project_id
+ )
+ await project_groups_db.update_or_insert_project_group(
+ app,
+ connection=conn,
+ project_id=project_id,
+ group_id=user["primary_gid"],
+ read=True,
+ write=True,
+ delete=True,
+ )
diff --git a/services/web/server/src/simcore_service_webserver/folders/_workspaces_handlers.py b/services/web/server/src/simcore_service_webserver/folders/_workspaces_handlers.py
new file mode 100644
index 00000000000..faa505ecd31
--- /dev/null
+++ b/services/web/server/src/simcore_service_webserver/folders/_workspaces_handlers.py
@@ -0,0 +1,38 @@
+import logging
+
+from aiohttp import web
+from servicelib.aiohttp import status
+from servicelib.aiohttp.requests_validation import parse_request_path_parameters_as
+
+from .._meta import api_version_prefix as VTAG
+from ..login.decorators import login_required
+from ..security.decorators import permission_required
+from . import _workspaces_api
+from ._exceptions_handlers import handle_plugin_requests_exceptions
+from ._models import FoldersRequestContext, _FolderWorkspacesPathParams
+
+_logger = logging.getLogger(__name__)
+
+
+routes = web.RouteTableDef()
+
+
+@routes.post(
+ f"/{VTAG}/folders/{{folder_id}}/workspaces/{{workspace_id}}:move",
+ name="move_folder_to_workspace",
+)
+@login_required
+@permission_required("folder.update")
+@handle_plugin_requests_exceptions
+async def move_folder_to_workspace(request: web.Request):
+ req_ctx = FoldersRequestContext.model_validate(request)
+ path_params = parse_request_path_parameters_as(_FolderWorkspacesPathParams, request)
+
+ await _workspaces_api.move_folder_into_workspace(
+ app=request.app,
+ user_id=req_ctx.user_id,
+ folder_id=path_params.folder_id,
+ workspace_id=path_params.workspace_id,
+ product_name=req_ctx.product_name,
+ )
+ return web.json_response(status=status.HTTP_204_NO_CONTENT)
diff --git a/services/web/server/src/simcore_service_webserver/folders/plugin.py b/services/web/server/src/simcore_service_webserver/folders/plugin.py
index 8ddef03ec1f..2601962e52f 100644
--- a/services/web/server/src/simcore_service_webserver/folders/plugin.py
+++ b/services/web/server/src/simcore_service_webserver/folders/plugin.py
@@ -7,7 +7,7 @@
from servicelib.aiohttp.application_keys import APP_SETTINGS_KEY
from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup
-from . import _folders_handlers, _trash_handlers
+from . import _folders_handlers, _trash_handlers, _workspaces_handlers
_logger = logging.getLogger(__name__)
@@ -25,3 +25,4 @@ def setup_folders(app: web.Application):
# routes
app.router.add_routes(_folders_handlers.routes)
app.router.add_routes(_trash_handlers.routes)
+ app.router.add_routes(_workspaces_handlers.routes)
diff --git a/services/web/server/src/simcore_service_webserver/projects/_folders_db.py b/services/web/server/src/simcore_service_webserver/projects/_folders_db.py
index 59ea8ebe282..e655cc17bf5 100644
--- a/services/web/server/src/simcore_service_webserver/projects/_folders_db.py
+++ b/services/web/server/src/simcore_service_webserver/projects/_folders_db.py
@@ -6,6 +6,7 @@
import logging
from datetime import datetime
+from typing import Final
from aiohttp import web
from models_library.folders import FolderID
@@ -13,15 +14,17 @@
from models_library.users import UserID
from pydantic import BaseModel
from simcore_postgres_database.models.projects_to_folders import projects_to_folders
+from simcore_postgres_database.utils_repos import transaction_context
+from simcore_service_webserver.utils import UnSet, as_dict_exclude_unset
from sqlalchemy import func, literal_column
+from sqlalchemy.ext.asyncio import AsyncConnection
from sqlalchemy.sql import select
-from ..db.plugin import get_database_engine
+from ..db.plugin import get_asyncpg_engine, get_database_engine
_logger = logging.getLogger(__name__)
-
-_logger = logging.getLogger(__name__)
+_unset: Final = UnSet()
### Models
@@ -100,13 +103,79 @@ async def delete_project_to_folder(
)
+### AsyncPg
+
+
async def delete_all_project_to_folder_by_project_id(
app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
project_id: ProjectID,
) -> None:
- async with get_database_engine(app).acquire() as conn:
- await conn.execute(
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ await conn.stream(
projects_to_folders.delete().where(
projects_to_folders.c.project_uuid == f"{project_id}"
)
)
+
+
+async def update_project_to_folder(
+ app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
+ folders_id_or_ids: FolderID | set[FolderID],
+ # updatable columns
+ user_id: UserID | None | UnSet = _unset,
+) -> None:
+ """
+ Batch/single update of projects-to-folders records
+ """
+ # NOTE: exclude unset can also be done using a pydantic model and dict(exclude_unset=True)
+ updated = as_dict_exclude_unset(
+ user_id=user_id,
+ )
+
+ query = projects_to_folders.update().values(modified=func.now(), **updated)
+
+ if isinstance(folders_id_or_ids, set):
+ # batch-update
+ query = query.where(
+ projects_to_folders.c.folder_id.in_(list(folders_id_or_ids))
+ )
+ else:
+ # single-update
+ query = query.where(projects_to_folders.c.folder_id == folders_id_or_ids)
+
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ await conn.stream(query)
+
+
+async def delete_all_project_to_folder_by_project_ids_not_in_folder_ids(
+ app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
+ project_id_or_ids: ProjectID | set[ProjectID],
+ not_in_folder_ids: set[FolderID],
+) -> None:
+ query = projects_to_folders.delete()
+
+ if isinstance(project_id_or_ids, set):
+ # batch-delete
+ query = query.where(
+ projects_to_folders.c.project_uuid.in_(
+ [f"{project_id}" for project_id in project_id_or_ids]
+ )
+ )
+ else:
+ # single-delete
+ query = query.where(
+ projects_to_folders.c.project_uuid == f"{project_id_or_ids}"
+ )
+
+ query = query.where(
+ projects_to_folders.c.folder_id.not_in(not_in_folder_ids) # <-- NOT IN!
+ )
+
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ await conn.stream(query)
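Aside: the new functions above all take an optional AsyncConnection and wrap it in transaction_context so that several repository calls can share one transaction. The real helper lives in simcore_postgres_database.utils_repos; a minimal sketch of the idea:

from collections.abc import AsyncIterator
from contextlib import asynccontextmanager

from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine

@asynccontextmanager
async def transaction_context(
    engine: AsyncEngine, connection: AsyncConnection | None = None
) -> AsyncIterator[AsyncConnection]:
    if connection is not None:
        # join the caller's transaction: all enclosed calls commit or roll back together
        yield connection
    else:
        # standalone call: open, commit and close its own transaction
        async with engine.begin() as conn:
            yield conn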
diff --git a/services/web/server/src/simcore_service_webserver/projects/_groups_api.py b/services/web/server/src/simcore_service_webserver/projects/_groups_api.py
index 7ae45f0f90c..b32a6d15fa1 100644
--- a/services/web/server/src/simcore_service_webserver/projects/_groups_api.py
+++ b/services/web/server/src/simcore_service_webserver/projects/_groups_api.py
@@ -80,7 +80,8 @@ async def list_project_groups_by_user_and_project(
] = await projects_groups_db.list_project_groups(app=app, project_id=project_id)
project_groups_api: list[ProjectGroupGet] = [
- ProjectGroupGet.model_validate(group.model_dump()) for group in project_groups_db
+ ProjectGroupGet.model_validate(group.model_dump())
+ for group in project_groups_db
]
return project_groups_api
diff --git a/services/web/server/src/simcore_service_webserver/projects/_groups_db.py b/services/web/server/src/simcore_service_webserver/projects/_groups_db.py
index 5b963b90cdb..4355f0c9d92 100644
--- a/services/web/server/src/simcore_service_webserver/projects/_groups_db.py
+++ b/services/web/server/src/simcore_service_webserver/projects/_groups_db.py
@@ -3,19 +3,22 @@
- Adds a layer to the postgres API with a focus on the projects comments
"""
+
import logging
from datetime import datetime
from aiohttp import web
from models_library.projects import ProjectID
from models_library.users import GroupID
-from pydantic import BaseModel, TypeAdapter
+from pydantic import BaseModel, ConfigDict, TypeAdapter
from simcore_postgres_database.models.project_to_groups import project_to_groups
+from simcore_postgres_database.utils_repos import transaction_context
from sqlalchemy import func, literal_column
from sqlalchemy.dialects.postgresql import insert as pg_insert
+from sqlalchemy.ext.asyncio import AsyncConnection
from sqlalchemy.sql import select
-from ..db.plugin import get_database_engine
+from ..db.plugin import get_asyncpg_engine
from .exceptions import ProjectGroupNotFoundError
_logger = logging.getLogger(__name__)
@@ -31,39 +34,46 @@ class ProjectGroupGetDB(BaseModel):
created: datetime
modified: datetime
+ model_config = ConfigDict(from_attributes=True)
+
## DB API
async def create_project_group(
app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
project_id: ProjectID,
group_id: GroupID,
- *,
read: bool,
write: bool,
delete: bool,
) -> ProjectGroupGetDB:
- async with get_database_engine(app).acquire() as conn:
- result = await conn.execute(
- project_to_groups.insert()
- .values(
- project_uuid=f"{project_id}",
- gid=group_id,
- read=read,
- write=write,
- delete=delete,
- created=func.now(),
- modified=func.now(),
- )
- .returning(literal_column("*"))
+ query = (
+ project_to_groups.insert()
+ .values(
+ project_uuid=f"{project_id}",
+ gid=group_id,
+ read=read,
+ write=write,
+ delete=delete,
+ created=func.now(),
+ modified=func.now(),
)
+ .returning(literal_column("*"))
+ )
+
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ result = await conn.stream(query)
row = await result.first()
return ProjectGroupGetDB.model_validate(row)
async def list_project_groups(
app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
project_id: ProjectID,
) -> list[ProjectGroupGetDB]:
stmt = (
@@ -79,14 +89,16 @@ async def list_project_groups(
.where(project_to_groups.c.project_uuid == f"{project_id}")
)
- async with get_database_engine(app).acquire() as conn:
- result = await conn.execute(stmt)
- rows = await result.fetchall() or []
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ result = await conn.stream(stmt)
+ rows = await result.all() or []
return TypeAdapter(list[ProjectGroupGetDB]).validate_python(rows)
async def get_project_group(
app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
project_id: ProjectID,
group_id: GroupID,
) -> ProjectGroupGetDB:
@@ -106,8 +118,8 @@ async def get_project_group(
)
)
- async with get_database_engine(app).acquire() as conn:
- result = await conn.execute(stmt)
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ result = await conn.stream(stmt)
row = await result.first()
if row is None:
raise ProjectGroupNotFoundError(
@@ -118,27 +130,31 @@ async def get_project_group(
async def replace_project_group(
app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
project_id: ProjectID,
group_id: GroupID,
- *,
read: bool,
write: bool,
delete: bool,
) -> ProjectGroupGetDB:
- async with get_database_engine(app).acquire() as conn:
- result = await conn.execute(
- project_to_groups.update()
- .values(
- read=read,
- write=write,
- delete=delete,
- )
- .where(
- (project_to_groups.c.project_uuid == f"{project_id}")
- & (project_to_groups.c.gid == group_id)
- )
- .returning(literal_column("*"))
+
+ query = (
+ project_to_groups.update()
+ .values(
+ read=read,
+ write=write,
+ delete=delete,
+ )
+ .where(
+ (project_to_groups.c.project_uuid == f"{project_id}")
+ & (project_to_groups.c.gid == group_id)
)
+ .returning(literal_column("*"))
+ )
+
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ result = await conn.stream(query)
row = await result.first()
if row is None:
raise ProjectGroupNotFoundError(
@@ -149,14 +165,15 @@ async def replace_project_group(
async def update_or_insert_project_group(
app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
project_id: ProjectID,
group_id: GroupID,
- *,
read: bool,
write: bool,
delete: bool,
) -> None:
- async with get_database_engine(app).acquire() as conn:
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
insert_stmt = pg_insert(project_to_groups).values(
project_uuid=f"{project_id}",
gid=group_id,
@@ -175,16 +192,18 @@ async def update_or_insert_project_group(
"modified": func.now(),
},
)
- await conn.execute(on_update_stmt)
+ await conn.stream(on_update_stmt)
async def delete_project_group(
app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
project_id: ProjectID,
group_id: GroupID,
) -> None:
- async with get_database_engine(app).acquire() as conn:
- await conn.execute(
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ await conn.stream(
project_to_groups.delete().where(
(project_to_groups.c.project_uuid == f"{project_id}")
& (project_to_groups.c.gid == group_id)
@@ -194,10 +213,12 @@ async def delete_project_group(
async def delete_all_project_groups(
app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
project_id: ProjectID,
) -> None:
- async with get_database_engine(app).acquire() as conn:
- await conn.execute(
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ await conn.stream(
project_to_groups.delete().where(
project_to_groups.c.project_uuid == f"{project_id}"
)
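Aside: a minimal sketch of why ConfigDict(from_attributes=True) is added to ProjectGroupGetDB above: pydantic v2 only validates attribute-style objects (such as SQLAlchemy Row results) when that flag is set.

from types import SimpleNamespace
from pydantic import BaseModel, ConfigDict

class _ProjectGroupGetDB(BaseModel):  # trimmed stand-in
    gid: int
    read: bool
    write: bool
    delete: bool

    model_config = ConfigDict(from_attributes=True)

row = SimpleNamespace(gid=3, read=True, write=False, delete=False)  # stands in for a Row
group = _ProjectGroupGetDB.model_validate(row)
assert group.gid == 3 and group.read and not group.delete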
diff --git a/services/web/server/src/simcore_service_webserver/projects/_projects_db.py b/services/web/server/src/simcore_service_webserver/projects/_projects_db.py
new file mode 100644
index 00000000000..3c94e9e7cdc
--- /dev/null
+++ b/services/web/server/src/simcore_service_webserver/projects/_projects_db.py
@@ -0,0 +1,59 @@
+import logging
+
+import sqlalchemy as sa
+from aiohttp import web
+from models_library.projects import ProjectID
+from simcore_postgres_database.utils_repos import transaction_context
+from simcore_postgres_database.webserver_models import projects
+from sqlalchemy.ext.asyncio import AsyncConnection
+
+from ..db.plugin import get_asyncpg_engine
+from .exceptions import ProjectNotFoundError
+from .models import ProjectDB
+
+_logger = logging.getLogger(__name__)
+
+
+# NOTE: MD: I intentionally didn't include the workbench. There is a special interface
+# for the workbench, and at some point, this column should be removed from the table.
+# The same holds true for access_rights/ui/classifiers/quality, but we have decided to proceed step by step.
+_SELECTION_PROJECT_DB_ARGS = [ # noqa: RUF012
+ projects.c.id,
+ projects.c.type,
+ projects.c.uuid,
+ projects.c.name,
+ projects.c.description,
+ projects.c.thumbnail,
+ projects.c.prj_owner,
+ projects.c.creation_date,
+ projects.c.last_change_date,
+ projects.c.ui,
+ projects.c.classifiers,
+ projects.c.dev,
+ projects.c.quality,
+ projects.c.published,
+ projects.c.hidden,
+ projects.c.workspace_id,
+ projects.c.trashed_at,
+]
+
+
+async def patch_project(
+ app: web.Application,
+ connection: AsyncConnection | None = None,
+ *,
+ project_uuid: ProjectID,
+ new_partial_project_data: dict,
+) -> ProjectDB:
+
+ async with transaction_context(get_asyncpg_engine(app), connection) as conn:
+ result = await conn.stream(
+ projects.update()
+ .values(last_change_date=sa.func.now(), **new_partial_project_data)
+ .where(projects.c.uuid == f"{project_uuid}")
+ .returning(*_SELECTION_PROJECT_DB_ARGS)
+ )
+ row = await result.first()
+ if row is None:
+ raise ProjectNotFoundError(project_uuid=project_uuid)
+ return ProjectDB.model_validate(row)
diff --git a/services/web/server/src/simcore_service_webserver/projects/_workspaces_api.py b/services/web/server/src/simcore_service_webserver/projects/_workspaces_api.py
index 105decdd3ac..1462168fa52 100644
--- a/services/web/server/src/simcore_service_webserver/projects/_workspaces_api.py
+++ b/services/web/server/src/simcore_service_webserver/projects/_workspaces_api.py
@@ -5,13 +5,15 @@
from models_library.projects import ProjectID
from models_library.users import UserID
from models_library.workspaces import WorkspaceID
+from simcore_postgres_database.utils_repos import transaction_context
+from ..db.plugin import get_asyncpg_engine
from ..projects._access_rights_api import get_user_project_access_rights
from ..users.api import get_user
from ..workspaces.api import check_user_workspace_access
from . import _folders_db as project_to_folders_db
from . import _groups_db as project_groups_db
-from .db import APP_PROJECT_DBAPI, ProjectDBAPI
+from . import _projects_db
from .exceptions import ProjectInvalidRightsError
_logger = logging.getLogger(__name__)
@@ -25,8 +27,6 @@ async def move_project_into_workspace(
workspace_id: WorkspaceID | None,
product_name: ProductName,
) -> None:
- project_api: ProjectDBAPI = app[APP_PROJECT_DBAPI]
-
# 1. User needs to have delete permission on project
project_access_rights = await get_user_project_access_rights(
app, project_id=project_id, user_id=user_id, product_name=product_name
@@ -44,26 +44,33 @@ async def move_project_into_workspace(
permission="write",
)
- # 3. Delete project to folders (for everybody)
- await project_to_folders_db.delete_all_project_to_folder_by_project_id(
- app,
- project_id=project_id,
- )
+ async with transaction_context(get_asyncpg_engine(app)) as conn:
+ # 3. Delete project to folders (for everybody)
+ await project_to_folders_db.delete_all_project_to_folder_by_project_id(
+ app,
+ connection=conn,
+ project_id=project_id,
+ )
- # 4. Update workspace ID on the project resource
- await project_api.patch_project(
- project_uuid=project_id,
- new_partial_project_data={"workspace_id": workspace_id},
- )
+ # 4. Update workspace ID on the project resource
+ await _projects_db.patch_project(
+ app=app,
+ connection=conn,
+ project_uuid=project_id,
+ new_partial_project_data={"workspace_id": workspace_id},
+ )
- # 5. Remove all project permissions, leave only the user who moved the project
- user = await get_user(app, user_id=user_id)
- await project_groups_db.delete_all_project_groups(app, project_id=project_id)
- await project_groups_db.update_or_insert_project_group(
- app,
- project_id=project_id,
- group_id=user["primary_gid"],
- read=True,
- write=True,
- delete=True,
- )
+ # 5. Remove all project permissions, leave only the user who moved the project
+ user = await get_user(app, user_id=user_id)
+ await project_groups_db.delete_all_project_groups(
+ app, connection=conn, project_id=project_id
+ )
+ await project_groups_db.update_or_insert_project_group(
+ app,
+ connection=conn,
+ project_id=project_id,
+ group_id=user["primary_gid"],
+ read=True,
+ write=True,
+ delete=True,
+ )
diff --git a/services/web/server/src/simcore_service_webserver/projects/_workspaces_handlers.py b/services/web/server/src/simcore_service_webserver/projects/_workspaces_handlers.py
index ff881b418af..ef3d20b3c5a 100644
--- a/services/web/server/src/simcore_service_webserver/projects/_workspaces_handlers.py
+++ b/services/web/server/src/simcore_service_webserver/projects/_workspaces_handlers.py
@@ -51,19 +51,21 @@ async def wrapper(request: web.Request) -> web.StreamResponse:
class _ProjectWorkspacesPathParams(BaseModel):
project_id: ProjectID
- workspace_id: Annotated[WorkspaceID | None, BeforeValidator(null_or_none_str_to_none_validator)] = Field(default=None)
+ workspace_id: Annotated[
+ WorkspaceID | None, BeforeValidator(null_or_none_str_to_none_validator)
+ ] = Field(default=None)
model_config = ConfigDict(extra="forbid")
-@routes.put(
- f"/{VTAG}/projects/{{project_id}}/workspaces/{{workspace_id}}",
- name="replace_project_workspace",
+@routes.post(
+ f"/{VTAG}/projects/{{project_id}}/workspaces/{{workspace_id}}:move",
+ name="move_project_to_workspace",
)
@login_required
@permission_required("project.workspaces.*")
@_handle_projects_workspaces_exceptions
-async def replace_project_workspace(request: web.Request):
+async def move_project_to_workspace(request: web.Request):
req_ctx = RequestContext.model_validate(request)
path_params = parse_request_path_parameters_as(
_ProjectWorkspacesPathParams, request
diff --git a/services/web/server/src/simcore_service_webserver/projects/db.py b/services/web/server/src/simcore_service_webserver/projects/db.py
index cdaed691e71..b0fc7c5551a 100644
--- a/services/web/server/src/simcore_service_webserver/projects/db.py
+++ b/services/web/server/src/simcore_service_webserver/projects/db.py
@@ -85,6 +85,7 @@
patch_workbench,
update_workbench,
)
+from ._projects_db import _SELECTION_PROJECT_DB_ARGS
from .exceptions import (
ProjectDeleteError,
ProjectInvalidRightsError,
@@ -676,33 +677,10 @@ async def get_project(
project_type,
)
- # NOTE: MD: I intentionally didn't include the workbench. There is a special interface
- # for the workbench, and at some point, this column should be removed from the table.
- # The same holds true for access_rights/ui/classifiers/quality, but we have decided to proceed step by step.
- _SELECTION_PROJECT_DB_ARGS = [ # noqa: RUF012
- projects.c.id,
- projects.c.type,
- projects.c.uuid,
- projects.c.name,
- projects.c.description,
- projects.c.thumbnail,
- projects.c.prj_owner,
- projects.c.creation_date,
- projects.c.last_change_date,
- projects.c.ui,
- projects.c.classifiers,
- projects.c.dev,
- projects.c.quality,
- projects.c.published,
- projects.c.hidden,
- projects.c.workspace_id,
- projects.c.trashed_at,
- ]
-
async def get_project_db(self, project_uuid: ProjectID) -> ProjectDB:
async with self.engine.acquire() as conn:
result = await conn.execute(
- sa.select(*self._SELECTION_PROJECT_DB_ARGS).where(
+ sa.select(*_SELECTION_PROJECT_DB_ARGS).where(
projects.c.uuid == f"{project_uuid}"
)
)
@@ -716,9 +694,7 @@ async def get_user_specific_project_data_db(
) -> UserSpecificProjectDataDB:
async with self.engine.acquire() as conn:
result = await conn.execute(
- sa.select(
- *self._SELECTION_PROJECT_DB_ARGS, projects_to_folders.c.folder_id
- )
+ sa.select(*_SELECTION_PROJECT_DB_ARGS, projects_to_folders.c.folder_id)
.select_from(
projects.join(
projects_to_folders,
@@ -865,21 +841,6 @@ async def replace_project(
msg = "linter unhappy without this"
raise RuntimeError(msg)
- async def patch_project(
- self, project_uuid: ProjectID, new_partial_project_data: dict
- ) -> ProjectDB:
- async with self.engine.acquire() as conn:
- result = await conn.execute(
- projects.update()
- .values(last_change_date=sa.func.now(), **new_partial_project_data)
- .where(projects.c.uuid == f"{project_uuid}")
- .returning(*self._SELECTION_PROJECT_DB_ARGS)
- )
- row = await result.fetchone()
- if row is None:
- raise ProjectNotFoundError(project_uuid=project_uuid)
- return ProjectDB.model_validate(row)
-
async def get_project_product(self, project_uuid: ProjectID) -> ProductName:
async with self.engine.acquire() as conn:
result = await conn.execute(
diff --git a/services/web/server/src/simcore_service_webserver/projects/projects_api.py b/services/web/server/src/simcore_service_webserver/projects/projects_api.py
index 6876c63718d..cf9445985c6 100644
--- a/services/web/server/src/simcore_service_webserver/projects/projects_api.py
+++ b/services/web/server/src/simcore_service_webserver/projects/projects_api.py
@@ -120,7 +120,7 @@
from ..wallets import api as wallets_api
from ..wallets.errors import WalletNotEnoughCreditsError
from ..workspaces import _workspaces_db as workspaces_db
-from . import _crud_api_delete, _nodes_api
+from . import _crud_api_delete, _nodes_api, _projects_db
from ._access_rights_api import (
check_user_project_permission,
has_user_project_access_rights,
@@ -253,8 +253,8 @@ async def patch_project(
project_patch: ProjectPatch | ProjectPatchExtended,
product_name: ProductName,
):
- _project_patch_exclude_unset: dict[str, Any] = jsonable_encoder(
- project_patch, exclude_unset=True, by_alias=False
+ _project_patch_exclude_unset = project_patch.model_dump(
+ exclude_unset=True, by_alias=False
)
db: ProjectDBAPI = app[APP_PROJECT_DBAPI]
@@ -289,7 +289,8 @@ async def patch_project(
raise ProjectOwnerNotFoundInTheProjectAccessRightsError
# 4. Patch the project
- await db.patch_project(
+ await _projects_db.patch_project(
+ app=app,
project_uuid=project_uuid,
new_partial_project_data=_project_patch_exclude_unset,
)
diff --git a/services/web/server/src/simcore_service_webserver/utils.py b/services/web/server/src/simcore_service_webserver/utils.py
index c6eade6345d..1f73ac06e0a 100644
--- a/services/web/server/src/simcore_service_webserver/utils.py
+++ b/services/web/server/src/simcore_service_webserver/utils.py
@@ -194,3 +194,17 @@ def compute_sha1_on_small_dataset(d: Any) -> SHA1Str:
# SEE options in https://github.com/ijl/orjson#option
data_bytes = orjson.dumps(d, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS)
return SHA1Str(hashlib.sha1(data_bytes).hexdigest()) # nosec # NOSONAR
+
+
+# -----------------------------------------------
+#
+# UNSET
+#
+
+
+class UnSet:
+ ...
+
+
+def as_dict_exclude_unset(**params) -> dict[str, Any]:
+ return {k: v for k, v in params.items() if not isinstance(v, UnSet)}
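Aside: a short usage sketch of the UnSet sentinel and as_dict_exclude_unset moved into utils above; the sentinel lets update() callers distinguish "leave this column untouched" from "explicitly set it to NULL".

from typing import Any, Final

class UnSet:
    ...

def as_dict_exclude_unset(**params) -> dict[str, Any]:
    return {k: v for k, v in params.items() if not isinstance(v, UnSet)}

_unset: Final = UnSet()

def build_update(
    *,
    user_id: int | None | UnSet = _unset,
    workspace_id: int | None | UnSet = _unset,
) -> dict[str, Any]:
    # only explicitly passed keyword arguments end up in the UPDATE values
    return as_dict_exclude_unset(user_id=user_id, workspace_id=workspace_id)

assert build_update() == {}                              # nothing touched
assert build_update(user_id=None) == {"user_id": None}   # explicit NULL
assert build_update(workspace_id=5) == {"workspace_id": 5}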
diff --git a/services/web/server/tests/conftest.py b/services/web/server/tests/conftest.py
index 7085050f331..f215368ad1d 100644
--- a/services/web/server/tests/conftest.py
+++ b/services/web/server/tests/conftest.py
@@ -358,7 +358,7 @@ async def _creator(
for group_id, permissions in _access_rights.items():
await update_or_insert_project_group(
client.app,
- data["uuid"],
+ project_id=data["uuid"],
group_id=int(group_id),
read=permissions["read"],
write=permissions["write"],
diff --git a/services/web/server/tests/integration/01/test_garbage_collection.py b/services/web/server/tests/integration/01/test_garbage_collection.py
index c52977d7115..d3aee60764d 100644
--- a/services/web/server/tests/integration/01/test_garbage_collection.py
+++ b/services/web/server/tests/integration/01/test_garbage_collection.py
@@ -237,7 +237,7 @@ async def new_project(
for group_id, permissions in access_rights.items():
await update_or_insert_project_group(
client.app,
- project["uuid"],
+ project_id=project["uuid"],
group_id=int(group_id),
read=permissions["read"],
write=permissions["write"],
diff --git a/services/web/server/tests/unit/with_dbs/03/test_project_db.py b/services/web/server/tests/unit/with_dbs/03/test_project_db.py
index fadfe561267..1d73a0e88c4 100644
--- a/services/web/server/tests/unit/with_dbs/03/test_project_db.py
+++ b/services/web/server/tests/unit/with_dbs/03/test_project_db.py
@@ -201,7 +201,7 @@ async def _inserter(prj: dict[str, Any], **overrides) -> dict[str, Any]:
for group_id, permissions in _access_rights.items():
await update_or_insert_project_group(
client.app,
- new_project["uuid"],
+ project_id=new_project["uuid"],
group_id=int(group_id),
read=permissions["read"],
write=permissions["write"],
diff --git a/services/web/server/tests/unit/with_dbs/04/workspaces/conftest.py b/services/web/server/tests/unit/with_dbs/04/workspaces/conftest.py
index 744b30da23b..fa008269aaf 100644
--- a/services/web/server/tests/unit/with_dbs/04/workspaces/conftest.py
+++ b/services/web/server/tests/unit/with_dbs/04/workspaces/conftest.py
@@ -5,6 +5,7 @@
import pytest
import sqlalchemy as sa
+from simcore_postgres_database.models.projects import projects
from simcore_postgres_database.models.workspaces import workspaces
@@ -13,3 +14,4 @@ def workspaces_clean_db(postgres_db: sa.engine.Engine) -> Iterator[None]:
with postgres_db.connect() as con:
yield
con.execute(workspaces.delete())
+ con.execute(projects.delete())
diff --git a/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_folders_between_workspaces.py b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_folders_between_workspaces.py
new file mode 100644
index 00000000000..ea7105a3338
--- /dev/null
+++ b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_folders_between_workspaces.py
@@ -0,0 +1,274 @@
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+# pylint: disable=too-many-arguments
+# pylint: disable=too-many-statements
+
+
+from copy import deepcopy
+from http.client import NO_CONTENT
+
+import pytest
+from aiohttp.test_utils import TestClient
+from pytest_mock import MockerFixture
+from pytest_simcore.helpers.assert_checks import assert_status
+from pytest_simcore.helpers.webserver_login import UserInfoDict
+from pytest_simcore.helpers.webserver_projects import create_project
+from servicelib.aiohttp import status
+from simcore_service_webserver.db.models import UserRole
+from simcore_service_webserver.db.plugin import setup_db
+from simcore_service_webserver.projects.models import ProjectDict
+
+
+@pytest.fixture
+def user_role() -> UserRole:
+ return UserRole.USER
+
+
+@pytest.fixture
+def mock_catalog_api_get_services_for_user_in_product(mocker: MockerFixture):
+ mocker.patch(
+ "simcore_service_webserver.projects._crud_api_read.get_services_for_user_in_product",
+ spec=True,
+ return_value=[],
+ )
+ mocker.patch(
+ "simcore_service_webserver.projects._crud_handlers.get_services_for_user_in_product",
+ spec=True,
+ return_value=[],
+ )
+ mocker.patch(
+ "simcore_service_webserver.projects._crud_handlers.project_uses_available_services",
+ spec=True,
+ return_value=True,
+ )
+
+
+@pytest.fixture
+async def moving_folder_id(
+ client: TestClient,
+ logged_user: UserInfoDict,
+ fake_project: ProjectDict,
+) -> str:
+ assert client.app
+ setup_db(client.app)
+
+ ### Project creation
+
+ # Create 2 projects
+ project_data = deepcopy(fake_project)
+ first_project = await create_project(
+ client.app,
+ params_override=project_data,
+ user_id=logged_user["id"],
+ product_name="osparc",
+ )
+ second_project = await create_project(
+ client.app,
+ params_override=project_data,
+ user_id=logged_user["id"],
+ product_name="osparc",
+ )
+
+ ### Folder creation
+
+ # Create folder
+ url = client.app.router["create_folder"].url_for()
+ resp = await client.post(
+ f"{url}",
+ json={
+ "name": "Original user folder",
+ },
+ )
+ first_folder, _ = await assert_status(resp, status.HTTP_201_CREATED)
+
+ # Create sub folder of previous folder
+ url = client.app.router["create_folder"].url_for()
+ resp = await client.post(
+ f"{url}",
+ json={
+ "name": "Second user folder",
+ "parentFolderId": f"{first_folder['folderId']}",
+ },
+ )
+ second_folder, _ = await assert_status(resp, status.HTTP_201_CREATED)
+
+ # Create sub sub folder of previous sub folder
+ url = client.app.router["create_folder"].url_for()
+ resp = await client.post(
+ f"{url}",
+ json={
+ "name": "Third user folder",
+ "parentFolderId": f"{second_folder['folderId']}",
+ },
+ )
+ third_folder, _ = await assert_status(resp, status.HTTP_201_CREATED)
+
+ ### Move projects to subfolder
+ # add first project to the folder
+ url = client.app.router["replace_project_folder"].url_for(
+ folder_id=f"{second_folder['folderId']}", project_id=f"{first_project['uuid']}"
+ )
+ resp = await client.put(f"{url}")
+ await assert_status(resp, status.HTTP_204_NO_CONTENT)
+ # add second project to the folder
+ url = client.app.router["replace_project_folder"].url_for(
+ folder_id=f"{second_folder['folderId']}", project_id=f"{second_project['uuid']}"
+ )
+ resp = await client.put(f"{url}")
+ await assert_status(resp, status.HTTP_204_NO_CONTENT)
+
+    ## Double-check that everything is set up correctly
+ url = (
+ client.app.router["list_projects"]
+ .url_for()
+ .with_query({"folder_id": f"{second_folder['folderId']}"})
+ )
+ resp = await client.get(f"{url}")
+ data, _ = await assert_status(resp, status.HTTP_200_OK)
+ assert len(data) == 2
+
+ url = (
+ client.app.router["list_projects"]
+ .url_for()
+ .with_query({"folder_id": f"{first_folder['folderId']}"})
+ )
+ resp = await client.get(f"{url}")
+ data, _ = await assert_status(resp, status.HTTP_200_OK)
+ assert len(data) == 0
+
+ url = client.app.router["list_projects"].url_for().with_query({"folder_id": "null"})
+ resp = await client.get(f"{url}")
+ data, _ = await assert_status(resp, status.HTTP_200_OK)
+ assert len(data) == 0
+
+ url = client.app.router["list_folders"].url_for().with_query({"folder_id": "null"})
+ resp = await client.get(f"{url}")
+ data, _ = await assert_status(resp, status.HTTP_200_OK)
+ assert len(data) == 1
+
+ url = (
+ client.app.router["list_folders"]
+ .url_for()
+ .with_query({"folder_id": f"{first_folder['folderId']}"})
+ )
+ resp = await client.get(f"{url}")
+ data, _ = await assert_status(resp, status.HTTP_200_OK)
+ assert len(data) == 1
+
+ return f"{second_folder['folderId']}"
+
+
+async def _move_folder_to_workspace_and_assert(
+ client: TestClient, folder_id: str, workspace_id: str
+):
+ assert client.app
+
+ # MOVE
+ url = client.app.router["move_folder_to_workspace"].url_for(
+ folder_id=folder_id,
+ workspace_id=workspace_id,
+ )
+ resp = await client.post(f"{url}")
+    await assert_status(resp, status.HTTP_204_NO_CONTENT)
+
+ # ASSERT
+ url = (
+ client.app.router["list_projects"]
+ .url_for()
+ .with_query(
+ {
+ "folder_id": folder_id,
+ "workspace_id": workspace_id,
+ }
+ )
+ )
+ resp = await client.get(f"{url}")
+ data, _ = await assert_status(resp, status.HTTP_200_OK)
+ assert len(data) == 2
+
+ url = (
+ client.app.router["list_folders"]
+ .url_for()
+ .with_query(
+ {
+ "folder_id": folder_id,
+ "workspace_id": workspace_id,
+ }
+ )
+ )
+ resp = await client.get(f"{url}")
+ data, _ = await assert_status(resp, status.HTTP_200_OK)
+ assert len(data) == 1
+
+
+async def test_moving_between_private_and_shared_workspaces(
+ client: TestClient,
+ logged_user: UserInfoDict,
+ mock_catalog_api_get_services_for_user_in_product: MockerFixture,
+ fake_project: ProjectDict,
+ moving_folder_id: str,
+ workspaces_clean_db: None,
+):
+ assert client.app
+
+    # The following folder-move scenarios are tested:
+    # 1. Private workspace -> Shared workspace A
+    # 2. Shared workspace A -> Shared workspace B
+    # 3. Shared workspace B -> Shared workspace B (corner case: the endpoint is not normally used this way)
+    # 4. Shared workspace B -> Private workspace
+    # 5. Private workspace -> Private workspace (corner case: the endpoint is not normally used this way)
+
+ # create a new workspace
+ url = client.app.router["create_workspace"].url_for()
+ resp = await client.post(
+ f"{url}",
+ json={
+ "name": "A",
+ "description": "A",
+ "thumbnail": None,
+ },
+ )
+ shared_workspace_A, _ = await assert_status(resp, status.HTTP_201_CREATED)
+
+ # 1. Private workspace -> Shared workspace A
+ await _move_folder_to_workspace_and_assert(
+ client,
+ folder_id=moving_folder_id,
+ workspace_id=f"{shared_workspace_A['workspaceId']}",
+ )
+
+ # create a new workspace
+ url = client.app.router["create_workspace"].url_for()
+ resp = await client.post(
+ f"{url}",
+ json={
+ "name": "B",
+ "description": "B",
+ "thumbnail": None,
+ },
+ )
+ shared_workspace_B, _ = await assert_status(resp, status.HTTP_201_CREATED)
+ # 2. Shared workspace A -> Shared workspace B
+ await _move_folder_to_workspace_and_assert(
+ client,
+ folder_id=moving_folder_id,
+ workspace_id=f"{shared_workspace_B['workspaceId']}",
+ )
+
+ # 3. (Corner case) Shared workspace B -> Shared workspace B
+ await _move_folder_to_workspace_and_assert(
+ client,
+ folder_id=moving_folder_id,
+ workspace_id=f"{shared_workspace_B['workspaceId']}",
+ )
+
+ # 4. Shared workspace -> Private workspace
+ await _move_folder_to_workspace_and_assert(
+ client, folder_id=moving_folder_id, workspace_id="null"
+ )
+
+ # 5. (Corner case) Private workspace -> Private workspace
+ await _move_folder_to_workspace_and_assert(
+ client, folder_id=moving_folder_id, workspace_id="null"
+ )
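
The five scenarios listed at the top of `test_moving_between_private_and_shared_workspaces` can be hard to track across the long test body. As a reading aid only (not part of the patch), here is a compact sketch of the same flow expressed as a loop over destinations. It assumes the fixtures and the `_move_folder_to_workspace_and_assert` helper defined in the new module above; `_create_workspace` is a hypothetical convenience wrapper around the `create_workspace` handler.

```python
# Sketch only: the same five folder moves driven by a loop over destinations.
# Assumes it lives in the test module above (same fixtures and helpers).
from pytest_simcore.helpers.assert_checks import assert_status
from servicelib.aiohttp import status


async def _create_workspace(client, name: str) -> str:
    # Hypothetical helper mirroring the "create_workspace" calls in the test above
    url = client.app.router["create_workspace"].url_for()
    resp = await client.post(
        f"{url}", json={"name": name, "description": name, "thumbnail": None}
    )
    data, _ = await assert_status(resp, status.HTTP_201_CREATED)
    return f"{data['workspaceId']}"


async def test_moving_folder_roundtrip(
    client,
    logged_user,
    mock_catalog_api_get_services_for_user_in_product,
    fake_project,
    moving_folder_id,
    workspaces_clean_db,
):
    workspace_a = await _create_workspace(client, "A")
    workspace_b = await _create_workspace(client, "B")
    # private -> A, A -> B, B -> B (no-op), B -> private, private -> private (no-op)
    for destination in (workspace_a, workspace_b, workspace_b, "null", "null"):
        await _move_folder_to_workspace_and_assert(
            client, folder_id=moving_folder_id, workspace_id=destination
        )
```

The coverage is equivalent to the test above; the loop is only meant to make the scenario ordering easier to follow.
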
diff --git a/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_projects_between_workspaces.py b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_projects_between_workspaces.py
index 21b16ea9738..a81c76012a0 100644
--- a/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_projects_between_workspaces.py
+++ b/services/web/server/tests/unit/with_dbs/04/workspaces/test_workspaces__moving_projects_between_workspaces.py
@@ -55,10 +55,10 @@ async def test_moving_between_workspaces_user_role_permissions(
workspaces_clean_db: None,
):
# Move project from workspace to your private workspace
- base_url = client.app.router["replace_project_workspace"].url_for(
+ base_url = client.app.router["move_project_to_workspace"].url_for(
project_id=fake_project["uuid"], workspace_id="null"
)
- resp = await client.put(f"{base_url}")
+ resp = await client.post(f"{base_url}")
await assert_status(resp, expected.no_content)
@@ -103,10 +103,10 @@ async def test_moving_between_private_and_shared_workspaces(
assert data["workspaceId"] == added_workspace["workspaceId"] # <-- Workspace ID
# Move project from workspace to your private workspace
- base_url = client.app.router["replace_project_workspace"].url_for(
+ base_url = client.app.router["move_project_to_workspace"].url_for(
project_id=project["uuid"], workspace_id="null"
)
- resp = await client.put(f"{base_url}")
+ resp = await client.post(f"{base_url}")
await assert_status(resp, status.HTTP_204_NO_CONTENT)
# Get project in workspace
@@ -116,10 +116,10 @@ async def test_moving_between_private_and_shared_workspaces(
assert data["workspaceId"] is None # <-- Workspace ID is None
# Move project from your private workspace to shared workspace
- base_url = client.app.router["replace_project_workspace"].url_for(
+ base_url = client.app.router["move_project_to_workspace"].url_for(
project_id=project["uuid"], workspace_id=f"{added_workspace['workspaceId']}"
)
- resp = await client.put(f"{base_url}")
+ resp = await client.post(f"{base_url}")
await assert_status(resp, status.HTTP_204_NO_CONTENT)
# Get project in workspace
@@ -182,10 +182,10 @@ async def test_moving_between_shared_and_shared_workspaces(
assert data["workspaceId"] == added_workspace["workspaceId"] # <-- Workspace ID
# Move project from workspace to your private workspace
- base_url = client.app.router["replace_project_workspace"].url_for(
+ base_url = client.app.router["move_project_to_workspace"].url_for(
project_id=project["uuid"], workspace_id=f"{second_workspace['workspaceId']}"
)
- resp = await client.put(f"{base_url}")
+ resp = await client.post(f"{base_url}")
await assert_status(resp, status.HTTP_204_NO_CONTENT)
# Get project in workspace
@@ -262,10 +262,10 @@ async def test_moving_between_workspaces_check_removed_from_folder(
assert data["workspaceId"] == added_workspace["workspaceId"] # <-- Workspace ID
# Move project from workspace to your private workspace
- base_url = client.app.router["replace_project_workspace"].url_for(
+ base_url = client.app.router["move_project_to_workspace"].url_for(
project_id=project["uuid"], workspace_id="none"
)
- resp = await client.put(f"{base_url}")
+ resp = await client.post(f"{base_url}")
await assert_status(resp, status.HTTP_204_NO_CONTENT)
# Get project in workspace
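
For reference, the route rename in this file reduces to the following call pattern: the old PUT on "replace_project_workspace" becomes a POST on "move_project_to_workspace". This is a hedged sketch of how the updated tests drive the endpoint (`move_project_to_private_workspace` is a hypothetical helper name), not an addition to the patch.

```python
# Sketch of the renamed call pattern used throughout the updated tests.
from aiohttp.test_utils import TestClient
from pytest_simcore.helpers.assert_checks import assert_status
from servicelib.aiohttp import status


async def move_project_to_private_workspace(client: TestClient, project_uuid: str) -> None:
    assert client.app
    # workspace_id="null" targets the caller's private workspace
    url = client.app.router["move_project_to_workspace"].url_for(
        project_id=project_uuid, workspace_id="null"
    )
    resp = await client.post(f"{url}")
    await assert_status(resp, status.HTTP_204_NO_CONTENT)
```
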
From f9f2148daf4a9a78a6c4cdc639d3fa781b6f4d28 Mon Sep 17 00:00:00 2001
From: Odei Maiz <33152403+odeimaiz@users.noreply.github.com>
Date: Tue, 3 Dec 2024 14:59:55 +0100
Subject: [PATCH 11/16] =?UTF-8?q?=F0=9F=8E=A8=20[Frontend]=20Show=20suppor?=
=?UTF-8?q?t=20email=20in=20About=20window=20(#6890)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../source/class/osparc/CookiePolicy.js | 20 ++--
.../class/osparc/navigation/UserMenu.js | 6 +-
.../class/osparc/product/AboutProduct.js | 108 ++++++++++++------
.../source/class/osparc/product/Utils.js | 7 +-
.../osparc/product/quickStart/s4l/Welcome.js | 12 +-
.../product/quickStart/s4lacad/Welcome.js | 12 +-
.../product/quickStart/s4llite/Slides.js | 12 +-
.../source/class/osparc/store/Support.js | 26 ++---
8 files changed, 121 insertions(+), 82 deletions(-)
diff --git a/services/static-webserver/client/source/class/osparc/CookiePolicy.js b/services/static-webserver/client/source/class/osparc/CookiePolicy.js
index 7a0327c09f6..378ff902da4 100644
--- a/services/static-webserver/client/source/class/osparc/CookiePolicy.js
+++ b/services/static-webserver/client/source/class/osparc/CookiePolicy.js
@@ -154,17 +154,15 @@ qx.Class.define("osparc.CookiePolicy", {
control = new qx.ui.basic.Label(text).set({
rich : true
});
- osparc.store.Support.getLicenseURL()
- .then(licenseLink => {
- const lbl = control.getValue();
- if (licenseLink) {
- const color = qx.theme.manager.Color.getInstance().resolve("text");
- const textLink = `Licensing.`;
- control.setValue(lbl + textLink);
- } else {
- control.setValue(lbl + this.tr("Licensing."));
- }
- });
+ const licenseLink = osparc.store.Support.getLicenseURL();
+ const lbl = control.getValue();
+ if (licenseLink) {
+ const color = qx.theme.manager.Color.getInstance().resolve("text");
+ const textLink = `Licensing.`;
+ control.setValue(lbl + textLink);
+ } else {
+ control.setValue(lbl + this.tr("Licensing."));
+ }
this._add(control, {
column: 0,
row: 2
diff --git a/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js b/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
index dc029edbba9..be4a1c8f4a8 100644
--- a/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
+++ b/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
@@ -127,8 +127,8 @@ qx.Class.define("osparc.navigation.UserMenu", {
case "license":
control = new qx.ui.menu.Button(this.tr("License"));
osparc.utils.Utils.setIdToWidget(control, "userMenuLicenseBtn");
- osparc.store.Support.getLicenseURL()
- .then(licenseURL => control.addListener("execute", () => window.open(licenseURL)));
+ const licenseURL = osparc.store.Support.getLicenseURL();
+ control.addListener("execute", () => window.open(licenseURL));
this.add(control);
break;
case "tip-lite-button":
@@ -257,7 +257,7 @@ qx.Class.define("osparc.navigation.UserMenu", {
}
this.getChildControl("about");
- if (!osparc.product.Utils.isProduct("osparc")) {
+ if (osparc.product.Utils.showAboutProduct()) {
this.getChildControl("about-product");
}
this.getChildControl("license");
diff --git a/services/static-webserver/client/source/class/osparc/product/AboutProduct.js b/services/static-webserver/client/source/class/osparc/product/AboutProduct.js
index 97e18eeadfd..c0760d01082 100644
--- a/services/static-webserver/client/source/class/osparc/product/AboutProduct.js
+++ b/services/static-webserver/client/source/class/osparc/product/AboutProduct.js
@@ -57,6 +57,10 @@ qx.Class.define("osparc.product.AboutProduct", {
case "s4llite":
this.__buildS4LLiteLayout();
break;
+ case "tis":
+ case "tiplite":
+ this.__buildTIPLayout();
+ break;
default: {
const noInfoText = this.tr("Information not available");
const noInfoLabel = osparc.product.quickStart.Utils.createLabel(noInfoText);
@@ -67,43 +71,43 @@ qx.Class.define("osparc.product.AboutProduct", {
},
__buildS4LLayout: function() {
- osparc.store.Support.getLicenseURL()
- .then(licenseUrl => {
- const text = this.tr(`
- sim4life.io is a native implementation of the most advanced simulation platform, Sim4Life, in the cloud. \
- The platform empowers users to simulate, analyze, and predict complex, multifaceted, and dynamic biological interactions within the full anatomical complexity of the human body. \
- It provides the ability to set up and run complex simulations directly within any browser, utilizing cloud technology.
-
- sim4life.io makes use of technologies developed by our research partner for the o2S2PARC platform, the IT’IS Foundation, and co-funded by the U.S. National Institutes of Health’s SPARC initiative.\
-
- For more information about Sim4Life, please visit ${osparc.utils.Utils.createHTMLLink("sim4life.swiss", "https://sim4life.swiss/")}.
-
- To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}.
- `);
-
- const label = osparc.product.quickStart.Utils.createLabel(text);
- this.add(label);
- });
+ const licenseUrl = osparc.store.Support.getLicenseURL();
+ const text = this.tr(`
+ sim4life.io is a native implementation of the most advanced simulation platform, Sim4Life, in the cloud. \
+ The platform empowers users to simulate, analyze, and predict complex, multifaceted, and dynamic biological interactions within the full anatomical complexity of the human body. \
+ It provides the ability to set up and run complex simulations directly within any browser, utilizing cloud technology.
+
+ sim4life.io makes use of technologies developed by our research partner for the o2S2PARC platform, the IT’IS Foundation, and co-funded by the U.S. National Institutes of Health’s SPARC initiative.\
+
+ For more information about Sim4Life, please visit ${osparc.utils.Utils.createHTMLLink("sim4life.swiss", "https://sim4life.swiss/")}.
+
+ To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}.
+
+ Send us an email ${this.__getMailTo()}
+ `);
+
+ const label = osparc.product.quickStart.Utils.createLabel(text);
+ this.add(label);
},
__buildS4LAcademicLayout: function() {
- osparc.store.Support.getLicenseURL()
- .then(licenseUrl => {
- const text = this.tr(`
- sim4life.science is a native implementation of the most advanced simulation platform, Sim4Life, in the cloud. \
- The platform empowers users to simulate, analyze, and predict complex, multifaceted, and dynamic biological interactions within the full anatomical complexity of the human body. \
- It provides the ability to set up and run complex simulations directly within any browser, utilizing cloud technology.
-
- sim4life.science makes use of technologies developed by our research partner for the o2S2PARC platform, the IT’IS Foundation, and co-funded by the U.S. National Institutes of Health’s SPARC initiative.\
-
- For more information about Sim4Life, please visit ${osparc.utils.Utils.createHTMLLink("sim4life.swiss", "href='https://sim4life.swiss/")}.
-
- To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}.
- `);
-
- const label = osparc.product.quickStart.Utils.createLabel(text);
- this.add(label);
- });
+ const licenseUrl = osparc.store.Support.getLicenseURL();
+ const text = this.tr(`
+ sim4life.science is a native implementation of the most advanced simulation platform, Sim4Life, in the cloud. \
+ The platform empowers users to simulate, analyze, and predict complex, multifaceted, and dynamic biological interactions within the full anatomical complexity of the human body. \
+ It provides the ability to set up and run complex simulations directly within any browser, utilizing cloud technology.
+
+ sim4life.science makes use of technologies developed by our research partner for the o2S2PARC platform, the IT’IS Foundation, and co-funded by the U.S. National Institutes of Health’s SPARC initiative.\
+
+      For more information about Sim4Life, please visit ${osparc.utils.Utils.createHTMLLink("sim4life.swiss", "https://sim4life.swiss/")}.
+
+ To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}.
+
+ Send us an email ${this.__getMailTo()}
+ `);
+
+ const label = osparc.product.quickStart.Utils.createLabel(text);
+ this.add(label);
},
__buildS4LLiteLayout: function() {
@@ -117,10 +121,13 @@ qx.Class.define("osparc.product.AboutProduct", {
const moreInfoUrl = "https://zmt.swiss/";
const moreInfoText = `For more information about Sim4Life.lite, visit ${osparc.utils.Utils.createHTMLLink("our website", moreInfoUrl)}.`;
+ const emailText = `Send us an email ${this.__getMailTo()}`;
+
[
introText,
licenseText,
- moreInfoText
+ moreInfoText,
+ emailText,
].forEach(text => {
const label = osparc.product.quickStart.Utils.createLabel(text);
this.add(label);
@@ -129,6 +136,35 @@ qx.Class.define("osparc.product.AboutProduct", {
this.__addCopyright();
},
+ __buildTIPLayout: function() {
+ const licenseUrl = osparc.store.Support.getLicenseURL();
+ const text = this.tr(`
+ TIP (TI Planning Tool) is an innovative online platform designed to optimize targeted neurostimulation protocols using \
+ temporal interference (TI) stimulation. Developed by IT'IS Foundation, TIP simplifies the complex process of planning deep \
+ brain stimulation.
+
+ Powered by o2S2PARC technology, TIP utilizes sophisticated electromagnetic simulations, detailed anatomical head models, \
+ and automated optimization to generate comprehensive reports with quantitative and visual information. This tool is \
+ invaluable for neuroscientists and brain stimulation experts, especially those with limited computational modeling experience, \
+ enabling them to create effective and safe stimulation protocols for their research. \
+
+ For more information about TIP, please visit ${osparc.utils.Utils.createHTMLLink("itis.swiss", "https://itis.swiss/tools-and-systems/ti-planning/overview")}.
+
+ To review license agreements, click ${osparc.utils.Utils.createHTMLLink("here", licenseUrl)}.
+
+ Send us an email ${this.__getMailTo()}
+ `);
+
+ const label = osparc.product.quickStart.Utils.createLabel(text);
+ this.add(label);
+ },
+
+ __getMailTo: function() {
+ const supportEmail = osparc.store.VendorInfo.getInstance().getSupportEmail();
+ const productName = osparc.store.StaticInfo.getInstance().getDisplayName();
+ return osparc.store.Support.mailToText(supportEmail, "Support " + productName, false);
+ },
+
__addCopyright: function() {
const copyrightLink = new osparc.ui.basic.LinkLabel().set({
font: "link-label-14"
@@ -141,6 +177,6 @@ qx.Class.define("osparc.product.AboutProduct", {
});
}
this.add(copyrightLink);
- }
+ },
}
});
diff --git a/services/static-webserver/client/source/class/osparc/product/Utils.js b/services/static-webserver/client/source/class/osparc/product/Utils.js
index 45d3b7de661..4535f1ca8b6 100644
--- a/services/static-webserver/client/source/class/osparc/product/Utils.js
+++ b/services/static-webserver/client/source/class/osparc/product/Utils.js
@@ -225,7 +225,12 @@ qx.Class.define("osparc.product.Utils", {
},
showAboutProduct: function() {
- return (this.isS4LProduct() || this.isProduct("s4llite"));
+ return (
+ this.isS4LProduct() ||
+ this.isProduct("s4llite") ||
+ this.isProduct("tis") ||
+ this.isProduct("tiplite")
+ );
},
showPreferencesTokens: function() {
diff --git a/services/static-webserver/client/source/class/osparc/product/quickStart/s4l/Welcome.js b/services/static-webserver/client/source/class/osparc/product/quickStart/s4l/Welcome.js
index bec6916504e..518416f1373 100644
--- a/services/static-webserver/client/source/class/osparc/product/quickStart/s4l/Welcome.js
+++ b/services/static-webserver/client/source/class/osparc/product/quickStart/s4l/Welcome.js
@@ -125,12 +125,12 @@ qx.Class.define("osparc.product.quickStart.s4l.Welcome", {
textAlign: "center",
rich : true
});
- osparc.store.Support.getLicenseURL()
- .then(licenseUrl => {
- const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl);
- licenseLink.setValue(link);
- licenseLink.show();
- });
+ const licenseUrl = osparc.store.Support.getLicenseURL();
+ if (licenseUrl) {
+ const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl);
+ licenseLink.setValue(link);
+ licenseLink.show();
+ }
footerItems.push(licenseLink);
const dontShowCB = osparc.product.quickStart.Utils.createDontShowAgain("s4lDontShowQuickStart");
diff --git a/services/static-webserver/client/source/class/osparc/product/quickStart/s4lacad/Welcome.js b/services/static-webserver/client/source/class/osparc/product/quickStart/s4lacad/Welcome.js
index ed8a78dbdb6..c81b9813d51 100644
--- a/services/static-webserver/client/source/class/osparc/product/quickStart/s4lacad/Welcome.js
+++ b/services/static-webserver/client/source/class/osparc/product/quickStart/s4lacad/Welcome.js
@@ -125,12 +125,12 @@ qx.Class.define("osparc.product.quickStart.s4lacad.Welcome", {
textAlign: "center",
rich : true
});
- osparc.store.Support.getLicenseURL()
- .then(licenseUrl => {
- const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl);
- licenseLink.setValue(link);
- licenseLink.show();
- });
+ const licenseUrl = osparc.store.Support.getLicenseURL();
+ if (licenseUrl) {
+ const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl);
+ licenseLink.setValue(link);
+ licenseLink.show();
+ }
footerItems.push(licenseLink);
const dontShowCB = osparc.product.quickStart.Utils.createDontShowAgain("s4lDontShowQuickStart");
diff --git a/services/static-webserver/client/source/class/osparc/product/quickStart/s4llite/Slides.js b/services/static-webserver/client/source/class/osparc/product/quickStart/s4llite/Slides.js
index 3d3bcf6e048..d7726632407 100644
--- a/services/static-webserver/client/source/class/osparc/product/quickStart/s4llite/Slides.js
+++ b/services/static-webserver/client/source/class/osparc/product/quickStart/s4llite/Slides.js
@@ -53,12 +53,12 @@ qx.Class.define("osparc.product.quickStart.s4llite.Slides", {
textAlign: "center",
rich : true
});
- osparc.store.Support.getLicenseURL()
- .then(licenseUrl => {
- const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl);
- licenseLink.setValue(link);
- licenseLink.show();
- });
+ const licenseUrl = osparc.store.Support.getLicenseURL();
+ if (licenseUrl) {
+ const link = osparc.utils.Utils.createHTMLLink("Licensing", licenseUrl);
+ licenseLink.setValue(link);
+ licenseLink.show();
+ }
footerItems.push(licenseLink);
const dontShowCB = osparc.product.quickStart.Utils.createDontShowAgain("s4lliteDontShowQuickStart");
diff --git a/services/static-webserver/client/source/class/osparc/store/Support.js b/services/static-webserver/client/source/class/osparc/store/Support.js
index e79de4d1a27..1352ef2eac9 100644
--- a/services/static-webserver/client/source/class/osparc/store/Support.js
+++ b/services/static-webserver/client/source/class/osparc/store/Support.js
@@ -4,18 +4,15 @@ qx.Class.define("osparc.store.Support", {
statics: {
getLicenseURL: function() {
- return new Promise(resolve => {
- const vendor = osparc.store.VendorInfo.getInstance().getVendor();
- if (vendor) {
- if ("license_url" in vendor) {
- resolve(vendor["license_url"]);
- } else if ("url" in vendor) {
- resolve(vendor["url"]);
- } else {
- resolve("");
- }
+ const vendor = osparc.store.VendorInfo.getInstance().getVendor();
+ if (vendor) {
+ if ("license_url" in vendor) {
+ return vendor["license_url"];
+ } else if ("url" in vendor) {
+ return vendor["url"];
}
- });
+ }
+ return "";
},
getManuals: function() {
@@ -135,9 +132,12 @@ qx.Class.define("osparc.store.Support", {
});
},
- mailToText: function(email, subject) {
+ mailToText: function(email, subject, centered = true) {
const color = qx.theme.manager.Color.getInstance().resolve("text");
- const textLink = `${email}  `;
+ let textLink = `${email}  `;
+ if (centered) {
+        textLink = `<center>${textLink}</center>`;
+ }
return textLink;
},
From fd62ccf073ed9d0faa98ea89707cb823e7dee51d Mon Sep 17 00:00:00 2001
From: Sylvain <35365065+sanderegg@users.noreply.github.com>
Date: Tue, 3 Dec 2024 18:25:53 +0100
Subject: [PATCH 12/16] =?UTF-8?q?=F0=9F=92=A3=20Remove=20osparc-gateway-se?=
=?UTF-8?q?rver=20and=20clusters=20endpoints=20(#6881)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.codecov.yml | 3 -
.env-devel | 3 -
.github/CODEOWNERS | 1 -
.github/workflows/ci-testing-deploy.yml | 145 -
Makefile | 1 -
api/specs/web-server/_cluster.py | 104 -
api/specs/web-server/_computations.py | 8 +-
api/specs/web-server/openapi.py | 1 -
.../api_schemas_directorv2/clusters.py | 147 +-
.../api_schemas_directorv2/comp_tasks.py | 17 +-
.../api_schemas_webserver/clusters.py | 33 -
.../api_schemas_webserver/computations.py | 3 -
.../src/models_library/clusters.py | 174 +-
.../src/models_library/projects_pipeline.py | 7 -
.../models-library/tests/test_clusters.py | 90 -
.../src/pytest_simcore/dask_gateway.py | 119 -
.../services_api_mocks_for_aiohttp_clients.py | 161 +-
.../pytest_simcore/simcore_dask_service.py | 4 +-
services/api-server/openapi.json | 14 +-
.../api/routes/solvers_jobs.py | 18 +-
.../api/routes/studies_jobs.py | 14 +-
.../services/director_v2.py | 7 -
.../services/jobs.py | 4 +-
.../services/webserver.py | 11 +-
.../tests/mocks/get_job_outputs.json | 1115 +++---
.../tests/mocks/run_study_workflow.json | 3174 ++++++++---------
.../tests/mocks/start_job_no_payment.json | 188 +-
.../mocks/start_job_not_enough_credit.json | 465 ++-
.../tests/mocks/start_job_with_payment.json | 552 ++-
.../tests/mocks/start_solver_job.json | 147 +-
services/api-server/tests/mocks/stop_job.json | 224 +-
.../mocks/study_job_start_stop_delete.json | 465 ++-
.../test_api_routers_solvers_jobs.py | 3 -
.../core/settings.py | 6 +-
.../auto_scaling_mode_computational.py | 4 +-
.../modules/dask.py | 20 +-
.../tests/unit/test_modules_dask.py | 24 +-
.../core/settings.py | 4 +-
.../modules/dask.py | 10 +-
.../utils/clusters.py | 4 +-
.../utils/dask.py | 4 +-
.../tests/unit/test_modules_dask.py | 16 +-
.../tests/unit/test_utils_clusters.py | 8 +-
services/dask-sidecar/README.md | 39 -
services/dask-sidecar/requirements/_base.in | 1 -
services/dask-sidecar/requirements/_base.txt | 13 +-
services/director-v2/openapi.json | 2354 +++++-------
services/director-v2/requirements/_base.in | 1 -
services/director-v2/requirements/_base.txt | 12 +-
services/director-v2/requirements/_test.in | 1 -
services/director-v2/requirements/_test.txt | 16 -
.../api/entrypoints.py | 2 -
.../api/routes/clusters.py | 242 --
.../api/routes/computations.py | 37 +-
.../core/application.py | 7 -
.../core/errors.py | 12 +-
.../core/settings.py | 17 +-
.../models/comp_runs.py | 17 +-
.../modules/clusters_keeper.py | 1 -
.../modules/comp_scheduler/_manager.py | 7 +-
.../modules/comp_scheduler/_scheduler_base.py | 6 +-
.../modules/comp_scheduler/_scheduler_dask.py | 37 +-
.../modules/comp_scheduler/_utils.py | 4 +-
.../modules/dask_client.py | 25 +-
.../modules/db/repositories/clusters.py | 286 --
.../modules/db/repositories/comp_runs.py | 10 +-
.../db/repositories/comp_tasks/_utils.py | 28 +-
.../simcore_service_director_v2/utils/dask.py | 16 +-
.../utils/dask_client_utils.py | 196 +-
.../simcore_service_director_v2/utils/db.py | 15 -
services/director-v2/tests/conftest.py | 1 -
.../tests/helpers/shared_comp_utils.py | 5 +-
.../integration/01/test_computation_api.py | 16 +-
...t_dynamic_sidecar_nodeports_integration.py | 5 +-
.../director-v2/tests/unit/_dask_helpers.py | 13 -
services/director-v2/tests/unit/conftest.py | 7 -
.../tests/unit/test_models_clusters.py | 38 +-
.../tests/unit/test_modules_dask_client.py | 129 +-
.../unit/test_modules_dask_clients_pool.py | 135 +-
.../director-v2/tests/unit/test_utils_db.py | 30 -
.../test_api_route_computations.py | 137 +-
.../test_api_route_computations_tasks.py | 2 +
.../test_db_repositories_comp_runs.py | 33 -
.../with_dbs/comp_scheduler/test_manager.py | 9 +-
.../comp_scheduler/test_scheduler_dask.py | 8 -
.../with_dbs/comp_scheduler/test_worker.py | 3 -
.../tests/unit/with_dbs/conftest.py | 86 -
.../unit/with_dbs/test_api_route_clusters.py | 802 -----
.../test_api_route_clusters_details.py | 254 --
.../tests/unit/with_dbs/test_utils_dask.py | 14 +-
services/docker-bake.hcl | 9 -
services/docker-compose-build.yml | 16 -
services/docker-compose-deploy.yml | 2 -
services/osparc-gateway-server/.env-devel | 2 -
services/osparc-gateway-server/.gitignore | 1 -
services/osparc-gateway-server/Dockerfile | 177 -
services/osparc-gateway-server/Makefile | 155 -
services/osparc-gateway-server/README.md | 1 -
services/osparc-gateway-server/VERSION | 1 -
.../config/default_config.py | 12 -
.../docker-compose.devel.yml | 10 -
.../docker-compose.local.yml | 8 -
.../osparc-gateway-server/docker-compose.yml | 37 -
services/osparc-gateway-server/docker/boot.sh | 45 -
.../docker/entrypoint.sh | 80 -
.../requirements/Makefile | 8 -
.../requirements/_base.in | 11 -
.../requirements/_base.txt | 68 -
.../requirements/_test.in | 25 -
.../requirements/_test.txt | 213 --
.../requirements/_tools.in | 8 -
.../requirements/_tools.txt | 87 -
.../osparc-gateway-server/requirements/ci.txt | 19 -
.../requirements/constraints.txt | 0
.../requirements/dev.txt | 18 -
.../requirements/prod.txt | 15 -
services/osparc-gateway-server/setup.cfg | 15 -
services/osparc-gateway-server/setup.py | 58 -
.../src/osparc_gateway_server/__init__.py | 1 -
.../src/osparc_gateway_server/app.py | 7 -
.../osparc_gateway_server/backend/__init__.py | 0
.../osparc_gateway_server/backend/errors.py | 14 -
.../osparc_gateway_server/backend/models.py | 42 -
.../osparc_gateway_server/backend/osparc.py | 350 --
.../osparc_gateway_server/backend/settings.py | 76 -
.../osparc_gateway_server/backend/utils.py | 403 ---
.../src/osparc_gateway_server/remote_debug.py | 24 -
.../osparc-gateway-server/tests/conftest.py | 26 -
.../tests/integration/_dask_helpers.py | 10 -
.../tests/integration/conftest.py | 139 -
.../tests/integration/test_clusters.py | 255 --
.../tests/integration/test_dask_sidecar.py | 146 -
.../tests/integration/test_gateway.py | 55 -
.../tests/system/Makefile | 43 -
.../tests/system/requirements/Makefile | 6 -
.../tests/system/requirements/_base.txt | 6 -
.../tests/system/requirements/_test.in | 20 -
.../tests/system/requirements/_test.txt | 194 -
.../tests/system/requirements/_tools.in | 4 -
.../tests/system/requirements/_tools.txt | 78 -
.../tests/system/requirements/ci.txt | 14 -
.../tests/system/requirements/dev.txt | 15 -
.../tests/system/test_deploy.py | 160 -
.../tests/unit/test_osparc.py | 0
.../tests/unit/test_settings.py | 20 -
.../tests/unit/test_utils.py | 460 ---
.../api/v0/openapi.yaml | 780 +---
.../simcore_service_webserver/application.py | 3 +-
.../application_settings.py | 2 -
.../application_settings_utils.py | 2 -
.../clusters/__init__.py | 0
.../clusters/_handlers.py | 200 --
.../clusters/plugin.py | 34 -
.../director_v2/_core_computations.py | 213 +-
.../director_v2/_handlers.py | 24 +-
.../director_v2/_models.py | 88 -
.../director_v2/api.py | 25 +-
.../director_v2/exceptions.py | 24 -
.../isolated/test_application_settings.py | 6 +-
.../unit/with_dbs/01/clusters/conftest.py | 10 -
.../01/clusters/test_clusters_handlers.py | 531 ---
.../01/clusters/test_clusters_plugin_setup.py | 26 -
.../unit/with_dbs/01/test_director_v2.py | 80 -
.../with_dbs/01/test_director_v2_handlers.py | 8 +-
.../test_used_docker_compose.py | 2 +-
165 files changed, 4400 insertions(+), 13978 deletions(-)
delete mode 100644 api/specs/web-server/_cluster.py
delete mode 100644 packages/models-library/src/models_library/api_schemas_webserver/clusters.py
delete mode 100644 packages/models-library/tests/test_clusters.py
delete mode 100644 packages/pytest-simcore/src/pytest_simcore/dask_gateway.py
delete mode 100644 services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py
delete mode 100644 services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py
delete mode 100644 services/director-v2/tests/unit/_dask_helpers.py
delete mode 100644 services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py
delete mode 100644 services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py
delete mode 100644 services/osparc-gateway-server/.env-devel
delete mode 100644 services/osparc-gateway-server/.gitignore
delete mode 100644 services/osparc-gateway-server/Dockerfile
delete mode 100644 services/osparc-gateway-server/Makefile
delete mode 100644 services/osparc-gateway-server/README.md
delete mode 100644 services/osparc-gateway-server/VERSION
delete mode 100644 services/osparc-gateway-server/config/default_config.py
delete mode 100644 services/osparc-gateway-server/docker-compose.devel.yml
delete mode 100644 services/osparc-gateway-server/docker-compose.local.yml
delete mode 100644 services/osparc-gateway-server/docker-compose.yml
delete mode 100755 services/osparc-gateway-server/docker/boot.sh
delete mode 100755 services/osparc-gateway-server/docker/entrypoint.sh
delete mode 100644 services/osparc-gateway-server/requirements/Makefile
delete mode 100644 services/osparc-gateway-server/requirements/_base.in
delete mode 100644 services/osparc-gateway-server/requirements/_base.txt
delete mode 100644 services/osparc-gateway-server/requirements/_test.in
delete mode 100644 services/osparc-gateway-server/requirements/_test.txt
delete mode 100644 services/osparc-gateway-server/requirements/_tools.in
delete mode 100644 services/osparc-gateway-server/requirements/_tools.txt
delete mode 100644 services/osparc-gateway-server/requirements/ci.txt
delete mode 100644 services/osparc-gateway-server/requirements/constraints.txt
delete mode 100644 services/osparc-gateway-server/requirements/dev.txt
delete mode 100644 services/osparc-gateway-server/requirements/prod.txt
delete mode 100644 services/osparc-gateway-server/setup.cfg
delete mode 100755 services/osparc-gateway-server/setup.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/__init__.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/app.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/backend/__init__.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py
delete mode 100644 services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py
delete mode 100644 services/osparc-gateway-server/tests/conftest.py
delete mode 100644 services/osparc-gateway-server/tests/integration/_dask_helpers.py
delete mode 100644 services/osparc-gateway-server/tests/integration/conftest.py
delete mode 100644 services/osparc-gateway-server/tests/integration/test_clusters.py
delete mode 100644 services/osparc-gateway-server/tests/integration/test_dask_sidecar.py
delete mode 100644 services/osparc-gateway-server/tests/integration/test_gateway.py
delete mode 100644 services/osparc-gateway-server/tests/system/Makefile
delete mode 100644 services/osparc-gateway-server/tests/system/requirements/Makefile
delete mode 100644 services/osparc-gateway-server/tests/system/requirements/_base.txt
delete mode 100644 services/osparc-gateway-server/tests/system/requirements/_test.in
delete mode 100644 services/osparc-gateway-server/tests/system/requirements/_test.txt
delete mode 100644 services/osparc-gateway-server/tests/system/requirements/_tools.in
delete mode 100644 services/osparc-gateway-server/tests/system/requirements/_tools.txt
delete mode 100644 services/osparc-gateway-server/tests/system/requirements/ci.txt
delete mode 100644 services/osparc-gateway-server/tests/system/requirements/dev.txt
delete mode 100644 services/osparc-gateway-server/tests/system/test_deploy.py
delete mode 100644 services/osparc-gateway-server/tests/unit/test_osparc.py
delete mode 100644 services/osparc-gateway-server/tests/unit/test_settings.py
delete mode 100644 services/osparc-gateway-server/tests/unit/test_utils.py
delete mode 100644 services/web/server/src/simcore_service_webserver/clusters/__init__.py
delete mode 100644 services/web/server/src/simcore_service_webserver/clusters/_handlers.py
delete mode 100644 services/web/server/src/simcore_service_webserver/clusters/plugin.py
delete mode 100644 services/web/server/src/simcore_service_webserver/director_v2/_models.py
delete mode 100644 services/web/server/tests/unit/with_dbs/01/clusters/conftest.py
delete mode 100644 services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_handlers.py
delete mode 100644 services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_plugin_setup.py
diff --git a/.codecov.yml b/.codecov.yml
index 341e18a09bd..ca81ee39226 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -94,9 +94,6 @@ component_management:
- component_id: invitations
paths:
- services/invitations/**
- - component_id: osparc_gateway_server
- paths:
- - services/osparc-gateway-server/**
- component_id: payments
paths:
- services/payments/**
diff --git a/.env-devel b/.env-devel
index 7703fa8082a..df3ea3bb4a7 100644
--- a/.env-devel
+++ b/.env-devel
@@ -259,7 +259,6 @@ WB_API_WEBSERVER_PORT=8080
WB_GC_ACTIVITY=null
WB_GC_ANNOUNCEMENTS=0
WB_GC_CATALOG=null
-WB_GC_CLUSTERS=0
WB_GC_DB_LISTENER=0
WB_GC_DIAGNOSTICS=null
WB_GC_EMAIL=null
@@ -292,7 +291,6 @@ WB_GC_WALLETS=0
WB_DB_EL_ACTIVITY=null
WB_DB_EL_ANNOUNCEMENTS=0
WB_DB_EL_CATALOG=null
-WB_DB_EL_CLUSTERS=0
WB_DB_EL_DB_LISTENER=1
WB_DB_EL_DIAGNOSTICS=null
WB_DB_EL_EMAIL=null
@@ -359,7 +357,6 @@ TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT=["41"]
WEBSERVER_ACTIVITY=null
WEBSERVER_ANNOUNCEMENTS=1
WEBSERVER_CATALOG={}
-WEBSERVER_CLUSTERS=0
WEBSERVER_CREDIT_COMPUTATION_ENABLED=1
WEBSERVER_DB_LISTENER=0
WEBSERVER_DEV_FEATURES_ENABLED=0
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 007676d351c..36c26ee310e 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -33,7 +33,6 @@ Makefile @pcrespov @sanderegg
/services/efs-guardian/ @matusdrobuliak66
/services/invitations/ @pcrespov
/services/migration/ @pcrespov
-/services/osparc-gateway-server/ @sanderegg
/services/payments/ @pcrespov @matusdrobuliak66
/services/resource-usage-tracker/ @matusdrobuliak66
/services/static-webserver/ @GitHK
diff --git a/.github/workflows/ci-testing-deploy.yml b/.github/workflows/ci-testing-deploy.yml
index 96091cff042..516a401f9e3 100644
--- a/.github/workflows/ci-testing-deploy.yml
+++ b/.github/workflows/ci-testing-deploy.yml
@@ -75,7 +75,6 @@ jobs:
efs-guardian: ${{ steps.filter.outputs.efs-guardian }}
invitations: ${{ steps.filter.outputs.invitations }}
migration: ${{ steps.filter.outputs.migration }}
- osparc-gateway-server: ${{ steps.filter.outputs.osparc-gateway-server }}
payments: ${{ steps.filter.outputs.payments }}
dynamic-scheduler: ${{ steps.filter.outputs.dynamic-scheduler }}
resource-usage-tracker: ${{ steps.filter.outputs.resource-usage-tracker }}
@@ -222,12 +221,6 @@ jobs:
- 'packages/**'
- 'services/migration/**'
- 'services/docker-compose*'
- osparc-gateway-server:
- - 'packages/**'
- - 'services/osparc-gateway-server/**'
- - 'services/docker-compose*'
- - 'scripts/mypy/*'
- - 'mypy.ini'
payments:
- 'packages/**'
- 'services/payments/**'
@@ -1161,64 +1154,6 @@ jobs:
with:
token: ${{ secrets.CODECOV_TOKEN }}
- unit-test-osparc-gateway-server:
- needs: changes
- if: ${{ needs.changes.outputs.osparc-gateway-server == 'true' || github.event_name == 'push' }}
- timeout-minutes: 18 # if this timeout gets too small, then split the tests
- name: "[unit] osparc-gateway-server"
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- python: ["3.11"]
- os: [ubuntu-22.04]
- fail-fast: false
- steps:
- - uses: actions/checkout@v4
- - name: setup docker buildx
- id: buildx
- uses: docker/setup-buildx-action@v3
- with:
- driver: docker-container
- - name: setup python environment
- uses: actions/setup-python@v5
- with:
- python-version: ${{ matrix.python }}
- - name: install uv
- uses: astral-sh/setup-uv@v4
- with:
- version: "0.4.x"
- enable-cache: false
- cache-dependency-glob: "**/osparc-gateway-server/requirements/ci.txt"
- - name: show system version
- run: ./ci/helpers/show_system_versions.bash
- - name: install
- run: |
- make devenv
- source .venv/bin/activate && \
- pushd services/osparc-gateway-server && \
- make install-ci
- - name: typecheck
- run: |
- source .venv/bin/activate && \
- pushd services/osparc-gateway-server && \
- make mypy
- - name: test
- if: ${{ !cancelled() }}
- run: |
- source .venv/bin/activate && \
- pushd services/osparc-gateway-server && \
- make test-ci-unit
- - uses: codecov/codecov-action@v5.0.7
- env:
- CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- with:
- flags: unittests #optional
- - name: Upload test results to Codecov
- if: ${{ !cancelled() }}
- uses: codecov/test-results-action@v1
- with:
- token: ${{ secrets.CODECOV_TOKEN }}
-
unit-test-payments:
needs: changes
if: ${{ needs.changes.outputs.payments == 'true' || github.event_name == 'push' }}
@@ -1965,7 +1900,6 @@ jobs:
unit-test-models-library,
unit-test-common-library,
unit-test-notifications-library,
- unit-test-osparc-gateway-server,
unit-test-payments,
unit-test-dynamic-scheduler,
unit-test-postgres-database,
@@ -2317,84 +2251,6 @@ jobs:
with:
flags: integrationtests #optional
- integration-test-osparc-gateway-server:
- needs: [changes, build-test-images]
- if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.osparc-gateway-server == 'true' || github.event_name == 'push' }}
- timeout-minutes: 30 # if this timeout gets too small, then split the tests
- name: "[int] osparc-gateway-server"
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- python: ["3.11"]
- os: [ubuntu-22.04]
- fail-fast: false
- steps:
- - uses: actions/checkout@v4
- - name: setup docker buildx
- id: buildx
- uses: docker/setup-buildx-action@v3
- with:
- driver: docker-container
-
- - name: setup python environment
- uses: actions/setup-python@v5
- with:
- python-version: ${{ matrix.python }}
- - name: expose github runtime for buildx
- uses: crazy-max/ghaction-github-runtime@v3
- # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249
- - name: download docker images with retry
- uses: Wandalen/wretry.action@master
- with:
- action: actions/download-artifact@v4
- with: |
- name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}
- path: /${{ runner.temp }}/build
- attempt_limit: 5
- attempt_delay: 1000
- - name: load docker images
- run: make load-images local-src=/${{ runner.temp }}/build
- - name: install uv
- uses: astral-sh/setup-uv@v4
- with:
- version: "0.4.x"
- enable-cache: false
- cache-dependency-glob: "**/osparc-gateway-server/requirements/ci.txt"
- - name: show system version
- run: ./ci/helpers/show_system_versions.bash
- - name: install
- run: |
- make devenv && \
- source .venv/bin/activate && \
- pushd services/osparc-gateway-server && \
- make install-ci
- - name: integration-test
- run: |
- source .venv/bin/activate && \
- pushd services/osparc-gateway-server && \
- make test-ci-integration
- - name: system-test
- run: |
- source .venv/bin/activate && \
- pushd services/osparc-gateway-server && \
- make test-system
- - name: upload failed tests logs
- if: ${{ !cancelled() }}
- uses: actions/upload-artifact@v4
- with:
- name: ${{ github.job }}_docker_logs
- path: ./services/director-v2/test_failures
- - name: cleanup
- if: ${{ !cancelled() }}
- run: |
- pushd services/osparc-gateway-server && \
- make down
- - uses: codecov/codecov-action@v5.0.7
- env:
- CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- with:
- flags: integrationtests #optional
-
integration-test-simcore-sdk:
needs: [changes, build-test-images]
if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.simcore-sdk == 'true' || github.event_name == 'push' }}
@@ -2466,7 +2322,6 @@ jobs:
integration-test-director-v2-01,
integration-test-director-v2-02,
integration-test-dynamic-sidecar,
- integration-test-osparc-gateway-server,
integration-test-simcore-sdk,
integration-test-webserver-01,
integration-test-webserver-02,
diff --git a/Makefile b/Makefile
index 9dbdf84c9f4..e6176c55136 100644
--- a/Makefile
+++ b/Makefile
@@ -44,7 +44,6 @@ SERVICES_NAMES_TO_BUILD := \
efs-guardian \
invitations \
migration \
- osparc-gateway-server \
payments \
resource-usage-tracker \
dynamic-scheduler \
diff --git a/api/specs/web-server/_cluster.py b/api/specs/web-server/_cluster.py
deleted file mode 100644
index 0a33c049f3e..00000000000
--- a/api/specs/web-server/_cluster.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from typing import Annotated
-
-from fastapi import APIRouter, Depends, status
-from models_library.api_schemas_webserver.clusters import (
- ClusterCreate,
- ClusterDetails,
- ClusterGet,
- ClusterPatch,
- ClusterPathParams,
- ClusterPing,
-)
-from models_library.generics import Envelope
-from simcore_service_webserver._meta import API_VTAG
-
-router = APIRouter(
- prefix=f"/{API_VTAG}",
- tags=[
- "clusters",
- ],
-)
-
-
-@router.get(
- "/clusters",
- response_model=Envelope[list[ClusterGet]],
-)
-def list_clusters():
- ...
-
-
-@router.post(
- "/clusters",
- response_model=Envelope[ClusterGet],
- status_code=status.HTTP_201_CREATED,
-)
-def create_cluster(
- _insert: ClusterCreate,
-):
- ...
-
-
-@router.post(
- "/clusters:ping",
- response_model=None,
- status_code=status.HTTP_204_NO_CONTENT,
-)
-def ping_cluster(
- _ping: ClusterPing,
-):
- """
- Test connectivity with cluster
- """
-
-
-@router.get(
- "/clusters/{cluster_id}",
- response_model=Envelope[ClusterGet],
-)
-def get_cluster(_path_params: Annotated[ClusterPathParams, Depends()]):
- ...
-
-
-@router.patch(
- "/clusters/{cluster_id}",
- response_model=Envelope[ClusterGet],
-)
-def update_cluster(
- _path_params: Annotated[ClusterPathParams, Depends()], _update: ClusterPatch
-):
- ...
-
-
-@router.delete(
- "/clusters/{cluster_id}",
- response_model=None,
- status_code=status.HTTP_204_NO_CONTENT,
-)
-def delete_cluster(
- _path_params: Annotated[ClusterPathParams, Depends()],
-):
- ...
-
-
-@router.get(
- "/clusters/{cluster_id}/details",
- response_model=Envelope[ClusterDetails],
-)
-def get_cluster_details(
- _path_params: Annotated[ClusterPathParams, Depends()],
-):
- ...
-
-
-@router.post(
- "/clusters/{cluster_id}:ping",
- response_model=None,
- status_code=status.HTTP_204_NO_CONTENT,
-)
-def ping_cluster_cluster_id(
- _path_params: Annotated[ClusterPathParams, Depends()],
-):
- """
- Tests connectivity with cluster
- """
diff --git a/api/specs/web-server/_computations.py b/api/specs/web-server/_computations.py
index 36600f1efac..e6c7572e885 100644
--- a/api/specs/web-server/_computations.py
+++ b/api/specs/web-server/_computations.py
@@ -1,12 +1,10 @@
from fastapi import APIRouter, status
+from models_library.api_schemas_directorv2.comp_tasks import ComputationGet
from models_library.api_schemas_webserver.computations import ComputationStart
from models_library.generics import Envelope
from models_library.projects import ProjectID
from simcore_service_webserver._meta import API_VTAG
-from simcore_service_webserver.director_v2._handlers import (
- ComputationTaskGet,
- _ComputationStarted,
-)
+from simcore_service_webserver.director_v2._handlers import _ComputationStarted
router = APIRouter(
prefix=f"/{API_VTAG}",
@@ -19,7 +17,7 @@
@router.get(
"/computations/{project_id}",
- response_model=Envelope[ComputationTaskGet],
+ response_model=Envelope[ComputationGet],
)
async def get_computation(project_id: ProjectID):
...
diff --git a/api/specs/web-server/openapi.py b/api/specs/web-server/openapi.py
index c205153e506..8e6b562c96d 100644
--- a/api/specs/web-server/openapi.py
+++ b/api/specs/web-server/openapi.py
@@ -31,7 +31,6 @@
"_announcements",
"_catalog",
"_catalog_tags", # MUST BE after _catalog
- "_cluster",
"_computations",
"_exporter",
"_folders",
diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py b/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py
index 35513ace551..41951a1d06d 100644
--- a/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py
+++ b/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py
@@ -1,32 +1,16 @@
-from typing import Annotated, Any, TypeAlias
+from typing import Any, TypeAlias
from pydantic import (
- AnyHttpUrl,
BaseModel,
- ConfigDict,
Field,
- HttpUrl,
NonNegativeFloat,
- ValidationInfo,
field_validator,
model_validator,
)
from pydantic.networks import AnyUrl
from pydantic.types import ByteSize, PositiveFloat
-from ..clusters import (
- CLUSTER_ADMIN_RIGHTS,
- CLUSTER_MANAGER_RIGHTS,
- CLUSTER_USER_RIGHTS,
- BaseCluster,
- Cluster,
- ClusterAccessRights,
- ClusterAuthentication,
- ClusterTypeInModel,
- ExternalClusterAuthentication,
-)
from ..generics import DictModel
-from ..users import GroupID
class TaskCounts(BaseModel):
@@ -89,132 +73,3 @@ class ClusterDetails(BaseModel):
dashboard_link: AnyUrl = Field(
..., description="Link to this scheduler's dashboard"
)
-
-
-class ClusterGet(Cluster):
- access_rights: Annotated[
- dict[GroupID, ClusterAccessRights],
- Field(
- alias="accessRights",
- default_factory=dict,
- json_schema_extra={"default": {}},
- ),
- ]
-
- model_config = ConfigDict(
- extra="allow",
- populate_by_name=True,
- json_schema_extra={
- # NOTE: make openapi-specs fails because
- # Cluster.model_config.json_schema_extra is raises `TypeError: unhashable type: 'ClusterAccessRights'`
- },
- )
-
- @model_validator(mode="before")
- @classmethod
- def ensure_access_rights_converted(cls, values):
- if "access_rights" in values:
- access_rights = values.pop("access_rights")
- values["accessRights"] = access_rights
- return values
-
-
-class ClusterDetailsGet(ClusterDetails):
- ...
-
-
-class ClusterCreate(BaseCluster):
- owner: GroupID | None = None # type: ignore[assignment]
- authentication: ExternalClusterAuthentication = Field(discriminator="type")
- access_rights: dict[GroupID, ClusterAccessRights] = Field(
- alias="accessRights", default_factory=dict
- )
-
- model_config = ConfigDict(
- json_schema_extra={
- "examples": [
- {
- "name": "My awesome cluster",
- "type": ClusterTypeInModel.ON_PREMISE,
- "endpoint": "https://registry.osparc-development.fake.dev",
- "authentication": {
- "type": "simple",
- "username": "someuser",
- "password": "somepassword",
- },
- },
- {
- "name": "My AWS cluster",
- "description": "a AWS cluster administered by me",
- "type": ClusterTypeInModel.AWS,
- "owner": 154,
- "endpoint": "https://registry.osparc-development.fake.dev",
- "authentication": {
- "type": "simple",
- "username": "someuser",
- "password": "somepassword",
- },
- "accessRights": {
- 154: CLUSTER_ADMIN_RIGHTS.model_dump(), # type:ignore[dict-item]
- 12: CLUSTER_MANAGER_RIGHTS.model_dump(), # type:ignore[dict-item]
- 7899: CLUSTER_USER_RIGHTS.model_dump(), # type:ignore[dict-item]
- },
- },
- ]
- }
- )
-
- @field_validator("thumbnail", mode="before")
- @classmethod
- def set_default_thumbnail_if_empty(cls, v, info: ValidationInfo):
- if v is None:
- cluster_type = info.data["type"]
- default_thumbnails = {
- ClusterTypeInModel.AWS.value: "https://upload.wikimedia.org/wikipedia/commons/thumb/9/93/Amazon_Web_Services_Logo.svg/250px-Amazon_Web_Services_Logo.svg.png",
- ClusterTypeInModel.ON_PREMISE.value: "https://upload.wikimedia.org/wikipedia/commons/thumb/a/ac/Crystal_Clear_app_network_local.png/120px-Crystal_Clear_app_network_local.png",
- ClusterTypeInModel.ON_DEMAND.value: "https://upload.wikimedia.org/wikipedia/commons/thumb/9/93/Amazon_Web_Services_Logo.svg/250px-Amazon_Web_Services_Logo.svg.png",
- }
- return default_thumbnails[cluster_type]
- return v
-
-
-class ClusterPatch(BaseCluster):
- name: str | None = None # type: ignore[assignment]
- description: str | None = None
- type: ClusterTypeInModel | None = None # type: ignore[assignment]
- owner: GroupID | None = None # type: ignore[assignment]
- thumbnail: HttpUrl | None = None
- endpoint: AnyUrl | None = None # type: ignore[assignment]
- authentication: ExternalClusterAuthentication | None = Field(None, discriminator="type") # type: ignore[assignment]
- access_rights: dict[GroupID, ClusterAccessRights] | None = Field( # type: ignore[assignment]
- default=None, alias="accessRights"
- )
-
- model_config = ConfigDict(
- json_schema_extra={
- "examples": [
- {
- "name": "Changing the name of my cluster",
- },
- {
- "description": "adding a better description",
- },
- {
- "accessRights": {
- 154: CLUSTER_ADMIN_RIGHTS.model_dump(), # type:ignore[dict-item]
- 12: CLUSTER_MANAGER_RIGHTS.model_dump(), # type:ignore[dict-item]
- 7899: CLUSTER_USER_RIGHTS.model_dump(), # type:ignore[dict-item]
- },
- },
- ]
- }
- )
-
-
-class ClusterPing(BaseModel):
- endpoint: AnyHttpUrl
- authentication: ClusterAuthentication = Field(
- ...,
- description="Dask gateway authentication",
- discriminator="type",
- )
diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/comp_tasks.py b/packages/models-library/src/models_library/api_schemas_directorv2/comp_tasks.py
index 0663cc37f78..9b548c64e72 100644
--- a/packages/models-library/src/models_library/api_schemas_directorv2/comp_tasks.py
+++ b/packages/models-library/src/models_library/api_schemas_directorv2/comp_tasks.py
@@ -1,6 +1,5 @@
from typing import Any, TypeAlias
-from models_library.basic_types import IDStr
from pydantic import (
AnyHttpUrl,
AnyUrl,
@@ -11,7 +10,7 @@
field_validator,
)
-from ..clusters import ClusterID
+from ..basic_types import IDStr
from ..projects import ProjectID
from ..projects_nodes_io import NodeID
from ..projects_pipeline import ComputationTask
@@ -54,14 +53,10 @@ class ComputationCreate(BaseModel):
force_restart: bool | None = Field(
default=False, description="if True will force re-running all dependent nodes"
)
- cluster_id: ClusterID | None = Field(
- default=None,
- description="the computation shall use the cluster described by its id, 0 is the default cluster",
- )
simcore_user_agent: str = ""
use_on_demand_clusters: bool = Field(
default=False,
- description="if True, a cluster will be created as necessary (wallet_id cannot be None, and cluster_id must be None)",
+ description="if True, a cluster will be created as necessary (wallet_id cannot be None)",
validate_default=True,
)
wallet_info: WalletInfo | None = Field(
@@ -79,14 +74,6 @@ def _ensure_product_name_defined_if_computation_starts(
raise ValueError(msg)
return v
- @field_validator("use_on_demand_clusters")
- @classmethod
- def _ensure_expected_options(cls, v, info: ValidationInfo):
- if v and info.data.get("cluster_id") is not None:
- msg = "cluster_id cannot be set if use_on_demand_clusters is set"
- raise ValueError(msg)
- return v
-
class ComputationStop(BaseModel):
user_id: UserID
diff --git a/packages/models-library/src/models_library/api_schemas_webserver/clusters.py b/packages/models-library/src/models_library/api_schemas_webserver/clusters.py
deleted file mode 100644
index 17232a8b482..00000000000
--- a/packages/models-library/src/models_library/api_schemas_webserver/clusters.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from pydantic import BaseModel, ConfigDict
-
-from ..api_schemas_directorv2 import clusters as directorv2_clusters
-from ..clusters import ClusterID
-from ._base import InputSchema, OutputSchema
-
-
-class ClusterPathParams(BaseModel):
- cluster_id: ClusterID
- model_config = ConfigDict(
- populate_by_name=True,
- extra="forbid",
- )
-
-
-class ClusterGet(directorv2_clusters.ClusterGet):
- model_config = OutputSchema.model_config
-
-
-class ClusterCreate(directorv2_clusters.ClusterCreate):
- model_config = InputSchema.model_config
-
-
-class ClusterPatch(directorv2_clusters.ClusterPatch):
- model_config = InputSchema.model_config
-
-
-class ClusterPing(directorv2_clusters.ClusterPing):
- model_config = InputSchema.model_config
-
-
-class ClusterDetails(directorv2_clusters.ClusterDetails):
- model_config = OutputSchema.model_config
diff --git a/packages/models-library/src/models_library/api_schemas_webserver/computations.py b/packages/models-library/src/models_library/api_schemas_webserver/computations.py
index 278cc747c51..c16426f5f8e 100644
--- a/packages/models-library/src/models_library/api_schemas_webserver/computations.py
+++ b/packages/models-library/src/models_library/api_schemas_webserver/computations.py
@@ -1,11 +1,8 @@
from pydantic import BaseModel
-from ..clusters import ClusterID
-
class ComputationStart(BaseModel):
force_restart: bool = False
- cluster_id: ClusterID = 0
subgraph: set[str] = set()
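
With cluster_id gone from ComputationStart, request payloads carry only force_restart and subgraph. A minimal, self-contained sketch of the trimmed shape (mirroring just the fields visible in this hunk, under the assumption of pydantic v2 as used elsewhere in the repo, and not the real webserver model):

```python
# Standalone sketch mirroring the trimmed ComputationStart schema above.
from pydantic import BaseModel


class ComputationStart(BaseModel):
    force_restart: bool = False
    subgraph: set[str] = set()


payload = ComputationStart(force_restart=True, subgraph={"node-1", "node-2"})
print(payload.model_dump())  # no cluster_id in the payload anymore
```
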
diff --git a/packages/models-library/src/models_library/clusters.py b/packages/models-library/src/models_library/clusters.py
index 911b709a1f6..783f82df016 100644
--- a/packages/models-library/src/models_library/clusters.py
+++ b/packages/models-library/src/models_library/clusters.py
@@ -1,17 +1,8 @@
from enum import auto
from pathlib import Path
-from typing import Annotated, Final, Literal, Self, TypeAlias
-
-from pydantic import (
- AnyUrl,
- BaseModel,
- ConfigDict,
- Field,
- HttpUrl,
- SecretStr,
- field_validator,
- model_validator,
-)
+from typing import Literal, TypeAlias
+
+from pydantic import AnyUrl, BaseModel, ConfigDict, Field, HttpUrl, field_validator
from pydantic.types import NonNegativeInt
from .users import GroupID
@@ -27,78 +18,19 @@ class ClusterTypeInModel(StrAutoEnum):
ON_DEMAND = auto()
-class ClusterAccessRights(BaseModel):
- read: bool = Field(..., description="allows to run pipelines on that cluster")
- write: bool = Field(..., description="allows to modify the cluster")
- delete: bool = Field(..., description="allows to delete a cluster")
-
- model_config = ConfigDict(extra="forbid")
-
-
-CLUSTER_ADMIN_RIGHTS = ClusterAccessRights(read=True, write=True, delete=True)
-CLUSTER_MANAGER_RIGHTS = ClusterAccessRights(read=True, write=True, delete=False)
-CLUSTER_USER_RIGHTS = ClusterAccessRights(read=True, write=False, delete=False)
-CLUSTER_NO_RIGHTS = ClusterAccessRights(read=False, write=False, delete=False)
-
-
-class BaseAuthentication(BaseModel):
+class _AuthenticationBase(BaseModel):
type: str
model_config = ConfigDict(frozen=True, extra="forbid")
-class SimpleAuthentication(BaseAuthentication):
- type: Literal["simple"] = "simple"
- username: str
- password: SecretStr
-
- model_config = ConfigDict(
- json_schema_extra={
- "examples": [
- {
- "type": "simple",
- "username": "someuser",
- "password": "somepassword",
- },
- ]
- }
- )
-
-
-class KerberosAuthentication(BaseAuthentication):
- type: Literal["kerberos"] = "kerberos"
-
- model_config = ConfigDict(
- json_schema_extra={
- "examples": [
- {
- "type": "kerberos",
- },
- ]
- }
- )
-
-
-class JupyterHubTokenAuthentication(BaseAuthentication):
- type: Literal["jupyterhub"] = "jupyterhub"
- api_token: str
-
- model_config = ConfigDict(
- json_schema_extra={
- "examples": [
- {"type": "jupyterhub", "api_token": "some_jupyterhub_token"},
- ]
- }
- )
-
-
-class NoAuthentication(BaseAuthentication):
+class NoAuthentication(_AuthenticationBase):
type: Literal["none"] = "none"
model_config = ConfigDict(json_schema_extra={"examples": [{"type": "none"}]})
-class TLSAuthentication(BaseAuthentication):
+class TLSAuthentication(_AuthenticationBase):
type: Literal["tls"] = "tls"
tls_ca_file: Path
tls_client_cert: Path
@@ -118,18 +50,11 @@ class TLSAuthentication(BaseAuthentication):
)
-InternalClusterAuthentication: TypeAlias = NoAuthentication | TLSAuthentication
-ExternalClusterAuthentication: TypeAlias = (
- SimpleAuthentication | KerberosAuthentication | JupyterHubTokenAuthentication
-)
-ClusterAuthentication: TypeAlias = (
- ExternalClusterAuthentication | InternalClusterAuthentication
-)
+ClusterAuthentication: TypeAlias = NoAuthentication | TLSAuthentication
class BaseCluster(BaseModel):
name: str = Field(..., description="The human readable name of the cluster")
- description: str | None = None
type: ClusterTypeInModel
owner: GroupID
thumbnail: HttpUrl | None = Field(
@@ -142,104 +67,41 @@ class BaseCluster(BaseModel):
authentication: ClusterAuthentication = Field(
..., description="Dask gateway authentication", discriminator="type"
)
- access_rights: Annotated[
- dict[GroupID, ClusterAccessRights], Field(default_factory=dict)
- ]
-
_from_equivalent_enums = field_validator("type", mode="before")(
create_enums_pre_validator(ClusterTypeInModel)
)
- model_config = ConfigDict(extra="forbid", use_enum_values=True)
-
-
-ClusterID: TypeAlias = NonNegativeInt
-DEFAULT_CLUSTER_ID: Final[ClusterID] = 0
-
-
-class Cluster(BaseCluster):
- id: ClusterID = Field(..., description="The cluster ID")
-
model_config = ConfigDict(
- extra="allow",
+ use_enum_values=True,
json_schema_extra={
"examples": [
{
- "id": DEFAULT_CLUSTER_ID,
- "name": "The default cluster",
- "type": ClusterTypeInModel.ON_PREMISE,
- "owner": 1456,
- "endpoint": "tcp://default-dask-scheduler:8786",
- "authentication": {
- "type": "simple",
- "username": "someuser",
- "password": "somepassword",
- },
- },
- {
- "id": 432,
"name": "My awesome cluster",
"type": ClusterTypeInModel.ON_PREMISE,
"owner": 12,
"endpoint": "https://registry.osparc-development.fake.dev",
"authentication": {
- "type": "simple",
- "username": "someuser",
- "password": "somepassword",
+ "type": "tls",
+ "tls_ca_file": "/path/to/ca_file",
+ "tls_client_cert": "/path/to/cert_file",
+ "tls_client_key": "/path/to/key_file",
},
},
{
- "id": 432546,
"name": "My AWS cluster",
- "description": "a AWS cluster administered by me",
"type": ClusterTypeInModel.AWS,
"owner": 154,
"endpoint": "https://registry.osparc-development.fake.dev",
- "authentication": {"type": "kerberos"},
- "access_rights": {
- 154: CLUSTER_ADMIN_RIGHTS, # type: ignore[dict-item]
- 12: CLUSTER_MANAGER_RIGHTS, # type: ignore[dict-item]
- 7899: CLUSTER_USER_RIGHTS, # type: ignore[dict-item]
- },
- },
- {
- "id": 325436,
- "name": "My AWS cluster",
- "description": "a AWS cluster administered by me",
- "type": ClusterTypeInModel.AWS,
- "owner": 2321,
- "endpoint": "https://registry.osparc-development.fake2.dev",
"authentication": {
- "type": "jupyterhub",
- "api_token": "some_fake_token",
- },
- "access_rights": {
- 154: CLUSTER_ADMIN_RIGHTS, # type: ignore[dict-item]
- 12: CLUSTER_MANAGER_RIGHTS, # type: ignore[dict-item]
- 7899: CLUSTER_USER_RIGHTS, # type: ignore[dict-item]
+ "type": "tls",
+ "tls_ca_file": "/path/to/ca_file",
+ "tls_client_cert": "/path/to/cert_file",
+ "tls_client_key": "/path/to/key_file",
},
},
]
},
)
- @model_validator(mode="after")
- def check_owner_has_access_rights(self: Self) -> Self:
- is_default_cluster = bool(self.id == DEFAULT_CLUSTER_ID)
- owner_gid = self.owner
-
- # check owner is in the access rights, if not add it
- access_rights = self.access_rights.copy()
- if owner_gid not in access_rights:
- access_rights[owner_gid] = (
- CLUSTER_USER_RIGHTS if is_default_cluster else CLUSTER_ADMIN_RIGHTS
- )
- # check owner has the expected access
- if access_rights[owner_gid] != (
- CLUSTER_USER_RIGHTS if is_default_cluster else CLUSTER_ADMIN_RIGHTS
- ):
- msg = f"the cluster owner access rights are incorrectly set: {access_rights[owner_gid]}"
- raise ValueError(msg)
- # NOTE: overcomes frozen configuration (far fetched in ClusterGet model of webserver)
- object.__setattr__(self, "access_rights", access_rights)
- return self
+
+ClusterID: TypeAlias = NonNegativeInt
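
Editor's aside — for orientation, a self-contained sketch of the discriminated-union pattern kept above, assuming pydantic v2; the `ClusterStub` holder is hypothetical and only demonstrates how the `type` tag selects the concrete authentication model now that only "none" and "tls" remain:

from pathlib import Path
from typing import Literal, TypeAlias

from pydantic import BaseModel, ConfigDict, Field


class _AuthenticationBase(BaseModel):
    type: str
    model_config = ConfigDict(frozen=True, extra="forbid")


class NoAuthentication(_AuthenticationBase):
    type: Literal["none"] = "none"


class TLSAuthentication(_AuthenticationBase):
    type: Literal["tls"] = "tls"
    tls_ca_file: Path
    tls_client_cert: Path
    tls_client_key: Path


ClusterAuthentication: TypeAlias = NoAuthentication | TLSAuthentication


class ClusterStub(BaseModel):
    # hypothetical holder, just to show the discriminator in action
    authentication: ClusterAuthentication = Field(..., discriminator="type")


# the "type" tag picks the concrete model during validation
print(ClusterStub.model_validate({"authentication": {"type": "none"}}))
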
diff --git a/packages/models-library/src/models_library/projects_pipeline.py b/packages/models-library/src/models_library/projects_pipeline.py
index 975d4726b4e..40b47aa46eb 100644
--- a/packages/models-library/src/models_library/projects_pipeline.py
+++ b/packages/models-library/src/models_library/projects_pipeline.py
@@ -4,7 +4,6 @@
import arrow
from pydantic import BaseModel, ConfigDict, Field, PositiveInt
-from .clusters import ClusterID
from .projects_nodes import NodeState
from .projects_nodes_io import NodeID
from .projects_state import RunningState
@@ -40,10 +39,6 @@ class ComputationTask(BaseModel):
...,
description="the iteration id of the computation task (none if no task ran yet)",
)
- cluster_id: ClusterID | None = Field(
- ...,
- description="the cluster on which the computaional task runs/ran (none if no task ran yet)",
- )
started: datetime.datetime | None = Field(
...,
description="the timestamp when the computation was started or None if not started yet",
@@ -87,7 +82,6 @@ class ComputationTask(BaseModel):
"progress": 0.0,
},
"iteration": None,
- "cluster_id": None,
"started": arrow.utcnow().shift(minutes=-50).datetime, # type: ignore[dict-item]
"stopped": None,
"submitted": arrow.utcnow().shift(hours=-1).datetime, # type: ignore[dict-item]
@@ -119,7 +113,6 @@ class ComputationTask(BaseModel):
"progress": 1.0,
},
"iteration": 2,
- "cluster_id": 0,
"started": arrow.utcnow().shift(minutes=-50).datetime, # type: ignore[dict-item]
"stopped": arrow.utcnow().shift(minutes=-20).datetime, # type: ignore[dict-item]
"submitted": arrow.utcnow().shift(hours=-1).datetime, # type: ignore[dict-item]
diff --git a/packages/models-library/tests/test_clusters.py b/packages/models-library/tests/test_clusters.py
deleted file mode 100644
index 258bdc006f4..00000000000
--- a/packages/models-library/tests/test_clusters.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from copy import deepcopy
-from typing import Any
-
-import pytest
-from faker import Faker
-from models_library.clusters import (
- CLUSTER_ADMIN_RIGHTS,
- CLUSTER_MANAGER_RIGHTS,
- CLUSTER_USER_RIGHTS,
- DEFAULT_CLUSTER_ID,
- Cluster,
-)
-from pydantic import BaseModel, ValidationError
-
-
-@pytest.mark.parametrize(
- "model_cls",
- [
- Cluster,
- ],
-)
-def test_cluster_access_rights_correctly_created_when_owner_access_rights_not_present(
- model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]]
-):
- for example in model_cls_examples.values():
- modified_example = deepcopy(example)
- owner_gid = modified_example["owner"]
- # remove the owner from the access rights if any
- modified_example.get("access_rights", {}).pop(owner_gid, None)
-
- instance = model_cls(**modified_example)
- if instance.id != DEFAULT_CLUSTER_ID:
- assert instance.access_rights[owner_gid] == CLUSTER_ADMIN_RIGHTS # type: ignore
- else:
- assert instance.access_rights[owner_gid] == CLUSTER_USER_RIGHTS # type: ignore
-
-
-@pytest.mark.parametrize(
- "model_cls",
- [
- Cluster,
- ],
-)
-def test_cluster_fails_when_owner_has_no_admin_rights_unless_default_cluster(
- model_cls: type[BaseModel],
- model_cls_examples: dict[str, dict[str, Any]],
- faker: Faker,
-):
- for example in model_cls_examples.values():
- modified_example = deepcopy(example)
- modified_example["id"] = faker.pyint(min_value=1)
- owner_gid = modified_example["owner"]
- # ensure there are access rights
- modified_example.setdefault("access_rights", {})
- # set the owner with manager rights
- modified_example["access_rights"][owner_gid] = CLUSTER_MANAGER_RIGHTS
- with pytest.raises(ValidationError):
- model_cls(**modified_example)
-
- # set the owner with user rights
- modified_example["access_rights"][owner_gid] = CLUSTER_USER_RIGHTS
- with pytest.raises(ValidationError):
- model_cls(**modified_example)
-
-
-@pytest.mark.parametrize(
- "model_cls",
- [
- Cluster,
- ],
-)
-def test_cluster_fails_when_owner_has_no_user_rights_if_default_cluster(
- model_cls: type[BaseModel],
- model_cls_examples: dict[str, dict[str, Any]],
-):
- for example in model_cls_examples.values():
- modified_example = deepcopy(example)
- modified_example["id"] = DEFAULT_CLUSTER_ID
- owner_gid = modified_example["owner"]
- # ensure there are access rights
- modified_example.setdefault("access_rights", {})
- # set the owner with manager rights
- modified_example["access_rights"][owner_gid] = CLUSTER_MANAGER_RIGHTS
- with pytest.raises(ValidationError):
- model_cls(**modified_example)
-
- # set the owner with user rights
- modified_example["access_rights"][owner_gid] = CLUSTER_ADMIN_RIGHTS
- with pytest.raises(ValidationError):
- model_cls(**modified_example)
diff --git a/packages/pytest-simcore/src/pytest_simcore/dask_gateway.py b/packages/pytest-simcore/src/pytest_simcore/dask_gateway.py
deleted file mode 100644
index 3f89a7ac66f..00000000000
--- a/packages/pytest-simcore/src/pytest_simcore/dask_gateway.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-from collections.abc import Callable
-from typing import AsyncIterator, NamedTuple
-
-import pytest
-import traitlets.config
-from dask_gateway import Gateway, GatewayCluster, auth
-from dask_gateway_server.app import DaskGateway
-from dask_gateway_server.backends.local import UnsafeLocalBackend
-from distributed import Client
-from faker import Faker
-
-
-@pytest.fixture
-def local_dask_gateway_server_config(
- unused_tcp_port_factory: Callable,
-) -> traitlets.config.Config:
- c = traitlets.config.Config()
- assert isinstance(c.DaskGateway, traitlets.config.Config)
- assert isinstance(c.ClusterConfig, traitlets.config.Config)
- assert isinstance(c.Proxy, traitlets.config.Config)
- assert isinstance(c.SimpleAuthenticator, traitlets.config.Config)
- c.DaskGateway.backend_class = UnsafeLocalBackend
- c.DaskGateway.address = f"127.0.0.1:{unused_tcp_port_factory()}"
- c.Proxy.address = f"127.0.0.1:{unused_tcp_port_factory()}"
- c.DaskGateway.authenticator_class = "dask_gateway_server.auth.SimpleAuthenticator"
- c.SimpleAuthenticator.password = "qweqwe" # noqa: S105
- c.ClusterConfig.worker_cmd = [
- "dask-worker",
- "--resources",
- f"CPU=12,GPU=1,RAM={16e9}",
- ]
- # NOTE: This must be set such that the local unsafe backend creates a worker with enough cores/memory
- c.ClusterConfig.worker_cores = 12
- c.ClusterConfig.worker_memory = "16G"
- c.ClusterConfig.cluster_max_workers = 3
-
- c.DaskGateway.log_level = "DEBUG"
- return c
-
-
-class DaskGatewayServer(NamedTuple):
- address: str
- proxy_address: str
- password: str
- server: DaskGateway
-
-
-@pytest.fixture
-async def local_dask_gateway_server(
- local_dask_gateway_server_config: traitlets.config.Config,
-) -> AsyncIterator[DaskGatewayServer]:
- print("--> creating local dask gateway server")
- dask_gateway_server = DaskGateway(config=local_dask_gateway_server_config)
- dask_gateway_server.initialize([]) # that is a shitty one!
- print("--> local dask gateway server initialized")
- await dask_gateway_server.setup()
- await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access
-
- print("--> local dask gateway server setup completed")
- yield DaskGatewayServer(
- f"http://{dask_gateway_server.backend.proxy.address}",
- f"gateway://{dask_gateway_server.backend.proxy.tcp_address}",
- local_dask_gateway_server_config.SimpleAuthenticator.password, # type: ignore
- dask_gateway_server,
- )
- print("--> local dask gateway server switching off...")
- await dask_gateway_server.cleanup()
- print("...done")
-
-
-@pytest.fixture
-def gateway_username(faker: Faker) -> str:
- return faker.user_name()
-
-
-@pytest.fixture
-def gateway_auth(
- local_dask_gateway_server: DaskGatewayServer, gateway_username: str
-) -> auth.BasicAuth:
- return auth.BasicAuth(gateway_username, local_dask_gateway_server.password)
-
-
-@pytest.fixture
-async def dask_gateway(
- local_dask_gateway_server: DaskGatewayServer, gateway_auth: auth.BasicAuth
-) -> Gateway:
- async with Gateway(
- local_dask_gateway_server.address,
- local_dask_gateway_server.proxy_address,
- asynchronous=True,
- auth=gateway_auth,
- ) as gateway:
- print(
- f"--> {gateway=} created, with {gateway_auth.username=}/{gateway_auth.password=}"
- )
- cluster_options = await gateway.cluster_options()
- gateway_versions = await gateway.get_versions()
- clusters_list = await gateway.list_clusters()
- print(f"--> {gateway_versions=}, {cluster_options=}, {clusters_list=}")
- for option in cluster_options.items():
- print(f"--> {option=}")
- return gateway
-
-
-@pytest.fixture
-async def dask_gateway_cluster(dask_gateway: Gateway) -> AsyncIterator[GatewayCluster]:
- async with dask_gateway.new_cluster() as cluster:
- yield cluster
-
-
-@pytest.fixture
-async def dask_gateway_cluster_client(
- dask_gateway_cluster: GatewayCluster,
-) -> AsyncIterator[Client]:
- async with dask_gateway_cluster.get_client() as client:
- yield client
diff --git a/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py b/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py
index aea927de4d6..5b85a036d79 100644
--- a/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py
+++ b/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py
@@ -2,8 +2,6 @@
# pylint: disable=unused-argument
# pylint: disable=unused-variable
-import json
-import random
import re
from pathlib import Path
from typing import Any
@@ -13,6 +11,7 @@
from aioresponses import aioresponses as AioResponsesMock
from aioresponses.core import CallbackResult
from faker import Faker
+from models_library.api_schemas_directorv2.comp_tasks import ComputationGet
from models_library.api_schemas_storage import (
FileMetaDataGet,
FileUploadCompleteFutureResponse,
@@ -23,7 +22,6 @@
LinkType,
PresignedLink,
)
-from models_library.clusters import Cluster
from models_library.generics import Envelope
from models_library.projects_pipeline import ComputationTask
from models_library.projects_state import RunningState
@@ -81,7 +79,7 @@ def create_computation_cb(url, **kwargs) -> CallbackResult:
assert param in body, f"{param} is missing from body: {body}"
state = (
RunningState.PUBLISHED
- if "start_pipeline" in body and body["start_pipeline"]
+ if body.get("start_pipeline")
else RunningState.NOT_STARTED
)
pipeline: dict[str, list[str]] = FULL_PROJECT_PIPELINE_ADJACENCY
@@ -131,8 +129,13 @@ def get_computation_cb(url, **kwargs) -> CallbackResult:
state = RunningState.NOT_STARTED
pipeline: dict[str, list[str]] = FULL_PROJECT_PIPELINE_ADJACENCY
node_states = FULL_PROJECT_NODE_STATES
- returned_computation = ComputationTask.model_validate(
- ComputationTask.model_config["json_schema_extra"]["examples"][0]
+ assert "json_schema_extra" in ComputationGet.model_config
+ assert isinstance(ComputationGet.model_config["json_schema_extra"], dict)
+ assert isinstance(
+ ComputationGet.model_config["json_schema_extra"]["examples"], list
+ )
+ returned_computation = ComputationGet.model_validate(
+ ComputationGet.model_config["json_schema_extra"]["examples"][0]
).model_copy(
update={
"id": Path(url.path).name,
@@ -151,85 +154,6 @@ def get_computation_cb(url, **kwargs) -> CallbackResult:
)
-def create_cluster_cb(url, **kwargs) -> CallbackResult:
- assert "json" in kwargs, f"missing body in call to {url}"
- assert url.query.get("user_id")
- random_cluster = Cluster.model_validate(
- random.choice(Cluster.model_config["json_schema_extra"]["examples"])
- )
- return CallbackResult(
- status=201, payload=json.loads(random_cluster.model_dump_json(by_alias=True))
- )
-
-
-def list_clusters_cb(url, **kwargs) -> CallbackResult:
- assert url.query.get("user_id")
- return CallbackResult(
- status=200,
- body=json.dumps(
- [
- json.loads(
- Cluster.model_validate(
- random.choice(
- Cluster.model_config["json_schema_extra"]["examples"]
- )
- ).model_dump_json(by_alias=True)
- )
- for _ in range(3)
- ]
- ),
- )
-
-
-def get_cluster_cb(url, **kwargs) -> CallbackResult:
- assert url.query.get("user_id")
- cluster_id = url.path.split("/")[-1]
- return CallbackResult(
- status=200,
- payload=json.loads(
- Cluster.model_validate(
- {
- **random.choice(
- Cluster.model_config["json_schema_extra"]["examples"]
- ),
- **{"id": cluster_id},
- }
- ).model_dump_json(by_alias=True)
- ),
- )
-
-
-def get_cluster_details_cb(url, **kwargs) -> CallbackResult:
- assert url.query.get("user_id")
- cluster_id = url.path.split("/")[-1]
- assert cluster_id
- return CallbackResult(
- status=200,
- payload={
- "scheduler": {"status": "RUNNING"},
- "dashboard_link": "https://dashboard.link.com",
- },
- )
-
-
-def patch_cluster_cb(url, **kwargs) -> CallbackResult:
- assert url.query.get("user_id")
- cluster_id = url.path.split("/")[-1]
- return CallbackResult(
- status=200,
- payload=json.loads(
- Cluster.model_validate(
- {
- **random.choice(
- Cluster.model_config["json_schema_extra"]["examples"]
- ),
- **{"id": cluster_id},
- }
- ).model_dump_json(by_alias=True)
- ),
- )
-
-
@pytest.fixture
async def director_v2_service_mock(
aioresponses_mocker: AioResponsesMock,
@@ -280,73 +204,6 @@ async def director_v2_service_mock(
aioresponses_mocker.delete(delete_computation_pattern, status=204, repeat=True)
aioresponses_mocker.patch(projects_networks_pattern, status=204, repeat=True)
- # clusters
- aioresponses_mocker.post(
- re.compile(
- r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$"
- ),
- callback=create_cluster_cb,
- status=status.HTTP_201_CREATED,
- repeat=True,
- )
-
- aioresponses_mocker.get(
- re.compile(
- r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$"
- ),
- callback=list_clusters_cb,
- status=status.HTTP_201_CREATED,
- repeat=True,
- )
-
- aioresponses_mocker.get(
- re.compile(
- r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
- ),
- callback=get_cluster_cb,
- status=status.HTTP_201_CREATED,
- repeat=True,
- )
-
- aioresponses_mocker.get(
- re.compile(
- r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters/[0-9]+/details\?(\w+(?:=\w+)?\&?){1,}$"
- ),
- callback=get_cluster_details_cb,
- status=status.HTTP_201_CREATED,
- repeat=True,
- )
-
- aioresponses_mocker.patch(
- re.compile(
- r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
- ),
- callback=patch_cluster_cb,
- status=status.HTTP_201_CREATED,
- repeat=True,
- )
- aioresponses_mocker.delete(
- re.compile(
- r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
- ),
- status=status.HTTP_204_NO_CONTENT,
- repeat=True,
- )
-
- aioresponses_mocker.post(
- re.compile(r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters:ping$"),
- status=status.HTTP_204_NO_CONTENT,
- repeat=True,
- )
-
- aioresponses_mocker.post(
- re.compile(
- r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+):ping\?(\w+(?:=\w+)?\&?){1,}$"
- ),
- status=status.HTTP_204_NO_CONTENT,
- repeat=True,
- )
-
return aioresponses_mocker
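
Editor's aside — as a reminder of how these mocks are consumed, a hedged, standalone example of the aioresponses callback pattern used above; the endpoint URL and payload are made up for illustration:

import asyncio

import aiohttp
from aioresponses import aioresponses
from aioresponses.core import CallbackResult


def get_computation_cb(url, **kwargs) -> CallbackResult:
    # echo the project id taken from the request path, as the fixture above does
    return CallbackResult(
        status=200,
        payload={"id": url.path.split("/")[-1], "state": "NOT_STARTED"},
    )


async def main() -> None:
    with aioresponses() as mocked:
        mocked.get(
            "http://director-v2:8000/v2/computations/df0b67b6",  # hypothetical endpoint
            callback=get_computation_cb,
        )
        async with aiohttp.ClientSession() as session:
            async with session.get(
                "http://director-v2:8000/v2/computations/df0b67b6"
            ) as resp:
                print(await resp.json())  # {'id': 'df0b67b6', 'state': 'NOT_STARTED'}


asyncio.run(main())
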
diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py b/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py
index c2900bf3e4f..507bb602e06 100644
--- a/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py
+++ b/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py
@@ -9,7 +9,7 @@
import distributed
import pytest
from distributed import Client
-from models_library.clusters import InternalClusterAuthentication, TLSAuthentication
+from models_library.clusters import ClusterAuthentication, TLSAuthentication
from pydantic import AnyUrl
from .helpers.docker import get_service_published_port
@@ -72,7 +72,7 @@ def dask_backend_tls_certificates(
@pytest.fixture
def dask_scheduler_auth(
dask_backend_tls_certificates: _TLSCertificates,
-) -> InternalClusterAuthentication:
+) -> ClusterAuthentication:
return TLSAuthentication(
tls_ca_file=dask_backend_tls_certificates.tls_ca_file,
tls_client_cert=dask_backend_tls_certificates.tls_cert_file,
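
Editor's aside — a hedged sketch of the narrowed typing shown above: a fixture can be annotated with the `ClusterAuthentication` alias while returning the concrete TLS variant; the temporary empty files stand in for real certificate material:

from pathlib import Path

import pytest
from models_library.clusters import ClusterAuthentication, TLSAuthentication


@pytest.fixture
def dask_scheduler_auth(tmp_path: Path) -> ClusterAuthentication:
    # placeholder files instead of real TLS certificates, for this sketch only
    ca, cert, key = tmp_path / "ca.pem", tmp_path / "cert.pem", tmp_path / "key.pem"
    for f in (ca, cert, key):
        f.touch()
    return TLSAuthentication(tls_ca_file=ca, tls_client_cert=cert, tls_client_key=key)
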
diff --git a/services/api-server/openapi.json b/services/api-server/openapi.json
index 5b23b44603d..20210a69dba 100644
--- a/services/api-server/openapi.json
+++ b/services/api-server/openapi.json
@@ -2598,7 +2598,7 @@
"solvers"
],
"summary": "Start Job",
- "description": "Starts job job_id created with the solver solver_key:version\n\nAdded in *version 0.4.3*: query parameter `cluster_id`\nAdded in *version 0.6*: responds with a 202 when successfully starting a computation",
+ "description": "Starts job job_id created with the solver solver_key:version\n\nAdded in *version 0.4.3*: query parameter `cluster_id`\nAdded in *version 0.6*: responds with a 202 when successfully starting a computation\nChanged in *version 0.8*: query parameter `cluster_id` deprecated",
"operationId": "start_job",
"security": [
{
@@ -2651,7 +2651,8 @@
}
],
"title": "Cluster Id"
- }
+ },
+ "deprecated": true
}
],
"responses": {
@@ -4585,7 +4586,7 @@
"studies"
],
"summary": "Start Study Job",
- "description": "Changed in *version 0.6.0*: Now responds with a 202 when successfully starting a computation",
+ "description": "Changed in *version 0.6.0*: Now responds with a 202 when successfully starting a computation\nChanged in *version 0.8*: query parameter `cluster_id` deprecated",
"operationId": "start_study_job",
"security": [
{
@@ -4628,7 +4629,8 @@
}
],
"title": "Cluster Id"
- }
+ },
+ "deprecated": true
}
],
"responses": {
@@ -6891,7 +6893,7 @@
"type": "integer",
"x_unit": "second"
},
- "key": "input_2",
+ "key": "f763658f-a89a-4a90-ace4-c44631290f12",
"kind": "input"
}
},
@@ -7099,7 +7101,9 @@
"required": [
"walletId",
"name",
+ "description",
"owner",
+ "thumbnail",
"status",
"created",
"modified",
diff --git a/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py b/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py
index 151a79c6871..af1c80c70ac 100644
--- a/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py
+++ b/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py
@@ -12,10 +12,8 @@
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from pydantic.types import PositiveInt
-from simcore_service_api_server.exceptions.backend_errors import (
- ProjectAlreadyStartedError,
-)
+from ...exceptions.backend_errors import ProjectAlreadyStartedError
from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES
from ...models.basic_types import VersionStr
from ...models.schemas.errors import ErrorGet
@@ -40,7 +38,11 @@
from ..dependencies.authentication import get_current_user_id, get_product_name
from ..dependencies.services import get_api_client
from ..dependencies.webserver import AuthSession, get_webserver_session
-from ._constants import FMSG_CHANGELOG_ADDED_IN_VERSION, FMSG_CHANGELOG_NEW_IN_VERSION
+from ._constants import (
+ FMSG_CHANGELOG_ADDED_IN_VERSION,
+ FMSG_CHANGELOG_CHANGED_IN_VERSION,
+ FMSG_CHANGELOG_NEW_IN_VERSION,
+)
_logger = logging.getLogger(__name__)
@@ -182,6 +184,9 @@ async def delete_job(
+ FMSG_CHANGELOG_ADDED_IN_VERSION.format("0.4.3", "query parameter `cluster_id`")
+ FMSG_CHANGELOG_ADDED_IN_VERSION.format(
"0.6", "responds with a 202 when successfully starting a computation"
+ )
+ + FMSG_CHANGELOG_CHANGED_IN_VERSION.format(
+ "0.8", "query parameter `cluster_id` deprecated"
),
)
async def start_job(
@@ -192,7 +197,9 @@ async def start_job(
user_id: Annotated[PositiveInt, Depends(get_current_user_id)],
director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))],
webserver_api: Annotated[AuthSession, Depends(get_webserver_session)],
- cluster_id: ClusterID | None = None,
+ cluster_id: Annotated[ # pylint: disable=unused-argument # noqa: ARG001
+ ClusterID | None, Query(deprecated=True)
+ ] = None,
):
job_name = _compose_job_resource_name(solver_key, version, job_id)
_logger.debug("Start Job '%s'", job_name)
@@ -203,7 +210,6 @@ async def start_job(
job_id=job_id,
expected_job_name=job_name,
webserver_api=webserver_api,
- cluster_id=cluster_id,
)
except ProjectAlreadyStartedError:
job_status = await inspect_job(
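
Editor's aside — a small, self-contained sketch of the deprecation pattern applied above, assuming FastAPI; the route path and return value are placeholders. The query parameter stays in the signature so existing clients keep working, is flagged as deprecated in the generated OpenAPI, and is otherwise ignored:

from typing import Annotated

from fastapi import FastAPI, Query

app = FastAPI()


@app.post("/v2/solvers/{solver_key}/jobs/{job_id}:start", status_code=202)  # hypothetical path
async def start_job(
    solver_key: str,
    job_id: str,
    cluster_id: Annotated[int | None, Query(deprecated=True)] = None,  # accepted but unused
):
    # cluster_id is intentionally not forwarded; the backend selects clusters on its own
    return {"job_id": job_id, "received_deprecated_cluster_id": cluster_id}
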
diff --git a/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py b/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py
index 177b50d1e6c..8d23def5c0b 100644
--- a/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py
+++ b/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py
@@ -15,14 +15,11 @@
from models_library.projects_nodes_io import NodeID
from pydantic import PositiveInt
from servicelib.logging_utils import log_context
-from simcore_service_api_server.api.routes.solvers_jobs import JOBS_STATUS_CODES
-from simcore_service_api_server.exceptions.backend_errors import (
- ProjectAlreadyStartedError,
-)
from ...api.dependencies.authentication import get_current_user_id
from ...api.dependencies.services import get_api_client
from ...api.dependencies.webserver import get_webserver_session
+from ...exceptions.backend_errors import ProjectAlreadyStartedError
from ...models.pagination import Page, PaginationParams
from ...models.schemas.errors import ErrorGet
from ...models.schemas.jobs import (
@@ -53,6 +50,7 @@
from ..dependencies.application import get_reverse_url_mapper
from ._common import API_SERVER_DEV_FEATURES_ENABLED
from ._constants import FMSG_CHANGELOG_CHANGED_IN_VERSION, FMSG_CHANGELOG_NEW_IN_VERSION
+from .solvers_jobs import JOBS_STATUS_CODES
_logger = logging.getLogger(__name__)
router = APIRouter()
@@ -210,6 +208,9 @@ async def delete_study_job(
},
description=FMSG_CHANGELOG_CHANGED_IN_VERSION.format(
"0.6.0", "Now responds with a 202 when successfully starting a computation"
+ )
+ + FMSG_CHANGELOG_CHANGED_IN_VERSION.format(
+ "0.8", "query parameter `cluster_id` deprecated"
),
)
async def start_study_job(
@@ -219,7 +220,9 @@ async def start_study_job(
user_id: Annotated[PositiveInt, Depends(get_current_user_id)],
webserver_api: Annotated[AuthSession, Depends(get_webserver_session)],
director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))],
- cluster_id: ClusterID | None = None,
+ cluster_id: Annotated[ # pylint: disable=unused-argument # noqa: ARG001
+ ClusterID | None, Query(deprecated=True)
+ ] = None,
):
job_name = _compose_job_resource_name(study_id, job_id)
with log_context(_logger, logging.DEBUG, f"Starting Job '{job_name}'"):
@@ -229,7 +232,6 @@ async def start_study_job(
job_id=job_id,
expected_job_name=job_name,
webserver_api=webserver_api,
- cluster_id=cluster_id,
)
except ProjectAlreadyStartedError:
job_status: JobStatus = await inspect_study_job(
diff --git a/services/api-server/src/simcore_service_api_server/services/director_v2.py b/services/api-server/src/simcore_service_api_server/services/director_v2.py
index e225a8adef7..aaa946f10d4 100644
--- a/services/api-server/src/simcore_service_api_server/services/director_v2.py
+++ b/services/api-server/src/simcore_service_api_server/services/director_v2.py
@@ -3,7 +3,6 @@
from uuid import UUID
from fastapi import FastAPI
-from models_library.clusters import ClusterID
from models_library.projects_nodes_io import NodeID
from models_library.projects_pipeline import ComputationTask
from models_library.projects_state import RunningState
@@ -102,9 +101,7 @@ async def start_computation(
user_id: PositiveInt,
product_name: str,
groups_extra_properties_repository: GroupsExtraPropertiesRepository,
- cluster_id: ClusterID | None = None,
) -> ComputationTaskGet:
- extras = {}
use_on_demand_clusters = (
await groups_extra_properties_repository.use_on_demand_clusters(
@@ -112,9 +109,6 @@ async def start_computation(
)
)
- if cluster_id is not None and not use_on_demand_clusters:
- extras["cluster_id"] = cluster_id
-
response = await self.client.post(
"/v2/computations",
json={
@@ -123,7 +117,6 @@ async def start_computation(
"start_pipeline": True,
"product_name": product_name,
"use_on_demand_clusters": use_on_demand_clusters,
- **extras,
},
)
response.raise_for_status()
diff --git a/services/api-server/src/simcore_service_api_server/services/jobs.py b/services/api-server/src/simcore_service_api_server/services/jobs.py
index 7bc46d5ed1e..277f9625f17 100644
--- a/services/api-server/src/simcore_service_api_server/services/jobs.py
+++ b/services/api-server/src/simcore_service_api_server/services/jobs.py
@@ -4,7 +4,6 @@
from fastapi import Depends, HTTPException, Request, status
from models_library.api_schemas_webserver.projects import ProjectGet
-from models_library.clusters import ClusterID
from pydantic import HttpUrl, PositiveInt
from servicelib.logging_utils import log_context
@@ -41,7 +40,6 @@ async def start_project(
job_id: JobID,
expected_job_name: str,
webserver_api: Annotated[AuthSession, Depends(get_webserver_session)],
- cluster_id: ClusterID | None = None,
) -> None:
if pricing_spec := JobPricingSpecification.create_from_headers(request.headers):
with log_context(_logger, logging.DEBUG, "Set pricing plan and unit"):
@@ -56,7 +54,7 @@ async def start_project(
pricing_unit=pricing_spec.pricing_unit,
)
with log_context(_logger, logging.DEBUG, "Starting job"):
- await webserver_api.start_project(project_id=job_id, cluster_id=cluster_id)
+ await webserver_api.start_project(project_id=job_id)
async def stop_project(
diff --git a/services/api-server/src/simcore_service_api_server/services/webserver.py b/services/api-server/src/simcore_service_api_server/services/webserver.py
index ac0437dbc7d..b5e1c29c106 100644
--- a/services/api-server/src/simcore_service_api_server/services/webserver.py
+++ b/services/api-server/src/simcore_service_api_server/services/webserver.py
@@ -2,9 +2,10 @@
import logging
import urllib.parse
+from collections.abc import Mapping
from dataclasses import dataclass
from functools import partial
-from typing import Any, Mapping
+from typing import Any
from uuid import UUID
import httpx
@@ -36,7 +37,6 @@
WalletGet,
WalletGetWithAvailableCredits,
)
-from models_library.clusters import ClusterID
from models_library.generics import Envelope
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
@@ -444,11 +444,12 @@ async def connect_pricing_unit_to_project_node(
}
)
async def start_project(
- self, *, project_id: UUID, cluster_id: ClusterID | None = None
+ self,
+ *,
+ project_id: UUID,
) -> None:
body_input: dict[str, Any] = {}
- if cluster_id:
- body_input["cluster_id"] = cluster_id
+
body: ComputationStart = ComputationStart(**body_input)
response = await self.client.post(
f"/computations/{project_id}:start",
diff --git a/services/api-server/tests/mocks/get_job_outputs.json b/services/api-server/tests/mocks/get_job_outputs.json
index cc49e55fe27..a53e1742e95 100644
--- a/services/api-server/tests/mocks/get_job_outputs.json
+++ b/services/api-server/tests/mocks/get_job_outputs.json
@@ -1,586 +1,585 @@
[
- {
- "name": "POST /projects",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/projects",
- "path_parameters": []
- },
- "query": "from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true",
- "response_body": {
- "data": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
- "task_name": "POST /v0/projects?from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true",
- "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
- "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result",
- "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3"
- }
- },
- "status_code": 202
- },
- {
- "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
- }
- ]
+ {
+ "name": "POST /projects",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects",
+ "path_parameters": []
+ },
+ "query": "from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true",
+ "response_body": {
+ "data": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
+ "task_name": "POST /v0/projects?from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true",
+ "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
+ "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result",
+ "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3"
+ }
+ },
+ "status_code": 202
},
- "response_body": {
- "data": {
- "task_progress": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
- "message": "Checking study access rights...",
- "percent": 0.0
+ {
+ "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
+ ]
},
- "done": false,
- "started": "2024-07-16T12:56:51.900041"
- }
- }
- },
- {
- "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
+ "response_body": {
+ "data": {
+ "task_progress": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
+ "message": "Checking study access rights...",
+ "percent": 0.0
+ },
+ "done": false,
+ "started": "2024-07-16T12:56:51.900041"
+ }
}
- ]
},
- "response_body": {
- "data": {
- "task_progress": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
- "message": "Checking study access rights...",
- "percent": 0.0
+ {
+ "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
+ ]
},
- "done": false,
- "started": "2024-07-16T12:56:51.900041"
- }
- }
- },
- {
- "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
+ "response_body": {
+ "data": {
+ "task_progress": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
+ "message": "Checking study access rights...",
+ "percent": 0.0
+ },
+ "done": false,
+ "started": "2024-07-16T12:56:51.900041"
+ }
}
- ]
},
- "response_body": {
- "data": {
- "task_progress": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
- "message": "finished",
- "percent": 1.0
+ {
+ "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
+ ]
},
- "done": true,
- "started": "2024-07-16T12:56:51.900041"
- }
- }
- },
- {
- "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}/result",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
+ "response_body": {
+ "data": {
+ "task_progress": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3",
+ "message": "finished",
+ "percent": 1.0
+ },
+ "done": true,
+ "started": "2024-07-16T12:56:51.900041"
+ }
}
- ]
},
- "response_body": {
- "data": {
- "uuid": "df0b67b6-4372-11ef-a15d-0242ac14000c",
- "name": "teststudy (Copy)",
- "description": "",
- "thumbnail": "",
- "creationDate": "2024-07-16T12:56:51.922Z",
- "lastChangeDate": "2024-07-16T12:56:51.922Z",
- "workspaceId": 5,
- "folderId": 2,
- "trashedAt": null,
- "workbench": {
- "dd875b4f-7663-529f-bd7f-3716b19e28af": {
- "key": "simcore/services/comp/itis/sleeper",
- "version": "2.0.2",
- "label": "sleeper",
- "progress": 0.0,
- "inputs": {
- "input_1": {
- "nodeUuid": "cda9d480-d3ad-55c8-b9ce-c50eb1bab818",
- "output": "outFile"
- },
- "input_2": 2,
- "input_3": false
- },
- "inputsRequired": [],
- "inputNodes": [
- "cda9d480-d3ad-55c8-b9ce-c50eb1bab818"
- ],
- "state": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "NOT_STARTED",
- "progress": null
- }
- },
- "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": {
- "key": "simcore/services/frontend/file-picker",
- "version": "1.0.0",
- "label": "inputfile",
- "inputs": {},
- "inputsRequired": [],
- "inputNodes": []
- },
- "c784a033-36c7-558b-9cc5-448321de01f8": {
- "key": "simcore/services/frontend/iterator-consumer/probe/file",
- "version": "1.0.0",
- "label": "outputfile",
- "inputs": {
- "in_1": {
- "nodeUuid": "dd875b4f-7663-529f-bd7f-3716b19e28af",
- "output": "output_1"
- }
- },
- "inputsRequired": [],
- "inputNodes": [
- "dd875b4f-7663-529f-bd7f-3716b19e28af"
+ {
+ "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}/result",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
]
- }
- },
- "prjOwner": "bisgaard@itis.swiss",
- "accessRights": {
- "3": {
- "read": true,
- "write": true,
- "delete": true
- }
- },
- "tags": [],
- "classifiers": [],
- "state": {
- "locked": {
- "value": false,
- "status": "CLOSED"
- },
- "state": {
- "value": "NOT_STARTED"
- }
},
- "ui": {
- "workbench": {
- "c784a033-36c7-558b-9cc5-448321de01f8": {
- "position": {
- "x": 1175,
- "y": 467
- }
- },
- "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": {
- "position": {
- "x": 586,
- "y": 471
- }
- },
- "dd875b4f-7663-529f-bd7f-3716b19e28af": {
- "position": {
- "x": 860,
- "y": 440
- }
+ "response_body": {
+ "data": {
+ "uuid": "df0b67b6-4372-11ef-a15d-0242ac14000c",
+ "name": "teststudy (Copy)",
+ "description": "",
+ "thumbnail": "",
+ "creationDate": "2024-07-16T12:56:51.922Z",
+ "lastChangeDate": "2024-07-16T12:56:51.922Z",
+ "workspaceId": 5,
+ "folderId": 2,
+ "trashedAt": null,
+ "workbench": {
+ "dd875b4f-7663-529f-bd7f-3716b19e28af": {
+ "key": "simcore/services/comp/itis/sleeper",
+ "version": "2.0.2",
+ "label": "sleeper",
+ "progress": 0.0,
+ "inputs": {
+ "input_1": {
+ "nodeUuid": "cda9d480-d3ad-55c8-b9ce-c50eb1bab818",
+ "output": "outFile"
+ },
+ "input_2": 2,
+ "input_3": false
+ },
+ "inputsRequired": [],
+ "inputNodes": [
+ "cda9d480-d3ad-55c8-b9ce-c50eb1bab818"
+ ],
+ "state": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "NOT_STARTED",
+ "progress": null
+ }
+ },
+ "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": {
+ "key": "simcore/services/frontend/file-picker",
+ "version": "1.0.0",
+ "label": "inputfile",
+ "inputs": {},
+ "inputsRequired": [],
+ "inputNodes": []
+ },
+ "c784a033-36c7-558b-9cc5-448321de01f8": {
+ "key": "simcore/services/frontend/iterator-consumer/probe/file",
+ "version": "1.0.0",
+ "label": "outputfile",
+ "inputs": {
+ "in_1": {
+ "nodeUuid": "dd875b4f-7663-529f-bd7f-3716b19e28af",
+ "output": "output_1"
+ }
+ },
+ "inputsRequired": [],
+ "inputNodes": [
+ "dd875b4f-7663-529f-bd7f-3716b19e28af"
+ ]
+ }
+ },
+ "prjOwner": "bisgaard@itis.swiss",
+ "accessRights": {
+ "3": {
+ "read": true,
+ "write": true,
+ "delete": true
+ }
+ },
+ "tags": [],
+ "classifiers": [],
+ "state": {
+ "locked": {
+ "value": false,
+ "status": "CLOSED"
+ },
+ "state": {
+ "value": "NOT_STARTED"
+ }
+ },
+ "ui": {
+ "workbench": {
+ "c784a033-36c7-558b-9cc5-448321de01f8": {
+ "position": {
+ "x": 1175,
+ "y": 467
+ }
+ },
+ "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": {
+ "position": {
+ "x": 586,
+ "y": 471
+ }
+ },
+ "dd875b4f-7663-529f-bd7f-3716b19e28af": {
+ "position": {
+ "x": 860,
+ "y": 440
+ }
+ }
+ },
+ "slideshow": {},
+ "currentNodeId": "b448cfb0-436c-11ef-a15d-0242ac14000c",
+ "mode": "workbench"
+ },
+ "quality": {
+ "enabled": true,
+ "tsr_target": {
+ "r01": {
+ "level": 4,
+ "references": ""
+ },
+ "r02": {
+ "level": 4,
+ "references": ""
+ },
+ "r03": {
+ "level": 4,
+ "references": ""
+ },
+ "r04": {
+ "level": 4,
+ "references": ""
+ },
+ "r05": {
+ "level": 4,
+ "references": ""
+ },
+ "r06": {
+ "level": 4,
+ "references": ""
+ },
+ "r07": {
+ "level": 4,
+ "references": ""
+ },
+ "r08": {
+ "level": 4,
+ "references": ""
+ },
+ "r09": {
+ "level": 4,
+ "references": ""
+ },
+ "r10": {
+ "level": 4,
+ "references": ""
+ },
+ "r03b": {
+ "references": ""
+ },
+ "r03c": {
+ "references": ""
+ },
+ "r07b": {
+ "references": ""
+ },
+ "r07c": {
+ "references": ""
+ },
+ "r07d": {
+ "references": ""
+ },
+ "r07e": {
+ "references": ""
+ },
+ "r08b": {
+ "references": ""
+ },
+ "r10b": {
+ "references": ""
+ }
+ },
+ "tsr_current": {
+ "r01": {
+ "level": 0,
+ "references": ""
+ },
+ "r02": {
+ "level": 0,
+ "references": ""
+ },
+ "r03": {
+ "level": 0,
+ "references": ""
+ },
+ "r04": {
+ "level": 0,
+ "references": ""
+ },
+ "r05": {
+ "level": 0,
+ "references": ""
+ },
+ "r06": {
+ "level": 0,
+ "references": ""
+ },
+ "r07": {
+ "level": 0,
+ "references": ""
+ },
+ "r08": {
+ "level": 0,
+ "references": ""
+ },
+ "r09": {
+ "level": 0,
+ "references": ""
+ },
+ "r10": {
+ "level": 0,
+ "references": ""
+ },
+ "r03b": {
+ "references": ""
+ },
+ "r03c": {
+ "references": ""
+ },
+ "r07b": {
+ "references": ""
+ },
+ "r07c": {
+ "references": ""
+ },
+ "r07d": {
+ "references": ""
+ },
+ "r07e": {
+ "references": ""
+ },
+ "r08b": {
+ "references": ""
+ },
+ "r10b": {
+ "references": ""
+ }
+ }
+ },
+ "dev": {}
}
- },
- "slideshow": {},
- "currentNodeId": "b448cfb0-436c-11ef-a15d-0242ac14000c",
- "mode": "workbench"
},
- "quality": {
- "enabled": true,
- "tsr_target": {
- "r01": {
- "level": 4,
- "references": ""
- },
- "r02": {
- "level": 4,
- "references": ""
- },
- "r03": {
- "level": 4,
- "references": ""
- },
- "r04": {
- "level": 4,
- "references": ""
- },
- "r05": {
- "level": 4,
- "references": ""
- },
- "r06": {
- "level": 4,
- "references": ""
- },
- "r07": {
- "level": 4,
- "references": ""
- },
- "r08": {
- "level": 4,
- "references": ""
- },
- "r09": {
- "level": 4,
- "references": ""
- },
- "r10": {
- "level": 4,
- "references": ""
- },
- "r03b": {
- "references": ""
- },
- "r03c": {
- "references": ""
- },
- "r07b": {
- "references": ""
- },
- "r07c": {
- "references": ""
- },
- "r07d": {
- "references": ""
- },
- "r07e": {
- "references": ""
- },
- "r08b": {
- "references": ""
- },
- "r10b": {
- "references": ""
- }
- },
- "tsr_current": {
- "r01": {
- "level": 0,
- "references": ""
- },
- "r02": {
- "level": 0,
- "references": ""
- },
- "r03": {
- "level": 0,
- "references": ""
- },
- "r04": {
- "level": 0,
- "references": ""
- },
- "r05": {
- "level": 0,
- "references": ""
- },
- "r06": {
- "level": 0,
- "references": ""
- },
- "r07": {
- "level": 0,
- "references": ""
- },
- "r08": {
- "level": 0,
- "references": ""
- },
- "r09": {
- "level": 0,
- "references": ""
- },
- "r10": {
- "level": 0,
- "references": ""
- },
- "r03b": {
- "references": ""
- },
- "r03c": {
- "references": ""
- },
- "r07b": {
- "references": ""
- },
- "r07c": {
- "references": ""
- },
- "r07d": {
- "references": ""
- },
- "r07e": {
- "references": ""
- },
- "r08b": {
- "references": ""
- },
- "r10b": {
- "references": ""
- }
- }
- },
- "dev": {}
- }
- },
- "status_code": 201
- },
- {
- "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c",
- "description": "",
- "method": "PATCH",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
- }
- ]
+ "status_code": 201
},
- "request_payload": {
- "name": "studies/e9f34992-436c-11ef-a15d-0242ac14000c/jobs/df0b67b6-4372-11ef-a15d-0242ac14000c"
- },
- "status_code": 204
- },
- {
- "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/inputs",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/inputs",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
- }
- ]
- },
- "response_body": {
- "data": {}
- }
- },
- {
- "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/nodes/cda9d480-d3ad-55c8-b9ce-c50eb1bab818/outputs",
- "description": "",
- "method": "PATCH",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str"
- },
- "response_value": "projects"
+ {
+ "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c",
+ "description": "",
+ "method": "PATCH",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
},
- {
- "in": "path",
- "name": "node_id",
- "required": true,
- "schema": {
- "title": "Node Id",
- "type": "str"
- },
- "response_value": "nodes"
- }
- ]
- },
- "request_payload": {
- "outputs": {
- "outFile": {
- "store": 0,
- "path": "api/c1dcde67-6434-31c3-95ee-bf5fe1e9422d/inputfile",
- "label": "inputfile",
- "eTag": null,
- "dataset": null
- }
- }
+ "request_payload": {
+ "name": "studies/e9f34992-436c-11ef-a15d-0242ac14000c/jobs/df0b67b6-4372-11ef-a15d-0242ac14000c"
+ },
+ "status_code": 204
},
- "status_code": 204
- },
- {
- "name": "POST /computations/df0b67b6-4372-11ef-a15d-0242ac14000c:start",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/computations/{project_id}:start",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ {
+ "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/inputs",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/inputs",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
+ },
+ "response_body": {
+ "data": {}
}
- ]
},
- "request_payload": {},
- "response_body": {
- "data": {
- "pipeline_id": "df0b67b6-4372-11ef-a15d-0242ac14000c"
- }
+ {
+ "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/nodes/cda9d480-d3ad-55c8-b9ce-c50eb1bab818/outputs",
+ "description": "",
+ "method": "PATCH",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str"
+ },
+ "response_value": "projects"
+ },
+ {
+ "in": "path",
+ "name": "node_id",
+ "required": true,
+ "schema": {
+ "title": "Node Id",
+ "type": "str"
+ },
+ "response_value": "nodes"
+ }
+ ]
+ },
+ "request_payload": {
+ "outputs": {
+ "outFile": {
+ "store": 0,
+ "path": "api/c1dcde67-6434-31c3-95ee-bf5fe1e9422d/inputfile",
+ "label": "inputfile",
+ "eTag": null,
+ "dataset": null
+ }
+ }
+ },
+ "status_code": 204
},
- "status_code": 201
- },
- {
- "name": "GET /v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
- }
- ]
+ {
+ "name": "POST /computations/df0b67b6-4372-11ef-a15d-0242ac14000c:start",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/computations/{project_id}:start",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "request_payload": {},
+ "response_body": {
+ "data": {
+ "pipeline_id": "df0b67b6-4372-11ef-a15d-0242ac14000c"
+ }
+ },
+ "status_code": 201
},
- "query": "user_id=1",
- "response_body": {
- "id": "df0b67b6-4372-11ef-a15d-0242ac14000c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "dd875b4f-7663-529f-bd7f-3716b19e28af": []
+ {
+ "name": "GET /v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.0,
- "node_states": {
- "dd875b4f-7663-529f-bd7f-3716b19e28af": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.0
- }
+ "query": "user_id=1",
+ "response_body": {
+ "id": "df0b67b6-4372-11ef-a15d-0242ac14000c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "dd875b4f-7663-529f-bd7f-3716b19e28af": []
+ },
+ "progress": 0.0,
+ "node_states": {
+ "dd875b4f-7663-529f-bd7f-3716b19e28af": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.0
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-07-16T12:56:57.553331+00:00",
+ "stopped": null,
+ "submitted": "2024-07-16T12:56:57.454372+00:00",
+ "url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c?user_id=1",
+ "stop_url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c:stop?user_id=1"
}
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-07-16T12:56:57.553331+00:00",
- "stopped": null,
- "submitted": "2024-07-16T12:56:57.454372+00:00",
- "url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c?user_id=1",
- "stop_url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c:stop?user_id=1"
- }
- },
- {
- "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/outputs",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/outputs",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
- }
- ]
},
- "response_body": {
- "data": {
- "c784a033-36c7-558b-9cc5-448321de01f8": {
- "key": "c784a033-36c7-558b-9cc5-448321de01f8",
- "value": null,
- "label": "outputfile"
+ {
+ "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/outputs",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/outputs",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
+ },
+ "response_body": {
+ "data": {
+ "c784a033-36c7-558b-9cc5-448321de01f8": {
+ "key": "c784a033-36c7-558b-9cc5-448321de01f8",
+ "value": null,
+ "label": "outputfile"
+ }
+ }
}
- }
}
- }
]
diff --git a/services/api-server/tests/mocks/run_study_workflow.json b/services/api-server/tests/mocks/run_study_workflow.json
index 56b92873e50..8078a8cc155 100644
--- a/services/api-server/tests/mocks/run_study_workflow.json
+++ b/services/api-server/tests/mocks/run_study_workflow.json
@@ -1,1676 +1,1666 @@
[
- {
- "name": "GET /projects/aeab71fe-f71b-11ee-8fca-0242ac140008/metadata/ports",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/metadata/ports",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
+ {
+ "name": "GET /projects/aeab71fe-f71b-11ee-8fca-0242ac140008/metadata/ports",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/metadata/ports",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
+ },
+ "response_body": {
+ "data": [
+ {
+ "key": "0b8042c4-501a-4f9b-b2fa-17f860548b33",
+ "kind": "output",
+ "content_schema": null
+ },
+ {
+ "key": "c0f304e0-228b-413c-937a-2b1b060c9e02",
+ "kind": "input",
+ "content_schema": {
+ "title": "InputInt",
+ "type": "integer",
+ "description": "Produced integer value"
+ }
+ },
+ {
+ "key": "d9069bdb-35ae-4ec3-a05a-a42d7a7b0579",
+ "kind": "output",
+ "content_schema": {
+ "title": "OutputInt",
+ "type": "integer",
+ "description": "Captured integer value"
+ }
+ },
+ {
+ "key": "50fd6b01-bb5d-4136-a932-73676a461680",
+ "kind": "output",
+ "content_schema": {
+ "title": "OutputString",
+ "type": "string",
+ "description": "Captured string value"
+ }
+ },
+ {
+ "key": "38985050-7476-4534-8c79-839a928ea2a8",
+ "kind": "input",
+ "content_schema": {
+ "title": "InputString",
+ "type": "string",
+ "description": "Produced string value"
+ }
+ },
+ {
+ "key": "8815eab9-9bd5-4dda-a65c-3c14a423bfb3",
+ "kind": "input",
+ "content_schema": {
+ "title": "InputArray",
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ },
+ {
+ "key": "22e7a091-2e4e-4e5a-93aa-c500457f5684",
+ "kind": "output",
+ "content_schema": {
+ "title": "OutputArray",
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ },
+ {
+ "key": "04de3b6f-668d-4826-822f-c58370c037ed",
+ "kind": "input",
+ "content_schema": {
+ "title": "InputNumber",
+ "type": "number",
+ "description": "Produced number value"
+ }
+ },
+ {
+ "key": "b227b053-1207-4b48-b6ee-71a0ff24b014",
+ "kind": "output",
+ "content_schema": {
+ "title": "OutputNumber",
+ "type": "number",
+ "description": "Captured number value"
+ }
+ },
+ {
+ "key": "72d5daac-f728-4603-b49e-9a407e4aa079",
+ "kind": "input",
+ "content_schema": {
+ "title": "InputBool",
+ "type": "boolean",
+ "description": "Produced boolean value"
+ }
+ },
+ {
+ "key": "f85418d5-45d8-41eb-a1ac-4f14a63ec890",
+ "kind": "output",
+ "content_schema": {
+ "title": "OutputBool",
+ "type": "boolean",
+ "description": "Captured boolean value"
+ }
+ }
+ ]
}
- ]
},
- "response_body": {
- "data": [
- {
- "key": "0b8042c4-501a-4f9b-b2fa-17f860548b33",
- "kind": "output",
- "content_schema": null
- },
- {
- "key": "c0f304e0-228b-413c-937a-2b1b060c9e02",
- "kind": "input",
- "content_schema": {
- "title": "InputInt",
- "type": "integer",
- "description": "Produced integer value"
- }
- },
- {
- "key": "d9069bdb-35ae-4ec3-a05a-a42d7a7b0579",
- "kind": "output",
- "content_schema": {
- "title": "OutputInt",
- "type": "integer",
- "description": "Captured integer value"
- }
- },
- {
- "key": "50fd6b01-bb5d-4136-a932-73676a461680",
- "kind": "output",
- "content_schema": {
- "title": "OutputString",
- "type": "string",
- "description": "Captured string value"
- }
+ {
+ "name": "POST /projects",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects",
+ "path_parameters": []
},
- {
- "key": "38985050-7476-4534-8c79-839a928ea2a8",
- "kind": "input",
- "content_schema": {
- "title": "InputString",
- "type": "string",
- "description": "Produced string value"
- }
- },
- {
- "key": "8815eab9-9bd5-4dda-a65c-3c14a423bfb3",
- "kind": "input",
- "content_schema": {
- "title": "InputArray",
- "type": "array",
- "items": {
- "type": "number"
- }
- }
- },
- {
- "key": "22e7a091-2e4e-4e5a-93aa-c500457f5684",
- "kind": "output",
- "content_schema": {
- "title": "OutputArray",
- "type": "array",
- "items": {
- "type": "number"
+ "query": "from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true",
+ "response_body": {
+ "data": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "task_name": "POST /v0/projects?from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true",
+ "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result",
+ "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24"
}
- }
- },
- {
- "key": "04de3b6f-668d-4826-822f-c58370c037ed",
- "kind": "input",
- "content_schema": {
- "title": "InputNumber",
- "type": "number",
- "description": "Produced number value"
- }
- },
- {
- "key": "b227b053-1207-4b48-b6ee-71a0ff24b014",
- "kind": "output",
- "content_schema": {
- "title": "OutputNumber",
- "type": "number",
- "description": "Captured number value"
- }
- },
- {
- "key": "72d5daac-f728-4603-b49e-9a407e4aa079",
- "kind": "input",
- "content_schema": {
- "title": "InputBool",
- "type": "boolean",
- "description": "Produced boolean value"
- }
},
- {
- "key": "f85418d5-45d8-41eb-a1ac-4f14a63ec890",
- "kind": "output",
- "content_schema": {
- "title": "OutputBool",
- "type": "boolean",
- "description": "Captured boolean value"
- }
- }
- ]
- }
- },
- {
- "name": "POST /projects",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/projects",
- "path_parameters": []
- },
- "query": "from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true",
- "response_body": {
- "data": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "task_name": "POST /v0/projects?from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true",
- "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result",
- "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24"
- }
+ "status_code": 202
},
- "status_code": 202
- },
- {
- "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
- }
- ]
- },
- "response_body": {
- "data": {
- "task_progress": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "message": "inserted project new_project['uuid']='e19f9144-fb3f-11ee-b7b0-0242ac14001c' into the db",
- "percent": 0.0
+ {
+ "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
+ ]
},
- "done": false,
- "started": "2024-04-15T15:50:28.173722"
- }
- }
- },
- {
- "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
+ "response_body": {
+ "data": {
+ "task_progress": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "message": "inserted project new_project['uuid']='e19f9144-fb3f-11ee-b7b0-0242ac14001c' into the db",
+ "percent": 0.0
+ },
+ "done": false,
+ "started": "2024-04-15T15:50:28.173722"
+ }
}
- ]
},
- "response_body": {
- "data": {
- "task_progress": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "message": "Checking study access rights...",
- "percent": 0.0
+ {
+ "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
+ ]
},
- "done": false,
- "started": "2024-04-15T15:50:28.173722"
- }
- }
- },
- {
- "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
+ "response_body": {
+ "data": {
+ "task_progress": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "message": "Checking study access rights...",
+ "percent": 0.0
+ },
+ "done": false,
+ "started": "2024-04-15T15:50:28.173722"
+ }
}
- ]
},
- "response_body": {
- "data": {
- "task_progress": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "message": "updated network information in directorv2",
- "percent": 1.0
+ {
+ "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
+ ]
},
- "done": false,
- "started": "2024-04-15T15:50:28.173722"
- }
- }
- },
- {
- "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
+ "response_body": {
+ "data": {
+ "task_progress": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "message": "updated network information in directorv2",
+ "percent": 1.0
+ },
+ "done": false,
+ "started": "2024-04-15T15:50:28.173722"
+ }
}
- ]
},
- "response_body": {
- "data": {
- "task_progress": {
- "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
- "message": "finished",
- "percent": 1.0
+ {
+ "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
+ ]
},
- "done": true,
- "started": "2024-04-15T15:50:28.173722"
- }
- }
- },
- {
- "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/tasks/{task_id}/result",
- "path_parameters": [
- {
- "in": "path",
- "name": "task_id",
- "required": true,
- "schema": {
- "title": "Task Id",
- "type": "str"
- },
- "response_value": "tasks"
+ "response_body": {
+ "data": {
+ "task_progress": {
+ "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24",
+ "message": "finished",
+ "percent": 1.0
+ },
+ "done": true,
+ "started": "2024-04-15T15:50:28.173722"
+ }
}
- ]
},
- "response_body": {
- "data": {
- "uuid": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "name": "test (Copy)",
- "description": "",
- "thumbnail": "",
- "creationDate": "2024-04-15T15:50:28.196Z",
- "lastChangeDate": "2024-04-15T15:50:28.196Z",
- "workspaceId": 3,
- "folderId": 3,
- "trashedAt": null,
- "workbench": {
- "ab014072-a95f-5775-bb34-5582a13245a6": {
- "key": "simcore/services/frontend/iterator-consumer/probe/file",
- "version": "1.0.0",
- "label": "OutputFile",
- "thumbnail": null,
- "inputs": {
- "in_1": {
- "nodeUuid": "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca",
- "output": "outFile"
- }
- },
- "inputsUnits": {},
- "inputNodes": [
- "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca"
- ],
- "parent": null
- },
- "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": {
- "key": "simcore/services/frontend/file-picker",
- "version": "1.0.0",
- "label": "InputFile",
- "thumbnail": null,
- "inputs": {},
- "inputsUnits": {},
- "inputNodes": [],
- "outputs": {}
- },
- "096acfb2-8c38-560a-91d3-8911f4334289": {
- "key": "simcore/services/frontend/parameter/integer",
- "version": "1.0.0",
- "label": "InputInt",
- "thumbnail": null,
- "runHash": null,
- "inputs": {},
- "inputsUnits": {},
- "inputNodes": [],
- "outputs": {
- "out_1": 1
- }
- },
- "3d4963ee-179f-5948-9086-dd9bef543f65": {
- "key": "simcore/services/frontend/iterator-consumer/probe/integer",
- "version": "1.0.0",
- "label": "OutputInt",
- "thumbnail": null,
- "inputs": {
- "in_1": {
- "nodeUuid": "096acfb2-8c38-560a-91d3-8911f4334289",
- "output": "out_1"
- }
- },
- "inputsUnits": {},
- "inputNodes": [
- "096acfb2-8c38-560a-91d3-8911f4334289"
- ]
- },
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "key": "simcore/services/comp/itis/sleeper",
- "version": "2.0.2",
- "label": "sleeper",
- "progress": 0.0,
- "thumbnail": null,
- "inputs": {
- "input_2": 2,
- "input_3": false
- },
- "inputsUnits": {},
- "inputNodes": [],
- "state": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "NOT_STARTED",
- "progress": null
- }
- },
- "2a9452ac-d210-5e11-a631-1d73454bfd91": {
- "key": "simcore/services/frontend/iterator-consumer/probe/string",
- "version": "1.0.0",
- "label": "OutputString",
- "thumbnail": null,
- "inputs": {
- "in_1": {
- "nodeUuid": "bcc36381-7377-533f-bb04-f785c0f8e2be",
- "output": "out_1"
- }
- },
- "inputsUnits": {},
- "inputNodes": [
- "bcc36381-7377-533f-bb04-f785c0f8e2be"
- ]
- },
- "bcc36381-7377-533f-bb04-f785c0f8e2be": {
- "key": "simcore/services/frontend/parameter/string",
- "version": "1.0.0",
- "label": "InputString",
- "thumbnail": null,
- "runHash": null,
- "inputs": {},
- "inputsUnits": {},
- "inputNodes": [],
- "outputs": {
- "out_1": "Foo"
- }
- },
- "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": {
- "key": "simcore/services/frontend/parameter/array",
- "version": "1.0.0",
- "label": "InputArray",
- "thumbnail": null,
- "runHash": null,
- "inputs": {},
- "inputsUnits": {},
- "inputNodes": [],
- "outputs": {
- "out_1": [
- 1
- ]
- }
- },
- "cb5bc33d-6635-5680-98e3-a6ac57f908f4": {
- "key": "simcore/services/frontend/iterator-consumer/probe/array",
- "version": "1.0.0",
- "label": "OutputArray",
- "thumbnail": null,
- "inputs": {
- "in_1": {
- "nodeUuid": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3",
- "output": "out_1"
- }
- },
- "inputsUnits": {},
- "inputNodes": [
- "197ba9f7-d09c-5cf8-9290-284cd6c40fb3"
- ]
- },
- "d43949c5-5143-5738-bae9-7d231dcabe7f": {
- "key": "simcore/services/frontend/parameter/number",
- "version": "1.0.0",
- "label": "InputNumber",
- "thumbnail": null,
- "runHash": null,
- "inputs": {},
- "inputsUnits": {},
- "inputNodes": [],
- "outputs": {
- "out_1": 1
- }
- },
- "cd7eacb5-6806-5956-86c8-9b30ec588402": {
- "key": "simcore/services/frontend/iterator-consumer/probe/number",
- "version": "1.0.0",
- "label": "OutputNumber",
- "thumbnail": null,
- "inputs": {
- "in_1": {
- "nodeUuid": "d43949c5-5143-5738-bae9-7d231dcabe7f",
- "output": "out_1"
- }
- },
- "inputsUnits": {},
- "inputNodes": [
- "d43949c5-5143-5738-bae9-7d231dcabe7f"
+ {
+ "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/tasks/{task_id}/result",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "task_id",
+ "required": true,
+ "schema": {
+ "title": "Task Id",
+ "type": "str"
+ },
+ "response_value": "tasks"
+ }
]
- },
- "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": {
- "key": "simcore/services/frontend/parameter/boolean",
- "version": "1.0.0",
- "label": "InputBool",
- "thumbnail": null,
- "runHash": null,
- "inputs": {},
- "inputsUnits": {},
- "inputNodes": [],
- "outputs": {
- "out_1": true
+ },
+ "response_body": {
+ "data": {
+ "uuid": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "name": "test (Copy)",
+ "description": "",
+ "thumbnail": "",
+ "creationDate": "2024-04-15T15:50:28.196Z",
+ "lastChangeDate": "2024-04-15T15:50:28.196Z",
+ "workspaceId": 3,
+ "folderId": 3,
+ "trashedAt": null,
+ "workbench": {
+ "ab014072-a95f-5775-bb34-5582a13245a6": {
+ "key": "simcore/services/frontend/iterator-consumer/probe/file",
+ "version": "1.0.0",
+ "label": "OutputFile",
+ "thumbnail": null,
+ "inputs": {
+ "in_1": {
+ "nodeUuid": "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca",
+ "output": "outFile"
+ }
+ },
+ "inputsUnits": {},
+ "inputNodes": [
+ "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca"
+ ],
+ "parent": null
+ },
+ "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": {
+ "key": "simcore/services/frontend/file-picker",
+ "version": "1.0.0",
+ "label": "InputFile",
+ "thumbnail": null,
+ "inputs": {},
+ "inputsUnits": {},
+ "inputNodes": [],
+ "outputs": {}
+ },
+ "096acfb2-8c38-560a-91d3-8911f4334289": {
+ "key": "simcore/services/frontend/parameter/integer",
+ "version": "1.0.0",
+ "label": "InputInt",
+ "thumbnail": null,
+ "runHash": null,
+ "inputs": {},
+ "inputsUnits": {},
+ "inputNodes": [],
+ "outputs": {
+ "out_1": 1
+ }
+ },
+ "3d4963ee-179f-5948-9086-dd9bef543f65": {
+ "key": "simcore/services/frontend/iterator-consumer/probe/integer",
+ "version": "1.0.0",
+ "label": "OutputInt",
+ "thumbnail": null,
+ "inputs": {
+ "in_1": {
+ "nodeUuid": "096acfb2-8c38-560a-91d3-8911f4334289",
+ "output": "out_1"
+ }
+ },
+ "inputsUnits": {},
+ "inputNodes": [
+ "096acfb2-8c38-560a-91d3-8911f4334289"
+ ]
+ },
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "key": "simcore/services/comp/itis/sleeper",
+ "version": "2.0.2",
+ "label": "sleeper",
+ "progress": 0.0,
+ "thumbnail": null,
+ "inputs": {
+ "input_2": 2,
+ "input_3": false
+ },
+ "inputsUnits": {},
+ "inputNodes": [],
+ "state": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "NOT_STARTED",
+ "progress": null
+ }
+ },
+ "2a9452ac-d210-5e11-a631-1d73454bfd91": {
+ "key": "simcore/services/frontend/iterator-consumer/probe/string",
+ "version": "1.0.0",
+ "label": "OutputString",
+ "thumbnail": null,
+ "inputs": {
+ "in_1": {
+ "nodeUuid": "bcc36381-7377-533f-bb04-f785c0f8e2be",
+ "output": "out_1"
+ }
+ },
+ "inputsUnits": {},
+ "inputNodes": [
+ "bcc36381-7377-533f-bb04-f785c0f8e2be"
+ ]
+ },
+ "bcc36381-7377-533f-bb04-f785c0f8e2be": {
+ "key": "simcore/services/frontend/parameter/string",
+ "version": "1.0.0",
+ "label": "InputString",
+ "thumbnail": null,
+ "runHash": null,
+ "inputs": {},
+ "inputsUnits": {},
+ "inputNodes": [],
+ "outputs": {
+ "out_1": "Foo"
+ }
+ },
+ "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": {
+ "key": "simcore/services/frontend/parameter/array",
+ "version": "1.0.0",
+ "label": "InputArray",
+ "thumbnail": null,
+ "runHash": null,
+ "inputs": {},
+ "inputsUnits": {},
+ "inputNodes": [],
+ "outputs": {
+ "out_1": [
+ 1
+ ]
+ }
+ },
+ "cb5bc33d-6635-5680-98e3-a6ac57f908f4": {
+ "key": "simcore/services/frontend/iterator-consumer/probe/array",
+ "version": "1.0.0",
+ "label": "OutputArray",
+ "thumbnail": null,
+ "inputs": {
+ "in_1": {
+ "nodeUuid": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3",
+ "output": "out_1"
+ }
+ },
+ "inputsUnits": {},
+ "inputNodes": [
+ "197ba9f7-d09c-5cf8-9290-284cd6c40fb3"
+ ]
+ },
+ "d43949c5-5143-5738-bae9-7d231dcabe7f": {
+ "key": "simcore/services/frontend/parameter/number",
+ "version": "1.0.0",
+ "label": "InputNumber",
+ "thumbnail": null,
+ "runHash": null,
+ "inputs": {},
+ "inputsUnits": {},
+ "inputNodes": [],
+ "outputs": {
+ "out_1": 1
+ }
+ },
+ "cd7eacb5-6806-5956-86c8-9b30ec588402": {
+ "key": "simcore/services/frontend/iterator-consumer/probe/number",
+ "version": "1.0.0",
+ "label": "OutputNumber",
+ "thumbnail": null,
+ "inputs": {
+ "in_1": {
+ "nodeUuid": "d43949c5-5143-5738-bae9-7d231dcabe7f",
+ "output": "out_1"
+ }
+ },
+ "inputsUnits": {},
+ "inputNodes": [
+ "d43949c5-5143-5738-bae9-7d231dcabe7f"
+ ]
+ },
+ "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": {
+ "key": "simcore/services/frontend/parameter/boolean",
+ "version": "1.0.0",
+ "label": "InputBool",
+ "thumbnail": null,
+ "runHash": null,
+ "inputs": {},
+ "inputsUnits": {},
+ "inputNodes": [],
+ "outputs": {
+ "out_1": true
+ }
+ },
+ "efaaeabf-e4bc-5667-a757-d9b17ad606d9": {
+ "key": "simcore/services/frontend/iterator-consumer/probe/boolean",
+ "version": "1.0.0",
+ "label": "OutputBool",
+ "thumbnail": null,
+ "inputs": {
+ "in_1": {
+ "nodeUuid": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3",
+ "output": "out_1"
+ }
+ },
+ "inputsUnits": {},
+ "inputNodes": [
+ "584e44d4-9a78-571f-a2a4-7d9c7b2396e3"
+ ]
+ }
+ },
+ "prjOwner": "harpercynthia@example.com",
+ "accessRights": {
+ "3": {
+ "read": true,
+ "write": true,
+ "delete": true
+ }
+ },
+ "tags": [],
+ "classifiers": [],
+ "state": {
+ "locked": {
+ "value": false,
+ "status": "CLOSED"
+ },
+ "state": {
+ "value": "NOT_STARTED"
+ }
+ },
+ "ui": {
+ "workbench": {
+ "096acfb2-8c38-560a-91d3-8911f4334289": {
+ "position": {
+ "x": 220,
+ "y": 40
+ }
+ },
+ "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": {
+ "position": {
+ "x": 240,
+ "y": 400
+ }
+ },
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "position": {
+ "x": 820,
+ "y": 360
+ }
+ },
+ "2a9452ac-d210-5e11-a631-1d73454bfd91": {
+ "position": {
+ "x": 580,
+ "y": 200
+ }
+ },
+ "3d4963ee-179f-5948-9086-dd9bef543f65": {
+ "position": {
+ "x": 580,
+ "y": 40
+ }
+ },
+ "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": {
+ "position": {
+ "x": 278,
+ "y": 733
+ }
+ },
+ "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": {
+ "position": {
+ "x": 200,
+ "y": 840
+ }
+ },
+ "ab014072-a95f-5775-bb34-5582a13245a6": {
+ "position": {
+ "x": 700,
+ "y": 840
+ }
+ },
+ "bcc36381-7377-533f-bb04-f785c0f8e2be": {
+ "position": {
+ "x": 220,
+ "y": 200
+ }
+ },
+ "cb5bc33d-6635-5680-98e3-a6ac57f908f4": {
+ "position": {
+ "x": 580,
+ "y": 420
+ }
+ },
+ "cd7eacb5-6806-5956-86c8-9b30ec588402": {
+ "position": {
+ "x": 562,
+ "y": 586
+ }
+ },
+ "d43949c5-5143-5738-bae9-7d231dcabe7f": {
+ "position": {
+ "x": 271,
+ "y": 621
+ }
+ },
+ "efaaeabf-e4bc-5667-a757-d9b17ad606d9": {
+ "position": {
+ "x": 656,
+ "y": 720
+ }
+ }
+ },
+ "slideshow": {},
+ "currentNodeId": "aeab71fe-f71b-11ee-8fca-0242ac140008",
+ "mode": "workbench"
+ },
+ "quality": {
+ "enabled": true,
+ "tsr_target": {
+ "r01": {
+ "level": 4,
+ "references": ""
+ },
+ "r02": {
+ "level": 4,
+ "references": ""
+ },
+ "r03": {
+ "level": 4,
+ "references": ""
+ },
+ "r04": {
+ "level": 4,
+ "references": ""
+ },
+ "r05": {
+ "level": 4,
+ "references": ""
+ },
+ "r06": {
+ "level": 4,
+ "references": ""
+ },
+ "r07": {
+ "level": 4,
+ "references": ""
+ },
+ "r08": {
+ "level": 4,
+ "references": ""
+ },
+ "r09": {
+ "level": 4,
+ "references": ""
+ },
+ "r10": {
+ "level": 4,
+ "references": ""
+ },
+ "r03b": {
+ "references": ""
+ },
+ "r03c": {
+ "references": ""
+ },
+ "r07b": {
+ "references": ""
+ },
+ "r07c": {
+ "references": ""
+ },
+ "r07d": {
+ "references": ""
+ },
+ "r07e": {
+ "references": ""
+ },
+ "r08b": {
+ "references": ""
+ },
+ "r10b": {
+ "references": ""
+ }
+ },
+ "tsr_current": {
+ "r01": {
+ "level": 0,
+ "references": ""
+ },
+ "r02": {
+ "level": 0,
+ "references": ""
+ },
+ "r03": {
+ "level": 0,
+ "references": ""
+ },
+ "r04": {
+ "level": 0,
+ "references": ""
+ },
+ "r05": {
+ "level": 0,
+ "references": ""
+ },
+ "r06": {
+ "level": 0,
+ "references": ""
+ },
+ "r07": {
+ "level": 0,
+ "references": ""
+ },
+ "r08": {
+ "level": 0,
+ "references": ""
+ },
+ "r09": {
+ "level": 0,
+ "references": ""
+ },
+ "r10": {
+ "level": 0,
+ "references": ""
+ },
+ "r03b": {
+ "references": ""
+ },
+ "r03c": {
+ "references": ""
+ },
+ "r07b": {
+ "references": ""
+ },
+ "r07c": {
+ "references": ""
+ },
+ "r07d": {
+ "references": ""
+ },
+ "r07e": {
+ "references": ""
+ },
+ "r08b": {
+ "references": ""
+ },
+ "r10b": {
+ "references": ""
+ }
+ }
+ },
+ "dev": {}
}
- },
- "efaaeabf-e4bc-5667-a757-d9b17ad606d9": {
- "key": "simcore/services/frontend/iterator-consumer/probe/boolean",
- "version": "1.0.0",
- "label": "OutputBool",
- "thumbnail": null,
- "inputs": {
- "in_1": {
- "nodeUuid": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3",
- "output": "out_1"
- }
- },
- "inputsUnits": {},
- "inputNodes": [
- "584e44d4-9a78-571f-a2a4-7d9c7b2396e3"
- ]
- }
},
- "prjOwner": "harpercynthia@example.com",
- "accessRights": {
- "3": {
- "read": true,
- "write": true,
- "delete": true
- }
+ "status_code": 201
+ },
+ {
+ "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "PATCH",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
},
- "tags": [],
- "classifiers": [],
- "state": {
- "locked": {
- "value": false,
- "status": "CLOSED"
- },
- "state": {
- "value": "NOT_STARTED"
- }
+ "request_payload": {
+ "name": "posix"
},
- "ui": {
- "workbench": {
- "096acfb2-8c38-560a-91d3-8911f4334289": {
- "position": {
- "x": 220,
- "y": 40
- }
- },
- "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": {
- "position": {
- "x": 240,
- "y": 400
- }
- },
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "position": {
- "x": 820,
- "y": 360
- }
- },
- "2a9452ac-d210-5e11-a631-1d73454bfd91": {
- "position": {
- "x": 580,
- "y": 200
- }
- },
- "3d4963ee-179f-5948-9086-dd9bef543f65": {
- "position": {
- "x": 580,
- "y": 40
- }
- },
- "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": {
- "position": {
- "x": 278,
- "y": 733
- }
- },
- "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": {
- "position": {
- "x": 200,
- "y": 840
- }
- },
- "ab014072-a95f-5775-bb34-5582a13245a6": {
- "position": {
- "x": 700,
- "y": 840
- }
- },
- "bcc36381-7377-533f-bb04-f785c0f8e2be": {
- "position": {
- "x": 220,
- "y": 200
- }
- },
- "cb5bc33d-6635-5680-98e3-a6ac57f908f4": {
- "position": {
- "x": 580,
- "y": 420
- }
- },
- "cd7eacb5-6806-5956-86c8-9b30ec588402": {
- "position": {
- "x": 562,
- "y": 586
- }
- },
- "d43949c5-5143-5738-bae9-7d231dcabe7f": {
- "position": {
- "x": 271,
- "y": 621
- }
- },
- "efaaeabf-e4bc-5667-a757-d9b17ad606d9": {
- "position": {
- "x": 656,
- "y": 720
- }
- }
- },
- "slideshow": {},
- "currentNodeId": "aeab71fe-f71b-11ee-8fca-0242ac140008",
- "mode": "workbench"
+ "status_code": 204
+ },
+ {
+ "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/inputs",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
},
- "quality": {
- "enabled": true,
- "tsr_target": {
- "r01": {
- "level": 4,
- "references": ""
- },
- "r02": {
- "level": 4,
- "references": ""
- },
- "r03": {
- "level": 4,
- "references": ""
- },
- "r04": {
- "level": 4,
- "references": ""
- },
- "r05": {
- "level": 4,
- "references": ""
- },
- "r06": {
- "level": 4,
- "references": ""
- },
- "r07": {
- "level": 4,
- "references": ""
- },
- "r08": {
- "level": 4,
- "references": ""
- },
- "r09": {
- "level": 4,
- "references": ""
- },
- "r10": {
- "level": 4,
- "references": ""
- },
- "r03b": {
- "references": ""
- },
- "r03c": {
- "references": ""
- },
- "r07b": {
- "references": ""
- },
- "r07c": {
- "references": ""
- },
- "r07d": {
- "references": ""
- },
- "r07e": {
- "references": ""
- },
- "r08b": {
- "references": ""
- },
- "r10b": {
- "references": ""
- }
- },
- "tsr_current": {
- "r01": {
- "level": 0,
- "references": ""
- },
- "r02": {
- "level": 0,
- "references": ""
- },
- "r03": {
- "level": 0,
- "references": ""
- },
- "r04": {
- "level": 0,
- "references": ""
- },
- "r05": {
- "level": 0,
- "references": ""
- },
- "r06": {
- "level": 0,
- "references": ""
- },
- "r07": {
- "level": 0,
- "references": ""
- },
- "r08": {
- "level": 0,
- "references": ""
- },
- "r09": {
- "level": 0,
- "references": ""
- },
- "r10": {
- "level": 0,
- "references": ""
- },
- "r03b": {
- "references": ""
- },
- "r03c": {
- "references": ""
- },
- "r07b": {
- "references": ""
- },
- "r07c": {
- "references": ""
- },
- "r07d": {
- "references": ""
- },
- "r07e": {
- "references": ""
- },
- "r08b": {
- "references": ""
- },
- "r10b": {
- "references": ""
+ "response_body": {
+ "data": {
+ "096acfb2-8c38-560a-91d3-8911f4334289": {
+ "key": "096acfb2-8c38-560a-91d3-8911f4334289",
+ "value": 1,
+ "label": "InputInt"
+ },
+ "bcc36381-7377-533f-bb04-f785c0f8e2be": {
+ "key": "bcc36381-7377-533f-bb04-f785c0f8e2be",
+ "value": "Foo",
+ "label": "InputString"
+ },
+ "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": {
+ "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3",
+ "value": [
+ 1
+ ],
+ "label": "InputArray"
+ },
+ "d43949c5-5143-5738-bae9-7d231dcabe7f": {
+ "key": "d43949c5-5143-5738-bae9-7d231dcabe7f",
+ "value": 1,
+ "label": "InputNumber"
+ },
+ "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": {
+ "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3",
+ "value": true,
+ "label": "InputBool"
+ }
}
- }
- },
- "dev": {}
- }
- },
- "status_code": 201
- },
- {
- "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "PATCH",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
- }
- ]
- },
- "request_payload": {
- "name": "posix"
- },
- "status_code": 204
- },
- {
- "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/inputs",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
}
- ]
},
- "response_body": {
- "data": {
- "096acfb2-8c38-560a-91d3-8911f4334289": {
- "key": "096acfb2-8c38-560a-91d3-8911f4334289",
- "value": 1,
- "label": "InputInt"
- },
- "bcc36381-7377-533f-bb04-f785c0f8e2be": {
- "key": "bcc36381-7377-533f-bb04-f785c0f8e2be",
- "value": "Foo",
- "label": "InputString"
+ {
+ "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/nodes/9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca/outputs",
+ "description": "",
+ "method": "PATCH",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str"
+ },
+ "response_value": "projects"
+ },
+ {
+ "in": "path",
+ "name": "node_id",
+ "required": true,
+ "schema": {
+ "title": "Node Id",
+ "type": "str"
+ },
+ "response_value": "nodes"
+ }
+ ]
},
- "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": {
- "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3",
- "value": [
- 1
- ],
- "label": "InputArray"
+ "request_payload": {
+ "outputs": {
+ "outFile": {
+ "store": 0,
+ "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json",
+ "label": "input.json",
+ "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc",
+ "dataset": null
+ }
+ }
},
- "d43949c5-5143-5738-bae9-7d231dcabe7f": {
- "key": "d43949c5-5143-5738-bae9-7d231dcabe7f",
- "value": 1,
- "label": "InputNumber"
+ "status_code": 204
+ },
+ {
+ "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs",
+ "description": "",
+ "method": "PATCH",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/inputs",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
},
- "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": {
- "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3",
- "value": true,
- "label": "InputBool"
- }
- }
- }
- },
- {
- "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/nodes/9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca/outputs",
- "description": "",
- "method": "PATCH",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str"
- },
- "response_value": "projects"
+ "request_payload": {
+ "key": "value"
},
- {
- "in": "path",
- "name": "node_id",
- "required": true,
- "schema": {
- "title": "Node Id",
- "type": "str"
- },
- "response_value": "nodes"
- }
- ]
- },
- "request_payload": {
- "outputs": {
- "outFile": {
- "store": 0,
- "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json",
- "label": "input.json",
- "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc",
- "dataset": null
- }
- }
- },
- "status_code": 204
- },
- {
- "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs",
- "description": "",
- "method": "PATCH",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/inputs",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
+ "response_body": {
+ "data": {
+ "096acfb2-8c38-560a-91d3-8911f4334289": {
+ "key": "096acfb2-8c38-560a-91d3-8911f4334289",
+ "value": 42,
+ "label": "InputInt"
+ },
+ "bcc36381-7377-533f-bb04-f785c0f8e2be": {
+ "key": "bcc36381-7377-533f-bb04-f785c0f8e2be",
+ "value": "Z43",
+ "label": "InputString"
+ },
+ "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": {
+ "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3",
+ "value": [
+ 1,
+ 2,
+ 3
+ ],
+ "label": "InputArray"
+ },
+ "d43949c5-5143-5738-bae9-7d231dcabe7f": {
+ "key": "d43949c5-5143-5738-bae9-7d231dcabe7f",
+ "value": 3.14,
+ "label": "InputNumber"
+ },
+ "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": {
+ "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3",
+ "value": false,
+ "label": "InputBool"
+ }
+ }
}
- ]
- },
- "request_payload": {
- "key": "value"
},
- "response_body": {
- "data": {
- "096acfb2-8c38-560a-91d3-8911f4334289": {
- "key": "096acfb2-8c38-560a-91d3-8911f4334289",
- "value": 42,
- "label": "InputInt"
- },
- "bcc36381-7377-533f-bb04-f785c0f8e2be": {
- "key": "bcc36381-7377-533f-bb04-f785c0f8e2be",
- "value": "Z43",
- "label": "InputString"
+ {
+ "name": "POST /computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:start",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/computations/{project_id}:start",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": {
- "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3",
- "value": [
- 1,
- 2,
- 3
- ],
- "label": "InputArray"
+ "request_payload": {},
+ "response_body": {
+ "data": {
+ "pipeline_id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c"
+ }
},
- "d43949c5-5143-5738-bae9-7d231dcabe7f": {
- "key": "d43949c5-5143-5738-bae9-7d231dcabe7f",
- "value": 3.14,
- "label": "InputNumber"
+ "status_code": 201
+ },
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": {
- "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3",
- "value": false,
- "label": "InputBool"
- }
- }
- }
- },
- {
- "name": "POST /computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:start",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/computations/{project_id}:start",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": null,
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
}
- ]
- },
- "request_payload": {},
- "response_body": {
- "data": {
- "pipeline_id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c"
- }
},
- "status_code": 201
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": null,
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": null,
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": null,
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": null,
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": null,
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": null,
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": null,
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": null,
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": null,
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": null,
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": null,
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": null,
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": null,
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": null,
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "SUCCESS",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 1.0,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": false,
+ "dependencies": [],
+ "currentStatus": "SUCCESS",
+ "progress": 1.0
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": "2024-04-15T15:50:37.747356+00:00",
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": null
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": null,
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1"
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "query": "user_id=1",
+ "response_body": {
+ "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "state": "SUCCESS",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ },
+ "progress": 1.0,
+ "node_states": {
+ "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
+ "modified": false,
+ "dependencies": [],
+ "currentStatus": "SUCCESS",
+ "progress": 1.0
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-15T15:50:31.284124+00:00",
+ "stopped": "2024-04-15T15:50:37.747356+00:00",
+ "submitted": "2024-04-15T15:50:31.162440+00:00",
+ "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
+ "stop_url": null
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "SUCCESS",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/outputs",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/outputs",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
},
- "progress": 1.0,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": false,
- "dependencies": [],
- "currentStatus": "SUCCESS",
- "progress": 1.0
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": "2024-04-15T15:50:37.747356+00:00",
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": null
- }
- },
- {
- "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
+ "response_body": {
+ "data": {
+ "ab014072-a95f-5775-bb34-5582a13245a6": {
+ "key": "ab014072-a95f-5775-bb34-5582a13245a6",
+ "value": {
+ "store": 0,
+ "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json",
+ "label": "input.json",
+ "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc",
+ "dataset": null
+ },
+ "label": "OutputFile"
+ },
+ "3d4963ee-179f-5948-9086-dd9bef543f65": {
+ "key": "3d4963ee-179f-5948-9086-dd9bef543f65",
+ "value": 42,
+ "label": "OutputInt"
+ },
+ "2a9452ac-d210-5e11-a631-1d73454bfd91": {
+ "key": "2a9452ac-d210-5e11-a631-1d73454bfd91",
+ "value": "Z43",
+ "label": "OutputString"
+ },
+ "cb5bc33d-6635-5680-98e3-a6ac57f908f4": {
+ "key": "cb5bc33d-6635-5680-98e3-a6ac57f908f4",
+ "value": [
+ 1,
+ 2,
+ 3
+ ],
+ "label": "OutputArray"
+ },
+ "cd7eacb5-6806-5956-86c8-9b30ec588402": {
+ "key": "cd7eacb5-6806-5956-86c8-9b30ec588402",
+ "value": 3.14,
+ "label": "OutputNumber"
+ },
+ "efaaeabf-e4bc-5667-a757-d9b17ad606d9": {
+ "key": "efaaeabf-e4bc-5667-a757-d9b17ad606d9",
+ "value": false,
+ "label": "OutputBool"
+ }
+ }
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "state": "SUCCESS",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": []
+ {
+ "name": "POST /simcore-s3/files/metadata:search_owned",
+ "description": "",
+ "method": "POST",
+ "host": "storage",
+ "path": {
+ "path": "/v0/simcore-s3/files/metadata:search_owned",
+ "path_parameters": []
},
- "progress": 1.0,
- "node_states": {
- "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": {
- "modified": false,
- "dependencies": [],
- "currentStatus": "SUCCESS",
- "progress": 1.0
- }
+ "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4",
+ "response_body": {
+ "data": []
}
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-15T15:50:31.284124+00:00",
- "stopped": "2024-04-15T15:50:37.747356+00:00",
- "submitted": "2024-04-15T15:50:31.162440+00:00",
- "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1",
- "stop_url": null
- }
- },
- {
- "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/outputs",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/outputs",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
- }
- ]
},
- "response_body": {
- "data": {
- "ab014072-a95f-5775-bb34-5582a13245a6": {
- "key": "ab014072-a95f-5775-bb34-5582a13245a6",
- "value": {
- "store": 0,
- "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json",
- "label": "input.json",
- "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc",
- "dataset": null
- },
- "label": "OutputFile"
- },
- "3d4963ee-179f-5948-9086-dd9bef543f65": {
- "key": "3d4963ee-179f-5948-9086-dd9bef543f65",
- "value": 42,
- "label": "OutputInt"
- },
- "2a9452ac-d210-5e11-a631-1d73454bfd91": {
- "key": "2a9452ac-d210-5e11-a631-1d73454bfd91",
- "value": "Z43",
- "label": "OutputString"
- },
- "cb5bc33d-6635-5680-98e3-a6ac57f908f4": {
- "key": "cb5bc33d-6635-5680-98e3-a6ac57f908f4",
- "value": [
- 1,
- 2,
- 3
- ],
- "label": "OutputArray"
+ {
+ "name": "POST /files/api%2Fd8bc0c02-c3ee-3cec-a562-e6fd3e00be4b%2Finput.json:soft-copy",
+ "description": "",
+ "method": "POST",
+ "host": "storage",
+ "path": {
+ "path": "/v0/files/{file_id}:soft-copy",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "file_id",
+ "required": true,
+ "schema": {
+ "title": "File Id",
+ "anyOf": [
+ {
+ "type": "str",
+ "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$"
+ },
+ {
+ "type": "str",
+ "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$"
+ }
+ ]
+ },
+ "response_value": "files"
+ }
+ ]
},
- "cd7eacb5-6806-5956-86c8-9b30ec588402": {
- "key": "cd7eacb5-6806-5956-86c8-9b30ec588402",
- "value": 3.14,
- "label": "OutputNumber"
+ "query": "user_id=1",
+ "request_payload": {
+ "link_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json"
},
- "efaaeabf-e4bc-5667-a757-d9b17ad606d9": {
- "key": "efaaeabf-e4bc-5667-a757-d9b17ad606d9",
- "value": false,
- "label": "OutputBool"
+ "response_body": {
+ "data": {
+ "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json",
+ "location_id": 0,
+ "project_name": null,
+ "node_name": null,
+ "file_name": "input.json",
+ "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json",
+ "created_at": "2024-04-15T15:50:27.134729",
+ "last_modified": "2024-04-15T15:50:27+00:00",
+ "file_size": 9,
+ "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc",
+ "is_soft_link": true,
+ "is_directory": false,
+ "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd"
+ },
+ "error": null
}
- }
- }
- },
- {
- "name": "POST /simcore-s3/files/metadata:search_owned",
- "description": "",
- "method": "POST",
- "host": "storage",
- "path": {
- "path": "/v0/simcore-s3/files/metadata:search_owned",
- "path_parameters": []
},
- "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4",
- "response_body": {
- "data": []
- }
- },
- {
- "name": "POST /files/api%2Fd8bc0c02-c3ee-3cec-a562-e6fd3e00be4b%2Finput.json:soft-copy",
- "description": "",
- "method": "POST",
- "host": "storage",
- "path": {
- "path": "/v0/files/{file_id}:soft-copy",
- "path_parameters": [
- {
- "in": "path",
- "name": "file_id",
- "required": true,
- "schema": {
- "title": "File Id",
- "anyOf": [
- {
- "type": "str",
- "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$"
- },
- {
- "type": "str",
- "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$"
- }
+ {
+ "name": "POST /simcore-s3/files/metadata:search",
+ "description": "",
+ "method": "POST",
+ "host": "storage",
+ "path": {
+ "path": "/v0/simcore-s3/files/metadata:search",
+ "path_parameters": []
+ },
+ "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4&access_right=read",
+ "response_body": {
+ "data": [
+ {
+ "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json",
+ "location_id": 0,
+ "project_name": null,
+ "node_name": null,
+ "file_name": "input.json",
+ "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json",
+ "created_at": "2024-04-15T15:50:27.134729",
+ "last_modified": "2024-04-15T15:50:27+00:00",
+ "file_size": 9,
+ "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc",
+ "is_soft_link": true,
+ "is_directory": false,
+ "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd"
+ }
]
- },
- "response_value": "files"
}
- ]
- },
- "query": "user_id=1",
- "request_payload": {
- "link_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json"
- },
- "response_body": {
- "data": {
- "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json",
- "location_id": 0,
- "project_name": null,
- "node_name": null,
- "file_name": "input.json",
- "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json",
- "created_at": "2024-04-15T15:50:27.134729",
- "last_modified": "2024-04-15T15:50:27+00:00",
- "file_size": 9,
- "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc",
- "is_soft_link": true,
- "is_directory": false,
- "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd"
- },
- "error": null
- }
- },
- {
- "name": "POST /simcore-s3/files/metadata:search",
- "description": "",
- "method": "POST",
- "host": "storage",
- "path": {
- "path": "/v0/simcore-s3/files/metadata:search",
- "path_parameters": []
},
- "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4&access_right=read",
- "response_body": {
- "data": [
- {
- "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json",
- "location_id": 0,
- "project_name": null,
- "node_name": null,
- "file_name": "input.json",
- "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json",
- "created_at": "2024-04-15T15:50:27.134729",
- "last_modified": "2024-04-15T15:50:27+00:00",
- "file_size": 9,
- "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc",
- "is_soft_link": true,
- "is_directory": false,
- "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd"
- }
- ]
- }
- },
- {
- "name": "GET /locations/0/files/api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4%2Finput.json",
- "description": "",
- "method": "GET",
- "host": "storage",
- "path": {
- "path": "/v0/locations/{location_id}/files/{file_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "file_id",
- "required": true,
- "schema": {
- "title": "File Id",
- "anyOf": [
- {
- "type": "str",
- "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$"
- },
- {
- "type": "str",
- "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$"
- }
+ {
+ "name": "GET /locations/0/files/api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4%2Finput.json",
+ "description": "",
+ "method": "GET",
+ "host": "storage",
+ "path": {
+ "path": "/v0/locations/{location_id}/files/{file_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "file_id",
+ "required": true,
+ "schema": {
+ "title": "File Id",
+ "anyOf": [
+ {
+ "type": "str",
+ "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$"
+ },
+ {
+ "type": "str",
+ "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$"
+ }
+ ]
+ },
+ "response_value": "files"
+ },
+ {
+ "in": "path",
+ "name": "location_id",
+ "required": true,
+ "schema": {
+ "title": "Location Id",
+ "type": "int"
+ },
+ "response_value": "locations"
+ }
]
- },
- "response_value": "files"
},
- {
- "in": "path",
- "name": "location_id",
- "required": true,
- "schema": {
- "title": "Location Id",
- "type": "int"
- },
- "response_value": "locations"
+ "query": "user_id=1",
+ "response_body": {
+ "data": {
+ "link": "http://127.0.0.1:9001/simcore/api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=12345678%2F20240415%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240415T155039Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=79a5cbc9b23ebb4084f4156acd6f7e6f891197dbd5a088327c9131768bd1c610"
+ }
}
- ]
},
- "query": "user_id=1",
- "response_body": {
- "data": {
- "link": "http://127.0.0.1:9001/simcore/api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=12345678%2F20240415%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240415T155039Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=79a5cbc9b23ebb4084f4156acd6f7e6f891197dbd5a088327c9131768bd1c610"
- }
+ {
+ "name": "DELETE /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
+ "description": "",
+ "method": "DELETE",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "projects"
+ }
+ ]
+ },
+ "status_code": 204
}
- },
- {
- "name": "DELETE /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c",
- "description": "",
- "method": "DELETE",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "projects"
- }
- ]
- },
- "status_code": 204
- }
]
diff --git a/services/api-server/tests/mocks/start_job_no_payment.json b/services/api-server/tests/mocks/start_job_no_payment.json
index fb20632634a..15b1e3b92b4 100644
--- a/services/api-server/tests/mocks/start_job_no_payment.json
+++ b/services/api-server/tests/mocks/start_job_no_payment.json
@@ -1,99 +1,97 @@
[
- {
- "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/computations/{project_id}:start",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": null,
- "request_payload": {
- "force_restart": false,
- "cluster_id": 0,
- "subgraph": []
- },
- "response_body": {
- "data": {
- "pipeline_id": "48323c7f-e379-4e16-8b58-dc69643f653d"
- }
- },
- "status_code": 201
- },
- {
- "name": "GET /v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": "user_id=1",
- "request_payload": null,
- "response_body": {
- "id": "48323c7f-e379-4e16-8b58-dc69643f653d",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": []
+ {
+ "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/computations/{project_id}:start",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.0,
- "node_states": {
- "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.0
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2023-10-26T14:19:05.389765+00:00",
- "stopped": null,
- "submitted": "2023-10-26T14:19:05.241935+00:00",
- "url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d:stop?user_id=1"
+ "query": null,
+ "request_payload": {
+ "force_restart": false,
+ "subgraph": []
+ },
+ "response_body": {
+ "data": {
+ "pipeline_id": "48323c7f-e379-4e16-8b58-dc69643f653d"
+ }
+ },
+ "status_code": 201
},
- "status_code": 200
- }
+ {
+ "name": "GET /v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "query": "user_id=1",
+ "request_payload": null,
+ "response_body": {
+ "id": "48323c7f-e379-4e16-8b58-dc69643f653d",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": []
+ },
+ "progress": 0.0,
+ "node_states": {
+ "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.0
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2023-10-26T14:19:05.389765+00:00",
+ "stopped": null,
+ "submitted": "2023-10-26T14:19:05.241935+00:00",
+ "url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d:stop?user_id=1"
+ },
+ "status_code": 200
+ }
]
diff --git a/services/api-server/tests/mocks/start_job_not_enough_credit.json b/services/api-server/tests/mocks/start_job_not_enough_credit.json
index 2167313c683..19f54e53ca6 100644
--- a/services/api-server/tests/mocks/start_job_not_enough_credit.json
+++ b/services/api-server/tests/mocks/start_job_not_enough_credit.json
@@ -1,242 +1,241 @@
[
- {
- "name": "GET /projects/48323c7f-e379-4e16-8b58-dc69643f653d",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "projects"
- }
- ]
- },
- "query": null,
- "request_payload": null,
- "response_body": {
- "data": {
- "uuid": "48323c7f-e379-4e16-8b58-dc69643f653d",
- "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d",
- "description": "Study associated to solver job:\n{\n \"id\": \"48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:13:07.998632+00:00\"\n}",
- "thumbnail": "https://via.placeholder.com/170x120.png",
- "creationDate": "2023-10-26T14:13:08.013Z",
- "lastChangeDate": "2023-10-26T14:13:08.013Z",
- "workspaceId": 3,
- "folderId": 2,
- "trashedAt": null,
- "workbench": {
- "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": {
- "key": "simcore/services/comp/itis/sleeper",
- "version": "2.0.2",
- "label": "sleeper",
- "progress": 0.0,
- "inputs": {
- "x": 4.33,
- "n": 55,
- "title": "Temperature",
- "enabled": true,
- "input_file": {
- "store": 0,
- "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt",
- "label": "input.txt"
- }
- },
- "inputsUnits": {},
- "inputNodes": [],
- "outputs": {},
- "state": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "NOT_STARTED",
- "progress": null
- }
- }
- },
- "prjOwner": "austin66@example.org",
- "accessRights": {
- "3": {
- "read": true,
- "write": true,
- "delete": true
- }
+ {
+ "name": "GET /projects/48323c7f-e379-4e16-8b58-dc69643f653d",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "projects"
+ }
+ ]
},
- "tags": [],
- "classifiers": [],
- "state": {
- "locked": {
- "value": false,
- "status": "CLOSED"
- },
- "state": {
- "value": "NOT_STARTED"
- }
- },
- "ui": {
- "workbench": {
- "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": {
- "position": {
- "x": 633,
- "y": 229
- }
+ "query": null,
+ "request_payload": null,
+ "response_body": {
+ "data": {
+ "uuid": "48323c7f-e379-4e16-8b58-dc69643f653d",
+ "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d",
+ "description": "Study associated to solver job:\n{\n \"id\": \"48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:13:07.998632+00:00\"\n}",
+ "thumbnail": "https://via.placeholder.com/170x120.png",
+ "creationDate": "2023-10-26T14:13:08.013Z",
+ "lastChangeDate": "2023-10-26T14:13:08.013Z",
+ "workspaceId": 3,
+ "folderId": 2,
+ "trashedAt": null,
+ "workbench": {
+ "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": {
+ "key": "simcore/services/comp/itis/sleeper",
+ "version": "2.0.2",
+ "label": "sleeper",
+ "progress": 0.0,
+ "inputs": {
+ "x": 4.33,
+ "n": 55,
+ "title": "Temperature",
+ "enabled": true,
+ "input_file": {
+ "store": 0,
+ "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt",
+ "label": "input.txt"
+ }
+ },
+ "inputsUnits": {},
+ "inputNodes": [],
+ "outputs": {},
+ "state": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "NOT_STARTED",
+ "progress": null
+ }
+ }
+ },
+ "prjOwner": "austin66@example.org",
+ "accessRights": {
+ "3": {
+ "read": true,
+ "write": true,
+ "delete": true
+ }
+ },
+ "tags": [],
+ "classifiers": [],
+ "state": {
+ "locked": {
+ "value": false,
+ "status": "CLOSED"
+ },
+ "state": {
+ "value": "NOT_STARTED"
+ }
+ },
+ "ui": {
+ "workbench": {
+ "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": {
+ "position": {
+ "x": 633,
+ "y": 229
+ }
+ }
+ },
+ "slideshow": {},
+ "currentNodeId": "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24",
+ "annotations": {}
+ },
+ "quality": {},
+ "dev": {}
}
- },
- "slideshow": {},
- "currentNodeId": "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24",
- "annotations": {}
},
- "quality": {},
- "dev": {}
- }
+ "status_code": 200
},
- "status_code": 200
- },
- {
- "name": "PUT /projects/48323c7f-e379-4e16-8b58-dc69643f653d/nodes/3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24/pricing-plan/1/pricing-unit/1",
- "description": "",
- "method": "PUT",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "pricing_plan_id",
- "required": true,
- "schema": {
- "title": "Pricing Plan Id",
- "type": "int",
- "pattern": null,
- "format": null,
- "exclusiveMinimum": true,
- "minimum": 0,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "pricing-plan"
+ {
+ "name": "PUT /projects/48323c7f-e379-4e16-8b58-dc69643f653d/nodes/3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24/pricing-plan/1/pricing-unit/1",
+ "description": "",
+ "method": "PUT",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "pricing_plan_id",
+ "required": true,
+ "schema": {
+ "title": "Pricing Plan Id",
+ "type": "int",
+ "pattern": null,
+ "format": null,
+ "exclusiveMinimum": true,
+ "minimum": 0,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "pricing-plan"
+ },
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "projects"
+ },
+ {
+ "in": "path",
+ "name": "pricing_unit_id",
+ "required": true,
+ "schema": {
+ "title": "Pricing Unit Id",
+ "type": "int",
+ "pattern": null,
+ "format": null,
+ "exclusiveMinimum": true,
+ "minimum": 0,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "pricing-unit"
+ },
+ {
+ "in": "path",
+ "name": "node_id",
+ "required": true,
+ "schema": {
+ "title": "Node Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "nodes"
+ }
+ ]
},
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "projects"
+ "query": null,
+ "request_payload": null,
+ "response_body": null,
+ "status_code": 204
+ },
+ {
+ "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/computations/{project_id}:start",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
},
- {
- "in": "path",
- "name": "pricing_unit_id",
- "required": true,
- "schema": {
- "title": "Pricing Unit Id",
- "type": "int",
- "pattern": null,
- "format": null,
- "exclusiveMinimum": true,
- "minimum": 0,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "pricing-unit"
+ "query": null,
+ "request_payload": {
+ "force_restart": false,
+ "subgraph": []
},
- {
- "in": "path",
- "name": "node_id",
- "required": true,
- "schema": {
- "title": "Node Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "nodes"
- }
- ]
- },
- "query": null,
- "request_payload": null,
- "response_body": null,
- "status_code": 204
- },
- {
- "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/computations/{project_id}:start",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": null,
- "request_payload": {
- "force_restart": false,
- "cluster_id": 0,
- "subgraph": []
- },
- "response_body": {
- "data": null,
- "error": {
- "logs": [],
- "errors": [
- {
- "code": "WalletNotEnoughCreditsError",
- "message": "Wallet does not have enough credits. Wallet 1 credit balance -200.11",
- "resource": null,
- "field": null
- }
- ],
- "status": 402,
- "message": "Unexpected client error"
- }
- },
- "status_code": 402
- }
+ "response_body": {
+ "data": null,
+ "error": {
+ "logs": [],
+ "errors": [
+ {
+ "code": "WalletNotEnoughCreditsError",
+ "message": "Wallet does not have enough credits. Wallet 1 credit balance -200.11",
+ "resource": null,
+ "field": null
+ }
+ ],
+ "status": 402,
+ "message": "Unexpected client error"
+ }
+ },
+ "status_code": 402
+ }
]
diff --git a/services/api-server/tests/mocks/start_job_with_payment.json b/services/api-server/tests/mocks/start_job_with_payment.json
index 1a7a829cf11..ac3aed74ecb 100644
--- a/services/api-server/tests/mocks/start_job_with_payment.json
+++ b/services/api-server/tests/mocks/start_job_with_payment.json
@@ -1,288 +1,286 @@
[
- {
- "name": "GET /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e",
- "description": "",
- "method": "GET",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "projects"
- }
- ]
- },
- "query": null,
- "request_payload": null,
- "response_body": {
- "data": {
- "uuid": "e551e994-a68d-4c26-b6fc-59019b35ee6e",
- "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e",
- "description": "Study associated to solver job:\n{\n \"id\": \"e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:10:11.103041+00:00\"\n}",
- "thumbnail": "https://via.placeholder.com/170x120.png",
- "creationDate": "2023-10-26T14:10:11.118Z",
- "lastChangeDate": "2023-10-26T14:10:11.118Z",
- "workspaceId": 12,
- "folderId": 2,
- "trashedAt": null,
- "workbench": {
- "657b124c-0697-5166-b820-a2ea2704ae84": {
- "key": "simcore/services/comp/itis/sleeper",
- "version": "2.0.2",
- "label": "sleeper",
- "progress": 0.0,
- "inputs": {
- "x": 4.33,
- "n": 55,
- "title": "Temperature",
- "enabled": true,
- "input_file": {
- "store": 0,
- "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt",
- "label": "input.txt"
- }
- },
- "inputsUnits": {},
- "inputNodes": [],
- "outputs": {},
- "state": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "NOT_STARTED",
- "progress": null
- }
- }
- },
- "prjOwner": "freemanryan@example.net",
- "accessRights": {
- "3": {
- "read": true,
- "write": true,
- "delete": true
- }
+ {
+ "name": "GET /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e",
+ "description": "",
+ "method": "GET",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "projects"
+ }
+ ]
},
- "tags": [],
- "classifiers": [],
- "state": {
- "locked": {
- "value": false,
- "status": "CLOSED"
- },
- "state": {
- "value": "NOT_STARTED"
- }
- },
- "ui": {
- "workbench": {
- "657b124c-0697-5166-b820-a2ea2704ae84": {
- "position": {
- "x": 633,
- "y": 229
- }
+ "query": null,
+ "request_payload": null,
+ "response_body": {
+ "data": {
+ "uuid": "e551e994-a68d-4c26-b6fc-59019b35ee6e",
+ "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e",
+ "description": "Study associated to solver job:\n{\n \"id\": \"e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:10:11.103041+00:00\"\n}",
+ "thumbnail": "https://via.placeholder.com/170x120.png",
+ "creationDate": "2023-10-26T14:10:11.118Z",
+ "lastChangeDate": "2023-10-26T14:10:11.118Z",
+ "workspaceId": 12,
+ "folderId": 2,
+ "trashedAt": null,
+ "workbench": {
+ "657b124c-0697-5166-b820-a2ea2704ae84": {
+ "key": "simcore/services/comp/itis/sleeper",
+ "version": "2.0.2",
+ "label": "sleeper",
+ "progress": 0.0,
+ "inputs": {
+ "x": 4.33,
+ "n": 55,
+ "title": "Temperature",
+ "enabled": true,
+ "input_file": {
+ "store": 0,
+ "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt",
+ "label": "input.txt"
+ }
+ },
+ "inputsUnits": {},
+ "inputNodes": [],
+ "outputs": {},
+ "state": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "NOT_STARTED",
+ "progress": null
+ }
+ }
+ },
+ "prjOwner": "freemanryan@example.net",
+ "accessRights": {
+ "3": {
+ "read": true,
+ "write": true,
+ "delete": true
+ }
+ },
+ "tags": [],
+ "classifiers": [],
+ "state": {
+ "locked": {
+ "value": false,
+ "status": "CLOSED"
+ },
+ "state": {
+ "value": "NOT_STARTED"
+ }
+ },
+ "ui": {
+ "workbench": {
+ "657b124c-0697-5166-b820-a2ea2704ae84": {
+ "position": {
+ "x": 633,
+ "y": 229
+ }
+ }
+ },
+ "slideshow": {},
+ "currentNodeId": "657b124c-0697-5166-b820-a2ea2704ae84",
+ "annotations": {}
+ },
+ "quality": {},
+ "dev": {}
}
- },
- "slideshow": {},
- "currentNodeId": "657b124c-0697-5166-b820-a2ea2704ae84",
- "annotations": {}
},
- "quality": {},
- "dev": {}
- }
+ "status_code": 200
},
- "status_code": 200
- },
- {
- "name": "PUT /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e/nodes/657b124c-0697-5166-b820-a2ea2704ae84/pricing-plan/1/pricing-unit/1",
- "description": "",
- "method": "PUT",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "pricing_plan_id",
- "required": true,
- "schema": {
- "title": "Pricing Plan Id",
- "type": "int",
- "pattern": null,
- "format": null,
- "exclusiveMinimum": true,
- "minimum": 0,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "pricing-plan"
+ {
+ "name": "PUT /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e/nodes/657b124c-0697-5166-b820-a2ea2704ae84/pricing-plan/1/pricing-unit/1",
+ "description": "",
+ "method": "PUT",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "pricing_plan_id",
+ "required": true,
+ "schema": {
+ "title": "Pricing Plan Id",
+ "type": "int",
+ "pattern": null,
+ "format": null,
+ "exclusiveMinimum": true,
+ "minimum": 0,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "pricing-plan"
+ },
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "projects"
+ },
+ {
+ "in": "path",
+ "name": "pricing_unit_id",
+ "required": true,
+ "schema": {
+ "title": "Pricing Unit Id",
+ "type": "int",
+ "pattern": null,
+ "format": null,
+ "exclusiveMinimum": true,
+ "minimum": 0,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "pricing-unit"
+ },
+ {
+ "in": "path",
+ "name": "node_id",
+ "required": true,
+ "schema": {
+ "title": "Node Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "nodes"
+ }
+ ]
},
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "projects"
+ "query": null,
+ "request_payload": null,
+ "response_body": null,
+ "status_code": 204
+ },
+ {
+ "name": "POST /computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:start",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/computations/{project_id}:start",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
},
- {
- "in": "path",
- "name": "pricing_unit_id",
- "required": true,
- "schema": {
- "title": "Pricing Unit Id",
- "type": "int",
- "pattern": null,
- "format": null,
- "exclusiveMinimum": true,
- "minimum": 0,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "pricing-unit"
+ "query": null,
+ "request_payload": {
+ "force_restart": false,
+ "subgraph": []
},
- {
- "in": "path",
- "name": "node_id",
- "required": true,
- "schema": {
- "title": "Node Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "nodes"
- }
- ]
- },
- "query": null,
- "request_payload": null,
- "response_body": null,
- "status_code": 204
- },
- {
- "name": "POST /computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:start",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/computations/{project_id}:start",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": null,
- "request_payload": {
- "force_restart": false,
- "cluster_id": 0,
- "subgraph": []
- },
- "response_body": {
- "data": {
- "pipeline_id": "e551e994-a68d-4c26-b6fc-59019b35ee6e"
- }
- },
- "status_code": 201
- },
- {
- "name": "GET /v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": "user_id=1",
- "request_payload": null,
- "response_body": {
- "id": "e551e994-a68d-4c26-b6fc-59019b35ee6e",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "657b124c-0697-5166-b820-a2ea2704ae84": []
+ "response_body": {
+ "data": {
+ "pipeline_id": "e551e994-a68d-4c26-b6fc-59019b35ee6e"
+ }
},
- "progress": 0.0,
- "node_states": {
- "657b124c-0697-5166-b820-a2ea2704ae84": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.0
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2023-10-26T14:11:20.606448+00:00",
- "stopped": null,
- "submitted": "2023-10-26T14:11:20.460760+00:00",
- "url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:stop?user_id=1"
+ "status_code": 201
},
- "status_code": 200
- }
+ {
+ "name": "GET /v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "query": "user_id=1",
+ "request_payload": null,
+ "response_body": {
+ "id": "e551e994-a68d-4c26-b6fc-59019b35ee6e",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "657b124c-0697-5166-b820-a2ea2704ae84": []
+ },
+ "progress": 0.0,
+ "node_states": {
+ "657b124c-0697-5166-b820-a2ea2704ae84": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.0
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2023-10-26T14:11:20.606448+00:00",
+ "stopped": null,
+ "submitted": "2023-10-26T14:11:20.460760+00:00",
+ "url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:stop?user_id=1"
+ },
+ "status_code": 200
+ }
]
diff --git a/services/api-server/tests/mocks/start_solver_job.json b/services/api-server/tests/mocks/start_solver_job.json
index 6c54ff0a058..f779cd45b9d 100644
--- a/services/api-server/tests/mocks/start_solver_job.json
+++ b/services/api-server/tests/mocks/start_solver_job.json
@@ -1,80 +1,79 @@
[
- {
- "name": "POST /computations/b9faf8d8-4928-4e50-af40-3690712c5481:start",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/computations/{project_id}:start",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
- }
- ]
- },
- "request_payload": {},
- "response_body": {
- "data": {
- "pipeline_id": "b9faf8d8-4928-4e50-af40-3690712c5481"
- }
- },
- "status_code": 409
- },
- {
- "name": "GET /v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "format": "uuid"
- },
- "response_value": "computations"
- }
- ]
+ {
+ "name": "POST /computations/b9faf8d8-4928-4e50-af40-3690712c5481:start",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/computations/{project_id}:start",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "request_payload": {},
+ "response_body": {
+ "data": {
+ "pipeline_id": "b9faf8d8-4928-4e50-af40-3690712c5481"
+ }
+ },
+ "status_code": 409
},
- "query": "user_id=1",
- "response_body": {
- "id": "b9faf8d8-4928-4e50-af40-3690712c5481",
- "state": "STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": []
+ {
+ "name": "GET /v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "format": "uuid"
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.05,
- "node_states": {
- "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "STARTED",
- "progress": 0.05
- }
+ "query": "user_id=1",
+ "response_body": {
+ "id": "b9faf8d8-4928-4e50-af40-3690712c5481",
+ "state": "STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": []
+ },
+ "progress": 0.05,
+ "node_states": {
+ "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "STARTED",
+ "progress": 0.05
+ }
+ }
+ },
+ "iteration": 2,
+ "started": "2024-06-18T20:33:46.482456+00:00",
+ "stopped": "2024-06-18T20:31:25.399647+00:00",
+ "submitted": "2024-06-18T20:33:46.384524+00:00",
+ "url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481?user_id=1",
+ "stop_url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481:stop?user_id=1"
}
- },
- "iteration": 2,
- "cluster_id": 0,
- "started": "2024-06-18T20:33:46.482456+00:00",
- "stopped": "2024-06-18T20:31:25.399647+00:00",
- "submitted": "2024-06-18T20:33:46.384524+00:00",
- "url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481?user_id=1",
- "stop_url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481:stop?user_id=1"
}
- }
]
diff --git a/services/api-server/tests/mocks/stop_job.json b/services/api-server/tests/mocks/stop_job.json
index e840e1b5cca..f6574562dbf 100644
--- a/services/api-server/tests/mocks/stop_job.json
+++ b/services/api-server/tests/mocks/stop_job.json
@@ -1,118 +1,116 @@
[
- {
- "name": "POST /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop",
- "description": "",
- "method": "POST",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}:stop",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": null,
- "request_payload": {
- "user_id": 1
- },
- "response_body": {
- "id": "4989fa99-b567-43bd-978a-68c2b95fdabc",
- "state": "NOT_STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": []
+ {
+ "name": "POST /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop",
+ "description": "",
+ "method": "POST",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}:stop",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.0,
- "node_states": {
- "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "NOT_STARTED",
- "progress": null
- }
- }
- },
- "iteration": null,
- "cluster_id": null,
- "started": null,
- "stopped": null,
- "submitted": "2023-11-17T13:04:59.327557+00:00",
- "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop",
- "stop_url": null
- },
- "status_code": 202
- },
- {
- "name": "GET /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": "user_id=1",
- "request_payload": null,
- "response_body": {
- "id": "4989fa99-b567-43bd-978a-68c2b95fdabc",
- "state": "NOT_STARTED",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": []
+ "query": null,
+ "request_payload": {
+ "user_id": 1
},
- "progress": 0.0,
- "node_states": {
- "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "NOT_STARTED",
- "progress": null
- }
- }
- },
- "iteration": null,
- "cluster_id": null,
- "started": null,
- "stopped": null,
- "submitted": "2023-11-17T13:04:59.327557+00:00",
- "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc?user_id=1",
- "stop_url": null
+ "response_body": {
+ "id": "4989fa99-b567-43bd-978a-68c2b95fdabc",
+ "state": "NOT_STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": []
+ },
+ "progress": 0.0,
+ "node_states": {
+ "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "NOT_STARTED",
+ "progress": null
+ }
+ }
+ },
+ "iteration": null,
+ "started": null,
+ "stopped": null,
+ "submitted": "2023-11-17T13:04:59.327557+00:00",
+ "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop",
+ "stop_url": null
+ },
+ "status_code": 202
},
- "status_code": 200
- }
+ {
+ "name": "GET /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "query": "user_id=1",
+ "request_payload": null,
+ "response_body": {
+ "id": "4989fa99-b567-43bd-978a-68c2b95fdabc",
+ "state": "NOT_STARTED",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": []
+ },
+ "progress": 0.0,
+ "node_states": {
+ "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "NOT_STARTED",
+ "progress": null
+ }
+ }
+ },
+ "iteration": null,
+ "started": null,
+ "stopped": null,
+ "submitted": "2023-11-17T13:04:59.327557+00:00",
+ "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc?user_id=1",
+ "stop_url": null
+ },
+ "status_code": 200
+ }
]
diff --git a/services/api-server/tests/mocks/study_job_start_stop_delete.json b/services/api-server/tests/mocks/study_job_start_stop_delete.json
index 823f2639334..d279e1dc240 100644
--- a/services/api-server/tests/mocks/study_job_start_stop_delete.json
+++ b/services/api-server/tests/mocks/study_job_start_stop_delete.json
@@ -1,243 +1,240 @@
[
- {
- "name": "POST /computations/10da03f0-f1bc-11ee-9e42-0242ac140012:start",
- "description": "",
- "method": "POST",
- "host": "webserver",
- "path": {
- "path": "/v0/computations/{project_id}:start",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": null,
- "request_payload": {},
- "response_body": {
- "data": {
- "pipeline_id": "10da03f0-f1bc-11ee-9e42-0242ac140012"
- }
- },
- "status_code": 201
- },
- {
- "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": "user_id=1",
- "request_payload": null,
- "response_body": {
- "id": "10da03f0-f1bc-11ee-9e42-0242ac140012",
- "state": "PENDING",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": []
+ {
+ "name": "POST /computations/10da03f0-f1bc-11ee-9e42-0242ac140012:start",
+ "description": "",
+ "method": "POST",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/computations/{project_id}:start",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
},
- "progress": 0.0,
- "node_states": {
- "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": {
- "modified": true,
- "dependencies": [],
- "currentStatus": "PENDING",
- "progress": null
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": null,
- "stopped": null,
- "submitted": "2024-04-03T13:15:00.045631+00:00",
- "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1",
- "stop_url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop?user_id=1"
- },
- "status_code": 200
- },
- {
- "name": "POST /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop",
- "description": "",
- "method": "POST",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}:stop",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
- },
- "query": null,
- "request_payload": {
- "user_id": 1
- },
- "response_body": {
- "id": "10da03f0-f1bc-11ee-9e42-0242ac140012",
- "state": "SUCCESS",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": []
+ "query": null,
+ "request_payload": {},
+ "response_body": {
+ "data": {
+ "pipeline_id": "10da03f0-f1bc-11ee-9e42-0242ac140012"
+ }
},
- "progress": 1.0,
- "node_states": {
- "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": {
- "modified": false,
- "dependencies": [],
- "currentStatus": "SUCCESS",
- "progress": 1.0
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-03T13:15:00.425270+00:00",
- "stopped": "2024-04-03T13:15:08.997076+00:00",
- "submitted": "2024-04-03T13:15:00.045631+00:00",
- "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop",
- "stop_url": null
+ "status_code": 201
},
- "status_code": 202
- },
- {
- "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012",
- "description": "",
- "method": "GET",
- "host": "director-v2",
- "path": {
- "path": "/v2/computations/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "computations"
- }
- ]
+ {
+ "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "query": "user_id=1",
+ "request_payload": null,
+ "response_body": {
+ "id": "10da03f0-f1bc-11ee-9e42-0242ac140012",
+ "state": "PENDING",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": []
+ },
+ "progress": 0.0,
+ "node_states": {
+ "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": {
+ "modified": true,
+ "dependencies": [],
+ "currentStatus": "PENDING",
+ "progress": null
+ }
+ }
+ },
+ "iteration": 1,
+ "started": null,
+ "stopped": null,
+ "submitted": "2024-04-03T13:15:00.045631+00:00",
+ "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1",
+ "stop_url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop?user_id=1"
+ },
+ "status_code": 200
},
- "query": "user_id=1",
- "request_payload": null,
- "response_body": {
- "id": "10da03f0-f1bc-11ee-9e42-0242ac140012",
- "state": "SUCCESS",
- "result": null,
- "pipeline_details": {
- "adjacency_list": {
- "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": []
+ {
+ "name": "POST /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop",
+ "description": "",
+ "method": "POST",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}:stop",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "query": null,
+ "request_payload": {
+ "user_id": 1
+ },
+ "response_body": {
+ "id": "10da03f0-f1bc-11ee-9e42-0242ac140012",
+ "state": "SUCCESS",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": []
+ },
+ "progress": 1.0,
+ "node_states": {
+ "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": {
+ "modified": false,
+ "dependencies": [],
+ "currentStatus": "SUCCESS",
+ "progress": 1.0
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-03T13:15:00.425270+00:00",
+ "stopped": "2024-04-03T13:15:08.997076+00:00",
+ "submitted": "2024-04-03T13:15:00.045631+00:00",
+ "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop",
+ "stop_url": null
},
- "progress": 1.0,
- "node_states": {
- "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": {
- "modified": false,
- "dependencies": [],
- "currentStatus": "SUCCESS",
- "progress": 1.0
- }
- }
- },
- "iteration": 1,
- "cluster_id": 0,
- "started": "2024-04-03T13:15:00.425270+00:00",
- "stopped": "2024-04-03T13:15:08.997076+00:00",
- "submitted": "2024-04-03T13:15:00.045631+00:00",
- "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1",
- "stop_url": null
+ "status_code": 202
},
- "status_code": 200
- },
- {
- "name": "DELETE /projects/10da03f0-f1bc-11ee-9e42-0242ac140012",
- "description": "",
- "method": "DELETE",
- "host": "webserver",
- "path": {
- "path": "/v0/projects/{project_id}",
- "path_parameters": [
- {
- "in": "path",
- "name": "project_id",
- "required": true,
- "schema": {
- "title": "Project Id",
- "type": "str",
- "pattern": null,
- "format": "uuid",
- "exclusiveMinimum": null,
- "minimum": null,
- "anyOf": null,
- "allOf": null,
- "oneOf": null
- },
- "response_value": "projects"
- }
- ]
+ {
+ "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012",
+ "description": "",
+ "method": "GET",
+ "host": "director-v2",
+ "path": {
+ "path": "/v2/computations/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "computations"
+ }
+ ]
+ },
+ "query": "user_id=1",
+ "request_payload": null,
+ "response_body": {
+ "id": "10da03f0-f1bc-11ee-9e42-0242ac140012",
+ "state": "SUCCESS",
+ "result": null,
+ "pipeline_details": {
+ "adjacency_list": {
+ "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": []
+ },
+ "progress": 1.0,
+ "node_states": {
+ "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": {
+ "modified": false,
+ "dependencies": [],
+ "currentStatus": "SUCCESS",
+ "progress": 1.0
+ }
+ }
+ },
+ "iteration": 1,
+ "started": "2024-04-03T13:15:00.425270+00:00",
+ "stopped": "2024-04-03T13:15:08.997076+00:00",
+ "submitted": "2024-04-03T13:15:00.045631+00:00",
+ "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1",
+ "stop_url": null
+ },
+ "status_code": 200
},
- "query": null,
- "request_payload": null,
- "response_body": null,
- "status_code": 204
- }
+ {
+ "name": "DELETE /projects/10da03f0-f1bc-11ee-9e42-0242ac140012",
+ "description": "",
+ "method": "DELETE",
+ "host": "webserver",
+ "path": {
+ "path": "/v0/projects/{project_id}",
+ "path_parameters": [
+ {
+ "in": "path",
+ "name": "project_id",
+ "required": true,
+ "schema": {
+ "title": "Project Id",
+ "type": "str",
+ "pattern": null,
+ "format": "uuid",
+ "exclusiveMinimum": null,
+ "minimum": null,
+ "anyOf": null,
+ "allOf": null,
+ "oneOf": null
+ },
+ "response_value": "projects"
+ }
+ ]
+ },
+ "query": null,
+ "request_payload": null,
+ "response_body": null,
+ "status_code": 204
+ }
]
diff --git a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py
index 237b846abaf..865983537b0 100644
--- a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py
+++ b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py
@@ -231,7 +231,6 @@ async def test_run_solver_job(
"result",
"pipeline_details",
"iteration",
- "cluster_id",
"url",
"stop_url",
"submitted",
@@ -269,7 +268,6 @@ async def test_run_solver_job(
"progress": 0.0,
},
"iteration": 1,
- "cluster_id": 0,
"url": "http://test.com",
"stop_url": "http://test.com",
"started": None,
@@ -365,7 +363,6 @@ async def test_run_solver_job(
resp = await client.post(
f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}:start",
auth=auth,
- params={"cluster_id": 1},
)
assert resp.status_code == status.HTTP_202_ACCEPTED
assert mocked_directorv2_service_api["inspect_computation"].called
diff --git a/services/autoscaling/src/simcore_service_autoscaling/core/settings.py b/services/autoscaling/src/simcore_service_autoscaling/core/settings.py
index 51e7a06e7d5..347c4c978c3 100644
--- a/services/autoscaling/src/simcore_service_autoscaling/core/settings.py
+++ b/services/autoscaling/src/simcore_service_autoscaling/core/settings.py
@@ -11,7 +11,7 @@
PortInt,
VersionTag,
)
-from models_library.clusters import InternalClusterAuthentication
+from models_library.clusters import ClusterAuthentication
from models_library.docker import DockerLabelKey
from pydantic import (
AliasChoices,
@@ -193,9 +193,9 @@ class NodesMonitoringSettings(BaseCustomSettings):
class DaskMonitoringSettings(BaseCustomSettings):
DASK_MONITORING_URL: AnyUrl = Field(
- ..., description="the url to the osparc-dask-scheduler"
+ ..., description="the url to the dask-scheduler"
)
- DASK_SCHEDULER_AUTH: InternalClusterAuthentication = Field(
+ DASK_SCHEDULER_AUTH: ClusterAuthentication = Field(
...,
description="defines the authentication of the clusters created via clusters-keeper (can be None or TLS)",
)
diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py
index cc6dcef68a4..a632afe956e 100644
--- a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py
+++ b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py
@@ -4,7 +4,7 @@
from aws_library.ec2 import EC2InstanceData, EC2Tags, Resources
from fastapi import FastAPI
-from models_library.clusters import InternalClusterAuthentication
+from models_library.clusters import ClusterAuthentication
from models_library.docker import (
DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY,
DockerLabelKey,
@@ -37,7 +37,7 @@ def _scheduler_url(app: FastAPI) -> AnyUrl:
return app_settings.AUTOSCALING_DASK.DASK_MONITORING_URL
-def _scheduler_auth(app: FastAPI) -> InternalClusterAuthentication:
+def _scheduler_auth(app: FastAPI) -> ClusterAuthentication:
app_settings = get_application_settings(app)
assert app_settings.AUTOSCALING_DASK # nosec
return app_settings.AUTOSCALING_DASK.DASK_SCHEDULER_AUTH
diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py b/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py
index b547ce2bbd4..4c5ee00f86c 100644
--- a/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py
+++ b/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py
@@ -12,7 +12,7 @@
from aws_library.ec2 import EC2InstanceData, Resources
from dask_task_models_library.resource_constraints import DaskTaskResources
from distributed.core import Status
-from models_library.clusters import InternalClusterAuthentication, TLSAuthentication
+from models_library.clusters import ClusterAuthentication, TLSAuthentication
from pydantic import AnyUrl, ByteSize, TypeAdapter
from ..core.errors import (
@@ -43,7 +43,7 @@ async def _wrap_client_async_routine(
@contextlib.asynccontextmanager
async def _scheduler_client(
- url: AnyUrl, authentication: InternalClusterAuthentication
+ url: AnyUrl, authentication: ClusterAuthentication
) -> AsyncIterator[distributed.Client]:
"""
Raises:
@@ -116,7 +116,7 @@ def _find_by_worker_host(
async def is_worker_connected(
scheduler_url: AnyUrl,
- authentication: InternalClusterAuthentication,
+ authentication: ClusterAuthentication,
worker_ec2_instance: EC2InstanceData,
) -> bool:
with contextlib.suppress(DaskNoWorkersError, DaskWorkerNotFoundError):
@@ -130,7 +130,7 @@ async def is_worker_connected(
async def is_worker_retired(
scheduler_url: AnyUrl,
- authentication: InternalClusterAuthentication,
+ authentication: ClusterAuthentication,
worker_ec2_instance: EC2InstanceData,
) -> bool:
with contextlib.suppress(DaskNoWorkersError, DaskWorkerNotFoundError):
@@ -156,7 +156,7 @@ def _dask_key_to_dask_task_id(key: dask.typing.Key) -> DaskTaskId:
async def list_unrunnable_tasks(
scheduler_url: AnyUrl,
- authentication: InternalClusterAuthentication,
+ authentication: ClusterAuthentication,
) -> list[DaskTask]:
"""
Raises:
@@ -188,7 +188,7 @@ def _list_tasks(
async def list_processing_tasks_per_worker(
scheduler_url: AnyUrl,
- authentication: InternalClusterAuthentication,
+ authentication: ClusterAuthentication,
) -> dict[DaskWorkerUrl, list[DaskTask]]:
"""
Raises:
@@ -227,7 +227,7 @@ def _list_processing_tasks(
async def get_worker_still_has_results_in_memory(
scheduler_url: AnyUrl,
- authentication: InternalClusterAuthentication,
+ authentication: ClusterAuthentication,
ec2_instance: EC2InstanceData,
) -> int:
"""
@@ -246,7 +246,7 @@ async def get_worker_still_has_results_in_memory(
async def get_worker_used_resources(
scheduler_url: AnyUrl,
- authentication: InternalClusterAuthentication,
+ authentication: ClusterAuthentication,
ec2_instance: EC2InstanceData,
) -> Resources:
"""
@@ -299,7 +299,7 @@ def _list_processing_tasks_on_worker(
async def compute_cluster_total_resources(
scheduler_url: AnyUrl,
- authentication: InternalClusterAuthentication,
+ authentication: ClusterAuthentication,
instances: list[AssociatedInstance],
) -> Resources:
if not instances:
@@ -320,7 +320,7 @@ async def compute_cluster_total_resources(
async def try_retire_nodes(
- scheduler_url: AnyUrl, authentication: InternalClusterAuthentication
+ scheduler_url: AnyUrl, authentication: ClusterAuthentication
) -> None:
async with _scheduler_client(scheduler_url, authentication) as client:
await _wrap_client_async_routine(
diff --git a/services/autoscaling/tests/unit/test_modules_dask.py b/services/autoscaling/tests/unit/test_modules_dask.py
index ae2ed0c5f15..36c45a70752 100644
--- a/services/autoscaling/tests/unit/test_modules_dask.py
+++ b/services/autoscaling/tests/unit/test_modules_dask.py
@@ -13,7 +13,7 @@
from aws_library.ec2 import Resources
from faker import Faker
from models_library.clusters import (
- InternalClusterAuthentication,
+ ClusterAuthentication,
NoAuthentication,
TLSAuthentication,
)
@@ -52,7 +52,7 @@
"authentication", _authentication_types, ids=lambda p: f"authentication-{p.type}"
)
async def test__scheduler_client_with_wrong_url(
- faker: Faker, authentication: InternalClusterAuthentication
+ faker: Faker, authentication: ClusterAuthentication
):
with pytest.raises(DaskSchedulerNotFoundError):
async with _scheduler_client(
@@ -72,7 +72,7 @@ def scheduler_url(dask_spec_local_cluster: distributed.SpecCluster) -> AnyUrl:
@pytest.fixture
-def scheduler_authentication() -> InternalClusterAuthentication:
+def scheduler_authentication() -> ClusterAuthentication:
return NoAuthentication()
@@ -92,7 +92,7 @@ def dask_workers_config() -> dict[str, Any]:
async def test__scheduler_client(
- scheduler_url: AnyUrl, scheduler_authentication: InternalClusterAuthentication
+ scheduler_url: AnyUrl, scheduler_authentication: ClusterAuthentication
):
async with _scheduler_client(scheduler_url, scheduler_authentication):
...
@@ -109,7 +109,7 @@ async def test_list_unrunnable_tasks_with_no_workers(
async def test_list_unrunnable_tasks(
scheduler_url: AnyUrl,
- scheduler_authentication: InternalClusterAuthentication,
+ scheduler_authentication: ClusterAuthentication,
create_dask_task: Callable[[DaskTaskResources], distributed.Future],
):
# we have nothing running now
@@ -131,7 +131,7 @@ async def test_list_unrunnable_tasks(
async def test_list_processing_tasks(
scheduler_url: AnyUrl,
- scheduler_authentication: InternalClusterAuthentication,
+ scheduler_authentication: ClusterAuthentication,
dask_spec_cluster_client: distributed.Client,
):
def _add_fct(x: int, y: int) -> int:
@@ -190,7 +190,7 @@ def fake_ec2_instance_data_with_invalid_ec2_name(
async def test_get_worker_still_has_results_in_memory_with_invalid_ec2_name_raises(
scheduler_url: AnyUrl,
- scheduler_authentication: InternalClusterAuthentication,
+ scheduler_authentication: ClusterAuthentication,
fake_ec2_instance_data_with_invalid_ec2_name: EC2InstanceData,
):
with pytest.raises(Ec2InvalidDnsNameError):
@@ -216,7 +216,7 @@ async def test_get_worker_still_has_results_in_memory_with_no_workers_raises(
async def test_get_worker_still_has_results_in_memory_with_invalid_worker_host_raises(
scheduler_url: AnyUrl,
- scheduler_authentication: InternalClusterAuthentication,
+ scheduler_authentication: ClusterAuthentication,
fake_ec2_instance_data: Callable[..., EC2InstanceData],
):
ec2_instance_data = fake_ec2_instance_data()
@@ -229,7 +229,7 @@ async def test_get_worker_still_has_results_in_memory_with_invalid_worker_host_r
@pytest.mark.parametrize("fct_shall_err", [True, False], ids=str)
async def test_get_worker_still_has_results_in_memory(
scheduler_url: AnyUrl,
- scheduler_authentication: InternalClusterAuthentication,
+ scheduler_authentication: ClusterAuthentication,
dask_spec_cluster_client: distributed.Client,
fake_localhost_ec2_instance_data: EC2InstanceData,
fct_shall_err: bool,
@@ -291,7 +291,7 @@ def _add_fct(x: int, y: int) -> int:
async def test_worker_used_resources_with_invalid_ec2_name_raises(
scheduler_url: AnyUrl,
- scheduler_authentication: InternalClusterAuthentication,
+ scheduler_authentication: ClusterAuthentication,
fake_ec2_instance_data_with_invalid_ec2_name: EC2InstanceData,
):
with pytest.raises(Ec2InvalidDnsNameError):
@@ -317,7 +317,7 @@ async def test_worker_used_resources_with_no_workers_raises(
async def test_worker_used_resources_with_invalid_worker_host_raises(
scheduler_url: AnyUrl,
- scheduler_authentication: InternalClusterAuthentication,
+ scheduler_authentication: ClusterAuthentication,
fake_ec2_instance_data: Callable[..., EC2InstanceData],
):
ec2_instance_data = fake_ec2_instance_data()
@@ -329,7 +329,7 @@ async def test_worker_used_resources_with_invalid_worker_host_raises(
async def test_worker_used_resources(
scheduler_url: AnyUrl,
- scheduler_authentication: InternalClusterAuthentication,
+ scheduler_authentication: ClusterAuthentication,
dask_spec_cluster_client: distributed.Client,
fake_localhost_ec2_instance_data: EC2InstanceData,
):
diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py
index c4f656c68fb..32b5cdae9d1 100644
--- a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py
+++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py
@@ -10,7 +10,7 @@
LogLevel,
VersionTag,
)
-from models_library.clusters import InternalClusterAuthentication
+from models_library.clusters import ClusterAuthentication
from pydantic import (
AliasChoices,
Field,
@@ -347,7 +347,7 @@ class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings):
)
CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: (
- InternalClusterAuthentication
+ ClusterAuthentication
) = Field(
...,
description="defines the authentication of the clusters created via clusters-keeper (can be None or TLS)",
diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py
index af1d0df0e66..0641e812777 100644
--- a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py
+++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py
@@ -3,7 +3,7 @@
from typing import Any, Final
import distributed
-from models_library.clusters import InternalClusterAuthentication, TLSAuthentication
+from models_library.clusters import ClusterAuthentication, TLSAuthentication
from pydantic import AnyUrl
_logger = logging.getLogger(__name__)
@@ -21,9 +21,7 @@ async def _wrap_client_async_routine(
_CONNECTION_TIMEOUT: Final[str] = "5"
-async def ping_scheduler(
- url: AnyUrl, authentication: InternalClusterAuthentication
-) -> bool:
+async def ping_scheduler(url: AnyUrl, authentication: ClusterAuthentication) -> bool:
try:
security = distributed.Security()
if isinstance(authentication, TLSAuthentication):
@@ -47,9 +45,7 @@ async def ping_scheduler(
return False
-async def is_scheduler_busy(
- url: AnyUrl, authentication: InternalClusterAuthentication
-) -> bool:
+async def is_scheduler_busy(url: AnyUrl, authentication: ClusterAuthentication) -> bool:
security = distributed.Security()
if isinstance(authentication, TLSAuthentication):
security = distributed.Security(
diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py
index a6ecfdb8189..5a9402ba093 100644
--- a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py
+++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py
@@ -14,7 +14,7 @@
ClusterState,
OnDemandCluster,
)
-from models_library.clusters import InternalClusterAuthentication, TLSAuthentication
+from models_library.clusters import ClusterAuthentication, TLSAuthentication
from models_library.users import UserID
from models_library.wallets import WalletID
from types_aiobotocore_ec2.literals import InstanceStateNameType
@@ -190,7 +190,7 @@ def create_cluster_from_ec2_instance(
wallet_id: WalletID | None,
*,
dask_scheduler_ready: bool,
- cluster_auth: InternalClusterAuthentication,
+ cluster_auth: ClusterAuthentication,
max_cluster_start_time: datetime.timedelta,
) -> OnDemandCluster:
return OnDemandCluster(
diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py
index 266557358b7..6dc6a452fe4 100644
--- a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py
+++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py
@@ -1,6 +1,6 @@
from aws_library.ec2 import EC2InstanceData
from fastapi import FastAPI
-from models_library.clusters import InternalClusterAuthentication
+from models_library.clusters import ClusterAuthentication
from pydantic import AnyUrl, TypeAdapter
from ..core.settings import get_application_settings
@@ -13,7 +13,7 @@ def get_scheduler_url(ec2_instance: EC2InstanceData) -> AnyUrl:
return url
-def get_scheduler_auth(app: FastAPI) -> InternalClusterAuthentication:
+def get_scheduler_auth(app: FastAPI) -> ClusterAuthentication:
return get_application_settings(
app
).CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH
diff --git a/services/clusters-keeper/tests/unit/test_modules_dask.py b/services/clusters-keeper/tests/unit/test_modules_dask.py
index 7f0408d7057..97c831ea789 100644
--- a/services/clusters-keeper/tests/unit/test_modules_dask.py
+++ b/services/clusters-keeper/tests/unit/test_modules_dask.py
@@ -8,7 +8,7 @@
from distributed import SpecCluster
from faker import Faker
from models_library.clusters import (
- InternalClusterAuthentication,
+ ClusterAuthentication,
NoAuthentication,
TLSAuthentication,
)
@@ -34,11 +34,13 @@
"authentication", _authentication_types, ids=lambda p: f"authentication-{p.type}"
)
async def test_ping_scheduler_non_existing_scheduler(
- faker: Faker, authentication: InternalClusterAuthentication
+ faker: Faker, authentication: ClusterAuthentication
):
assert (
await ping_scheduler(
- TypeAdapter(AnyUrl).validate_python(f"tcp://{faker.ipv4()}:{faker.port_number()}"),
+ TypeAdapter(AnyUrl).validate_python(
+ f"tcp://{faker.ipv4()}:{faker.port_number()}"
+ ),
authentication,
)
is False
@@ -48,7 +50,9 @@ async def test_ping_scheduler_non_existing_scheduler(
async def test_ping_scheduler(dask_spec_local_cluster: SpecCluster):
assert (
await ping_scheduler(
- TypeAdapter(AnyUrl).validate_python(dask_spec_local_cluster.scheduler_address),
+ TypeAdapter(AnyUrl).validate_python(
+ dask_spec_local_cluster.scheduler_address
+ ),
NoAuthentication(),
)
is True
@@ -71,7 +75,9 @@ async def test_is_scheduler_busy(
dask_spec_cluster_client: distributed.Client,
):
# nothing runs right now
- scheduler_address = TypeAdapter(AnyUrl).validate_python(dask_spec_local_cluster.scheduler_address)
+ scheduler_address = TypeAdapter(AnyUrl).validate_python(
+ dask_spec_local_cluster.scheduler_address
+ )
assert await is_scheduler_busy(scheduler_address, NoAuthentication()) is False
_SLEEP_TIME = 5
diff --git a/services/clusters-keeper/tests/unit/test_utils_clusters.py b/services/clusters-keeper/tests/unit/test_utils_clusters.py
index 55190cb46a1..96983dd34d5 100644
--- a/services/clusters-keeper/tests/unit/test_utils_clusters.py
+++ b/services/clusters-keeper/tests/unit/test_utils_clusters.py
@@ -19,7 +19,7 @@
from faker import Faker
from models_library.api_schemas_clusters_keeper.clusters import ClusterState
from models_library.clusters import (
- InternalClusterAuthentication,
+ ClusterAuthentication,
NoAuthentication,
TLSAuthentication,
)
@@ -55,7 +55,7 @@ def ec2_boot_specs(app_settings: ApplicationSettings) -> EC2InstanceBootSpecific
@pytest.fixture(params=[TLSAuthentication, NoAuthentication])
def backend_cluster_auth(
request: pytest.FixtureRequest,
-) -> InternalClusterAuthentication:
+) -> ClusterAuthentication:
return request.param
@@ -63,7 +63,7 @@ def backend_cluster_auth(
def app_environment(
app_environment: EnvVarsDict,
monkeypatch: pytest.MonkeyPatch,
- backend_cluster_auth: InternalClusterAuthentication,
+ backend_cluster_auth: ClusterAuthentication,
) -> EnvVarsDict:
return app_environment | setenvs_from_dict(
monkeypatch,
@@ -295,7 +295,7 @@ def test_create_cluster_from_ec2_instance(
faker: Faker,
ec2_state: InstanceStateNameType,
expected_cluster_state: ClusterState,
- authentication: InternalClusterAuthentication,
+ authentication: ClusterAuthentication,
):
instance_data = fake_ec2_instance_data(state=ec2_state)
cluster_instance = create_cluster_from_ec2_instance(
diff --git a/services/dask-sidecar/README.md b/services/dask-sidecar/README.md
index 8abc94a9dbc..2a3f5ec254b 100644
--- a/services/dask-sidecar/README.md
+++ b/services/dask-sidecar/README.md
@@ -13,42 +13,3 @@ source .venv/bin/activate
cd services/api-service
make install-dev
```
-
-## Deploy on a specific cluster
-
-1. define label on docker engine
-
- ```bash
- sudo nano /etc/docker/daemon.json
- ```
-
- ```json
- {
- "labels":["cluster_id=MYCLUSTERUNIQUEIDENTIFIER"]
- }
- ```
-
-2. restart the docker engine
-
- ```bash
- sudo service docker restart
- ```
-
-3. verify
-
- ```bash
- docker info --format "{{.Labels}}"
- ```
-
-
-## Dev notes
-
-### 2021.08.24
-
- - sidecar sets up its own available resources on start
- - sidecar checks local docker engine labels to get its cluster_id
-
-### 2021.06.10
-
- - installed from dynamic-sidecar in current repo, but could have opted for taking sidecar image as a base. The latter would complicate in-host development though, so we start commando here.
- - can be started as scheduler or worker. TODO: scheduler does not need to mount anything
diff --git a/services/dask-sidecar/requirements/_base.in b/services/dask-sidecar/requirements/_base.in
index 2352652e4a0..9571b106d4f 100644
--- a/services/dask-sidecar/requirements/_base.in
+++ b/services/dask-sidecar/requirements/_base.in
@@ -22,7 +22,6 @@ aiodocker
aiofiles
blosc # for compression
dask[distributed, diagnostics]
-dask-gateway # needed for the osparc-dask-gateway to preload the module
fsspec[http, s3] # sub types needed as we acces http and s3 here
lz4 # for compression
pydantic
diff --git a/services/dask-sidecar/requirements/_base.txt b/services/dask-sidecar/requirements/_base.txt
index e3cd751062d..7cc0de4aa6d 100644
--- a/services/dask-sidecar/requirements/_base.txt
+++ b/services/dask-sidecar/requirements/_base.txt
@@ -36,7 +36,6 @@ aiohttp==3.9.5
# -c requirements/../../../requirements/constraints.txt
# aiobotocore
# aiodocker
- # dask-gateway
# fsspec
# s3fs
aioitertools==0.11.0
@@ -94,7 +93,6 @@ charset-normalizer==3.3.2
click==8.1.7
# via
# dask
- # dask-gateway
# distributed
# typer
cloudpickle==3.0.0
@@ -108,10 +106,7 @@ dask==2024.5.1
# -c requirements/constraints.txt
# -r requirements/../../../packages/dask-task-models-library/requirements/_base.in
# -r requirements/_base.in
- # dask-gateway
# distributed
-dask-gateway==2024.1.0
- # via -r requirements/_base.in
deprecated==1.2.14
# via
# opentelemetry-api
@@ -119,9 +114,7 @@ deprecated==1.2.14
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-semantic-conventions
distributed==2024.5.1
- # via
- # dask
- # dask-gateway
+ # via dask
dnspython==2.6.1
# via email-validator
email-validator==2.1.1
@@ -411,7 +404,6 @@ pyyaml==6.0.1
# -r requirements/../../../packages/service-library/requirements/_base.in
# bokeh
# dask
- # dask-gateway
# distributed
redis==5.0.4
# via
@@ -436,7 +428,6 @@ redis==5.0.4
# -r requirements/../../../packages/service-library/requirements/_base.in
referencing==0.29.3
# via
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -480,7 +471,6 @@ toolz==0.12.1
tornado==6.4
# via
# bokeh
- # dask-gateway
# distributed
tqdm==4.66.4
# via -r requirements/../../../packages/service-library/requirements/_base.in
@@ -535,6 +525,7 @@ xyzservices==2024.4.0
# via bokeh
yarl==1.9.4
# via
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/director-v2/openapi.json b/services/director-v2/openapi.json
index cdd6d4eca05..c1b38416efe 100644
--- a/services/director-v2/openapi.json
+++ b/services/director-v2/openapi.json
@@ -118,25 +118,25 @@
"operationId": "get_computation_v2_computations__project_id__get",
"parameters": [
{
+ "name": "project_id",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Project Id"
- },
- "name": "project_id",
- "in": "path"
+ }
},
{
+ "name": "user_id",
+ "in": "query",
"required": true,
"schema": {
"type": "integer",
"exclusiveMinimum": true,
"title": "User Id",
"minimum": 0
- },
- "name": "user_id",
- "in": "query"
+ }
}
],
"responses": {
@@ -170,25 +170,25 @@
"operationId": "delete_computation_v2_computations__project_id__delete",
"parameters": [
{
+ "name": "project_id",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Project Id"
- },
- "name": "project_id",
- "in": "path"
+ }
}
],
"requestBody": {
+ "required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ComputationDelete"
}
}
- },
- "required": true
+ }
},
"responses": {
"204": {
@@ -216,25 +216,25 @@
"operationId": "stop_computation_v2_computations__project_id__stop_post",
"parameters": [
{
+ "name": "project_id",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Project Id"
- },
- "name": "project_id",
- "in": "path"
+ }
}
],
"requestBody": {
+ "required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ComputationStop"
}
}
- },
- "required": true
+ }
},
"responses": {
"202": {
@@ -270,25 +270,25 @@
"operationId": "get_all_tasks_log_files_v2_computations__project_id__tasks___logfile_get",
"parameters": [
{
+ "name": "project_id",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Project Id"
- },
- "name": "project_id",
- "in": "path"
+ }
},
{
+ "name": "user_id",
+ "in": "query",
"required": true,
"schema": {
"type": "integer",
"exclusiveMinimum": true,
"title": "User Id",
"minimum": 0
- },
- "name": "user_id",
- "in": "query"
+ }
}
],
"responses": {
@@ -297,10 +297,10 @@
"content": {
"application/json": {
"schema": {
+ "type": "array",
"items": {
"$ref": "#/components/schemas/TaskLogFileGet"
},
- "type": "array",
"title": "Response Get All Tasks Log Files V2 Computations Project Id Tasks Logfile Get"
}
}
@@ -329,35 +329,35 @@
"operationId": "get_task_log_file_v2_computations__project_id__tasks__node_uuid__logfile_get",
"parameters": [
{
+ "name": "project_id",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Project Id"
- },
- "name": "project_id",
- "in": "path"
+ }
},
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
},
{
+ "name": "user_id",
+ "in": "query",
"required": true,
"schema": {
"type": "integer",
"exclusiveMinimum": true,
"title": "User Id",
"minimum": 0
- },
- "name": "user_id",
- "in": "query"
+ }
}
],
"responses": {
@@ -393,25 +393,25 @@
"operationId": "get_batch_tasks_outputs_v2_computations__project_id__tasks___outputs_batchGet_post",
"parameters": [
{
+ "name": "project_id",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Project Id"
- },
- "name": "project_id",
- "in": "path"
+ }
}
],
"requestBody": {
+ "required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/TasksSelection"
}
}
- },
- "required": true
+ }
},
"responses": {
"200": {
@@ -449,25 +449,39 @@
"operationId": "list_tracked_dynamic_services_v2_dynamic_services_get",
"parameters": [
{
+ "name": "user_id",
+ "in": "query",
"required": false,
"schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
+ "anyOf": [
+ {
+ "type": "integer",
+ "exclusiveMinimum": true,
+ "minimum": 0
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "User Id"
+ }
},
{
+ "name": "project_id",
+ "in": "query",
"required": false,
"schema": {
- "type": "string",
- "format": "uuid",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "uuid"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Project Id"
- },
- "name": "project_id",
- "in": "query"
+ }
}
],
"responses": {
@@ -476,10 +490,10 @@
"content": {
"application/json": {
"schema": {
+ "type": "array",
"items": {
"$ref": "#/components/schemas/RunningDynamicServiceDetails"
},
- "type": "array",
"title": "Response List Tracked Dynamic Services V2 Dynamic Services Get"
}
}
@@ -505,42 +519,42 @@
"operationId": "create_dynamic_service_v2_dynamic_services_post",
"parameters": [
{
+ "name": "x-dynamic-sidecar-request-dns",
+ "in": "header",
"required": true,
"schema": {
"type": "string",
"title": "X-Dynamic-Sidecar-Request-Dns"
- },
- "name": "x-dynamic-sidecar-request-dns",
- "in": "header"
+ }
},
{
+ "name": "x-dynamic-sidecar-request-scheme",
+ "in": "header",
"required": true,
"schema": {
"type": "string",
"title": "X-Dynamic-Sidecar-Request-Scheme"
- },
- "name": "x-dynamic-sidecar-request-scheme",
- "in": "header"
+ }
},
{
+ "name": "x-simcore-user-agent",
+ "in": "header",
"required": true,
"schema": {
"type": "string",
"title": "X-Simcore-User-Agent"
- },
- "name": "x-simcore-user-agent",
- "in": "header"
+ }
}
],
"requestBody": {
+ "required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/DynamicServiceCreate"
}
}
- },
- "required": true
+ }
},
"responses": {
"201": {
@@ -575,14 +589,14 @@
"operationId": "get_dynamic_sidecar_status_v2_dynamic_services__node_uuid__get",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
}
],
"responses": {
@@ -616,24 +630,31 @@
"operationId": "stop_dynamic_service_v2_dynamic_services__node_uuid__delete",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
},
{
+ "name": "can_save",
+ "in": "query",
"required": false,
"schema": {
- "type": "boolean",
- "title": "Can Save",
- "default": true
- },
- "name": "can_save",
- "in": "query"
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": true,
+ "title": "Can Save"
+ }
}
],
"responses": {
@@ -662,25 +683,25 @@
"operationId": "service_retrieve_data_on_ports_v2_dynamic_services__node_uuid__retrieve_post",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
}
],
"requestBody": {
+ "required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/RetrieveDataIn"
}
}
- },
- "required": true
+ }
},
"responses": {
"200": {
@@ -715,14 +736,14 @@
"operationId": "service_restart_containers_v2_dynamic_services__node_uuid__restart_post",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
}
],
"responses": {
@@ -751,14 +772,14 @@
"operationId": "update_projects_networks_v2_dynamic_services_projects__project_id____networks_patch",
"parameters": [
{
+ "name": "project_id",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Project Id"
- },
- "name": "project_id",
- "in": "path"
+ }
}
],
"responses": {
@@ -787,24 +808,24 @@
"operationId": "get_project_inactivity_v2_dynamic_services_projects__project_id__inactivity_get",
"parameters": [
{
+ "name": "project_id",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Project Id"
- },
- "name": "project_id",
- "in": "path"
+ }
},
{
+ "name": "max_inactivity_seconds",
+ "in": "query",
"required": true,
"schema": {
"type": "number",
"minimum": 0.0,
"title": "Max Inactivity Seconds"
- },
- "name": "max_inactivity_seconds",
- "in": "query"
+ }
}
],
"responses": {
@@ -831,40 +852,38 @@
}
}
},
- "/v2/clusters": {
- "get": {
+ "/v2/dynamic_scheduler/services/{node_uuid}/observation": {
+ "patch": {
"tags": [
- "clusters"
+ "dynamic scheduler"
],
- "summary": "Lists clusters for user",
- "operationId": "list_clusters_v2_clusters_get",
+ "summary": "Enable/disable observation of the service",
+ "operationId": "update_service_observation_v2_dynamic_scheduler_services__node_uuid__observation_patch",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
+ "type": "string",
+ "format": "uuid",
+ "title": "Node Uuid"
+ }
}
],
- "responses": {
- "200": {
- "description": "Successful Response",
- "content": {
- "application/json": {
- "schema": {
- "items": {
- "$ref": "#/components/schemas/ClusterGet"
- },
- "type": "array",
- "title": "Response List Clusters V2 Clusters Get"
- }
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ObservationItem"
}
}
+ }
+ },
+ "responses": {
+ "204": {
+ "description": "Successful Response"
},
"422": {
"description": "Validation Error",
@@ -877,47 +896,42 @@
}
}
}
- },
- "post": {
+ }
+ },
+ "/v2/dynamic_scheduler/services/{node_uuid}/containers": {
+ "delete": {
"tags": [
- "clusters"
+ "dynamic scheduler"
],
- "summary": "Create a new cluster for a user",
- "operationId": "create_cluster_v2_clusters_post",
+ "summary": "Removes the service's user services",
+ "operationId": "delete_service_containers_v2_dynamic_scheduler_services__node_uuid__containers_delete",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
+ "type": "string",
+ "format": "uuid",
+ "title": "Node Uuid"
+ }
}
],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterCreate"
- }
- }
- },
- "required": true
- },
"responses": {
- "201": {
+ "202": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ClusterGet"
+ "type": "string",
+ "title": "Response Delete Service Containers V2 Dynamic Scheduler Services Node Uuid Containers Delete"
}
}
}
},
+ "409": {
+ "description": "Task already running, cannot start a new one"
+ },
"422": {
"description": "Validation Error",
"content": {
@@ -931,55 +945,23 @@
}
}
},
- "/v2/clusters/default": {
- "get": {
- "tags": [
- "clusters"
- ],
- "summary": "Returns the default cluster",
- "operationId": "get_default_cluster_v2_clusters_default_get",
- "responses": {
- "200": {
- "description": "Successful Response",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterGet"
- }
- }
- }
- }
- }
- }
- },
- "/v2/clusters/{cluster_id}": {
+ "/v2/dynamic_scheduler/services/{node_uuid}/state": {
"get": {
"tags": [
- "clusters"
+ "dynamic scheduler"
],
- "summary": "Get one cluster for user",
- "operationId": "get_cluster_v2_clusters__cluster_id__get",
+ "summary": "Returns the internals of the scheduler for the given service",
+ "operationId": "get_service_state_v2_dynamic_scheduler_services__node_uuid__state_get",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
- "type": "integer",
- "minimum": 0,
- "title": "Cluster Id"
- },
- "name": "cluster_id",
- "in": "path"
- },
- {
- "required": true,
- "schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
+ "type": "string",
+ "format": "uuid",
+ "title": "Node Uuid"
+ }
}
],
"responses": {
@@ -988,7 +970,7 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ClusterGet"
+ "$ref": "#/components/schemas/SchedulerData"
}
}
}
@@ -1004,461 +986,25 @@
}
}
}
- },
- "delete": {
+ }
+ },
+ "/v2/dynamic_scheduler/services/{node_uuid}/state:save": {
+ "post": {
"tags": [
- "clusters"
+ "dynamic scheduler"
],
- "summary": "Remove a cluster for user",
- "operationId": "delete_cluster_v2_clusters__cluster_id__delete",
+ "summary": "Starts the saving of the state for the service",
+ "operationId": "save_service_state_v2_dynamic_scheduler_services__node_uuid__state_save_post",
"parameters": [
{
- "required": true,
- "schema": {
- "type": "integer",
- "minimum": 0,
- "title": "Cluster Id"
- },
- "name": "cluster_id",
- "in": "path"
- },
- {
- "required": true,
- "schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
- }
- ],
- "responses": {
- "204": {
- "description": "Successful Response"
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- },
- "patch": {
- "tags": [
- "clusters"
- ],
- "summary": "Modify a cluster for user",
- "operationId": "update_cluster_v2_clusters__cluster_id__patch",
- "parameters": [
- {
- "required": true,
- "schema": {
- "type": "integer",
- "minimum": 0,
- "title": "Cluster Id"
- },
- "name": "cluster_id",
- "in": "path"
- },
- {
- "required": true,
- "schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterPatch"
- }
- }
- },
- "required": true
- },
- "responses": {
- "200": {
- "description": "Successful Response",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterGet"
- }
- }
- }
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- }
- },
- "/v2/clusters/default/details": {
- "get": {
- "tags": [
- "clusters"
- ],
- "summary": "Returns the cluster details",
- "operationId": "get_default_cluster_details_v2_clusters_default_details_get",
- "parameters": [
- {
- "required": true,
- "schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
- }
- ],
- "responses": {
- "200": {
- "description": "Successful Response",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterDetailsGet"
- }
- }
- }
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- }
- },
- "/v2/clusters/{cluster_id}/details": {
- "get": {
- "tags": [
- "clusters"
- ],
- "summary": "Returns the cluster details",
- "operationId": "get_cluster_details_v2_clusters__cluster_id__details_get",
- "parameters": [
- {
- "required": true,
- "schema": {
- "type": "integer",
- "minimum": 0,
- "title": "Cluster Id"
- },
- "name": "cluster_id",
- "in": "path"
- },
- {
- "required": true,
- "schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
- }
- ],
- "responses": {
- "200": {
- "description": "Successful Response",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterDetailsGet"
- }
- }
- }
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- }
- },
- "/v2/clusters:ping": {
- "post": {
- "tags": [
- "clusters"
- ],
- "summary": "Test cluster connection",
- "operationId": "test_cluster_connection_v2_clusters_ping_post",
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ClusterPing"
- }
- }
- },
- "required": true
- },
- "responses": {
- "204": {
- "description": "Successful Response"
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- }
- },
- "/v2/clusters/default:ping": {
- "post": {
- "tags": [
- "clusters"
- ],
- "summary": "Test cluster connection",
- "operationId": "test_default_cluster_connection_v2_clusters_default_ping_post",
- "responses": {
- "204": {
- "description": "Successful Response"
- }
- }
- }
- },
- "/v2/clusters/{cluster_id}:ping": {
- "post": {
- "tags": [
- "clusters"
- ],
- "summary": "Test cluster connection",
- "operationId": "test_specific_cluster_connection_v2_clusters__cluster_id__ping_post",
- "parameters": [
- {
- "required": true,
- "schema": {
- "type": "integer",
- "minimum": 0,
- "title": "Cluster Id"
- },
- "name": "cluster_id",
- "in": "path"
- },
- {
- "required": true,
- "schema": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "User Id",
- "minimum": 0
- },
- "name": "user_id",
- "in": "query"
- }
- ],
- "responses": {
- "204": {
- "description": "Successful Response"
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- }
- },
- "/v2/dynamic_scheduler/services/{node_uuid}/observation": {
- "patch": {
- "tags": [
- "dynamic scheduler"
- ],
- "summary": "Enable/disable observation of the service",
- "operationId": "update_service_observation_v2_dynamic_scheduler_services__node_uuid__observation_patch",
- "parameters": [
- {
- "required": true,
- "schema": {
- "type": "string",
- "format": "uuid",
- "title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
- }
- ],
- "requestBody": {
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/ObservationItem"
- }
- }
- },
- "required": true
- },
- "responses": {
- "204": {
- "description": "Successful Response"
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- }
- },
- "/v2/dynamic_scheduler/services/{node_uuid}/containers": {
- "delete": {
- "tags": [
- "dynamic scheduler"
- ],
- "summary": "Removes the service's user services",
- "operationId": "delete_service_containers_v2_dynamic_scheduler_services__node_uuid__containers_delete",
- "parameters": [
- {
- "required": true,
- "schema": {
- "type": "string",
- "format": "uuid",
- "title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
- }
- ],
- "responses": {
- "202": {
- "description": "Successful Response",
- "content": {
- "application/json": {
- "schema": {
- "type": "string",
- "title": "Response Delete Service Containers V2 Dynamic Scheduler Services Node Uuid Containers Delete"
- }
- }
- }
- },
- "409": {
- "description": "Task already running, cannot start a new one"
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- }
- },
- "/v2/dynamic_scheduler/services/{node_uuid}/state": {
- "get": {
- "tags": [
- "dynamic scheduler"
- ],
- "summary": "Returns the internals of the scheduler for the given service",
- "operationId": "get_service_state_v2_dynamic_scheduler_services__node_uuid__state_get",
- "parameters": [
- {
- "required": true,
- "schema": {
- "type": "string",
- "format": "uuid",
- "title": "Node Uuid"
- },
"name": "node_uuid",
- "in": "path"
- }
- ],
- "responses": {
- "200": {
- "description": "Successful Response",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/SchedulerData"
- }
- }
- }
- },
- "422": {
- "description": "Validation Error",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/HTTPValidationError"
- }
- }
- }
- }
- }
- }
- },
- "/v2/dynamic_scheduler/services/{node_uuid}/state:save": {
- "post": {
- "tags": [
- "dynamic scheduler"
- ],
- "summary": "Starts the saving of the state for the service",
- "operationId": "save_service_state_v2_dynamic_scheduler_services__node_uuid__state_save_post",
- "parameters": [
- {
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
}
],
"responses": {
@@ -1498,14 +1044,14 @@
"operationId": "push_service_outputs_v2_dynamic_scheduler_services__node_uuid__outputs_push_post",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
}
],
"responses": {
@@ -1545,14 +1091,14 @@
"operationId": "delete_service_docker_resources_v2_dynamic_scheduler_services__node_uuid__docker_resources_delete",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
}
],
"responses": {
@@ -1592,14 +1138,14 @@
"operationId": "free_reserved_disk_space_v2_dynamic_scheduler_services__node_uuid__disk_reserved_free_post",
"parameters": [
{
+ "name": "node_uuid",
+ "in": "path",
"required": true,
"schema": {
"type": "string",
"format": "uuid",
"title": "Node Uuid"
- },
- "name": "node_uuid",
- "in": "path"
+ }
}
],
"responses": {
@@ -1634,11 +1180,18 @@
"title": "Version"
},
"released": {
- "additionalProperties": {
- "type": "string",
- "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$"
- },
- "type": "object",
+ "anyOf": [
+ {
+ "additionalProperties": {
+ "type": "string",
+ "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$"
+ },
+ "type": "object"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Released",
"description": "Maps every route's path tag with a released version"
}
@@ -1651,11 +1204,11 @@
"title": "BaseMeta",
"example": {
"name": "simcore_service_foo",
- "version": "2.4.45",
"released": {
"v1": "1.3.4",
"v2": "2.4.45"
- }
+ },
+ "version": "2.4.45"
}
},
"BootMode": {
@@ -1665,351 +1218,44 @@
"GPU",
"MPI"
],
- "title": "BootMode",
- "description": "An enumeration."
+ "title": "BootMode"
},
"CallbacksMapping": {
"properties": {
- "metrics": {
- "allOf": [
- {
- "$ref": "#/components/schemas/UserServiceCommand"
- }
- ],
- "title": "Metrics",
- "description": "command to recover prometheus metrics from a specific user service"
- },
- "before_shutdown": {
- "items": {
- "$ref": "#/components/schemas/UserServiceCommand"
- },
- "type": "array",
- "title": "Before Shutdown",
- "description": "commands to run before shutting down the user servicescommands get executed first to last, multiple commands for the sameuser services are allowed"
- },
- "inactivity": {
- "allOf": [
- {
- "$ref": "#/components/schemas/UserServiceCommand"
- }
- ],
- "title": "Inactivity",
- "description": "command used to figure out for how much time the user service(s) were inactive for"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "title": "CallbacksMapping"
- },
- "ClusterAccessRights": {
- "properties": {
- "read": {
- "type": "boolean",
- "title": "Read",
- "description": "allows to run pipelines on that cluster"
- },
- "write": {
- "type": "boolean",
- "title": "Write",
- "description": "allows to modify the cluster"
- },
- "delete": {
- "type": "boolean",
- "title": "Delete",
- "description": "allows to delete a cluster"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "required": [
- "read",
- "write",
- "delete"
- ],
- "title": "ClusterAccessRights"
- },
- "ClusterCreate": {
- "properties": {
- "name": {
- "type": "string",
- "title": "Name",
- "description": "The human readable name of the cluster"
- },
- "description": {
- "type": "string",
- "title": "Description"
- },
- "type": {
- "$ref": "#/components/schemas/ClusterTypeInModel"
- },
- "owner": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "Owner",
- "minimum": 0
- },
- "thumbnail": {
- "type": "string",
- "maxLength": 2083,
- "minLength": 1,
- "format": "uri",
- "title": "Thumbnail",
- "description": "url to the image describing this cluster"
- },
- "endpoint": {
- "type": "string",
- "maxLength": 65536,
- "minLength": 1,
- "format": "uri",
- "title": "Endpoint"
- },
- "authentication": {
- "anyOf": [
- {
- "$ref": "#/components/schemas/SimpleAuthentication"
- },
- {
- "$ref": "#/components/schemas/KerberosAuthentication"
- },
- {
- "$ref": "#/components/schemas/JupyterHubTokenAuthentication"
- }
- ],
- "title": "Authentication"
- },
- "accessRights": {
- "additionalProperties": {
- "$ref": "#/components/schemas/ClusterAccessRights"
- },
- "type": "object",
- "title": "Accessrights"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "required": [
- "name",
- "type",
- "endpoint",
- "authentication"
- ],
- "title": "ClusterCreate"
- },
- "ClusterDetailsGet": {
- "properties": {
- "scheduler": {
- "allOf": [
- {
- "$ref": "#/components/schemas/Scheduler"
- }
- ],
- "title": "Scheduler",
- "description": "This contains dask scheduler information given by the underlying dask library"
- },
- "dashboard_link": {
- "type": "string",
- "maxLength": 65536,
- "minLength": 1,
- "format": "uri",
- "title": "Dashboard Link",
- "description": "Link to this scheduler's dashboard"
- }
- },
- "type": "object",
- "required": [
- "scheduler",
- "dashboard_link"
- ],
- "title": "ClusterDetailsGet"
- },
- "ClusterGet": {
- "properties": {
- "name": {
- "type": "string",
- "title": "Name",
- "description": "The human readable name of the cluster"
- },
- "description": {
- "type": "string",
- "title": "Description"
- },
- "type": {
- "$ref": "#/components/schemas/ClusterTypeInModel"
- },
- "owner": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "Owner",
- "minimum": 0
- },
- "thumbnail": {
- "type": "string",
- "maxLength": 2083,
- "minLength": 1,
- "format": "uri",
- "title": "Thumbnail",
- "description": "url to the image describing this cluster"
- },
- "endpoint": {
- "type": "string",
- "maxLength": 65536,
- "minLength": 1,
- "format": "uri",
- "title": "Endpoint"
- },
- "authentication": {
- "anyOf": [
- {
- "$ref": "#/components/schemas/SimpleAuthentication"
- },
- {
- "$ref": "#/components/schemas/KerberosAuthentication"
- },
- {
- "$ref": "#/components/schemas/JupyterHubTokenAuthentication"
- },
- {
- "$ref": "#/components/schemas/NoAuthentication"
- },
- {
- "$ref": "#/components/schemas/TLSAuthentication"
- }
- ],
- "title": "Authentication",
- "description": "Dask gateway authentication"
- },
- "accessRights": {
- "additionalProperties": {
- "$ref": "#/components/schemas/ClusterAccessRights"
- },
- "type": "object",
- "title": "Accessrights"
- },
- "id": {
- "type": "integer",
- "minimum": 0,
- "title": "Id",
- "description": "The cluster ID"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "required": [
- "name",
- "type",
- "owner",
- "endpoint",
- "authentication",
- "id"
- ],
- "title": "ClusterGet"
- },
- "ClusterPatch": {
- "properties": {
- "name": {
- "type": "string",
- "title": "Name"
- },
- "description": {
- "type": "string",
- "title": "Description"
- },
- "type": {
- "$ref": "#/components/schemas/ClusterTypeInModel"
- },
- "owner": {
- "type": "integer",
- "exclusiveMinimum": true,
- "title": "Owner",
- "minimum": 0
- },
- "thumbnail": {
- "type": "string",
- "maxLength": 2083,
- "minLength": 1,
- "format": "uri",
- "title": "Thumbnail"
- },
- "endpoint": {
- "type": "string",
- "maxLength": 65536,
- "minLength": 1,
- "format": "uri",
- "title": "Endpoint"
- },
- "authentication": {
- "anyOf": [
- {
- "$ref": "#/components/schemas/SimpleAuthentication"
- },
- {
- "$ref": "#/components/schemas/KerberosAuthentication"
- },
- {
- "$ref": "#/components/schemas/JupyterHubTokenAuthentication"
- }
- ],
- "title": "Authentication"
- },
- "accessRights": {
- "additionalProperties": {
- "$ref": "#/components/schemas/ClusterAccessRights"
- },
- "type": "object",
- "title": "Accessrights"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "title": "ClusterPatch"
- },
- "ClusterPing": {
- "properties": {
- "endpoint": {
- "type": "string",
- "maxLength": 65536,
- "minLength": 1,
- "format": "uri",
- "title": "Endpoint"
- },
- "authentication": {
+ "metrics": {
"anyOf": [
{
- "$ref": "#/components/schemas/SimpleAuthentication"
- },
- {
- "$ref": "#/components/schemas/KerberosAuthentication"
+ "$ref": "#/components/schemas/UserServiceCommand"
},
{
- "$ref": "#/components/schemas/JupyterHubTokenAuthentication"
- },
+ "type": "null"
+ }
+ ],
+ "description": "command to recover prometheus metrics from a specific user service"
+ },
+ "before_shutdown": {
+ "items": {
+ "$ref": "#/components/schemas/UserServiceCommand"
+ },
+ "type": "array",
+ "title": "Before Shutdown",
+        "description": "commands to run before shutting down the user services; commands get executed first to last, multiple commands for the same user services are allowed"
+ },
+ "inactivity": {
+ "anyOf": [
{
- "$ref": "#/components/schemas/NoAuthentication"
+ "$ref": "#/components/schemas/UserServiceCommand"
},
{
- "$ref": "#/components/schemas/TLSAuthentication"
+ "type": "null"
}
],
- "title": "Authentication",
- "description": "Dask gateway authentication"
+        "description": "command used to figure out for how long the user service(s) were inactive"
}
},
+ "additionalProperties": false,
"type": "object",
- "required": [
- "endpoint",
- "authentication"
- ],
- "title": "ClusterPing"
- },
- "ClusterTypeInModel": {
- "type": "string",
- "enum": [
- "AWS",
- "ON_PREMISE",
- "ON_DEMAND"
- ],
- "title": "ClusterTypeInModel",
- "description": "An enumeration."
+ "title": "CallbacksMapping"
},
"ComputationCreate": {
"properties": {
@@ -2025,7 +1271,14 @@
"title": "Project Id"
},
"start_pipeline": {
- "type": "boolean",
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Start Pipeline",
"description": "if True the computation pipeline will start right away",
"default": false
@@ -2035,26 +1288,34 @@
"title": "Product Name"
},
"subgraph": {
- "items": {
- "type": "string",
- "format": "uuid"
- },
- "type": "array",
+ "anyOf": [
+ {
+ "items": {
+ "type": "string",
+ "format": "uuid"
+ },
+ "type": "array"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Subgraph",
"description": "An optional set of nodes that must be executed, if empty the whole pipeline is executed"
},
"force_restart": {
- "type": "boolean",
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Force Restart",
"description": "if True will force re-running all dependent nodes",
"default": false
},
- "cluster_id": {
- "type": "integer",
- "minimum": 0,
- "title": "Cluster Id",
- "description": "the computation shall use the cluster described by its id, 0 is the default cluster"
- },
"simcore_user_agent": {
"type": "string",
"title": "Simcore User Agent",
@@ -2063,16 +1324,18 @@
"use_on_demand_clusters": {
"type": "boolean",
"title": "Use On Demand Clusters",
- "description": "if True, a cluster will be created as necessary (wallet_id cannot be None, and cluster_id must be None)",
+ "description": "if True, a cluster will be created as necessary (wallet_id cannot be None)",
"default": false
},
"wallet_info": {
- "allOf": [
+ "anyOf": [
+ {
+ "$ref": "#/components/schemas/WalletInfo-Input"
+ },
{
- "$ref": "#/components/schemas/WalletInfo"
+ "type": "null"
}
],
- "title": "Wallet Info",
"description": "contains information about the wallet used to bill the running service"
}
},
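The recurring anyOf wrappers with a "null" branch in the hunks above are simply the JSON Schema that Pydantic v2 emits for optional fields, so most of these changes are schema reshuffling rather than behavioural ones. A minimal sketch, assuming Pydantic v2 and using a hypothetical model (not the repo's actual class), of how such a field ends up in the regenerated spec:

    from pydantic import BaseModel, Field

    class ComputationCreateSketch(BaseModel):
        # serialized as anyOf [{"type": "boolean"}, {"type": "null"}] plus the default
        start_pipeline: bool | None = Field(
            default=False,
            description="if True the computation pipeline will start right away",
        )

    print(ComputationCreateSketch.model_json_schema()["properties"]["start_pipeline"])

Running this prints the same anyOf/default shape that appears throughout the regenerated spec.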
@@ -2093,7 +1356,14 @@
"minimum": 0
},
"force": {
- "type": "boolean",
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Force",
"description": "if True then the pipeline will be removed even if it is running",
"default": false
@@ -2114,71 +1384,96 @@
"description": "the id of the computation task"
},
"state": {
- "allOf": [
- {
- "$ref": "#/components/schemas/RunningState"
- }
- ],
+ "$ref": "#/components/schemas/RunningState",
"description": "the state of the computational task"
},
"result": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Result",
"description": "the result of the computational task"
},
"pipeline_details": {
- "allOf": [
- {
- "$ref": "#/components/schemas/PipelineDetails"
- }
- ],
- "title": "Pipeline Details",
+ "$ref": "#/components/schemas/PipelineDetails",
"description": "the details of the generated pipeline"
},
"iteration": {
- "type": "integer",
- "exclusiveMinimum": true,
+ "anyOf": [
+ {
+ "type": "integer",
+ "exclusiveMinimum": true,
+ "minimum": 0
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Iteration",
- "description": "the iteration id of the computation task (none if no task ran yet)",
- "minimum": 0
- },
- "cluster_id": {
- "type": "integer",
- "minimum": 0,
- "title": "Cluster Id",
- "description": "the cluster on which the computaional task runs/ran (none if no task ran yet)"
+ "description": "the iteration id of the computation task (none if no task ran yet)"
},
"started": {
- "type": "string",
- "format": "date-time",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "date-time"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Started",
"description": "the timestamp when the computation was started or None if not started yet"
},
"stopped": {
- "type": "string",
- "format": "date-time",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "date-time"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Stopped",
"description": "the timestamp when the computation was stopped or None if not started nor stopped yet"
},
"submitted": {
- "type": "string",
- "format": "date-time",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "date-time"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Submitted",
"description": "task last modification timestamp or None if the there is no task"
},
"url": {
"type": "string",
- "maxLength": 65536,
"minLength": 1,
"format": "uri",
"title": "Url",
"description": "the link where to get the status of the task"
},
"stop_url": {
- "type": "string",
- "maxLength": 65536,
- "minLength": 1,
- "format": "uri",
+ "anyOf": [
+ {
+ "type": "string",
+ "minLength": 1,
+ "format": "uri"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Stop Url",
"description": "the link where to stop the task"
}
@@ -2189,7 +1484,6 @@
"state",
"pipeline_details",
"iteration",
- "cluster_id",
"started",
"stopped",
"submitted",
@@ -2215,85 +1509,156 @@
"ContainerState": {
"properties": {
"Status": {
- "allOf": [
+ "anyOf": [
{
"$ref": "#/components/schemas/Status2"
+ },
+ {
+ "type": "null"
}
],
- "description": "String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\".\n",
- "example": "running"
+ "description": "String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\".\n"
},
"Running": {
- "type": "boolean",
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Running",
- "description": "Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container's state is \"running\".\n",
- "example": true
+ "description": "Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container's state is \"running\".\n"
},
"Paused": {
- "type": "boolean",
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Paused",
- "description": "Whether this container is paused.",
- "example": false
+ "description": "Whether this container is paused."
},
"Restarting": {
- "type": "boolean",
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Restarting",
- "description": "Whether this container is restarting.",
- "example": false
+ "description": "Whether this container is restarting."
},
"OOMKilled": {
- "type": "boolean",
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Oomkilled",
- "description": "Whether this container has been killed because it ran out of memory.\n",
- "example": false
+ "description": "Whether this container has been killed because it ran out of memory.\n"
},
"Dead": {
- "type": "boolean",
- "title": "Dead",
- "example": false
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Dead"
},
"Pid": {
- "type": "integer",
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Pid",
- "description": "The process ID of this container",
- "example": 1234
+ "description": "The process ID of this container"
},
"ExitCode": {
- "type": "integer",
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Exitcode",
- "description": "The last exit code of this container",
- "example": 0
+ "description": "The last exit code of this container"
},
"Error": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Error"
},
"StartedAt": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Startedat",
- "description": "The time when this container was last started.",
- "example": "2020-01-06T09:06:59.461876391Z"
+ "description": "The time when this container was last started."
},
"FinishedAt": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Finishedat",
- "description": "The time when this container last exited.",
- "example": "2020-01-06T09:07:59.461876391Z"
+ "description": "The time when this container last exited."
},
"Health": {
- "$ref": "#/components/schemas/Health"
+ "anyOf": [
+ {
+ "$ref": "#/components/schemas/Health"
+ },
+ {
+ "type": "null"
+ }
+ ]
}
},
"type": "object",
"title": "ContainerState",
- "description": " ContainerState stores container's running state. It's part of ContainerJSONBase\nand will be returned by the \"inspect\" command."
+ "description": "ContainerState stores container's running state. It's part of ContainerJSONBase\nand will be returned by the \"inspect\" command."
},
"DNSResolver": {
"properties": {
"address": {
"anyOf": [
{
- "$ref": "#/components/schemas/OsparcVariableIdentifier"
+ "type": "string",
+ "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$"
},
{
"type": "string"
@@ -2312,12 +1677,14 @@
"minimum": 0
},
{
- "$ref": "#/components/schemas/OsparcVariableIdentifier"
+ "type": "string",
+ "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$"
}
],
"title": "Port"
}
},
+ "additionalProperties": true,
"type": "object",
"required": [
"address",
@@ -2341,24 +1708,10 @@
"title": "DelayedExceptionHandler",
"description": "Allows to ignore an exception for an established\nperiod of time after which it is raised.\n\nThis use case most commonly occurs when dealing with\nexternal systems.\nFor example, due to poor network performance or\nnetwork congestion, an external system which is healthy,\ncurrently is not reachable any longer.\nA possible solution:\n- ignore exceptions for an interval in which the\n system usually is reachable again by not\n raising the error\n- if the error persist give up and raise it\n\nExample code usage:\n\n delayed_handler_external_service = DelayedExceptionHandler(\n delay_for=60\n )\n try:\n function_called_periodically_accessing_external_service()\n except TargetException as e:\n delayed_handler_external_service.try_to_raise(e)\n else:\n delayed_handler_external_service.else_reset()"
},
- "DictModel_str__PositiveFloat_": {
- "additionalProperties": {
- "type": "number",
- "exclusiveMinimum": true,
- "minimum": 0.0
- },
- "type": "object",
- "title": "DictModel[str, PositiveFloat]"
- },
"DockerContainerInspect": {
"properties": {
"container_state": {
- "allOf": [
- {
- "$ref": "#/components/schemas/ContainerState"
- }
- ],
- "title": "Container State",
+ "$ref": "#/components/schemas/ContainerState",
"description": "current state of container"
},
"name": {
@@ -2411,15 +1764,19 @@
"title": "Service Uuid"
},
"service_basepath": {
- "type": "string",
- "format": "path",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "path"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Service Basepath",
"description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint."
},
"service_resources": {
- "additionalProperties": {
- "$ref": "#/components/schemas/ImageResources"
- },
"type": "object",
"title": "Service Resources"
},
@@ -2434,30 +1791,36 @@
"description": "the service data must be saved when closing"
},
"wallet_info": {
- "allOf": [
+ "anyOf": [
+ {
+ "$ref": "#/components/schemas/WalletInfo-Input"
+ },
{
- "$ref": "#/components/schemas/WalletInfo"
+ "type": "null"
}
],
- "title": "Wallet Info",
"description": "contains information about the wallet used to bill the running service"
},
"pricing_info": {
- "allOf": [
+ "anyOf": [
{
"$ref": "#/components/schemas/PricingInfo"
+ },
+ {
+ "type": "null"
}
],
- "title": "Pricing Info",
"description": "contains pricing information (ex. pricing plan and unit ids)"
},
"hardware_info": {
- "allOf": [
+ "anyOf": [
{
"$ref": "#/components/schemas/HardwareInfo"
+ },
+ {
+ "type": "null"
}
],
- "title": "Hardware Info",
"description": "contains harware information (ex. aws_ec2_instances)"
}
},
@@ -2474,16 +1837,27 @@
],
"title": "DynamicServiceCreate",
"example": {
+ "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa",
+ "can_save": true,
+ "hardware_info": {
+ "aws_ec2_instances": [
+ "c6a.4xlarge"
+ ]
+ },
"key": "simcore/services/dynamic/3dviewer",
- "version": "2.4.5",
- "user_id": 234,
- "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe",
"node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa",
- "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa",
+ "pricing_info": {
+ "pricing_plan_id": 1,
+ "pricing_unit_cost_id": 1,
+ "pricing_unit_id": 1
+ },
"product_name": "osparc",
- "can_save": true,
+ "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe",
"service_resources": {
"container": {
+ "boot_modes": [
+ "CPU"
+ ],
"image": "simcore/services/dynamic/jupyter-math:2.0.5",
"resources": {
"CPU": {
@@ -2494,38 +1868,22 @@
"limit": 2147483648,
"reservation": 2147483648
}
- },
- "boot_modes": [
- "CPU"
- ]
+ }
}
},
+ "user_id": 234,
+ "version": "2.4.5",
"wallet_info": {
+ "wallet_credit_amount": "10",
"wallet_id": 1,
- "wallet_name": "My Wallet",
- "wallet_credit_amount": 10
- },
- "pricing_info": {
- "pricing_plan_id": 1,
- "pricing_unit_id": 1,
- "pricing_unit_cost_id": 1
- },
- "hardware_info": {
- "aws_ec2_instances": [
- "c6a.4xlarge"
- ]
+ "wallet_name": "My Wallet"
}
}
},
"DynamicSidecar": {
"properties": {
"status": {
- "allOf": [
- {
- "$ref": "#/components/schemas/simcore_service_director_v2__models__dynamic_services_scheduler__Status"
- }
- ],
- "title": "Status",
+ "$ref": "#/components/schemas/simcore_service_director_v2__models__dynamic_services_scheduler__Status",
"description": "status of the service sidecar also with additional information",
"default": {
"current": "ok",
@@ -2535,8 +1893,8 @@
"is_ready": {
"type": "boolean",
"title": "Is Ready",
- "default": false,
- "scription": "is True while the health check on the dynamic-sidecar is responding. Meaning that the dynamic-sidecar is reachable and can accept requests"
+ "description": "is True while the health check on the dynamic-sidecar is responding. Meaning that the dynamic-sidecar is reachable and can accept requests",
+ "default": false
},
"was_compose_spec_submitted": {
"type": "boolean",
@@ -2550,8 +1908,8 @@
},
"type": "array",
"title": "Containers Inspect",
- "default": [],
- "scription": "docker inspect results from all the container ran at regular intervals"
+        "description": "docker inspect results from all the containers, collected at regular intervals",
+ "default": []
},
"was_dynamic_sidecar_started": {
"type": "boolean",
@@ -2582,12 +1940,7 @@
"default": false
},
"service_removal_state": {
- "allOf": [
- {
- "$ref": "#/components/schemas/ServiceRemovalState"
- }
- ],
- "title": "Service Removal State",
+ "$ref": "#/components/schemas/ServiceRemovalState",
"description": "stores information used during service removal from the dynamic-sidecar scheduler"
},
"wait_for_manual_intervention_after_error": {
@@ -2609,52 +1962,77 @@
"default": false
},
"instrumentation": {
- "allOf": [
- {
- "$ref": "#/components/schemas/ServicesInstrumentation"
- }
- ],
- "title": "Instrumentation",
+ "$ref": "#/components/schemas/ServicesInstrumentation",
"description": "keeps track times for various operations"
},
"dynamic_sidecar_id": {
- "type": "string",
- "maxLength": 25,
- "pattern": "[A-Za-z0-9]{25}",
+ "anyOf": [
+ {
+ "type": "string",
+ "maxLength": 25,
+ "pattern": "[A-Za-z0-9]{25}"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Dynamic Sidecar Id",
"description": "returned by the docker engine; used for starting the proxy"
},
"dynamic_sidecar_network_id": {
- "type": "string",
- "maxLength": 25,
- "pattern": "[A-Za-z0-9]{25}",
+ "anyOf": [
+ {
+ "type": "string",
+ "maxLength": 25,
+ "pattern": "[A-Za-z0-9]{25}"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Dynamic Sidecar Network Id",
"description": "returned by the docker engine; used for starting the proxy"
},
"swarm_network_id": {
- "type": "string",
- "maxLength": 25,
- "pattern": "[A-Za-z0-9]{25}",
+ "anyOf": [
+ {
+ "type": "string",
+ "maxLength": 25,
+ "pattern": "[A-Za-z0-9]{25}"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Swarm Network Id",
"description": "returned by the docker engine; used for starting the proxy"
},
"swarm_network_name": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Swarm Network Name",
"description": "used for starting the proxy"
},
"docker_node_id": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Docker Node Id",
"description": "contains node id of the docker node where all services and created containers are started"
},
"inspect_error_handler": {
- "allOf": [
- {
- "$ref": "#/components/schemas/DelayedExceptionHandler"
- }
- ],
- "title": "Inspect Error Handler",
+ "$ref": "#/components/schemas/DelayedExceptionHandler",
"description": "Set when the dy-sidecar can no longer be reached by the director-v2. If it will be possible to reach the dy-sidecar again, this value will be set to None.",
"default": {
"delay_for": 0.0
@@ -2670,8 +2048,7 @@
"ok",
"failing"
],
- "title": "DynamicSidecarStatus",
- "description": "An enumeration."
+ "title": "DynamicSidecarStatus"
},
"GetProjectInactivityResponse": {
"properties": {
@@ -2718,25 +2095,40 @@
"Health": {
"properties": {
"Status": {
- "allOf": [
+ "anyOf": [
{
"$ref": "#/components/schemas/models_library__generated_models__docker_rest_api__Status"
+ },
+ {
+ "type": "null"
}
],
- "description": "Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem\n",
- "example": "healthy"
+ "description": "Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem\n"
},
"FailingStreak": {
- "type": "integer",
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Failingstreak",
- "description": "FailingStreak is the number of consecutive failures",
- "example": 0
+ "description": "FailingStreak is the number of consecutive failures"
},
"Log": {
- "items": {
- "$ref": "#/components/schemas/HealthcheckResult"
- },
- "type": "array",
+ "anyOf": [
+ {
+ "items": {
+ "$ref": "#/components/schemas/HealthcheckResult"
+ },
+ "type": "array"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Log",
"description": "Log contains the last few results (oldest first)\n"
}
@@ -2764,26 +2156,51 @@
"HealthcheckResult": {
"properties": {
"Start": {
- "type": "string",
- "format": "date-time",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "date-time"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Start",
- "description": "Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- "example": "2020-01-04T10:44:24.496525531Z"
+ "description": "Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n"
},
"End": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "End",
- "description": "Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n",
- "example": "2020-01-04T10:45:21.364524523Z"
+ "description": "Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n"
},
"ExitCode": {
- "type": "integer",
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Exitcode",
- "description": "ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n",
- "example": 0
+ "description": "ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n"
},
"Output": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Output",
"description": "Output from last check"
}
@@ -2812,6 +2229,7 @@
"$ref": "#/components/schemas/BootMode"
},
"type": "array",
+ "title": "Boot Modes",
"description": "describe how a service shall be booted, using CPU, MPI, openMP or GPU",
"default": [
"CPU"
@@ -2827,6 +2245,14 @@
"example": {
"image": "simcore/service/dynamic/pretty-intense:1.0.0",
"resources": {
+ "AIRAM": {
+ "limit": 1,
+ "reservation": 1
+ },
+ "ANY_resource": {
+ "limit": "some_value",
+ "reservation": "some_value"
+ },
"CPU": {
"limit": 4,
"reservation": 0.1
@@ -2838,61 +2264,17 @@
"VRAM": {
"limit": 1,
"reservation": 1
- },
- "AIRAM": {
- "limit": 1,
- "reservation": 1
- },
- "ANY_resource": {
- "limit": "some_value",
- "reservation": "some_value"
}
}
}
},
- "JupyterHubTokenAuthentication": {
- "properties": {
- "type": {
- "type": "string",
- "enum": [
- "jupyterhub"
- ],
- "title": "Type",
- "default": "jupyterhub"
- },
- "api_token": {
- "type": "string",
- "title": "Api Token"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "required": [
- "api_token"
- ],
- "title": "JupyterHubTokenAuthentication"
- },
- "KerberosAuthentication": {
- "properties": {
- "type": {
- "type": "string",
- "enum": [
- "kerberos"
- ],
- "title": "Type",
- "default": "kerberos"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "title": "KerberosAuthentication"
- },
"NATRule": {
"properties": {
"hostname": {
"anyOf": [
{
- "$ref": "#/components/schemas/OsparcVariableIdentifier"
+ "type": "string",
+ "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$"
},
{
"type": "string"
@@ -2911,7 +2293,8 @@
"minimum": 0
},
{
- "$ref": "#/components/schemas/OsparcVariableIdentifier"
+ "type": "string",
+ "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$"
},
{
"$ref": "#/components/schemas/_PortRange"
@@ -2922,12 +2305,7 @@
"title": "Tcp Ports"
},
"dns_resolver": {
- "allOf": [
- {
- "$ref": "#/components/schemas/DNSResolver"
- }
- ],
- "title": "Dns Resolver",
+ "$ref": "#/components/schemas/DNSResolver",
"description": "specify a DNS resolver address and port"
}
},
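The OsparcVariableIdentifier schema is now inlined as a plain string pattern wherever it was referenced. As orientation, a quick check of what that pattern accepts (the regex is copied from the hunks above; the sample values are made up):

    import re

    OSPARC_VARIABLE_PATTERN = r"^\${1,2}(?:\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\})?(:-.+)?$"

    for candidate in (
        "$OSPARC_VARIABLE_HOST",            # bare identifier
        "$${OSPARC_VARIABLE_PORT}",         # double-dollar, braced identifier
        "${OSPARC_VARIABLE_DNS:-8.8.8.8}",  # identifier with a default value
        "plain-string",                     # falls through to the other anyOf branches
    ):
        print(candidate, bool(re.match(OSPARC_VARIABLE_PATTERN, candidate)))

The first three candidates match; the last one does not and is therefore handled by the plain string or integer branches of the surrounding anyOf.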
@@ -2939,21 +2317,6 @@
"title": "NATRule",
"description": "Content of \"simcore.service.containers-allowed-outgoing-permit-list\" label"
},
- "NoAuthentication": {
- "properties": {
- "type": {
- "type": "string",
- "enum": [
- "none"
- ],
- "title": "Type",
- "default": "none"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "title": "NoAuthentication"
- },
"NodeState": {
"properties": {
"modified": {
@@ -2973,18 +2336,21 @@
"description": "contains the node inputs dependencies if they need to be computed first"
},
"currentStatus": {
- "allOf": [
- {
- "$ref": "#/components/schemas/RunningState"
- }
- ],
+ "$ref": "#/components/schemas/RunningState",
"description": "the node's current state",
"default": "NOT_STARTED"
},
"progress": {
- "type": "number",
- "maximum": 1.0,
- "minimum": 0.0,
+ "anyOf": [
+ {
+ "type": "number",
+ "maximum": 1.0,
+ "minimum": 0.0
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Progress",
"description": "current progress of the task if available (None if not started or not a computational task)",
"default": 0
@@ -3007,11 +2373,6 @@
],
"title": "ObservationItem"
},
- "OsparcVariableIdentifier": {
- "type": "string",
- "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$",
- "title": "OsparcVariableIdentifier"
- },
"PathMappingsLabel": {
"properties": {
"inputs_path": {
@@ -3033,23 +2394,36 @@
},
"type": "array",
"title": "State Paths",
- "description": "optional list of paths which contents need to be persisted",
- "default": []
+ "description": "optional list of paths which contents need to be persisted"
},
"state_exclude": {
- "items": {
- "type": "string"
- },
- "type": "array",
- "uniqueItems": true,
+ "anyOf": [
+ {
+ "items": {
+ "type": "string"
+ },
+ "type": "array",
+ "uniqueItems": true
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "State Exclude",
"description": "optional list unix shell rules used to exclude files from the state"
},
"volume_size_limits": {
- "additionalProperties": {
- "type": "string"
- },
- "type": "object",
+ "anyOf": [
+ {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "type": "object"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Volume Size Limits",
"description": "Apply volume size limits to entries in: `inputs_path`, `outputs_path` and `state_paths`. Limits must be parsable by Pydantic's ByteSize."
}
@@ -3078,9 +2452,16 @@
"description": "The adjacency list of the current pipeline in terms of {NodeID: [successor NodeID]}"
},
"progress": {
- "type": "number",
- "maximum": 1.0,
- "minimum": 0.0,
+ "anyOf": [
+ {
+ "type": "number",
+ "maximum": 1.0,
+ "minimum": 0.0
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Progress",
"description": "the progress of the pipeline (None if there are no computational tasks)"
},
@@ -3199,6 +2580,7 @@
"properties": {
"size_bytes": {
"type": "integer",
+ "minimum": 0,
"title": "Size Bytes",
"description": "The amount of data transferred by the retrieve call"
}
@@ -3252,17 +2634,20 @@
"title": "Service Uuid"
},
"service_basepath": {
- "type": "string",
- "format": "path",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "path"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Service Basepath",
"description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint."
},
"boot_type": {
- "allOf": [
- {
- "$ref": "#/components/schemas/ServiceBootType"
- }
- ],
+ "$ref": "#/components/schemas/ServiceBootType",
"description": "Describes how the dynamic services was started (legacy=V0, modern=V2).Since legacy services do not have this label it defaults to V0.",
"default": "V0"
},
@@ -3281,31 +2666,48 @@
"minimum": 0
},
"published_port": {
- "type": "integer",
- "exclusiveMaximum": true,
- "exclusiveMinimum": true,
+ "anyOf": [
+ {
+ "type": "integer",
+ "exclusiveMaximum": true,
+ "exclusiveMinimum": true,
+ "maximum": 65535,
+ "minimum": 0
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Published Port",
"description": "the service swarm published port if any",
- "deprecated": true,
- "maximum": 65535,
- "minimum": 0
+ "deprecated": true
},
"entry_point": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Entry Point",
"description": "if empty the service entrypoint is on the root endpoint.",
"deprecated": true
},
"service_state": {
- "allOf": [
- {
- "$ref": "#/components/schemas/ServiceState"
- }
- ],
+ "$ref": "#/components/schemas/ServiceState",
"description": "service current state"
},
"service_message": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Service Message",
"description": "additional information related to service state"
}
@@ -3340,73 +2742,83 @@
"title": "RunningState",
"description": "State of execution of a project's computational workflow\n\nSEE StateType for task state"
},
- "Scheduler": {
- "properties": {
- "status": {
- "type": "string",
- "title": "Status",
- "description": "The running status of the scheduler"
- },
- "workers": {
- "additionalProperties": {
- "$ref": "#/components/schemas/Worker"
- },
- "type": "object",
- "title": "Workers"
- }
- },
- "type": "object",
- "required": [
- "status"
- ],
- "title": "Scheduler"
- },
"SchedulerData": {
"properties": {
"paths_mapping": {
"$ref": "#/components/schemas/PathMappingsLabel"
},
"simcore.service.compose-spec": {
- "type": "object",
+ "anyOf": [
+ {
+ "type": "object"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Simcore.Service.Compose-Spec",
"description": "json encoded docker-compose specifications. see https://docs.docker.com/compose/compose-file/, only used by dynamic-sidecar."
},
"simcore.service.container-http-entrypoint": {
- "type": "string",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Simcore.Service.Container-Http-Entrypoint",
"description": "When a docker-compose specifications is provided, the container where the traffic must flow has to be specified. Required by dynamic-sidecar when compose_spec is set."
},
"user_preferences_path": {
- "type": "string",
- "format": "path",
- "title": "User Preferences Path"
- },
- "simcore.service.restart-policy": {
- "allOf": [
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "path"
+ },
{
- "$ref": "#/components/schemas/RestartPolicy"
+ "type": "null"
}
],
+ "title": "User Preferences Path"
+ },
+ "simcore.service.restart-policy": {
+ "$ref": "#/components/schemas/RestartPolicy",
"description": "the dynamic-sidecar can restart all running containers on certain events. Supported events:\n- `no-restart` default\n- `on-inputs-downloaded` after inputs are downloaded\n",
"default": "no-restart"
},
"simcore.service.containers-allowed-outgoing-permit-list": {
- "additionalProperties": {
- "items": {
- "$ref": "#/components/schemas/NATRule"
+ "anyOf": [
+ {
+ "additionalProperties": {
+ "items": {
+ "$ref": "#/components/schemas/NATRule"
+ },
+ "type": "array"
+ },
+ "type": "object"
},
- "type": "array"
- },
- "type": "object",
+ {
+ "type": "null"
+ }
+ ],
"title": "Simcore.Service.Containers-Allowed-Outgoing-Permit-List",
"description": "allow internet access to certain domain names and ports per container"
},
"simcore.service.containers-allowed-outgoing-internet": {
- "items": {
- "type": "string"
- },
- "type": "array",
- "uniqueItems": true,
+ "anyOf": [
+ {
+ "items": {
+ "type": "string"
+ },
+ "type": "array",
+ "uniqueItems": true
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Simcore.Service.Containers-Allowed-Outgoing-Internet",
"description": "allow complete internet access to containers in here"
},
@@ -3468,12 +2880,7 @@
"minimum": 0
},
"dynamic_sidecar": {
- "allOf": [
- {
- "$ref": "#/components/schemas/DynamicSidecar"
- }
- ],
- "title": "Dynamic Sidecar",
+ "$ref": "#/components/schemas/DynamicSidecar",
"description": "stores information fetched from the dynamic-sidecar"
},
"dynamic_sidecar_network_name": {
@@ -3497,9 +2904,6 @@
"minimum": 0
},
"service_resources": {
- "additionalProperties": {
- "$ref": "#/components/schemas/ImageResources"
- },
"type": "object",
"title": "Service Resources",
"description": "service resources used to enforce limits"
@@ -3525,39 +2929,52 @@
"description": "service name given to the proxy"
},
"proxy_admin_api_port": {
- "type": "integer",
- "exclusiveMaximum": true,
- "exclusiveMinimum": true,
+ "anyOf": [
+ {
+ "type": "integer",
+ "exclusiveMaximum": true,
+ "exclusiveMinimum": true,
+ "maximum": 65535,
+ "minimum": 0
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Proxy Admin Api Port",
- "description": "used as the admin endpoint API port",
- "maximum": 65535,
- "minimum": 0
+ "description": "used as the admin endpoint API port"
},
"wallet_info": {
- "allOf": [
+ "anyOf": [
+ {
+ "$ref": "#/components/schemas/WalletInfo-Output"
+ },
{
- "$ref": "#/components/schemas/WalletInfo"
+ "type": "null"
}
],
- "title": "Wallet Info",
"description": "contains information about the wallet used to bill the running service"
},
"pricing_info": {
- "allOf": [
+ "anyOf": [
{
"$ref": "#/components/schemas/PricingInfo"
+ },
+ {
+ "type": "null"
}
],
- "title": "Pricing Info",
"description": "contains pricing information so we know what is the cost of running of the service"
},
"hardware_info": {
- "allOf": [
+ "anyOf": [
{
"$ref": "#/components/schemas/HardwareInfo"
+ },
+ {
+ "type": "null"
}
],
- "title": "Hardware Info",
"description": "contains harware information so we know on which hardware to run the service"
},
"product_name": {
@@ -3566,6 +2983,7 @@
"description": "Current product upon which this service is scheduled. If set to None, the current product is undefined. Mostly for backwards compatibility"
}
},
+ "additionalProperties": true,
"type": "object",
"required": [
"paths_mapping",
@@ -3584,8 +3002,7 @@
"request_scheme",
"request_simcore_user_agent"
],
- "title": "SchedulerData",
- "description": "All \"simcore.service.*\" labels including keys"
+ "title": "SchedulerData"
},
"ServiceBootType": {
"type": "string",
@@ -3593,8 +3010,7 @@
"V0",
"V2"
],
- "title": "ServiceBootType",
- "description": "An enumeration."
+ "title": "ServiceBootType"
},
"ServiceRemovalState": {
"properties": {
@@ -3621,6 +3037,7 @@
"title": "ServiceRemovalState"
},
"ServiceState": {
+ "type": "string",
"enum": [
"failed",
"pending",
@@ -3631,20 +3048,33 @@
"complete",
"idle"
],
- "title": "ServiceState",
- "description": "An enumeration."
+ "title": "ServiceState"
},
"ServicesInstrumentation": {
"properties": {
"start_requested_at": {
- "type": "string",
- "format": "date-time",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "date-time"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Start Requested At",
"description": "moment in which the process of starting the service was requested"
},
"close_requested_at": {
- "type": "string",
- "format": "date-time",
+ "anyOf": [
+ {
+ "type": "string",
+ "format": "date-time"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Close Requested At",
"description": "moment in which the process of stopping the service was requested"
}
@@ -3652,35 +3082,6 @@
"type": "object",
"title": "ServicesInstrumentation"
},
- "SimpleAuthentication": {
- "properties": {
- "type": {
- "type": "string",
- "enum": [
- "simple"
- ],
- "title": "Type",
- "default": "simple"
- },
- "username": {
- "type": "string",
- "title": "Username"
- },
- "password": {
- "type": "string",
- "format": "password",
- "title": "Password",
- "writeOnly": true
- }
- },
- "additionalProperties": false,
- "type": "object",
- "required": [
- "username",
- "password"
- ],
- "title": "SimpleAuthentication"
- },
"Status2": {
"type": "string",
"enum": [
@@ -3693,63 +3094,7 @@
"dead"
],
"title": "Status2",
- "description": " String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\"."
- },
- "TLSAuthentication": {
- "properties": {
- "type": {
- "type": "string",
- "enum": [
- "tls"
- ],
- "title": "Type",
- "default": "tls"
- },
- "tls_ca_file": {
- "type": "string",
- "format": "path",
- "title": "Tls Ca File"
- },
- "tls_client_cert": {
- "type": "string",
- "format": "path",
- "title": "Tls Client Cert"
- },
- "tls_client_key": {
- "type": "string",
- "format": "path",
- "title": "Tls Client Key"
- }
- },
- "additionalProperties": false,
- "type": "object",
- "required": [
- "tls_ca_file",
- "tls_client_cert",
- "tls_client_key"
- ],
- "title": "TLSAuthentication"
- },
- "TaskCounts": {
- "properties": {
- "error": {
- "type": "integer",
- "title": "Error",
- "default": 0
- },
- "memory": {
- "type": "integer",
- "title": "Memory",
- "default": 0
- },
- "executing": {
- "type": "integer",
- "title": "Executing",
- "default": 0
- }
- },
- "type": "object",
- "title": "TaskCounts"
+ "description": "String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\"."
},
"TaskLogFileGet": {
"properties": {
@@ -3759,10 +3104,16 @@
"title": "Task Id"
},
"download_link": {
- "type": "string",
- "maxLength": 65536,
- "minLength": 1,
- "format": "uri",
+ "anyOf": [
+ {
+ "type": "string",
+ "minLength": 1,
+ "format": "uri"
+ },
+ {
+ "type": "null"
+ }
+ ],
"title": "Download Link",
"description": "Presigned link for log file or None if still not available"
}
@@ -3806,14 +3157,6 @@
],
"title": "TasksSelection"
},
- "UsedResources": {
- "additionalProperties": {
- "type": "number",
- "minimum": 0.0
- },
- "type": "object",
- "title": "UsedResources"
- },
"UserServiceCommand": {
"properties": {
"service": {
@@ -3885,7 +3228,7 @@
],
"title": "ValidationError"
},
- "WalletInfo": {
+ "WalletInfo-Input": {
"properties": {
"wallet_id": {
"type": "integer",
@@ -3898,7 +3241,14 @@
"title": "Wallet Name"
},
"wallet_credit_amount": {
- "type": "number",
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ],
"title": "Wallet Credit Amount"
}
},
@@ -3910,76 +3260,30 @@
],
"title": "WalletInfo"
},
- "Worker": {
- "properties": {
- "id": {
- "type": "string",
- "title": "Id"
- },
- "name": {
- "type": "string",
- "title": "Name"
- },
- "resources": {
- "$ref": "#/components/schemas/DictModel_str__PositiveFloat_"
- },
- "used_resources": {
- "$ref": "#/components/schemas/UsedResources"
- },
- "memory_limit": {
- "type": "integer",
- "title": "Memory Limit"
- },
- "metrics": {
- "$ref": "#/components/schemas/WorkerMetrics"
- }
- },
- "type": "object",
- "required": [
- "id",
- "name",
- "resources",
- "used_resources",
- "memory_limit",
- "metrics"
- ],
- "title": "Worker"
- },
- "WorkerMetrics": {
+ "WalletInfo-Output": {
"properties": {
- "cpu": {
- "type": "number",
- "title": "Cpu",
- "description": "consumed % of cpus"
- },
- "memory": {
+ "wallet_id": {
"type": "integer",
- "title": "Memory",
- "description": "consumed memory"
+ "exclusiveMinimum": true,
+ "title": "Wallet Id",
+ "minimum": 0
},
- "num_fds": {
- "type": "integer",
- "title": "Num Fds",
- "description": "consumed file descriptors"
+ "wallet_name": {
+ "type": "string",
+ "title": "Wallet Name"
},
- "task_counts": {
- "allOf": [
- {
- "$ref": "#/components/schemas/TaskCounts"
- }
- ],
- "title": "Task Counts",
- "description": "task details"
+ "wallet_credit_amount": {
+ "type": "string",
+ "title": "Wallet Credit Amount"
}
},
"type": "object",
"required": [
- "cpu",
- "memory",
- "num_fds",
- "task_counts"
+ "wallet_id",
+ "wallet_name",
+ "wallet_credit_amount"
],
- "title": "WorkerMetrics"
+ "title": "WalletInfo"
},
"_PortRange": {
"properties": {
@@ -3993,7 +3297,8 @@
"minimum": 0
},
{
- "$ref": "#/components/schemas/OsparcVariableIdentifier"
+ "type": "string",
+ "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$"
}
],
"title": "Lower"
@@ -4008,7 +3313,8 @@
"minimum": 0
},
{
- "$ref": "#/components/schemas/OsparcVariableIdentifier"
+ "type": "string",
+ "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$"
}
],
"title": "Upper"
@@ -4031,16 +3337,12 @@
"unhealthy"
],
"title": "Status",
- "description": " Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem"
+ "description": "Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem"
},
"simcore_service_director_v2__models__dynamic_services_scheduler__Status": {
"properties": {
"current": {
- "allOf": [
- {
- "$ref": "#/components/schemas/DynamicSidecarStatus"
- }
- ],
+ "$ref": "#/components/schemas/DynamicSidecarStatus",
"description": "status of the service"
},
"info": {
diff --git a/services/director-v2/requirements/_base.in b/services/director-v2/requirements/_base.in
index dc173e2c2b6..5cc2fcd649c 100644
--- a/services/director-v2/requirements/_base.in
+++ b/services/director-v2/requirements/_base.in
@@ -23,7 +23,6 @@ aio-pika
aiocache[redis,msgpack]
aiodocker
aiopg[sa]
-dask-gateway
fastapi[all]
httpx
networkx
diff --git a/services/director-v2/requirements/_base.txt b/services/director-v2/requirements/_base.txt
index 15a4e37ffc5..e7bfdb265fc 100644
--- a/services/director-v2/requirements/_base.txt
+++ b/services/director-v2/requirements/_base.txt
@@ -61,7 +61,6 @@ aiohttp==3.9.5
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/simcore-sdk/requirements/_base.in
# aiodocker
- # dask-gateway
aiopg==1.4.0
# via
# -r requirements/../../../packages/simcore-sdk/requirements/_base.in
@@ -156,7 +155,6 @@ click==8.1.7
# via
# -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt
# dask
- # dask-gateway
# distributed
# typer
# uvicorn
@@ -169,10 +167,7 @@ dask==2024.5.1
# via
# -r requirements/../../../packages/dask-task-models-library/requirements/_base.in
# -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt
- # dask-gateway
# distributed
-dask-gateway==2024.1.0
- # via -r requirements/_base.in
deprecated==1.2.14
# via
# opentelemetry-api
@@ -183,7 +178,6 @@ distributed==2024.5.1
# via
# -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt
# dask
- # dask-gateway
dnspython==2.6.1
# via email-validator
email-validator==2.1.1
@@ -755,7 +749,6 @@ pyyaml==6.0.1
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt
# dask
- # dask-gateway
# distributed
# fastapi
# uvicorn
@@ -801,8 +794,6 @@ redis==5.0.4
# aiocache
referencing==0.29.3
# via
- # -c requirements/../../../packages/service-library/requirements/./constraints.txt
- # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/./constraints.txt
# jsonschema
# jsonschema-specifications
repro-zipfile==0.3.1
@@ -938,7 +929,6 @@ toolz==0.12.1
tornado==6.4
# via
# -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt
- # dask-gateway
# distributed
tqdm==4.66.4
# via
@@ -1069,7 +1059,9 @@ wsproto==1.2.0
yarl==1.9.4
# via
# -r requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in
+ # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in
# aio-pika
# aiohttp
# aiormq
diff --git a/services/director-v2/requirements/_test.in b/services/director-v2/requirements/_test.in
index fb4edeaafbf..3633a09b704 100644
--- a/services/director-v2/requirements/_test.in
+++ b/services/director-v2/requirements/_test.in
@@ -14,7 +14,6 @@ aioboto3
alembic # migration due to pytest_simcore.postgres_service2
asgi_lifespan
async-asgi-testclient # replacement for fastapi.testclient.TestClient [see b) below]
-dask-gateway-server[local]
dask[distributed,diagnostics]
docker
Faker
diff --git a/services/director-v2/requirements/_test.txt b/services/director-v2/requirements/_test.txt
index ee97fe23500..6f4b07aeac9 100644
--- a/services/director-v2/requirements/_test.txt
+++ b/services/director-v2/requirements/_test.txt
@@ -15,7 +15,6 @@ aiohttp==3.9.5
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aiobotocore
- # dask-gateway-server
aioitertools==0.12.0
# via aiobotocore
aiormq==6.8.0
@@ -59,8 +58,6 @@ certifi==2024.2.2
# httpcore
# httpx
# requests
-cffi==1.17.1
- # via cryptography
charset-normalizer==3.3.2
# via
# -c requirements/_base.txt
@@ -75,23 +72,15 @@ cloudpickle==3.0.0
# -c requirements/_base.txt
# dask
# distributed
-colorlog==6.8.2
- # via dask-gateway-server
contourpy==1.3.0
# via bokeh
coverage==7.6.1
# via pytest-cov
-cryptography==43.0.1
- # via
- # -c requirements/../../../requirements/constraints.txt
- # dask-gateway-server
dask==2024.5.1
# via
# -c requirements/_base.txt
# -r requirements/_test.in
# distributed
-dask-gateway-server==2023.1.1
- # via -r requirements/_test.in
distributed==2024.5.1
# via
# -c requirements/_base.txt
@@ -219,8 +208,6 @@ psutil==6.0.0
# via
# -c requirements/_base.txt
# distributed
-pycparser==2.22
- # via cffi
pytest==8.3.3
# via
# -r requirements/_test.in
@@ -290,7 +277,6 @@ sqlalchemy==1.4.52
# -c requirements/_base.txt
# -r requirements/_test.in
# alembic
- # dask-gateway-server
sqlalchemy2-stubs==0.0.2a38
# via sqlalchemy
tblib==3.0.0
@@ -308,8 +294,6 @@ tornado==6.4
# -c requirements/_base.txt
# bokeh
# distributed
-traitlets==5.14.3
- # via dask-gateway-server
types-networkx==3.2.1.20240918
# via -r requirements/_test.in
types-psycopg2==2.9.21.20240819
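With dask-gateway removed from the runtime requirements and dask-gateway-server from the test requirements, a throwaway check (illustrative only) confirms neither package is still importable in the rebuilt environments:

    import importlib.util

    for module in ("dask_gateway", "dask_gateway_server"):
        assert importlib.util.find_spec(module) is None, f"{module} is still installed"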
diff --git a/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py b/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py
index 671fc78d2d4..df3d607049c 100644
--- a/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py
+++ b/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py
@@ -2,7 +2,6 @@
from .._meta import API_VTAG
from .routes import (
- clusters,
computations,
computations_tasks,
dynamic_scheduler,
@@ -27,7 +26,6 @@
v2_router.include_router(
dynamic_services.router, tags=["dynamic services"], prefix="/dynamic_services"
)
-v2_router.include_router(clusters.router, tags=["clusters"], prefix="/clusters")
v2_router.include_router(
dynamic_scheduler.router, tags=["dynamic scheduler"], prefix="/dynamic_scheduler"
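Once the clusters router is no longer included, its old endpoints should resolve to 404. A hedged regression sketch (the app fixture name and the /v2 mount prefix are assumptions inferred from the router setup above, not verified against the test suite):

    import httpx

    async def test_clusters_routes_are_gone(initialized_app):  # hypothetical app fixture
        transport = httpx.ASGITransport(app=initialized_app)
        async with httpx.AsyncClient(transport=transport, base_url="http://director-v2") as client:
            response = await client.get("/v2/clusters/default")
            assert response.status_code == 404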
diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py b/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py
deleted file mode 100644
index d2ab294757e..00000000000
--- a/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py
+++ /dev/null
@@ -1,242 +0,0 @@
-import logging
-from asyncio.log import logger
-from typing import Final
-
-from aiocache import cached # type: ignore[import-untyped]
-from fastapi import APIRouter, Depends, HTTPException
-from models_library.api_schemas_directorv2.clusters import (
- ClusterCreate,
- ClusterDetails,
- ClusterDetailsGet,
- ClusterGet,
- ClusterPatch,
- ClusterPing,
-)
-from models_library.clusters import DEFAULT_CLUSTER_ID, BaseCluster, ClusterID
-from models_library.users import UserID
-from starlette import status
-
-from ...core.errors import (
- ClusterInvalidOperationError,
- ConfigurationError,
- DaskClientAcquisisitonError,
-)
-from ...core.settings import ComputationalBackendSettings
-from ...modules.dask_clients_pool import DaskClientsPool
-from ...modules.db.repositories.clusters import ClustersRepository
-from ...utils.dask_client_utils import test_scheduler_endpoint
-from ..dependencies.dask import get_dask_clients_pool
-from ..dependencies.database import get_repository
-from ..dependencies.scheduler import get_scheduler_settings
-
-router = APIRouter()
-log = logging.getLogger(__name__)
-
-
-GET_CLUSTER_DETAILS_CACHING_TTL: Final[int] = 3
-
-
-def _build_cache_key(fct, *_, **kwargs):
- return f"{fct.__name__}_{kwargs['cluster_id']}"
-
-
-@cached(ttl=GET_CLUSTER_DETAILS_CACHING_TTL, key_builder=_build_cache_key)
-async def _get_cluster_details_with_id(
- settings: ComputationalBackendSettings,
- user_id: UserID,
- cluster_id: ClusterID,
- clusters_repo: ClustersRepository,
- dask_clients_pool: DaskClientsPool,
-) -> ClusterDetails:
- log.debug("Getting details for cluster '%s'", cluster_id)
- cluster: BaseCluster = settings.default_cluster
- if cluster_id != DEFAULT_CLUSTER_ID:
- cluster = await clusters_repo.get_cluster(user_id, cluster_id)
- async with dask_clients_pool.acquire(cluster) as client:
- return await client.get_cluster_details()
-
-
-@router.post(
- "",
- summary="Create a new cluster for a user",
- response_model=ClusterGet,
- status_code=status.HTTP_201_CREATED,
-)
-async def create_cluster(
- user_id: UserID,
- new_cluster: ClusterCreate,
- clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)),
-):
- return await clusters_repo.create_cluster(user_id, new_cluster)
-
-
-@router.get("", summary="Lists clusters for user", response_model=list[ClusterGet])
-async def list_clusters(
- user_id: UserID,
- clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)),
- settings: ComputationalBackendSettings = Depends(get_scheduler_settings),
-):
- default_cluster = settings.default_cluster
- return [default_cluster] + await clusters_repo.list_clusters(user_id)
-
-
-@router.get(
- "/default",
- summary="Returns the default cluster",
- response_model=ClusterGet,
- status_code=status.HTTP_200_OK,
-)
-async def get_default_cluster(
- settings: ComputationalBackendSettings = Depends(get_scheduler_settings),
-):
- return settings.default_cluster
-
-
-@router.get(
- "/{cluster_id}",
- summary="Get one cluster for user",
- response_model=ClusterGet,
- status_code=status.HTTP_200_OK,
-)
-async def get_cluster(
- user_id: UserID,
- cluster_id: ClusterID,
- clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)),
-):
- return await clusters_repo.get_cluster(user_id, cluster_id)
-
-
-@router.patch(
- "/{cluster_id}",
- summary="Modify a cluster for user",
- response_model=ClusterGet,
- status_code=status.HTTP_200_OK,
-)
-async def update_cluster(
- user_id: UserID,
- cluster_id: ClusterID,
- updated_cluster: ClusterPatch,
- clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)),
-):
- try:
- return await clusters_repo.update_cluster(user_id, cluster_id, updated_cluster)
- except ClusterInvalidOperationError as e:
- raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=f"{e}") from e
-
-
-@router.delete(
- "/{cluster_id}",
- summary="Remove a cluster for user",
- response_model=None,
- status_code=status.HTTP_204_NO_CONTENT,
-)
-async def delete_cluster(
- user_id: UserID,
- cluster_id: ClusterID,
- clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)),
-):
- await clusters_repo.delete_cluster(user_id, cluster_id)
-
-
-@router.get(
- "/default/details",
- summary="Returns the cluster details",
- response_model=ClusterDetailsGet,
- status_code=status.HTTP_200_OK,
-)
-async def get_default_cluster_details(
- user_id: UserID,
- settings: ComputationalBackendSettings = Depends(get_scheduler_settings),
- clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)),
- dask_clients_pool: DaskClientsPool = Depends(get_dask_clients_pool),
-):
- default_cluster = await _get_cluster_details_with_id(
- settings=settings,
- user_id=user_id,
- cluster_id=DEFAULT_CLUSTER_ID,
- clusters_repo=clusters_repo,
- dask_clients_pool=dask_clients_pool,
- )
- logger.debug("found followind %s", f"{default_cluster=!r}")
- return default_cluster
-
-
-@router.get(
- "/{cluster_id}/details",
- summary="Returns the cluster details",
- response_model=ClusterDetailsGet,
- status_code=status.HTTP_200_OK,
-)
-async def get_cluster_details(
- user_id: UserID,
- cluster_id: ClusterID,
- settings: ComputationalBackendSettings = Depends(get_scheduler_settings),
- clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)),
- dask_clients_pool: DaskClientsPool = Depends(get_dask_clients_pool),
-):
- try:
- cluster_details = await _get_cluster_details_with_id(
- settings=settings,
- user_id=user_id,
- cluster_id=cluster_id,
- clusters_repo=clusters_repo,
- dask_clients_pool=dask_clients_pool,
- )
- logger.debug("found following %s", f"{cluster_details=!r}")
- return cluster_details
- except DaskClientAcquisisitonError as exc:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND, detail=f"{exc}"
- ) from exc
-
-
-@router.post(
- ":ping",
- summary="Test cluster connection",
- response_model=None,
- status_code=status.HTTP_204_NO_CONTENT,
-)
-async def test_cluster_connection(
- cluster_auth: ClusterPing,
-):
- try:
- return await test_scheduler_endpoint(
- endpoint=cluster_auth.endpoint, authentication=cluster_auth.authentication
- )
-
- except ConfigurationError as e:
- raise HTTPException(
- status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=f"{e}"
- ) from e
-
-
-@router.post(
- "/default:ping",
- summary="Test cluster connection",
- response_model=None,
- status_code=status.HTTP_204_NO_CONTENT,
-)
-async def test_default_cluster_connection(
- settings: ComputationalBackendSettings = Depends(get_scheduler_settings),
-):
- cluster = settings.default_cluster
- return await test_scheduler_endpoint(
- endpoint=cluster.endpoint, authentication=cluster.authentication
- )
-
-
-@router.post(
- "/{cluster_id}:ping",
- summary="Test cluster connection",
- response_model=None,
- status_code=status.HTTP_204_NO_CONTENT,
-)
-async def test_specific_cluster_connection(
- user_id: UserID,
- cluster_id: ClusterID,
- clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)),
-):
- cluster = await clusters_repo.get_cluster(user_id, cluster_id)
- return await test_scheduler_endpoint(
- endpoint=cluster.endpoint, authentication=cluster.authentication
- )
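
The deleted per-user cluster endpoints all followed the same convention: domain or repository exceptions are caught at the route boundary and re-raised as `HTTPException` with a matching status code (409 for invalid operations, 404 when a client cannot be acquired, 422 for bad configuration). The following is a minimal, self-contained sketch of that pattern only; `ClusterInvalidOperationError` and `fake_update_cluster` are local stand-ins, not the removed simcore code.

```python
# Illustrative error-translation pattern: repository exceptions become HTTP errors.
from fastapi import FastAPI, HTTPException, status

app = FastAPI()


class ClusterInvalidOperationError(Exception):  # stand-in for the domain error
    ...


async def fake_update_cluster(cluster_id: int) -> dict:
    # pretend the repository rejects the operation
    raise ClusterInvalidOperationError(f"invalid operation on cluster {cluster_id}")


@app.patch("/clusters/{cluster_id}")
async def update_cluster(cluster_id: int) -> dict:
    try:
        return await fake_update_cluster(cluster_id)
    except ClusterInvalidOperationError as e:
        # same convention as the removed handler: 409 Conflict carrying the error text
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=f"{e}") from e
```
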
diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
index f25fdf32ece..f0b6e635ac7 100644
--- a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
+++ b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py
@@ -28,9 +28,8 @@
ComputationGet,
ComputationStop,
)
-from models_library.clusters import DEFAULT_CLUSTER_ID
from models_library.projects import ProjectAtDB, ProjectID
-from models_library.projects_nodes_io import NodeID, NodeIDStr
+from models_library.projects_nodes_io import NodeID
from models_library.projects_state import RunningState
from models_library.services import ServiceKeyVersion
from models_library.users import UserID
@@ -49,7 +48,6 @@
from tenacity.wait import wait_random
from ...core.errors import (
- ClusterAccessForbiddenError,
ClusterNotFoundError,
ClustersKeeperNotAvailableError,
ComputationalRunNotFoundError,
@@ -64,7 +62,6 @@
from ...models.comp_tasks import CompTaskAtDB
from ...modules.catalog import CatalogClient
from ...modules.comp_scheduler import run_new_pipeline, stop_pipeline
-from ...modules.db.repositories.clusters import ClustersRepository
from ...modules.db.repositories.comp_pipelines import CompPipelinesRepository
from ...modules.db.repositories.comp_runs import CompRunsRepository
from ...modules.db.repositories.comp_tasks import CompTasksRepository
@@ -115,7 +112,6 @@ async def _check_pipeline_startable(
pipeline_dag: nx.DiGraph,
computation: ComputationCreate,
catalog_client: CatalogClient,
- clusters_repo: ClustersRepository,
) -> None:
assert computation.product_name # nosec
if deprecated_tasks := await utils.find_deprecated_tasks(
@@ -131,20 +127,6 @@ async def _check_pipeline_startable(
status_code=status.HTTP_406_NOT_ACCEPTABLE,
detail=f"Project {computation.project_id} cannot run since it contains deprecated tasks {jsonable_encoder( deprecated_tasks)}",
)
- if computation.cluster_id:
- # check the cluster ID is a valid one
- try:
- await clusters_repo.get_cluster(computation.user_id, computation.cluster_id)
- except ClusterNotFoundError as exc:
- raise HTTPException(
- status_code=status.HTTP_406_NOT_ACCEPTABLE,
- detail=f"Project {computation.project_id} cannot run on cluster {computation.cluster_id}, not found",
- ) from exc
- except ClusterAccessForbiddenError as exc:
- raise HTTPException(
- status_code=status.HTTP_403_FORBIDDEN,
- detail=f"Project {computation.project_id} cannot run on cluster {computation.cluster_id}, no access",
- ) from exc
_UNKNOWN_NODE: Final[str] = "unknown node"
@@ -172,7 +154,7 @@ async def _get_project_node_names(
project_uuid: ProjectID, node_id: NodeID
) -> tuple[str, str]:
prj = await project_repo.get_project(project_uuid)
- node_id_str = NodeIDStr(f"{node_id}")
+ node_id_str = f"{node_id}"
if node_id_str not in prj.workbench:
_logger.error(
"%s not found in %s. it is an ancestor of %s. Please check!",
@@ -245,7 +227,6 @@ async def _try_start_pipeline(
app,
user_id=computation.user_id,
project_id=computation.project_id,
- cluster_id=computation.cluster_id or DEFAULT_CLUSTER_ID,
run_metadata=RunMetadataDict(
node_id_names_map={
NodeID(node_idstr): node_data.label
@@ -305,9 +286,6 @@ async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positi
comp_runs_repo: Annotated[
CompRunsRepository, Depends(get_repository(CompRunsRepository))
],
- clusters_repo: Annotated[
- ClustersRepository, Depends(get_repository(ClustersRepository))
- ],
users_repo: Annotated[UsersRepository, Depends(get_repository(UsersRepository))],
projects_metadata_repo: Annotated[
ProjectsMetadataRepository, Depends(get_repository(ProjectsMetadataRepository))
@@ -342,7 +320,7 @@ async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positi
if computation.start_pipeline:
await _check_pipeline_startable(
- minimal_computational_dag, computation, catalog_client, clusters_repo
+ minimal_computational_dag, computation, catalog_client
)
# ok so put the tasks in the db
@@ -411,7 +389,6 @@ async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positi
else None
),
iteration=last_run.iteration if last_run else None,
- cluster_id=last_run.cluster_id if last_run else None,
result=None,
started=compute_pipeline_started_timestamp(
minimal_computational_dag, comp_tasks
@@ -518,7 +495,6 @@ async def get_computation(
else None
),
iteration=last_run.iteration if last_run else None,
- cluster_id=last_run.cluster_id if last_run else None,
result=None,
started=compute_pipeline_started_timestamp(pipeline_dag, all_tasks),
stopped=compute_pipeline_stopped_timestamp(pipeline_dag, all_tasks),
@@ -593,7 +569,6 @@ async def stop_computation(
url=TypeAdapter(AnyHttpUrl).validate_python(f"{request.url}"),
stop_url=None,
iteration=last_run.iteration if last_run else None,
- cluster_id=last_run.cluster_id if last_run else None,
result=None,
started=compute_pipeline_started_timestamp(pipeline_dag, tasks),
stopped=compute_pipeline_stopped_timestamp(pipeline_dag, tasks),
@@ -665,9 +640,9 @@ def return_last_value(retry_state: Any) -> Any:
before_sleep=before_sleep_log(_logger, logging.INFO),
)
async def check_pipeline_stopped() -> bool:
- comp_tasks: list[CompTaskAtDB] = (
- await comp_tasks_repo.list_computational_tasks(project_id)
- )
+ comp_tasks: list[
+ CompTaskAtDB
+ ] = await comp_tasks_repo.list_computational_tasks(project_id)
pipeline_state = utils.get_pipeline_state_from_task_states(
comp_tasks,
)
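
With the cluster lookup removed from `_check_pipeline_startable`, the only remaining gate before starting a pipeline is the deprecated-services check, which rejects the request with 406. The sketch below is a dependency-light illustration of that gate; `find_deprecated_tasks` and the service dictionaries are made up for the example and are not the simcore implementation.

```python
# Minimal sketch of the remaining startability check: refuse to start a pipeline
# that contains deprecated services, answering 406 NOT ACCEPTABLE.
from fastapi import HTTPException, status


def find_deprecated_tasks(services: list[dict]) -> list[dict]:
    return [s for s in services if s.get("deprecated")]


def check_pipeline_startable(project_id: str, services: list[dict]) -> None:
    if deprecated := find_deprecated_tasks(services):
        raise HTTPException(
            status_code=status.HTTP_406_NOT_ACCEPTABLE,
            detail=f"Project {project_id} cannot run since it contains deprecated tasks {deprecated}",
        )


# does not raise: no service is flagged as deprecated
check_pipeline_startable("prj-1", [{"key": "sleeper", "deprecated": False}])
```
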
diff --git a/services/director-v2/src/simcore_service_director_v2/core/application.py b/services/director-v2/src/simcore_service_director_v2/core/application.py
index 43a9dcc4e03..4b62b4ce73c 100644
--- a/services/director-v2/src/simcore_service_director_v2/core/application.py
+++ b/services/director-v2/src/simcore_service_director_v2/core/application.py
@@ -35,7 +35,6 @@
)
from ..modules.osparc_variables import substitutions
from .errors import (
- ClusterAccessForbiddenError,
ClusterNotFoundError,
PipelineNotFoundError,
ProjectNetworkNotFoundError,
@@ -75,12 +74,6 @@ def _set_exception_handlers(app: FastAPI):
status.HTTP_404_NOT_FOUND, ClusterNotFoundError
),
)
- app.add_exception_handler(
- ClusterAccessForbiddenError,
- make_http_error_handler_for_exception(
- status.HTTP_403_FORBIDDEN, ClusterAccessForbiddenError
- ),
- )
# SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy
app.add_exception_handler(
diff --git a/services/director-v2/src/simcore_service_director_v2/core/errors.py b/services/director-v2/src/simcore_service_director_v2/core/errors.py
index 492e75bdeab..1dd0243bc4d 100644
--- a/services/director-v2/src/simcore_service_director_v2/core/errors.py
+++ b/services/director-v2/src/simcore_service_director_v2/core/errors.py
@@ -105,7 +105,7 @@ class MissingComputationalResourcesError(
): # pylint: disable=too-many-ancestors
msg_template = (
"Service {service_name}:{service_version} cannot be scheduled "
- "on cluster {cluster_id}: task needs '{task_resources}', "
+ "on cluster: task needs '{task_resources}', "
"cluster has {cluster_resources}"
)
@@ -114,7 +114,7 @@ class InsuficientComputationalResourcesError(
TaskSchedulingError
): # pylint: disable=too-many-ancestors
msg_template: str = (
- "Insufficient computational resources to run {service_name}:{service_version} with {service_requested_resources} on cluster {cluster_id}."
+ "Insufficient computational resources to run {service_name}:{service_version} with {service_requested_resources} on cluster."
"Cluster available workers: {cluster_available_resources}"
"TIP: Reduce service required resources or contact oSparc support"
)
@@ -165,14 +165,6 @@ class ClusterNotFoundError(ComputationalSchedulerError):
msg_template = "The cluster '{cluster_id}' not found"
-class ClusterAccessForbiddenError(ComputationalSchedulerError):
- msg_template = "Insufficient rights to access cluster '{cluster_id}'"
-
-
-class ClusterInvalidOperationError(ComputationalSchedulerError):
- msg_template = "Invalid operation on cluster '{cluster_id}'"
-
-
#
# SCHEDULER/CLIENT ERRORS
#
diff --git a/services/director-v2/src/simcore_service_director_v2/core/settings.py b/services/director-v2/src/simcore_service_director_v2/core/settings.py
index fe0af49fc5c..61e23e9f018 100644
--- a/services/director-v2/src/simcore_service_director_v2/core/settings.py
+++ b/services/director-v2/src/simcore_service_director_v2/core/settings.py
@@ -10,8 +10,7 @@
from fastapi import FastAPI
from models_library.basic_types import LogLevel, PortInt, VersionTag
from models_library.clusters import (
- DEFAULT_CLUSTER_ID,
- Cluster,
+ BaseCluster,
ClusterAuthentication,
ClusterTypeInModel,
NoAuthentication,
@@ -85,13 +84,11 @@ class ComputationalBackendSettings(BaseCustomSettings):
...,
description="This is the cluster that will be used by default"
" when submitting computational services (typically "
- "tcp://dask-scheduler:8786, tls://dask-scheduler:8786 for the internal cluster, or "
- "http(s)/GATEWAY_IP:8000 for a osparc-dask-gateway)",
+ "tcp://dask-scheduler:8786, tls://dask-scheduler:8786 for the internal cluster",
)
COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: ClusterAuthentication = Field(
- ...,
- description="Empty for the internal cluster, must be one "
- "of simple/kerberos/jupyterhub for the osparc-dask-gateway",
+ default=...,
+ description="this is the cluster authentication that will be used by default",
)
COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE: FileLinkType = Field(
FileLinkType.S3,
@@ -107,15 +104,13 @@ class ComputationalBackendSettings(BaseCustomSettings):
)
@cached_property
- def default_cluster(self) -> Cluster:
- return Cluster(
- id=DEFAULT_CLUSTER_ID,
+ def default_cluster(self) -> BaseCluster:
+ return BaseCluster(
name="Default cluster",
endpoint=self.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL,
authentication=self.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH,
owner=1, # NOTE: currently this is a soft hack (the group of everyone is the group 1)
type=ClusterTypeInModel.ON_PREMISE,
- access_rights={},
)
@field_validator("COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH", mode="before")
diff --git a/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py
index f3fedc6a9f9..915b5b2f1d0 100644
--- a/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py
+++ b/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py
@@ -2,7 +2,7 @@
from contextlib import suppress
from typing import TypeAlias
-from models_library.clusters import DEFAULT_CLUSTER_ID, ClusterID
+from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.projects_state import RunningState
@@ -70,13 +70,6 @@ def convert_result_from_state_type_enum_if_needed(cls, v):
return RunningState(DB_TO_RUNNING_STATE[StateType(v)])
return v
- @field_validator("cluster_id", mode="before")
- @classmethod
- def convert_null_to_default_cluster_id(cls, v):
- if v is None:
- v = DEFAULT_CLUSTER_ID
- return v
-
@field_validator("created", "modified", "started", "ended")
@classmethod
def ensure_utc(cls, v: datetime.datetime | None) -> datetime.datetime | None:
@@ -100,7 +93,7 @@ def convert_null_to_empty_metadata(cls, v):
"run_id": 432,
"project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5",
"user_id": 132,
- "cluster_id": 0,
+ "cluster_id": None,
"iteration": 42,
"result": "UNKNOWN",
"started": None,
@@ -116,7 +109,7 @@ def convert_null_to_empty_metadata(cls, v):
"run_id": 432,
"project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5",
"user_id": 132,
- "cluster_id": None, # this default to DEFAULT_CLUSTER_ID
+ "cluster_id": None,
"iteration": 42,
"result": "NOT_STARTED",
"started": None,
@@ -132,7 +125,7 @@ def convert_null_to_empty_metadata(cls, v):
"run_id": 43243,
"project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5",
"user_id": 132,
- "cluster_id": 123,
+ "cluster_id": None,
"iteration": 12,
"result": "SUCCESS",
"created": "2021-03-01T13:07:34.191610",
@@ -155,7 +148,7 @@ def convert_null_to_empty_metadata(cls, v):
"run_id": 43243,
"project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5",
"user_id": 132,
- "cluster_id": 123,
+ "cluster_id": None,
"iteration": 12,
"result": "SUCCESS",
"created": "2021-03-01T13:07:34.191610",
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py b/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py
index 2e62c414d86..01f5586fc35 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py
@@ -48,7 +48,6 @@ async def get_or_create_on_demand_cluster(
owner=user_id,
endpoint=returned_cluster.endpoint,
authentication=returned_cluster.authentication,
- access_rights={},
)
except RemoteMethodNotRegisteredError as exc:
# no clusters-keeper, that is not going to work!
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
index 281c9fc4630..57308eb27c9 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py
@@ -4,7 +4,6 @@
import networkx as nx
from aiopg.sa import Engine
from fastapi import FastAPI
-from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.users import UserID
from servicelib.background_task import start_periodic_task, stop_periodic_task
@@ -36,13 +35,10 @@ async def run_new_pipeline(
*,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
run_metadata: RunMetadataDict,
use_on_demand_clusters: bool,
) -> None:
- """Sets a new pipeline to be scheduled on the computational resources.
- Passing cluster_id=0 will use the default cluster. Passing an existing ID will instruct
- the scheduler to run the tasks on the defined cluster"""
+ """Sets a new pipeline to be scheduled on the computational resources."""
# ensure the pipeline exists and is populated with something
db_engine = get_db_engine(app)
dag = await _get_pipeline_dag(project_id, db_engine)
@@ -56,7 +52,6 @@ async def run_new_pipeline(
new_run = await CompRunsRepository.instance(db_engine).create(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
metadata=run_metadata,
use_on_demand_clusters=use_on_demand_clusters,
)
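
After this change, `run_new_pipeline` no longer takes a `cluster_id`; the only scheduling knob left to the caller is `use_on_demand_clusters`. The sketch below merely mirrors the new keyword-only call shape with a placeholder body, so it runs standalone; it is not the director-v2 implementation.

```python
# Sketch mirroring the simplified run_new_pipeline signature (no cluster_id).
import asyncio
from uuid import UUID, uuid4


async def run_new_pipeline(
    app: object,
    *,
    user_id: int,
    project_id: UUID,
    run_metadata: dict,
    use_on_demand_clusters: bool,
) -> None:
    # placeholder body; the real code creates a comp_runs row and schedules the pipeline
    print(f"scheduling {project_id=} for {user_id=} ({use_on_demand_clusters=})")


asyncio.run(
    run_new_pipeline(
        app=None,
        user_id=1,
        project_id=uuid4(),
        run_metadata={},
        use_on_demand_clusters=False,
    )
)
```
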
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
index d8fdccc1663..b959c9c8014 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py
@@ -783,9 +783,9 @@ async def _schedule_tasks_to_start( # noqa: C901
except Exception:
_logger.exception(
"Unexpected error for %s with %s on %s happened when scheduling %s:",
- f"{user_id=}",
- f"{project_id=}",
- f"{comp_run.cluster_id=}",
+ f"{comp_run.user_id=}",
+ f"{comp_run.project_uuid=}",
+ f"{comp_run.use_on_demand_clusters=}",
f"{tasks_ready_to_start.keys()=}",
)
await CompTasksRepository.instance(
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py
index adc67853686..153378e9ee5 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py
@@ -12,7 +12,7 @@
TaskProgressEvent,
)
from dask_task_models_library.container_tasks.io import TaskOutputData
-from models_library.clusters import DEFAULT_CLUSTER_ID, BaseCluster, ClusterID
+from models_library.clusters import BaseCluster
from models_library.errors import ErrorDict
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
@@ -45,7 +45,6 @@
from ..clusters_keeper import get_or_create_on_demand_cluster
from ..dask_client import DaskClient, PublishedComputationTask
from ..dask_clients_pool import DaskClientsPool
-from ..db.repositories.clusters import ClustersRepository
from ..db.repositories.comp_runs import CompRunsRepository
from ..db.repositories.comp_tasks import CompTasksRepository
from ._scheduler_base import BaseCompScheduler
@@ -72,7 +71,6 @@ async def _cluster_dask_client(
scheduler: "DaskScheduler",
*,
use_on_demand_clusters: bool,
- cluster_id: ClusterID,
run_metadata: RunMetadataDict,
) -> AsyncIterator[DaskClient]:
cluster: BaseCluster = scheduler.settings.default_cluster
@@ -82,9 +80,6 @@ async def _cluster_dask_client(
user_id=user_id,
wallet_id=run_metadata.get("wallet_id"),
)
- if cluster_id != DEFAULT_CLUSTER_ID:
- clusters_repo = ClustersRepository.instance(scheduler.db_engine)
- cluster = await clusters_repo.get_cluster(user_id, cluster_id)
async with scheduler.dask_clients_pool.acquire(cluster) as client:
yield client
@@ -115,11 +110,6 @@ async def _start_tasks(
user_id,
self,
use_on_demand_clusters=comp_run.use_on_demand_clusters,
- cluster_id=(
- comp_run.cluster_id
- if comp_run.cluster_id is not None
- else DEFAULT_CLUSTER_ID
- ),
run_metadata=comp_run.metadata,
) as client:
# Change the tasks state to PENDING
@@ -135,11 +125,6 @@ async def _start_tasks(
client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=(
- comp_run.cluster_id
- if comp_run.cluster_id is not None
- else DEFAULT_CLUSTER_ID
- ),
tasks={node_id: task.image},
hardware_info=task.hardware_info,
callback=wake_up_callback,
@@ -171,11 +156,6 @@ async def _get_tasks_status(
user_id,
self,
use_on_demand_clusters=comp_run.use_on_demand_clusters,
- cluster_id=(
- comp_run.cluster_id
- if comp_run.cluster_id is not None
- else DEFAULT_CLUSTER_ID
- ),
run_metadata=comp_run.metadata,
) as client:
tasks_statuses = await client.get_tasks_status(
@@ -213,11 +193,6 @@ async def _stop_tasks(
user_id,
self,
use_on_demand_clusters=comp_run.use_on_demand_clusters,
- cluster_id=(
- comp_run.cluster_id
- if comp_run.cluster_id is not None
- else DEFAULT_CLUSTER_ID
- ),
run_metadata=comp_run.metadata,
) as client:
await asyncio.gather(
@@ -251,11 +226,6 @@ async def _process_completed_tasks(
user_id,
self,
use_on_demand_clusters=comp_run.use_on_demand_clusters,
- cluster_id=(
- comp_run.cluster_id
- if comp_run.cluster_id is not None
- else DEFAULT_CLUSTER_ID
- ),
run_metadata=comp_run.metadata,
) as client:
tasks_results = await asyncio.gather(
@@ -275,11 +245,6 @@ async def _process_completed_tasks(
user_id,
self,
use_on_demand_clusters=comp_run.use_on_demand_clusters,
- cluster_id=(
- comp_run.cluster_id
- if comp_run.cluster_id is not None
- else DEFAULT_CLUSTER_ID
- ),
run_metadata=comp_run.metadata,
) as client:
await asyncio.gather(
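
`_cluster_dask_client` no longer resolves a cluster by ID: the scheduler now uses either the default cluster from settings or an on-demand cluster. The following is a self-contained sketch of that selection flow with dummy types; the real code acquires a pooled `DaskClient` instead of yielding the cluster itself.

```python
# Simplified client-selection flow after dropping cluster_id.
import asyncio
from contextlib import asynccontextmanager
from dataclasses import dataclass


@dataclass(frozen=True)
class Cluster:
    name: str
    endpoint: str


DEFAULT_CLUSTER = Cluster(name="Default cluster", endpoint="tcp://dask-scheduler:8786")


async def get_or_create_on_demand_cluster(user_id: int) -> Cluster:
    # stand-in for the clusters-keeper RPC call
    return Cluster(name=f"on-demand-{user_id}", endpoint="tls://autoscaled:8786")


@asynccontextmanager
async def cluster_dask_client(user_id: int, *, use_on_demand_clusters: bool):
    cluster = DEFAULT_CLUSTER
    if use_on_demand_clusters:
        cluster = await get_or_create_on_demand_cluster(user_id)
    # the real scheduler acquires a dask client for `cluster` here
    yield cluster


async def main() -> None:
    async with cluster_dask_client(42, use_on_demand_clusters=True) as cluster:
        print(cluster)


asyncio.run(main())
```
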
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py
index 0458b159811..9d2722e3b6c 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py
@@ -1,4 +1,4 @@
-from typing import Callable
+from collections.abc import Callable
from fastapi import FastAPI
from models_library.docker import DockerGenericTag
@@ -13,10 +13,10 @@
from models_library.users import UserID
from servicelib.redis import RedisClientSDK
from settings_library.redis import RedisDatabase
-from simcore_service_director_v2.modules.redis import get_redis_client_manager
from ...models.comp_runs import Iteration
from ...models.comp_tasks import CompTaskAtDB
+from ..redis import get_redis_client_manager
SCHEDULED_STATES: set[RunningState] = {
RunningState.PUBLISHED,
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py b/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py
index e28e48f82f7..96505371754 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py
@@ -1,5 +1,5 @@
"""The dask client is the osparc part that communicates with a
-dask-scheduler/worker backend directly or through a dask-gateway.
+dask-scheduler/worker backend.
From dask documentation any Data or function must follow the criteria to be
usable in dask [http://distributed.dask.org/en/stable/limitations.html?highlight=cloudpickle#assumptions-on-functions-and-data]:
@@ -43,7 +43,7 @@
from distributed.scheduler import TaskStateState as DaskSchedulerTaskState
from fastapi import FastAPI
from models_library.api_schemas_directorv2.clusters import ClusterDetails, Scheduler
-from models_library.clusters import ClusterAuthentication, ClusterID, ClusterTypeInModel
+from models_library.clusters import ClusterAuthentication, ClusterTypeInModel
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.resource_tracker import HardwareInfo
@@ -74,7 +74,7 @@
from ..utils.dask_client_utils import (
DaskSubSystem,
TaskHandlers,
- create_internal_client_based_on_auth,
+ connect_to_dask_scheduler,
)
_logger = logging.getLogger(__name__)
@@ -133,7 +133,7 @@ async def create(
) -> "DaskClient":
_logger.info(
"Initiating connection to %s with auth: %s, type: %s",
- f"dask-scheduler/gateway at {endpoint}",
+ f"dask-scheduler at {endpoint}",
authentication,
cluster_type,
)
@@ -149,9 +149,7 @@ async def create(
endpoint,
attempt.retry_state.attempt_number,
)
- backend = await create_internal_client_based_on_auth(
- endpoint, authentication
- )
+ backend = await connect_to_dask_scheduler(endpoint, authentication)
dask_utils.check_scheduler_status(backend.client)
instance = cls(
app=app,
@@ -162,7 +160,7 @@ async def create(
)
_logger.info(
"Connection to %s succeeded [%s]",
- f"dask-scheduler/gateway at {endpoint}",
+ f"dask-scheduler at {endpoint}",
json.dumps(attempt.retry_state.retry_object.statistics),
)
_logger.info(
@@ -287,7 +285,6 @@ async def send_computation_tasks(
*,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
tasks: dict[NodeID, Image],
callback: _UserCallbackInSepThread,
remote_fct: ContainerRemoteFct | None = None,
@@ -331,22 +328,18 @@ async def send_computation_tasks(
)
dask_utils.check_communication_with_scheduler_is_open(self.backend.client)
dask_utils.check_scheduler_status(self.backend.client)
- await dask_utils.check_maximize_workers(self.backend.gateway_cluster)
- # NOTE: in case it's a gateway or it is an on-demand cluster
+ # NOTE: in case it is an on-demand cluster
# we do not check a priori if the task
# is runnable because we CAN'T. A cluster might auto-scale, the worker(s)
- # might also auto-scale and the gateway does not know that a priori.
+        # might also auto-scale and we do not know that a priori.
# So, we'll just send the tasks over and see what happens after a while.
- if (self.cluster_type != ClusterTypeInModel.ON_DEMAND) and (
- self.backend.gateway is None
- ):
+ if self.cluster_type != ClusterTypeInModel.ON_DEMAND:
dask_utils.check_if_cluster_is_able_to_run_pipeline(
project_id=project_id,
node_id=node_id,
scheduler_info=self.backend.client.scheduler_info(),
task_resources=dask_resources,
node_image=node_image,
- cluster_id=cluster_id,
)
s3_settings = None
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py
deleted file mode 100644
index 30381110173..00000000000
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py
+++ /dev/null
@@ -1,286 +0,0 @@
-import logging
-from collections.abc import Iterable
-
-import psycopg2
-import sqlalchemy as sa
-from aiopg.sa import connection
-from models_library.api_schemas_directorv2.clusters import ClusterCreate, ClusterPatch
-from models_library.clusters import (
- CLUSTER_ADMIN_RIGHTS,
- CLUSTER_MANAGER_RIGHTS,
- CLUSTER_NO_RIGHTS,
- CLUSTER_USER_RIGHTS,
- Cluster,
- ClusterAccessRights,
- ClusterID,
-)
-from models_library.users import UserID
-from pydantic.types import PositiveInt
-from simcore_postgres_database.models.cluster_to_groups import cluster_to_groups
-from simcore_postgres_database.models.clusters import clusters
-from simcore_postgres_database.models.groups import GroupType, groups, user_to_groups
-from simcore_postgres_database.models.users import users
-from sqlalchemy.dialects.postgresql import insert as pg_insert
-
-from ....core.errors import (
- ClusterAccessForbiddenError,
- ClusterInvalidOperationError,
- ClusterNotFoundError,
-)
-from ....utils.db import to_clusters_db
-from ._base import BaseRepository
-
-logger = logging.getLogger(__name__)
-
-
-async def _clusters_from_cluster_ids(
- conn: connection.SAConnection,
- cluster_ids: Iterable[PositiveInt],
- offset: int = 0,
- limit: int | None = None,
-) -> list[Cluster]:
- cluster_id_to_cluster: dict[PositiveInt, Cluster] = {}
- async for row in conn.execute(
- sa.select(
- clusters,
- cluster_to_groups.c.gid,
- cluster_to_groups.c.read,
- cluster_to_groups.c.write,
- cluster_to_groups.c.delete,
- )
- .select_from(
- clusters.join(
- cluster_to_groups,
- clusters.c.id == cluster_to_groups.c.cluster_id,
- )
- )
- .where(clusters.c.id.in_(cluster_ids))
- .offset(offset)
- .limit(limit)
- ):
- cluster_access_rights = {
- row[cluster_to_groups.c.gid]: ClusterAccessRights(
- **{
- "read": row[cluster_to_groups.c.read],
- "write": row[cluster_to_groups.c.write],
- "delete": row[cluster_to_groups.c.delete],
- }
- )
- }
-
- cluster_id = row[clusters.c.id]
- if cluster_id not in cluster_id_to_cluster:
- cluster_id_to_cluster[cluster_id] = Cluster(
- id=cluster_id,
- name=row[clusters.c.name],
- description=row[clusters.c.description],
- type=row[clusters.c.type],
- owner=row[clusters.c.owner],
- endpoint=row[clusters.c.endpoint],
- authentication=row[clusters.c.authentication],
- thumbnail=row[clusters.c.thumbnail],
- access_rights=cluster_access_rights,
- )
- else:
- cluster_id_to_cluster[cluster_id].access_rights.update(
- cluster_access_rights
- )
-
- return list(cluster_id_to_cluster.values())
-
-
-async def _compute_user_access_rights(
- conn: connection.SAConnection, user_id: UserID, cluster: Cluster
-) -> ClusterAccessRights:
- result = await conn.execute(
- sa.select(user_to_groups.c.gid, groups.c.type)
- .where(user_to_groups.c.uid == user_id)
- .order_by(groups.c.type)
- .join(groups)
- )
- user_groups = await result.fetchall()
- assert user_groups # nosec
- # get the primary group first, as it has precedence
- if (
- primary_group_row := next(
- filter(lambda ugrp: ugrp[1] == GroupType.PRIMARY, user_groups), None
- )
- ) and (primary_grp_rights := cluster.access_rights.get(primary_group_row.gid)):
- return primary_grp_rights
-
- solved_rights = CLUSTER_NO_RIGHTS.model_dump()
- for group_row in filter(lambda ugrp: ugrp[1] != GroupType.PRIMARY, user_groups):
- grp_access = cluster.access_rights.get(group_row.gid, CLUSTER_NO_RIGHTS).model_dump()
- for operation in ["read", "write", "delete"]:
- solved_rights[operation] |= grp_access[operation]
- return ClusterAccessRights(**solved_rights)
-
-
-class ClustersRepository(BaseRepository):
- async def create_cluster(self, user_id, new_cluster: ClusterCreate) -> Cluster:
- async with self.db_engine.acquire() as conn:
- user_primary_gid = await conn.scalar(
- sa.select(users.c.primary_gid).where(users.c.id == user_id)
- )
- new_cluster.owner = user_primary_gid
- new_cluster_id = await conn.scalar(
- sa.insert(
- clusters, values=to_clusters_db(new_cluster, only_update=False)
- ).returning(clusters.c.id)
- )
- assert new_cluster_id # nosec
- return await self.get_cluster(user_id, new_cluster_id)
-
- async def list_clusters(self, user_id: UserID) -> list[Cluster]:
- async with self.db_engine.acquire() as conn:
- result = await conn.execute(
- sa.select(clusters.c.id)
- .distinct()
- .where(
- cluster_to_groups.c.gid.in_(
- # get the groups of the user where he/she has read access
- sa.select(groups.c.gid)
- .where(user_to_groups.c.uid == user_id)
- .order_by(groups.c.gid)
- .select_from(groups.join(user_to_groups))
- )
- & cluster_to_groups.c.read
- )
- .join(cluster_to_groups)
- )
- retrieved_clusters = []
- if cluster_ids := await result.fetchall():
- retrieved_clusters = await _clusters_from_cluster_ids(
- conn, {c.id for c in cluster_ids}
- )
- return retrieved_clusters
-
- async def get_cluster(self, user_id: UserID, cluster_id: ClusterID) -> Cluster:
- async with self.db_engine.acquire() as conn:
- clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id})
- if not clusters_list:
- raise ClusterNotFoundError(cluster_id=cluster_id)
- the_cluster = clusters_list[0]
-
- access_rights = await _compute_user_access_rights(
- conn, user_id, the_cluster
- )
- logger.debug(
- "found cluster in DB: %s, with computed %s",
- f"{the_cluster=}",
- f"{access_rights=}",
- )
- if not access_rights.read:
- raise ClusterAccessForbiddenError(cluster_id=cluster_id)
-
- return the_cluster
-
- async def update_cluster( # pylint: disable=too-many-branches
- self, user_id: UserID, cluster_id: ClusterID, updated_cluster: ClusterPatch
- ) -> Cluster:
- async with self.db_engine.acquire() as conn:
- clusters_list: list[Cluster] = await _clusters_from_cluster_ids(
- conn, {cluster_id}
- )
- if len(clusters_list) != 1:
- raise ClusterNotFoundError(cluster_id=cluster_id)
- the_cluster = clusters_list[0]
-
- this_user_access_rights = await _compute_user_access_rights(
- conn, user_id, the_cluster
- )
- logger.debug(
- "found cluster in DB: %s, with computed %s",
- f"{the_cluster=}",
- f"{this_user_access_rights=}",
- )
-
- if not this_user_access_rights.write:
- raise ClusterAccessForbiddenError(cluster_id=cluster_id)
-
- if updated_cluster.owner and updated_cluster.owner != the_cluster.owner:
- # if the user wants to change the owner, we need more rights here
- if this_user_access_rights != CLUSTER_ADMIN_RIGHTS:
- raise ClusterAccessForbiddenError(cluster_id=cluster_id)
-
- # ensure the new owner has admin rights, too
- if not updated_cluster.access_rights:
- updated_cluster.access_rights = {
- updated_cluster.owner: CLUSTER_ADMIN_RIGHTS
- }
- else:
- updated_cluster.access_rights[
- updated_cluster.owner
- ] = CLUSTER_ADMIN_RIGHTS
-
- # resolve access rights changes
- resolved_access_rights = the_cluster.access_rights
- if updated_cluster.access_rights:
- # if the user is a manager he/she may ONLY add/remove users
- if this_user_access_rights == CLUSTER_MANAGER_RIGHTS:
- for grp, rights in updated_cluster.access_rights.items():
- if grp == the_cluster.owner or rights not in [
- CLUSTER_USER_RIGHTS,
- CLUSTER_NO_RIGHTS,
- ]:
- # a manager cannot change the owner abilities or create
- # managers/admins
- raise ClusterAccessForbiddenError(cluster_id=cluster_id)
-
- resolved_access_rights.update(updated_cluster.access_rights)
- # ensure the user is not trying to mess around owner admin rights
- if (
- resolved_access_rights.setdefault(
- the_cluster.owner, CLUSTER_ADMIN_RIGHTS
- )
- != CLUSTER_ADMIN_RIGHTS
- ):
- raise ClusterAccessForbiddenError(cluster_id=cluster_id)
-
- # ok we can update now
- try:
- await conn.execute(
- sa.update(clusters)
- .where(clusters.c.id == the_cluster.id)
- .values(to_clusters_db(updated_cluster, only_update=True))
- )
- except psycopg2.DatabaseError as e:
- raise ClusterInvalidOperationError(cluster_id=cluster_id) from e
- # upsert the rights
- if updated_cluster.access_rights:
- for grp, rights in resolved_access_rights.items():
- insert_stmt = pg_insert(cluster_to_groups).values(
- **rights.model_dump(by_alias=True), gid=grp, cluster_id=the_cluster.id
- )
- on_update_stmt = insert_stmt.on_conflict_do_update(
- index_elements=[
- cluster_to_groups.c.cluster_id,
- cluster_to_groups.c.gid,
- ],
- set_=rights.model_dump(by_alias=True),
- )
- await conn.execute(on_update_stmt)
-
- clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id})
- if not clusters_list:
- raise ClusterNotFoundError(cluster_id=cluster_id)
- return clusters_list[0]
-
- async def delete_cluster(self, user_id: UserID, cluster_id: ClusterID) -> None:
- async with self.db_engine.acquire() as conn:
- clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id})
- if not clusters_list:
- raise ClusterNotFoundError(cluster_id=cluster_id)
- the_cluster = clusters_list[0]
-
- access_rights = await _compute_user_access_rights(
- conn, user_id, the_cluster
- )
- logger.debug(
- "found cluster in DB: %s, with computed %s",
- f"{the_cluster=}",
- f"{access_rights=}",
- )
- if not access_rights.delete:
- raise ClusterAccessForbiddenError(cluster_id=cluster_id)
- await conn.execute(sa.delete(clusters).where(clusters.c.id == cluster_id))
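
For reference, the deleted repository resolved a user's effective rights by letting the primary group win outright and OR-merging the remaining groups' rights field by field. The sketch below reproduces only that merge rule with local dataclasses; it is not the removed `_compute_user_access_rights`.

```python
# Dependency-free sketch of the removed access-rights resolution rule.
from dataclasses import dataclass


@dataclass(frozen=True)
class Rights:
    read: bool = False
    write: bool = False
    delete: bool = False


def resolve(primary: Rights | None, other_groups: list[Rights]) -> Rights:
    if primary is not None:
        return primary  # primary group rights take precedence
    merged = {"read": False, "write": False, "delete": False}
    for rights in other_groups:
        for op in merged:
            merged[op] |= getattr(rights, op)
    return Rights(**merged)


print(resolve(None, [Rights(read=True), Rights(write=True)]))
# Rights(read=True, write=True, delete=False)
```
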
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
index 13e01a4276f..46cc7669cde 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py
@@ -5,7 +5,6 @@
import arrow
import sqlalchemy as sa
from aiopg.sa.result import RowProxy
-from models_library.clusters import DEFAULT_CLUSTER_ID, ClusterID
from models_library.projects import ProjectID
from models_library.projects_state import RunningState
from models_library.users import UserID
@@ -43,10 +42,6 @@
("clusters", "cluster_id"),
),
}
-_DEFAULT_FK_CONSTRAINT_TO_ERROR: Final[tuple[type[DirectorError], tuple]] = (
- DirectorError,
- (),
-)
class CompRunsRepository(BaseRepository):
@@ -154,7 +149,6 @@ async def create(
*,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
iteration: PositiveInt | None = None,
metadata: RunMetadataDict,
use_on_demand_clusters: bool,
@@ -178,9 +172,7 @@ async def create(
.values(
user_id=user_id,
project_uuid=f"{project_id}",
- cluster_id=(
- cluster_id if cluster_id != DEFAULT_CLUSTER_ID else None
- ),
+ cluster_id=None,
iteration=iteration,
result=RUNNING_STATE_TO_DB[RunningState.PUBLISHED],
started=datetime.datetime.now(tz=datetime.UTC),
diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
index dd52f50ac82..2619d9ce98f 100644
--- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
+++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py
@@ -145,12 +145,12 @@ async def _get_node_infos(
None,
)
- result: tuple[
- ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels
- ] = await asyncio.gather(
- _get_service_details(catalog_client, user_id, product_name, node),
- director_client.get_service_extras(node.key, node.version),
- director_client.get_service_labels(node),
+ result: tuple[ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels] = (
+ await asyncio.gather(
+ _get_service_details(catalog_client, user_id, product_name, node),
+ director_client.get_service_extras(node.key, node.version),
+ director_client.get_service_labels(node),
+ )
)
return result
@@ -246,9 +246,9 @@ async def _get_pricing_and_hardware_infos(
return pricing_info, hardware_info
-_RAM_SAFE_MARGIN_RATIO: Final[
- float
-] = 0.1 # NOTE: machines always have less available RAM than advertised
+_RAM_SAFE_MARGIN_RATIO: Final[float] = (
+ 0.1 # NOTE: machines always have less available RAM than advertised
+)
_CPUS_SAFE_MARGIN: Final[float] = 0.1
@@ -266,11 +266,11 @@ async def _update_project_node_resources_from_hardware_info(
if not hardware_info.aws_ec2_instances:
return
try:
- unordered_list_ec2_instance_types: list[
- EC2InstanceTypeGet
- ] = await get_instance_type_details(
- rabbitmq_rpc_client,
- instance_type_names=set(hardware_info.aws_ec2_instances),
+ unordered_list_ec2_instance_types: list[EC2InstanceTypeGet] = (
+ await get_instance_type_details(
+ rabbitmq_rpc_client,
+ instance_type_names=set(hardware_info.aws_ec2_instances),
+ )
)
assert unordered_list_ec2_instance_types # nosec
diff --git a/services/director-v2/src/simcore_service_director_v2/utils/dask.py b/services/director-v2/src/simcore_service_director_v2/utils/dask.py
index afb1e0b3770..13967b0c5da 100644
--- a/services/director-v2/src/simcore_service_director_v2/utils/dask.py
+++ b/services/director-v2/src/simcore_service_director_v2/utils/dask.py
@@ -5,7 +5,6 @@
from typing import Any, Final, NoReturn, ParamSpec, TypeVar, cast
from uuid import uuid4
-import dask_gateway # type: ignore[import-untyped]
import distributed
from aiopg.sa.engine import Engine
from common_library.json_serialization import json_dumps
@@ -22,7 +21,6 @@
)
from fastapi import FastAPI
from models_library.api_schemas_directorv2.services import NodeRequirements
-from models_library.clusters import ClusterID
from models_library.docker import DockerLabelKey, StandardSimcoreDockerLabels
from models_library.errors import ErrorDict
from models_library.projects import ProjectID, ProjectIDStr
@@ -515,14 +513,6 @@ def check_scheduler_status(client: distributed.Client):
raise ComputationalBackendNotConnectedError
-_LARGE_NUMBER_OF_WORKERS: Final[int] = 10000
-
-
-async def check_maximize_workers(cluster: dask_gateway.GatewayCluster | None) -> None:
- if cluster:
- await cluster.scale(_LARGE_NUMBER_OF_WORKERS)
-
-
def _can_task_run_on_worker(
task_resources: dict[str, Any], worker_resources: dict[str, Any]
) -> bool:
@@ -573,7 +563,6 @@ def check_if_cluster_is_able_to_run_pipeline(
scheduler_info: dict[str, Any],
task_resources: dict[str, Any],
node_image: Image,
- cluster_id: ClusterID,
) -> None:
_logger.debug(
@@ -592,8 +581,7 @@ def check_if_cluster_is_able_to_run_pipeline(
all_available_resources_in_cluster = dict(cluster_resources_counter)
_logger.debug(
- "Dask scheduler total available resources in cluster %s: %s, task needed resources %s",
- cluster_id,
+ "Dask scheduler total available resources in cluster: %s, task needed resources %s",
json_dumps(all_available_resources_in_cluster, indent=2),
json_dumps(task_resources, indent=2),
)
@@ -616,7 +604,6 @@ def check_if_cluster_is_able_to_run_pipeline(
node_id=node_id,
service_name=node_image.name,
service_version=node_image.tag,
- cluster_id=cluster_id,
task_resources=task_resources,
cluster_resources=cluster_resources,
)
@@ -628,7 +615,6 @@ def check_if_cluster_is_able_to_run_pipeline(
service_name=node_image.name,
service_version=node_image.tag,
service_requested_resources=_to_human_readable_resource_values(task_resources),
- cluster_id=cluster_id,
cluster_available_resources=[
_to_human_readable_resource_values(worker.get("resources", None))
for worker in workers.values()
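
`check_if_cluster_is_able_to_run_pipeline` keeps comparing the task's needs against the cluster's total advertised resources, just without a `cluster_id` in the log line. The sketch below illustrates only the resource-totalling part; the `fake_scheduler_info` dictionary stands in for `distributed.Client.scheduler_info()`.

```python
# Total per-worker "resources" from a dask scheduler_info() payload and compare
# against what a task requests.
from collections import Counter
from typing import Any


def cluster_can_run(scheduler_info: dict[str, Any], task_resources: dict[str, float]) -> bool:
    totals: Counter = Counter()
    for worker in scheduler_info.get("workers", {}).values():
        totals.update(worker.get("resources", {}))
    return all(totals.get(name, 0) >= needed for name, needed in task_resources.items())


fake_scheduler_info = {
    "workers": {
        "tcp://10.0.0.1:45000": {"resources": {"CPU": 4, "RAM": 16e9}},
        "tcp://10.0.0.2:45000": {"resources": {"CPU": 8, "RAM": 32e9}},
    }
}
print(cluster_can_run(fake_scheduler_info, {"CPU": 10, "RAM": 40e9}))  # True
print(cluster_can_run(fake_scheduler_info, {"GPU": 1}))  # False
```
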
diff --git a/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py b/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py
index 964f38e6484..0ec66eeabdd 100644
--- a/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py
+++ b/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py
@@ -2,41 +2,18 @@
import os
import socket
from collections.abc import Awaitable, Callable
-from contextlib import suppress
from dataclasses import dataclass, field
-from typing import Final, Union
-import dask_gateway # type: ignore[import-untyped]
import distributed
-import httpx
-from aiohttp import ClientConnectionError, ClientResponseError
from dask_task_models_library.container_tasks.events import (
TaskLogEvent,
TaskProgressEvent,
)
-from models_library.clusters import (
- ClusterAuthentication,
- InternalClusterAuthentication,
- JupyterHubTokenAuthentication,
- KerberosAuthentication,
- NoAuthentication,
- SimpleAuthentication,
- TLSAuthentication,
-)
+from models_library.clusters import ClusterAuthentication, TLSAuthentication
from pydantic import AnyUrl
-from ..core.errors import (
- ComputationalSchedulerError,
- ConfigurationError,
- DaskClientRequestError,
- DaskClusterError,
- DaskGatewayServerError,
-)
-from .dask import check_maximize_workers, wrap_client_async_routine
-
-DaskGatewayAuths = Union[
- dask_gateway.BasicAuth, dask_gateway.KerberosAuth, dask_gateway.JupyterHubAuth
-]
+from ..core.errors import ConfigurationError
+from .dask import wrap_client_async_routine
@dataclass
@@ -52,8 +29,6 @@ class TaskHandlers:
class DaskSubSystem:
client: distributed.Client
scheduler_id: str
- gateway: dask_gateway.Gateway | None
- gateway_cluster: dask_gateway.GatewayCluster | None
progress_sub: distributed.Sub = field(init=False)
logs_sub: distributed.Sub = field(init=False)
@@ -69,14 +44,10 @@ async def close(self) -> None:
# closing the client appears to fix the issue and the dask-scheduler remains happy
if self.client:
await wrap_client_async_routine(self.client.close())
- if self.gateway_cluster:
- await wrap_client_async_routine(self.gateway_cluster.close())
- if self.gateway:
- await wrap_client_async_routine(self.gateway.close())
-async def _connect_to_dask_scheduler(
- endpoint: AnyUrl, authentication: InternalClusterAuthentication
+async def connect_to_dask_scheduler(
+ endpoint: AnyUrl, authentication: ClusterAuthentication
) -> DaskSubSystem:
try:
security = distributed.Security()
@@ -93,162 +64,7 @@ async def _connect_to_dask_scheduler(
name=f"director-v2_{socket.gethostname()}_{os.getpid()}",
security=security,
)
- return DaskSubSystem(
- client=client,
- scheduler_id=client.scheduler_info()["id"],
- gateway=None,
- gateway_cluster=None,
- )
+ return DaskSubSystem(client=client, scheduler_id=client.scheduler_info()["id"])
except TypeError as exc:
msg = f"Scheduler has invalid configuration: {endpoint=}"
raise ConfigurationError(msg=msg) from exc
-
-
-async def _connect_with_gateway_and_create_cluster(
- endpoint: AnyUrl, auth_params: ClusterAuthentication
-) -> DaskSubSystem:
- try:
- logger.debug(
- "connecting with gateway at %s with %s", f"{endpoint!r}", f"{auth_params=}"
- )
- gateway_auth = await get_gateway_auth_from_params(auth_params)
- gateway = dask_gateway.Gateway(
- address=f"{endpoint}", auth=gateway_auth, asynchronous=True
- )
-
- try:
- # if there is already a cluster that means we can re-connect to it,
- # and IT SHALL BE the first in the list
- cluster_reports_list = await gateway.list_clusters()
- logger.debug(
- "current clusters on the gateway: %s", f"{cluster_reports_list=}"
- )
- cluster = None
- if cluster_reports_list:
- assert (
- len(cluster_reports_list) == 1
- ), "More than 1 cluster at this location, that is unexpected!!" # nosec
- cluster = await gateway.connect(
- cluster_reports_list[0].name, shutdown_on_close=False
- )
- logger.debug("connected to %s", f"{cluster=}")
- else:
- cluster = await gateway.new_cluster(shutdown_on_close=False)
- logger.debug("created %s", f"{cluster=}")
- assert cluster # nosec
- logger.info("Cluster dashboard available: %s", cluster.dashboard_link)
- await check_maximize_workers(cluster)
- logger.info("Cluster workers maximized")
- client = await cluster.get_client()
- assert client # nosec
- return DaskSubSystem(
- client=client,
- scheduler_id=client.scheduler_info()["id"],
- gateway=gateway,
- gateway_cluster=cluster,
- )
- except Exception:
- # cleanup
- with suppress(Exception):
- await wrap_client_async_routine(gateway.close())
- raise
-
- except TypeError as exc:
- msg = f"Cluster has invalid configuration: {endpoint=}, {auth_params=}"
- raise ConfigurationError(msg=msg) from exc
- except ValueError as exc:
- # this is when a 404=NotFound,422=MalformedData comes up
- raise DaskClientRequestError(endpoint=endpoint, error=exc) from exc
- except dask_gateway.GatewayClusterError as exc:
- # this is when a 409=Conflict/Cannot complete request comes up
- raise DaskClusterError(endpoint=endpoint, error=exc) from exc
- except dask_gateway.GatewayServerError as exc:
- # this is when a 500 comes up
- raise DaskGatewayServerError(endpoint=endpoint, error=exc) from exc
-
-
-def _is_dask_scheduler(authentication: ClusterAuthentication) -> bool:
- return isinstance(authentication, NoAuthentication | TLSAuthentication)
-
-
-async def create_internal_client_based_on_auth(
- endpoint: AnyUrl, authentication: ClusterAuthentication
-) -> DaskSubSystem:
- if _is_dask_scheduler(authentication):
- # if no auth then we go for a standard scheduler connection
- return await _connect_to_dask_scheduler(endpoint, authentication) # type: ignore[arg-type] # _is_dask_scheduler checks already that it is a valid type
- # we do have some auth, so it is going through a gateway
- return await _connect_with_gateway_and_create_cluster(endpoint, authentication)
-
-
-async def get_gateway_auth_from_params(
- auth_params: ClusterAuthentication,
-) -> DaskGatewayAuths:
- try:
- if isinstance(auth_params, SimpleAuthentication):
- return dask_gateway.BasicAuth(
- username=auth_params.username,
- password=auth_params.password.get_secret_value(),
- )
- if isinstance(auth_params, KerberosAuthentication):
- return dask_gateway.KerberosAuth()
- if isinstance(auth_params, JupyterHubTokenAuthentication):
- return dask_gateway.JupyterHubAuth(auth_params.api_token)
- except (TypeError, ValueError) as exc:
- msg = f"Cluster has invalid configuration: {auth_params}"
- raise ConfigurationError(msg=msg) from exc
-
- msg = f"Cluster has invalid configuration: {auth_params=}"
- raise ConfigurationError(msg=msg)
-
-
-_PING_TIMEOUT_S: Final[int] = 5
-_DASK_SCHEDULER_RUNNING_STATE: Final[str] = "running"
-
-
-async def test_scheduler_endpoint(
- endpoint: AnyUrl, authentication: ClusterAuthentication
-) -> None:
- """This method will try to connect to a gateway endpoint and raise a ConfigurationError in case of problem
-
- :raises ConfigurationError: contians some information as to why the connection failed
- """
- try:
- if _is_dask_scheduler(authentication):
- async with distributed.Client(
- address=f"{endpoint}", timeout=f"{_PING_TIMEOUT_S}", asynchronous=True
- ) as dask_client:
- if dask_client.status != _DASK_SCHEDULER_RUNNING_STATE:
- msg = "internal scheduler is not running!"
- raise ComputationalSchedulerError(msg=msg)
-
- else:
- gateway_auth = await get_gateway_auth_from_params(authentication)
- async with dask_gateway.Gateway(
- address=f"{endpoint}", auth=gateway_auth, asynchronous=True
- ) as gateway:
- # this does not yet create any connection to the underlying gateway.
- # since using a fct from dask gateway is going to timeout after a long time
- # we bypass the pinging by calling in ourselves with a short timeout
- async with httpx.AsyncClient(
- transport=httpx.AsyncHTTPTransport(retries=2)
- ) as httpx_client:
- # try to get something the api shall return fast
- response = await httpx_client.get(
- f"{endpoint}/api/version", timeout=_PING_TIMEOUT_S
- )
- response.raise_for_status()
- # now we try to list the clusters to check the gateway responds in a sensible way
- await gateway.list_clusters()
-
- logger.debug("Pinging %s, succeeded", f"{endpoint=}")
- except (
- dask_gateway.GatewayServerError,
- ClientConnectionError,
- ClientResponseError,
- httpx.HTTPError,
- ComputationalSchedulerError,
- ) as exc:
- logger.debug("Pinging %s, failed: %s", f"{endpoint=}", f"{exc=!r}")
- msg = f"Could not connect to cluster in {endpoint}: error: {exc}"
- raise ConfigurationError(msg=msg) from exc
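
With the gateway code path gone, the only connection mode left is a direct `distributed.Client` against the scheduler, optionally with a TLS security context, as in the new `connect_to_dask_scheduler`. The sketch below shows that connection shape using the public `distributed` API; the endpoint and certificate paths are placeholders, and a reachable scheduler is needed to actually run it.

```python
# Direct dask-scheduler connection (no gateway hop), optionally over TLS.
import asyncio

import distributed


async def connect(endpoint: str, tls: dict | None = None) -> distributed.Client:
    security = distributed.Security()
    if tls:  # e.g. {"ca": "...", "cert": "...", "key": "..."}
        security = distributed.Security(
            tls_ca_file=tls["ca"],
            tls_client_cert=tls["cert"],
            tls_client_key=tls["key"],
            require_encryption=True,
        )
    client = await distributed.Client(
        endpoint,
        asynchronous=True,
        name="sketch-client",
        security=security,
    )
    # the DaskSubSystem keeps this scheduler id alongside the client
    print(client.scheduler_info()["id"])
    return client


# asyncio.run(connect("tcp://dask-scheduler:8786"))  # requires a running scheduler
```
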
diff --git a/services/director-v2/src/simcore_service_director_v2/utils/db.py b/services/director-v2/src/simcore_service_director_v2/utils/db.py
index af944c11dff..43e3a371089 100644
--- a/services/director-v2/src/simcore_service_director_v2/utils/db.py
+++ b/services/director-v2/src/simcore_service_director_v2/utils/db.py
@@ -1,9 +1,6 @@
import logging
-from typing import Any
-from common_library.serialization import model_dump_with_secrets
from fastapi import FastAPI
-from models_library.clusters import BaseCluster
from models_library.projects_state import RunningState
from simcore_postgres_database.models.comp_pipeline import StateType
@@ -28,17 +25,5 @@
_logger = logging.getLogger(__name__)
-def to_clusters_db(cluster: BaseCluster, *, only_update: bool) -> dict[str, Any]:
- db_model: dict[str, Any] = model_dump_with_secrets(
- cluster,
- show_secrets=True,
- by_alias=True,
- exclude={"id", "access_rights"},
- exclude_unset=only_update,
- exclude_none=only_update,
- )
- return db_model
-
-
def get_repository(app: FastAPI, repo_type: type[RepoType]) -> RepoType:
return get_base_repository(engine=app.state.engine, repo_type=repo_type)
diff --git a/services/director-v2/tests/conftest.py b/services/director-v2/tests/conftest.py
index 72b94ec3262..231debc371f 100644
--- a/services/director-v2/tests/conftest.py
+++ b/services/director-v2/tests/conftest.py
@@ -35,7 +35,6 @@
from starlette.testclient import ASGI3App, TestClient
pytest_plugins = [
- "pytest_simcore.dask_gateway",
"pytest_simcore.dask_scheduler",
"pytest_simcore.db_entries_mocks",
"pytest_simcore.docker_compose",
diff --git a/services/director-v2/tests/helpers/shared_comp_utils.py b/services/director-v2/tests/helpers/shared_comp_utils.py
index 8ee507f4a2b..2aed8e4525b 100644
--- a/services/director-v2/tests/helpers/shared_comp_utils.py
+++ b/services/director-v2/tests/helpers/shared_comp_utils.py
@@ -4,7 +4,6 @@
import httpx
from models_library.api_schemas_directorv2.comp_tasks import ComputationGet
-from models_library.clusters import ClusterID
from models_library.projects import ProjectAtDB
from models_library.projects_pipeline import PipelineDetails
from models_library.projects_state import RunningState
@@ -26,8 +25,7 @@ async def assert_computation_task_out_obj(
exp_task_state: RunningState,
exp_pipeline_details: PipelineDetails,
iteration: PositiveInt | None,
- cluster_id: ClusterID | None,
-):
+) -> None:
assert task_out.id == project.uuid
assert task_out.state == exp_task_state
assert task_out.url.path == f"/v2/computations/{project.uuid}"
@@ -41,7 +39,6 @@ async def assert_computation_task_out_obj(
else:
assert task_out.stop_url is None
assert task_out.iteration == iteration
- assert task_out.cluster_id == cluster_id
# check pipeline details contents
received_task_out_pipeline = task_out.pipeline_details.model_dump()
expected_task_out_pipeline = exp_pipeline_details.model_dump()
diff --git a/services/director-v2/tests/integration/01/test_computation_api.py b/services/director-v2/tests/integration/01/test_computation_api.py
index 053431fc34d..431939c31dd 100644
--- a/services/director-v2/tests/integration/01/test_computation_api.py
+++ b/services/director-v2/tests/integration/01/test_computation_api.py
@@ -21,7 +21,7 @@
assert_computation_task_out_obj,
)
from models_library.api_schemas_directorv2.comp_tasks import ComputationGet
-from models_library.clusters import DEFAULT_CLUSTER_ID, InternalClusterAuthentication
+from models_library.clusters import ClusterAuthentication
from models_library.projects import ProjectAtDB
from models_library.projects_nodes import NodeState
from models_library.projects_nodes_io import NodeID
@@ -58,7 +58,7 @@ def mock_env(
monkeypatch: pytest.MonkeyPatch,
dynamic_sidecar_docker_image_name: str,
dask_scheduler_service: str,
- dask_scheduler_auth: InternalClusterAuthentication,
+ dask_scheduler_auth: ClusterAuthentication,
) -> None:
# used by the client fixture
setenvs_from_dict(
@@ -463,7 +463,6 @@ def _convert_to_pipeline_details(
exp_task_state=RunningState.PUBLISHED,
exp_pipeline_details=expected_pipeline_details,
iteration=1,
- cluster_id=DEFAULT_CLUSTER_ID,
)
# now wait for the computation to finish
@@ -479,7 +478,6 @@ def _convert_to_pipeline_details(
exp_task_state=RunningState.SUCCESS,
exp_pipeline_details=expected_pipeline_details_after_run,
iteration=1,
- cluster_id=DEFAULT_CLUSTER_ID,
)
# run it a second time. the tasks are all up-to-date, nothing should be run
@@ -531,7 +529,6 @@ def _convert_to_pipeline_details(
exp_task_state=RunningState.PUBLISHED,
exp_pipeline_details=expected_pipeline_details_forced,
iteration=2,
- cluster_id=DEFAULT_CLUSTER_ID,
)
# now wait for the computation to finish
@@ -572,7 +569,6 @@ async def test_run_computation(
exp_task_state=RunningState.PUBLISHED,
exp_pipeline_details=fake_workbench_computational_pipeline_details,
iteration=1,
- cluster_id=DEFAULT_CLUSTER_ID,
)
# wait for the computation to start
@@ -595,7 +591,6 @@ async def test_run_computation(
exp_task_state=RunningState.SUCCESS,
exp_pipeline_details=fake_workbench_computational_pipeline_details_completed,
iteration=1,
- cluster_id=DEFAULT_CLUSTER_ID,
)
# NOTE: currently the webserver is the one updating the projects table so we need to fake this by copying the run_hash
@@ -642,7 +637,6 @@ async def test_run_computation(
exp_task_state=RunningState.PUBLISHED,
exp_pipeline_details=expected_pipeline_details_forced, # NOTE: here the pipeline already ran so its states are different
iteration=2,
- cluster_id=DEFAULT_CLUSTER_ID,
)
# wait for the computation to finish
@@ -655,7 +649,6 @@ async def test_run_computation(
exp_task_state=RunningState.SUCCESS,
exp_pipeline_details=fake_workbench_computational_pipeline_details_completed,
iteration=2,
- cluster_id=DEFAULT_CLUSTER_ID,
)
@@ -692,7 +685,6 @@ async def test_abort_computation(
exp_task_state=RunningState.PUBLISHED,
exp_pipeline_details=fake_workbench_computational_pipeline_details,
iteration=1,
- cluster_id=DEFAULT_CLUSTER_ID,
)
# wait until the pipeline is started
@@ -765,7 +757,6 @@ async def test_update_and_delete_computation(
exp_task_state=RunningState.NOT_STARTED,
exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started,
iteration=None,
- cluster_id=None,
)
# update the pipeline
@@ -784,7 +775,6 @@ async def test_update_and_delete_computation(
exp_task_state=RunningState.NOT_STARTED,
exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started,
iteration=None,
- cluster_id=None,
)
# update the pipeline
@@ -803,7 +793,6 @@ async def test_update_and_delete_computation(
exp_task_state=RunningState.NOT_STARTED,
exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started,
iteration=None,
- cluster_id=None,
)
# start it now
@@ -821,7 +810,6 @@ async def test_update_and_delete_computation(
exp_task_state=RunningState.PUBLISHED,
exp_pipeline_details=fake_workbench_computational_pipeline_details,
iteration=1,
- cluster_id=DEFAULT_CLUSTER_ID,
)
# wait until the pipeline is started
diff --git a/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py b/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py
index e43f23bc9dd..b1c99b772b9 100644
--- a/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py
+++ b/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py
@@ -29,7 +29,7 @@
assert_computation_task_out_obj,
)
from models_library.api_schemas_directorv2.comp_tasks import ComputationGet
-from models_library.clusters import DEFAULT_CLUSTER_ID, InternalClusterAuthentication
+from models_library.clusters import ClusterAuthentication
from models_library.projects import (
Node,
NodesDict,
@@ -360,7 +360,7 @@ def mock_env(
network_name: str,
dev_feature_r_clone_enabled: str,
dask_scheduler_service: str,
- dask_scheduler_auth: InternalClusterAuthentication,
+ dask_scheduler_auth: ClusterAuthentication,
minimal_configuration: None,
patch_storage_setup: None,
) -> None:
@@ -983,7 +983,6 @@ async def test_nodeports_integration(
exp_task_state=RunningState.SUCCESS,
exp_pipeline_details=PipelineDetails.model_validate(fake_dy_success),
iteration=1,
- cluster_id=DEFAULT_CLUSTER_ID,
)
update_project_workbench_with_comp_tasks(str(current_study.uuid))
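With DEFAULT_CLUSTER_ID gone, the integration fixtures type the scheduler credentials with the unified ClusterAuthentication union instead of the gateway-era InternalClusterAuthentication. A minimal sketch of such a fixture, assuming the name dask_scheduler_auth from the hunk above and that the local test scheduler runs without credentials (the real fixture body lives outside this hunk):

import pytest
from models_library.clusters import ClusterAuthentication, NoAuthentication


@pytest.fixture
def dask_scheduler_auth() -> ClusterAuthentication:
    # NoAuthentication is one member of the ClusterAuthentication union; the local
    # dask scheduler spun up for these integration tests needs no credentials
    return NoAuthentication()
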
diff --git a/services/director-v2/tests/unit/_dask_helpers.py b/services/director-v2/tests/unit/_dask_helpers.py
deleted file mode 100644
index 9bf9a739946..00000000000
--- a/services/director-v2/tests/unit/_dask_helpers.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# pylint:disable=unused-variable
-# pylint:disable=unused-argument
-
-from typing import NamedTuple
-
-from dask_gateway_server.app import DaskGateway
-
-
-class DaskGatewayServer(NamedTuple):
- address: str
- proxy_address: str
- password: str
- server: DaskGateway
diff --git a/services/director-v2/tests/unit/conftest.py b/services/director-v2/tests/unit/conftest.py
index cdf0751fab4..b305f6bcafd 100644
--- a/services/director-v2/tests/unit/conftest.py
+++ b/services/director-v2/tests/unit/conftest.py
@@ -3,7 +3,6 @@
import json
import logging
-import random
import urllib.parse
from collections.abc import AsyncIterable, Iterable, Iterator, Mapping
from typing import Any
@@ -20,7 +19,6 @@
)
from models_library.basic_types import PortInt
from models_library.callbacks_mapping import CallbacksMapping
-from models_library.clusters import ClusterID
from models_library.generated_models.docker_rest_api import (
ServiceSpec as DockerServiceSpec,
)
@@ -159,11 +157,6 @@ def scheduler_data(
}[request.param]
-@pytest.fixture
-def cluster_id() -> ClusterID:
- return random.randint(0, 10)
-
-
@pytest.fixture(params=list(FileLinkType))
def tasks_file_link_type(request) -> FileLinkType:
"""parametrized fixture on all FileLinkType enum variants"""
diff --git a/services/director-v2/tests/unit/test_models_clusters.py b/services/director-v2/tests/unit/test_models_clusters.py
index b08a988fc68..ae0b17dd43e 100644
--- a/services/director-v2/tests/unit/test_models_clusters.py
+++ b/services/director-v2/tests/unit/test_models_clusters.py
@@ -1,52 +1,16 @@
-from pprint import pformat
-from typing import Any
-
-import pytest
from faker import Faker
from models_library.api_schemas_directorv2.clusters import (
AvailableResources,
- ClusterCreate,
- ClusterPatch,
Scheduler,
UsedResources,
Worker,
WorkerMetrics,
)
from models_library.clusters import ClusterTypeInModel
-from pydantic import BaseModel, ByteSize, TypeAdapter
+from pydantic import ByteSize, TypeAdapter
from simcore_postgres_database.models.clusters import ClusterType
-@pytest.mark.parametrize(
- "model_cls",
- [ClusterCreate, ClusterPatch],
-)
-def test_clusters_model_examples(
- model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]]
-):
- for name, example in model_cls_examples.items():
- print(name, ":", pformat(example))
- model_instance = model_cls(**example)
- assert model_instance, f"Failed with {name}"
-
-
-@pytest.mark.parametrize(
- "model_cls",
- [
- ClusterCreate,
- ],
-)
-def test_cluster_creation_brings_default_thumbail(
- model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]]
-):
- for example in model_cls_examples.values():
- if "thumbnail" in example:
- example.pop("thumbnail")
- instance = model_cls(**example)
- assert instance
- assert instance.thumbnail
-
-
def test_scheduler_constructor_with_default_has_correct_dict(faker: Faker):
scheduler = Scheduler(status=faker.text())
assert scheduler.workers is not None
diff --git a/services/director-v2/tests/unit/test_modules_dask_client.py b/services/director-v2/tests/unit/test_modules_dask_client.py
index f45040c143a..83939689808 100644
--- a/services/director-v2/tests/unit/test_modules_dask_client.py
+++ b/services/director-v2/tests/unit/test_modules_dask_client.py
@@ -5,7 +5,6 @@
# pylint:disable=too-many-arguments
# pylint: disable=reimported
import asyncio
-import datetime
import functools
import traceback
from collections.abc import AsyncIterator, Awaitable, Callable, Coroutine
@@ -17,7 +16,6 @@
import distributed
import pytest
import respx
-from _dask_helpers import DaskGatewayServer
from dask.distributed import get_worker
from dask_task_models_library.container_tasks.docker import DockerBasicAuth
from dask_task_models_library.container_tasks.errors import TaskCancelledError
@@ -42,22 +40,15 @@
from faker import Faker
from fastapi.applications import FastAPI
from models_library.api_schemas_directorv2.services import NodeRequirements
-from models_library.api_schemas_storage import LinkType
-from models_library.clusters import (
- ClusterID,
- ClusterTypeInModel,
- NoAuthentication,
- SimpleAuthentication,
-)
+from models_library.clusters import ClusterTypeInModel, NoAuthentication
from models_library.docker import to_simcore_runtime_docker_label_key
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.resource_tracker import HardwareInfo
from models_library.users import UserID
-from pydantic import AnyUrl, ByteSize, SecretStr, TypeAdapter
+from pydantic import AnyUrl, ByteSize, TypeAdapter
from pytest_mock.plugin import MockerFixture
from pytest_simcore.helpers.typing_env import EnvVarsDict
-from servicelib.background_task import periodic_task
from settings_library.s3 import S3Settings
from simcore_sdk.node_ports_v2 import FileLinkType
from simcore_service_director_v2.core.errors import (
@@ -163,7 +154,9 @@ async def factory() -> DaskClient:
client = await DaskClient.create(
app=minimal_app,
settings=minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND,
- endpoint=TypeAdapter(AnyUrl).validate_python(dask_spec_local_cluster.scheduler_address),
+ endpoint=TypeAdapter(AnyUrl).validate_python(
+ dask_spec_local_cluster.scheduler_address
+ ),
authentication=NoAuthentication(),
tasks_file_link_type=tasks_file_link_type,
cluster_type=ClusterTypeInModel.ON_PREMISE,
@@ -177,8 +170,6 @@ async def factory() -> DaskClient:
assert not client._subscribed_tasks # noqa: SLF001
assert client.backend.client
- assert not client.backend.gateway
- assert not client.backend.gateway_cluster
scheduler_infos = client.backend.client.scheduler_info() # type: ignore
print(
f"--> Connected to scheduler via client {client=} to scheduler {scheduler_infos=}"
@@ -191,66 +182,13 @@ async def factory() -> DaskClient:
print(f"<-- Disconnected scheduler clients {created_clients=}")
-@pytest.fixture
-async def create_dask_client_from_gateway(
- _minimal_dask_config: None,
- local_dask_gateway_server: DaskGatewayServer,
- minimal_app: FastAPI,
- tasks_file_link_type: FileLinkType,
-) -> AsyncIterator[Callable[[], Awaitable[DaskClient]]]:
- created_clients = []
-
- async def factory() -> DaskClient:
- client = await DaskClient.create(
- app=minimal_app,
- settings=minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND,
- endpoint=TypeAdapter(AnyUrl).validate_python(local_dask_gateway_server.address),
- authentication=SimpleAuthentication(
- username="pytest_user",
- password=SecretStr(local_dask_gateway_server.password),
- ),
- tasks_file_link_type=tasks_file_link_type,
- cluster_type=ClusterTypeInModel.AWS,
- )
- assert client
- assert client.app == minimal_app
- assert (
- client.settings
- == minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND
- )
- assert not client._subscribed_tasks # noqa: SLF001
-
- assert client.backend.client
- assert client.backend.gateway
- assert client.backend.gateway_cluster
-
- scheduler_infos = client.backend.client.scheduler_info()
- assert scheduler_infos
- print(f"--> Connected to gateway {client.backend.gateway=}")
- print(f"--> Cluster {client.backend.gateway_cluster=}")
- print(f"--> Client {client=}")
- print(
- f"--> Cluster dashboard link {client.backend.gateway_cluster.dashboard_link}"
- )
- created_clients.append(client)
- return client
-
- yield factory
- await asyncio.gather(*[client.delete() for client in created_clients])
- print(f"<-- Disconnected gateway clients {created_clients=}")
-
-
-@pytest.fixture(
- params=["create_dask_client_from_scheduler", "create_dask_client_from_gateway"]
-)
+@pytest.fixture(params=["create_dask_client_from_scheduler"])
async def dask_client(
create_dask_client_from_scheduler: Callable[[], Awaitable[DaskClient]],
- create_dask_client_from_gateway: Callable[[], Awaitable[DaskClient]],
request,
) -> DaskClient:
client: DaskClient = await {
"create_dask_client_from_scheduler": create_dask_client_from_scheduler,
- "create_dask_client_from_gateway": create_dask_client_from_gateway,
}[request.param]()
try:
@@ -495,7 +433,6 @@ async def test_send_computation_task(
user_id: UserID,
project_id: ProjectID,
node_id: NodeID,
- cluster_id: ClusterID,
image_params: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -543,7 +480,6 @@ def fake_sidecar_fct(
node_id_to_job_ids = await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=image_params.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=functools.partial(
@@ -614,7 +550,6 @@ async def test_computation_task_is_persisted_on_dask_scheduler(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
image_params: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -651,7 +586,6 @@ def fake_sidecar_fct(
published_computation_task = await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=image_params.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=fake_sidecar_fct,
@@ -701,7 +635,6 @@ async def test_abort_computation_tasks(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
image_params: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -742,7 +675,6 @@ def fake_remote_fct(
published_computation_task = await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=image_params.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=fake_remote_fct,
@@ -793,7 +725,6 @@ async def test_failed_task_returns_exceptions(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
gpu_image: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -815,7 +746,6 @@ def fake_failing_sidecar_fct(
published_computation_task = await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=gpu_image.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=fake_failing_sidecar_fct,
@@ -857,7 +787,6 @@ async def test_send_computation_task_with_missing_resources_raises(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
image_params: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -885,7 +814,6 @@ async def test_send_computation_task_with_missing_resources_raises(
await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=image_params.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=None,
@@ -903,7 +831,6 @@ async def test_send_computation_task_with_hardware_info_raises(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
image_params: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -916,7 +843,6 @@ async def test_send_computation_task_with_hardware_info_raises(
await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=image_params.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=None,
@@ -934,7 +860,6 @@ async def test_too_many_resources_send_computation_task(
user_id: UserID,
project_id: ProjectID,
node_id: NodeID,
- cluster_id: ClusterID,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
mocked_storage_service_api: respx.MockRouter,
@@ -958,7 +883,6 @@ async def test_too_many_resources_send_computation_task(
await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=fake_task,
callback=mocked_user_completed_cb,
remote_fct=None,
@@ -971,11 +895,9 @@ async def test_too_many_resources_send_computation_task(
async def test_disconnected_backend_raises_exception(
dask_spec_local_cluster: SpecCluster,
- local_dask_gateway_server: DaskGatewayServer,
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
cpu_image: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -985,13 +907,10 @@ async def test_disconnected_backend_raises_exception(
):
# DISCONNECT THE CLUSTER
await dask_spec_local_cluster.close() # type: ignore
- await local_dask_gateway_server.server.cleanup()
- #
with pytest.raises(ComputationalBackendNotConnectedError):
await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=cpu_image.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=None,
@@ -1009,7 +928,6 @@ async def test_changed_scheduler_raises_exception(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
cpu_image: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -1041,7 +959,6 @@ async def test_changed_scheduler_raises_exception(
await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=cpu_image.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=None,
@@ -1051,13 +968,11 @@ async def test_changed_scheduler_raises_exception(
mocked_user_completed_cb.assert_not_called()
-@pytest.mark.flaky(max_runs=3)
@pytest.mark.parametrize("fail_remote_fct", [False, True])
async def test_get_tasks_status(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
cpu_image: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -1088,7 +1003,6 @@ def fake_remote_fct(
published_computation_task = await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=cpu_image.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=fake_remote_fct,
@@ -1148,7 +1062,6 @@ async def test_dask_sub_handlers(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
cpu_image: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -1180,7 +1093,6 @@ def fake_remote_fct(
published_computation_task = await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=cpu_image.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=fake_remote_fct,
@@ -1219,7 +1131,6 @@ async def test_get_cluster_details(
dask_client: DaskClient,
user_id: UserID,
project_id: ProjectID,
- cluster_id: ClusterID,
image_params: ImageParams,
_mocked_node_ports: None,
mocked_user_completed_cb: mock.AsyncMock,
@@ -1256,7 +1167,6 @@ def fake_sidecar_fct(
published_computation_task = await dask_client.send_computation_tasks(
user_id=user_id,
project_id=project_id,
- cluster_id=cluster_id,
tasks=image_params.fake_tasks,
callback=mocked_user_completed_cb,
remote_fct=functools.partial(
@@ -1318,30 +1228,3 @@ def fake_sidecar_fct(
].used_resources
assert all(res == 0.0 for res in currently_used_resources.values())
-
-
-@pytest.mark.skip(reason="manual testing")
-@pytest.mark.parametrize("tasks_file_link_type", [LinkType.S3], indirect=True)
-async def test_get_cluster_details_robust_to_worker_disappearing(
- create_dask_client_from_gateway: Callable[[], Awaitable[DaskClient]]
-):
- """When running a high number of comp. services in a gateway,
- one could observe an issue where getting the cluster used resources
- would fail sometimes and generate a big amount of errors in the logs
- due to dask worker disappearing or not completely ready.
- This test kind of simulates this."""
- dask_client = await create_dask_client_from_gateway()
- await dask_client.get_cluster_details()
-
- async def _scale_up_and_down() -> None:
- assert dask_client.backend.gateway_cluster
- await dask_client.backend.gateway_cluster.scale(40)
- await asyncio.sleep(1)
- await dask_client.backend.gateway_cluster.scale(1)
-
- async with periodic_task(
- _scale_up_and_down, interval=datetime.timedelta(seconds=1), task_name="pytest"
- ):
- for _ in range(900):
- await dask_client.get_cluster_details()
- await asyncio.sleep(0.1)
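With the dask-gateway fixtures deleted, every client in this module is created directly against the local scheduler and computations are submitted without a cluster_id. A condensed sketch of the resulting call shape, assuming DaskClient, minimal_app, dask_spec_local_cluster and the other fixtures are the ones used above (not a new test, just the shape of the surviving API):

from models_library.clusters import ClusterTypeInModel, NoAuthentication
from pydantic import AnyUrl, TypeAdapter


async def _submit_sketch(
    minimal_app, dask_spec_local_cluster, tasks_file_link_type,
    user_id, project_id, image_params, mocked_user_completed_cb,
):
    # DaskClient is the director-v2 client class imported by this test module
    client = await DaskClient.create(
        app=minimal_app,
        settings=minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND,
        endpoint=TypeAdapter(AnyUrl).validate_python(
            dask_spec_local_cluster.scheduler_address
        ),
        authentication=NoAuthentication(),  # no SimpleAuthentication/gateway login anymore
        tasks_file_link_type=tasks_file_link_type,
        cluster_type=ClusterTypeInModel.ON_PREMISE,
    )
    # cluster_id was dropped from send_computation_tasks
    return await client.send_computation_tasks(
        user_id=user_id,
        project_id=project_id,
        tasks=image_params.fake_tasks,
        callback=mocked_user_completed_cb,
        remote_fct=None,
    )
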
diff --git a/services/director-v2/tests/unit/test_modules_dask_clients_pool.py b/services/director-v2/tests/unit/test_modules_dask_clients_pool.py
index 3bd1e318878..6f87cb4bcb1 100644
--- a/services/director-v2/tests/unit/test_modules_dask_clients_pool.py
+++ b/services/director-v2/tests/unit/test_modules_dask_clients_pool.py
@@ -3,27 +3,23 @@
# pylint:disable=redefined-outer-name
+from collections.abc import AsyncIterator, Callable
+from pathlib import Path
from random import choice
-from typing import Any, AsyncIterator, Callable, get_args
+from typing import Any, cast, get_args
from unittest import mock
import pytest
-from _dask_helpers import DaskGatewayServer
-from common_library.json_serialization import json_dumps
-from common_library.serialization import model_dump_with_secrets
from distributed.deploy.spec import SpecCluster
from faker import Faker
+from fastapi import FastAPI
from models_library.clusters import (
- DEFAULT_CLUSTER_ID,
- Cluster,
+ BaseCluster,
ClusterAuthentication,
ClusterTypeInModel,
- JupyterHubTokenAuthentication,
- KerberosAuthentication,
NoAuthentication,
- SimpleAuthentication,
+ TLSAuthentication,
)
-from pydantic import SecretStr
from pytest_mock.plugin import MockerFixture
from pytest_simcore.helpers.typing_env import EnvVarsDict
from simcore_postgres_database.models.clusters import ClusterType
@@ -61,9 +57,9 @@ def test_dask_clients_pool_missing_raises_configuration_error(
settings = AppSettings.create_from_envs()
app = init_app(settings)
- with TestClient(app, raise_server_exceptions=True) as client:
+ with TestClient(app, raise_server_exceptions=True): # noqa: SIM117
with pytest.raises(ConfigurationError):
- DaskClientsPool.instance(client.app)
+ DaskClientsPool.instance(app)
def test_dask_clients_pool_properly_setup_and_deleted(
@@ -77,66 +73,36 @@ def test_dask_clients_pool_properly_setup_and_deleted(
settings = AppSettings.create_from_envs()
app = init_app(settings)
- with TestClient(app, raise_server_exceptions=True) as client:
+ with TestClient(app, raise_server_exceptions=True):
mocked_dask_clients_pool.create.assert_called_once()
mocked_dask_clients_pool.delete.assert_called_once()
@pytest.fixture
-def fake_clusters(faker: Faker) -> Callable[[int], list[Cluster]]:
- def creator(num_clusters: int) -> list[Cluster]:
- fake_clusters = []
- for n in range(num_clusters):
- fake_clusters.append(
- Cluster.model_validate(
- {
- "id": faker.pyint(),
- "name": faker.name(),
- "type": ClusterType.ON_PREMISE,
- "owner": faker.pyint(),
- "endpoint": faker.uri(),
- "authentication": choice(
- [
- NoAuthentication(),
- SimpleAuthentication(
- username=faker.user_name(),
- password=faker.password(),
- ),
- KerberosAuthentication(),
- JupyterHubTokenAuthentication(api_token=faker.uuid4()),
- ]
- ),
- }
- )
- )
- return fake_clusters
-
- return creator
-
-
-@pytest.fixture()
-def default_scheduler_set_as_osparc_gateway(
- local_dask_gateway_server: DaskGatewayServer,
- monkeypatch: pytest.MonkeyPatch,
- faker: Faker,
-) -> Callable:
- def creator():
- monkeypatch.setenv(
- "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL",
- local_dask_gateway_server.proxy_address,
- )
- monkeypatch.setenv(
- "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH",
- json_dumps(
- model_dump_with_secrets(
- SimpleAuthentication(
- username=faker.user_name(),
- password=SecretStr(local_dask_gateway_server.password),
+def fake_clusters(faker: Faker) -> Callable[[int], list[BaseCluster]]:
+ def creator(num_clusters: int) -> list[BaseCluster]:
+ return [
+ BaseCluster.model_validate(
+ {
+ "id": faker.pyint(),
+ "name": faker.name(),
+ "type": ClusterType.ON_PREMISE,
+ "owner": faker.pyint(),
+ "endpoint": faker.uri(),
+ "authentication": choice( # noqa: S311
+ [
+ NoAuthentication(),
+ TLSAuthentication(
+ tls_client_cert=Path(faker.file_path()),
+ tls_client_key=Path(faker.file_path()),
+ tls_ca_file=Path(faker.file_path()),
+ ),
+ ]
),
- show_secrets=True,
- )
- ),
- )
+ }
+ )
+ for _n in range(num_clusters)
+ ]
return creator
@@ -157,17 +123,14 @@ def creator():
@pytest.fixture(
params=[
"default_scheduler_set_as_dask_scheduler",
- "default_scheduler_set_as_osparc_gateway",
]
)
def default_scheduler(
default_scheduler_set_as_dask_scheduler,
- default_scheduler_set_as_osparc_gateway,
request,
):
{
"default_scheduler_set_as_dask_scheduler": default_scheduler_set_as_dask_scheduler,
- "default_scheduler_set_as_osparc_gateway": default_scheduler_set_as_osparc_gateway,
}[request.param]()
@@ -175,28 +138,30 @@ async def test_dask_clients_pool_acquisition_creates_client_on_demand(
minimal_dask_config: None,
mocker: MockerFixture,
client: TestClient,
- fake_clusters: Callable[[int], list[Cluster]],
+ fake_clusters: Callable[[int], list[BaseCluster]],
):
assert client.app
+ the_app = cast(FastAPI, client.app)
mocked_dask_client = mocker.patch(
"simcore_service_director_v2.modules.dask_clients_pool.DaskClient",
autospec=True,
)
mocked_dask_client.create.return_value = mocked_dask_client
- clients_pool = DaskClientsPool.instance(client.app)
+ clients_pool = DaskClientsPool.instance(the_app)
mocked_dask_client.create.assert_not_called()
mocked_dask_client.register_handlers.assert_not_called()
clusters = fake_clusters(30)
mocked_creation_calls = []
+ assert isinstance(the_app.state.settings, AppSettings)
for cluster in clusters:
mocked_creation_calls.append(
mock.call(
app=client.app,
- settings=client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND,
+ settings=the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND,
authentication=cluster.authentication,
endpoint=cluster.endpoint,
- tasks_file_link_type=client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE,
+ tasks_file_link_type=the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE,
cluster_type=ClusterTypeInModel.ON_PREMISE,
)
)
@@ -218,14 +183,16 @@ async def test_acquiring_wrong_cluster_raises_exception(
minimal_dask_config: None,
mocker: MockerFixture,
client: TestClient,
- fake_clusters: Callable[[int], list[Cluster]],
+ fake_clusters: Callable[[int], list[BaseCluster]],
):
+ assert client.app
+ the_app = cast(FastAPI, client.app)
mocked_dask_client = mocker.patch(
"simcore_service_director_v2.modules.dask_clients_pool.DaskClient",
autospec=True,
)
mocked_dask_client.create.side_effect = Exception
- clients_pool = DaskClientsPool.instance(client.app)
+ clients_pool = DaskClientsPool.instance(the_app)
mocked_dask_client.assert_not_called()
non_existing_cluster = fake_clusters(1)[0]
@@ -237,9 +204,9 @@ async def test_acquiring_wrong_cluster_raises_exception(
def test_default_cluster_correctly_initialized(
minimal_dask_config: None, default_scheduler: None, client: TestClient
):
- dask_scheduler_settings = (
- client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND
- )
+ assert client.app
+ the_app = cast(FastAPI, client.app)
+ dask_scheduler_settings = the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND
default_cluster = dask_scheduler_settings.default_cluster
assert default_cluster
assert (
@@ -247,7 +214,6 @@ def test_default_cluster_correctly_initialized(
== dask_scheduler_settings.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL
)
- assert default_cluster.id == DEFAULT_CLUSTER_ID
assert isinstance(default_cluster.authentication, get_args(ClusterAuthentication))
@@ -257,7 +223,9 @@ async def dask_clients_pool(
default_scheduler,
client: TestClient,
) -> AsyncIterator[DaskClientsPool]:
- clients_pool = DaskClientsPool.instance(client.app)
+ assert client.app
+ the_app = cast(FastAPI, client.app)
+ clients_pool = DaskClientsPool.instance(the_app)
assert clients_pool
yield clients_pool
await clients_pool.delete()
@@ -268,9 +236,8 @@ async def test_acquire_default_cluster(
client: TestClient,
):
assert client.app
- dask_scheduler_settings = (
- client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND
- )
+ the_app = cast(FastAPI, client.app)
+ dask_scheduler_settings = the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND
default_cluster = dask_scheduler_settings.default_cluster
assert default_cluster
async with dask_clients_pool.acquire(default_cluster) as dask_client:
@@ -280,7 +247,7 @@ def just_a_quick_fct(x, y):
assert (
dask_client.tasks_file_link_type
- == client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE
+ == the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE
)
future = dask_client.backend.client.submit(just_a_quick_fct, 12, 23)
assert future
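Cluster (with its owner/access-rights machinery) is gone from models_library, so the pool tests now fabricate plain BaseCluster objects carrying only the authentication schemes that survive the gateway removal. A self-contained sketch of the fake-cluster creator, mirroring the fixture above:

from pathlib import Path
from random import choice

from faker import Faker
from models_library.clusters import BaseCluster, NoAuthentication, TLSAuthentication
from simcore_postgres_database.models.clusters import ClusterType


def make_fake_cluster(faker: Faker) -> BaseCluster:
    return BaseCluster.model_validate(
        {
            "id": faker.pyint(),
            "name": faker.name(),
            "type": ClusterType.ON_PREMISE,
            "owner": faker.pyint(),
            "endpoint": faker.uri(),
            # only NoAuthentication and TLSAuthentication remain supported
            "authentication": choice(  # noqa: S311
                [
                    NoAuthentication(),
                    TLSAuthentication(
                        tls_client_cert=Path(faker.file_path()),
                        tls_client_key=Path(faker.file_path()),
                        tls_ca_file=Path(faker.file_path()),
                    ),
                ]
            ),
        }
    )

Acquiring a client for such a cluster still goes through DaskClientsPool.instance(app) and "async with clients_pool.acquire(cluster) as dask_client: ...", exactly as in test_acquire_default_cluster above.
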
diff --git a/services/director-v2/tests/unit/test_utils_db.py b/services/director-v2/tests/unit/test_utils_db.py
index d2a9e49484d..4bb06b82085 100644
--- a/services/director-v2/tests/unit/test_utils_db.py
+++ b/services/director-v2/tests/unit/test_utils_db.py
@@ -1,40 +1,10 @@
-from contextlib import suppress
-from typing import Any, cast
-
import pytest
-from models_library.clusters import BaseCluster, Cluster
from models_library.projects_state import RunningState
-from pydantic import BaseModel
from simcore_postgres_database.models.comp_pipeline import StateType
from simcore_service_director_v2.utils.db import (
DB_TO_RUNNING_STATE,
RUNNING_STATE_TO_DB,
- to_clusters_db,
-)
-
-
-@pytest.mark.parametrize(
- "model_cls",
- [Cluster],
)
-def test_export_clusters_to_db(
- model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]]
-):
- for example in model_cls_examples.values():
- owner_gid = example["owner"]
- # remove the owner from the access rights if any
- with suppress(KeyError):
- example.get("access_rights", {}).pop(owner_gid)
- instance = cast(BaseCluster, model_cls(**example))
-
- # for updates
-
- cluster_db_dict = to_clusters_db(instance, only_update=True)
- keys_not_in_db = ["id", "access_rights"]
-
- assert list(cluster_db_dict.keys()) == [
- x for x in example if x not in keys_not_in_db
- ]
@pytest.mark.parametrize("input_running_state", RunningState)
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
index 6b6084c5895..04b85f8ad82 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py
@@ -4,7 +4,7 @@
# pylint: disable=too-many-arguments
# pylint: disable=unused-argument
# pylint: disable=unused-variable
-# pylint:disable=too-many-positional-arguments
+# pylint: disable=too-many-positional-arguments
import datetime as dt
import json
@@ -34,10 +34,8 @@
PricingPlanGet,
PricingUnitGet,
)
-from models_library.clusters import DEFAULT_CLUSTER_ID, Cluster, ClusterID
from models_library.projects import ProjectAtDB
from models_library.projects_nodes import NodeID, NodeState
-from models_library.projects_nodes_io import NodeIDStr
from models_library.projects_pipeline import PipelineDetails
from models_library.projects_state import RunningState
from models_library.service_settings_labels import SimcoreServiceLabels
@@ -49,7 +47,7 @@
)
from models_library.utils.fastapi_encoders import jsonable_encoder
from models_library.wallets import WalletInfo
-from pydantic import AnyHttpUrl, ByteSize, PositiveInt, TypeAdapter, ValidationError
+from pydantic import AnyHttpUrl, ByteSize, PositiveInt, TypeAdapter
from pytest_mock.plugin import MockerFixture
from pytest_simcore.helpers.typing_env import EnvVarsDict
from settings_library.rabbit import RabbitSettings
@@ -288,6 +286,11 @@ def _mocked_services_details(
yield respx_mock
+assert "json_schema_extra" in PricingPlanGet.model_config
+assert isinstance(PricingPlanGet.model_config["json_schema_extra"], dict)
+assert isinstance(PricingPlanGet.model_config["json_schema_extra"]["examples"], list)
+
+
@pytest.fixture(
params=PricingPlanGet.model_config["json_schema_extra"]["examples"],
ids=["with ec2 restriction", "without"],
@@ -300,6 +303,7 @@ def default_pricing_plan(request: pytest.FixtureRequest) -> PricingPlanGet:
def default_pricing_plan_aws_ec2_type(
default_pricing_plan: PricingPlanGet,
) -> str | None:
+ assert default_pricing_plan.pricing_units
for p in default_pricing_plan.pricing_units:
if p.default:
if p.specific_info.aws_ec2_instances:
@@ -327,6 +331,11 @@ def _mocked_service_default_pricing_plan(
)
def _mocked_get_pricing_unit(request, pricing_plan_id: int) -> httpx.Response:
+ assert "json_schema_extra" in PricingUnitGet.model_config
+ assert isinstance(PricingUnitGet.model_config["json_schema_extra"], dict)
+ assert isinstance(
+ PricingUnitGet.model_config["json_schema_extra"]["examples"], list
+ )
return httpx.Response(
200,
json=jsonable_encoder(
@@ -374,30 +383,6 @@ async def test_computation_create_validators(
):
user = registered_user()
proj = await project(user, workbench=fake_workbench_without_outputs)
- # cluster id and use_on_demand raises
- with pytest.raises(ValidationError, match=r"cluster_id cannot be set.+"):
- ComputationCreate(
- user_id=user["id"],
- project_id=proj.uuid,
- product_name=faker.pystr(),
- use_on_demand_clusters=True,
- cluster_id=faker.pyint(),
- )
- # this should not raise
- ComputationCreate(
- user_id=user["id"],
- project_id=proj.uuid,
- product_name=faker.pystr(),
- use_on_demand_clusters=True,
- cluster_id=None,
- )
- ComputationCreate(
- user_id=user["id"],
- project_id=proj.uuid,
- product_name=faker.pystr(),
- use_on_demand_clusters=False,
- cluster_id=faker.pyint(),
- )
ComputationCreate(
user_id=user["id"],
project_id=proj.uuid,
@@ -493,6 +478,13 @@ def mocked_clusters_keeper_service_get_instance_type_details_with_invalid_name(
)
+assert "json_schema_extra" in ServiceResourcesDictHelpers.model_config
+assert isinstance(ServiceResourcesDictHelpers.model_config["json_schema_extra"], dict)
+assert isinstance(
+ ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"], list
+)
+
+
@pytest.fixture(
params=ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"]
)
@@ -559,7 +551,7 @@ async def test_create_computation_with_wallet(
project_nodes_repo = ProjectNodesRepo(project_uuid=proj.uuid)
for node in await project_nodes_repo.list(connection):
if (
- to_node_class(proj.workbench[NodeIDStr(f"{node.node_id}")].key)
+ to_node_class(proj.workbench[f"{node.node_id}"].key)
!= NodeClass.FRONTEND
):
assert node.required_resources
@@ -604,7 +596,11 @@ async def test_create_computation_with_wallet(
@pytest.mark.parametrize(
"default_pricing_plan",
- [PricingPlanGet(**PricingPlanGet.model_config["json_schema_extra"]["examples"][0])],
+ [
+ PricingPlanGet.model_validate(
+ PricingPlanGet.model_config["json_schema_extra"]["examples"][0]
+ )
+ ],
)
async def test_create_computation_with_wallet_with_invalid_pricing_unit_name_raises_422(
minimal_configuration: None,
@@ -744,6 +740,13 @@ async def test_start_computation_with_project_node_resources_defined(
async_client: httpx.AsyncClient,
):
user = registered_user()
+ assert "json_schema_extra" in ServiceResourcesDictHelpers.model_config
+ assert isinstance(
+ ServiceResourcesDictHelpers.model_config["json_schema_extra"], dict
+ )
+ assert isinstance(
+ ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"], list
+ )
proj = await project(
user,
project_nodes_overrides={
@@ -799,77 +802,6 @@ async def test_start_computation_with_deprecated_services_raises_406(
assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE, response.text
-@pytest.fixture
-async def unusable_cluster(
- registered_user: Callable[..., dict[str, Any]],
- create_cluster: Callable[..., Awaitable[Cluster]],
-) -> ClusterID:
- user = registered_user()
- created_cluster = await create_cluster(user)
- return created_cluster.id
-
-
-async def test_start_computation_with_forbidden_cluster_raises_403(
- minimal_configuration: None,
- mocked_director_service_fcts,
- mocked_catalog_service_fcts,
- product_name: str,
- fake_workbench_without_outputs: dict[str, Any],
- registered_user: Callable[..., dict[str, Any]],
- project: Callable[..., Awaitable[ProjectAtDB]],
- async_client: httpx.AsyncClient,
- unusable_cluster: ClusterID,
-):
- user = registered_user()
- proj = await project(user, workbench=fake_workbench_without_outputs)
- create_computation_url = httpx.URL("/v2/computations")
- response = await async_client.post(
- create_computation_url,
- json=jsonable_encoder(
- ComputationCreate(
- user_id=user["id"],
- project_id=proj.uuid,
- start_pipeline=True,
- product_name=product_name,
- cluster_id=unusable_cluster,
- )
- ),
- )
- assert response.status_code == status.HTTP_403_FORBIDDEN, response.text
- assert f"cluster {unusable_cluster}" in response.text
-
-
-async def test_start_computation_with_unknown_cluster_raises_406(
- minimal_configuration: None,
- mocked_director_service_fcts,
- mocked_catalog_service_fcts,
- product_name: str,
- fake_workbench_without_outputs: dict[str, Any],
- registered_user: Callable[..., dict[str, Any]],
- project: Callable[..., Awaitable[ProjectAtDB]],
- async_client: httpx.AsyncClient,
- faker: Faker,
-):
- user = registered_user()
- proj = await project(user, workbench=fake_workbench_without_outputs)
- create_computation_url = httpx.URL("/v2/computations")
- unknown_cluster_id = faker.pyint(1, 10000)
- response = await async_client.post(
- create_computation_url,
- json=jsonable_encoder(
- ComputationCreate(
- user_id=user["id"],
- project_id=proj.uuid,
- start_pipeline=True,
- product_name=product_name,
- cluster_id=unknown_cluster_id,
- )
- ),
- )
- assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE, response.text
- assert f"cluster {unknown_cluster_id}" in response.text
-
-
async def test_get_computation_from_empty_project(
minimal_configuration: None,
fake_workbench_without_outputs: dict[str, Any],
@@ -914,7 +846,6 @@ async def test_get_computation_from_empty_project(
stop_url=None,
result=None,
iteration=None,
- cluster_id=None,
started=None,
stopped=None,
submitted=None,
@@ -980,7 +911,6 @@ async def test_get_computation_from_not_started_computation_task(
stop_url=None,
result=None,
iteration=None,
- cluster_id=None,
started=None,
stopped=None,
submitted=None,
@@ -1057,7 +987,6 @@ async def test_get_computation_from_published_computation_task(
stop_url=TypeAdapter(AnyHttpUrl).validate_python(f"{expected_stop_url}"),
result=None,
iteration=1,
- cluster_id=DEFAULT_CLUSTER_ID,
started=None,
stopped=None,
submitted=None,
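The removed validator tests covered the interplay between cluster_id and use_on_demand_clusters; with the field gone there is nothing left to clash, so only the plain constructions remain. A sketch of what a ComputationCreate call looks like now, reusing the user/proj/faker objects from the surrounding test (ComputationCreate is assumed to be imported in this module as before; kwargs mirror the deleted "this should not raise" case minus cluster_id):

ComputationCreate(
    user_id=user["id"],
    project_id=proj.uuid,
    product_name=faker.pystr(),
    use_on_demand_clusters=True,  # no cluster_id field to combine with anymore
)
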
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py
index 845983b99cb..73d59a740c5 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py
@@ -30,6 +30,8 @@
pytest_simcore_core_services_selection = [
"postgres",
+ "rabbit",
+ "redis",
]
pytest_simcore_ops_services_selection = [
"adminer",
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py
index ba903d1b069..1dea4f59cbe 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py
@@ -15,12 +15,10 @@
import pytest
from _helpers import PublishedProject
from faker import Faker
-from models_library.clusters import DEFAULT_CLUSTER_ID, Cluster
from models_library.projects import ProjectID
from models_library.projects_state import RunningState
from models_library.users import UserID
from simcore_service_director_v2.core.errors import (
- ClusterNotFoundError,
ComputationalRunNotFoundError,
ProjectNotFoundError,
UserNotFoundError,
@@ -89,7 +87,6 @@ async def test_list(
created = await CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -101,7 +98,6 @@ async def test_list(
CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=created.iteration + n + 1,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -260,13 +256,11 @@ async def test_create(
run_metadata: RunMetadataDict,
faker: Faker,
publish_project: Callable[[], Awaitable[PublishedProject]],
- create_cluster: Callable[..., Awaitable[Cluster]],
):
with pytest.raises(ProjectNotFoundError):
await CompRunsRepository(aiopg_engine).create(
user_id=fake_user_id,
project_id=fake_project_id,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -276,7 +270,6 @@ async def test_create(
await CompRunsRepository(aiopg_engine).create(
user_id=fake_user_id,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -285,7 +278,6 @@ async def test_create(
created = await CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -300,7 +292,6 @@ async def test_create(
created = await CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -315,25 +306,6 @@ async def test_create(
)
assert created == got
- with pytest.raises(ClusterNotFoundError):
- await CompRunsRepository(aiopg_engine).create(
- user_id=published_project.user["id"],
- project_id=published_project.project.uuid,
- cluster_id=faker.pyint(min_value=1),
- iteration=None,
- metadata=run_metadata,
- use_on_demand_clusters=faker.pybool(),
- )
- cluster = await create_cluster(published_project.user)
- await CompRunsRepository(aiopg_engine).create(
- user_id=published_project.user["id"],
- project_id=published_project.project.uuid,
- cluster_id=cluster.id,
- iteration=None,
- metadata=run_metadata,
- use_on_demand_clusters=faker.pybool(),
- )
-
async def test_update(
aiopg_engine,
@@ -353,7 +325,6 @@ async def test_update(
created = await CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -387,7 +358,6 @@ async def test_set_run_result(
created = await CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -435,7 +405,6 @@ async def test_mark_for_cancellation(
created = await CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -467,7 +436,6 @@ async def test_mark_for_scheduling(
created = await CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
@@ -501,7 +469,6 @@ async def test_mark_scheduling_done(
created = await CompRunsRepository(aiopg_engine).create(
user_id=published_project.user["id"],
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
iteration=None,
metadata=run_metadata,
use_on_demand_clusters=faker.pybool(),
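CompRunsRepository.create follows the same simplification: it no longer takes or validates a cluster_id, which is why ClusterNotFoundError and the create_cluster fixture disappear from this module. A sketch of the surviving call, mirroring the hunks above (aiopg_engine, published_project, run_metadata and faker are the usual fixtures; CompRunsRepository is imported as in this test module):

async def _create_run_sketch(aiopg_engine, published_project, run_metadata, faker):
    return await CompRunsRepository(aiopg_engine).create(
        user_id=published_project.user["id"],
        project_id=published_project.project.uuid,
        iteration=None,
        metadata=run_metadata,
        use_on_demand_clusters=faker.pybool(),
        # the resulting comp_run.cluster_id is now expected to be None
        # (see the assertions in test_manager.py below)
    )
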
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py
index ac5bbbcc942..47bdd35f8cd 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py
@@ -18,7 +18,6 @@
import pytest
from _helpers import PublishedProject, assert_comp_runs, assert_comp_runs_empty
from fastapi import FastAPI
-from models_library.clusters import DEFAULT_CLUSTER_ID
from models_library.projects import ProjectAtDB
from models_library.projects_state import RunningState
from pytest_mock.plugin import MockerFixture
@@ -156,7 +155,6 @@ async def test_schedule_all_pipelines(
initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
@@ -174,7 +172,7 @@ async def test_schedule_all_pipelines(
assert comp_run.user_id == published_project.project.prj_owner
assert comp_run.iteration == 1
assert comp_run.cancelled is None
- assert comp_run.cluster_id == DEFAULT_CLUSTER_ID
+ assert comp_run.cluster_id is None
assert comp_run.metadata == run_metadata
assert comp_run.result is RunningState.PUBLISHED
assert comp_run.scheduled is not None
@@ -260,7 +258,6 @@ async def test_schedule_all_pipelines_logs_error_if_it_find_old_pipelines(
initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
@@ -278,7 +275,7 @@ async def test_schedule_all_pipelines_logs_error_if_it_find_old_pipelines(
assert comp_run.user_id == published_project.project.prj_owner
assert comp_run.iteration == 1
assert comp_run.cancelled is None
- assert comp_run.cluster_id == DEFAULT_CLUSTER_ID
+ assert comp_run.cluster_id is None
assert comp_run.metadata == run_metadata
assert comp_run.result is RunningState.PUBLISHED
assert comp_run.scheduled is not None
@@ -345,7 +342,6 @@ async def test_empty_pipeline_is_not_scheduled(
initialized_app,
user_id=user["id"],
project_id=empty_project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
@@ -361,7 +357,6 @@ async def test_empty_pipeline_is_not_scheduled(
initialized_app,
user_id=user["id"],
project_id=empty_project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
index 7609f6e956e..d9559b6c75e 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py
@@ -30,7 +30,6 @@
from dask_task_models_library.container_tasks.protocol import TaskOwner
from faker import Faker
from fastapi.applications import FastAPI
-from models_library.clusters import DEFAULT_CLUSTER_ID
from models_library.projects import ProjectAtDB, ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.projects_state import RunningState
@@ -169,7 +168,6 @@ async def _assert_start_pipeline(
app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
@@ -253,7 +251,6 @@ async def _return_tasks_pending(job_ids: list[str]) -> list[DaskClientTaskState]
mock.call(
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
tasks={f"{p.node_id}": p.image},
callback=mock.ANY,
metadata=mock.ANY,
@@ -651,7 +648,6 @@ async def _return_random_task_result(job_id) -> TaskOutputData:
mocked_dask_client.send_computation_tasks.assert_called_once_with(
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
tasks={
f"{next_pending_task.node_id}": next_pending_task.image,
},
@@ -1115,7 +1111,6 @@ async def test_broken_pipeline_configuration_is_not_scheduled_and_aborted(
initialized_app,
user_id=user["id"],
project_id=sleepers_project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
@@ -1241,7 +1236,6 @@ async def test_handling_of_disconnected_scheduler_dask(
initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
@@ -1749,7 +1743,6 @@ async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits(
initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=True,
)
@@ -1854,7 +1847,6 @@ async def test_pipeline_with_on_demand_cluster_with_no_clusters_keeper_fails(
initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=True,
)
diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py
index 9eb301e0910..8a66e543ed1 100644
--- a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py
+++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py
@@ -14,7 +14,6 @@
import pytest
from _helpers import PublishedProject
from fastapi import FastAPI
-from models_library.clusters import DEFAULT_CLUSTER_ID
from pytest_mock import MockerFixture
from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
from pytest_simcore.helpers.typing_env import EnvVarsDict
@@ -66,7 +65,6 @@ async def test_worker_properly_autocalls_scheduler_api(
initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
@@ -123,7 +121,6 @@ async def _project_pipeline_creation_workflow() -> None:
initialized_app,
user_id=published_project.project.prj_owner,
project_id=published_project.project.uuid,
- cluster_id=DEFAULT_CLUSTER_ID,
run_metadata=run_metadata,
use_on_demand_clusters=False,
)
diff --git a/services/director-v2/tests/unit/with_dbs/conftest.py b/services/director-v2/tests/unit/with_dbs/conftest.py
index 56784acba13..703686d2526 100644
--- a/services/director-v2/tests/unit/with_dbs/conftest.py
+++ b/services/director-v2/tests/unit/with_dbs/conftest.py
@@ -16,12 +16,9 @@
from _helpers import PublishedProject, RunningProject
from faker import Faker
from fastapi.encoders import jsonable_encoder
-from models_library.clusters import Cluster
from models_library.projects import ProjectAtDB, ProjectID
from models_library.projects_nodes_io import NodeID
from pydantic.main import BaseModel
-from simcore_postgres_database.models.cluster_to_groups import cluster_to_groups
-from simcore_postgres_database.models.clusters import clusters
from simcore_postgres_database.models.comp_pipeline import StateType, comp_pipeline
from simcore_postgres_database.models.comp_runs import comp_runs
from simcore_postgres_database.models.comp_tasks import comp_tasks
@@ -34,8 +31,6 @@
from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB, Image
from simcore_service_director_v2.utils.computations import to_node_class
from simcore_service_director_v2.utils.dask import generate_dask_job_id
-from simcore_service_director_v2.utils.db import to_clusters_db
-from sqlalchemy.dialects.postgresql import insert as pg_insert
from sqlalchemy.ext.asyncio import AsyncEngine
@@ -223,87 +218,6 @@ async def _(
)
-@pytest.fixture
-async def create_cluster(
- sqlalchemy_async_engine: AsyncEngine,
-) -> AsyncIterator[Callable[..., Awaitable[Cluster]]]:
- created_cluster_ids: list[str] = []
-
- async def _(user: dict[str, Any], **cluster_kwargs) -> Cluster:
- assert "json_schema_extra" in Cluster.model_config
- assert isinstance(Cluster.model_config["json_schema_extra"], dict)
- assert isinstance(Cluster.model_config["json_schema_extra"]["examples"], list)
- assert isinstance(
- Cluster.model_config["json_schema_extra"]["examples"][1], dict
- )
- cluster_config = Cluster.model_config["json_schema_extra"]["examples"][1]
- cluster_config["owner"] = user["primary_gid"]
- cluster_config.update(**cluster_kwargs)
- new_cluster = Cluster.model_validate(cluster_config)
- assert new_cluster
-
- async with sqlalchemy_async_engine.begin() as conn:
- # insert basic cluster
- created_cluster = (
- await conn.execute(
- sa.insert(clusters)
- .values(to_clusters_db(new_cluster, only_update=False))
- .returning(sa.literal_column("*"))
- )
- ).one()
- created_cluster_ids.append(created_cluster.id)
- if "access_rights" in cluster_kwargs:
- for gid, rights in cluster_kwargs["access_rights"].items():
- await conn.execute(
- pg_insert(cluster_to_groups)
- .values(
- cluster_id=created_cluster.id,
- gid=gid,
- **rights.model_dump(),
- )
- .on_conflict_do_update(
- index_elements=["gid", "cluster_id"],
- set_=rights.model_dump(),
- )
- )
- access_rights_in_db = {}
- for row in await conn.execute(
- sa.select(
- cluster_to_groups.c.gid,
- cluster_to_groups.c.read,
- cluster_to_groups.c.write,
- cluster_to_groups.c.delete,
- )
- .select_from(clusters.join(cluster_to_groups))
- .where(clusters.c.id == created_cluster.id)
- ):
- access_rights_in_db[row.gid] = {
- "read": row.read,
- "write": row.write,
- "delete": row.delete,
- }
-
- return Cluster(
- id=created_cluster.id,
- name=created_cluster.name,
- description=created_cluster.description,
- type=created_cluster.type,
- owner=created_cluster.owner,
- endpoint=created_cluster.endpoint,
- authentication=created_cluster.authentication,
- access_rights=access_rights_in_db,
- thumbnail=None,
- )
-
- yield _
-
- # cleanup
- async with sqlalchemy_async_engine.begin() as conn:
- await conn.execute(
- clusters.delete().where(clusters.c.id.in_(created_cluster_ids))
- )
-
-
@pytest.fixture
async def publish_project(
registered_user: Callable[..., dict[str, Any]],
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py
deleted file mode 100644
index 9f55e71f935..00000000000
--- a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py
+++ /dev/null
@@ -1,802 +0,0 @@
-# pylint:disable=unused-variable
-# pylint:disable=unused-argument
-# pylint:disable=redefined-outer-name
-
-import random
-from collections.abc import Callable, Iterator
-from typing import Any, Awaitable
-
-import httpx
-import pytest
-import sqlalchemy as sa
-from _dask_helpers import DaskGatewayServer
-from common_library.serialization import model_dump_with_secrets
-from distributed.deploy.spec import SpecCluster
-from faker import Faker
-from httpx import URL
-from models_library.api_schemas_directorv2.clusters import (
- ClusterCreate,
- ClusterGet,
- ClusterPatch,
- ClusterPing,
-)
-from models_library.clusters import (
- CLUSTER_ADMIN_RIGHTS,
- CLUSTER_MANAGER_RIGHTS,
- CLUSTER_NO_RIGHTS,
- CLUSTER_USER_RIGHTS,
- Cluster,
- ClusterAccessRights,
- ClusterAuthentication,
- SimpleAuthentication,
-)
-from pydantic import AnyHttpUrl, SecretStr, TypeAdapter
-from pytest_simcore.helpers.typing_env import EnvVarsDict
-from simcore_postgres_database.models.clusters import ClusterType, clusters
-from starlette import status
-
-pytest_simcore_core_services_selection = [
- "postgres",
-]
-pytest_simcore_ops_services_selection = [
- "adminer",
-]
-
-
-@pytest.fixture()
-def clusters_config(
- mock_env: EnvVarsDict,
- postgres_db: sa.engine.Engine,
- postgres_host_config: dict[str, str],
- monkeypatch: pytest.MonkeyPatch,
- dask_spec_local_cluster: SpecCluster,
- faker: Faker,
-):
- monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1")
- monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO")
- monkeypatch.setenv("S3_ENDPOINT", faker.url())
- monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr())
- monkeypatch.setenv("S3_REGION", faker.pystr())
- monkeypatch.setenv("S3_SECRET_KEY", faker.pystr())
- monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr())
-
-
-@pytest.fixture
-def cluster_simple_authentication(faker: Faker) -> Callable[[], dict[str, Any]]:
- def creator() -> dict[str, Any]:
- simple_auth = {
- "type": "simple",
- "username": faker.user_name(),
- "password": faker.password(),
- }
- assert SimpleAuthentication.model_validate(simple_auth)
- return simple_auth
-
- return creator
-
-
-@pytest.fixture
-def clusters_cleaner(postgres_db: sa.engine.Engine) -> Iterator:
- yield
- with postgres_db.connect() as conn:
- conn.execute(sa.delete(clusters))
-
-
-async def test_list_clusters(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- async_client: httpx.AsyncClient,
-):
- user_1 = registered_user()
- list_clusters_url = URL(f"/v2/clusters?user_id={user_1['id']}")
- # there is no cluster at the moment, the list shall contain the default cluster
- response = await async_client.get(list_clusters_url)
- assert response.status_code == status.HTTP_200_OK
- returned_clusters_list = TypeAdapter(list[ClusterGet]).validate_python(
- response.json()
- )
- assert (
- len(returned_clusters_list) == 1
- ), f"no default cluster in {returned_clusters_list=}"
- assert (
- returned_clusters_list[0].id == 0
- ), "default cluster id is not the one expected"
-
- # let's create some clusters
- NUM_CLUSTERS = 111
- for n in range(NUM_CLUSTERS):
- await create_cluster(user_1, name=f"pytest cluster{n:04}")
-
- response = await async_client.get(list_clusters_url)
- assert response.status_code == status.HTTP_200_OK
- returned_clusters_list = TypeAdapter(list[ClusterGet]).validate_python(
- response.json()
- )
- assert (
- len(returned_clusters_list) == NUM_CLUSTERS + 1
- ) # the default cluster comes on top of the NUM_CLUSTERS
- assert (
- returned_clusters_list[0].id == 0
- ), "the first cluster shall be the platform default cluster"
-
- # now create a second user and check the clusters are not seen by it BUT the default one
- user_2 = registered_user()
- response = await async_client.get(f"/v2/clusters?user_id={user_2['id']}")
- assert response.status_code == status.HTTP_200_OK
- returned_clusters_list = TypeAdapter(list[ClusterGet]).validate_python(
- response.json()
- )
- assert (
- len(returned_clusters_list) == 1
- ), f"no default cluster in {returned_clusters_list=}"
- assert (
- returned_clusters_list[0].id == 0
- ), "default cluster id is not the one expected"
-
- # let's create a few more clusters owned by user_1 with specific rights
- for rights, name in [
- (CLUSTER_NO_RIGHTS, "no rights"),
- (CLUSTER_USER_RIGHTS, "user rights"),
- (CLUSTER_MANAGER_RIGHTS, "manager rights"),
- (CLUSTER_ADMIN_RIGHTS, "admin rights"),
- ]:
- await create_cluster(
- user_1, # cluster is owned by user_1
- name=f"cluster with {name}",
- access_rights={
- user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS,
- user_2["primary_gid"]: rights,
- },
- )
-
- response = await async_client.get(f"/v2/clusters?user_id={user_2['id']}")
- assert response.status_code == status.HTTP_200_OK
- user_2_clusters = TypeAdapter(list[ClusterGet]).validate_python(response.json())
- # we should find 3 clusters + the default cluster
- assert len(user_2_clusters) == 3 + 1
- for name in [
- "cluster with user rights",
- "cluster with manager rights",
- "cluster with admin rights",
- ]:
- clusters = list(
- filter(
- lambda cluster, name=name: cluster.name == name,
- user_2_clusters,
- ),
- )
- assert len(clusters) == 1, f"missing cluster with {name=}"
-
-
-async def test_get_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- async_client: httpx.AsyncClient,
-):
- user_1 = registered_user()
- # try to get one that does not exist
- response = await async_client.get(
- f"/v2/clusters/15615165165165?user_id={user_1['id']}"
- )
- assert response.status_code == status.HTTP_404_NOT_FOUND
- # let's create some clusters
- a_bunch_of_clusters = [
- await create_cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111)
- ]
- the_cluster = random.choice(a_bunch_of_clusters)
-
-    # fetch the randomly chosen cluster and compare it with the one that was created
- response = await async_client.get(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}"
- )
- assert response.status_code == status.HTTP_200_OK, f"received {response.text}"
- returned_cluster = ClusterGet.model_validate(response.json())
- assert returned_cluster
- assert the_cluster.model_dump(
- exclude={"authentication"}
- ) == returned_cluster.model_dump(exclude={"authentication"})
-
- user_2 = registered_user()
- # getting the same cluster for user 2 shall return 403
- response = await async_client.get(
- f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}"
- )
- assert (
- response.status_code == status.HTTP_403_FORBIDDEN
- ), f"received {response.text}"
-    # let's create a few clusters for user 2 and share some with user 1
- for rights, user_1_expected_access in [
- (CLUSTER_NO_RIGHTS, False),
- (CLUSTER_USER_RIGHTS, True),
- (CLUSTER_MANAGER_RIGHTS, True),
- (CLUSTER_ADMIN_RIGHTS, True),
- ]:
- a_cluster = await create_cluster(
- user_2, # cluster is owned by user_2
- access_rights={
- user_2["primary_gid"]: CLUSTER_ADMIN_RIGHTS,
- user_1["primary_gid"]: rights,
- },
- )
- # now let's check that user_1 can access only the correct ones
- response = await async_client.get(
- f"/v2/clusters/{a_cluster.id}?user_id={user_1['id']}"
- )
-        assert response.status_code == (
-            status.HTTP_200_OK
-            if user_1_expected_access
-            else status.HTTP_403_FORBIDDEN
-        ), f"received {response.text}"
-
-
-@pytest.mark.parametrize(
- "cluster_sharing_rights, can_use",
- [
- pytest.param(CLUSTER_ADMIN_RIGHTS, True, id="SHARE_WITH_ADMIN_RIGHTS"),
- pytest.param(CLUSTER_MANAGER_RIGHTS, True, id="SHARE_WITH_MANAGER_RIGHTS"),
- pytest.param(CLUSTER_USER_RIGHTS, True, id="SHARE_WITH_USER_RIGHTS"),
- pytest.param(CLUSTER_NO_RIGHTS, False, id="DENY_RIGHTS"),
- ],
-)
-async def test_get_another_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- async_client: httpx.AsyncClient,
- cluster_sharing_rights: ClusterAccessRights,
- can_use: bool,
-):
- user_1 = registered_user()
- user_2 = registered_user()
- # let's create some clusters
- a_bunch_of_clusters = [
- await create_cluster(
- user_1,
- name=f"pytest cluster{n:04}",
- access_rights={
- user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS,
- user_2["primary_gid"]: cluster_sharing_rights,
- },
- )
- for n in range(111)
- ]
- the_cluster = random.choice(a_bunch_of_clusters)
- # try to get the cluster as user 2
- response = await async_client.get(
- f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}"
- )
-    assert response.status_code == (
-        status.HTTP_200_OK if can_use else status.HTTP_403_FORBIDDEN
-    ), f"received {response.text}"
-
-
-@pytest.mark.parametrize("with_query", [True, False])
-async def test_get_default_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- async_client: httpx.AsyncClient,
- with_query: bool,
-):
- user_1 = registered_user()
-
- get_cluster_url = URL("/v2/clusters/default")
- if with_query:
- get_cluster_url = URL(f"/v2/clusters/default?user_id={user_1['id']}")
- response = await async_client.get(get_cluster_url)
- assert response.status_code == status.HTTP_200_OK, f"received {response.text}"
- returned_cluster = ClusterGet.model_validate(response.json())
- assert returned_cluster
- assert returned_cluster.id == 0
- assert returned_cluster.name == "Default cluster"
- assert 1 in returned_cluster.access_rights # everyone group is always 1
- assert returned_cluster.access_rights[1] == CLUSTER_USER_RIGHTS
-
-
-async def test_create_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- cluster_simple_authentication: Callable,
- async_client: httpx.AsyncClient,
- faker: Faker,
- postgres_db: sa.engine.Engine,
- clusters_cleaner,
-):
- user_1 = registered_user()
- create_cluster_url = URL(f"/v2/clusters?user_id={user_1['id']}")
- cluster_data = ClusterCreate(
- endpoint=faker.uri(),
- authentication=cluster_simple_authentication(),
- name=faker.name(),
- type=random.choice(list(ClusterType)),
- owner=faker.pyint(min_value=1),
- )
- response = await async_client.post(
- create_cluster_url,
- json=model_dump_with_secrets(
- cluster_data,
- show_secrets=True,
- by_alias=True,
- exclude_unset=True,
- ),
- )
- assert response.status_code == status.HTTP_201_CREATED, f"received: {response.text}"
- created_cluster = ClusterGet.model_validate(response.json())
- assert created_cluster
-
- assert cluster_data.model_dump(
- exclude={"id", "owner", "access_rights", "authentication"}
- ) == created_cluster.model_dump(
- exclude={"id", "owner", "access_rights", "authentication"}
- )
-
- assert created_cluster.id is not None
- assert created_cluster.owner == user_1["primary_gid"]
- assert created_cluster.access_rights == {
- user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS
- }
-
-    # let's check that the DB is correctly set up: there is exactly one entry
- with postgres_db.connect() as conn:
- conn.execute(
- sa.select(clusters).where(clusters.c.name == cluster_data.name)
- ).one()
-
-
-async def test_update_own_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- cluster_simple_authentication: Callable,
- async_client: httpx.AsyncClient,
- faker: Faker,
-):
- _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True}
- user_1 = registered_user()
- # try to modify one that does not exist
- response = await async_client.patch(
- f"/v2/clusters/15615165165165?user_id={user_1['id']}",
- json=model_dump_with_secrets(
- ClusterPatch(), show_secrets=True, **_PATCH_EXPORT
- ),
- )
- assert response.status_code == status.HTTP_404_NOT_FOUND
- # let's create some clusters
- a_bunch_of_clusters = [
- await create_cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111)
- ]
- the_cluster = random.choice(a_bunch_of_clusters)
- # get the original one
- response = await async_client.get(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}"
- )
- assert response.status_code == status.HTTP_200_OK, f"received {response.text}"
- original_cluster = ClusterGet.model_validate(response.json())
-
- # now we modify nothing
- response = await async_client.patch(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}",
- json=model_dump_with_secrets(
- ClusterPatch(), show_secrets=True, **_PATCH_EXPORT
- ),
- )
- assert response.status_code == status.HTTP_200_OK, f"received {response.text}"
- returned_cluster = ClusterGet.model_validate(response.json())
- assert returned_cluster.model_dump() == original_cluster.model_dump()
-
- # modify some simple things
- expected_modified_cluster = original_cluster.model_copy()
- for cluster_patch in [
- ClusterPatch(name=faker.name()),
- ClusterPatch(description=faker.text()),
- ClusterPatch(type=ClusterType.ON_PREMISE),
- ClusterPatch(thumbnail=faker.uri()),
- ClusterPatch(endpoint=faker.uri()),
- ClusterPatch(authentication=cluster_simple_authentication()),
- ]:
- jsonable_cluster_patch = model_dump_with_secrets(
- cluster_patch, show_secrets=True, **_PATCH_EXPORT
- )
- print(f"--> patching cluster with {jsonable_cluster_patch}")
- response = await async_client.patch(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}",
- json=jsonable_cluster_patch,
- )
- assert response.status_code == status.HTTP_200_OK, f"received {response.text}"
- returned_cluster = ClusterGet.model_validate(response.json())
- expected_modified_cluster = expected_modified_cluster.model_copy(
- update=cluster_patch.model_dump(**_PATCH_EXPORT)
- )
- assert returned_cluster.model_dump(
- exclude={"authentication": {"password"}}
- ) == expected_modified_cluster.model_dump(
- exclude={"authentication": {"password"}}
- )
-
- # we can change the access rights, the owner rights are always kept
- user_2 = registered_user()
-
- for rights in [
- CLUSTER_ADMIN_RIGHTS,
- CLUSTER_MANAGER_RIGHTS,
- CLUSTER_USER_RIGHTS,
- CLUSTER_NO_RIGHTS,
- ]:
- cluster_patch = ClusterPatch(accessRights={user_2["primary_gid"]: rights})
- response = await async_client.patch(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}",
- json=cluster_patch.model_dump(**_PATCH_EXPORT),
- )
- assert response.status_code == status.HTTP_200_OK, f"received {response.text}"
- returned_cluster = ClusterGet.model_validate(response.json())
-
- expected_modified_cluster.access_rights[user_2["primary_gid"]] = rights
- assert returned_cluster.model_dump(
- exclude={"authentication": {"password"}}
- ) == expected_modified_cluster.model_dump(
- exclude={"authentication": {"password"}}
- )
- # we can change the owner since we are admin
- cluster_patch = ClusterPatch(owner=user_2["primary_gid"])
- response = await async_client.patch(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}",
- json=model_dump_with_secrets(cluster_patch, show_secrets=True, **_PATCH_EXPORT),
- )
- assert response.status_code == status.HTTP_200_OK, f"received {response.text}"
- returned_cluster = ClusterGet.model_validate(response.json())
- expected_modified_cluster.owner = user_2["primary_gid"]
- expected_modified_cluster.access_rights[
- user_2["primary_gid"]
- ] = CLUSTER_ADMIN_RIGHTS
- assert returned_cluster.model_dump(
- exclude={"authentication": {"password"}}
- ) == expected_modified_cluster.model_dump(exclude={"authentication": {"password"}})
-
- # we should not be able to reduce the rights of the new owner
- cluster_patch = ClusterPatch(
- accessRights={user_2["primary_gid"]: CLUSTER_NO_RIGHTS}
- )
- response = await async_client.patch(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}",
- json=model_dump_with_secrets(cluster_patch, show_secrets=True, **_PATCH_EXPORT),
- )
- assert (
- response.status_code == status.HTTP_403_FORBIDDEN
- ), f"received {response.text}"
-
-
-async def test_update_default_cluster_fails(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- cluster_simple_authentication: Callable,
- async_client: httpx.AsyncClient,
- faker: Faker,
-):
- _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True}
- user_1 = registered_user()
- # try to modify one that does not exist
- response = await async_client.patch(
- f"/v2/clusters/default?user_id={user_1['id']}",
- json=model_dump_with_secrets(
- ClusterPatch(), show_secrets=True, **_PATCH_EXPORT
- ),
- )
- assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
-
-
-@pytest.mark.parametrize(
- "cluster_sharing_rights, can_use, can_manage, can_administer",
- [
- pytest.param(
- CLUSTER_ADMIN_RIGHTS, True, True, True, id="SHARE_WITH_ADMIN_RIGHTS"
- ),
- pytest.param(
- CLUSTER_MANAGER_RIGHTS, True, True, False, id="SHARE_WITH_MANAGER_RIGHTS"
- ),
- pytest.param(
- CLUSTER_USER_RIGHTS, True, False, False, id="SHARE_WITH_USER_RIGHTS"
- ),
- pytest.param(CLUSTER_NO_RIGHTS, False, False, False, id="DENY_RIGHTS"),
- ],
-)
-async def test_update_another_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- cluster_simple_authentication: Callable,
- async_client: httpx.AsyncClient,
- faker: Faker,
- cluster_sharing_rights: ClusterAccessRights,
- can_use: bool,
- can_manage: bool,
- can_administer: bool,
-):
- """user_1 is the owner and administrator, he/she gives some rights to user 2"""
-
- _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True}
- user_1 = registered_user()
- user_2 = registered_user()
- # let's create some clusters
- a_bunch_of_clusters = [
- await create_cluster(
- user_1,
- name=f"pytest cluster{n:04}",
- access_rights={
- user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS,
- user_2["primary_gid"]: cluster_sharing_rights,
- },
- )
- for n in range(111)
- ]
- the_cluster = random.choice(a_bunch_of_clusters)
- # get the original one
- response = await async_client.get(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}"
- )
- assert response.status_code == status.HTTP_200_OK, f"received {response.text}"
- ClusterGet.model_validate(response.json())
-
- # let's try to modify stuff as we are user 2
- for cluster_patch in [
- ClusterPatch(name=faker.name()),
- ClusterPatch(description=faker.text()),
- ClusterPatch(type=ClusterType.ON_PREMISE),
- ClusterPatch(thumbnail=faker.uri()),
- ClusterPatch(endpoint=faker.uri()),
- ClusterPatch(authentication=cluster_simple_authentication()),
- ]:
- response = await async_client.patch(
- f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}",
- json=model_dump_with_secrets(
- cluster_patch, show_secrets=True, **_PATCH_EXPORT
- ),
- )
-        assert response.status_code == (
-            status.HTTP_200_OK if can_manage else status.HTTP_403_FORBIDDEN
-        ), f"received {response.text}"
-
- # let's try to add/remove someone (reserved to managers)
- user_3 = registered_user()
- for rights in [
- CLUSTER_USER_RIGHTS, # add user
- CLUSTER_NO_RIGHTS, # remove user
- ]:
- # try to add user 3
- cluster_patch = ClusterPatch(accessRights={user_3["primary_gid"]: rights})
- response = await async_client.patch(
- f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}",
- json=model_dump_with_secrets(
- cluster_patch, show_secrets=True, **_PATCH_EXPORT
- ),
- )
-        assert response.status_code == (
-            status.HTTP_200_OK if can_manage else status.HTTP_403_FORBIDDEN
-        ), f"received {response.text} while {'adding' if rights == CLUSTER_USER_RIGHTS else 'removing'} user"
-
- # modify rights to admin/manager (reserved to administrators)
- for rights in [
- CLUSTER_ADMIN_RIGHTS,
- CLUSTER_MANAGER_RIGHTS,
- ]:
- cluster_patch = ClusterPatch(accessRights={user_3["primary_gid"]: rights})
- response = await async_client.patch(
- f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}",
- json=model_dump_with_secrets(
- cluster_patch, show_secrets=True, **_PATCH_EXPORT
- ),
- )
-        assert response.status_code == (
-            status.HTTP_200_OK if can_administer else status.HTTP_403_FORBIDDEN
-        ), f"received {response.text}"
-
-
-async def test_delete_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- async_client: httpx.AsyncClient,
-):
- user_1 = registered_user()
- # let's create some clusters
- a_bunch_of_clusters = [
- await create_cluster(
- user_1,
- name=f"pytest cluster{n:04}",
- access_rights={
- user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS,
- },
- )
- for n in range(111)
- ]
- the_cluster = random.choice(a_bunch_of_clusters)
- # let's delete that cluster
- response = await async_client.delete(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}"
- )
- assert (
- response.status_code == status.HTTP_204_NO_CONTENT
- ), f"received {response.text}"
- # now check it is gone
- response = await async_client.get(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}"
- )
- assert (
- response.status_code == status.HTTP_404_NOT_FOUND
- ), f"received {response.text}"
-
-
-@pytest.mark.parametrize(
- "cluster_sharing_rights, can_administer",
- [
- pytest.param(CLUSTER_ADMIN_RIGHTS, True, id="SHARE_WITH_ADMIN_RIGHTS"),
- pytest.param(CLUSTER_MANAGER_RIGHTS, False, id="SHARE_WITH_MANAGER_RIGHTS"),
- pytest.param(CLUSTER_USER_RIGHTS, False, id="SHARE_WITH_USER_RIGHTS"),
- pytest.param(CLUSTER_NO_RIGHTS, False, id="DENY_RIGHTS"),
- ],
-)
-async def test_delete_another_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- cluster_simple_authentication: Callable,
- async_client: httpx.AsyncClient,
- faker: Faker,
- cluster_sharing_rights: ClusterAccessRights,
- can_administer: bool,
-):
- user_1 = registered_user()
- user_2 = registered_user()
- # let's create some clusters
- a_bunch_of_clusters = [
- await create_cluster(
- user_1,
- name=f"pytest cluster{n:04}",
- access_rights={
- user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS,
- user_2["primary_gid"]: cluster_sharing_rights,
- },
- )
- for n in range(111)
- ]
- the_cluster = random.choice(a_bunch_of_clusters)
- # let's delete that cluster as user_2
- response = await async_client.delete(
- f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}"
- )
-    assert response.status_code == (
-        status.HTTP_204_NO_CONTENT if can_administer else status.HTTP_403_FORBIDDEN
-    ), f"received {response.text}"
- # now check it is gone or still around
- response = await async_client.get(
- f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}"
- )
-    assert response.status_code == (
-        status.HTTP_404_NOT_FOUND if can_administer else status.HTTP_200_OK
-    ), f"received {response.text}"
-
-
-async def test_delete_default_cluster_fails(
- clusters_config: None,
- registered_user: Callable[..., dict],
- async_client: httpx.AsyncClient,
-):
- user_1 = registered_user()
- response = await async_client.delete(f"/v2/clusters/default?user_id={user_1['id']}")
- assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
-
-
-async def test_ping_invalid_cluster_raises_422(
- clusters_config: None,
- async_client: httpx.AsyncClient,
- faker: Faker,
- cluster_simple_authentication: Callable[[], dict[str, Any]],
-):
- # calling with wrong data raises
- response = await async_client.post("/v2/clusters:ping", json={})
- with pytest.raises(httpx.HTTPStatusError):
- response.raise_for_status()
-
-    # calling with correct data but a non-existing cluster also raises
- some_fake_cluster = ClusterPing(
- endpoint=faker.url(),
- authentication=TypeAdapter(ClusterAuthentication).validate_python(
- cluster_simple_authentication()
- ),
- )
- response = await async_client.post(
- "/v2/clusters:ping",
- json=model_dump_with_secrets(
- some_fake_cluster, show_secrets=True, by_alias=True
- ),
- )
- with pytest.raises(httpx.HTTPStatusError):
- response.raise_for_status()
-
-
-async def test_ping_cluster(
- clusters_config: None,
- async_client: httpx.AsyncClient,
- local_dask_gateway_server: DaskGatewayServer,
-):
- valid_cluster = ClusterPing(
- endpoint=TypeAdapter(AnyHttpUrl).validate_python(
- local_dask_gateway_server.address
- ),
- authentication=SimpleAuthentication(
- username="pytest_user",
- password=TypeAdapter(SecretStr).validate_python(
- local_dask_gateway_server.password
- ),
- ),
- )
- response = await async_client.post(
- "/v2/clusters:ping",
- json=model_dump_with_secrets(valid_cluster, show_secrets=True, by_alias=True),
- )
- response.raise_for_status()
- assert response.status_code == status.HTTP_204_NO_CONTENT
-
-
-async def test_ping_specific_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- create_cluster: Callable[..., Awaitable[Cluster]],
- async_client: httpx.AsyncClient,
- local_dask_gateway_server: DaskGatewayServer,
-):
- user_1 = registered_user()
- # try to ping one that does not exist
- response = await async_client.get(
- f"/v2/clusters/15615165165165:ping?user_id={user_1['id']}"
- )
- assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
-
- # let's create some clusters and ping one
- a_bunch_of_clusters = [
- await create_cluster(
- user_1,
- name=f"pytest cluster{n:04}",
- endpoint=local_dask_gateway_server.address,
- authentication=SimpleAuthentication(
- username="pytest_user",
- password=TypeAdapter(SecretStr).validate_python(
- local_dask_gateway_server.password
- ),
- ),
- )
- for n in range(111)
- ]
- the_cluster = random.choice(a_bunch_of_clusters)
-
- response = await async_client.post(
- f"/v2/clusters/{the_cluster.id}:ping?user_id={user_1['id']}",
- )
- response.raise_for_status()
- assert response.status_code == status.HTTP_204_NO_CONTENT
-
-
-async def test_ping_default_cluster(
- clusters_config: None,
- registered_user: Callable[..., dict],
- async_client: httpx.AsyncClient,
-):
- user_1 = registered_user()
-    # pinging the default cluster shall work
- response = await async_client.post(
- f"/v2/clusters/default:ping?user_id={user_1['id']}"
- )
- assert response.status_code == status.HTTP_204_NO_CONTENT
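The deleted tests above serialize authentication payloads through the project's model_dump_with_secrets helper (its implementation is not shown here). A minimal sketch, assuming plain pydantic v2 and hypothetical field values, of the SecretStr masking behaviour that makes such a helper necessary:

import json

from pydantic import BaseModel, SecretStr


class Credentials(BaseModel):
    username: str
    password: SecretStr


creds = Credentials(username="pytest_user", password="not-so-secret")

# default serialization masks the secret, so it cannot be POSTed as-is
assert creds.model_dump(mode="json")["password"] == "**********"
assert json.loads(creds.model_dump_json())["password"] == "**********"

# the raw value has to be extracted explicitly before building the request body
assert creds.password.get_secret_value() == "not-so-secret"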
diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py
deleted file mode 100644
index 357f3b7647a..00000000000
--- a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# pylint:disable=unused-variable
-# pylint:disable=unused-argument
-# pylint:disable=redefined-outer-name
-
-import json
-from collections.abc import Callable
-from typing import Any, Awaitable
-
-import httpx
-import pytest
-import sqlalchemy as sa
-from _dask_helpers import DaskGatewayServer
-from dask_gateway import Gateway, GatewayCluster, auth
-from distributed import Client as DaskClient
-from distributed.deploy.spec import SpecCluster
-from faker import Faker
-from models_library.api_schemas_directorv2.clusters import ClusterDetailsGet
-from models_library.clusters import Cluster, ClusterID, SimpleAuthentication
-from models_library.users import UserID
-from pydantic import SecretStr
-from pytest_simcore.helpers.typing_env import EnvVarsDict
-from starlette import status
-from tenacity.asyncio import AsyncRetrying
-from tenacity.stop import stop_after_delay
-from tenacity.wait import wait_fixed
-
-pytest_simcore_core_services_selection = [
- "postgres",
-]
-pytest_simcore_ops_services_selection = [
- "adminer",
-]
-
-
-@pytest.fixture()
-def clusters_config(
- mock_env: EnvVarsDict,
- postgres_db: sa.engine.Engine,
- postgres_host_config: dict[str, str],
- monkeypatch: pytest.MonkeyPatch,
- dask_spec_local_cluster: SpecCluster,
- faker: Faker,
-):
- monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1")
- monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO")
- monkeypatch.setenv("S3_ENDPOINT", faker.url())
- monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr())
- monkeypatch.setenv("S3_REGION", faker.pystr())
- monkeypatch.setenv("S3_SECRET_KEY", faker.pystr())
- monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr())
-
-
-@pytest.mark.skip(
- reason="test for helping developers understand how to use dask gateways"
-)
-async def test_local_dask_gateway_server(local_dask_gateway_server: DaskGatewayServer):
- async with Gateway(
- local_dask_gateway_server.address,
- local_dask_gateway_server.proxy_address,
- asynchronous=True,
- auth=auth.BasicAuth("pytest_user", local_dask_gateway_server.password),
- ) as gateway:
- print(f"--> {gateway=} created")
- cluster_options = await gateway.cluster_options()
- gateway_versions = await gateway.get_versions()
- clusters_list = await gateway.list_clusters()
- print(f"--> {gateway_versions=}, {cluster_options=}, {clusters_list=}")
- for option in cluster_options.items():
- print(f"--> {option=}")
-
- async with gateway.new_cluster() as cluster:
- assert cluster
- print(f"--> created new cluster {cluster=}, {cluster.scheduler_info=}")
- NUM_WORKERS = 10
- await cluster.scale(NUM_WORKERS)
- print(f"--> scaling cluster {cluster=} to {NUM_WORKERS} workers")
- async for attempt in AsyncRetrying(
- reraise=True, wait=wait_fixed(0.24), stop=stop_after_delay(30)
- ):
- with attempt:
- print(
- f"cluster {cluster=} has now {len(cluster.scheduler_info.get('workers', []))} worker(s)"
- )
-                    assert len(cluster.scheduler_info.get("workers", {})) == NUM_WORKERS
-
- async with cluster.get_client() as client:
- print(f"--> created new client {client=}, submitting a job")
- res = await client.submit(lambda x: x + 1, 1)
- assert res == 2
-
- print(f"--> scaling cluster {cluster=} back to 0")
- await cluster.scale(0)
-
- async for attempt in AsyncRetrying(
- reraise=True, wait=wait_fixed(0.24), stop=stop_after_delay(30)
- ):
- with attempt:
- print(
- f"cluster {cluster=} has now {len(cluster.scheduler_info.get('workers', []))}"
- )
-                    assert len(cluster.scheduler_info.get("workers", {})) == 0
-
-
-async def test_get_default_cluster_details(
- clusters_config: None,
- registered_user: Callable,
- async_client: httpx.AsyncClient,
-):
- user_1 = registered_user()
-
- # This test checks that the default cluster is accessible
- # the default cluster is the osparc internal cluster available through a dask-scheduler
- response = await async_client.get(
- f"/v2/clusters/default/details?user_id={user_1['id']}"
- )
- assert response.status_code == status.HTTP_200_OK
- default_cluster_out = ClusterDetailsGet.model_validate(response.json())
- response = await async_client.get(
- f"/v2/clusters/{0}/details?user_id={user_1['id']}"
- )
- assert response.status_code == status.HTTP_200_OK
- assert default_cluster_out == ClusterDetailsGet.model_validate(response.json())
-
-
-async def _get_cluster_details(
- async_client: httpx.AsyncClient, user_id: UserID, cluster_id: ClusterID
-) -> ClusterDetailsGet:
- response = await async_client.get(
- f"/v2/clusters/{cluster_id}/details?user_id={user_id}"
- )
- assert response.status_code == status.HTTP_200_OK
- print(f"<-- received cluster details response {response=}")
- cluster_out = ClusterDetailsGet.model_validate(response.json())
- assert cluster_out
- print(f"<-- received cluster details {cluster_out=}")
- assert cluster_out.scheduler, "the cluster's scheduler is not started!"
- return cluster_out
-
-
-async def test_get_cluster_details(
- clusters_config: None,
- registered_user: Callable[..., dict[str, Any]],
- async_client: httpx.AsyncClient,
- local_dask_gateway_server: DaskGatewayServer,
- create_cluster: Callable[..., Awaitable[Cluster]],
- dask_gateway_cluster: GatewayCluster,
- dask_gateway_cluster_client: DaskClient,
- gateway_username: str,
-):
- user_1 = registered_user()
- # define the cluster in the DB
- some_cluster = await create_cluster(
- user_1,
- endpoint=local_dask_gateway_server.address,
- authentication=SimpleAuthentication(
- username=gateway_username,
- password=SecretStr(local_dask_gateway_server.password),
- ).model_dump(by_alias=True),
- )
- # in its present state, the cluster should have no workers
- cluster_out = await _get_cluster_details(
- async_client, user_1["id"], some_cluster.id
- )
- assert not cluster_out.scheduler.workers, "the cluster should not have any worker!"
-
- # now let's scale the cluster
- _NUM_WORKERS = 1
- await dask_gateway_cluster.scale(_NUM_WORKERS)
- async for attempt in AsyncRetrying(
- reraise=True, stop=stop_after_delay(60), wait=wait_fixed(1)
- ):
- with attempt:
- cluster_out = await _get_cluster_details(
- async_client, user_1["id"], some_cluster.id
- )
- assert cluster_out.scheduler.workers, "the cluster has no workers!"
- assert (
- len(cluster_out.scheduler.workers) == _NUM_WORKERS
- ), f"the cluster is expected to have {_NUM_WORKERS} worker(s), currently has {len(cluster_out.scheduler.workers)} worker(s)"
- print(
-        f"cluster now has its {_NUM_WORKERS} worker(s), after {json.dumps(attempt.retry_state.retry_object.statistics)}"
- )
- print(f"!!> cluster dashboard link: {dask_gateway_cluster.dashboard_link}")
-
- # let's start some computation
- _TASK_SLEEP_TIME = 55
-
- def do_some_work(x: int):
- import time
-
- time.sleep(x)
- return True
-
- task = dask_gateway_cluster_client.submit(do_some_work, _TASK_SLEEP_TIME)
-    # wait for the computation to start; we should see this in the cluster details
- async for attempt in AsyncRetrying(
- reraise=True, stop=stop_after_delay(10), wait=wait_fixed(1)
- ):
- with attempt:
- cluster_out = await _get_cluster_details(
- async_client, user_1["id"], some_cluster.id
- )
- assert cluster_out.scheduler.workers
- assert (
- next(
- iter(cluster_out.scheduler.workers.values())
- ).metrics.task_counts.executing
- == 1
- ), "worker is not executing the task"
- print(
- f"!!> cluster metrics: {next(iter(cluster_out.scheduler.workers.values())).metrics=}"
- )
- # let's wait for the result
- result = task.result(timeout=_TASK_SLEEP_TIME + 5)
- assert result
- assert await result is True
- # wait for the computation to effectively stop
- async for attempt in AsyncRetrying(
- reraise=True, stop=stop_after_delay(60), wait=wait_fixed(1)
- ):
- with attempt:
- cluster_out = await _get_cluster_details(
- async_client, user_1["id"], some_cluster.id
- )
- assert cluster_out.scheduler.workers
- print(
- f"!!> cluster metrics: {next(iter(cluster_out.scheduler.workers.values())).metrics=}"
- )
- assert (
- next(
- iter(cluster_out.scheduler.workers.values())
- ).metrics.task_counts.executing
- == 0
- ), "worker is still executing the task"
- assert (
- next(
- iter(cluster_out.scheduler.workers.values())
- ).metrics.task_counts.memory
- == 1
- ), "worker did not keep the result in memory"
-            # NOTE: this is a CPU usage percentage
- assert (
- next(iter(cluster_out.scheduler.workers.values())).metrics.cpu < 5.0
- ), "worker did not update the cpu metrics"
-
- # since the task is completed the worker should have stopped executing
- cluster_out = await _get_cluster_details(
- async_client, user_1["id"], some_cluster.id
- )
- assert cluster_out.scheduler.workers
- worker_data = next(iter(cluster_out.scheduler.workers.values()))
- assert worker_data.metrics.task_counts.executing == 0
- # in dask, the task remains in memory until the result is deleted
- assert worker_data.metrics.task_counts.memory == 1
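The deleted test above polls the cluster details with tenacity's AsyncRetrying until the expected number of dask workers shows up. A minimal, self-contained sketch of that polling pattern; the polled condition below is a stand-in, not the project's _get_cluster_details:

import asyncio

from tenacity.asyncio import AsyncRetrying
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed


async def wait_until(condition, *, timeout_s: float = 10.0, interval_s: float = 0.5) -> None:
    # re-runs the block on every failed assertion until it passes or the deadline is reached
    async for attempt in AsyncRetrying(
        reraise=True, stop=stop_after_delay(timeout_s), wait=wait_fixed(interval_s)
    ):
        with attempt:
            assert await condition(), "condition not met yet"


async def _demo() -> None:
    start = asyncio.get_running_loop().time()

    async def two_seconds_elapsed() -> bool:
        return asyncio.get_running_loop().time() - start > 2

    await wait_until(two_seconds_elapsed)


if __name__ == "__main__":
    asyncio.run(_demo())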
diff --git a/services/director-v2/tests/unit/with_dbs/test_utils_dask.py b/services/director-v2/tests/unit/with_dbs/test_utils_dask.py
index 977828e4753..d02836de9e2 100644
--- a/services/director-v2/tests/unit/with_dbs/test_utils_dask.py
+++ b/services/director-v2/tests/unit/with_dbs/test_utils_dask.py
@@ -31,7 +31,6 @@
from fastapi import FastAPI
from models_library.api_schemas_directorv2.services import NodeRequirements
from models_library.api_schemas_storage import FileUploadLinks, FileUploadSchema
-from models_library.clusters import ClusterID
from models_library.docker import to_simcore_runtime_docker_label_key
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID, SimCoreFileLink, SimcoreS3FileID
@@ -100,7 +99,9 @@ async def mocked_node_ports_filemanager_fcts(
],
chunk_size=TypeAdapter(ByteSize).validate_python("5GiB"),
links=FileUploadLinks(
- abort_upload=TypeAdapter(AnyUrl).validate_python("https://www.fakeabort.com"),
+ abort_upload=TypeAdapter(AnyUrl).validate_python(
+ "https://www.fakeabort.com"
+ ),
complete_upload=TypeAdapter(AnyUrl).validate_python(
"https://www.fakecomplete.com"
),
@@ -425,7 +426,7 @@ async def test_clean_task_output_and_log_files_if_invalid(
published_project: PublishedProject,
mocked_node_ports_filemanager_fcts: dict[str, mock.MagicMock],
create_simcore_file_id: Callable[[ProjectID, NodeID, str], SimcoreS3FileID],
- entry_exists_returns: bool, # noqa: FBT001
+ entry_exists_returns: bool,
fake_io_schema: dict[str, dict[str, str]],
faker: Faker,
):
@@ -527,11 +528,6 @@ def test__to_human_readable_resource_values(
)
-@pytest.fixture
-def cluster_id(faker: Faker) -> ClusterID:
- return faker.pyint(min_value=0)
-
-
@pytest.fixture
def _app_config_with_dask_client(
_app_config_with_db: None,
@@ -549,7 +545,6 @@ async def test_check_if_cluster_is_able_to_run_pipeline(
_app_config_with_dask_client: None,
project_id: ProjectID,
node_id: NodeID,
- cluster_id: ClusterID,
published_project: PublishedProject,
initialized_app: FastAPI,
):
@@ -563,7 +558,6 @@ async def test_check_if_cluster_is_able_to_run_pipeline(
check_if_cluster_is_able_to_run_pipeline(
project_id=project_id,
node_id=node_id,
- cluster_id=cluster_id,
node_image=sleeper_task.image,
scheduler_info=dask_client.backend.client.scheduler_info(),
task_resources={},
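The hunks above keep the pydantic v2 TypeAdapter calls used to build the fake upload links and chunk size. A small sketch of that ad-hoc validation pattern, independent of the test fixtures (the URL is the same placeholder the fixture uses):

from pydantic import AnyUrl, ByteSize, TypeAdapter

# validate a single value against a type without declaring a model
chunk_size = TypeAdapter(ByteSize).validate_python("5GiB")
assert int(chunk_size) == 5 * 1024**3

# the same works for parametrized containers
urls = TypeAdapter(list[AnyUrl]).validate_python(["https://www.fakeabort.com"])
assert str(urls[0]).startswith("https://")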
diff --git a/services/docker-bake.hcl b/services/docker-bake.hcl
index c11de1c6834..7cc0470f177 100644
--- a/services/docker-bake.hcl
+++ b/services/docker-bake.hcl
@@ -10,12 +10,3 @@ target "dask-sidecar" {
tags = ["${DOCKER_REGISTRY}/dask-sidecar:latest","${DOCKER_REGISTRY}/dask-sidecar:${DASK_SIDECAR_VERSION}"]
output = ["type=registry"]
}
-
-variable "OSPARC_GATEWAY_SERVER_VERSION" {
- default = "latest"
-}
-
-target "osparc-gateway-server" {
- tags = ["${DOCKER_REGISTRY}/osparc-gateway-server:latest","${DOCKER_REGISTRY}/osparc-gateway-server:${OSPARC_GATEWAY_SERVER_VERSION}"]
- output = ["type=registry"]
-}
diff --git a/services/docker-compose-build.yml b/services/docker-compose-build.yml
index df66ec7a41c..becf5ce2a25 100644
--- a/services/docker-compose-build.yml
+++ b/services/docker-compose-build.yml
@@ -169,22 +169,6 @@ services:
org.opencontainers.image.source: "${VCS_URL}"
org.opencontainers.image.revision: "${VCS_REF}"
- osparc-gateway-server:
- image: local/osparc-gateway-server:${BUILD_TARGET:?build_target_required}
- build:
- context: ../
- dockerfile: services/osparc-gateway-server/Dockerfile
- cache_from:
- - local/osparc-gateway-server:${BUILD_TARGET:?build_target_required}
- - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:master-github-latest
- - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:staging-github-latest
- - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:release-github-latest
- target: ${BUILD_TARGET:?build_target_required}
- labels:
- org.opencontainers.image.created: "${BUILD_DATE}"
- org.opencontainers.image.source: "${VCS_URL}"
- org.opencontainers.image.revision: "${VCS_REF}"
-
resource-usage-tracker:
image: local/resource-usage-tracker:${BUILD_TARGET:?build_target_required}
build:
diff --git a/services/docker-compose-deploy.yml b/services/docker-compose-deploy.yml
index fb7adc69a9e..1da5f7933de 100644
--- a/services/docker-compose-deploy.yml
+++ b/services/docker-compose-deploy.yml
@@ -25,8 +25,6 @@ services:
image: ${DOCKER_REGISTRY:-itisfoundation}/invitations:${DOCKER_IMAGE_TAG:-latest}
migration:
image: ${DOCKER_REGISTRY:-itisfoundation}/migration:${DOCKER_IMAGE_TAG:-latest}
- osparc-gateway-server:
- image: ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:${DOCKER_IMAGE_TAG:-latest}
payments:
image: ${DOCKER_REGISTRY:-itisfoundation}/payments:${DOCKER_IMAGE_TAG:-latest}
dynamic-scheduler:
diff --git a/services/osparc-gateway-server/.env-devel b/services/osparc-gateway-server/.env-devel
deleted file mode 100644
index 944c6914d43..00000000000
--- a/services/osparc-gateway-server/.env-devel
+++ /dev/null
@@ -1,2 +0,0 @@
-COMPUTATIONAL_SIDECAR_IMAGE=local/dask-sidecar:production
-COMPUTATIONAL_SIDECAR_LOG_LEVEL=INFO
diff --git a/services/osparc-gateway-server/.gitignore b/services/osparc-gateway-server/.gitignore
deleted file mode 100644
index 4d7a877c063..00000000000
--- a/services/osparc-gateway-server/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.osparc-dask*
diff --git a/services/osparc-gateway-server/Dockerfile b/services/osparc-gateway-server/Dockerfile
deleted file mode 100644
index 899ef0fb434..00000000000
--- a/services/osparc-gateway-server/Dockerfile
+++ /dev/null
@@ -1,177 +0,0 @@
-# syntax=docker/dockerfile:1
-
-# Define arguments in the global scope
-ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.4"
-FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
-# the docker image is built based on debian
-FROM python:${PYTHON_VERSION}-slim-bullseye AS base
-ARG TARGETPLATFORM
-ARG BUILDPLATFORM
-RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM"
-
-LABEL maintainer=mguidon,sanderegg
-
-# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/]
-RUN rm -f /etc/apt/apt.conf.d/docker-clean && \
- echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
-# libffi-dev is needed for ARM architectures
-RUN --mount=type=cache,target=/var/cache/apt,mode=0755,sharing=private \
- --mount=type=cache,target=/var/lib/apt,mode=0755,sharing=private \
- set -eux \
- && apt-get update \
- && apt-get install -y --no-install-recommends \
- gosu \
- libffi-dev \
- libffi7 \
- && apt-get clean -y \
- # verify that the binary works
- && gosu nobody true
-
-
-# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu)
-ENV SC_USER_ID=8004 \
- SC_USER_NAME=scu \
- SC_BUILD_TARGET=base \
- SC_BOOT_MODE=default
-
-RUN adduser \
- --uid ${SC_USER_ID} \
- --disabled-password \
- --gecos "" \
- --shell /bin/sh \
- --home /home/${SC_USER_NAME} \
- ${SC_USER_NAME}
-
-
-ENV LANG=C.UTF-8 \
- PYTHONDONTWRITEBYTECODE=1 \
- VIRTUAL_ENV=/home/scu/.venv
-
-ENV PATH="${VIRTUAL_ENV}/bin:$PATH"
-
-# for the ARM architecture this helps a lot compared to building packages from source
-# NOTE: remove as this might create bad caching behaviour
-# ENV PIP_EXTRA_INDEX_URL=https://www.piwheels.org/simple
-
-
-EXPOSE 8000
-
-
-# -------------------------- Build stage -------------------
-# Installs build/package management tools and third party dependencies
-#
-# + /build WORKDIR
-#
-FROM base AS build
-
-ENV SC_BUILD_TARGET=build
-
-RUN rm -f /etc/apt/apt.conf.d/docker-clean
-RUN --mount=type=cache,target=/var/cache/apt,mode=0755,sharing=private \
- --mount=type=cache,target=/var/lib/apt,mode=0755,sharing=private \
- set -eux \
- && apt-get update \
- && apt-get install -y --no-install-recommends \
- build-essential \
- git \
- golang-go
-
-# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv
-COPY --from=uv_build /uv /uvx /bin/
-
-# NOTE: python virtualenv is used here such that installed packages may be moved to production image easily by copying the venv
-RUN uv venv "${VIRTUAL_ENV}"
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
-
-WORKDIR /build
-
-
-
-# in ARM64 mode there is a catch: the piwheels package does not contain the dask-gateway-proxy executable in 64-bit
-RUN dpkgArch="$(dpkg --print-architecture)";\
- case "$dpkgArch" in \
- arm64) git clone --depth 1 --branch 0.9.0 https://github.com/dask/dask-gateway.git \
-    && cd dask-gateway/dask-gateway-server \
- && uv pip install .\
- ;; \
- esac;
-
-# --------------------------Prod-depends-only stage -------------------
-# This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns)
-#
-# + /build
-# + services/osparc-gateway-server [scu:scu] WORKDIR
-#
-FROM build AS prod-only-deps
-
-ENV SC_BUILD_TARGET=prod-only-deps
-
-WORKDIR /build/services/osparc-gateway-server
-
-RUN \
- --mount=type=bind,source=packages,target=/build/packages,rw \
- --mount=type=bind,source=services/osparc-gateway-server,target=/build/services/osparc-gateway-server,rw \
- --mount=type=cache,target=/root/.cache/uv \
- uv pip install \
- --requirement requirements/prod.txt
-
-# --------------------------Production stage -------------------
-# Final cleanup to reduce image size and startup setup
-# Runs as scu (non-root user)
-#
-# + /home/scu $HOME = WORKDIR
-# + services/osparc-gateway-server [scu:scu]
-#
-FROM base AS production
-
-ENV SC_BUILD_TARGET=production \
- SC_BOOT_MODE=production
-
-ENV PYTHONOPTIMIZE=TRUE
-# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
-ENV UV_COMPILE_BYTECODE=1
-
-WORKDIR /home/scu
-# ensure home folder is read/writable for user scu
-RUN chown -R scu /home/scu
-# bring installed package without build tools
-COPY --from=prod-only-deps --chown=scu:scu ${VIRTUAL_ENV} ${VIRTUAL_ENV}
-# copy docker entrypoint and boot scripts
-COPY --chown=scu:scu services/osparc-gateway-server/docker services/osparc-gateway-server/docker
-
-
-# TODO: Create healthcheck
-# HEALTHCHECK \
-# --interval=60s \
-# --timeout=60s \
-# --start-period=10s \
-# --retries=3 \
-# CMD ["curl", "-Lf", "http://127.0.0.1:8787/health"]
-
-ENTRYPOINT [ "/bin/sh", "services/osparc-gateway-server/docker/entrypoint.sh" ]
-CMD ["/bin/sh", "services/osparc-gateway-server/docker/boot.sh"]
-
-
-# --------------------------Development stage -------------------
-# Source code accessible in host but runs in container
-# Runs as scu with same gid/uid as host
-# Placed at the end to speed up the build of images targeting production
-#
-# + /devel WORKDIR
-# + services (mounted volume)
-#
-FROM build AS development
-
-ENV SC_BUILD_TARGET=development
-
-WORKDIR /devel
-RUN chown -R scu:scu "${VIRTUAL_ENV}"
-
-# NOTE: devel mode does NOT have HEALTHCHECK
-
-ENTRYPOINT [ "/bin/sh", "services/osparc-gateway-server/docker/entrypoint.sh" ]
-CMD ["/bin/sh", "services/osparc-gateway-server/docker/boot.sh"]
diff --git a/services/osparc-gateway-server/Makefile b/services/osparc-gateway-server/Makefile
deleted file mode 100644
index 73a8327b8c8..00000000000
--- a/services/osparc-gateway-server/Makefile
+++ /dev/null
@@ -1,155 +0,0 @@
-#
-# Targets for DEVELOPMENT of the osparc-gateway-server service
-#
-include ../../scripts/common.Makefile
-include ../../scripts/common-service.Makefile
-
-
-
-APP_PACKAGE_NAME=osparc_gateway_server
-SERVICE_NAME=osparc-gateway-server
-DASK_SIDECAR_NAME=dask-sidecar
-
-.env: .env-devel ## creates .env file from defaults in .env-devel
- $(if $(wildcard $@), \
- @echo "WARNING ##### $< is newer than $@ ####"; diff -uN $@ $<; false;,\
- @echo "WARNING ##### $@ does not exist, cloning $< as $@ ############"; cp $< $@)
-
-
-
-##
-## INFOS
-##
-.PHONY: info-images info-swarm
-define show-meta
- $(foreach iid,$(shell docker images "*/$(1):*" --quiet | sort | uniq),\
- docker image inspect $(iid) | jq '.[0] | {tags:.RepoTags, labels:.Config.Labels, arch:.Architecture}';)
-endef
-
-info-images: ## lists tags and labels of built images. To display one: 'make target=webserver info-images'
- @echo "## $(SERVICE_NAME) images:";\
- docker images */$(SERVICE_NAME):*;\
- $(call show-meta,$(SERVICE_NAME))
- @echo "## $(DASK_SIDECAR_NAME) images:";\
- docker images */$(DASK_SIDECAR_NAME):*;\
- $(call show-meta,$(DASK_SIDECAR_NAME))
-
-
-info-swarm: ## displays info about stacks and networks
-ifneq ($(SWARM_HOSTS), )
- # Stacks in swarm
- @docker stack ls
- # Containers (tasks) running in '$(SWARM_STACK_NAME)' stack
- -@docker stack ps $(SWARM_STACK_NAME)
- # Services in '$(SWARM_STACK_NAME)' stack
- -@docker stack services $(SWARM_STACK_NAME)
- # Networks
- @docker network ls
-endif
-
-##
-## Running Osparc Dask Gateway
-##
-SWARM_HOSTS = $(shell docker node ls --format="{{.Hostname}}" 2>$(if $(IS_WIN),NUL,/dev/null))
-
-.PHONY: .init-swarm
-.init-swarm:
- # Ensures swarm is initialized
- $(if $(SWARM_HOSTS),,docker swarm init --advertise-addr=$(get_my_ip) --default-addr-pool 172.20.0.0/14)
-
-.PHONY: config
-export OSPARC_GATEWAY_CONFIG_FILE_HOST = .osparc-dask-gateway-config.py
-export SWARM_STACK_NAME ?= dask-gateway
-docker-compose-config-cmd=../../scripts/docker/docker-stack-config.bash
-docker-compose-configs = $(wildcard services/docker-compose*.yml)
-
-$(OSPARC_GATEWAY_CONFIG_FILE_HOST): $(CURDIR)/config/default_config.py ## creates config file from defaults in /config/default_config.py
- $(if $(wildcard $@), \
- @echo "WARNING ##### $< is newer than $@ ####"; diff -uN $@ $<; false;,\
- @echo "WARNING ##### $@ does not exist, cloning $< as $@ ############"; cp $< $@)
-config: $(OSPARC_GATEWAY_CONFIG_FILE_HOST) ## create default configuration file
-
-.stack-$(SWARM_STACK_NAME)-development.yml: .env $(docker-compose-configs)
- # Creating config for stack with 'local/{service}:development' to $@
- @export DOCKER_REGISTRY=local && \
- export DOCKER_IMAGE_TAG=development && \
- $(docker-compose-config-cmd) \
- docker-compose.yml \
- docker-compose.local.yml \
- docker-compose.devel.yml > $@
-
-
-.stack-$(SWARM_STACK_NAME)-production.yml: .env $(docker-compose-configs)
- # Creating config for stack with 'local/{service}:production' to $@
- @export DOCKER_REGISTRY=local && \
- export DOCKER_IMAGE_TAG=production && \
- $(docker-compose-config-cmd) \
- docker-compose.yml \
- docker-compose.local.yml > $@
-
-.stack-$(SWARM_STACK_NAME)-version.yml: .env $(docker-compose-configs)
- # Creating config for stack with '$(DOCKER_REGISTRY)/{service}:${DOCKER_IMAGE_TAG}' to $@
- $(docker-compose-config-cmd) \
- docker-compose.yml \
- docker-compose.local.yml > $@
-
-
-.PHONY: up-devel up-prod up-version up-latest
-
-define _show_endpoints
-# The following endpoints are available
-set -o allexport; \
-source $(CURDIR)/.env; \
-set +o allexport; \
-separator=------------------------------------------------------------------------------------;\
-separator=$${separator}$${separator}$${separator};\
-rows="%-22s | %40s | %12s | %12s\n";\
-TableWidth=100;\
-printf "%22s | %40s | %12s | %12s\n" Name Endpoint User Password;\
-printf "%.$${TableWidth}s\n" "$$separator";\
-printf "$$rows" Dask-Gateway 'http://$(get_my_ip):8000' whatever $(filter-out %.password =,$(shell cat $(OSPARC_GATEWAY_CONFIG_FILE_HOST) | grep c.Authenticator.password));
-endef
-
-show-endpoints:
- @$(_show_endpoints)
-
-
-up-devel: .stack-$(SWARM_STACK_NAME)-development.yml .init-swarm config ## Deploys local development stack and ops stack (pass 'make ops_disabled=1 up-...' to disable)
- # Deploy stack $(SWARM_STACK_NAME) [back-end]
- @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME)
- @$(_show_endpoints)
-
-up-prod: .stack-$(SWARM_STACK_NAME)-production.yml .init-swarm config ## Deploys local production stack and ops stack (pass 'make ops_disabled=1 up-...' to disable)
-ifeq ($(target),)
- # Deploy stack $(SWARM_STACK_NAME)
- @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME)
-else
- # deploys ONLY $(target) service
- @docker compose --file $< up --detach $(target)
-endif
- @$(_show_endpoints)
-
-up up-version: .stack-$(SWARM_STACK_NAME)-version.yml .init-swarm config ## Deploys versioned stack '$(DOCKER_REGISTRY)/{service}:$(DOCKER_IMAGE_TAG)' and ops stack (pass 'make ops_disabled=1 up-...' to disable)
- # Deploy stack $(SWARM_STACK_NAME)
- @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME)
- @$(_show_endpoints)
-
-up-latest:
- @export DOCKER_IMAGE_TAG=release-github-latest && \
- $(MAKE) up-version
-
-.PHONY: down
-down: ## Stops and removes stack
- # Removing stacks in reverse order to creation
- -@docker stack rm $(SWARM_STACK_NAME)
- -@docker stack rm $(SWARM_STACK_NAME)-ops
- # Removing generated docker compose configurations, i.e. .stack-*
- -@rm $(wildcard .stack-*)
- -@rm $(wildcard $(OSPARC_GATEWAY_CONFIG_FILE_HOST))
-
-##
-## system tests
-##
-test-system: ## Runs system tests (needs local docker images of osparc-gateway-server and dask-sidecar)
- $(MAKE_C) tests/system install-ci
- $(MAKE_C) tests/system tests
diff --git a/services/osparc-gateway-server/README.md b/services/osparc-gateway-server/README.md
deleted file mode 100644
index 1f536df68e4..00000000000
--- a/services/osparc-gateway-server/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# osparc backend for dask gateway server
diff --git a/services/osparc-gateway-server/VERSION b/services/osparc-gateway-server/VERSION
deleted file mode 100644
index 8acdd82b765..00000000000
--- a/services/osparc-gateway-server/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-0.0.1
diff --git a/services/osparc-gateway-server/config/default_config.py b/services/osparc-gateway-server/config/default_config.py
deleted file mode 100644
index 4cd75de4d73..00000000000
--- a/services/osparc-gateway-server/config/default_config.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# pylint: disable=undefined-variable
-
-# NOTE: this configuration is used by the dask-gateway-server
-# it follows [traitlets](https://traitlets.readthedocs.io/en/stable/config.html) configuration files
-
-# defines the backend to use with the gateway
-c.DaskGateway.backend_class = "osparc_gateway_server.backend.osparc.OsparcBackend" # type: ignore
-# defines the password for 'simple' authentication
-c.Authenticator.password = "asdf" # type: ignore
-# defines log levels
-c.DaskGateway.log_level = "WARN" # type: ignore
-c.Proxy.log_level = "WARN" # type: ignore
diff --git a/services/osparc-gateway-server/docker-compose.devel.yml b/services/osparc-gateway-server/docker-compose.devel.yml
deleted file mode 100644
index 32514289e1a..00000000000
--- a/services/osparc-gateway-server/docker-compose.devel.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-version: '3.9'
-services:
- osparc-gateway-server:
- environment:
- - SC_BOOT_MODE=debug
- - LOG_LEVEL=debug
- - DEBUG=true
- volumes:
- - ./:/devel/services/osparc-gateway-server
- - ../../packages:/devel/packages
diff --git a/services/osparc-gateway-server/docker-compose.local.yml b/services/osparc-gateway-server/docker-compose.local.yml
deleted file mode 100644
index ff73e36a256..00000000000
--- a/services/osparc-gateway-server/docker-compose.local.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-version: '3.9'
-services:
- osparc-gateway-server:
- environment:
- - SC_BOOT_MODE=${SC_BOOT_MODE:-default}
- - OSPARC_GATEWAY_SERVER_DEBUGGING_PORT=3000
- ports:
- - "3100:3000" # debug port
diff --git a/services/osparc-gateway-server/docker-compose.yml b/services/osparc-gateway-server/docker-compose.yml
deleted file mode 100644
index acdfe4179db..00000000000
--- a/services/osparc-gateway-server/docker-compose.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-version: '3.9'
-services:
- osparc-gateway-server:
- image: ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:${DOCKER_IMAGE_TAG:-latest}
- ports:
- - "8000:8000"
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
- networks:
- - dask_net
- configs:
- - source: gateway_config
- target: ${GATEWAY_SERVER_CONFIG_FILE_CONTAINER:-/etc/dask/dask_config.py}
- environment:
- - GATEWAY_WORKERS_NETWORK=${SWARM_STACK_NAME:?swarm_stack_name_required}_dask_net
- - GATEWAY_SERVER_NAME=${SWARM_STACK_NAME:?swarm_stack_name_required}_osparc-gateway-server
- - COMPUTATIONAL_SIDECAR_VOLUME_NAME=${SWARM_STACK_NAME:?swarm_stack_name_required}_sidecar_data
- - COMPUTATIONAL_SIDECAR_IMAGE=${COMPUTATIONAL_SIDECAR_IMAGE:-local/dask-sidecar:production}
- - COMPUTATIONAL_SIDECAR_LOG_LEVEL=${COMPUTATIONAL_SIDECAR_LOG_LEVEL:-WARNING}
- - COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS=${COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS:-2}
- - COMPUTATION_SIDECAR_NON_USABLE_RAM=${COMPUTATION_SIDECAR_NON_USABLE_RAM:-0}
- - GATEWAY_SERVER_ONE_WORKER_PER_NODE=${GATEWAY_SERVER_ONE_WORKER_PER_NODE-True}
- - GATEWAY_SERVER_CONFIG_FILE_CONTAINER=${GATEWAY_SERVER_CONFIG_FILE_CONTAINER:-/etc/dask/dask_config.py}
- deploy:
- placement:
- constraints:
- - node.role == manager
-networks:
- dask_net:
- name: ${SWARM_STACK_NAME:?swarm_stack_name_required}_dask_net
-
-volumes:
- sidecar_data: null
-
-configs:
- gateway_config:
- file: ./${OSPARC_GATEWAY_CONFIG_FILE_HOST:?gateway_config_required}
diff --git a/services/osparc-gateway-server/docker/boot.sh b/services/osparc-gateway-server/docker/boot.sh
deleted file mode 100755
index d2b912eb3ba..00000000000
--- a/services/osparc-gateway-server/docker/boot.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/sh
-set -o errexit
-set -o nounset
-
-IFS=$(printf '\n\t')
-
-INFO="INFO: [$(basename "$0")] "
-
-# BOOTING application ---------------------------------------------
-echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..."
-echo " User :$(id "$(whoami)")"
-echo " Workdir :$(pwd)"
-echo " env :$(env)"
-
-if [ "${SC_BUILD_TARGET}" = "development" ]; then
- echo "$INFO" "Environment :"
- printenv | sed 's/=/: /' | sed 's/^/ /' | sort
- echo "$INFO" "Python :"
- python --version | sed 's/^/ /'
- command -v python | sed 's/^/ /'
- cd services/osparc-gateway-server
- uv pip install --no-cache-dir -r requirements/dev.txt
- cd -
- echo "$INFO" "PIP :"
- pip list | sed 's/^/ /'
-fi
-
-if [ "${SC_BOOT_MODE}" = "debug" ]; then
-  # NOTE: production does NOT pre-install debugpy
- uv pip install --no-cache-dir debugpy
-fi
-
-if [ "${SC_BOOT_MODE}" = "debug" ]; then
- exec python -m debugpy --listen 0.0.0.0:"${OSPARC_GATEWAY_SERVER_DEBUGGING_PORT}" -m watchmedo auto-restart \
- --recursive \
- --pattern="*.py;*/src/*" \
- --ignore-patterns="*test*;pytest_simcore/*;setup.py;*ignore*" \
- --ignore-directories -- \
- osparc-gateway-server \
- --config "${GATEWAY_SERVER_CONFIG_FILE_CONTAINER}" \
- --debug
-else
- exec osparc-gateway-server \
- --config "${GATEWAY_SERVER_CONFIG_FILE_CONTAINER}"
-fi
diff --git a/services/osparc-gateway-server/docker/entrypoint.sh b/services/osparc-gateway-server/docker/entrypoint.sh
deleted file mode 100755
index cd8eb9a01ef..00000000000
--- a/services/osparc-gateway-server/docker/entrypoint.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/sh
-set -o errexit
-set -o nounset
-
-IFS=$(printf '\n\t')
-
-INFO="INFO: [$(basename "$0")] "
-WARNING="WARNING: [$(basename "$0")] "
-ERROR="ERROR: [$(basename "$0")] "
-
-# This entrypoint script:
-#
-# - Executes *inside* of the container upon start as --user [default root]
-# - Notice that the container *starts* as --user [default root] but
-# *runs* as non-root user [scu]
-#
-echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..."
-echo User :"$(id "$(whoami)")"
-echo Workdir :"$(pwd)"
-echo scuUser :"$(id scu)"
-
-if [ "${SC_BUILD_TARGET}" = "development" ]; then
- echo "$INFO" "development mode detected..."
- # NOTE: expects docker run ... -v $(pwd):/devel/services/osparc-gateway-server
- DEVEL_MOUNT=${DEVEL_MOUNT:="/devel/services/osparc-gateway-server"}
-
- stat $DEVEL_MOUNT >/dev/null 2>&1 ||
- (echo "$ERROR" "You must mount '$DEVEL_MOUNT' to deduce user and group ids" && exit 1)
-
- echo "setting correct user id/group id..."
- HOST_USERID=$(stat --format=%u "${DEVEL_MOUNT}")
- HOST_GROUPID=$(stat --format=%g "${DEVEL_MOUNT}")
- CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1)
- if [ "$HOST_USERID" -eq 0 ]; then
- echo "Warning: Folder mounted owned by root user... adding $SC_USER_NAME to root..."
- adduser "$SC_USER_NAME" root
- else
- echo "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..."
- # take host's credentials in $SC_USER_NAME
- if [ -z "$CONT_GROUPNAME" ]; then
- echo "Creating new group my$SC_USER_NAME"
- CONT_GROUPNAME=my$SC_USER_NAME
- addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME"
- else
- echo "group already exists"
- fi
- echo "adding $SC_USER_NAME to group $CONT_GROUPNAME..."
- adduser "$SC_USER_NAME" "$CONT_GROUPNAME"
-
- echo "changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)"
- usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME"
-
- echo "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME"
- find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \;
- # change user property of files already around
- echo "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME"
- find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \;
- fi
-fi
-
-DOCKER_MOUNT=/var/run/docker.sock
-if stat $DOCKER_MOUNT >/dev/null 2>&1; then
- echo "$INFO detected docker socket is mounted, adding user to group..."
- GROUPID=$(stat --format=%g $DOCKER_MOUNT)
- GROUPNAME=scdocker
-
- if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then
- echo "$WARNING docker group with $GROUPID already exists, getting group name..."
- # if group already exists in container, then reuse name
- GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1)
- echo "$WARNING docker group with $GROUPID has name $GROUPNAME"
- fi
- adduser "$SC_USER_NAME" "$GROUPNAME"
-fi
-
-echo "$INFO Starting osparc-gateway-server ..."
-echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")"
-echo " local dir : $(ls -al)"
-
-exec gosu "$SC_USER_NAME" "$@"
diff --git a/services/osparc-gateway-server/requirements/Makefile b/services/osparc-gateway-server/requirements/Makefile
deleted file mode 100644
index 1118bbf105e..00000000000
--- a/services/osparc-gateway-server/requirements/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Targets to pip-compile requirements
-#
-include ../../../requirements/base.Makefile
-
-# Add here any extra explicit dependency: e.g. _migration.txt: _base.txt
-
-_test.txt: _base.txt
diff --git a/services/osparc-gateway-server/requirements/_base.in b/services/osparc-gateway-server/requirements/_base.in
deleted file mode 100644
index 605373b2ef8..00000000000
--- a/services/osparc-gateway-server/requirements/_base.in
+++ /dev/null
@@ -1,11 +0,0 @@
-# Specifies third-party dependencies for the 'osparc-gateway-server'
-#
-#
---constraint ../../../requirements/constraints.txt
---constraint constraints.txt
-
-aiodocker
-async-timeout
-dask-gateway-server[local]
-pydantic-settings
-pydantic[email,dotenv]
diff --git a/services/osparc-gateway-server/requirements/_base.txt b/services/osparc-gateway-server/requirements/_base.txt
deleted file mode 100644
index c6689413bb4..00000000000
--- a/services/osparc-gateway-server/requirements/_base.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-aiodocker==0.22.1
- # via -r requirements/_base.in
-aiohttp==3.9.5
- # via
- # -c requirements/../../../requirements/constraints.txt
- # aiodocker
- # dask-gateway-server
-aiosignal==1.3.1
- # via aiohttp
-annotated-types==0.7.0
- # via pydantic
-async-timeout==4.0.3
- # via -r requirements/_base.in
-attrs==23.2.0
- # via aiohttp
-cffi==1.16.0
- # via cryptography
-colorlog==6.8.2
- # via dask-gateway-server
-cryptography==42.0.7
- # via
- # -c requirements/../../../requirements/constraints.txt
- # dask-gateway-server
-dask-gateway-server==2023.1.1
- # via -r requirements/_base.in
-dnspython==2.6.1
- # via email-validator
-email-validator==2.1.1
- # via pydantic
-frozenlist==1.4.1
- # via
- # aiohttp
- # aiosignal
-greenlet==3.0.3
- # via sqlalchemy
-idna==3.7
- # via
- # email-validator
- # yarl
-multidict==6.0.5
- # via
- # aiohttp
- # yarl
-pycparser==2.22
- # via cffi
-pydantic==2.9.2
- # via
- # -c requirements/../../../requirements/constraints.txt
- # -r requirements/_base.in
- # pydantic-settings
-pydantic-core==2.23.4
- # via pydantic
-pydantic-settings==2.6.1
- # via -r requirements/_base.in
-python-dotenv==1.0.1
- # via pydantic-settings
-sqlalchemy==1.4.52
- # via
- # -c requirements/../../../requirements/constraints.txt
- # dask-gateway-server
-traitlets==5.14.3
- # via dask-gateway-server
-typing-extensions==4.12.2
- # via
- # pydantic
- # pydantic-core
-yarl==1.9.4
- # via aiohttp
diff --git a/services/osparc-gateway-server/requirements/_test.in b/services/osparc-gateway-server/requirements/_test.in
deleted file mode 100644
index 61f8faa298f..00000000000
--- a/services/osparc-gateway-server/requirements/_test.in
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# Specifies dependencies required to run 'osparc-gateway-server'
-#
---constraint ../../../requirements/constraints.txt
-
-# Adds the base specs AS CONSTRAINTS, not as requirements.
-# - The resulting _test.txt is a frozen list of EXTRA packages for testing, on top of _base.txt
-#
---constraint _base.txt
---constraint ../../dask-sidecar/requirements/_dask-distributed.txt
-
-coverage
-dask-gateway
-debugpy
-docker
-faker
-pytest
-pytest-asyncio
-pytest-cov
-pytest-icdiff
-pytest-instafail
-pytest-mock
-pytest-sugar
-tenacity
-sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html
diff --git a/services/osparc-gateway-server/requirements/_test.txt b/services/osparc-gateway-server/requirements/_test.txt
deleted file mode 100644
index 1fc9e930b69..00000000000
--- a/services/osparc-gateway-server/requirements/_test.txt
+++ /dev/null
@@ -1,213 +0,0 @@
-aiohttp==3.9.5
- # via
- # -c requirements/../../../requirements/constraints.txt
- # -c requirements/_base.txt
- # dask-gateway
-aiosignal==1.3.1
- # via
- # -c requirements/_base.txt
- # aiohttp
-attrs==23.2.0
- # via
- # -c requirements/_base.txt
- # aiohttp
-certifi==2024.8.30
- # via
- # -c requirements/../../../requirements/constraints.txt
- # requests
-charset-normalizer==3.3.2
- # via requests
-click==8.1.7
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # dask-gateway
- # distributed
-cloudpickle==3.0.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # distributed
-coverage==7.6.1
- # via
- # -r requirements/_test.in
- # pytest-cov
-dask==2024.5.1
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask-gateway
- # distributed
-dask-gateway==2024.1.0
- # via -r requirements/_test.in
-debugpy==1.8.5
- # via -r requirements/_test.in
-distributed==2024.5.1
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask-gateway
-docker==7.1.0
- # via -r requirements/_test.in
-faker==29.0.0
- # via -r requirements/_test.in
-frozenlist==1.4.1
- # via
- # -c requirements/_base.txt
- # aiohttp
- # aiosignal
-fsspec==2024.5.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
-greenlet==3.0.3
- # via
- # -c requirements/_base.txt
- # sqlalchemy
-icdiff==2.0.7
- # via pytest-icdiff
-idna==3.7
- # via
- # -c requirements/_base.txt
- # requests
- # yarl
-importlib-metadata==7.1.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
-iniconfig==2.0.0
- # via pytest
-jinja2==3.1.4
- # via
- # -c requirements/../../../requirements/constraints.txt
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-locket==1.0.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
- # partd
-markupsafe==2.1.5
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # jinja2
-msgpack==1.1.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-multidict==6.0.5
- # via
- # -c requirements/_base.txt
- # aiohttp
- # yarl
-mypy==1.12.0
- # via sqlalchemy
-mypy-extensions==1.0.0
- # via mypy
-packaging==24.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # distributed
- # pytest
- # pytest-sugar
-partd==1.4.2
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
-pluggy==1.5.0
- # via pytest
-pprintpp==0.4.0
- # via pytest-icdiff
-psutil==6.0.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-pytest==8.3.3
- # via
- # -r requirements/_test.in
- # pytest-asyncio
- # pytest-cov
- # pytest-icdiff
- # pytest-instafail
- # pytest-mock
- # pytest-sugar
-pytest-asyncio==0.23.8
- # via
- # -c requirements/../../../requirements/constraints.txt
- # -r requirements/_test.in
-pytest-cov==5.0.0
- # via -r requirements/_test.in
-pytest-icdiff==0.9
- # via -r requirements/_test.in
-pytest-instafail==0.5.0
- # via -r requirements/_test.in
-pytest-mock==3.14.0
- # via -r requirements/_test.in
-pytest-sugar==1.0.0
- # via -r requirements/_test.in
-python-dateutil==2.9.0.post0
- # via faker
-pyyaml==6.0.1
- # via
- # -c requirements/../../../requirements/constraints.txt
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # dask-gateway
- # distributed
-requests==2.32.3
- # via docker
-six==1.16.0
- # via python-dateutil
-sortedcontainers==2.4.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-sqlalchemy==1.4.52
- # via
- # -c requirements/../../../requirements/constraints.txt
- # -c requirements/_base.txt
- # -r requirements/_test.in
-sqlalchemy2-stubs==0.0.2a38
- # via sqlalchemy
-tblib==3.0.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-tenacity==9.0.0
- # via -r requirements/_test.in
-termcolor==2.4.0
- # via pytest-sugar
-toolz==0.12.1
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # distributed
- # partd
-tornado==6.4
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # dask-gateway
- # distributed
-typing-extensions==4.12.2
- # via
- # -c requirements/_base.txt
- # mypy
- # sqlalchemy2-stubs
-urllib3==2.2.3
- # via
- # -c requirements/../../../requirements/constraints.txt
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
- # docker
- # requests
-yarl==1.9.4
- # via
- # -c requirements/_base.txt
- # aiohttp
-zict==3.0.0
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-zipp==3.18.2
- # via
- # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt
- # importlib-metadata
diff --git a/services/osparc-gateway-server/requirements/_tools.in b/services/osparc-gateway-server/requirements/_tools.in
deleted file mode 100644
index f91a82de59b..00000000000
--- a/services/osparc-gateway-server/requirements/_tools.in
+++ /dev/null
@@ -1,8 +0,0 @@
---constraint ../../../requirements/constraints.txt
---constraint _base.txt
---constraint _test.txt
-
---requirement ../../../requirements/devenv.txt
-
-# basic dev tools
-watchdog[watchmedo]
diff --git a/services/osparc-gateway-server/requirements/_tools.txt b/services/osparc-gateway-server/requirements/_tools.txt
deleted file mode 100644
index 4366080afe1..00000000000
--- a/services/osparc-gateway-server/requirements/_tools.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-astroid==3.3.4
- # via pylint
-black==24.8.0
- # via -r requirements/../../../requirements/devenv.txt
-build==1.2.2
- # via pip-tools
-bump2version==1.0.1
- # via -r requirements/../../../requirements/devenv.txt
-cfgv==3.4.0
- # via pre-commit
-click==8.1.7
- # via
- # -c requirements/_test.txt
- # black
- # pip-tools
-dill==0.3.8
- # via pylint
-distlib==0.3.8
- # via virtualenv
-filelock==3.16.1
- # via virtualenv
-identify==2.6.1
- # via pre-commit
-isort==5.13.2
- # via
- # -r requirements/../../../requirements/devenv.txt
- # pylint
-mccabe==0.7.0
- # via pylint
-mypy==1.12.0
- # via
- # -c requirements/_test.txt
- # -r requirements/../../../requirements/devenv.txt
-mypy-extensions==1.0.0
- # via
- # -c requirements/_test.txt
- # black
- # mypy
-nodeenv==1.9.1
- # via pre-commit
-packaging==24.0
- # via
- # -c requirements/_test.txt
- # black
- # build
-pathspec==0.12.1
- # via black
-pip==24.2
- # via pip-tools
-pip-tools==7.4.1
- # via -r requirements/../../../requirements/devenv.txt
-platformdirs==4.3.6
- # via
- # black
- # pylint
- # virtualenv
-pre-commit==3.8.0
- # via -r requirements/../../../requirements/devenv.txt
-pylint==3.3.0
- # via -r requirements/../../../requirements/devenv.txt
-pyproject-hooks==1.1.0
- # via
- # build
- # pip-tools
-pyyaml==6.0.1
- # via
- # -c requirements/../../../requirements/constraints.txt
- # -c requirements/_test.txt
- # pre-commit
- # watchdog
-ruff==0.6.7
- # via -r requirements/../../../requirements/devenv.txt
-setuptools==75.1.0
- # via pip-tools
-tomlkit==0.13.2
- # via pylint
-typing-extensions==4.12.2
- # via
- # -c requirements/_base.txt
- # -c requirements/_test.txt
- # mypy
-virtualenv==20.26.5
- # via pre-commit
-watchdog==5.0.2
- # via -r requirements/_tools.in
-wheel==0.44.0
- # via pip-tools
diff --git a/services/osparc-gateway-server/requirements/ci.txt b/services/osparc-gateway-server/requirements/ci.txt
deleted file mode 100644
index e30762175d1..00000000000
--- a/services/osparc-gateway-server/requirements/ci.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Shortcut to install all packages for the continuous integration (CI) of 'services/osparc-gateway-server'
-#
-# - Installs base, test and tool requirements
-#
-# Usage:
-# pip install -r requirements/ci.txt
-#
-
-
-# installs base + tests requirements
---requirement _base.txt
---requirement _test.txt
---requirement _tools.txt
-
-# installs this repo's packages
-pytest-simcore @ ../../packages/pytest-simcore/
-
-# installs current package
-osparc-gateway-server @ .
diff --git a/services/osparc-gateway-server/requirements/constraints.txt b/services/osparc-gateway-server/requirements/constraints.txt
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/services/osparc-gateway-server/requirements/dev.txt b/services/osparc-gateway-server/requirements/dev.txt
deleted file mode 100644
index f2182d2b170..00000000000
--- a/services/osparc-gateway-server/requirements/dev.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-# Shortcut to install all packages needed to develop 'services/osparc-gateway-server'
-#
-# - As ci.txt but with current and repo packages in develop (edit) mode
-#
-# Usage:
-# pip install -r requirements/dev.txt
-#
-
-# installs base + tests + tools requirements
---requirement _base.txt
---requirement _test.txt
---requirement _tools.txt
-
-# installs this repo's packages
---editable ../../packages/pytest-simcore/
-
-# installs current package
---editable .
diff --git a/services/osparc-gateway-server/requirements/prod.txt b/services/osparc-gateway-server/requirements/prod.txt
deleted file mode 100644
index 45b869b18cf..00000000000
--- a/services/osparc-gateway-server/requirements/prod.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# Shortcut to install 'services/osparc-gateway-server' for production
-#
-# - As ci.txt but w/o tests
-#
-# Usage:
-# pip install -r requirements/prod.txt
-#
-
-# installs base requirements
---requirement _base.txt
-
-# installs this repo's packages
-
-# installs current package
-osparc-gateway-server @ .
diff --git a/services/osparc-gateway-server/setup.cfg b/services/osparc-gateway-server/setup.cfg
deleted file mode 100644
index 421c932766f..00000000000
--- a/services/osparc-gateway-server/setup.cfg
+++ /dev/null
@@ -1,15 +0,0 @@
-[bumpversion]
-current_version = 0.0.1
-commit = True
-message = services/osparc-gateway-server version: {current_version} → {new_version}
-tag = False
-commit_args = --no-verify
-
-[bumpversion:file:VERSION]
-
-[tool:pytest]
-asyncio_mode = auto
-
-[mypy]
-plugins =
- pydantic.mypy
diff --git a/services/osparc-gateway-server/setup.py b/services/osparc-gateway-server/setup.py
deleted file mode 100755
index c3a7becc072..00000000000
--- a/services/osparc-gateway-server/setup.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python3
-import re
-import sys
-from pathlib import Path
-
-from setuptools import find_packages, setup
-
-
-def read_reqs(reqs_path: Path) -> set[str]:
- return {
- r
- for r in re.findall(
- r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)",
- reqs_path.read_text(),
- re.MULTILINE,
- )
- if isinstance(r, str)
- }
-
-
-CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
-
-INSTALL_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_base.txt"))
-TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt"))
-
-
-SETUP = {
- "name": "osparc-gateway-server",
- "version": (CURRENT_DIR / "VERSION").read_text().strip(),
- "author": "Manuel Guidon (mguidon), Sylvain Anderegg (sanderegg)",
- "description": "Osparc backend for dask-gateway-server",
- "classifiers": [
- "Development Status :: 1 - Planning",
- "License :: OSI Approved :: MIT License",
- "Natural Language :: English",
-        "Programming Language :: Python :: 3.11",
- ],
- "long_description": (CURRENT_DIR / "README.md").read_text(),
- "license": "MIT license",
- "python_requires": "~=3.11",
- "packages": find_packages(where="src"),
- "package_dir": {
- "": "src",
- },
- "install_requires": INSTALL_REQUIREMENTS,
- "test_suite": "tests",
- "tests_require": TEST_REQUIREMENTS,
- "extras_require": {"test": TEST_REQUIREMENTS},
- "entry_points": {
- "console_scripts": [
- "osparc-gateway-server=osparc_gateway_server.app:start",
- ]
- },
-}
-
-
-if __name__ == "__main__":
- setup(**SETUP)
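
For reference, read_reqs() above extracts the pinned requirements by matching every line that starts with neither '#' nor '-'. A self-contained sketch of what it returns, reusing the same regex on a made-up requirements snippet:

import re
import tempfile
from pathlib import Path


def read_reqs(reqs_path: Path) -> set[str]:
    # same pattern as in the setup.py above
    return set(
        re.findall(r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", reqs_path.read_text(), re.MULTILINE)
    )


sample = """\
# comment lines and '-c'/'-r'/'--constraint' options are skipped
--constraint constraints.txt
aiodocker==0.22.1
async-timeout==4.0.3
pydantic[email,dotenv]
"""

with tempfile.TemporaryDirectory() as tmp:
    reqs_file = Path(tmp) / "_base.txt"
    reqs_file.write_text(sample)
    print(read_reqs(reqs_file))
    # -> {'aiodocker==0.22.1', 'async-timeout==4.0.3', 'pydantic[email,dotenv]'} (order may vary)
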
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py b/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py
deleted file mode 100644
index 0d83e8059db..00000000000
--- a/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-package_name = __name__
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/app.py b/services/osparc-gateway-server/src/osparc_gateway_server/app.py
deleted file mode 100644
index eddceee8d22..00000000000
--- a/services/osparc-gateway-server/src/osparc_gateway_server/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import sys
-
-from dask_gateway_server.app import main # type: ignore[import-untyped]
-
-
-def start() -> None:
- sys.exit(main())
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/__init__.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py
deleted file mode 100644
index 6a42e519f6e..00000000000
--- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py
+++ /dev/null
@@ -1,14 +0,0 @@
-class OSparcGatewayServerException(Exception):
-    """Base exception for errors raised by the oSparc gateway server"""
-
-
-class NoServiceTasksError(OSparcGatewayServerException):
-    """Exception raised when there are no tasks attached to the service"""
-
-
-class TaskNotAssignedError(OSparcGatewayServerException):
- """Exception raised when a task is not assigned to a host"""
-
-
-class NoHostFoundError(OSparcGatewayServerException):
-    """Exception raised when no host is found"""
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py
deleted file mode 100644
index 9cdd2fc9edb..00000000000
--- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from collections.abc import Mapping
-from ipaddress import IPv4Address
-from typing import Any, Union
-
-from pydantic import BaseModel, ByteSize, Field, PositiveFloat, TypeAdapter
-
-Hostname = str
-ResourceName = str
-ResourceType = Union[int, float]
-
-
-class NodeResources(BaseModel):
- memory: ByteSize
- cpus: PositiveFloat
- others: dict[ResourceName, ResourceType] = Field(default_factory=dict)
-
-
-class NodeInformation(BaseModel):
- docker_node_id: str
- ip: IPv4Address
- resources: NodeResources
-
-
-ClusterInformation = dict[Hostname, NodeInformation]
-
-
-def cluster_information_from_docker_nodes(
- nodes_list: list[Mapping[str, Any]]
-) -> ClusterInformation:
- return TypeAdapter(ClusterInformation).validate_python(
- {
- node["Description"]["Hostname"]: {
- "docker_node_id": node["ID"],
- "ip": node["Status"]["Addr"],
- "resources": {
- "memory": node["Description"]["Resources"]["MemoryBytes"],
- "cpus": node["Description"]["Resources"]["NanoCPUs"] / 1e9,
- },
- }
- for node in nodes_list
- }
- )
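
A hedged usage sketch for cluster_information_from_docker_nodes() above, assuming the osparc_gateway_server package is importable; the node payload mimics only the fields of a `docker node inspect` result that the function reads, and every ID, address and size below is invented:

from osparc_gateway_server.backend.models import cluster_information_from_docker_nodes

fake_docker_nodes = [
    {
        "ID": "node-1",  # invented swarm node id
        "Status": {"Addr": "192.168.1.10"},
        "Description": {
            "Hostname": "worker-01",
            "Resources": {"MemoryBytes": 17_179_869_184, "NanoCPUs": 8_000_000_000},
        },
    }
]

cluster_info = cluster_information_from_docker_nodes(fake_docker_nodes)
node = cluster_info["worker-01"]
print(node.docker_node_id, node.ip)  # -> node-1 192.168.1.10
print(node.resources.cpus)           # -> 8.0 (NanoCPUs / 1e9)
print(int(node.resources.memory))    # -> 17179869184 (bytes)
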
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py
deleted file mode 100644
index f905dfc83a4..00000000000
--- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py
+++ /dev/null
@@ -1,350 +0,0 @@
-import asyncio
-import logging
-from collections.abc import AsyncGenerator
-from importlib.metadata import version
-from typing import Any
-
-import osparc_gateway_server
-from aiodocker import Docker
-from aiodocker.exceptions import DockerContainerError
-from dask_gateway_server.backends.base import ( # type: ignore[import-untyped]
- PublicException,
-)
-from dask_gateway_server.backends.db_base import ( # type: ignore[import-untyped]
- Cluster,
- DBBackendBase,
- JobStatus,
- Worker,
- chain,
- islice,
- timestamp,
-)
-
-from ..remote_debug import setup_remote_debugging
-from .errors import NoHostFoundError, NoServiceTasksError, TaskNotAssignedError
-from .settings import AppSettings, BootModeEnum
-from .utils import (
- OSPARC_SCHEDULER_API_PORT,
- DockerSecret,
- create_docker_secrets_from_tls_certs_for_cluster,
- delete_secrets,
- get_cluster_information,
- get_next_empty_node_hostname,
- get_osparc_scheduler_cmd_modifications,
- is_service_task_running,
- modify_cmd_argument,
- start_service,
- stop_service,
-)
-
-#
-# https://patorjk.com/software/taag/#p=display&v=0&f=Avatar&t=osparc-gateway-server
-#
-WELCOME_MSG = rf"""
- ____ ____ ____ ____ ____ ____ ____ ____ ____ _ __ _____ ____ _____ _____ _ ____ ___ _ ____ _____ ____ _ _____ ____
-/ _ \/ ___\/ __\/ _ \/ __\/ _\ / _ \/ _ \/ ___\/ |/ / / __// _ \/__ __\/ __// \ /|/ _ \\ \// / ___\/ __// __\/ \ |\/ __// __\
-| / \|| \| \/|| / \|| \/|| / _____ | | \|| / \|| \| /_____ | | _| / \| / \ | \ | | ||| / \| \ /_____ | \| \ | \/|| | //| \ | \/|
-| \_/|\___ || __/| |-||| /| \_\____\| |_/|| |-||\___ || \\____\| |_//| |-|| | | | /_ | |/\||| |-|| / / \____\\___ || /_ | /| \// | /_ | /
-\____/\____/\_/ \_/ \|\_/\_\\____/ \____/\_/ \|\____/\_|\_\ \____\\_/ \| \_/ \____\\_/ \|\_/ \|/_/ \____/\____\\_/\_\\__/ \____\\_/\_\ {version(osparc_gateway_server.package_name)}
-
-
-"""
-
-
-class OsparcBackend(DBBackendBase):
- """A cluster backend that launches osparc workers.
-
-    Schedulers are spawned as services in a docker swarm
- Workers are spawned as services in a docker swarm
- """
-
- settings: AppSettings
- docker_client: Docker
- cluster_secrets: list[DockerSecret] = []
-
- async def do_setup(self) -> None:
- self.settings = AppSettings() # type: ignore[call-arg]
- assert isinstance(self.log, logging.Logger) # nosec
- self.log.info(
- "osparc-gateway-server application settings:\n%s",
- self.settings.model_dump_json(indent=2),
- )
-
- if self.settings.SC_BOOT_MODE in [BootModeEnum.DEBUG]:
- setup_remote_debugging(logger=self.log)
-
- # pylint: disable=attribute-defined-outside-init
- self.cluster_start_timeout = self.settings.GATEWAY_CLUSTER_START_TIMEOUT
- self.worker_start_timeout = self.settings.GATEWAY_WORKER_START_TIMEOUT
- self.docker_client = Docker()
-
- print(WELCOME_MSG, flush=True) # noqa: T201
-
- async def do_cleanup(self) -> None:
- assert isinstance(self.log, logging.Logger) # nosec
- await self.docker_client.close()
- self.log.info("osparc-gateway-server closed.")
-
- async def do_start_cluster(
- self, cluster: Cluster
- ) -> AsyncGenerator[dict[str, Any], None]:
- assert isinstance(self.log, logging.Logger) # nosec
- assert isinstance(self.api_url, str) # nosec
- self.log.debug(f"starting {cluster=}")
- self.cluster_secrets.extend(
- await create_docker_secrets_from_tls_certs_for_cluster(
- self.docker_client, self, cluster
- )
- )
- self.log.debug("created '%s' for TLS certification", f"{self.cluster_secrets=}")
-
- # now we need a scheduler (get these auto-generated entries from dask-gateway base class)
- scheduler_env = self.get_scheduler_env(cluster)
- scheduler_cmd = self.get_scheduler_command(cluster)
- # we need a few modifications for running in docker swarm
- scheduler_service_name = f"cluster_{cluster.id}_scheduler"
- modifications = get_osparc_scheduler_cmd_modifications(scheduler_service_name)
- for key, value in modifications.items():
- scheduler_cmd = modify_cmd_argument(scheduler_cmd, key, value)
- # start the scheduler
- async for dask_scheduler_start_result in start_service(
- docker_client=self.docker_client,
- settings=self.settings,
- logger=self.log,
- service_name=scheduler_service_name,
- base_env=scheduler_env,
- cluster_secrets=[
- c for c in self.cluster_secrets if c.cluster.name == cluster.name
- ],
- cmd=scheduler_cmd,
- labels={"cluster_id": f"{cluster.id}", "type": "scheduler"},
- gateway_api_url=self.api_url,
- placement={"Constraints": ["node.role==manager"]},
- ):
- yield dask_scheduler_start_result
-
- async def do_stop_cluster(self, cluster: Cluster) -> None:
- assert isinstance(self.log, logging.Logger) # nosec
- assert cluster.state # nosec
- self.log.debug("--> stopping %s", f"{cluster=}")
- dask_scheduler_service_id = cluster.state.get("service_id")
- await stop_service(self.docker_client, dask_scheduler_service_id, self.log)
- await delete_secrets(self.docker_client, cluster)
- self.log.debug("<--%s stopped", f"{cluster=}")
-
- async def do_check_clusters(self, clusters: list[Cluster]) -> list[bool]:
- assert isinstance(self.log, logging.Logger) # nosec
- self.log.debug("--> checking statuses of : %s", f"{clusters=}")
- oks: list[bool | BaseException] = await asyncio.gather(
- *[self._check_service_status(c) for c in clusters], return_exceptions=True
- )
- self.log.debug("<-- clusters status returned: %s", f"{oks=}")
- return [ok if isinstance(ok, bool) else False for ok in oks]
-
- async def do_start_worker(
- self, worker: Worker
- ) -> AsyncGenerator[dict[str, Any], None]:
- assert isinstance(self.log, logging.Logger) # nosec
- assert isinstance(self.api_url, str) # nosec
- assert worker.cluster # nosec
- self.log.debug("--> starting %s", f"{worker=}")
- node_hostname = None
- try:
- node_hostname = await get_next_empty_node_hostname(
- self.docker_client, worker.cluster
- )
- except (NoServiceTasksError, TaskNotAssignedError) as exc:
- # this is a real error
- raise PublicException(f"{exc}") from exc
- except NoHostFoundError as exc:
-            # this should not happen since do_start_worker is called from
-            # on_cluster_heartbeat, which checks whether we already reached the maximum number of workers.
-            # What may happen is that a docker node was removed in between, and that is an error we can report.
- msg = "Unexpected error while creating a new worker, there is no available host! Was a docker node removed?"
- raise PublicException(msg) from exc
- assert node_hostname is not None # nosec
- worker_env = self.get_worker_env(worker.cluster)
- dask_scheduler_url = f"tls://cluster_{worker.cluster.id}_scheduler:{OSPARC_SCHEDULER_API_PORT}" # worker.cluster.scheduler_address
- # NOTE: the name must be set so that the scheduler knows which worker to wait for
- worker_env.update(
- {
- "DASK_SCHEDULER_URL": dask_scheduler_url,
- "DASK_WORKER_NAME": worker.name,
- }
- )
-
- async for dask_sidecar_start_result in start_service(
- docker_client=self.docker_client,
- settings=self.settings,
- logger=self.log,
- service_name=f"cluster_{worker.cluster.id}_sidecar_{worker.id}",
- base_env=worker_env,
- cluster_secrets=[
- c for c in self.cluster_secrets if c.cluster.name == worker.cluster.name
- ],
- cmd=None,
- labels={
- "cluster_id": f"{worker.cluster.id}",
- "worker_id": f"{worker.id}",
- "type": "worker",
- },
- gateway_api_url=self.api_url,
- placement={"Constraints": [f"node.hostname=={node_hostname}"]},
- ):
- yield dask_sidecar_start_result
-
- async def do_stop_worker(self, worker: Worker) -> None:
- assert isinstance(self.log, logging.Logger) # nosec
- self.log.debug("--> Stopping %s", f"{worker=}")
- assert worker.state # nosec
- if service_id := worker.state.get("service_id"):
- await stop_service(self.docker_client, service_id, self.log)
- self.log.debug("<-- %s stopped", f"{worker=}")
- else:
- self.log.error(
- "Worker %s does not have a service id! That is not expected!",
- f"{worker=}",
- )
-
- async def _check_service_status(self, cluster_service: Worker | Cluster) -> bool:
- assert isinstance(self.log, logging.Logger) # nosec
- self.log.debug("--> checking status: %s", f"{cluster_service=}")
- assert cluster_service.state # nosec
- if service_id := cluster_service.state.get("service_id"):
- self.log.debug("--> checking service '%s' status", f"{service_id}")
- try:
- service = await self.docker_client.services.inspect(service_id)
- if service:
- service_name = service["Spec"]["Name"]
- return await is_service_task_running(
- self.docker_client, service_name, self.log
- )
-
- except DockerContainerError:
- self.log.exception("Error while checking %s", f"{service_id=}")
- self.log.warning(
- "%s does not have a service id! That is not expected!",
- f"{cluster_service=}",
- )
- return False
-
- async def do_check_workers(self, workers: list[Worker]) -> list[bool]:
- assert isinstance(self.log, logging.Logger) # nosec
- self.log.debug("--> checking statuses: %s", f"{workers=}")
- ok = await asyncio.gather(
- *[self._check_service_status(w) for w in workers], return_exceptions=True
- )
- self.log.debug("<-- worker status returned: %s", f"{ok=}")
- return [False if isinstance(_, BaseException) else _ for _ in ok]
-
- async def on_cluster_heartbeat(self, cluster_name, msg) -> None:
- # pylint: disable=no-else-continue, unused-variable, too-many-branches
- # pylint: disable=too-many-statements
- assert isinstance(self.log, logging.Logger) # nosec
-
- # HACK: we override the base class heartbeat in order to
-        # dynamically allow for more or fewer workers depending on the
- # available docker nodes!!!
- cluster = self.db.get_cluster(cluster_name)
- if cluster is None or cluster.target > JobStatus.RUNNING:
- return
-
- cluster.last_heartbeat = timestamp()
-
- if cluster.status == JobStatus.RUNNING:
- cluster_update = {}
- else:
- cluster_update = {
- "api_address": msg["api_address"],
- "scheduler_address": msg["scheduler_address"],
- "dashboard_address": msg["dashboard_address"],
- }
-
- count = msg["count"]
- active_workers = set(msg["active_workers"])
- closing_workers = set(msg["closing_workers"])
- closed_workers = set(msg["closed_workers"])
-
- self.log.info(
- "Cluster %s heartbeat [count: %d, n_active: %d, n_closing: %d, n_closed: %d]",
- cluster_name,
- count,
- len(active_workers),
- len(closing_workers),
- len(closed_workers),
- )
-
- # THIS IS THE HACK!!!
- # original code in dask_gateway_server.backend.db_base
- max_workers = cluster.config.get("cluster_max_workers")
- if self.settings.GATEWAY_SERVER_ONE_WORKER_PER_NODE:
- # cluster_max_workers = len(await get_cluster_information(self.docker_client))
- # if max_workers != cluster_max_workers:
- # unfrozen_cluster_config = {k: v for k, v in cluster.config.items()}
- # unfrozen_cluster_config["cluster_max_workers"] = cluster_max_workers
- # cluster_update["config"] = unfrozen_cluster_config
- max_workers = len(await get_cluster_information(self.docker_client))
- if max_workers is not None and count > max_workers:
- # This shouldn't happen under normal operation, but could if the
- # user does something malicious (or there's a bug).
- self.log.info(
- "Cluster %s heartbeat requested %d workers, exceeding limit of %s.",
- cluster_name,
- count,
- max_workers,
- )
- count = max_workers
-
- if count != cluster.count:
- cluster_update["count"] = count
-
- created_workers = []
- submitted_workers = []
- target_updates = []
- newly_running = []
- close_expected = []
- for worker in cluster.workers.values():
- if worker.status >= JobStatus.STOPPED:
- continue
- if worker.name in closing_workers:
- if worker.status < JobStatus.RUNNING:
- newly_running.append(worker)
- close_expected.append(worker)
- elif worker.name in active_workers:
- if worker.status < JobStatus.RUNNING:
- newly_running.append(worker)
- elif worker.name in closed_workers:
- target = (
- JobStatus.STOPPED if worker.close_expected else JobStatus.FAILED
- )
- target_updates.append((worker, {"target": target}))
- elif worker.status == JobStatus.SUBMITTED:
- submitted_workers.append(worker)
- else:
- assert worker.status == JobStatus.CREATED
- created_workers.append(worker)
-
- n_pending = len(created_workers) + len(submitted_workers)
- n_to_stop = len(active_workers) + n_pending - count
- if n_to_stop > 0:
- for w in islice(chain(created_workers, submitted_workers), n_to_stop):
- target_updates.append((w, {"target": JobStatus.STOPPED}))
-
- if cluster_update:
- self.db.update_cluster(cluster, **cluster_update)
- self.queue.put(cluster)
-
- self.db.update_workers(target_updates)
- for w, _u in target_updates:
- self.queue.put(w)
-
- if newly_running:
- # At least one worker successfully started, reset failure count
- cluster.worker_start_failure_count = 0
- self.db.update_workers(
- [(w, {"status": JobStatus.RUNNING}) for w in newly_running]
- )
- for w in newly_running:
- self.log.info("Worker %s is running", w.name)
-
- self.db.update_workers([(w, {"close_expected": True}) for w in close_expected])
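
The heartbeat override above boils down to one clamp: when GATEWAY_SERVER_ONE_WORKER_PER_NODE is enabled, the effective maximum number of workers is re-derived from the current number of docker swarm nodes instead of the static cluster_max_workers, and the requested count is capped accordingly. A minimal standalone sketch of that clamp (function and argument names are illustrative, not part of the backend):

def clamp_requested_workers(requested: int, n_docker_nodes: int) -> int:
    """Cap the heartbeat's worker request at one dask-worker per docker node."""
    max_workers = n_docker_nodes  # overrides cluster_max_workers from the cluster config
    if requested > max_workers:
        # corresponds to the "heartbeat requested %d workers, exceeding limit of %s" log above
        return max_workers
    return requested


assert clamp_requested_workers(requested=5, n_docker_nodes=3) == 3
assert clamp_requested_workers(requested=2, n_docker_nodes=3) == 2
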
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py
deleted file mode 100644
index 6df9845bbaf..00000000000
--- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from enum import Enum
-
-from pydantic import AliasChoices, Field, NonNegativeInt, PositiveInt
-from pydantic_settings import BaseSettings
-
-
-class BootModeEnum(str, Enum):
- """
- Values taken by SC_BOOT_MODE environment variable
- set in Dockerfile and used during docker/boot.sh
- """
-
- DEFAULT = "default"
- LOCAL = "local-development"
- DEBUG = "debug"
- PRODUCTION = "production"
- DEVELOPMENT = "development"
-
-
-class AppSettings(BaseSettings):
- COMPUTATIONAL_SIDECAR_IMAGE: str = Field(
- ..., description="The computational sidecar image in use"
- )
- COMPUTATIONAL_SIDECAR_LOG_LEVEL: str | None = Field(
- default="WARNING",
- description="The computational sidecar log level",
- validation_alias=AliasChoices(
- "COMPUTATIONAL_SIDECAR_LOG_LEVEL",
- "LOG_LEVEL",
- "LOGLEVEL",
- "SIDECAR_LOG_LEVEL",
- "SIDECAR_LOGLEVEL",
- ),
- )
- COMPUTATIONAL_SIDECAR_VOLUME_NAME: str = Field(
- ..., description="Named volume for the computational sidecars"
- )
-
- COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS: NonNegativeInt = Field(
-        default=2, description="Number of CPUs the sidecar should not advertise/use"
- )
-
- COMPUTATION_SIDECAR_NON_USABLE_RAM: NonNegativeInt = Field(
- default=0,
- description="Amount of RAM in bytes, the sidecar should not advertise/use",
- )
-
- COMPUTATION_SIDECAR_DASK_NTHREADS: PositiveInt | None = Field(
- default=None,
-        description="Allows overriding the default number of threads used by the dask-sidecars",
- )
-
- GATEWAY_WORKERS_NETWORK: str = Field(
- ...,
- description="The docker network where the gateway workers shall be able to access the gateway",
- )
- GATEWAY_SERVER_NAME: str = Field(
- ...,
- description="The hostname of the gateway server in the GATEWAY_WORKERS_NETWORK network",
- )
-
- SC_BOOT_MODE: BootModeEnum | None = None
-
- GATEWAY_SERVER_ONE_WORKER_PER_NODE: bool = Field(
- default=True,
- description="Only one dask-worker is allowed per node (default). If disabled, then scaling must be done manually.",
- )
-
- GATEWAY_CLUSTER_START_TIMEOUT: float = Field(
- default=120.0,
- description="Allowed timeout to define a starting cluster as failed",
- )
- GATEWAY_WORKER_START_TIMEOUT: float = Field(
- default=120.0,
- description="Allowed timeout to define a starting worker as failed",
- )
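
A hedged sketch of how AppSettings above is populated from the environment; every value below is invented, and SIDECAR_LOG_LEVEL is set only to show one of the accepted aliases:

import os

from osparc_gateway_server.backend.settings import AppSettings

os.environ.update(
    {
        "COMPUTATIONAL_SIDECAR_IMAGE": "itisfoundation/dask-sidecar:latest",  # made-up tag
        "COMPUTATIONAL_SIDECAR_VOLUME_NAME": "computational_shared_data",
        "GATEWAY_WORKERS_NETWORK": "dask_gateway_workers",
        "GATEWAY_SERVER_NAME": "osparc-gateway-server",
        "SIDECAR_LOG_LEVEL": "INFO",
    }
)

settings = AppSettings()
print(settings.COMPUTATIONAL_SIDECAR_LOG_LEVEL)     # -> INFO (picked up via the alias)
print(settings.GATEWAY_SERVER_ONE_WORKER_PER_NODE)  # -> True (default)
print(settings.GATEWAY_CLUSTER_START_TIMEOUT)       # -> 120.0
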
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py
deleted file mode 100644
index 30cecf14235..00000000000
--- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py
+++ /dev/null
@@ -1,403 +0,0 @@
-import asyncio
-import json
-import logging
-from collections import deque
-from collections.abc import AsyncGenerator, Mapping
-from copy import deepcopy
-from pathlib import Path
-from typing import Any, Final, NamedTuple, cast
-
-import aiodocker
-from aiodocker import Docker
-from dask_gateway_server.backends.db_base import ( # type: ignore[import-untyped]
- Cluster,
- DBBackendBase,
-)
-from yarl import URL
-
-from .errors import NoHostFoundError
-from .models import ClusterInformation, Hostname, cluster_information_from_docker_nodes
-from .settings import AppSettings
-
-_SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR = "/home/scu/shared_computational_data"
-_DASK_KEY_CERT_PATH_IN_SIDECAR = Path("/home/scu/dask-credentials")
-
-
-class DockerSecret(NamedTuple):
- secret_id: str
- secret_name: str
- secret_file_name: str
- cluster: Cluster
-
-
-async def is_service_task_running(
- docker_client: Docker, service_name: str, logger: logging.Logger
-) -> bool:
- tasks = await docker_client.tasks.list(filters={"service": service_name})
- tasks_current_state = [task["Status"]["State"] for task in tasks]
- logger.info(
- "%s current service task states are %s", service_name, f"{tasks_current_state=}"
- )
- num_running = sum(current == "running" for current in tasks_current_state)
- return bool(num_running == 1)
-
-
-async def get_network_id(
- docker_client: Docker, network_name: str, logger: logging.Logger
-) -> str:
- # try to find the network name (usually named STACKNAME_default)
- logger.debug("--> finding network id for '%s'", f"{network_name=}")
- networks = [
- x
- for x in (await docker_client.networks.list())
- if "swarm" in x["Scope"] and network_name == x["Name"]
- ]
- logger.debug(f"found the following: {networks=}")
- if not networks:
- raise ValueError(f"network {network_name} not found")
- if len(networks) > 1:
- # NOTE: this is impossible at the moment. test_utils::test_get_network_id proves it
- raise ValueError(
-            f"network {network_name} is ambiguous, too many networks found: {networks=}"
- )
- logger.debug("found '%s'", f"{networks[0]=}")
- assert "Id" in networks[0] # nosec
- assert isinstance(networks[0]["Id"], str) # nosec
- return networks[0]["Id"]
-
-
-def create_service_config(
- settings: AppSettings,
- service_env: dict[str, Any],
- service_name: str,
- network_id: str,
- service_secrets: list[DockerSecret],
- cmd: list[str] | None,
- labels: dict[str, str],
- placement: dict[str, Any] | None,
- **service_kwargs,
-) -> dict[str, Any]:
- env = deepcopy(service_env)
- env.pop("PATH", None)
- # create the secrets array containing the TLS cert/key pair
- container_secrets = []
- for s in service_secrets:
- container_secrets.append(
- {
- "SecretName": s.secret_name,
- "SecretID": s.secret_id,
- "File": {
- "Name": f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}",
- "UID": "0",
- "GID": "0",
- "Mode": 0x777,
- },
- }
- )
- env_updates = {}
- for env_name, env_value in env.items():
- if env_value == s.secret_file_name:
- env_updates[
- env_name
- ] = f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}"
- env.update(env_updates)
- mounts = [
- # docker socket needed to use the docker api
- {
- "Source": "/var/run/docker.sock",
- "Target": "/var/run/docker.sock",
- "Type": "bind",
- "ReadOnly": True,
- },
-        # the sidecar data is stored in a volume
- {
- "Source": settings.COMPUTATIONAL_SIDECAR_VOLUME_NAME,
- "Target": _SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR,
- "Type": "volume",
- "ReadOnly": False,
- },
- ]
-
- task_template: dict[str, Any] = {
- "ContainerSpec": {
- "Env": env,
- "Image": settings.COMPUTATIONAL_SIDECAR_IMAGE,
- "Init": True,
- "Mounts": mounts,
- "Secrets": container_secrets,
- "Hostname": service_name,
- },
- "RestartPolicy": {"Condition": "on-failure"},
- }
-
- if cmd:
- task_template["ContainerSpec"]["Command"] = cmd
- if placement:
- task_template["Placement"] = placement
-
- return {
- "name": service_name,
- "labels": labels,
- "task_template": task_template,
- "networks": [network_id],
- **service_kwargs,
- }
-
-
-async def create_or_update_secret(
- docker_client: aiodocker.Docker,
- target_file_name: str,
- cluster: Cluster,
- *,
- file_path: Path | None = None,
- secret_data: str | None = None,
-) -> DockerSecret:
- if file_path is None and secret_data is None:
- raise ValueError(
- f"Both {file_path=} and {secret_data=} are empty, that is not allowed"
- )
- data = secret_data
- if not data and file_path:
- data = file_path.read_text()
-
-    docker_secret_name = f"{Path(target_file_name).name}_{cluster.id}"
-
- secrets = await docker_client.secrets.list(filters={"name": docker_secret_name})
- if secrets:
- # we must first delete it as only labels may be updated
- secret = secrets[0]
- await docker_client.secrets.delete(secret["ID"])
- assert data # nosec
- secret = await docker_client.secrets.create(
- name=docker_secret_name,
- data=data,
- labels={"cluster_id": f"{cluster.id}", "cluster_name": f"{cluster.name}"},
- )
- return DockerSecret(
- secret_id=secret["ID"],
- secret_name=docker_secret_name,
- secret_file_name=target_file_name,
- cluster=cluster,
- )
-
-
-async def delete_secrets(docker_client: aiodocker.Docker, cluster: Cluster) -> None:
- secrets = await docker_client.secrets.list(
- filters={"label": f"cluster_id={cluster.id}"}
- )
- await asyncio.gather(*[docker_client.secrets.delete(s["ID"]) for s in secrets])
-
-
-async def start_service(
- docker_client: aiodocker.Docker,
- settings: AppSettings,
- logger: logging.Logger,
- service_name: str,
- base_env: dict[str, str],
- cluster_secrets: list[DockerSecret],
- cmd: list[str] | None,
- labels: dict[str, str],
- gateway_api_url: str,
- placement: dict[str, Any] | None = None,
- **service_kwargs,
-) -> AsyncGenerator[dict[str, Any], None]:
- service_parameters = {}
- try:
- assert settings.COMPUTATIONAL_SIDECAR_LOG_LEVEL # nosec
- env = deepcopy(base_env)
- env.update(
- {
- # NOTE: the hostname of the gateway API must be
- # modified so that the scheduler/sidecar can
- # send heartbeats to the gateway
- "DASK_GATEWAY_API_URL": f"{URL(gateway_api_url).with_host(settings.GATEWAY_SERVER_NAME)}",
- "SIDECAR_COMP_SERVICES_SHARED_FOLDER": _SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR,
- "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME": settings.COMPUTATIONAL_SIDECAR_VOLUME_NAME,
- "LOG_LEVEL": settings.COMPUTATIONAL_SIDECAR_LOG_LEVEL,
- "DASK_SIDECAR_NUM_NON_USABLE_CPUS": f"{settings.COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS}",
- "DASK_SIDECAR_NON_USABLE_RAM": f"{settings.COMPUTATION_SIDECAR_NON_USABLE_RAM}",
- }
- )
- if settings.COMPUTATION_SIDECAR_DASK_NTHREADS:
- env["DASK_NTHREADS"] = f"{settings.COMPUTATION_SIDECAR_DASK_NTHREADS}"
-
- # find service parameters
- network_id = await get_network_id(
- docker_client, settings.GATEWAY_WORKERS_NETWORK, logger
- )
- service_parameters = create_service_config(
- settings,
- env,
- service_name,
- network_id,
- cluster_secrets,
- cmd,
- labels=labels,
- placement=placement,
- **service_kwargs,
- )
-
- # start service
- logger.info("Starting service %s", service_name)
- logger.debug("Using parameters %s", json.dumps(service_parameters, indent=2))
- service = await docker_client.services.create(**service_parameters)
- logger.info("Service %s started: %s", service_name, f"{service=}")
- yield {"service_id": service["ID"]}
-
- # get the full info from docker
- service = await docker_client.services.inspect(service["ID"])
- logger.debug(
- "Service '%s' inspection: %s",
- service_name,
- f"{json.dumps(service, indent=2)}",
- )
-
- # wait until the service is started
- logger.info(
- "---> Service started, waiting for service %s to run...",
- service_name,
- )
- while not await is_service_task_running(
- docker_client, service["Spec"]["Name"], logger
- ):
- yield {"service_id": service["ID"]}
- await asyncio.sleep(1)
-
- # we are done, the service is started
- logger.info(
- "---> Service %s is started, and has ID %s",
- service["Spec"]["Name"],
- service["ID"],
- )
- yield {"service_id": service["ID"]}
-
- except (aiodocker.DockerContainerError, aiodocker.DockerError):
- logger.exception(
- "Unexpected Error while running container with parameters %s",
- json.dumps(service_parameters, indent=2),
- )
- raise
- except asyncio.CancelledError:
- logger.warning("Service creation was cancelled")
- raise
-
-
-async def stop_service(
- docker_client: aiodocker.Docker, service_id: str, logger: logging.Logger
-) -> None:
- logger.info("Stopping service %s", f"{service_id}")
- try:
- await docker_client.services.delete(service_id)
- logger.info("service %s stopped", f"{service_id=}")
-
- except aiodocker.DockerContainerError:
- logger.exception("Error while stopping service with id %s", f"{service_id=}")
-
-
-async def create_docker_secrets_from_tls_certs_for_cluster(
- docker_client: Docker, backend: DBBackendBase, cluster: Cluster
-) -> list[DockerSecret]:
- tls_cert_path, tls_key_path = backend.get_tls_paths(cluster)
- return [
- await create_or_update_secret(
- docker_client,
- f"{tls_cert_path}",
- cluster,
- secret_data=cluster.tls_cert.decode(),
- ),
- await create_or_update_secret(
- docker_client,
- f"{tls_key_path}",
- cluster,
- secret_data=cluster.tls_key.decode(),
- ),
- ]
-
-
-OSPARC_SCHEDULER_API_PORT: Final[int] = 8786
-OSPARC_SCHEDULER_DASHBOARD_PORT: Final[int] = 8787
-
-
-def get_osparc_scheduler_cmd_modifications(
- scheduler_service_name: str,
-) -> dict[str, str]:
- # NOTE: the healthcheck of itisfoundation/dask-sidecar expects the dashboard
- # to be on port 8787
- # (see https://github.com/ITISFoundation/osparc-simcore/blob/f3d98dccdae665d23701b0db4ee917364a0fbd99/services/dask-sidecar/Dockerfile)
- return {
- "--dashboard-address": f":{OSPARC_SCHEDULER_DASHBOARD_PORT}",
- "--port": f"{OSPARC_SCHEDULER_API_PORT}",
- "--host": scheduler_service_name,
- }
-
-
-def modify_cmd_argument(
- cmd: list[str], argument_name: str, argument_value: str
-) -> list[str]:
- modified_cmd = deepcopy(cmd)
- try:
- dashboard_address_arg_index = modified_cmd.index(argument_name)
- modified_cmd[dashboard_address_arg_index + 1] = argument_value
- except ValueError:
- modified_cmd.extend([argument_name, argument_value])
- return modified_cmd
-
-
-async def get_cluster_information(docker_client: Docker) -> ClusterInformation:
- cluster_information = cluster_information_from_docker_nodes(
- await docker_client.nodes.list()
- )
-
- return cluster_information
-
-
-def _find_service_node_assignment(service_tasks: list[Mapping[str, Any]]) -> str | None:
- for task in service_tasks:
- if task["Status"]["State"] in ("new", "pending"):
-            # the task is not running yet, which is a bit unexpected
- service_constraints = (
- task.get("Spec", {}).get("Placement", {}).get("Constraints", [])
- )
- filtered_service_constraints = list(
- filter(lambda x: "node.hostname" in x, service_constraints)
- )
- if len(filtered_service_constraints) > 1:
- continue
- service_placement: str = filtered_service_constraints[0]
- return service_placement.split("==")[1]
-
- if task["Status"]["State"] in (
- "assigned",
- "preparing",
- "starting",
- "running",
- ):
- return cast(str, task["NodeID"]) # mypy
- return None
-
-
-async def get_next_empty_node_hostname(
- docker_client: Docker, cluster: Cluster
-) -> Hostname:
- current_count = getattr(get_next_empty_node_hostname, "counter", -1) + 1
- setattr(get_next_empty_node_hostname, "counter", current_count) # noqa: B010
-
- cluster_nodes = deque(await docker_client.nodes.list())
- current_worker_services = await docker_client.services.list(
- filters={"label": [f"cluster_id={cluster.id}", "type=worker"]}
- )
- used_docker_node_ids = set()
-
- for service in current_worker_services:
- service_tasks = await docker_client.tasks.list(
- filters={"service": service["ID"]}
- )
- if assigned_node := _find_service_node_assignment(service_tasks):
- used_docker_node_ids.add(assigned_node)
-
- cluster_nodes.rotate(current_count)
- for node in cluster_nodes:
- if node["ID"] in used_docker_node_ids:
- continue
- return f"{node['Description']['Hostname']}"
- raise NoHostFoundError("Could not find any empty host")
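
A hedged usage sketch for the two command helpers above, again assuming the package is importable: a hypothetical dask-scheduler command, as dask-gateway would emit it, is rewritten so that the scheduler binds to its swarm service name and to the ports the dask-sidecar healthcheck expects:

from osparc_gateway_server.backend.utils import (
    get_osparc_scheduler_cmd_modifications,
    modify_cmd_argument,
)

# hypothetical command produced by dask-gateway's get_scheduler_command()
scheduler_cmd = ["dask-scheduler", "--port", "0", "--dashboard-address", ":0"]

for key, value in get_osparc_scheduler_cmd_modifications("cluster_12_scheduler").items():
    scheduler_cmd = modify_cmd_argument(scheduler_cmd, key, value)

print(scheduler_cmd)
# -> ['dask-scheduler', '--port', '8786', '--dashboard-address', ':8787',
#     '--host', 'cluster_12_scheduler']
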
diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py b/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py
deleted file mode 100644
index c4d442796d2..00000000000
--- a/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py
+++ /dev/null
@@ -1,24 +0,0 @@
-""" Setup remote debugger with debugpy - a debugger for Python
- https://github.com/microsoft/debugpy
-
-"""
-
-import logging
-
-
-def setup_remote_debugging(logger: logging.Logger) -> None:
- try:
- logger.debug("Attaching debugpy ...")
-
- import debugpy # type: ignore[import-untyped]
-
- REMOTE_DEBUGGING_PORT = 3000
- debugpy.listen(("0.0.0.0", REMOTE_DEBUGGING_PORT)) # nosec
- # debugpy.wait_for_client()
-
- except ImportError as err:
- raise RuntimeError(
- "Cannot enable remote debugging. Please install debugpy first"
- ) from err
-
- logger.info("Remote debugging enabled: listening port %s", REMOTE_DEBUGGING_PORT)
diff --git a/services/osparc-gateway-server/tests/conftest.py b/services/osparc-gateway-server/tests/conftest.py
deleted file mode 100644
index b7d545e4f0b..00000000000
--- a/services/osparc-gateway-server/tests/conftest.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-from collections.abc import AsyncIterator
-from pathlib import Path
-
-import aiodocker
-import pytest
-
-pytest_plugins = [
- "pytest_simcore.repository_paths",
- "pytest_simcore.docker_swarm",
-]
-
-
-@pytest.fixture(scope="session")
-def package_dir(osparc_simcore_services_dir: Path):
- package_folder = osparc_simcore_services_dir / "osparc-gateway-server"
- assert package_folder.exists()
- return package_folder
-
-
-@pytest.fixture
-async def async_docker_client() -> AsyncIterator[aiodocker.Docker]:
- async with aiodocker.Docker() as docker_client:
- yield docker_client
diff --git a/services/osparc-gateway-server/tests/integration/_dask_helpers.py b/services/osparc-gateway-server/tests/integration/_dask_helpers.py
deleted file mode 100644
index e81d0332787..00000000000
--- a/services/osparc-gateway-server/tests/integration/_dask_helpers.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import NamedTuple
-
-from dask_gateway_server.app import DaskGateway
-
-
-class DaskGatewayServer(NamedTuple):
- address: str
- proxy_address: str
- password: str
- server: DaskGateway
diff --git a/services/osparc-gateway-server/tests/integration/conftest.py b/services/osparc-gateway-server/tests/integration/conftest.py
deleted file mode 100644
index dc89484803e..00000000000
--- a/services/osparc-gateway-server/tests/integration/conftest.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-
-import asyncio
-import json
-from typing import Any, AsyncIterator, Awaitable, Callable
-
-import aiodocker
-import dask_gateway
-import pytest
-import traitlets
-import traitlets.config
-from _dask_helpers import DaskGatewayServer
-from dask_gateway_server.app import DaskGateway
-from faker import Faker
-from osparc_gateway_server.backend.osparc import OsparcBackend
-from osparc_gateway_server.backend.utils import (
- OSPARC_SCHEDULER_API_PORT,
- OSPARC_SCHEDULER_DASHBOARD_PORT,
-)
-from pytest_simcore.helpers.host import get_localhost_ip
-from tenacity.asyncio import AsyncRetrying
-from tenacity.wait import wait_fixed
-
-
-@pytest.fixture
-async def docker_volume(
- async_docker_client: aiodocker.Docker,
-) -> AsyncIterator[Callable[[str], Awaitable[dict[str, Any]]]]:
- volumes = []
-
- async def _volume_creator(name: str) -> dict[str, Any]:
- volume = await async_docker_client.volumes.create(config={"Name": name})
- assert volume
- print(f"--> created {volume=}")
- volumes.append(volume)
- return await volume.show()
-
- yield _volume_creator
-
- # cleanup
- async def _wait_for_volume_deletion(volume: aiodocker.docker.DockerVolume):
- inspected_volume = await volume.show()
- async for attempt in AsyncRetrying(reraise=True, wait=wait_fixed(1)):
- with attempt:
- print(f"<-- deleting volume '{inspected_volume['Name']}'...")
- await volume.delete()
- print(f"<-- volume '{inspected_volume['Name']}' deleted")
-
- await asyncio.gather(*[_wait_for_volume_deletion(v) for v in volumes])
-
-
-@pytest.fixture
-def gateway_password(faker: Faker) -> str:
- return faker.password()
-
-
-def _convert_to_dict(c: traitlets.config.Config | dict) -> dict[str, Any]:
- converted_dict = {}
- for x, y in c.items():
- if isinstance(y, (dict, traitlets.config.Config)):
- converted_dict[x] = _convert_to_dict(y)
- else:
- converted_dict[x] = f"{y}"
- return converted_dict
-
-
-@pytest.fixture
-def mock_scheduler_cmd_modifications(mocker):
-    """This mock is necessary because:
-    - if the osparc-gateway-server runs on the host, the dask-scheduler must start with an empty "" for --host,
-      so that it advertises its IP on docker_gw_bridge (172.18.0.X), which is reachable from the host;
-    - if the osparc-gateway-server runs as a docker container, --host must be set to "cluster_X_scheduler",
-      since that is the container's hostname and it resolves inside the dask-gateway network.
-    """
- mocker.patch(
- "osparc_gateway_server.backend.osparc.get_osparc_scheduler_cmd_modifications",
- autospec=True,
- return_value={
- "--dashboard-address": f":{OSPARC_SCHEDULER_DASHBOARD_PORT}",
- "--port": f"{OSPARC_SCHEDULER_API_PORT}",
- },
- )
-
-
-@pytest.fixture
-async def local_dask_gateway_server(
- mock_scheduler_cmd_modifications,
- minimal_config: None,
- gateway_password: str,
-) -> AsyncIterator[DaskGatewayServer]:
-    """This code is more or less copy-pasted from the dask-gateway repo."""
- c = traitlets.config.Config()
- c.DaskGateway.backend_class = OsparcBackend # type: ignore
- c.DaskGateway.address = "127.0.0.1:0" # type: ignore
- c.DaskGateway.log_level = "DEBUG" # type: ignore
- c.Proxy.address = f"{get_localhost_ip()}:0" # type: ignore
- c.DaskGateway.authenticator_class = "dask_gateway_server.auth.SimpleAuthenticator" # type: ignore
- c.SimpleAuthenticator.password = gateway_password # type: ignore
- print(f"--> local dask gateway config: {json.dumps(_convert_to_dict(c), indent=2)}")
- dask_gateway_server = DaskGateway(config=c)
-    dask_gateway_server.initialize([])  # NOTE: an (empty) argv list must be passed explicitly
- print("--> local dask gateway server initialized")
- await dask_gateway_server.setup()
- await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access
- print("--> local dask gateway server setup completed")
- yield DaskGatewayServer(
- f"http://{dask_gateway_server.backend.proxy.address}",
- f"gateway://{dask_gateway_server.backend.proxy.tcp_address}",
- c.SimpleAuthenticator.password, # type: ignore
- dask_gateway_server,
- )
- print("<-- local dask gateway server switching off...")
- await dask_gateway_server.cleanup()
- print("...done")
-
-
-@pytest.fixture
-async def gateway_client(
- local_dask_gateway_server: DaskGatewayServer,
-) -> AsyncIterator[dask_gateway.Gateway]:
- async with dask_gateway.Gateway(
- local_dask_gateway_server.address,
- local_dask_gateway_server.proxy_address,
- asynchronous=True,
- auth=dask_gateway.BasicAuth(
- username="pytest_user", password=local_dask_gateway_server.password
- ),
- ) as gateway:
- assert gateway
- print(f"--> {gateway} created")
- cluster_options = await gateway.cluster_options()
- gateway_versions = await gateway.get_versions()
- clusters_list = await gateway.list_clusters()
- print(f"--> {gateway_versions}, {cluster_options}, {clusters_list}")
- for option in cluster_options.items():
- print(f"--> {option}")
- yield gateway
diff --git a/services/osparc-gateway-server/tests/integration/test_clusters.py b/services/osparc-gateway-server/tests/integration/test_clusters.py
deleted file mode 100644
index 2f31188394e..00000000000
--- a/services/osparc-gateway-server/tests/integration/test_clusters.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-import asyncio
-from collections.abc import Awaitable, Callable
-from typing import Any
-
-import pytest
-from _dask_helpers import DaskGatewayServer
-from aiodocker import Docker
-from dask_gateway import Gateway
-from faker import Faker
-from pytest_simcore.helpers.host import get_localhost_ip
-from tenacity.asyncio import AsyncRetrying
-from tenacity.stop import stop_after_delay
-from tenacity.wait import wait_fixed
-
-
-@pytest.fixture(
- params=[
- "local/dask-sidecar:production",
- ]
-)
-def minimal_config(
- docker_swarm,
- monkeypatch: pytest.MonkeyPatch,
- faker: Faker,
- request: pytest.FixtureRequest,
-):
- monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", faker.pystr())
- monkeypatch.setenv("GATEWAY_SERVER_NAME", get_localhost_ip())
- monkeypatch.setenv("COMPUTATIONAL_SIDECAR_VOLUME_NAME", faker.pystr())
- monkeypatch.setenv(
- "COMPUTATIONAL_SIDECAR_IMAGE",
- request.param, # type: ignore
- )
- monkeypatch.setenv("COMPUTATIONAL_SIDECAR_LOG_LEVEL", "DEBUG")
- monkeypatch.setenv("GATEWAY_SERVER_ONE_WORKER_PER_NODE", "False")
-
-
-@pytest.fixture
-async def gateway_worker_network(
- local_dask_gateway_server: DaskGatewayServer,
- docker_network: Callable[..., Awaitable[dict[str, Any]]],
-) -> dict[str, Any]:
- network = await docker_network(
- Name=local_dask_gateway_server.server.backend.settings.GATEWAY_WORKERS_NETWORK
- )
- return network
-
-
-async def assert_services_stability(docker_client: Docker, service_name: str):
- list_services = await docker_client.services.list(filters={"name": service_name})
- assert (
- len(list_services) == 1
- ), f"{service_name} is missing from the expected services in {list_services}"
- _SECONDS_STABLE = 10
- print(f"--> {service_name} is up, now checking if it is running...")
- async for attempt in AsyncRetrying(
- reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60)
- ):
- with attempt:
- tasks_list = await docker_client.tasks.list(
- filters={"service": service_name}
- )
- tasks_current_state = [t["Status"]["State"] for t in tasks_list]
- print(f"--> {service_name} service task states are {tasks_current_state=}")
- num_running = sum(current == "running" for current in tasks_current_state)
- assert num_running == 1
- print(f"--> {service_name} is running now")
- print(
-        f"--> {service_name} is running, now checking that it stays stable for {_SECONDS_STABLE}s..."
- )
-
- async def _check_stability(service: dict[str, Any]):
- inspected_service = await docker_client.services.inspect(service["ID"])
-        # we ensure the service remains stable for _SECONDS_STABLE seconds (i.e. only one task runs)
-
- print(
- f"--> checking {_SECONDS_STABLE} seconds for stability of service {inspected_service['Spec']['Name']=}"
- )
- for n in range(_SECONDS_STABLE):
- service_tasks = await docker_client.tasks.list(
- filters={"service": inspected_service["Spec"]["Name"]}
- )
- assert (
- len(service_tasks) == 1
-            ), f"The service is not stable: it shows {service_tasks}"
- print(f"the {service_name=} is stable after {n} seconds...")
- await asyncio.sleep(1)
- print(f"{service_name=} stable!!")
-
- await asyncio.gather(*[_check_stability(s) for s in list_services])
-
-
-async def _wait_for_cluster_services_and_secrets(
- async_docker_client: Docker,
- num_services: int,
- num_secrets: int,
- timeout_s: int = 10,
-) -> list[dict[str, Any]]:
- async for attempt in AsyncRetrying(
- reraise=True, wait=wait_fixed(1), stop=stop_after_delay(timeout_s)
- ):
- with attempt:
- list_services = await async_docker_client.services.list()
- print(
- f"--> list of services after {attempt.retry_state.attempt_number}s: {list_services=}, expected {num_services=}"
- )
- assert len(list_services) == num_services
- # as the secrets
- list_secrets = await async_docker_client.secrets.list()
- print(
- f"--> list of secrets after {attempt.retry_state.attempt_number}s: {list_secrets=}, expected {num_secrets}"
- )
- assert len(list_secrets) == num_secrets
- return list_services
- # needed for pylint
- raise AssertionError("Invalid call to _wait_for_cluster_services_and_secrets")
-
-
-async def test_clusters_start_stop(
- minimal_config,
- gateway_worker_network,
- gateway_client: Gateway,
- async_docker_client: Docker,
-):
- """Each cluster is made of 1 scheduler + X number of sidecars (with 0<=X dict[str, Any]:
- return await docker_volume(faker.pystr())
-
-
-@pytest.fixture
-def computational_sidecar_mounted_folder() -> str:
- return "/comp_shared_folder"
-
-
-@pytest.fixture
-def sidecar_envs(
- computational_sidecar_mounted_folder: str,
- sidecar_computational_shared_volume: dict[str, Any],
-) -> dict[str, str]:
- return {
- "SIDECAR_COMP_SERVICES_SHARED_FOLDER": f"{computational_sidecar_mounted_folder}",
- "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME": f"{sidecar_computational_shared_volume['Name']}",
- }
-
-
-@pytest.fixture
-def sidecar_mounts(
- sidecar_computational_shared_volume: dict[str, Any],
- computational_sidecar_mounted_folder: str,
-) -> list[dict[str, Any]]:
- return [ # docker socket needed to use the docker api
- {
- "Source": "/var/run/docker.sock",
- "Target": "/var/run/docker.sock",
- "Type": "bind",
- "ReadOnly": True,
- },
- # the sidecar computational data must be mounted
- {
- "Source": sidecar_computational_shared_volume["Name"],
- "Target": computational_sidecar_mounted_folder,
- "Type": "volume",
- "ReadOnly": False,
- },
- ]
-
-
-@pytest.fixture
-async def create_docker_service(
- async_docker_client: aiodocker.Docker,
-) -> AsyncIterator[Callable[..., Awaitable[Mapping[str, Any]]]]:
- services = []
-
- async def service_creator(**service_kwargs) -> Mapping[str, Any]:
- service = await async_docker_client.services.create(**service_kwargs)
- assert service
- assert "ID" in service
- services.append(service["ID"])
- return await async_docker_client.services.inspect(service["ID"])
-
- yield service_creator
- # cleanup
- await asyncio.gather(*[async_docker_client.services.delete(s) for s in services])
-
-
-async def _wait_for_service_to_be_ready(
- docker_client: aiodocker.Docker, service_name: str
-):
- async for attempt in AsyncRetrying(
- reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60)
- ):
- with attempt:
- tasks_list = await docker_client.tasks.list(
- filters={"service": service_name}
- )
- tasks_current_state = [t["Status"]["State"] for t in tasks_list]
- print(f"--> {service_name} service task states are {tasks_current_state=}")
- num_running = sum(current == "running" for current in tasks_current_state)
- assert num_running == 1
- print(f"--> {service_name} is running now")
-
-
-@pytest.mark.parametrize(
- "image_name",
- [
- "local/dask-sidecar:production",
- ],
-)
-async def test_computational_sidecar_properly_start_stop(
- docker_swarm: None,
- sidecar_computational_shared_volume: dict[str, Any],
- async_docker_client: aiodocker.Docker,
- image_name: str,
- sidecar_envs: dict[str, str],
- sidecar_mounts: list[dict[str, Any]],
- create_docker_service: Callable[..., Awaitable[dict[str, Any]]],
-):
- scheduler_service = await create_docker_service(
- task_template={
- "ContainerSpec": {
- "Image": image_name,
- "Env": sidecar_envs
- | {
- "DASK_START_AS_SCHEDULER": "1",
- "DASK_SCHEDULER_URL": f"tcp://{get_localhost_ip()}:8786",
- },
- "Init": True,
- "Mounts": sidecar_mounts,
- }
- },
- endpoint_spec={"Ports": [{"PublishedPort": 8786, "TargetPort": 8786}]},
- name="pytest_dask_scheduler",
- )
- await _wait_for_service_to_be_ready(
- async_docker_client, scheduler_service["Spec"]["Name"]
- )
- sidecar_service = await create_docker_service(
- task_template={
- "ContainerSpec": {
- "Image": image_name,
- "Env": sidecar_envs
- | {"DASK_SCHEDULER_URL": f"tcp://{get_localhost_ip()}:8786"},
- "Init": True,
- "Mounts": sidecar_mounts,
- }
- },
- name="pytest_dask_sidecar",
- )
- await _wait_for_service_to_be_ready(
- async_docker_client, sidecar_service["Spec"]["Name"]
- )
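For context, most of the integration tests removed above rely on one and the same tenacity polling idiom to wait until a swarm service has exactly one running task. A minimal, self-contained sketch of that idiom (the helper name is illustrative, not part of the repository) could look like:

import aiodocker
from tenacity.asyncio import AsyncRetrying
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed


async def wait_for_single_running_task(
    docker_client: aiodocker.Docker, service_name: str, timeout_s: int = 60
) -> None:
    # poll the swarm tasks of the service until exactly one reports the "running" state,
    # re-raising the last assertion error if the timeout is reached
    async for attempt in AsyncRetrying(
        reraise=True, wait=wait_fixed(1), stop=stop_after_delay(timeout_s)
    ):
        with attempt:
            tasks = await docker_client.tasks.list(filters={"service": service_name})
            states = [task["Status"]["State"] for task in tasks]
            assert sum(state == "running" for state in states) == 1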
diff --git a/services/osparc-gateway-server/tests/integration/test_gateway.py b/services/osparc-gateway-server/tests/integration/test_gateway.py
deleted file mode 100644
index 7009c12cb5b..00000000000
--- a/services/osparc-gateway-server/tests/integration/test_gateway.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-import pytest
-import traitlets
-import traitlets.config
-from dask_gateway_server.app import DaskGateway
-from faker import Faker
-from osparc_gateway_server.backend.osparc import OsparcBackend
-from pytest_simcore.helpers.host import get_localhost_ip
-
-
-@pytest.fixture(
- params=[
- "local/dask-sidecar:production",
- ]
-)
-def minimal_config(
- docker_swarm,
- monkeypatch: pytest.MonkeyPatch,
- faker: Faker,
- request: pytest.FixtureRequest,
-):
- monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", faker.pystr())
- monkeypatch.setenv("GATEWAY_SERVER_NAME", get_localhost_ip())
- monkeypatch.setenv("COMPUTATIONAL_SIDECAR_VOLUME_NAME", faker.pystr())
- monkeypatch.setenv(
- "COMPUTATIONAL_SIDECAR_IMAGE",
- request.param, # type: ignore
- )
- monkeypatch.setenv("COMPUTATIONAL_SIDECAR_LOG_LEVEL", "DEBUG")
-
-
-async def test_gateway_configuration_through_env_variables(
- minimal_config, monkeypatch, faker: Faker
-):
- cluster_start_timeout = faker.pyfloat()
- monkeypatch.setenv("GATEWAY_CLUSTER_START_TIMEOUT", f"{cluster_start_timeout}")
- worker_start_timeout = faker.pyfloat()
- monkeypatch.setenv("GATEWAY_WORKER_START_TIMEOUT", f"{worker_start_timeout}")
- c = traitlets.config.Config()
- c.DaskGateway.backend_class = OsparcBackend # type: ignore
- dask_gateway_server = DaskGateway(config=c)
- dask_gateway_server.initialize([]) # that is a shitty one!
- print("--> local dask gateway server initialized")
- await dask_gateway_server.setup()
- await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access
- print("--> local dask gateway server setup completed")
-
- assert dask_gateway_server.backend.cluster_start_timeout == cluster_start_timeout
- assert dask_gateway_server.backend.worker_start_timeout == worker_start_timeout
-
- print("<-- local dask gateway server switching off...")
- await dask_gateway_server.cleanup()
- print("...done")
diff --git a/services/osparc-gateway-server/tests/system/Makefile b/services/osparc-gateway-server/tests/system/Makefile
deleted file mode 100644
index fc9cd92a3f5..00000000000
--- a/services/osparc-gateway-server/tests/system/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# Targets for DEVELOPMENT for system tests
-#
-include ../../../../scripts/common.Makefile
-
-
-.PHONY: requirements
-requirements: ## compiles pip requirements (.in -> .txt)
- @$(MAKE_C) requirements reqs
-
-
-.PHONY: install install-dev install-prod install-ci
-
-install: install-ci
-
-install-dev install-prod install-ci: _check_venv_active ## install requirements in dev/prod/ci mode
- # installing in $(subst install-,,$@) mode
- @uv pip sync requirements/$(subst install-,,$@).txt
-
-
-.PHONY: tests
-tests: _check_venv_active ## runs all tests [CI]
- # running system tests
- pytest \
- --asyncio-mode=auto \
- --color=yes \
- --durations=10 \
- -vv \
- $(CURDIR)
-
-.PHONY: test-dev
-tests-dev: _check_venv_active ## runs all tests [DEV]
- # running system tests
- @pytest \
- --asyncio-mode=auto \
- --color=yes \
- --durations=10 \
- --exitfirst \
- --failed-first \
- --keep-docker-up \
- --pdb \
- -vv \
- $(CURDIR)
diff --git a/services/osparc-gateway-server/tests/system/requirements/Makefile b/services/osparc-gateway-server/tests/system/requirements/Makefile
deleted file mode 100644
index c447724e305..00000000000
--- a/services/osparc-gateway-server/tests/system/requirements/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Targets to pip-compile requirements
-#
-include ../../../../../requirements/base.Makefile
-
-# Add here any extra explicit dependency: e.g. _migration.txt: _base.txt
diff --git a/services/osparc-gateway-server/tests/system/requirements/_base.txt b/services/osparc-gateway-server/tests/system/requirements/_base.txt
deleted file mode 100644
index 0eb14367cec..00000000000
--- a/services/osparc-gateway-server/tests/system/requirements/_base.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# NOTE:
-# This file file is just here as placeholder
-# to fulfill dependencies of _tools.txt target in requirements/base.Makefile
-#
-# This is a pure-tests project and all dependencies are added in _test.in
-#
diff --git a/services/osparc-gateway-server/tests/system/requirements/_test.in b/services/osparc-gateway-server/tests/system/requirements/_test.in
deleted file mode 100644
index 09fa07fb7e3..00000000000
--- a/services/osparc-gateway-server/tests/system/requirements/_test.in
+++ /dev/null
@@ -1,20 +0,0 @@
---constraint ../../../../../requirements/constraints.txt
---constraint ../../../../dask-sidecar/requirements/_dask-distributed.txt
-
-
-
-aiodocker
-dask-gateway
-docker
-faker
-lz4
-numpy
-pytest
-pytest-asyncio
-pytest-cov
-pytest-icdiff
-pytest-instafail
-pytest-mock
-pytest-runner
-pytest-sugar
-tenacity
diff --git a/services/osparc-gateway-server/tests/system/requirements/_test.txt b/services/osparc-gateway-server/tests/system/requirements/_test.txt
deleted file mode 100644
index 29d4e7666d4..00000000000
--- a/services/osparc-gateway-server/tests/system/requirements/_test.txt
+++ /dev/null
@@ -1,194 +0,0 @@
-aiodocker==0.23.0
- # via -r requirements/_test.in
-aiohappyeyeballs==2.4.0
- # via aiohttp
-aiohttp==3.10.5
- # via
- # -c requirements/../../../../../requirements/constraints.txt
- # aiodocker
- # dask-gateway
-aiosignal==1.3.1
- # via aiohttp
-attrs==24.2.0
- # via aiohttp
-certifi==2024.8.30
- # via
- # -c requirements/../../../../../requirements/constraints.txt
- # requests
-charset-normalizer==3.3.2
- # via requests
-click==8.1.7
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # dask-gateway
- # distributed
-cloudpickle==3.0.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # distributed
-coverage==7.6.1
- # via pytest-cov
-dask==2024.5.1
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask-gateway
- # distributed
-dask-gateway==2024.1.0
- # via -r requirements/_test.in
-distributed==2024.5.1
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask-gateway
-docker==7.1.0
- # via -r requirements/_test.in
-faker==29.0.0
- # via -r requirements/_test.in
-frozenlist==1.4.1
- # via
- # aiohttp
- # aiosignal
-fsspec==2024.5.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
-icdiff==2.0.7
- # via pytest-icdiff
-idna==3.10
- # via
- # requests
- # yarl
-importlib-metadata==7.1.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
-iniconfig==2.0.0
- # via pytest
-jinja2==3.1.4
- # via
- # -c requirements/../../../../../requirements/constraints.txt
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-locket==1.0.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
- # partd
-lz4==4.3.3
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # -r requirements/_test.in
-markupsafe==2.1.5
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # jinja2
-msgpack==1.1.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-multidict==6.1.0
- # via
- # aiohttp
- # yarl
-numpy==1.26.4
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # -r requirements/_test.in
-packaging==24.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # distributed
- # pytest
- # pytest-sugar
-partd==1.4.2
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
-pluggy==1.5.0
- # via pytest
-pprintpp==0.4.0
- # via pytest-icdiff
-psutil==6.0.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-pytest==8.3.3
- # via
- # -r requirements/_test.in
- # pytest-asyncio
- # pytest-cov
- # pytest-icdiff
- # pytest-instafail
- # pytest-mock
- # pytest-sugar
-pytest-asyncio==0.23.8
- # via
- # -c requirements/../../../../../requirements/constraints.txt
- # -r requirements/_test.in
-pytest-cov==5.0.0
- # via -r requirements/_test.in
-pytest-icdiff==0.9
- # via -r requirements/_test.in
-pytest-instafail==0.5.0
- # via -r requirements/_test.in
-pytest-mock==3.14.0
- # via -r requirements/_test.in
-pytest-runner==6.0.1
- # via -r requirements/_test.in
-pytest-sugar==1.0.0
- # via -r requirements/_test.in
-python-dateutil==2.9.0.post0
- # via faker
-pyyaml==6.0.1
- # via
- # -c requirements/../../../../../requirements/constraints.txt
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # dask-gateway
- # distributed
-requests==2.32.3
- # via docker
-six==1.16.0
- # via python-dateutil
-sortedcontainers==2.4.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-tblib==3.0.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-tenacity==9.0.0
- # via -r requirements/_test.in
-termcolor==2.4.0
- # via pytest-sugar
-toolz==0.12.1
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask
- # distributed
- # partd
-tornado==6.4
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # dask-gateway
- # distributed
-urllib3==2.2.3
- # via
- # -c requirements/../../../../../requirements/constraints.txt
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
- # docker
- # requests
-yarl==1.12.1
- # via aiohttp
-zict==3.0.0
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # distributed
-zipp==3.18.2
- # via
- # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt
- # importlib-metadata
diff --git a/services/osparc-gateway-server/tests/system/requirements/_tools.in b/services/osparc-gateway-server/tests/system/requirements/_tools.in
deleted file mode 100644
index b0503840a27..00000000000
--- a/services/osparc-gateway-server/tests/system/requirements/_tools.in
+++ /dev/null
@@ -1,4 +0,0 @@
---constraint ../../../../../requirements/constraints.txt
---constraint _test.txt
-
---requirement ../../../../../requirements/devenv.txt
diff --git a/services/osparc-gateway-server/tests/system/requirements/_tools.txt b/services/osparc-gateway-server/tests/system/requirements/_tools.txt
deleted file mode 100644
index 56217c590ee..00000000000
--- a/services/osparc-gateway-server/tests/system/requirements/_tools.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-astroid==3.3.4
- # via pylint
-black==24.8.0
- # via -r requirements/../../../../../requirements/devenv.txt
-build==1.2.2
- # via pip-tools
-bump2version==1.0.1
- # via -r requirements/../../../../../requirements/devenv.txt
-cfgv==3.4.0
- # via pre-commit
-click==8.1.7
- # via
- # -c requirements/_test.txt
- # black
- # pip-tools
-dill==0.3.8
- # via pylint
-distlib==0.3.8
- # via virtualenv
-filelock==3.16.1
- # via virtualenv
-identify==2.6.1
- # via pre-commit
-isort==5.13.2
- # via
- # -r requirements/../../../../../requirements/devenv.txt
- # pylint
-mccabe==0.7.0
- # via pylint
-mypy==1.12.0
- # via -r requirements/../../../../../requirements/devenv.txt
-mypy-extensions==1.0.0
- # via
- # black
- # mypy
-nodeenv==1.9.1
- # via pre-commit
-packaging==24.0
- # via
- # -c requirements/_test.txt
- # black
- # build
-pathspec==0.12.1
- # via black
-pip==24.2
- # via pip-tools
-pip-tools==7.4.1
- # via -r requirements/../../../../../requirements/devenv.txt
-platformdirs==4.3.6
- # via
- # black
- # pylint
- # virtualenv
-pre-commit==3.8.0
- # via -r requirements/../../../../../requirements/devenv.txt
-pylint==3.3.0
- # via -r requirements/../../../../../requirements/devenv.txt
-pyproject-hooks==1.1.0
- # via
- # build
- # pip-tools
-pyyaml==6.0.1
- # via
- # -c requirements/../../../../../requirements/constraints.txt
- # -c requirements/_test.txt
- # pre-commit
-ruff==0.6.7
- # via -r requirements/../../../../../requirements/devenv.txt
-setuptools==75.1.0
- # via pip-tools
-tomlkit==0.13.2
- # via pylint
-typing-extensions==4.12.2
- # via mypy
-virtualenv==20.26.5
- # via pre-commit
-wheel==0.44.0
- # via pip-tools
diff --git a/services/osparc-gateway-server/tests/system/requirements/ci.txt b/services/osparc-gateway-server/tests/system/requirements/ci.txt
deleted file mode 100644
index 684ed6c7887..00000000000
--- a/services/osparc-gateway-server/tests/system/requirements/ci.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# Shortcut to install all packages for the contigous integration (CI) of 'services/web/server'
-#
-# - As ci.txt but w/ tests
-#
-# Usage:
-# pip install -r requirements/ci.txt
-#
-
-# installs base + tests requirements
---requirement _test.txt
---requirement _tools.txt
-
-# installs this repo's packages
-pytest-simcore @ ../../../../packages/pytest-simcore/
diff --git a/services/osparc-gateway-server/tests/system/requirements/dev.txt b/services/osparc-gateway-server/tests/system/requirements/dev.txt
deleted file mode 100644
index 436b5550342..00000000000
--- a/services/osparc-gateway-server/tests/system/requirements/dev.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# Shortcut to install all packages needed to develop 'services/web/server'
-#
-# - As ci.txt but with current and repo packages in develop (edit) mode
-#
-# Usage:
-# pip install -r requirements/dev.txt
-#
-
-
-# installs base + tests requirements
---requirement _test.txt
---requirement _tools.txt
-
-# installs this repo's packages
---editable ../../../../packages/pytest-simcore/
diff --git a/services/osparc-gateway-server/tests/system/test_deploy.py b/services/osparc-gateway-server/tests/system/test_deploy.py
deleted file mode 100644
index 7e4044f6337..00000000000
--- a/services/osparc-gateway-server/tests/system/test_deploy.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-
-import asyncio
-import json
-from collections.abc import AsyncIterator
-from copy import deepcopy
-from pathlib import Path
-
-import aiohttp
-import dask_gateway
-import pytest
-from faker import Faker
-from pytest_simcore.helpers.host import get_localhost_ip
-from tenacity.asyncio import AsyncRetrying
-from tenacity.stop import stop_after_delay
-from tenacity.wait import wait_fixed
-
-pytest_plugins = ["pytest_simcore.repository_paths", "pytest_simcore.docker_swarm"]
-
-
-@pytest.fixture
-async def aiohttp_client() -> AsyncIterator[aiohttp.ClientSession]:
- async with aiohttp.ClientSession() as session:
- yield session
-
-
-@pytest.fixture
-def minimal_config(monkeypatch):
- monkeypatch.setenv("SC_BOOT_MODE", "production")
- monkeypatch.setenv("GATEWAY_SERVER_ONE_WORKER_PER_NODE", "False")
-
-
-@pytest.fixture(scope="session")
-def dask_gateway_entrypoint() -> str:
- return f"http://{get_localhost_ip()}:8000"
-
-
-@pytest.fixture(scope="session")
-def dask_gateway_password() -> str:
- return "asdf"
-
-
-@pytest.fixture
-async def dask_gateway_stack_deployed_services(
- minimal_config,
- package_dir: Path,
- docker_swarm,
- aiohttp_client: aiohttp.ClientSession,
- dask_gateway_entrypoint: str,
-):
- print("--> Deploying osparc-dask-gateway stack...")
- process = await asyncio.create_subprocess_exec(
- "make",
- "up-prod",
- stdout=asyncio.subprocess.PIPE,
- stderr=asyncio.subprocess.PIPE,
- cwd=package_dir,
- )
- stdout, stderr = await process.communicate()
- assert (
- process.returncode == 0
- ), f"Unexpected error while deploying stack:\nstdout:{stdout.decode()}\n\nstderr:{stderr.decode()}"
- print(f"{stdout}")
- print("--> osparc-dask-gateway stack deployed.")
- healtcheck_endpoint = f"{dask_gateway_entrypoint}/api/health"
- async for attempt in AsyncRetrying(
- reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60)
- ):
- with attempt:
- print(
- f"--> Connecting to {healtcheck_endpoint}, "
- f"attempt {attempt.retry_state.attempt_number}...",
- )
- response = await aiohttp_client.get(healtcheck_endpoint)
- response.raise_for_status()
- print(
- f"--> Connection to gateway server succeeded."
- f" [{json.dumps(attempt.retry_state.retry_object.statistics)}]",
- )
-
- yield
- print("<-- Stopping osparc-dask-gateway stack...")
- process = await asyncio.create_subprocess_exec(
- "make",
- "down",
- stdout=asyncio.subprocess.PIPE,
- stderr=asyncio.subprocess.PIPE,
- cwd=package_dir,
- )
- stdout, stderr = await process.communicate()
- assert (
- process.returncode == 0
- ), f"Unexpected error while deploying stack:\nstdout:{stdout.decode()}\n\n{stderr.decode()}"
- print(f"{stdout}")
- print("<-- osparc-dask-gateway stack stopped.")
-
-
-async def test_deployment(
- dask_gateway_stack_deployed_services,
- dask_gateway_entrypoint: str,
- faker: Faker,
- dask_gateway_password: str,
-):
- gateway = dask_gateway.Gateway(
- address=dask_gateway_entrypoint,
- auth=dask_gateway.BasicAuth(faker.pystr(), dask_gateway_password),
- )
-
- with gateway.new_cluster() as cluster:
- _NUM_WORKERS = 2
- cluster.scale(
- _NUM_WORKERS
- ) # when returning we are in the process of creating the workers
-
- # now wait until we get the workers
- workers = None
- async for attempt in AsyncRetrying(
- reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60)
- ):
- with attempt:
- print(
- f"--> Waiting to have {_NUM_WORKERS} running,"
- f" attempt {attempt.retry_state.attempt_number}...",
- )
- assert "workers" in cluster.scheduler_info
- assert len(cluster.scheduler_info["workers"]) == _NUM_WORKERS
- workers = deepcopy(cluster.scheduler_info["workers"])
- print(
- f"!-- {_NUM_WORKERS} are running,"
- f" [{json.dumps(attempt.retry_state.retry_object.statistics)}]",
- )
-
- # now check all this is stable
- _SECONDS_STABLE = 6
- for n in range(_SECONDS_STABLE):
- # NOTE: the scheduler_info gets auto-udpated by the dask-gateway internals
- assert workers == cluster.scheduler_info["workers"]
- await asyncio.sleep(1)
- print(f"!-- {_NUM_WORKERS} stable for {n} seconds")
-
- # send some work
- def square(x):
- return x**2
-
- def neg(x):
- return -x
-
- with cluster.get_client() as client:
- square_of_2 = client.submit(square, 2)
- assert square_of_2.result(timeout=10) == 4
- assert not square_of_2.exception(timeout=10)
-
- # now send some more stuff just for the fun
- A = client.map(square, range(10))
- B = client.map(neg, A)
-
- total = client.submit(sum, B)
- print("computation completed", total.result(timeout=120))
diff --git a/services/osparc-gateway-server/tests/unit/test_osparc.py b/services/osparc-gateway-server/tests/unit/test_osparc.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/services/osparc-gateway-server/tests/unit/test_settings.py b/services/osparc-gateway-server/tests/unit/test_settings.py
deleted file mode 100644
index 37adbcd168b..00000000000
--- a/services/osparc-gateway-server/tests/unit/test_settings.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# pylint: disable=unused-argument
-# pylint: disable=redefined-outer-name
-
-import pytest
-from osparc_gateway_server.backend.settings import AppSettings
-
-
-@pytest.fixture
-def minimal_config(monkeypatch: pytest.MonkeyPatch):
- monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", "atestnetwork")
- monkeypatch.setenv("GATEWAY_SERVER_NAME", "atestserver")
- monkeypatch.setenv("COMPUTATIONAL_SIDECAR_IMAGE", "test/localpytest:latest")
- monkeypatch.setenv(
- "COMPUTATIONAL_SIDECAR_VOLUME_NAME", "sidecar_computational_volume_name"
- )
-
-
-def test_app_settings(minimal_config):
- settings = AppSettings()
- assert settings
diff --git a/services/osparc-gateway-server/tests/unit/test_utils.py b/services/osparc-gateway-server/tests/unit/test_utils.py
deleted file mode 100644
index f512395ee7e..00000000000
--- a/services/osparc-gateway-server/tests/unit/test_utils.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# pylint: disable=unused-argument
-# pylint: disable=unused-variable
-# pylint: disable=redefined-outer-name
-
-import asyncio
-import socket
-from copy import deepcopy
-from pathlib import Path
-from typing import Any, AsyncIterator, Awaitable, Callable
-from unittest import mock
-
-import aiodocker
-import pytest
-from dask_gateway_server.backends.db_base import Cluster, JobStatus
-from faker import Faker
-from osparc_gateway_server.backend.errors import NoHostFoundError
-from osparc_gateway_server.backend.settings import AppSettings
-from osparc_gateway_server.backend.utils import (
- _DASK_KEY_CERT_PATH_IN_SIDECAR,
- DockerSecret,
- create_or_update_secret,
- create_service_config,
- delete_secrets,
- get_cluster_information,
- get_network_id,
- get_next_empty_node_hostname,
- is_service_task_running,
-)
-from pytest_mock.plugin import MockerFixture
-from tenacity.asyncio import AsyncRetrying
-from tenacity.stop import stop_after_delay
-from tenacity.wait import wait_fixed
-
-
-@pytest.fixture
-def minimal_config(monkeypatch):
- monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", "atestnetwork")
- monkeypatch.setenv("GATEWAY_SERVER_NAME", "atestserver")
- monkeypatch.setenv("COMPUTATIONAL_SIDECAR_IMAGE", "test/localpytest:latest")
- monkeypatch.setenv(
- "COMPUTATIONAL_SIDECAR_VOLUME_NAME", "sidecar_computational_volume_name"
- )
-
-
-@pytest.fixture()
-async def create_docker_service(
- docker_swarm, async_docker_client: aiodocker.Docker, faker: Faker
-) -> AsyncIterator[
- Callable[[dict[str, str], dict[str, Any]], Awaitable[dict[str, Any]]]
-]:
- created_services = []
-
- async def _creator(
- labels: dict[str, str], override_task_template: dict[str, Any]
- ) -> dict[str, Any]:
- task_template = {
- "ContainerSpec": {
- "Image": "busybox:latest",
- "Command": ["sleep", "10000"],
- }
- }
- task_template.update(override_task_template)
- service = await async_docker_client.services.create(
- task_template=task_template,
- name=faker.pystr(),
- labels=labels,
- )
- assert service
- created_services.append(service)
- print(f"--> created docker service {service}")
- inspected_service = await async_docker_client.services.inspect(service["ID"])
- print(f"--> service inspected returned {inspected_service}")
- return inspected_service
-
- yield _creator
-
- await asyncio.gather(
- *[async_docker_client.services.delete(s["ID"]) for s in created_services]
- )
-
-
-@pytest.fixture
-def create_running_service(
- async_docker_client: aiodocker.Docker,
- create_docker_service: Callable[
- [dict[str, str], dict[str, Any]], Awaitable[dict[str, Any]]
- ],
-) -> Callable[[dict[str, str]], Awaitable[dict[str, Any]]]:
- async def _creator(labels: dict[str, str]) -> dict[str, Any]:
- service = await create_docker_service(labels, {})
- async for attempt in AsyncRetrying(
- reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60)
- ):
- with attempt:
- tasks = await async_docker_client.tasks.list(
- filters={"service": f"{service['Spec']['Name']}"}
- )
- task_states = [task["Status"]["State"] for task in tasks]
- num_running = sum(current == "running" for current in task_states)
- print(f"--> service task states {task_states=}")
- assert num_running == 1
- print(f"--> service {service['Spec']['Name']} is running now")
- return service
- raise AssertionError(f"service {service=} could not start")
-
- return _creator
-
-
-@pytest.fixture
-def mocked_logger(mocker: MockerFixture) -> mock.MagicMock:
- return mocker.MagicMock()
-
-
-async def test_is_task_running(
- docker_swarm,
- minimal_config,
- async_docker_client: aiodocker.Docker,
- create_running_service: Callable[[dict[str, str]], Awaitable[dict[str, Any]]],
- mocked_logger: mock.MagicMock,
-):
- service = await create_running_service({})
- # this service exists and run
- assert (
- await is_service_task_running(
- async_docker_client, service["Spec"]["Name"], mocked_logger
- )
- == True
- )
-
- # check unknown service raises error
- with pytest.raises(aiodocker.DockerError):
- await is_service_task_running(
- async_docker_client, "unknown_service", mocked_logger
- )
-
-
-async def test_get_network_id(
- docker_swarm,
- async_docker_client: aiodocker.Docker,
- docker_network: Callable[..., Awaitable[dict[str, Any]]],
- mocked_logger: mock.MagicMock,
-):
- # wrong name shall raise
- with pytest.raises(ValueError):
- await get_network_id(async_docker_client, "a_fake_network_name", mocked_logger)
- # create 1 bridge network, shall raise when looking for it
- bridge_network = await docker_network(**{"Driver": "bridge"})
- with pytest.raises(ValueError):
- await get_network_id(async_docker_client, bridge_network["Name"], mocked_logger)
- # create 1 overlay network
- overlay_network = await docker_network()
- network_id = await get_network_id(
- async_docker_client, overlay_network["Name"], mocked_logger
- )
- assert network_id == overlay_network["Id"]
-
- # create a second overlay network with the same name, shall raise on creation, so not possible
- with pytest.raises(aiodocker.exceptions.DockerError):
- await docker_network(**{"Name": overlay_network["Name"]})
- assert (
- True
- ), "If it is possible to have 2 networks with the same name, this must be handled"
-
-
-@pytest.fixture
-async def fake_cluster(faker: Faker) -> Cluster:
- return Cluster(id=faker.uuid4(), name=faker.pystr(), status=JobStatus.CREATED)
-
-
-@pytest.fixture
-async def docker_secret_cleaner(
- async_docker_client: aiodocker.Docker, fake_cluster: Cluster
-) -> AsyncIterator:
- yield
- await delete_secrets(async_docker_client, fake_cluster)
-
-
-async def test_create_service_config(
- docker_swarm,
- async_docker_client: aiodocker.Docker,
- minimal_config: None,
- faker: Faker,
- fake_cluster: Cluster,
- docker_secret_cleaner,
-):
- # let's create some fake service config
- settings = AppSettings() # type: ignore
- service_env = faker.pydict()
- service_name = faker.name()
- network_id = faker.uuid4()
- cmd = faker.pystr()
- fake_labels = faker.pydict()
- fake_placement = {"Constraints": [f"node.hostname=={faker.hostname()}"]}
-
- # create a second one
- secrets = [
- await create_or_update_secret(
- async_docker_client,
- faker.file_path(),
- fake_cluster,
- secret_data=faker.text(),
- )
- for n in range(3)
- ]
-
- assert len(await async_docker_client.secrets.list()) == 3
-
- # we shall have some env that tells the service where the secret is located
- expected_service_env = deepcopy(service_env)
- for s in secrets:
- fake_env_key = faker.pystr()
- service_env[fake_env_key] = s.secret_file_name
- expected_service_env[
- fake_env_key
- ] = f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}"
-
- service_parameters = create_service_config(
- settings=settings,
- service_env=service_env,
- service_name=service_name,
- network_id=network_id,
- service_secrets=secrets,
- cmd=cmd,
- labels=fake_labels,
- placement=fake_placement,
- )
- assert service_parameters
- assert service_parameters["name"] == service_name
- assert network_id in service_parameters["networks"]
-
- for env_key, env_value in expected_service_env.items():
- assert env_key in service_parameters["task_template"]["ContainerSpec"]["Env"]
- assert (
- service_parameters["task_template"]["ContainerSpec"]["Env"][env_key]
- == env_value
- )
- assert service_parameters["task_template"]["ContainerSpec"]["Command"] == cmd
- assert service_parameters["labels"] == fake_labels
- assert len(service_parameters["task_template"]["ContainerSpec"]["Secrets"]) == 3
- for service_secret, original_secret in zip(
- service_parameters["task_template"]["ContainerSpec"]["Secrets"], secrets
- ):
- assert service_secret["SecretName"] == original_secret.secret_name
- assert service_secret["SecretID"] == original_secret.secret_id
- assert (
- service_secret["File"]["Name"]
- == f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(original_secret.secret_file_name).name}"
- )
- assert service_parameters["task_template"]["Placement"] == fake_placement
-
-
-@pytest.fixture
-def fake_secret_file(tmp_path) -> Path:
- fake_secret_file = Path(tmp_path / "fake_file")
- fake_secret_file.write_text("Hello I am a secret file")
- assert fake_secret_file.exists()
- return fake_secret_file
-
-
-async def test_create_or_update_docker_secrets_with_invalid_call_raises(
- docker_swarm,
- async_docker_client: aiodocker.Docker,
- fake_cluster: Cluster,
- faker: Faker,
- docker_secret_cleaner,
-):
- with pytest.raises(ValueError):
- await create_or_update_secret(
- async_docker_client,
- faker.file_path(),
- fake_cluster,
- )
-
-
-async def test_create_or_update_docker_secrets(
- docker_swarm,
- async_docker_client: aiodocker.Docker,
- fake_secret_file: Path,
- fake_cluster: Cluster,
- faker: Faker,
- docker_secret_cleaner,
-):
- list_of_secrets = await async_docker_client.secrets.list(
- filters={"label": f"cluster_id={fake_cluster.id}"}
- )
- assert len(list_of_secrets) == 0
- file_original_size = fake_secret_file.stat().st_size
- # check secret creation
- secret_target_file_name = faker.file_path()
- created_secret: DockerSecret = await create_or_update_secret(
- async_docker_client,
- secret_target_file_name,
- fake_cluster,
- file_path=fake_secret_file,
- )
- list_of_secrets = await async_docker_client.secrets.list(
- filters={"label": f"cluster_id={fake_cluster.id}"}
- )
- assert len(list_of_secrets) == 1
- secret = list_of_secrets[0]
- assert created_secret.secret_id == secret["ID"]
- inspected_secret = await async_docker_client.secrets.inspect(secret["ID"])
-
- assert created_secret.secret_name == inspected_secret["Spec"]["Name"]
- assert "cluster_id" in inspected_secret["Spec"]["Labels"]
- assert inspected_secret["Spec"]["Labels"]["cluster_id"] == fake_cluster.id
- assert "cluster_name" in inspected_secret["Spec"]["Labels"]
- assert inspected_secret["Spec"]["Labels"]["cluster_name"] == fake_cluster.name
-
- # check update of secret
- fake_secret_file.write_text("some additional stuff in the file")
- assert fake_secret_file.stat().st_size != file_original_size
-
- updated_secret: DockerSecret = await create_or_update_secret(
- async_docker_client,
- secret_target_file_name,
- fake_cluster,
- file_path=fake_secret_file,
- )
- assert updated_secret.secret_id != created_secret.secret_id
- secrets = await async_docker_client.secrets.list(
- filters={"label": f"cluster_id={fake_cluster.id}"}
- )
- assert len(secrets) == 1
- updated_secret = secrets[0]
- assert updated_secret != created_secret
-
- # create a second one
- secret_target_file_name2 = faker.file_path()
- created_secret: DockerSecret = await create_or_update_secret(
- async_docker_client,
- secret_target_file_name2,
- fake_cluster,
- secret_data=faker.text(),
- )
- secrets = await async_docker_client.secrets.list(
- filters={"label": f"cluster_id={fake_cluster.id}"}
- )
- assert len(secrets) == 2
-
- # test deletion
- await delete_secrets(async_docker_client, fake_cluster)
- secrets = await async_docker_client.secrets.list(
- filters={"label": f"cluster_id={fake_cluster.id}"}
- )
- assert len(secrets) == 0
-
-
-async def test_get_cluster_information(
- docker_swarm,
- async_docker_client: aiodocker.Docker,
-):
- cluster_information = await get_cluster_information(async_docker_client)
- assert cluster_information
-
- # in testing we do have 1 machine, that is... this very host
- assert len(cluster_information) == 1
- assert socket.gethostname() in cluster_information
-
-
-@pytest.fixture()
-def fake_docker_nodes(faker: Faker) -> list[dict[str, Any]]:
- return [
- {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}},
- {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}},
- {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}},
- ]
-
-
-@pytest.fixture()
-def mocked_docker_nodes(mocker: MockerFixture, fake_docker_nodes):
- mocked_aiodocker_nodes = mocker.patch(
- "osparc_gateway_server.backend.utils.aiodocker.nodes.DockerSwarmNodes.list",
- autospec=True,
- return_value=fake_docker_nodes,
- )
-
-
-async def test_get_empty_node_hostname_rotates_host_names(
- fake_docker_nodes: list[dict[str, Any]],
- mocked_docker_nodes,
- docker_swarm,
- async_docker_client: aiodocker.Docker,
- fake_cluster: Cluster,
-):
- available_hostnames = [
- node["Description"]["Hostname"] for node in fake_docker_nodes
- ]
- num_nodes = len(fake_docker_nodes)
- for n in range(num_nodes):
- hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster)
- assert hostname in available_hostnames
- available_hostnames.pop(available_hostnames.index(hostname))
- # let's do it a second time, since it should again go over all the hosts
- available_hostnames = [
- node["Description"]["Hostname"] for node in fake_docker_nodes
- ]
- for n in range(num_nodes):
- hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster)
- assert hostname in available_hostnames
- available_hostnames.pop(available_hostnames.index(hostname))
-
-
-async def test_get_empty_node_hostname_correctly_checks_services_labels(
- docker_swarm: None,
- async_docker_client: aiodocker.Docker,
- fake_cluster: Cluster,
- create_running_service: Callable[[dict[str, str]], Awaitable[dict[str, Any]]],
-):
- hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster)
- assert socket.gethostname() == hostname
-
- # only services with the required labels shall be used to find if a service is already on a machine
- invalid_labels = [
- # no labels
- {},
- # only one of the required label
- {
- "cluster_id": fake_cluster.id,
- },
- # only one of the required label
- {"type": "worker"},
- ]
- await asyncio.gather(*[create_running_service(l) for l in invalid_labels])
- # these services have not the correct labels, so the host is still available
- hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster)
- assert socket.gethostname() == hostname
-
-
-async def test_get_empty_node_hostname_raises_no_host_found_if_a_service_is_already_running(
- docker_swarm: None,
- async_docker_client: aiodocker.Docker,
- fake_cluster: Cluster,
- create_running_service: Callable[[dict[str, str]], Awaitable[dict[str, Any]]],
-):
- # now create a service with the required labels
- required_labels = {"cluster_id": fake_cluster.id, "type": "worker"}
- await create_running_service(required_labels)
- with pytest.raises(NoHostFoundError):
- await get_next_empty_node_hostname(async_docker_client, fake_cluster)
-
-
-async def test_get_empty_node_hostname_returns_constraint_if_available(
- docker_swarm: None,
- async_docker_client: aiodocker.Docker,
- fake_cluster: Cluster,
- create_docker_service: Callable[
- [dict[str, str], dict[str, Any]], Awaitable[dict[str, Any]]
- ],
-):
- # now create a service with the required labels but that is pending
- required_labels = {"cluster_id": fake_cluster.id, "type": "worker"}
- await create_docker_service(
- required_labels,
- {
- "Placement": {"Constraints": ["node.hostname==pytest"]},
- "Resources": {"Reservations": {"NanoCPUs": int(500 * 10e9)}},
- },
- )
- await get_next_empty_node_hostname(async_docker_client, fake_cluster)
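The unit tests deleted above exercise helpers from the removed osparc_gateway_server package, but the underlying aiodocker calls are generic. A small sketch of creating a labelled swarm service the way those tests' fixtures did (names are illustrative) is:

from typing import Any

import aiodocker


async def create_labelled_sleeper(
    client: aiodocker.Docker, name: str, labels: dict[str, str]
) -> dict[str, Any]:
    # create a trivial service carrying the given labels, then return its inspected spec
    service = await client.services.create(
        task_template={
            "ContainerSpec": {"Image": "busybox:latest", "Command": ["sleep", "10000"]}
        },
        name=name,
        labels=labels,
    )
    return await client.services.inspect(service["ID"])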
diff --git a/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml b/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
index 84951101670..49278e0f128 100644
--- a/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
+++ b/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml
@@ -2347,155 +2347,6 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/Envelope_CatalogServiceGet_'
- /v0/clusters:
- get:
- tags:
- - clusters
- summary: List Clusters
- operationId: list_clusters
- responses:
- '200':
- description: Successful Response
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/Envelope_list_ClusterGet__'
- post:
- tags:
- - clusters
- summary: Create Cluster
- operationId: create_cluster
- requestBody:
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/ClusterCreate'
- required: true
- responses:
- '201':
- description: Successful Response
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/Envelope_ClusterGet_'
- /v0/clusters:ping:
- post:
- tags:
- - clusters
- summary: Ping Cluster
- description: Test connectivity with cluster
- operationId: ping_cluster
- requestBody:
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/ClusterPing'
- required: true
- responses:
- '204':
- description: Successful Response
- /v0/clusters/{cluster_id}:
- get:
- tags:
- - clusters
- summary: Get Cluster
- operationId: get_cluster
- parameters:
- - name: cluster_id
- in: path
- required: true
- schema:
- type: integer
- minimum: 0
- title: Cluster Id
- responses:
- '200':
- description: Successful Response
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/Envelope_ClusterGet_'
- patch:
- tags:
- - clusters
- summary: Update Cluster
- operationId: update_cluster
- parameters:
- - name: cluster_id
- in: path
- required: true
- schema:
- type: integer
- minimum: 0
- title: Cluster Id
- requestBody:
- required: true
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/ClusterPatch'
- responses:
- '200':
- description: Successful Response
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/Envelope_ClusterGet_'
- delete:
- tags:
- - clusters
- summary: Delete Cluster
- operationId: delete_cluster
- parameters:
- - name: cluster_id
- in: path
- required: true
- schema:
- type: integer
- minimum: 0
- title: Cluster Id
- responses:
- '204':
- description: Successful Response
- /v0/clusters/{cluster_id}/details:
- get:
- tags:
- - clusters
- summary: Get Cluster Details
- operationId: get_cluster_details
- parameters:
- - name: cluster_id
- in: path
- required: true
- schema:
- type: integer
- minimum: 0
- title: Cluster Id
- responses:
- '200':
- description: Successful Response
- content:
- application/json:
- schema:
- $ref: '#/components/schemas/Envelope_ClusterDetails_'
- /v0/clusters/{cluster_id}:ping:
- post:
- tags:
- - clusters
- summary: Ping Cluster Cluster Id
- description: Tests connectivity with cluster
- operationId: ping_cluster_cluster_id
- parameters:
- - name: cluster_id
- in: path
- required: true
- schema:
- type: integer
- minimum: 0
- title: Cluster Id
- responses:
- '204':
- description: Successful Response
/v0/computations/{project_id}:
get:
tags:
@@ -2517,7 +2368,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/Envelope_ComputationTaskGet_'
+ $ref: '#/components/schemas/Envelope_ComputationGet_'
/v0/computations/{project_id}:start:
post:
tags:
@@ -4593,7 +4444,7 @@ paths:
'403':
description: ProjectInvalidRightsError
'404':
- description: ProjectNotFoundError, UserDefaultWalletNotFoundError
+ description: UserDefaultWalletNotFoundError, ProjectNotFoundError
'409':
description: ProjectTooManyProjectOpenedError
'422':
@@ -7242,268 +7093,6 @@ components:
required:
- tag
title: CheckpointNew
- ClusterAccessRights:
- properties:
- read:
- type: boolean
- title: Read
- description: allows to run pipelines on that cluster
- write:
- type: boolean
- title: Write
- description: allows to modify the cluster
- delete:
- type: boolean
- title: Delete
- description: allows to delete a cluster
- additionalProperties: false
- type: object
- required:
- - read
- - write
- - delete
- title: ClusterAccessRights
- ClusterCreate:
- properties:
- name:
- type: string
- title: Name
- description: The human readable name of the cluster
- description:
- anyOf:
- - type: string
- - type: 'null'
- title: Description
- type:
- $ref: '#/components/schemas/ClusterTypeInModel'
- owner:
- anyOf:
- - type: integer
- exclusiveMinimum: true
- minimum: 0
- - type: 'null'
- title: Owner
- thumbnail:
- anyOf:
- - type: string
- maxLength: 2083
- minLength: 1
- format: uri
- - type: 'null'
- title: Thumbnail
- description: url to the image describing this cluster
- endpoint:
- type: string
- minLength: 1
- format: uri
- title: Endpoint
- authentication:
- oneOf:
- - $ref: '#/components/schemas/SimpleAuthentication'
- - $ref: '#/components/schemas/KerberosAuthentication'
- - $ref: '#/components/schemas/JupyterHubTokenAuthentication'
- title: Authentication
- discriminator:
- propertyName: type
- mapping:
- jupyterhub: '#/components/schemas/JupyterHubTokenAuthentication'
- kerberos: '#/components/schemas/KerberosAuthentication'
- simple: '#/components/schemas/SimpleAuthentication'
- accessRights:
- additionalProperties:
- $ref: '#/components/schemas/ClusterAccessRights'
- type: object
- title: Accessrights
- type: object
- required:
- - name
- - type
- - endpoint
- - authentication
- title: ClusterCreate
- ClusterDetails:
- properties:
- scheduler:
- $ref: '#/components/schemas/Scheduler'
- description: This contains dask scheduler information given by the underlying
- dask library
- dashboardLink:
- type: string
- minLength: 1
- format: uri
- title: Dashboardlink
- description: Link to this scheduler's dashboard
- type: object
- required:
- - scheduler
- - dashboardLink
- title: ClusterDetails
- ClusterGet:
- properties:
- name:
- type: string
- title: Name
- description: The human readable name of the cluster
- description:
- anyOf:
- - type: string
- - type: 'null'
- title: Description
- type:
- $ref: '#/components/schemas/ClusterTypeInModel'
- owner:
- type: integer
- exclusiveMinimum: true
- title: Owner
- minimum: 0
- thumbnail:
- anyOf:
- - type: string
- maxLength: 2083
- minLength: 1
- format: uri
- - type: 'null'
- title: Thumbnail
- description: url to the image describing this cluster
- endpoint:
- type: string
- minLength: 1
- format: uri
- title: Endpoint
- authentication:
- oneOf:
- - $ref: '#/components/schemas/SimpleAuthentication'
- - $ref: '#/components/schemas/KerberosAuthentication'
- - $ref: '#/components/schemas/JupyterHubTokenAuthentication'
- - $ref: '#/components/schemas/NoAuthentication'
- - $ref: '#/components/schemas/TLSAuthentication'
- title: Authentication
- description: Dask gateway authentication
- discriminator:
- propertyName: type
- mapping:
- jupyterhub: '#/components/schemas/JupyterHubTokenAuthentication'
- kerberos: '#/components/schemas/KerberosAuthentication'
- none: '#/components/schemas/NoAuthentication'
- simple: '#/components/schemas/SimpleAuthentication'
- tls: '#/components/schemas/TLSAuthentication'
- accessRights:
- additionalProperties:
- $ref: '#/components/schemas/ClusterAccessRights'
- type: object
- title: Accessrights
- default: {}
- id:
- type: integer
- minimum: 0
- title: Id
- description: The cluster ID
- type: object
- required:
- - name
- - type
- - owner
- - endpoint
- - authentication
- - id
- title: ClusterGet
- ClusterPatch:
- properties:
- name:
- anyOf:
- - type: string
- - type: 'null'
- title: Name
- description:
- anyOf:
- - type: string
- - type: 'null'
- title: Description
- type:
- anyOf:
- - $ref: '#/components/schemas/ClusterTypeInModel'
- - type: 'null'
- owner:
- anyOf:
- - type: integer
- exclusiveMinimum: true
- minimum: 0
- - type: 'null'
- title: Owner
- thumbnail:
- anyOf:
- - type: string
- maxLength: 2083
- minLength: 1
- format: uri
- - type: 'null'
- title: Thumbnail
- endpoint:
- anyOf:
- - type: string
- minLength: 1
- format: uri
- - type: 'null'
- title: Endpoint
- authentication:
- anyOf:
- - oneOf:
- - $ref: '#/components/schemas/SimpleAuthentication'
- - $ref: '#/components/schemas/KerberosAuthentication'
- - $ref: '#/components/schemas/JupyterHubTokenAuthentication'
- discriminator:
- propertyName: type
- mapping:
- jupyterhub: '#/components/schemas/JupyterHubTokenAuthentication'
- kerberos: '#/components/schemas/KerberosAuthentication'
- simple: '#/components/schemas/SimpleAuthentication'
- - type: 'null'
- title: Authentication
- accessRights:
- anyOf:
- - additionalProperties:
- $ref: '#/components/schemas/ClusterAccessRights'
- type: object
- - type: 'null'
- title: Accessrights
- type: object
- title: ClusterPatch
- ClusterPing:
- properties:
- endpoint:
- type: string
- minLength: 1
- format: uri
- title: Endpoint
- authentication:
- oneOf:
- - $ref: '#/components/schemas/SimpleAuthentication'
- - $ref: '#/components/schemas/KerberosAuthentication'
- - $ref: '#/components/schemas/JupyterHubTokenAuthentication'
- - $ref: '#/components/schemas/NoAuthentication'
- - $ref: '#/components/schemas/TLSAuthentication'
- title: Authentication
- description: Dask gateway authentication
- discriminator:
- propertyName: type
- mapping:
- jupyterhub: '#/components/schemas/JupyterHubTokenAuthentication'
- kerberos: '#/components/schemas/KerberosAuthentication'
- none: '#/components/schemas/NoAuthentication'
- simple: '#/components/schemas/SimpleAuthentication'
- tls: '#/components/schemas/TLSAuthentication'
- type: object
- required:
- - endpoint
- - authentication
- title: ClusterPing
- ClusterTypeInModel:
- type: string
- enum:
- - AWS
- - ON_PREMISE
- - ON_DEMAND
- title: ClusterTypeInModel
CodePageParams:
properties:
message:
@@ -7552,17 +7141,89 @@ components:
required:
- version
title: CompatibleService
+ ComputationGet:
+ properties:
+ id:
+ type: string
+ format: uuid
+ title: Id
+ description: the id of the computation task
+ state:
+ $ref: '#/components/schemas/RunningState'
+ description: the state of the computational task
+ result:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Result
+ description: the result of the computational task
+ pipeline_details:
+ $ref: '#/components/schemas/PipelineDetails'
+ description: the details of the generated pipeline
+ iteration:
+ anyOf:
+ - type: integer
+ exclusiveMinimum: true
+ minimum: 0
+ - type: 'null'
+ title: Iteration
+ description: the iteration id of the computation task (none if no task ran
+ yet)
+ started:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Started
+ description: the timestamp when the computation was started or None if not
+ started yet
+ stopped:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Stopped
+ description: the timestamp when the computation was stopped or None if not
+ started nor stopped yet
+ submitted:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Submitted
+ description: task last modification timestamp or None if the there is no
+ task
+ url:
+ type: string
+ minLength: 1
+ format: uri
+ title: Url
+ description: the link where to get the status of the task
+ stop_url:
+ anyOf:
+ - type: string
+ minLength: 1
+ format: uri
+ - type: 'null'
+ title: Stop Url
+ description: the link where to stop the task
+ type: object
+ required:
+ - id
+ - state
+ - pipeline_details
+ - iteration
+ - started
+ - stopped
+ - submitted
+ - url
+ title: ComputationGet
ComputationStart:
properties:
force_restart:
type: boolean
title: Force Restart
default: false
- cluster_id:
- type: integer
- minimum: 0
- title: Cluster Id
- default: 0
subgraph:
items:
type: string
@@ -7572,18 +7233,6 @@ components:
default: []
type: object
title: ComputationStart
- ComputationTaskGet:
- properties:
- cluster_id:
- anyOf:
- - type: integer
- minimum: 0
- - type: 'null'
- title: Cluster Id
- type: object
- required:
- - cluster_id
- title: ComputationTaskGet
ConnectServiceToPricingPlanBodyParams:
properties:
serviceKey:
@@ -7757,13 +7406,6 @@ components:
example:
dataset_id: N:id-aaaa
display_name: simcore-testing
- DictModel_str_Annotated_float__Gt__:
- additionalProperties:
- type: number
- exclusiveMinimum: true
- minimum: 0.0
- type: object
- title: DictModel[str, Annotated[float, Gt]]
DownloadLink:
properties:
downloadLink:
@@ -7860,37 +7502,11 @@ components:
title: Error
type: object
title: Envelope[CheckpointApiModel]
- Envelope_ClusterDetails_:
- properties:
- data:
- anyOf:
- - $ref: '#/components/schemas/ClusterDetails'
- - type: 'null'
- error:
- anyOf:
- - {}
- - type: 'null'
- title: Error
- type: object
- title: Envelope[ClusterDetails]
- Envelope_ClusterGet_:
- properties:
- data:
- anyOf:
- - $ref: '#/components/schemas/ClusterGet'
- - type: 'null'
- error:
- anyOf:
- - {}
- - type: 'null'
- title: Error
- type: object
- title: Envelope[ClusterGet]
- Envelope_ComputationTaskGet_:
+ Envelope_ComputationGet_:
properties:
data:
anyOf:
- - $ref: '#/components/schemas/ComputationTaskGet'
+ - $ref: '#/components/schemas/ComputationGet'
- type: 'null'
error:
anyOf:
@@ -7898,7 +7514,7 @@ components:
- type: 'null'
title: Error
type: object
- title: Envelope[ComputationTaskGet]
+ title: Envelope[ComputationGet]
Envelope_FileMetaDataGet_:
properties:
data:
@@ -8803,22 +8419,6 @@ components:
title: Error
type: object
title: Envelope[list[Announcement]]
- Envelope_list_ClusterGet__:
- properties:
- data:
- anyOf:
- - items:
- $ref: '#/components/schemas/ClusterGet'
- type: array
- - type: 'null'
- title: Data
- error:
- anyOf:
- - {}
- - type: 'null'
- title: Error
- type: object
- title: Envelope[list[ClusterGet]]
Envelope_list_DatasetMetaData__:
properties:
data:
@@ -10278,35 +9878,6 @@ components:
additionalProperties: false
type: object
title: InvitationInfo
- JupyterHubTokenAuthentication:
- properties:
- type:
- type: string
- enum:
- - jupyterhub
- const: jupyterhub
- title: Type
- default: jupyterhub
- api_token:
- type: string
- title: Api Token
- additionalProperties: false
- type: object
- required:
- - api_token
- title: JupyterHubTokenAuthentication
- KerberosAuthentication:
- properties:
- type:
- type: string
- enum:
- - kerberos
- const: kerberos
- title: Type
- default: kerberos
- additionalProperties: false
- type: object
- title: KerberosAuthentication
Limits:
properties:
cpus:
@@ -10505,18 +10076,6 @@ components:
description: Some foundation
gid: '16'
label: Blue Fundation
- NoAuthentication:
- properties:
- type:
- type: string
- enum:
- - none
- const: none
- title: Type
- default: none
- additionalProperties: false
- type: object
- title: NoAuthentication
Node-Input:
properties:
key:
@@ -11518,6 +11077,39 @@ components:
- phone
- code
title: PhoneConfirmationBody
+ PipelineDetails:
+ properties:
+ adjacency_list:
+ additionalProperties:
+ items:
+ type: string
+ format: uuid
+ type: array
+ type: object
+ title: Adjacency List
+ description: 'The adjacency list of the current pipeline in terms of {NodeID:
+ [successor NodeID]}'
+ progress:
+ anyOf:
+ - type: number
+ maximum: 1.0
+ minimum: 0.0
+ - type: 'null'
+ title: Progress
+ description: the progress of the pipeline (None if there are no computational
+ tasks)
+ node_states:
+ additionalProperties:
+ $ref: '#/components/schemas/NodeState'
+ type: object
+ title: Node States
+ description: The states of each of the computational nodes in the pipeline
+ type: object
+ required:
+ - adjacency_list
+ - progress
+ - node_states
+ title: PipelineDetails
PortLink:
properties:
nodeUuid:
@@ -12930,23 +12522,6 @@ components:
SEE StateType for task state'
- Scheduler:
- properties:
- status:
- type: string
- title: Status
- description: The running status of the scheduler
- workers:
- anyOf:
- - additionalProperties:
- $ref: '#/components/schemas/Worker'
- type: object
- - type: 'null'
- title: Workers
- type: object
- required:
- - status
- title: Scheduler
SelectBox:
properties:
structure:
@@ -13472,29 +13047,6 @@ components:
- path
title: SimCoreFileLink
description: I/O port type to hold a link to a file in simcore S3 storage
- SimpleAuthentication:
- properties:
- type:
- type: string
- enum:
- - simple
- const: simple
- title: Type
- default: simple
- username:
- type: string
- title: Username
- password:
- type: string
- format: password
- title: Password
- writeOnly: true
- additionalProperties: false
- type: object
- required:
- - username
- - password
- title: SimpleAuthentication
Slideshow:
properties:
position:
@@ -13663,34 +13215,6 @@ components:
additionalProperties: true
type: object
title: StudyUI
- TLSAuthentication:
- properties:
- type:
- type: string
- enum:
- - tls
- const: tls
- title: Type
- default: tls
- tls_ca_file:
- type: string
- format: path
- title: Tls Ca File
- tls_client_cert:
- type: string
- format: path
- title: Tls Client Cert
- tls_client_key:
- type: string
- format: path
- title: Tls Client Key
- additionalProperties: false
- type: object
- required:
- - tls_ca_file
- - tls_client_cert
- - tls_client_key
- title: TLSAuthentication
TableSynchronisation:
properties:
dry_run:
@@ -13855,22 +13379,6 @@ components:
title: Priority
type: object
title: TagUpdate
- TaskCounts:
- properties:
- error:
- type: integer
- title: Error
- default: 0
- memory:
- type: integer
- title: Memory
- default: 0
- executing:
- type: integer
- title: Executing
- default: 0
- type: object
- title: TaskCounts
TaskGet:
properties:
task_id:
@@ -14220,12 +13728,6 @@ components:
- number
- e_tag
title: UploadedPart
- UsedResources:
- additionalProperties:
- type: number
- minimum: 0.0
- type: object
- title: UsedResources
UserNotification:
properties:
user_id:
@@ -14716,58 +14218,6 @@ components:
- url
- checkpoint_url
title: WorkbenchViewApiModel
- Worker:
- properties:
- id:
- type: string
- title: Id
- name:
- type: string
- title: Name
- resources:
- $ref: '#/components/schemas/DictModel_str_Annotated_float__Gt__'
- used_resources:
- $ref: '#/components/schemas/UsedResources'
- memory_limit:
- type: integer
- minimum: 0
- title: Memory Limit
- metrics:
- $ref: '#/components/schemas/WorkerMetrics'
- type: object
- required:
- - id
- - name
- - resources
- - used_resources
- - memory_limit
- - metrics
- title: Worker
- WorkerMetrics:
- properties:
- cpu:
- type: number
- title: Cpu
- description: consumed % of cpus
- memory:
- type: integer
- minimum: 0
- title: Memory
- description: consumed memory
- num_fds:
- type: integer
- title: Num Fds
- description: consumed file descriptors
- task_counts:
- $ref: '#/components/schemas/TaskCounts'
- description: task details
- type: object
- required:
- - cpu
- - memory
- - num_fds
- - task_counts
- title: WorkerMetrics
WorkspaceCreateBodyParams:
properties:
name:
diff --git a/services/web/server/src/simcore_service_webserver/application.py b/services/web/server/src/simcore_service_webserver/application.py
index 9e6e4f393d6..79477051ddb 100644
--- a/services/web/server/src/simcore_service_webserver/application.py
+++ b/services/web/server/src/simcore_service_webserver/application.py
@@ -1,6 +1,7 @@
""" Main application
"""
+
import logging
from pprint import pformat
from typing import Any
@@ -14,7 +15,6 @@
from .api_keys.plugin import setup_api_keys
from .application_settings import get_application_settings, setup_settings
from .catalog.plugin import setup_catalog
-from .clusters.plugin import setup_clusters
from .db.plugin import setup_db
from .db_listener.plugin import setup_db_listener
from .diagnostics.plugin import setup_diagnostics, setup_profiling_middleware
@@ -147,7 +147,6 @@ def create_application() -> web.Application:
setup_publications(app)
setup_studies_dispatcher(app)
setup_exporter(app)
- setup_clusters(app)
# NOTE: *last* events
app.on_startup.append(_welcome_banner)
diff --git a/services/web/server/src/simcore_service_webserver/application_settings.py b/services/web/server/src/simcore_service_webserver/application_settings.py
index e5aa008377a..ed4e519141b 100644
--- a/services/web/server/src/simcore_service_webserver/application_settings.py
+++ b/services/web/server/src/simcore_service_webserver/application_settings.py
@@ -268,7 +268,6 @@ class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings):
# These plugins only require (for the moment) an entry to toggle between enabled/disabled
WEBSERVER_ANNOUNCEMENTS: bool = False
WEBSERVER_API_KEYS: bool = True
- WEBSERVER_CLUSTERS: bool = False
WEBSERVER_DB_LISTENER: bool = True
WEBSERVER_FOLDERS: bool = True
WEBSERVER_GROUPS: bool = True
@@ -370,7 +369,6 @@ def _get_disabled_public_plugins(self) -> list[str]:
# TODO: more reliable definition of a "plugin" and whether it can be advertised or not
# (extra var? e.g. Field( ... , x_advertise_plugin=True))
public_plugin_candidates: Final = {
- "WEBSERVER_CLUSTERS",
"WEBSERVER_EXPORTER",
"WEBSERVER_FOLDERS",
"WEBSERVER_META_MODELING",
diff --git a/services/web/server/src/simcore_service_webserver/application_settings_utils.py b/services/web/server/src/simcore_service_webserver/application_settings_utils.py
index 9843e84afdd..162a927e0ad 100644
--- a/services/web/server/src/simcore_service_webserver/application_settings_utils.py
+++ b/services/web/server/src/simcore_service_webserver/application_settings_utils.py
@@ -157,7 +157,6 @@ def convert_to_app_config(app_settings: ApplicationSettings) -> dict[str, Any]:
app_settings.WEBSERVER_ACTIVITY, "PROMETHEUS_VTAG", None
),
},
- "clusters": {"enabled": app_settings.WEBSERVER_CLUSTERS},
"computation": {"enabled": app_settings.is_enabled("WEBSERVER_NOTIFICATIONS")},
"diagnostics": {"enabled": app_settings.is_enabled("WEBSERVER_DIAGNOSTICS")},
"director-v2": {"enabled": app_settings.is_enabled("WEBSERVER_DIRECTOR_V2")},
@@ -310,7 +309,6 @@ def _set_if_disabled(field_name, section):
_set_if_disabled("WEBSERVER_STATICWEB", section)
for settings_name in (
- "WEBSERVER_CLUSTERS",
"WEBSERVER_GARBAGE_COLLECTOR",
"WEBSERVER_GROUPS",
"WEBSERVER_META_MODELING",
diff --git a/services/web/server/src/simcore_service_webserver/clusters/__init__.py b/services/web/server/src/simcore_service_webserver/clusters/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/services/web/server/src/simcore_service_webserver/clusters/_handlers.py b/services/web/server/src/simcore_service_webserver/clusters/_handlers.py
deleted file mode 100644
index 0df3dd792a2..00000000000
--- a/services/web/server/src/simcore_service_webserver/clusters/_handlers.py
+++ /dev/null
@@ -1,200 +0,0 @@
-import functools
-import logging
-
-from aiohttp import web
-from models_library.api_schemas_webserver.clusters import (
- ClusterCreate,
- ClusterDetails,
- ClusterGet,
- ClusterPatch,
- ClusterPathParams,
- ClusterPing,
-)
-from pydantic import TypeAdapter
-from servicelib.aiohttp import status
-from servicelib.aiohttp.requests_validation import (
- parse_request_body_as,
- parse_request_path_parameters_as,
-)
-from servicelib.aiohttp.typing_extension import Handler
-
-from .._meta import api_version_prefix
-from ..director_v2 import api as director_v2_api
-from ..director_v2.exceptions import (
- ClusterAccessForbidden,
- ClusterNotFoundError,
- ClusterPingError,
- DirectorServiceError,
-)
-from ..login.decorators import login_required
-from ..models import RequestContext
-from ..security.decorators import permission_required
-from ..utils_aiohttp import envelope_json_response
-
-_logger = logging.getLogger(__name__)
-
-
-def _handle_cluster_exceptions(handler: Handler):
- # maps API exceptions to HTTP errors
- @functools.wraps(handler)
- async def wrapper(request: web.Request) -> web.StreamResponse:
- try:
- return await handler(request)
-
- except ClusterPingError as exc:
- raise web.HTTPUnprocessableEntity(reason=f"{exc}") from exc
-
- except ClusterNotFoundError as exc:
- raise web.HTTPNotFound(reason=f"{exc}") from exc
-
- except ClusterAccessForbidden as exc:
- raise web.HTTPForbidden(reason=f"{exc}") from exc
-
- except DirectorServiceError as exc:
- raise web.HTTPServiceUnavailable(reason=f"{exc}") from exc
-
- return wrapper
-
-
-#
-# API handlers
-#
-
-routes = web.RouteTableDef()
-
-
-@routes.post(f"/{api_version_prefix}/clusters", name="create_cluster")
-@login_required
-@permission_required("clusters.create")
-@_handle_cluster_exceptions
-async def create_cluster(request: web.Request) -> web.Response:
- req_ctx = RequestContext.model_validate(request)
- new_cluster = await parse_request_body_as(ClusterCreate, request)
-
- created_cluster = await director_v2_api.create_cluster(
- app=request.app,
- user_id=req_ctx.user_id,
- new_cluster=new_cluster,
- )
- return envelope_json_response(created_cluster, web.HTTPCreated)
-
-
-@routes.get(f"/{api_version_prefix}/clusters", name="list_clusters")
-@login_required
-@permission_required("clusters.read")
-@_handle_cluster_exceptions
-async def list_clusters(request: web.Request) -> web.Response:
- req_ctx = RequestContext.model_validate(request)
-
- clusters = await director_v2_api.list_clusters(
- app=request.app,
- user_id=req_ctx.user_id,
- )
- assert TypeAdapter(list[ClusterGet]).validate_python(clusters) is not None # nosec
- return envelope_json_response(clusters)
-
-
-@routes.get(f"/{api_version_prefix}/clusters/{{cluster_id}}", name="get_cluster")
-@login_required
-@permission_required("clusters.read")
-@_handle_cluster_exceptions
-async def get_cluster(request: web.Request) -> web.Response:
- req_ctx = RequestContext.model_validate(request)
- path_params = parse_request_path_parameters_as(ClusterPathParams, request)
-
- cluster = await director_v2_api.get_cluster(
- app=request.app,
- user_id=req_ctx.user_id,
- cluster_id=path_params.cluster_id,
- )
- assert ClusterGet.model_validate(cluster) is not None # nosec
- return envelope_json_response(cluster)
-
-
-@routes.patch(f"/{api_version_prefix}/clusters/{{cluster_id}}", name="update_cluster")
-@login_required
-@permission_required("clusters.write")
-@_handle_cluster_exceptions
-async def update_cluster(request: web.Request) -> web.Response:
- req_ctx = RequestContext.model_validate(request)
- path_params = parse_request_path_parameters_as(ClusterPathParams, request)
- cluster_patch = await parse_request_body_as(ClusterPatch, request)
-
- updated_cluster = await director_v2_api.update_cluster(
- app=request.app,
- user_id=req_ctx.user_id,
- cluster_id=path_params.cluster_id,
- cluster_patch=cluster_patch,
- )
-
- assert ClusterGet.model_validate(updated_cluster) is not None # nosec
- return envelope_json_response(updated_cluster)
-
-
-@routes.delete(f"/{api_version_prefix}/clusters/{{cluster_id}}", name="delete_cluster")
-@login_required
-@permission_required("clusters.delete")
-@_handle_cluster_exceptions
-async def delete_cluster(request: web.Request) -> web.Response:
- req_ctx = RequestContext.model_validate(request)
- path_params = parse_request_path_parameters_as(ClusterPathParams, request)
-
- await director_v2_api.delete_cluster(
- app=request.app,
- user_id=req_ctx.user_id,
- cluster_id=path_params.cluster_id,
- )
- return web.json_response(status=status.HTTP_204_NO_CONTENT)
-
-
-@routes.get(
- f"/{api_version_prefix}/clusters/{{cluster_id}}/details",
- name="get_cluster_details",
-)
-@login_required
-@permission_required("clusters.read")
-@_handle_cluster_exceptions
-async def get_cluster_details(request: web.Request) -> web.Response:
- req_ctx = RequestContext.model_validate(request)
- path_params = parse_request_path_parameters_as(ClusterPathParams, request)
-
- cluster_details = await director_v2_api.get_cluster_details(
- app=request.app,
- user_id=req_ctx.user_id,
- cluster_id=path_params.cluster_id,
- )
- assert ClusterDetails.model_validate(cluster_details) is not None # nosec
- return envelope_json_response(cluster_details)
-
-
-@routes.post(f"/{api_version_prefix}/clusters:ping", name="ping_cluster")
-@login_required
-@permission_required("clusters.read")
-@_handle_cluster_exceptions
-async def ping_cluster(request: web.Request) -> web.Response:
- cluster_ping = await parse_request_body_as(ClusterPing, request)
-
- await director_v2_api.ping_cluster(
- app=request.app,
- cluster_ping=cluster_ping,
- )
- return web.json_response(status=status.HTTP_204_NO_CONTENT)
-
-
-@routes.post(
- f"/{api_version_prefix}/clusters/{{cluster_id}}:ping",
- name="ping_cluster_cluster_id",
-)
-@login_required
-@permission_required("clusters.read")
-@_handle_cluster_exceptions
-async def ping_cluster_cluster_id(request: web.Request) -> web.Response:
- req_ctx = RequestContext.model_validate(request)
- path_params = parse_request_path_parameters_as(ClusterPathParams, request)
-
- await director_v2_api.ping_specific_cluster(
- app=request.app,
- user_id=req_ctx.user_id,
- cluster_id=path_params.cluster_id,
- )
- return web.json_response(status=status.HTTP_204_NO_CONTENT)
diff --git a/services/web/server/src/simcore_service_webserver/clusters/plugin.py b/services/web/server/src/simcore_service_webserver/clusters/plugin.py
deleted file mode 100644
index 59a406b731b..00000000000
--- a/services/web/server/src/simcore_service_webserver/clusters/plugin.py
+++ /dev/null
@@ -1,34 +0,0 @@
-""" clusters app module setup
-
- Allows a user to manage clusters depending on user group(s) access rights:
- - create, modify, delete clusters
- - monitor clusters
- - send computational jobs to clusters
-
-"""
-import logging
-
-from aiohttp import web
-from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup
-
-from .._constants import APP_SETTINGS_KEY
-from ..director_v2 import plugin as director_v2
-from . import _handlers
-
-_logger = logging.getLogger(__name__)
-
-
-@app_module_setup(
- "simcore_service_webserver.clusters",
- ModuleCategory.ADDON,
- settings_name="WEBSERVER_CLUSTERS",
- logger=_logger,
-)
-def setup_clusters(app: web.Application):
- director_v2.setup_director_v2(app)
- assert app[APP_SETTINGS_KEY].WEBSERVER_CLUSTERS # nosec
-
- app.add_routes(_handlers.routes)
-
-
-__all__: tuple[str, ...] = ("setup_clusters",)
diff --git a/services/web/server/src/simcore_service_webserver/director_v2/_core_computations.py b/services/web/server/src/simcore_service_webserver/director_v2/_core_computations.py
index c034f93a660..7785f7936d2 100644
--- a/services/web/server/src/simcore_service_webserver/director_v2/_core_computations.py
+++ b/services/web/server/src/simcore_service_webserver/director_v2/_core_computations.py
@@ -9,24 +9,14 @@
from uuid import UUID
from aiohttp import web
-from common_library.serialization import model_dump_with_secrets
-from models_library.api_schemas_directorv2.clusters import (
- ClusterCreate,
- ClusterDetails,
- ClusterGet,
- ClusterPatch,
- ClusterPing,
-)
from models_library.api_schemas_directorv2.comp_tasks import (
TasksOutputs,
TasksSelection,
)
-from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_pipeline import ComputationTask
from models_library.users import UserID
from models_library.utils.fastapi_encoders import jsonable_encoder
-from pydantic import TypeAdapter
from pydantic.types import PositiveInt
from servicelib.aiohttp import status
from servicelib.logging_utils import log_decorator
@@ -34,14 +24,7 @@
from ..products.api import get_product
from ._api_utils import get_wallet_info
from ._core_base import DataType, request_director_v2
-from .exceptions import (
- ClusterAccessForbidden,
- ClusterDefinedPingError,
- ClusterNotFoundError,
- ClusterPingError,
- ComputationNotFoundError,
- DirectorServiceError,
-)
+from .exceptions import ComputationNotFoundError, DirectorServiceError
from .settings import DirectorV2Settings, get_plugin_settings
_logger = logging.getLogger(__name__)
@@ -229,200 +212,6 @@ async def delete_pipeline(
)
-#
-# CLUSTER RESOURCE ----------------------
-#
-
-
-@log_decorator(logger=_logger)
-async def create_cluster(
- app: web.Application, user_id: UserID, new_cluster: ClusterCreate
-) -> DataType:
- settings: DirectorV2Settings = get_plugin_settings(app)
- cluster = await request_director_v2(
- app,
- "POST",
- url=(settings.base_url / "clusters").update_query(user_id=int(user_id)),
- expected_status=web.HTTPCreated,
- data=model_dump_with_secrets(
- new_cluster, show_secrets=True, by_alias=True, exclude_unset=True
- ),
- )
- assert isinstance(cluster, dict) # nosec
- assert ClusterGet.model_validate(cluster) is not None # nosec
- return cluster
-
-
-async def list_clusters(app: web.Application, user_id: UserID) -> list[DataType]:
- settings: DirectorV2Settings = get_plugin_settings(app)
- clusters = await request_director_v2(
- app,
- "GET",
- url=(settings.base_url / "clusters").update_query(user_id=int(user_id)),
- expected_status=web.HTTPOk,
- )
-
- assert isinstance(clusters, list) # nosec
- assert TypeAdapter(list[ClusterGet]).validate_python(clusters) is not None # nosec
- return clusters
-
-
-async def get_cluster(
- app: web.Application, user_id: UserID, cluster_id: ClusterID
-) -> DataType:
- settings: DirectorV2Settings = get_plugin_settings(app)
- cluster = await request_director_v2(
- app,
- "GET",
- url=(settings.base_url / f"clusters/{cluster_id}").update_query(
- user_id=int(user_id)
- ),
- expected_status=web.HTTPOk,
- on_error={
- status.HTTP_404_NOT_FOUND: (
- ClusterNotFoundError,
- {"cluster_id": cluster_id},
- ),
- status.HTTP_403_FORBIDDEN: (
- ClusterAccessForbidden,
- {"cluster_id": cluster_id},
- ),
- },
- )
-
- assert isinstance(cluster, dict) # nosec
- assert ClusterGet.model_validate(cluster) is not None # nosec
- return cluster
-
-
-async def get_cluster_details(
- app: web.Application, user_id: UserID, cluster_id: ClusterID
-) -> DataType:
- settings: DirectorV2Settings = get_plugin_settings(app)
-
- cluster = await request_director_v2(
- app,
- "GET",
- url=(settings.base_url / f"clusters/{cluster_id}/details").update_query(
- user_id=int(user_id)
- ),
- expected_status=web.HTTPOk,
- on_error={
- status.HTTP_404_NOT_FOUND: (
- ClusterNotFoundError,
- {"cluster_id": cluster_id},
- ),
- status.HTTP_403_FORBIDDEN: (
- ClusterAccessForbidden,
- {"cluster_id": cluster_id},
- ),
- },
- )
- assert isinstance(cluster, dict) # nosec
- assert ClusterDetails.model_validate(cluster) is not None # nosec
- return cluster
-
-
-async def update_cluster(
- app: web.Application,
- user_id: UserID,
- cluster_id: ClusterID,
- cluster_patch: ClusterPatch,
-) -> DataType:
- settings: DirectorV2Settings = get_plugin_settings(app)
- cluster = await request_director_v2(
- app,
- "PATCH",
- url=(settings.base_url / f"clusters/{cluster_id}").update_query(
- user_id=int(user_id)
- ),
- expected_status=web.HTTPOk,
- data=model_dump_with_secrets(
- cluster_patch, show_secrets=True, by_alias=True, exclude_none=True
- ),
- on_error={
- status.HTTP_404_NOT_FOUND: (
- ClusterNotFoundError,
- {"cluster_id": cluster_id},
- ),
- status.HTTP_403_FORBIDDEN: (
- ClusterAccessForbidden,
- {"cluster_id": cluster_id},
- ),
- },
- )
-
- assert isinstance(cluster, dict) # nosec
- assert ClusterGet.model_validate(cluster) is not None # nosec
- return cluster
-
-
-async def delete_cluster(
- app: web.Application, user_id: UserID, cluster_id: ClusterID
-) -> None:
- settings: DirectorV2Settings = get_plugin_settings(app)
- await request_director_v2(
- app,
- "DELETE",
- url=(settings.base_url / f"clusters/{cluster_id}").update_query(
- user_id=int(user_id)
- ),
- expected_status=web.HTTPNoContent,
- on_error={
- status.HTTP_404_NOT_FOUND: (
- ClusterNotFoundError,
- {"cluster_id": cluster_id},
- ),
- status.HTTP_403_FORBIDDEN: (
- ClusterAccessForbidden,
- {"cluster_id": cluster_id},
- ),
- },
- )
-
-
-async def ping_cluster(app: web.Application, cluster_ping: ClusterPing) -> None:
- settings: DirectorV2Settings = get_plugin_settings(app)
- await request_director_v2(
- app,
- "POST",
- url=settings.base_url / "clusters:ping",
- expected_status=web.HTTPNoContent,
- data=model_dump_with_secrets(
- cluster_ping,
- show_secrets=True,
- by_alias=True,
- exclude_unset=True,
- ),
- on_error={
- status.HTTP_422_UNPROCESSABLE_ENTITY: (
- ClusterPingError,
- {"endpoint": f"{cluster_ping.endpoint}"},
- )
- },
- )
-
-
-async def ping_specific_cluster(
- app: web.Application, user_id: UserID, cluster_id: ClusterID
-) -> None:
- settings: DirectorV2Settings = get_plugin_settings(app)
- await request_director_v2(
- app,
- "POST",
- url=(settings.base_url / f"clusters/{cluster_id}:ping").update_query(
- user_id=int(user_id)
- ),
- expected_status=web.HTTPNoContent,
- on_error={
- status.HTTP_422_UNPROCESSABLE_ENTITY: (
- ClusterDefinedPingError,
- {"cluster_id": f"{cluster_id}"},
- )
- },
- )
-
-
#
# COMPUTATIONS TASKS RESOURCE ----------------------
#
diff --git a/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py b/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py
index 1a999b35c0e..aa3914ee6df 100644
--- a/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py
+++ b/services/web/server/src/simcore_service_webserver/director_v2/_handlers.py
@@ -4,12 +4,10 @@
from aiohttp import web
from common_library.json_serialization import json_dumps
+from models_library.api_schemas_directorv2.comp_tasks import ComputationGet
from models_library.api_schemas_webserver.computations import ComputationStart
-from models_library.clusters import ClusterID
from models_library.projects import ProjectID
-from models_library.users import UserID
from pydantic import BaseModel, Field, TypeAdapter, ValidationError
-from pydantic.types import NonNegativeInt
from servicelib.aiohttp import status
from servicelib.aiohttp.rest_responses import create_http_error, exception_to_response
from servicelib.aiohttp.web_exceptions_extension import get_http_error_class_or_none
@@ -69,7 +67,6 @@ async def start_computation(request: web.Request) -> web.Response:
subgraph: set[str] = set()
force_restart: bool = False # NOTE: deprecate this entry
- cluster_id: NonNegativeInt = 0
if request.can_read_body:
body = await request.json()
@@ -79,7 +76,6 @@ async def start_computation(request: web.Request) -> web.Response:
subgraph = body.get("subgraph", [])
force_restart = bool(body.get("force_restart", force_restart))
- cluster_id = body.get("cluster_id")
simcore_user_agent = request.headers.get(
X_SIMCORE_USER_AGENT, UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE
@@ -106,9 +102,6 @@ async def start_computation(request: web.Request) -> web.Response:
"start_pipeline": True,
"subgraph": list(subgraph), # sets are not natively json serializable
"force_restart": force_restart,
- "cluster_id": (
- None if group_properties.use_on_demand_clusters else cluster_id
- ),
"simcore_user_agent": simcore_user_agent,
"use_on_demand_clusters": group_properties.use_on_demand_clusters,
"wallet_info": wallet_info,
@@ -212,10 +205,6 @@ async def stop_computation(request: web.Request) -> web.Response:
)
-class ComputationTaskGet(BaseModel):
- cluster_id: ClusterID | None
-
-
@routes.get(f"/{VTAG}/computations/{{project_id}}", name="get_computation")
@login_required
@permission_required("services.pipeline.*")
@@ -225,7 +214,7 @@ async def get_computation(request: web.Request) -> web.Response:
run_policy = get_project_run_policy(request.app)
assert run_policy # nosec
- user_id = UserID(request[RQT_USERID_KEY])
+ user_id = request[RQT_USERID_KEY]
project_id = ProjectID(request.match_info["project_id"])
try:
@@ -233,7 +222,7 @@ async def get_computation(request: web.Request) -> web.Response:
request, project_id
)
_logger.debug("Project %s will get %d variants", project_id, len(project_ids))
- list_computation_tasks = TypeAdapter(list[ComputationTaskGet]).validate_python(
+ list_computation_tasks = TypeAdapter(list[ComputationGet]).validate_python(
await asyncio.gather(
*[
computations.get(project_id=pid, user_id=user_id)
@@ -242,12 +231,7 @@ async def get_computation(request: web.Request) -> web.Response:
),
)
assert len(list_computation_tasks) == len(project_ids) # nosec
- # NOTE: until changed all the versions of a meta project shall use the same cluster
- # this should fail the day that changes
- assert all(
- c.cluster_id == list_computation_tasks[0].cluster_id
- for c in list_computation_tasks
- )
+
return web.json_response(
data={"data": list_computation_tasks[0].model_dump(by_alias=True)},
dumps=json_dumps,
diff --git a/services/web/server/src/simcore_service_webserver/director_v2/_models.py b/services/web/server/src/simcore_service_webserver/director_v2/_models.py
deleted file mode 100644
index 966229c4221..00000000000
--- a/services/web/server/src/simcore_service_webserver/director_v2/_models.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from models_library.clusters import (
- CLUSTER_ADMIN_RIGHTS,
- CLUSTER_MANAGER_RIGHTS,
- CLUSTER_USER_RIGHTS,
- BaseCluster,
- ClusterAccessRights,
- ClusterTypeInModel,
- ExternalClusterAuthentication,
-)
-from models_library.users import GroupID
-from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field, field_validator
-from pydantic.networks import AnyUrl, HttpUrl
-from simcore_postgres_database.models.clusters import ClusterType
-
-
-class ClusterPing(BaseModel):
- endpoint: AnyHttpUrl
- authentication: ExternalClusterAuthentication
-
-
-_DEFAULT_THUMBNAILS = {
- f"{ClusterTypeInModel.AWS}": "https://upload.wikimedia.org/wikipedia/commons/thumb/9/93/Amazon_Web_Services_Logo.svg/250px-Amazon_Web_Services_Logo.svg.png",
- f"{ClusterTypeInModel.ON_PREMISE}": "https://upload.wikimedia.org/wikipedia/commons/thumb/a/ac/Crystal_Clear_app_network_local.png/120px-Crystal_Clear_app_network_local.png",
-}
-
-
-class ClusterCreate(BaseCluster):
- owner: GroupID | None # type: ignore[assignment]
- authentication: ExternalClusterAuthentication
- access_rights: dict[GroupID, ClusterAccessRights] = Field(
- alias="accessRights", default_factory=dict
- )
-
- @field_validator("thumbnail", mode="before")
- @classmethod
- def set_default_thumbnail_if_empty(cls, v, values):
- if v is None and (
- cluster_type := values.get("type", f"{ClusterTypeInModel.ON_PREMISE}")
- ):
- return _DEFAULT_THUMBNAILS[f"{cluster_type}"]
- return v
-
- model_config = ConfigDict(
- json_schema_extra={
- "examples": [
- {
- "name": "My awesome cluster",
- "type": f"{ClusterType.ON_PREMISE}", # can use also values from equivalent enum
- "endpoint": "https://registry.osparc-development.fake.dev",
- "authentication": {
- "type": "simple",
- "username": "someuser",
- "password": "somepassword",
- },
- },
- {
- "name": "My AWS cluster",
- "description": "a AWS cluster administered by me",
- "type": f"{ClusterType.AWS}",
- "owner": 154,
- "endpoint": "https://registry.osparc-development.fake.dev",
- "authentication": {
- "type": "simple",
- "username": "someuser",
- "password": "somepassword",
- },
- "access_rights": {
- 154: CLUSTER_ADMIN_RIGHTS.model_dump(), # type:ignore[dict-item]
- 12: CLUSTER_MANAGER_RIGHTS.model_dump(), # type:ignore[dict-item]
- 7899: CLUSTER_USER_RIGHTS.model_dump(), # type:ignore[dict-item]
- },
- },
- ]
- }
- )
-
-
-class ClusterPatch(BaseCluster):
- name: str | None # type: ignore[assignment]
- description: str | None
- type: ClusterType | None # type: ignore[assignment]
- owner: GroupID | None # type: ignore[assignment]
- thumbnail: HttpUrl | None
- endpoint: AnyUrl | None # type: ignore[assignment]
- authentication: ExternalClusterAuthentication | None # type: ignore[assignment]
- access_rights: dict[GroupID, ClusterAccessRights] | None = Field( # type: ignore[assignment]
- alias="accessRights"
- )
diff --git a/services/web/server/src/simcore_service_webserver/director_v2/api.py b/services/web/server/src/simcore_service_webserver/director_v2/api.py
index 4d1efd822f6..2de6b49e4a2 100644
--- a/services/web/server/src/simcore_service_webserver/director_v2/api.py
+++ b/services/web/server/src/simcore_service_webserver/director_v2/api.py
@@ -3,27 +3,18 @@
PLEASE avoid importing from any other module to access this plugin's functionality
"""
-
from ._abc import (
AbstractProjectRunPolicy,
get_project_run_policy,
set_project_run_policy,
)
from ._core_computations import (
- create_cluster,
create_or_update_pipeline,
- delete_cluster,
delete_pipeline,
get_batch_tasks_outputs,
- get_cluster,
- get_cluster_details,
get_computation_task,
is_pipeline_running,
- list_clusters,
- ping_cluster,
- ping_specific_cluster,
stop_pipeline,
- update_cluster,
)
from ._core_dynamic_services import (
get_project_inactivity,
@@ -34,40 +25,26 @@
update_dynamic_service_networks_in_project,
)
from ._core_utils import is_healthy
-from .exceptions import (
- ClusterAccessForbidden,
- ClusterNotFoundError,
- DirectorServiceError,
-)
+from .exceptions import DirectorServiceError
# director-v2 module internal API
__all__: tuple[str, ...] = (
"AbstractProjectRunPolicy",
- "ClusterAccessForbidden",
- "ClusterNotFoundError",
- "create_cluster",
"create_or_update_pipeline",
- "delete_cluster",
"delete_pipeline",
"DirectorServiceError",
"get_batch_tasks_outputs",
- "get_cluster_details",
- "get_cluster",
"get_computation_task",
"get_project_inactivity",
"get_project_run_policy",
"is_healthy",
"is_pipeline_running",
- "list_clusters",
"list_dynamic_services",
- "ping_cluster",
- "ping_specific_cluster",
"request_retrieve_dyn_service",
"restart_dynamic_service",
"retrieve",
"set_project_run_policy",
"stop_pipeline",
- "update_cluster",
"update_dynamic_service_networks_in_project",
)
# nopycln: file
diff --git a/services/web/server/src/simcore_service_webserver/director_v2/exceptions.py b/services/web/server/src/simcore_service_webserver/director_v2/exceptions.py
index 8fe3a2a0478..2301815f754 100644
--- a/services/web/server/src/simcore_service_webserver/director_v2/exceptions.py
+++ b/services/web/server/src/simcore_service_webserver/director_v2/exceptions.py
@@ -20,29 +20,5 @@ class ComputationNotFoundError(DirectorServiceError):
msg_template = "Computation '{project_id}' not found"
-class ClusterNotFoundError(DirectorServiceError):
- """Cluster was not found in director-v2"""
-
- msg_template = "Cluster '{cluster_id}' not found"
-
-
-class ClusterAccessForbidden(DirectorServiceError):
- """Cluster access is forbidden"""
-
- msg_template = "Cluster '{cluster_id}' access forbidden!"
-
-
-class ClusterPingError(DirectorServiceError):
- """Cluster ping failed"""
-
- msg_template = "Connection to cluster in '{endpoint}' failed, received '{reason}'"
-
-
-class ClusterDefinedPingError(DirectorServiceError):
- """Cluster ping failed"""
-
- msg_template = "Connection to cluster '{cluster_id}' failed, received '{reason}'"
-
-
class ServiceWaitingForManualIntervention(DirectorServiceError):
msg_template = "Service '{service_uuid}' is waiting for user manual intervention"
diff --git a/services/web/server/tests/unit/isolated/test_application_settings.py b/services/web/server/tests/unit/isolated/test_application_settings.py
index afedd1f0149..da7fbf2f34f 100644
--- a/services/web/server/tests/unit/isolated/test_application_settings.py
+++ b/services/web/server/tests/unit/isolated/test_application_settings.py
@@ -61,7 +61,7 @@ def test_settings_to_client_statics(app_settings: ApplicationSettings):
# special alias
assert statics["stackName"] == "master-simcore"
- assert statics["pluginsDisabled"] == ["WEBSERVER_CLUSTERS"]
+ assert statics["pluginsDisabled"] == []
def test_settings_to_client_statics_plugins(
@@ -100,13 +100,13 @@ def test_settings_to_client_statics_plugins(
assert statics["vcsReleaseTag"]
assert TypeAdapter(HttpUrl).validate_python(statics["vcsReleaseUrl"])
- assert set(statics["pluginsDisabled"]) == (disable_plugins | {"WEBSERVER_CLUSTERS"})
+ assert set(statics["pluginsDisabled"]) == (disable_plugins)
@pytest.mark.parametrize("is_dev_feature_enabled", [True, False])
@pytest.mark.parametrize(
"plugin_name",
- ["WEBSERVER_META_MODELING", "WEBSERVER_VERSION_CONTROL"]
+ ["WEBSERVER_META_MODELING", "WEBSERVER_VERSION_CONTROL"],
# NOTE: this is the list in _enable_only_if_dev_features_allowed
)
def test_disabled_plugins_settings_to_client_statics(
diff --git a/services/web/server/tests/unit/with_dbs/01/clusters/conftest.py b/services/web/server/tests/unit/with_dbs/01/clusters/conftest.py
deleted file mode 100644
index afc0a5aada7..00000000000
--- a/services/web/server/tests/unit/with_dbs/01/clusters/conftest.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import pytest
-from pytest_simcore.helpers.typing_env import EnvVarsDict
-
-
-@pytest.fixture
-def enable_webserver_clusters_feature(
- app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch
-) -> EnvVarsDict:
- monkeypatch.setenv("WEBSERVER_CLUSTERS", "1")
- return app_environment | {"WEBSERVER_CLUSTERS": "1"}
diff --git a/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_handlers.py b/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_handlers.py
deleted file mode 100644
index e75aee0866f..00000000000
--- a/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_handlers.py
+++ /dev/null
@@ -1,531 +0,0 @@
-# pylint:disable=unused-variable
-# pylint:disable=unused-argument
-# pylint:disable=redefined-outer-name
-# pylint:disable=no-value-for-parameter
-# pylint:disable=too-many-arguments
-# pylint:disable=too-many-statements
-
-
-import json
-import random
-from http import HTTPStatus
-from typing import Any
-
-import hypothesis
-import hypothesis.provisional
-import pytest
-from aiohttp.test_utils import TestClient
-from faker import Faker
-from hypothesis import strategies as st
-from models_library.api_schemas_webserver.clusters import (
- ClusterCreate,
- ClusterPatch,
- ClusterPing,
-)
-from models_library.clusters import (
- CLUSTER_ADMIN_RIGHTS,
- Cluster,
- ClusterTypeInModel,
- SimpleAuthentication,
-)
-from pydantic import HttpUrl, TypeAdapter
-from pydantic_core import Url
-from pytest_mock import MockerFixture
-from pytest_simcore.helpers.assert_checks import assert_status
-from pytest_simcore.helpers.webserver_parametrizations import ( # nopycln: import
- ExpectedResponse,
- standard_role_response,
-)
-from servicelib.aiohttp import status
-from simcore_postgres_database.models.clusters import ClusterType
-from simcore_postgres_database.models.users import UserRole
-from simcore_service_webserver.director_v2.exceptions import (
- ClusterAccessForbidden,
- ClusterNotFoundError,
- ClusterPingError,
- DirectorServiceError,
-)
-
-
-@st.composite
-def http_url_strategy(draw):
- return TypeAdapter(HttpUrl).validate_python(draw(hypothesis.provisional.urls()))
-
-
-@st.composite
-def cluster_patch_strategy(draw):
- return ClusterPatch(
- name=draw(st.text()),
- description=draw(st.text()),
- owner=draw(st.integers(min_value=1)),
- type=draw(st.sampled_from(ClusterTypeInModel)),
- thumbnail=draw(http_url_strategy()),
- endpoint=draw(http_url_strategy()),
- authentication=None,
- accessRights={},
- )
-
-
-st.register_type_strategy(ClusterPatch, cluster_patch_strategy())
-st.register_type_strategy(Url, http_url_strategy())
-
-
-@pytest.fixture
-def mocked_director_v2_api(mocker: MockerFixture):
- mocked_director_v2_api = mocker.patch(
- "simcore_service_webserver.clusters._handlers.director_v2_api", autospec=True
- )
-
- mocked_director_v2_api.create_cluster.return_value = random.choice(
- Cluster.model_config["json_schema_extra"]["examples"]
- )
- mocked_director_v2_api.list_clusters.return_value = []
- mocked_director_v2_api.get_cluster.return_value = random.choice(
- Cluster.model_config["json_schema_extra"]["examples"]
- )
- mocked_director_v2_api.get_cluster_details.return_value = {
- "scheduler": {"status": "running"},
- "dashboardLink": "https://link.to.dashboard",
- }
- mocked_director_v2_api.update_cluster.return_value = random.choice(
- Cluster.model_config["json_schema_extra"]["examples"]
- )
- mocked_director_v2_api.delete_cluster.return_value = None
- mocked_director_v2_api.ping_cluster.return_value = None
- mocked_director_v2_api.ping_specific_cluster.return_value = None
-
-
-@pytest.fixture
-def mocked_director_v2_with_error(
- mocker: MockerFixture, faker: Faker, director_v2_error: type[DirectorServiceError]
-):
- mocked_director_v2_api = mocker.patch(
- "simcore_service_webserver.clusters._handlers.director_v2_api", autospec=True
- )
- error = director_v2_error(
- status=status.HTTP_503_SERVICE_UNAVAILABLE,
- reason="no director-v2",
- url=faker.uri(),
- cluster_id=faker.pyint(min_value=1),
- endpoint=faker.uri(),
- )
- mocked_director_v2_api.create_cluster.side_effect = error
- mocked_director_v2_api.list_clusters.side_effect = error
- mocked_director_v2_api.get_cluster.side_effect = error
- mocked_director_v2_api.get_cluster_details.side_effect = error
- mocked_director_v2_api.update_cluster.side_effect = error
- mocked_director_v2_api.delete_cluster.side_effect = error
- mocked_director_v2_api.ping_cluster.side_effect = error
- mocked_director_v2_api.ping_specific_cluster.side_effect = error
-
-
-@pytest.fixture()
-def cluster_create(faker: Faker) -> ClusterCreate:
- instance = ClusterCreate(
- name=faker.name(),
- endpoint=faker.uri(),
- type=random.choice(list(ClusterType)),
- owner=faker.pyint(),
- authentication=SimpleAuthentication(
- username=faker.user_name(), password=faker.password()
- ),
- )
- assert instance
- return instance
-
-
-@pytest.mark.parametrize(*standard_role_response(), ids=str)
-async def test_create_cluster(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_api,
- client: TestClient,
- logged_user: dict[str, Any],
- faker: Faker,
- cluster_create: ClusterCreate,
- user_role: UserRole,
- expected: ExpectedResponse,
-):
- cluster_create.access_rights[logged_user["id"]] = CLUSTER_ADMIN_RIGHTS
- print(f"--> creating {cluster_create=!r}")
- # check we can create a cluster
- assert client.app
- url = client.app.router["create_cluster"].url_for()
- rsp = await client.post(
- f"{url}",
- json=json.loads(cluster_create.model_dump_json(by_alias=True)),
- )
- data, error = await assert_status(
- rsp,
- (
- expected.forbidden if user_role == UserRole.USER else expected.created
- ), # only accessible for TESTER
- )
- if error:
- # we are done here
- return
-
- created_cluster = Cluster.model_validate(data)
- assert created_cluster
-
-
-@pytest.mark.parametrize(*standard_role_response(), ids=str)
-async def test_list_clusters(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_api,
- client: TestClient,
- logged_user: dict[str, Any],
- expected: ExpectedResponse,
-):
- # check empty clusters
- assert client.app
- url = client.app.router["list_clusters"].url_for()
- rsp = await client.get(f"{url}")
- data, error = await assert_status(rsp, expected.ok)
- if not error:
- assert isinstance(data, list)
-
-
-@pytest.mark.parametrize(*standard_role_response(), ids=str)
-async def test_get_cluster(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_api,
- client: TestClient,
- logged_user: dict[str, Any],
- user_role: UserRole,
- expected: ExpectedResponse,
-):
- # check not found
- assert client.app
- url = client.app.router["get_cluster"].url_for(cluster_id=f"{25}")
- rsp = await client.get(f"{url}")
- data, error = await assert_status(rsp, expected.ok)
- if not error:
- assert isinstance(data, dict)
-
-
-@pytest.mark.parametrize(*standard_role_response(), ids=str)
-async def test_get_cluster_details(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_api,
- client: TestClient,
- logged_user: dict[str, Any],
- user_role: UserRole,
- expected: ExpectedResponse,
-):
- # check not found
- assert client.app
- url = client.app.router["get_cluster_details"].url_for(cluster_id=f"{25}")
- rsp = await client.get(f"{url}")
- data, error = await assert_status(rsp, expected.ok)
- if not error:
- assert isinstance(data, dict)
-
-
-@pytest.mark.parametrize(*standard_role_response(), ids=str)
-@hypothesis.given(cluster_patch=st.from_type(ClusterPatch))
-@hypothesis.settings(
- # hypothesis does not play well with fixtures, hence the warning
- # it will create several tests but not replay the fixtures
- suppress_health_check=[
- hypothesis.HealthCheck.function_scoped_fixture,
- hypothesis.HealthCheck.too_slow,
- ],
- deadline=None,
-)
-async def test_update_cluster(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_api,
- client: TestClient,
- logged_user: dict[str, Any],
- cluster_patch: ClusterPatch,
- expected: ExpectedResponse,
-):
- print(f"--> updating {cluster_patch=!r}")
- _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True}
- assert client.app
- url = client.app.router["update_cluster"].url_for(cluster_id=f"{25}")
- rsp = await client.patch(
- f"{url}",
- json=json.loads(cluster_patch.model_dump_json(**_PATCH_EXPORT)),
- )
- data, error = await assert_status(rsp, expected.ok)
- if not error:
- assert isinstance(data, dict)
-
-
-@pytest.mark.parametrize(*standard_role_response(), ids=str)
-async def test_delete_cluster(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_api,
- client: TestClient,
- logged_user: dict[str, Any],
- expected: ExpectedResponse,
-):
- assert client.app
- url = client.app.router["delete_cluster"].url_for(cluster_id=f"{25}")
- rsp = await client.delete(f"{url}")
- data, error = await assert_status(rsp, expected.no_content)
- if not error:
- assert data is None
-
-
-@pytest.mark.parametrize(*standard_role_response(), ids=str)
-@hypothesis.given(cluster_ping=st.from_type(ClusterPing))
-@hypothesis.settings(
- # hypothesis does not play well with fixtures, hence the warning
- # it will create several tests but not replay the fixtures
- suppress_health_check=[
- hypothesis.HealthCheck.function_scoped_fixture,
- hypothesis.HealthCheck.too_slow,
- ],
- deadline=None,
-)
-async def test_ping_cluster(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_api,
- client: TestClient,
- logged_user: dict[str, Any],
- expected: ExpectedResponse,
- cluster_ping: ClusterPing,
-):
- print(f"--> pinging {cluster_ping=!r}")
- assert client.app
- url = client.app.router["ping_cluster"].url_for()
- rsp = await client.post(
- f"{url}", json=json.loads(cluster_ping.model_dump_json(by_alias=True))
- )
- data, error = await assert_status(rsp, expected.no_content)
- if not error:
- assert data is None
-
-
-@pytest.mark.parametrize(*standard_role_response(), ids=str)
-async def test_ping_specific_cluster(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_api,
- client: TestClient,
- logged_user: dict[str, Any],
- faker: Faker,
- expected: ExpectedResponse,
-):
- assert client.app
- url = client.app.router["ping_cluster_cluster_id"].url_for(
- cluster_id=f"{faker.pyint(min_value=1)}"
- )
- rsp = await client.post(f"{url}")
- data, error = await assert_status(rsp, expected.no_content)
- if not error:
- assert data is None
-
-
-@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str)
-@pytest.mark.parametrize(
- "director_v2_error, expected_http_error",
- [
- (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE),
- ],
-)
-async def test_create_cluster_with_error(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_with_error,
- client: TestClient,
- logged_user: dict[str, Any],
- faker: Faker,
- cluster_create: ClusterCreate,
- expected_http_error: HTTPStatus,
-):
- cluster_create.access_rights[logged_user["id"]] = CLUSTER_ADMIN_RIGHTS
- print(f"--> creating {cluster_create=!r}")
- # check we can create a cluster
- assert client.app
- url = client.app.router["create_cluster"].url_for()
- rsp = await client.post(
- f"{url}",
- json=json.loads(cluster_create.model_dump_json(by_alias=True)),
- )
- data, error = await assert_status(rsp, expected_http_error)
- assert not data
- assert error
-
-
-@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str)
-@pytest.mark.parametrize(
- "director_v2_error, expected_http_error",
- [
- (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE),
- ],
-)
-async def test_list_clusters_with_error(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_with_error,
- client: TestClient,
- logged_user: dict[str, Any],
- expected_http_error: HTTPStatus,
-):
- # check empty clusters
- assert client.app
- url = client.app.router["list_clusters"].url_for()
- rsp = await client.get(f"{url}")
- data, error = await assert_status(rsp, expected_http_error)
- assert not data
- assert error
-
-
-@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str)
-@pytest.mark.parametrize(
- "director_v2_error, expected_http_error",
- [
- (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE),
- (ClusterNotFoundError, status.HTTP_404_NOT_FOUND),
- (ClusterAccessForbidden, status.HTTP_403_FORBIDDEN),
- ],
-)
-async def test_get_cluster_with_error(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_with_error,
- client: TestClient,
- logged_user: dict[str, Any],
- expected_http_error: HTTPStatus,
-):
- # check empty clusters
- assert client.app
- url = client.app.router["get_cluster"].url_for(cluster_id=f"{25}")
- rsp = await client.get(f"{url}")
- data, error = await assert_status(rsp, expected_http_error)
- assert not data
- assert error
-
-
-@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str)
-@pytest.mark.parametrize(
- "director_v2_error, expected_http_error",
- [
- (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE),
- (ClusterNotFoundError, status.HTTP_404_NOT_FOUND),
- (ClusterAccessForbidden, status.HTTP_403_FORBIDDEN),
- ],
-)
-async def test_get_cluster_details_with_error(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_with_error,
- client: TestClient,
- logged_user: dict[str, Any],
- expected_http_error: HTTPStatus,
-):
- # check not found
- assert client.app
- url = client.app.router["get_cluster_details"].url_for(cluster_id=f"{25}")
- rsp = await client.get(f"{url}")
- data, error = await assert_status(rsp, expected_http_error)
- assert not data
- assert error
-
-
-@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str)
-@pytest.mark.parametrize(
- "director_v2_error, expected_http_error",
- [
- (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE),
- (ClusterNotFoundError, status.HTTP_404_NOT_FOUND),
- (ClusterAccessForbidden, status.HTTP_403_FORBIDDEN),
- ],
-)
-async def test_update_cluster_with_error(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_with_error,
- client: TestClient,
- logged_user: dict[str, Any],
- expected_http_error: HTTPStatus,
-):
- _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True}
- assert client.app
- url = client.app.router["update_cluster"].url_for(cluster_id=f"{25}")
- rsp = await client.patch(
- f"{url}",
- json=json.loads(ClusterPatch().model_dump_json(**_PATCH_EXPORT)),
- )
- data, error = await assert_status(rsp, expected_http_error)
- assert not data
- assert error
-
-
-@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str)
-@pytest.mark.parametrize(
- "director_v2_error, expected_http_error",
- [
- (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE),
- (ClusterNotFoundError, status.HTTP_404_NOT_FOUND),
- (ClusterAccessForbidden, status.HTTP_403_FORBIDDEN),
- ],
-)
-async def test_delete_cluster_with_error(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_with_error,
- client: TestClient,
- logged_user: dict[str, Any],
- expected_http_error: HTTPStatus,
-):
- assert client.app
- url = client.app.router["delete_cluster"].url_for(cluster_id=f"{25}")
- rsp = await client.delete(f"{url}")
- data, error = await assert_status(rsp, expected_http_error)
- assert not data
- assert error
-
-
-@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str)
-@pytest.mark.parametrize(
- "director_v2_error, expected_http_error",
- [
- (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE),
- (ClusterPingError, status.HTTP_422_UNPROCESSABLE_ENTITY),
- ],
-)
-async def test_ping_cluster_with_error(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_with_error,
- client: TestClient,
- logged_user: dict[str, Any],
- faker: Faker,
- expected_http_error,
-):
- cluster_ping = ClusterPing(
- endpoint=faker.uri(),
- authentication=SimpleAuthentication(
- username=faker.user_name(), password=faker.password()
- ),
- )
- assert client.app
- url = client.app.router["ping_cluster"].url_for()
- rsp = await client.post(
- f"{url}", json=json.loads(cluster_ping.model_dump_json(by_alias=True))
- )
- data, error = await assert_status(rsp, expected_http_error)
- assert not data
- assert error
-
-
-@pytest.mark.parametrize("user_role", [UserRole.TESTER], ids=str)
-@pytest.mark.parametrize(
- "director_v2_error, expected_http_error",
- [
- (DirectorServiceError, status.HTTP_503_SERVICE_UNAVAILABLE),
- (ClusterPingError, status.HTTP_422_UNPROCESSABLE_ENTITY),
- ],
-)
-async def test_ping_specific_cluster_with_error(
- enable_webserver_clusters_feature: None,
- mocked_director_v2_with_error,
- client: TestClient,
- logged_user: dict[str, Any],
- faker: Faker,
- expected_http_error,
-):
- assert client.app
- url = client.app.router["ping_cluster_cluster_id"].url_for(
- cluster_id=f"{faker.pyint(min_value=1)}"
- )
- rsp = await client.post(f"{url}")
- data, error = await assert_status(rsp, expected_http_error)
- assert not data
- assert error
diff --git a/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_plugin_setup.py b/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_plugin_setup.py
deleted file mode 100644
index c9731dcc0bd..00000000000
--- a/services/web/server/tests/unit/with_dbs/01/clusters/test_clusters_plugin_setup.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# pylint:disable=unused-variable
-# pylint:disable=unused-argument
-# pylint:disable=redefined-outer-name
-
-from aiohttp.test_utils import TestClient
-from servicelib.aiohttp.application_keys import APP_SETTINGS_KEY
-from simcore_service_webserver.application_settings import ApplicationSettings
-
-
-def test_module_setup_defaults_to_false(client: TestClient):
- assert client.app
- settings: ApplicationSettings = client.app[APP_SETTINGS_KEY]
-
- assert settings.WEBSERVER_CLUSTERS
- assert "list_clusters" in client.app.router
-
-
-def test_module_setup_can_be_properly_enabled(
- enable_webserver_clusters_feature: None,
- client: TestClient,
-):
- assert client.app
- settings: ApplicationSettings = client.app[APP_SETTINGS_KEY]
-
- assert settings.WEBSERVER_CLUSTERS
- assert "list_clusters" in client.app.router
diff --git a/services/web/server/tests/unit/with_dbs/01/test_director_v2.py b/services/web/server/tests/unit/with_dbs/01/test_director_v2.py
index 93956089fb9..f18bc9e1754 100644
--- a/services/web/server/tests/unit/with_dbs/01/test_director_v2.py
+++ b/services/web/server/tests/unit/with_dbs/01/test_director_v2.py
@@ -6,14 +6,6 @@
import pytest
from aioresponses import aioresponses
from faker import Faker
-from hypothesis import HealthCheck, given, settings
-from hypothesis import strategies as st
-from models_library.api_schemas_webserver.clusters import (
- ClusterCreate,
- ClusterPatch,
- ClusterPing,
-)
-from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_pipeline import ComputationTask
from models_library.projects_state import RunningState
@@ -38,11 +30,6 @@ def project_id(faker: Faker) -> ProjectID:
return ProjectID(faker.uuid4())
-@pytest.fixture
-def cluster_id(faker: Faker) -> ClusterID:
- return ClusterID(faker.pyint(min_value=0))
-
-
async def test_create_pipeline(
mocked_director_v2,
client,
@@ -74,70 +61,3 @@ async def test_delete_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
await api.delete_pipeline(client.app, user_id, project_id)
-
-
-@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
-@given(cluster_create=st.builds(ClusterCreate))
-async def test_create_cluster(
- mocked_director_v2, client, user_id: UserID, cluster_create
-):
- created_cluster = await api.create_cluster(
- client.app, user_id=user_id, new_cluster=cluster_create
- )
- assert created_cluster is not None
- assert isinstance(created_cluster, dict)
- assert "id" in created_cluster
-
-
-async def test_list_clusters(mocked_director_v2, client, user_id: UserID):
- list_of_clusters = await api.list_clusters(client.app, user_id=user_id)
- assert isinstance(list_of_clusters, list)
- assert len(list_of_clusters) > 0
-
-
-async def test_get_cluster(
- mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
-):
- cluster = await api.get_cluster(client.app, user_id=user_id, cluster_id=cluster_id)
- assert isinstance(cluster, dict)
- assert cluster["id"] == cluster_id
-
-
-async def test_get_cluster_details(
- mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
-):
- cluster_details = await api.get_cluster_details(
- client.app, user_id=user_id, cluster_id=cluster_id
- )
- assert isinstance(cluster_details, dict)
-
-
-@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
-@given(cluster_patch=st.from_type(ClusterPatch))
-async def test_update_cluster(
- mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID, cluster_patch
-):
- print(f"--> updating cluster with {cluster_patch=}")
- updated_cluster = await api.update_cluster(
- client.app, user_id=user_id, cluster_id=cluster_id, cluster_patch=cluster_patch
- )
- assert isinstance(updated_cluster, dict)
- assert updated_cluster["id"] == cluster_id
-
-
-async def test_delete_cluster(
- mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
-):
- await api.delete_cluster(client.app, user_id=user_id, cluster_id=cluster_id)
-
-
-@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
-@given(cluster_ping=st.builds(ClusterPing))
-async def test_ping_cluster(mocked_director_v2, client, cluster_ping: ClusterPing):
- await api.ping_cluster(client.app, cluster_ping=cluster_ping)
-
-
-async def test_ping_specific_cluster(
- mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
-):
- await api.ping_specific_cluster(client.app, user_id=user_id, cluster_id=cluster_id)
diff --git a/services/web/server/tests/unit/with_dbs/01/test_director_v2_handlers.py b/services/web/server/tests/unit/with_dbs/01/test_director_v2_handlers.py
index 8cbcfbdf739..e2c9b7e03c1 100644
--- a/services/web/server/tests/unit/with_dbs/01/test_director_v2_handlers.py
+++ b/services/web/server/tests/unit/with_dbs/01/test_director_v2_handlers.py
@@ -111,9 +111,11 @@ async def test_stop_computation(
rsp = await client.post(f"{url}")
await assert_status(
rsp,
- status.HTTP_204_NO_CONTENT
- if user_role == UserRole.GUEST
- else expected.no_content,
+ (
+ status.HTTP_204_NO_CONTENT
+ if user_role == UserRole.GUEST
+ else expected.no_content
+ ),
)
diff --git a/tests/environment-setup/test_used_docker_compose.py b/tests/environment-setup/test_used_docker_compose.py
index 93d07ba9b66..c083c79b206 100644
--- a/tests/environment-setup/test_used_docker_compose.py
+++ b/tests/environment-setup/test_used_docker_compose.py
@@ -76,7 +76,7 @@ def ensure_env_file(env_devel_file: Path) -> Iterable[Path]:
def _skip_not_useful_docker_composes(p) -> bool:
- result = "osparc-gateway-server" not in f"{p}" and "manual" not in f"{p}"
+ result = "manual" not in f"{p}"
result &= "tests/performance" not in f"{p}"
return result
From ca5fcdc858ea3cc5950b99b3c5a2decd7d16c9bf Mon Sep 17 00:00:00 2001
From: Matus Drobuliak <60785969+matusdrobuliak66@users.noreply.github.com>
Date: Tue, 3 Dec 2024 19:21:44 +0100
Subject: [PATCH 13/16] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20refactor=20RUT=20to?=
=?UTF-8?q?=20use=20new=20transactional=20context=20(#6874)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../api/rest/dependencies.py | 14 -
.../api/rpc/_resource_tracker.py | 27 +-
...ackground_task_periodic_heartbeat_check.py | 37 +-
.../services/credit_transactions.py | 23 +-
.../modules/db/credit_transactions_db.py | 162 ++
.../services/modules/db/pricing_plans_db.py | 668 ++++++++
.../modules/db/repositories/__init__.py | 3 -
.../services/modules/db/repositories/_base.py | 12 -
.../db/repositories/resource_tracker.py | 1382 -----------------
.../services/modules/db/service_runs_db.py | 622 ++++++++
.../services/pricing_plans.py | 91 +-
.../services/pricing_units.py | 61 +-
.../process_message_running_service.py | 63 +-
.../services/service_runs.py | 55 +-
.../services/utils.py | 36 +-
...i_resource_tracker_service_runs__export.py | 2 +-
...ackground_task_periodic_heartbeat_check.py | 6 -
.../with_dbs/test_process_rabbitmq_message.py | 12 +-
...t_process_rabbitmq_message_with_billing.py | 13 +-
...ss_rabbitmq_message_with_billing_cost_0.py | 13 +-
20 files changed, 1665 insertions(+), 1637 deletions(-)
create mode 100644 services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py
create mode 100644 services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py
delete mode 100644 services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/__init__.py
delete mode 100644 services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/_base.py
delete mode 100644 services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/resource_tracker.py
create mode 100644 services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py
index 49ce9523cfe..dacf0ff08b5 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py
@@ -4,16 +4,11 @@
#
import logging
-from collections.abc import AsyncGenerator, Callable
-from typing import Annotated
-from fastapi import Depends
from fastapi.requests import Request
from servicelib.fastapi.dependencies import get_app, get_reverse_url_mapper
from sqlalchemy.ext.asyncio import AsyncEngine
-from ...services.modules.db.repositories._base import BaseRepository
-
logger = logging.getLogger(__name__)
@@ -23,15 +18,6 @@ def get_resource_tracker_db_engine(request: Request) -> AsyncEngine:
return engine
-def get_repository(repo_type: type[BaseRepository]) -> Callable:
- async def _get_repo(
- engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
- ) -> AsyncGenerator[BaseRepository, None]:
- yield repo_type(db_engine=engine)
-
- return _get_repo
-
-
assert get_reverse_url_mapper # nosec
assert get_app # nosec
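
For orientation, a minimal sketch (illustrative code, not part of the diff) of the dependency pattern that replaces the removed get_repository factory: a REST handler depends on the AsyncEngine directly and hands it to the new module-level DB functions. The route path and handler name are assumptions, and import paths are written as if the handler lived under api/rest/; only get_resource_tracker_db_engine and credit_transactions_db come from this patch.

from typing import Annotated

from fastapi import APIRouter, Depends
from models_library.api_schemas_resource_usage_tracker.credit_transactions import (
    WalletTotalCredits,
)
from models_library.products import ProductName
from models_library.wallets import WalletID
from sqlalchemy.ext.asyncio import AsyncEngine

from ...services.modules.db import credit_transactions_db
from .dependencies import get_resource_tracker_db_engine

router = APIRouter()


@router.get("/credit-transactions/credits:sum")  # hypothetical route, for illustration only
async def sum_credits(
    product_name: ProductName,
    wallet_id: WalletID,
    db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> WalletTotalCredits:
    # No per-request repository object anymore: the engine goes straight to the DB helper.
    return await credit_transactions_db.sum_credit_transactions_by_product_and_wallet(
        db_engine, product_name=product_name, wallet_id=wallet_id
    )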
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_resource_tracker.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_resource_tracker.py
index d7e9a5ca74d..5a382782f9d 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_resource_tracker.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_resource_tracker.py
@@ -29,9 +29,6 @@
from ...core.settings import ApplicationSettings
from ...services import pricing_plans, pricing_units, service_runs
-from ...services.modules.db.repositories.resource_tracker import (
- ResourceTrackerRepository,
-)
from ...services.modules.s3 import get_s3_client
router = RPCRouter()
@@ -56,7 +53,7 @@ async def get_service_run_page(
return await service_runs.list_service_runs(
user_id=user_id,
product_name=product_name,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
limit=limit,
offset=offset,
wallet_id=wallet_id,
@@ -87,7 +84,7 @@ async def export_service_runs(
s3_region=s3_settings.S3_REGION,
user_id=user_id,
product_name=product_name,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
wallet_id=wallet_id,
access_all_wallet_usage=access_all_wallet_usage,
order_by=order_by,
@@ -111,7 +108,7 @@ async def get_osparc_credits_aggregated_usages_page(
return await service_runs.get_osparc_credits_aggregated_usages_page(
user_id=user_id,
product_name=product_name,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
aggregated_by=aggregated_by,
time_period=time_period,
limit=limit,
@@ -134,7 +131,7 @@ async def get_pricing_plan(
return await pricing_plans.get_pricing_plan(
product_name=product_name,
pricing_plan_id=pricing_plan_id,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
@@ -146,7 +143,7 @@ async def list_pricing_plans(
) -> list[PricingPlanGet]:
return await pricing_plans.list_pricing_plans_by_product(
product_name=product_name,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
@@ -158,7 +155,7 @@ async def create_pricing_plan(
) -> PricingPlanGet:
return await pricing_plans.create_pricing_plan(
data=data,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
@@ -172,7 +169,7 @@ async def update_pricing_plan(
return await pricing_plans.update_pricing_plan(
product_name=product_name,
data=data,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
@@ -191,7 +188,7 @@ async def get_pricing_unit(
product_name=product_name,
pricing_plan_id=pricing_plan_id,
pricing_unit_id=pricing_unit_id,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
@@ -205,7 +202,7 @@ async def create_pricing_unit(
return await pricing_units.create_pricing_unit(
product_name=product_name,
data=data,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
@@ -219,7 +216,7 @@ async def update_pricing_unit(
return await pricing_units.update_pricing_unit(
product_name=product_name,
data=data,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
@@ -238,7 +235,7 @@ async def list_connected_services_to_pricing_plan_by_pricing_plan(
] = await pricing_plans.list_connected_services_to_pricing_plan_by_pricing_plan(
product_name=product_name,
pricing_plan_id=pricing_plan_id,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
return output
@@ -257,5 +254,5 @@ async def connect_service_to_pricing_plan(
pricing_plan_id=pricing_plan_id,
service_key=service_key,
service_version=service_version,
- resource_tracker_repo=ResourceTrackerRepository(db_engine=app.state.engine),
+ db_engine=app.state.engine,
)
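
The RPC handlers above now pass app.state.engine straight into the service layer. A sketch of the service-layer shape this implies (the services/pricing_plans.py changes belong to this patch but are not shown in this hunk, so the exact signature and return mapping below are assumptions):

from models_library.products import ProductName
from models_library.resource_tracker import PricingPlanId
from sqlalchemy.ext.asyncio import AsyncEngine

from .modules.db import pricing_plans_db


async def get_pricing_plan(
    product_name: ProductName,
    pricing_plan_id: PricingPlanId,
    db_engine: AsyncEngine,
):
    # The service function no longer builds a ResourceTrackerRepository; it forwards
    # the engine to the module-level DB function and adapts the DB model for the API.
    pricing_plan_db = await pricing_plans_db.get_pricing_plan(
        db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id
    )
    return pricing_plan_db  # the real code converts this PricingPlansDB into a PricingPlanGet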
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py
index 256b737d479..fba9332502e 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py
@@ -10,11 +10,12 @@
ServiceRunStatus,
)
from pydantic import NonNegativeInt, PositiveInt
+from sqlalchemy.ext.asyncio import AsyncEngine
from ..core.settings import ApplicationSettings
from ..models.credit_transactions import CreditTransactionCreditsAndStatusUpdate
from ..models.service_runs import ServiceRunStoppedAtUpdate
-from .modules.db.repositories.resource_tracker import ResourceTrackerRepository
+from .modules.db import credit_transactions_db, service_runs_db
from .utils import compute_service_run_credit_costs, make_negative
_logger = logging.getLogger(__name__)
@@ -23,7 +24,7 @@
async def _check_service_heartbeat(
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
base_start_timestamp: datetime,
resource_usage_tracker_missed_heartbeat_interval: timedelta,
resource_usage_tracker_missed_heartbeat_counter_fail: NonNegativeInt,
@@ -55,7 +56,7 @@ async def _check_service_heartbeat(
missed_heartbeat_counter,
)
await _close_unhealthy_service(
- resource_tracker_repo, service_run_id, base_start_timestamp
+ db_engine, service_run_id, base_start_timestamp
)
else:
_logger.warning(
@@ -63,13 +64,16 @@ async def _check_service_heartbeat(
service_run_id,
missed_heartbeat_counter,
)
- await resource_tracker_repo.update_service_missed_heartbeat_counter(
- service_run_id, last_heartbeat_at, missed_heartbeat_counter
+ await service_runs_db.update_service_missed_heartbeat_counter(
+ db_engine,
+ service_run_id=service_run_id,
+ last_heartbeat_at=last_heartbeat_at,
+ missed_heartbeat_counter=missed_heartbeat_counter,
)
async def _close_unhealthy_service(
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
service_run_id: ServiceRunId,
base_start_timestamp: datetime,
):
@@ -80,8 +84,8 @@ async def _close_unhealthy_service(
service_run_status=ServiceRunStatus.ERROR,
service_run_status_msg="Service missed more heartbeats. It's considered unhealthy.",
)
- running_service = await resource_tracker_repo.update_service_run_stopped_at(
- update_service_run_stopped_at
+ running_service = await service_runs_db.update_service_run_stopped_at(
+ db_engine, data=update_service_run_stopped_at
)
if running_service is None:
@@ -108,8 +112,8 @@ async def _close_unhealthy_service(
else CreditTransactionStatus.BILLED
),
)
- await resource_tracker_repo.update_credit_transaction_credits_and_status(
- update_credit_transaction
+ await credit_transactions_db.update_credit_transaction_credits_and_status(
+ db_engine, data=update_credit_transaction
)
@@ -118,19 +122,18 @@ async def periodic_check_of_running_services_task(app: FastAPI) -> None:
# This check runs across all products
app_settings: ApplicationSettings = app.state.settings
- resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository(
- db_engine=app.state.engine
- )
+ _db_engine = app.state.engine
base_start_timestamp = datetime.now(tz=timezone.utc)
# Get all current running services (across all products)
- total_count: PositiveInt = (
- await resource_tracker_repo.total_service_runs_with_running_status_across_all_products()
+ total_count: PositiveInt = await service_runs_db.total_service_runs_with_running_status_across_all_products(
+ _db_engine
)
for offset in range(0, total_count, _BATCH_SIZE):
- batch_check_services = await resource_tracker_repo.list_service_runs_with_running_status_across_all_products(
+ batch_check_services = await service_runs_db.list_service_runs_with_running_status_across_all_products(
+ _db_engine,
offset=offset,
limit=_BATCH_SIZE,
)
@@ -138,7 +141,7 @@ async def periodic_check_of_running_services_task(app: FastAPI) -> None:
await asyncio.gather(
*(
_check_service_heartbeat(
- resource_tracker_repo=resource_tracker_repo,
+ db_engine=_db_engine,
base_start_timestamp=base_start_timestamp,
resource_usage_tracker_missed_heartbeat_interval=app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC,
resource_usage_tracker_missed_heartbeat_counter_fail=app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL,
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py
index 0d4362e9748..c58eb76be8a 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py
@@ -13,19 +13,18 @@
)
from models_library.wallets import WalletID
from servicelib.rabbitmq import RabbitMQClient
+from sqlalchemy.ext.asyncio import AsyncEngine
-from ..api.rest.dependencies import get_repository
+from ..api.rest.dependencies import get_resource_tracker_db_engine
from ..models.credit_transactions import CreditTransactionCreate
-from .modules.db.repositories.resource_tracker import ResourceTrackerRepository
+from .modules.db import credit_transactions_db
from .modules.rabbitmq import get_rabbitmq_client_from_request
from .utils import sum_credit_transactions_and_publish_to_rabbitmq
async def create_credit_transaction(
credit_transaction_create_body: CreditTransactionCreateBody,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
rabbitmq_client: Annotated[
RabbitMQClient, Depends(get_rabbitmq_client_from_request)
],
@@ -47,12 +46,12 @@ async def create_credit_transaction(
created_at=credit_transaction_create_body.created_at,
last_heartbeat_at=credit_transaction_create_body.created_at,
)
- transaction_id = await resource_tracker_repo.create_credit_transaction(
- transaction_create
+ transaction_id = await credit_transactions_db.create_credit_transaction(
+ db_engine, data=transaction_create
)
await sum_credit_transactions_and_publish_to_rabbitmq(
- resource_tracker_repo,
+ db_engine,
rabbitmq_client,
credit_transaction_create_body.product_name,
credit_transaction_create_body.wallet_id,
@@ -64,10 +63,8 @@ async def create_credit_transaction(
async def sum_credit_transactions_by_product_and_wallet(
product_name: ProductName,
wallet_id: WalletID,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> WalletTotalCredits:
- return await resource_tracker_repo.sum_credit_transactions_by_product_and_wallet(
- product_name, wallet_id
+ return await credit_transactions_db.sum_credit_transactions_by_product_and_wallet(
+ db_engine, product_name=product_name, wallet_id=wallet_id
)
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py
new file mode 100644
index 00000000000..76a8e9f1dfe
--- /dev/null
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py
@@ -0,0 +1,162 @@
+import logging
+from decimal import Decimal
+from typing import cast
+
+import sqlalchemy as sa
+from models_library.api_schemas_resource_usage_tracker.credit_transactions import (
+ WalletTotalCredits,
+)
+from models_library.products import ProductName
+from models_library.resource_tracker import CreditTransactionId, CreditTransactionStatus
+from models_library.wallets import WalletID
+from simcore_postgres_database.models.resource_tracker_credit_transactions import (
+ resource_tracker_credit_transactions,
+)
+from simcore_postgres_database.utils_repos import transaction_context
+from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine
+
+from ....exceptions.errors import CreditTransactionNotCreatedDBError
+from ....models.credit_transactions import (
+ CreditTransactionCreate,
+ CreditTransactionCreditsAndStatusUpdate,
+ CreditTransactionCreditsUpdate,
+)
+
+_logger = logging.getLogger(__name__)
+
+
+async def create_credit_transaction(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: CreditTransactionCreate
+) -> CreditTransactionId:
+ async with transaction_context(engine, connection) as conn:
+ insert_stmt = (
+ resource_tracker_credit_transactions.insert()
+ .values(
+ product_name=data.product_name,
+ wallet_id=data.wallet_id,
+ wallet_name=data.wallet_name,
+ pricing_plan_id=data.pricing_plan_id,
+ pricing_unit_id=data.pricing_unit_id,
+ pricing_unit_cost_id=data.pricing_unit_cost_id,
+ user_id=data.user_id,
+ user_email=data.user_email,
+ osparc_credits=data.osparc_credits,
+ transaction_status=data.transaction_status,
+ transaction_classification=data.transaction_classification,
+ service_run_id=data.service_run_id,
+ payment_transaction_id=data.payment_transaction_id,
+ created=data.created_at,
+ last_heartbeat_at=data.last_heartbeat_at,
+ modified=sa.func.now(),
+ )
+ .returning(resource_tracker_credit_transactions.c.transaction_id)
+ )
+ result = await conn.execute(insert_stmt)
+ row = result.first()
+ if row is None:
+ raise CreditTransactionNotCreatedDBError(data=data)
+ return cast(CreditTransactionId, row[0])
+
+
+async def update_credit_transaction_credits(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: CreditTransactionCreditsUpdate
+) -> CreditTransactionId | None:
+ async with transaction_context(engine, connection) as conn:
+ update_stmt = (
+ resource_tracker_credit_transactions.update()
+ .values(
+ modified=sa.func.now(),
+ osparc_credits=data.osparc_credits,
+ last_heartbeat_at=data.last_heartbeat_at,
+ )
+ .where(
+ (
+ resource_tracker_credit_transactions.c.service_run_id
+ == data.service_run_id
+ )
+ & (
+ resource_tracker_credit_transactions.c.transaction_status
+ == CreditTransactionStatus.PENDING
+ )
+ & (
+ resource_tracker_credit_transactions.c.last_heartbeat_at
+ <= data.last_heartbeat_at
+ )
+ )
+ .returning(resource_tracker_credit_transactions.c.service_run_id)
+ )
+ result = await conn.execute(update_stmt)
+ row = result.first()
+ if row is None:
+ return None
+ return cast(CreditTransactionId | None, row[0])
+
+
+async def update_credit_transaction_credits_and_status(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: CreditTransactionCreditsAndStatusUpdate
+) -> CreditTransactionId | None:
+ async with transaction_context(engine, connection) as conn:
+ update_stmt = (
+ resource_tracker_credit_transactions.update()
+ .values(
+ modified=sa.func.now(),
+ osparc_credits=data.osparc_credits,
+ transaction_status=data.transaction_status,
+ )
+ .where(
+ (
+ resource_tracker_credit_transactions.c.service_run_id
+ == data.service_run_id
+ )
+ & (
+ resource_tracker_credit_transactions.c.transaction_status
+ == CreditTransactionStatus.PENDING
+ )
+ )
+ .returning(resource_tracker_credit_transactions.c.service_run_id)
+ )
+ result = await conn.execute(update_stmt)
+ row = result.first()
+ if row is None:
+ return None
+ return cast(CreditTransactionId | None, row[0])
+
+
+async def sum_credit_transactions_by_product_and_wallet(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ wallet_id: WalletID
+) -> WalletTotalCredits:
+ async with transaction_context(engine, connection) as conn:
+ sum_stmt = sa.select(
+ sa.func.sum(resource_tracker_credit_transactions.c.osparc_credits)
+ ).where(
+ (resource_tracker_credit_transactions.c.product_name == product_name)
+ & (resource_tracker_credit_transactions.c.wallet_id == wallet_id)
+ & (
+ resource_tracker_credit_transactions.c.transaction_status.in_(
+ [
+ CreditTransactionStatus.BILLED,
+ CreditTransactionStatus.PENDING,
+ ]
+ )
+ )
+ )
+ result = await conn.execute(sum_stmt)
+ row = result.first()
+ if row is None or row[0] is None:
+ return WalletTotalCredits(
+ wallet_id=wallet_id, available_osparc_credits=Decimal(0)
+ )
+ return WalletTotalCredits(wallet_id=wallet_id, available_osparc_credits=row[0])
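
These module-level functions all take the engine plus an optional AsyncConnection, which is what makes the new transactional context composable: a caller can open one transaction_context and pass the same connection to several helpers so they commit or roll back together. A minimal sketch, assuming transaction_context re-uses a connection when one is passed in (the behaviour these signatures are designed around); the wrapper function is hypothetical and its imports are written as if it lived in services/.

from simcore_postgres_database.utils_repos import transaction_context
from sqlalchemy.ext.asyncio import AsyncEngine

from ..models.credit_transactions import (
    CreditTransactionCreate,
    CreditTransactionCreditsUpdate,
)
from .modules.db import credit_transactions_db


async def _create_and_update_atomically(
    engine: AsyncEngine,
    transaction_create: CreditTransactionCreate,
    credits_update: CreditTransactionCreditsUpdate,
) -> None:
    # One outer transaction; both helpers receive the same connection, so a failure
    # in the second call also rolls back the row inserted by the first call.
    async with transaction_context(engine) as conn:
        await credit_transactions_db.create_credit_transaction(
            engine, conn, data=transaction_create
        )
        await credit_transactions_db.update_credit_transaction_credits(
            engine, conn, data=credits_update
        )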
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py
new file mode 100644
index 00000000000..ea6376cc15b
--- /dev/null
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py
@@ -0,0 +1,668 @@
+import logging
+
+import sqlalchemy as sa
+from models_library.products import ProductName
+from models_library.resource_tracker import (
+ PricingPlanCreate,
+ PricingPlanId,
+ PricingPlanUpdate,
+ PricingUnitCostId,
+ PricingUnitId,
+ PricingUnitWithCostCreate,
+ PricingUnitWithCostUpdate,
+)
+from models_library.services import ServiceKey, ServiceVersion
+from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import (
+ resource_tracker_pricing_plan_to_service,
+)
+from simcore_postgres_database.models.resource_tracker_pricing_plans import (
+ resource_tracker_pricing_plans,
+)
+from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import (
+ resource_tracker_pricing_unit_costs,
+)
+from simcore_postgres_database.models.resource_tracker_pricing_units import (
+ resource_tracker_pricing_units,
+)
+from simcore_postgres_database.utils_repos import transaction_context
+from sqlalchemy.dialects.postgresql import ARRAY, INTEGER
+from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine
+
+from ....exceptions.errors import (
+ PricingPlanAndPricingUnitCombinationDoesNotExistsDBError,
+ PricingPlanDoesNotExistsDBError,
+ PricingPlanNotCreatedDBError,
+ PricingPlanToServiceNotCreatedDBError,
+ PricingUnitCostDoesNotExistsDBError,
+ PricingUnitCostNotCreatedDBError,
+ PricingUnitNotCreatedDBError,
+)
+from ....models.pricing_plans import (
+ PricingPlansDB,
+ PricingPlansWithServiceDefaultPlanDB,
+ PricingPlanToServiceDB,
+)
+from ....models.pricing_unit_costs import PricingUnitCostsDB
+from ....models.pricing_units import PricingUnitsDB
+
+_logger = logging.getLogger(__name__)
+
+
+#################################
+# Pricing plans
+#################################
+
+
+async def list_active_service_pricing_plans_by_product_and_service(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ service_key: ServiceKey,
+ service_version: ServiceVersion,
+) -> list[PricingPlansWithServiceDefaultPlanDB]:
+    # NOTE: consolidate with utils_services_environmnets.py
+ def _version(column_or_value):
+ # converts version value string to array[integer] that can be compared
+ return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER))
+
+ async with transaction_context(engine, connection) as conn:
+        # First, find the correct service version
+ query = (
+ sa.select(
+ resource_tracker_pricing_plan_to_service.c.service_key,
+ resource_tracker_pricing_plan_to_service.c.service_version,
+ )
+ .select_from(
+ resource_tracker_pricing_plan_to_service.join(
+ resource_tracker_pricing_plans,
+ (
+ resource_tracker_pricing_plan_to_service.c.pricing_plan_id
+ == resource_tracker_pricing_plans.c.pricing_plan_id
+ ),
+ )
+ )
+ .where(
+ (
+ _version(resource_tracker_pricing_plan_to_service.c.service_version)
+ <= _version(service_version)
+ )
+ & (
+ resource_tracker_pricing_plan_to_service.c.service_key
+ == service_key
+ )
+ & (resource_tracker_pricing_plans.c.product_name == product_name)
+ & (resource_tracker_pricing_plans.c.is_active.is_(True))
+ )
+ .order_by(
+ _version(
+ resource_tracker_pricing_plan_to_service.c.service_version
+ ).desc()
+ )
+ .limit(1)
+ )
+ result = await conn.execute(query)
+ row = result.first()
+ if row is None:
+ return []
+ latest_service_key, latest_service_version = row
+ # Now choose all pricing plans connected to this service
+ query = (
+ sa.select(
+ resource_tracker_pricing_plans.c.pricing_plan_id,
+ resource_tracker_pricing_plans.c.display_name,
+ resource_tracker_pricing_plans.c.description,
+ resource_tracker_pricing_plans.c.classification,
+ resource_tracker_pricing_plans.c.is_active,
+ resource_tracker_pricing_plans.c.created,
+ resource_tracker_pricing_plans.c.pricing_plan_key,
+ resource_tracker_pricing_plan_to_service.c.service_default_plan,
+ )
+ .select_from(
+ resource_tracker_pricing_plan_to_service.join(
+ resource_tracker_pricing_plans,
+ (
+ resource_tracker_pricing_plan_to_service.c.pricing_plan_id
+ == resource_tracker_pricing_plans.c.pricing_plan_id
+ ),
+ )
+ )
+ .where(
+ (
+ _version(resource_tracker_pricing_plan_to_service.c.service_version)
+ == _version(latest_service_version)
+ )
+ & (
+ resource_tracker_pricing_plan_to_service.c.service_key
+ == latest_service_key
+ )
+ & (resource_tracker_pricing_plans.c.product_name == product_name)
+ & (resource_tracker_pricing_plans.c.is_active.is_(True))
+ )
+ .order_by(resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc())
+ )
+ result = await conn.execute(query)
+
+ return [
+ PricingPlansWithServiceDefaultPlanDB.model_validate(row)
+ for row in result.fetchall()
+ ]
+
+
+async def get_pricing_plan(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ pricing_plan_id: PricingPlanId,
+) -> PricingPlansDB:
+ async with transaction_context(engine, connection) as conn:
+ select_stmt = sa.select(
+ resource_tracker_pricing_plans.c.pricing_plan_id,
+ resource_tracker_pricing_plans.c.display_name,
+ resource_tracker_pricing_plans.c.description,
+ resource_tracker_pricing_plans.c.classification,
+ resource_tracker_pricing_plans.c.is_active,
+ resource_tracker_pricing_plans.c.created,
+ resource_tracker_pricing_plans.c.pricing_plan_key,
+ ).where(
+ (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id)
+ & (resource_tracker_pricing_plans.c.product_name == product_name)
+ )
+ result = await conn.execute(select_stmt)
+ row = result.first()
+ if row is None:
+ raise PricingPlanDoesNotExistsDBError(pricing_plan_id=pricing_plan_id)
+ return PricingPlansDB.model_validate(row)
+
+
+async def list_pricing_plans_by_product(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+) -> list[PricingPlansDB]:
+ async with transaction_context(engine, connection) as conn:
+ select_stmt = sa.select(
+ resource_tracker_pricing_plans.c.pricing_plan_id,
+ resource_tracker_pricing_plans.c.display_name,
+ resource_tracker_pricing_plans.c.description,
+ resource_tracker_pricing_plans.c.classification,
+ resource_tracker_pricing_plans.c.is_active,
+ resource_tracker_pricing_plans.c.created,
+ resource_tracker_pricing_plans.c.pricing_plan_key,
+ ).where(resource_tracker_pricing_plans.c.product_name == product_name)
+ result = await conn.execute(select_stmt)
+
+ return [PricingPlansDB.model_validate(row) for row in result.fetchall()]
+
+
+async def create_pricing_plan(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: PricingPlanCreate,
+) -> PricingPlansDB:
+ async with transaction_context(engine, connection) as conn:
+ insert_stmt = (
+ resource_tracker_pricing_plans.insert()
+ .values(
+ product_name=data.product_name,
+ display_name=data.display_name,
+ description=data.description,
+ classification=data.classification,
+ is_active=True,
+ created=sa.func.now(),
+ modified=sa.func.now(),
+ pricing_plan_key=data.pricing_plan_key,
+ )
+ .returning(
+ *[
+ resource_tracker_pricing_plans.c.pricing_plan_id,
+ resource_tracker_pricing_plans.c.display_name,
+ resource_tracker_pricing_plans.c.description,
+ resource_tracker_pricing_plans.c.classification,
+ resource_tracker_pricing_plans.c.is_active,
+ resource_tracker_pricing_plans.c.created,
+ resource_tracker_pricing_plans.c.pricing_plan_key,
+ ]
+ )
+ )
+ result = await conn.execute(insert_stmt)
+ row = result.first()
+ if row is None:
+ raise PricingPlanNotCreatedDBError(data=data)
+ return PricingPlansDB.model_validate(row)
+
+
+async def update_pricing_plan(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ data: PricingPlanUpdate,
+) -> PricingPlansDB | None:
+ async with transaction_context(engine, connection) as conn:
+ update_stmt = (
+ resource_tracker_pricing_plans.update()
+ .values(
+ display_name=data.display_name,
+ description=data.description,
+ is_active=data.is_active,
+ modified=sa.func.now(),
+ )
+ .where(
+ (
+ resource_tracker_pricing_plans.c.pricing_plan_id
+ == data.pricing_plan_id
+ )
+ & (resource_tracker_pricing_plans.c.product_name == product_name)
+ )
+ .returning(
+ *[
+ resource_tracker_pricing_plans.c.pricing_plan_id,
+ resource_tracker_pricing_plans.c.display_name,
+ resource_tracker_pricing_plans.c.description,
+ resource_tracker_pricing_plans.c.classification,
+ resource_tracker_pricing_plans.c.is_active,
+ resource_tracker_pricing_plans.c.created,
+ resource_tracker_pricing_plans.c.pricing_plan_key,
+ ]
+ )
+ )
+ result = await conn.execute(update_stmt)
+ row = result.first()
+ if row is None:
+ return None
+ return PricingPlansDB.model_validate(row)
+
+
+#################################
+# Pricing plan to service
+#################################
+
+
+async def list_connected_services_to_pricing_plan_by_pricing_plan(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ pricing_plan_id: PricingPlanId,
+) -> list[PricingPlanToServiceDB]:
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ sa.select(
+ resource_tracker_pricing_plan_to_service.c.pricing_plan_id,
+ resource_tracker_pricing_plan_to_service.c.service_key,
+ resource_tracker_pricing_plan_to_service.c.service_version,
+ resource_tracker_pricing_plan_to_service.c.created,
+ )
+ .select_from(
+ resource_tracker_pricing_plan_to_service.join(
+ resource_tracker_pricing_plans,
+ (
+ resource_tracker_pricing_plan_to_service.c.pricing_plan_id
+ == resource_tracker_pricing_plans.c.pricing_plan_id
+ ),
+ )
+ )
+ .where(
+ (resource_tracker_pricing_plans.c.product_name == product_name)
+ & (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id)
+ )
+ .order_by(resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc())
+ )
+ result = await conn.execute(query)
+
+ return [PricingPlanToServiceDB.model_validate(row) for row in result.fetchall()]
+
+
+async def upsert_service_to_pricing_plan(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ pricing_plan_id: PricingPlanId,
+ service_key: ServiceKey,
+ service_version: ServiceVersion,
+) -> PricingPlanToServiceDB:
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ sa.select(
+ resource_tracker_pricing_plan_to_service.c.pricing_plan_id,
+ resource_tracker_pricing_plan_to_service.c.service_key,
+ resource_tracker_pricing_plan_to_service.c.service_version,
+ resource_tracker_pricing_plan_to_service.c.created,
+ )
+ .select_from(
+ resource_tracker_pricing_plan_to_service.join(
+ resource_tracker_pricing_plans,
+ (
+ resource_tracker_pricing_plan_to_service.c.pricing_plan_id
+ == resource_tracker_pricing_plans.c.pricing_plan_id
+ ),
+ )
+ )
+ .where(
+ (resource_tracker_pricing_plans.c.product_name == product_name)
+ & (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id)
+ & (
+ resource_tracker_pricing_plan_to_service.c.service_key
+ == service_key
+ )
+ & (
+ resource_tracker_pricing_plan_to_service.c.service_version
+ == service_version
+ )
+ )
+ )
+ result = await conn.execute(query)
+ row = result.first()
+
+ if row is not None:
+ delete_stmt = resource_tracker_pricing_plan_to_service.delete().where(
+ (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id)
+ & (
+ resource_tracker_pricing_plan_to_service.c.service_key
+ == service_key
+ )
+ & (
+ resource_tracker_pricing_plan_to_service.c.service_version
+ == service_version
+ )
+ )
+ await conn.execute(delete_stmt)
+
+ insert_stmt = (
+ resource_tracker_pricing_plan_to_service.insert()
+ .values(
+ pricing_plan_id=pricing_plan_id,
+ service_key=service_key,
+ service_version=service_version,
+ created=sa.func.now(),
+ modified=sa.func.now(),
+ service_default_plan=True,
+ )
+ .returning(
+ *[
+ resource_tracker_pricing_plan_to_service.c.pricing_plan_id,
+ resource_tracker_pricing_plan_to_service.c.service_key,
+ resource_tracker_pricing_plan_to_service.c.service_version,
+ resource_tracker_pricing_plan_to_service.c.created,
+ ]
+ )
+ )
+ result = await conn.execute(insert_stmt)
+ row = result.first()
+ if row is None:
+ raise PricingPlanToServiceNotCreatedDBError(
+ data=f"pricing_plan_id {pricing_plan_id}, service_key {service_key}, service_version {service_version}"
+ )
+ return PricingPlanToServiceDB.model_validate(row)
+
+
+#################################
+# Pricing units
+#################################
+
+
+def _pricing_units_select_stmt():
+ return sa.select(
+ resource_tracker_pricing_units.c.pricing_unit_id,
+ resource_tracker_pricing_units.c.pricing_plan_id,
+ resource_tracker_pricing_units.c.unit_name,
+ resource_tracker_pricing_units.c.unit_extra_info,
+ resource_tracker_pricing_units.c.default,
+ resource_tracker_pricing_units.c.specific_info,
+ resource_tracker_pricing_units.c.created,
+ resource_tracker_pricing_units.c.modified,
+ resource_tracker_pricing_unit_costs.c.cost_per_unit.label(
+ "current_cost_per_unit"
+ ),
+ resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id.label(
+ "current_cost_per_unit_id"
+ ),
+ )
+
+
+async def list_pricing_units_by_pricing_plan(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ pricing_plan_id: PricingPlanId,
+) -> list[PricingUnitsDB]:
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ _pricing_units_select_stmt()
+ .select_from(
+ resource_tracker_pricing_units.join(
+ resource_tracker_pricing_unit_costs,
+ (
+ (
+ resource_tracker_pricing_units.c.pricing_plan_id
+ == resource_tracker_pricing_unit_costs.c.pricing_plan_id
+ )
+ & (
+ resource_tracker_pricing_units.c.pricing_unit_id
+ == resource_tracker_pricing_unit_costs.c.pricing_unit_id
+ )
+ ),
+ )
+ )
+ .where(
+ (resource_tracker_pricing_units.c.pricing_plan_id == pricing_plan_id)
+ & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None))
+ )
+ .order_by(resource_tracker_pricing_unit_costs.c.cost_per_unit.asc())
+ )
+ result = await conn.execute(query)
+
+ return [PricingUnitsDB.model_validate(row) for row in result.fetchall()]
+
+
+async def get_valid_pricing_unit(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ pricing_plan_id: PricingPlanId,
+ pricing_unit_id: PricingUnitId,
+) -> PricingUnitsDB:
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ _pricing_units_select_stmt()
+ .select_from(
+ resource_tracker_pricing_units.join(
+ resource_tracker_pricing_unit_costs,
+ (
+ (
+ resource_tracker_pricing_units.c.pricing_plan_id
+ == resource_tracker_pricing_unit_costs.c.pricing_plan_id
+ )
+ & (
+ resource_tracker_pricing_units.c.pricing_unit_id
+ == resource_tracker_pricing_unit_costs.c.pricing_unit_id
+ )
+ ),
+ ).join(
+ resource_tracker_pricing_plans,
+ (
+ resource_tracker_pricing_plans.c.pricing_plan_id
+ == resource_tracker_pricing_units.c.pricing_plan_id
+ ),
+ )
+ )
+ .where(
+ (resource_tracker_pricing_units.c.pricing_plan_id == pricing_plan_id)
+ & (resource_tracker_pricing_units.c.pricing_unit_id == pricing_unit_id)
+ & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None))
+ & (resource_tracker_pricing_plans.c.product_name == product_name)
+ )
+ )
+ result = await conn.execute(query)
+
+ row = result.first()
+ if row is None:
+ raise PricingPlanAndPricingUnitCombinationDoesNotExistsDBError(
+ pricing_plan_id=pricing_plan_id,
+ pricing_unit_id=pricing_unit_id,
+ product_name=product_name,
+ )
+ return PricingUnitsDB.model_validate(row)
+
+
+async def create_pricing_unit_with_cost(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: PricingUnitWithCostCreate,
+ pricing_plan_key: str,
+) -> tuple[PricingUnitId, PricingUnitCostId]:
+ async with transaction_context(engine, connection) as conn:
+ # pricing units table
+ insert_stmt = (
+ resource_tracker_pricing_units.insert()
+ .values(
+ pricing_plan_id=data.pricing_plan_id,
+ unit_name=data.unit_name,
+ unit_extra_info=data.unit_extra_info.model_dump(),
+ default=data.default,
+ specific_info=data.specific_info.model_dump(),
+ created=sa.func.now(),
+ modified=sa.func.now(),
+ )
+ .returning(resource_tracker_pricing_units.c.pricing_unit_id)
+ )
+ result = await conn.execute(insert_stmt)
+ row = result.first()
+ if row is None:
+ raise PricingUnitNotCreatedDBError(data=data)
+ _pricing_unit_id = row[0]
+
+ # pricing unit cost table
+ insert_stmt = (
+ resource_tracker_pricing_unit_costs.insert()
+ .values(
+ pricing_plan_id=data.pricing_plan_id,
+ pricing_plan_key=pricing_plan_key,
+ pricing_unit_id=_pricing_unit_id,
+ pricing_unit_name=data.unit_name,
+ cost_per_unit=data.cost_per_unit,
+ valid_from=sa.func.now(),
+ valid_to=None,
+ created=sa.func.now(),
+ comment=data.comment,
+ modified=sa.func.now(),
+ )
+ .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id)
+ )
+ result = await conn.execute(insert_stmt)
+ row = result.first()
+ if row is None:
+ raise PricingUnitCostNotCreatedDBError(data=data)
+ _pricing_unit_cost_id = row[0]
+
+ return (_pricing_unit_id, _pricing_unit_cost_id)
+
+
+async def update_pricing_unit_with_cost(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: PricingUnitWithCostUpdate,
+ pricing_plan_key: str,
+) -> None:
+ async with transaction_context(engine, connection) as conn:
+ # pricing units table
+ update_stmt = (
+ resource_tracker_pricing_units.update()
+ .values(
+ unit_name=data.unit_name,
+ unit_extra_info=data.unit_extra_info.model_dump(),
+ default=data.default,
+ specific_info=data.specific_info.model_dump(),
+ modified=sa.func.now(),
+ )
+ .where(
+ resource_tracker_pricing_units.c.pricing_unit_id == data.pricing_unit_id
+ )
+ .returning(resource_tracker_pricing_units.c.pricing_unit_id)
+ )
+ await conn.execute(update_stmt)
+
+        # If the price changed, update the pricing unit cost table
+ if data.pricing_unit_cost_update:
+            # First, close the previous price
+ update_stmt = (
+ resource_tracker_pricing_unit_costs.update()
+ .values(
+ valid_to=sa.func.now(), # <-- Closing previous price
+ modified=sa.func.now(),
+ )
+ .where(
+ resource_tracker_pricing_unit_costs.c.pricing_unit_id
+ == data.pricing_unit_id
+ )
+ .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_id)
+ )
+ result = await conn.execute(update_stmt)
+
+ # Then we create a new price
+ insert_stmt = (
+ resource_tracker_pricing_unit_costs.insert()
+ .values(
+ pricing_plan_id=data.pricing_plan_id,
+ pricing_plan_key=pricing_plan_key,
+ pricing_unit_id=data.pricing_unit_id,
+ pricing_unit_name=data.unit_name,
+ cost_per_unit=data.pricing_unit_cost_update.cost_per_unit,
+ valid_from=sa.func.now(),
+ valid_to=None, # <-- New price is valid
+ created=sa.func.now(),
+ comment=data.pricing_unit_cost_update.comment,
+ modified=sa.func.now(),
+ )
+ .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id)
+ )
+ result = await conn.execute(insert_stmt)
+ row = result.first()
+ if row is None:
+ raise PricingUnitCostNotCreatedDBError(data=data)
+
+
+#################################
+# Pricing unit-costs
+#################################
+
+
+async def get_pricing_unit_cost_by_id(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ pricing_unit_cost_id: PricingUnitCostId,
+) -> PricingUnitCostsDB:
+ async with transaction_context(engine, connection) as conn:
+ query = sa.select(
+ resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id,
+ resource_tracker_pricing_unit_costs.c.pricing_plan_id,
+ resource_tracker_pricing_unit_costs.c.pricing_plan_key,
+ resource_tracker_pricing_unit_costs.c.pricing_unit_id,
+ resource_tracker_pricing_unit_costs.c.pricing_unit_name,
+ resource_tracker_pricing_unit_costs.c.cost_per_unit,
+ resource_tracker_pricing_unit_costs.c.valid_from,
+ resource_tracker_pricing_unit_costs.c.valid_to,
+ resource_tracker_pricing_unit_costs.c.created,
+ resource_tracker_pricing_unit_costs.c.comment,
+ resource_tracker_pricing_unit_costs.c.modified,
+ ).where(
+ resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id
+ == pricing_unit_cost_id
+ )
+ result = await conn.execute(query)
+
+ row = result.first()
+ if row is None:
+ raise PricingUnitCostDoesNotExistsDBError(
+ pricing_unit_cost_id=pricing_unit_cost_id
+ )
+ return PricingUnitCostsDB.model_validate(row)
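
One non-obvious piece of the module above is the _version helper: service versions are compared by casting "x.y.z" strings to integer arrays, so Postgres compares them numerically rather than lexicographically (e.g. 1.10.0 sorts after 1.9.0). A standalone sketch of the same expression that just prints the compiled SQL; it mirrors the helper above and assumes nothing beyond SQLAlchemy and its PostgreSQL dialect.

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import ARRAY, INTEGER


def _version(column_or_value):
    # "1.10.0" -> string_to_array(...) cast to INTEGER[]; integer arrays compare
    # element-wise, which gives the ordering wanted for service versions.
    return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER))


if __name__ == "__main__":
    expr = _version("1.10.0") > _version("1.9.0")
    print(expr.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}))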
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/__init__.py
deleted file mode 100644
index 93da4003de3..00000000000
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from ._base import BaseRepository
-
-__all__: tuple[str, ...] = ("BaseRepository",)
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/_base.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/_base.py
deleted file mode 100644
index 4a20b37c735..00000000000
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/_base.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from dataclasses import dataclass
-
-from sqlalchemy.ext.asyncio import AsyncEngine
-
-
-@dataclass
-class BaseRepository:
- """
- Repositories are pulled at every request
- """
-
- db_engine: AsyncEngine
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/resource_tracker.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/resource_tracker.py
deleted file mode 100644
index 46439f26e38..00000000000
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/repositories/resource_tracker.py
+++ /dev/null
@@ -1,1382 +0,0 @@
-import logging
-from datetime import datetime
-from decimal import Decimal
-from typing import cast
-
-import sqlalchemy as sa
-from models_library.api_schemas_resource_usage_tracker.credit_transactions import (
- WalletTotalCredits,
-)
-from models_library.api_schemas_storage import S3BucketName
-from models_library.products import ProductName
-from models_library.resource_tracker import (
- CreditClassification,
- CreditTransactionId,
- CreditTransactionStatus,
- PricingPlanCreate,
- PricingPlanId,
- PricingPlanUpdate,
- PricingUnitCostId,
- PricingUnitId,
- PricingUnitWithCostCreate,
- PricingUnitWithCostUpdate,
- ServiceRunId,
- ServiceRunStatus,
-)
-from models_library.rest_ordering import OrderBy, OrderDirection
-from models_library.services import ServiceKey, ServiceVersion
-from models_library.users import UserID
-from models_library.wallets import WalletID
-from pydantic import PositiveInt
-from simcore_postgres_database.models.projects_tags import projects_tags
-from simcore_postgres_database.models.resource_tracker_credit_transactions import (
- resource_tracker_credit_transactions,
-)
-from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import (
- resource_tracker_pricing_plan_to_service,
-)
-from simcore_postgres_database.models.resource_tracker_pricing_plans import (
- resource_tracker_pricing_plans,
-)
-from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import (
- resource_tracker_pricing_unit_costs,
-)
-from simcore_postgres_database.models.resource_tracker_pricing_units import (
- resource_tracker_pricing_units,
-)
-from simcore_postgres_database.models.resource_tracker_service_runs import (
- resource_tracker_service_runs,
-)
-from simcore_postgres_database.models.tags import tags
-from sqlalchemy.dialects.postgresql import ARRAY, INTEGER
-
-from .....exceptions.errors import (
- CreditTransactionNotCreatedDBError,
- PricingPlanAndPricingUnitCombinationDoesNotExistsDBError,
- PricingPlanDoesNotExistsDBError,
- PricingPlanNotCreatedDBError,
- PricingPlanToServiceNotCreatedDBError,
- PricingUnitCostDoesNotExistsDBError,
- PricingUnitCostNotCreatedDBError,
- PricingUnitNotCreatedDBError,
- ServiceRunNotCreatedDBError,
-)
-from .....models.credit_transactions import (
- CreditTransactionCreate,
- CreditTransactionCreditsAndStatusUpdate,
- CreditTransactionCreditsUpdate,
-)
-from .....models.pricing_plans import (
- PricingPlansDB,
- PricingPlansWithServiceDefaultPlanDB,
- PricingPlanToServiceDB,
-)
-from .....models.pricing_unit_costs import PricingUnitCostsDB
-from .....models.pricing_units import PricingUnitsDB
-from .....models.service_runs import (
- OsparcCreditsAggregatedByServiceKeyDB,
- ServiceRunCreate,
- ServiceRunDB,
- ServiceRunForCheckDB,
- ServiceRunLastHeartbeatUpdate,
- ServiceRunStoppedAtUpdate,
- ServiceRunWithCreditsDB,
-)
-from ._base import BaseRepository
-
-_logger = logging.getLogger(__name__)
-
-
-class ResourceTrackerRepository(
- BaseRepository
-): # pylint: disable=too-many-public-methods
- ###############
- # Service Run
- ###############
-
- async def create_service_run(self, data: ServiceRunCreate) -> ServiceRunId:
- async with self.db_engine.begin() as conn:
- insert_stmt = (
- resource_tracker_service_runs.insert()
- .values(
- product_name=data.product_name,
- service_run_id=data.service_run_id,
- wallet_id=data.wallet_id,
- wallet_name=data.wallet_name,
- pricing_plan_id=data.pricing_plan_id,
- pricing_unit_id=data.pricing_unit_id,
- pricing_unit_cost_id=data.pricing_unit_cost_id,
- pricing_unit_cost=data.pricing_unit_cost,
- simcore_user_agent=data.simcore_user_agent,
- user_id=data.user_id,
- user_email=data.user_email,
- project_id=f"{data.project_id}",
- project_name=data.project_name,
- node_id=f"{data.node_id}",
- node_name=data.node_name,
- parent_project_id=f"{data.parent_project_id}",
- root_parent_project_id=f"{data.root_parent_project_id}",
- root_parent_project_name=data.root_parent_project_name,
- parent_node_id=f"{data.parent_node_id}",
- root_parent_node_id=f"{data.root_parent_node_id}",
- service_key=data.service_key,
- service_version=data.service_version,
- service_type=data.service_type,
- service_resources=data.service_resources,
- service_additional_metadata=data.service_additional_metadata,
- started_at=data.started_at,
- stopped_at=None,
- service_run_status=ServiceRunStatus.RUNNING,
- modified=sa.func.now(),
- last_heartbeat_at=data.last_heartbeat_at,
- )
- .returning(resource_tracker_service_runs.c.service_run_id)
- )
- result = await conn.execute(insert_stmt)
- row = result.first()
- if row is None:
- raise ServiceRunNotCreatedDBError(data=data)
- return cast(ServiceRunId, row[0])
-
- async def update_service_run_last_heartbeat(
- self, data: ServiceRunLastHeartbeatUpdate
- ) -> ServiceRunDB | None:
- async with self.db_engine.begin() as conn:
- update_stmt = (
- resource_tracker_service_runs.update()
- .values(
- modified=sa.func.now(),
- last_heartbeat_at=data.last_heartbeat_at,
- missed_heartbeat_counter=0,
- )
- .where(
- (
- resource_tracker_service_runs.c.service_run_id
- == data.service_run_id
- )
- & (
- resource_tracker_service_runs.c.service_run_status
- == ServiceRunStatus.RUNNING
- )
- & (
- resource_tracker_service_runs.c.last_heartbeat_at
- <= data.last_heartbeat_at
- )
- )
- .returning(sa.literal_column("*"))
- )
- result = await conn.execute(update_stmt)
- row = result.first()
- if row is None:
- return None
- return ServiceRunDB.model_validate(row)
-
- async def update_service_run_stopped_at(
- self, data: ServiceRunStoppedAtUpdate
- ) -> ServiceRunDB | None:
- async with self.db_engine.begin() as conn:
- update_stmt = (
- resource_tracker_service_runs.update()
- .values(
- modified=sa.func.now(),
- stopped_at=data.stopped_at,
- service_run_status=data.service_run_status,
- service_run_status_msg=data.service_run_status_msg,
- )
- .where(
- (
- resource_tracker_service_runs.c.service_run_id
- == data.service_run_id
- )
- & (
- resource_tracker_service_runs.c.service_run_status
- == ServiceRunStatus.RUNNING
- )
- )
- .returning(sa.literal_column("*"))
- )
- result = await conn.execute(update_stmt)
- row = result.first()
- if row is None:
- return None
- return ServiceRunDB.model_validate(row)
-
- async def get_service_run_by_id(
- self, service_run_id: ServiceRunId
- ) -> ServiceRunDB | None:
- async with self.db_engine.begin() as conn:
- stmt = sa.select(resource_tracker_service_runs).where(
- resource_tracker_service_runs.c.service_run_id == service_run_id
- )
- result = await conn.execute(stmt)
- row = result.first()
- if row is None:
- return None
- return ServiceRunDB.model_validate(row)
-
- _project_tags_subquery = (
- sa.select(
- projects_tags.c.project_uuid_for_rut,
- sa.func.array_agg(tags.c.name).label("project_tags"),
- )
- .select_from(projects_tags.join(tags, projects_tags.c.tag_id == tags.c.id))
- .group_by(projects_tags.c.project_uuid_for_rut)
- ).subquery("project_tags_subquery")
-
- async def list_service_runs_by_product_and_user_and_wallet(
- self,
- product_name: ProductName,
- *,
- user_id: UserID | None,
- wallet_id: WalletID | None,
- offset: int,
- limit: int,
- service_run_status: ServiceRunStatus | None = None,
- started_from: datetime | None = None,
- started_until: datetime | None = None,
- order_by: OrderBy | None = None,
- ) -> list[ServiceRunWithCreditsDB]:
- async with self.db_engine.begin() as conn:
- query = (
- sa.select(
- resource_tracker_service_runs.c.product_name,
- resource_tracker_service_runs.c.service_run_id,
- resource_tracker_service_runs.c.wallet_id,
- resource_tracker_service_runs.c.wallet_name,
- resource_tracker_service_runs.c.pricing_plan_id,
- resource_tracker_service_runs.c.pricing_unit_id,
- resource_tracker_service_runs.c.pricing_unit_cost_id,
- resource_tracker_service_runs.c.pricing_unit_cost,
- resource_tracker_service_runs.c.user_id,
- resource_tracker_service_runs.c.user_email,
- resource_tracker_service_runs.c.project_id,
- resource_tracker_service_runs.c.project_name,
- resource_tracker_service_runs.c.node_id,
- resource_tracker_service_runs.c.node_name,
- resource_tracker_service_runs.c.parent_project_id,
- resource_tracker_service_runs.c.root_parent_project_id,
- resource_tracker_service_runs.c.root_parent_project_name,
- resource_tracker_service_runs.c.parent_node_id,
- resource_tracker_service_runs.c.root_parent_node_id,
- resource_tracker_service_runs.c.service_key,
- resource_tracker_service_runs.c.service_version,
- resource_tracker_service_runs.c.service_type,
- resource_tracker_service_runs.c.service_resources,
- resource_tracker_service_runs.c.started_at,
- resource_tracker_service_runs.c.stopped_at,
- resource_tracker_service_runs.c.service_run_status,
- resource_tracker_service_runs.c.modified,
- resource_tracker_service_runs.c.last_heartbeat_at,
- resource_tracker_service_runs.c.service_run_status_msg,
- resource_tracker_service_runs.c.missed_heartbeat_counter,
- resource_tracker_credit_transactions.c.osparc_credits,
- resource_tracker_credit_transactions.c.transaction_status,
- sa.func.coalesce(
- self._project_tags_subquery.c.project_tags,
- sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)),
- ).label("project_tags"),
- )
- .select_from(
- resource_tracker_service_runs.join(
- resource_tracker_credit_transactions,
- (
- resource_tracker_service_runs.c.product_name
- == resource_tracker_credit_transactions.c.product_name
- )
- & (
- resource_tracker_service_runs.c.service_run_id
- == resource_tracker_credit_transactions.c.service_run_id
- ),
- isouter=True,
- ).join(
- self._project_tags_subquery,
- resource_tracker_service_runs.c.project_id
- == self._project_tags_subquery.c.project_uuid_for_rut,
- isouter=True,
- )
- )
- .where(resource_tracker_service_runs.c.product_name == product_name)
- .offset(offset)
- .limit(limit)
- )
-
- if user_id:
- query = query.where(resource_tracker_service_runs.c.user_id == user_id)
- if wallet_id:
- query = query.where(
- resource_tracker_service_runs.c.wallet_id == wallet_id
- )
- if service_run_status:
- query = query.where(
- resource_tracker_service_runs.c.service_run_status
- == service_run_status
- )
- if started_from:
- query = query.where(
- sa.func.DATE(resource_tracker_service_runs.c.started_at)
- >= started_from.date()
- )
- if started_until:
- query = query.where(
- sa.func.DATE(resource_tracker_service_runs.c.started_at)
- <= started_until.date()
- )
-
- if order_by:
- if order_by.direction == OrderDirection.ASC:
- query = query.order_by(sa.asc(order_by.field))
- else:
- query = query.order_by(sa.desc(order_by.field))
- else:
- # Default ordering
- query = query.order_by(
- resource_tracker_service_runs.c.started_at.desc()
- )
-
- result = await conn.execute(query)
-
- return [
- ServiceRunWithCreditsDB.model_validate(row) for row in result.fetchall()
- ]
-
- async def get_osparc_credits_aggregated_by_service(
- self,
- product_name: ProductName,
- *,
- user_id: UserID | None,
- wallet_id: WalletID,
- offset: int,
- limit: int,
- started_from: datetime | None = None,
- started_until: datetime | None = None,
- ) -> tuple[int, list[OsparcCreditsAggregatedByServiceKeyDB]]:
- async with self.db_engine.begin() as conn:
- base_query = (
- sa.select(
- resource_tracker_service_runs.c.service_key,
- sa.func.SUM(
- resource_tracker_credit_transactions.c.osparc_credits
- ).label("osparc_credits"),
- sa.func.SUM(
- sa.func.round(
- (
- sa.func.extract(
- "epoch",
- resource_tracker_service_runs.c.stopped_at,
- )
- - sa.func.extract(
- "epoch",
- resource_tracker_service_runs.c.started_at,
- )
- )
- / 3600,
- 2,
- )
- ).label("running_time_in_hours"),
- )
- .select_from(
- resource_tracker_service_runs.join(
- resource_tracker_credit_transactions,
- (
- resource_tracker_service_runs.c.product_name
- == resource_tracker_credit_transactions.c.product_name
- )
- & (
- resource_tracker_service_runs.c.service_run_id
- == resource_tracker_credit_transactions.c.service_run_id
- ),
- isouter=True,
- )
- )
- .where(
- (resource_tracker_service_runs.c.product_name == product_name)
- & (
- resource_tracker_credit_transactions.c.transaction_status
- == CreditTransactionStatus.BILLED
- )
- & (
- resource_tracker_credit_transactions.c.transaction_classification
- == CreditClassification.DEDUCT_SERVICE_RUN
- )
- & (resource_tracker_credit_transactions.c.wallet_id == wallet_id)
- )
- .group_by(resource_tracker_service_runs.c.service_key)
- )
-
- if user_id:
- base_query = base_query.where(
- resource_tracker_service_runs.c.user_id == user_id
- )
- if started_from:
- base_query = base_query.where(
- sa.func.DATE(resource_tracker_service_runs.c.started_at)
- >= started_from.date()
- )
- if started_until:
- base_query = base_query.where(
- sa.func.DATE(resource_tracker_service_runs.c.started_at)
- <= started_until.date()
- )
-
- subquery = base_query.subquery()
- count_query = sa.select(sa.func.count()).select_from(subquery)
- count_result = await conn.execute(count_query)
-
- # Default ordering and pagination
- list_query = (
- base_query.order_by(resource_tracker_service_runs.c.service_key.asc())
- .offset(offset)
- .limit(limit)
- )
- list_result = await conn.execute(list_query)
-
- return (
- cast(int, count_result.scalar()),
- [
- OsparcCreditsAggregatedByServiceKeyDB.model_validate(row)
- for row in list_result.fetchall()
- ],
- )
-
- async def export_service_runs_table_to_s3(
- self,
- product_name: ProductName,
- s3_bucket_name: S3BucketName,
- s3_key: str,
- s3_region: str,
- *,
- user_id: UserID | None,
- wallet_id: WalletID | None,
- started_from: datetime | None = None,
- started_until: datetime | None = None,
- order_by: OrderBy | None = None,
- ):
- async with self.db_engine.begin() as conn:
- query = (
- sa.select(
- resource_tracker_service_runs.c.product_name,
- resource_tracker_service_runs.c.service_run_id,
- resource_tracker_service_runs.c.wallet_name,
- resource_tracker_service_runs.c.user_email,
- resource_tracker_service_runs.c.root_parent_project_name.label(
- "project_name"
- ),
- resource_tracker_service_runs.c.node_name,
- resource_tracker_service_runs.c.service_key,
- resource_tracker_service_runs.c.service_version,
- resource_tracker_service_runs.c.service_type,
- resource_tracker_service_runs.c.started_at,
- resource_tracker_service_runs.c.stopped_at,
- resource_tracker_credit_transactions.c.osparc_credits,
- resource_tracker_credit_transactions.c.transaction_status,
- sa.func.coalesce(
- self._project_tags_subquery.c.project_tags,
- sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)),
- ).label("project_tags"),
- )
- .select_from(
- resource_tracker_service_runs.join(
- resource_tracker_credit_transactions,
- resource_tracker_service_runs.c.service_run_id
- == resource_tracker_credit_transactions.c.service_run_id,
- isouter=True,
- ).join(
- self._project_tags_subquery,
- resource_tracker_service_runs.c.project_id
- == self._project_tags_subquery.c.project_uuid_for_rut,
- isouter=True,
- )
- )
- .where(resource_tracker_service_runs.c.product_name == product_name)
- )
-
- if user_id:
- query = query.where(resource_tracker_service_runs.c.user_id == user_id)
- if wallet_id:
- query = query.where(
- resource_tracker_service_runs.c.wallet_id == wallet_id
- )
- if started_from:
- query = query.where(
- sa.func.DATE(resource_tracker_service_runs.c.started_at)
- >= started_from.date()
- )
- if started_until:
- query = query.where(
- sa.func.DATE(resource_tracker_service_runs.c.started_at)
- <= started_until.date()
- )
-
- if order_by:
- if order_by.direction == OrderDirection.ASC:
- query = query.order_by(sa.asc(order_by.field))
- else:
- query = query.order_by(sa.desc(order_by.field))
- else:
- # Default ordering
- query = query.order_by(
- resource_tracker_service_runs.c.started_at.desc()
- )
-
- compiled_query = (
- str(query.compile(compile_kwargs={"literal_binds": True}))
- .replace("\n", "")
- .replace("'", "''")
- )
-
- result = await conn.execute(
- sa.DDL(
- f"""
- SELECT * from aws_s3.query_export_to_s3('{compiled_query}',
- aws_commons.create_s3_uri('{s3_bucket_name}', '{s3_key}', '{s3_region}'), 'format csv, HEADER true');
- """ # noqa: S608
- )
- )
- row = result.first()
- assert row
- _logger.info(
- "Rows uploaded %s, Files uploaded %s, Bytes uploaded %s",
- row[0],
- row[1],
- row[2],
- )
-
- async def total_service_runs_by_product_and_user_and_wallet(
- self,
- product_name: ProductName,
- *,
- user_id: UserID | None,
- wallet_id: WalletID | None,
- service_run_status: ServiceRunStatus | None = None,
- started_from: datetime | None = None,
- started_until: datetime | None = None,
- ) -> PositiveInt:
- async with self.db_engine.begin() as conn:
- query = (
- sa.select(sa.func.count())
- .select_from(resource_tracker_service_runs)
- .where(resource_tracker_service_runs.c.product_name == product_name)
- )
-
- if user_id:
- query = query.where(resource_tracker_service_runs.c.user_id == user_id)
- if wallet_id:
- query = query.where(
- resource_tracker_service_runs.c.wallet_id == wallet_id
- )
- if started_from:
- query = query.where(
- sa.func.DATE(resource_tracker_service_runs.c.started_at)
- >= started_from.date()
- )
- if started_until:
- query = query.where(
- sa.func.DATE(resource_tracker_service_runs.c.started_at)
- <= started_until.date()
- )
- if service_run_status:
- query = query.where(
- resource_tracker_service_runs.c.service_run_status
- == service_run_status
- )
-
- result = await conn.execute(query)
- row = result.first()
- return cast(PositiveInt, row[0]) if row else 0
-
- ### For Background check purpose:
-
- async def list_service_runs_with_running_status_across_all_products(
- self,
- *,
- offset: int,
- limit: int,
- ) -> list[ServiceRunForCheckDB]:
- async with self.db_engine.begin() as conn:
- query = (
- sa.select(
- resource_tracker_service_runs.c.service_run_id,
- resource_tracker_service_runs.c.last_heartbeat_at,
- resource_tracker_service_runs.c.missed_heartbeat_counter,
- resource_tracker_service_runs.c.modified,
- )
- .where(
- resource_tracker_service_runs.c.service_run_status
- == ServiceRunStatus.RUNNING
- )
- .order_by(resource_tracker_service_runs.c.started_at.desc()) # NOTE:
- .offset(offset)
- .limit(limit)
- )
- result = await conn.execute(query)
-
- return [ServiceRunForCheckDB.model_validate(row) for row in result.fetchall()]
-
- async def total_service_runs_with_running_status_across_all_products(
- self,
- ) -> PositiveInt:
- async with self.db_engine.begin() as conn:
- query = (
- sa.select(sa.func.count())
- .select_from(resource_tracker_service_runs)
- .where(
- resource_tracker_service_runs.c.service_run_status
- == ServiceRunStatus.RUNNING
- )
- )
- result = await conn.execute(query)
- row = result.first()
- return cast(PositiveInt, row[0]) if row else 0
-
- async def update_service_missed_heartbeat_counter(
- self,
- service_run_id: ServiceRunId,
- last_heartbeat_at: datetime,
- missed_heartbeat_counter: int,
- ) -> ServiceRunDB | None:
- async with self.db_engine.begin() as conn:
- update_stmt = (
- resource_tracker_service_runs.update()
- .values(
- modified=sa.func.now(),
- missed_heartbeat_counter=missed_heartbeat_counter,
- )
- .where(
- (resource_tracker_service_runs.c.service_run_id == service_run_id)
- & (
- resource_tracker_service_runs.c.service_run_status
- == ServiceRunStatus.RUNNING
- )
- & (
- resource_tracker_service_runs.c.last_heartbeat_at
- == last_heartbeat_at
- )
- )
- .returning(sa.literal_column("*"))
- )
-
- result = await conn.execute(update_stmt)
- row = result.first()
- if row is None:
- return None
- return ServiceRunDB.model_validate(row)
-
- #################################
- # Credit transactions
- #################################
-
- async def create_credit_transaction(
- self, data: CreditTransactionCreate
- ) -> CreditTransactionId:
- async with self.db_engine.begin() as conn:
- insert_stmt = (
- resource_tracker_credit_transactions.insert()
- .values(
- product_name=data.product_name,
- wallet_id=data.wallet_id,
- wallet_name=data.wallet_name,
- pricing_plan_id=data.pricing_plan_id,
- pricing_unit_id=data.pricing_unit_id,
- pricing_unit_cost_id=data.pricing_unit_cost_id,
- user_id=data.user_id,
- user_email=data.user_email,
- osparc_credits=data.osparc_credits,
- transaction_status=data.transaction_status,
- transaction_classification=data.transaction_classification,
- service_run_id=data.service_run_id,
- payment_transaction_id=data.payment_transaction_id,
- created=data.created_at,
- last_heartbeat_at=data.last_heartbeat_at,
- modified=sa.func.now(),
- )
- .returning(resource_tracker_credit_transactions.c.transaction_id)
- )
- result = await conn.execute(insert_stmt)
- row = result.first()
- if row is None:
- raise CreditTransactionNotCreatedDBError(data=data)
- return cast(CreditTransactionId, row[0])
-
- async def update_credit_transaction_credits(
- self, data: CreditTransactionCreditsUpdate
- ) -> CreditTransactionId | None:
- async with self.db_engine.begin() as conn:
- update_stmt = (
- resource_tracker_credit_transactions.update()
- .values(
- modified=sa.func.now(),
- osparc_credits=data.osparc_credits,
- last_heartbeat_at=data.last_heartbeat_at,
- )
- .where(
- (
- resource_tracker_credit_transactions.c.service_run_id
- == data.service_run_id
- )
- & (
- resource_tracker_credit_transactions.c.transaction_status
- == CreditTransactionStatus.PENDING
- )
- & (
- resource_tracker_credit_transactions.c.last_heartbeat_at
- <= data.last_heartbeat_at
- )
- )
- .returning(resource_tracker_credit_transactions.c.service_run_id)
- )
- result = await conn.execute(update_stmt)
- row = result.first()
- if row is None:
- return None
- return cast(CreditTransactionId | None, row[0])
-
- async def update_credit_transaction_credits_and_status(
- self, data: CreditTransactionCreditsAndStatusUpdate
- ) -> CreditTransactionId | None:
- async with self.db_engine.begin() as conn:
- update_stmt = (
- resource_tracker_credit_transactions.update()
- .values(
- modified=sa.func.now(),
- osparc_credits=data.osparc_credits,
- transaction_status=data.transaction_status,
- )
- .where(
- (
- resource_tracker_credit_transactions.c.service_run_id
- == data.service_run_id
- )
- & (
- resource_tracker_credit_transactions.c.transaction_status
- == CreditTransactionStatus.PENDING
- )
- )
- .returning(resource_tracker_credit_transactions.c.service_run_id)
- )
- result = await conn.execute(update_stmt)
- row = result.first()
- if row is None:
- return None
- return cast(CreditTransactionId | None, row[0])
-
- async def sum_credit_transactions_by_product_and_wallet(
- self, product_name: ProductName, wallet_id: WalletID
- ) -> WalletTotalCredits:
- async with self.db_engine.begin() as conn:
- sum_stmt = sa.select(
- sa.func.sum(resource_tracker_credit_transactions.c.osparc_credits)
- ).where(
- (resource_tracker_credit_transactions.c.product_name == product_name)
- & (resource_tracker_credit_transactions.c.wallet_id == wallet_id)
- & (
- resource_tracker_credit_transactions.c.transaction_status.in_(
- [
- CreditTransactionStatus.BILLED,
- CreditTransactionStatus.PENDING,
- ]
- )
- )
- )
- result = await conn.execute(sum_stmt)
- row = result.first()
- if row is None or row[0] is None:
- return WalletTotalCredits(
- wallet_id=wallet_id, available_osparc_credits=Decimal(0)
- )
- return WalletTotalCredits(wallet_id=wallet_id, available_osparc_credits=row[0])
-
- #################################
- # Pricing plans
- #################################
-
- async def list_active_service_pricing_plans_by_product_and_service(
- self,
- product_name: ProductName,
- service_key: ServiceKey,
- service_version: ServiceVersion,
- ) -> list[PricingPlansWithServiceDefaultPlanDB]:
- # NOTE: consilidate with utils_services_environmnets.py
- def _version(column_or_value):
- # converts version value string to array[integer] that can be compared
- return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER))
-
- async with self.db_engine.begin() as conn:
- # Firstly find the correct service version
- query = (
- sa.select(
- resource_tracker_pricing_plan_to_service.c.service_key,
- resource_tracker_pricing_plan_to_service.c.service_version,
- )
- .select_from(
- resource_tracker_pricing_plan_to_service.join(
- resource_tracker_pricing_plans,
- (
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id
- == resource_tracker_pricing_plans.c.pricing_plan_id
- ),
- )
- )
- .where(
- (
- _version(
- resource_tracker_pricing_plan_to_service.c.service_version
- )
- <= _version(service_version)
- )
- & (
- resource_tracker_pricing_plan_to_service.c.service_key
- == service_key
- )
- & (resource_tracker_pricing_plans.c.product_name == product_name)
- & (resource_tracker_pricing_plans.c.is_active.is_(True))
- )
- .order_by(
- _version(
- resource_tracker_pricing_plan_to_service.c.service_version
- ).desc()
- )
- .limit(1)
- )
- result = await conn.execute(query)
- row = result.first()
- if row is None:
- return []
- latest_service_key, latest_service_version = row
- # Now choose all pricing plans connected to this service
- query = (
- sa.select(
- resource_tracker_pricing_plans.c.pricing_plan_id,
- resource_tracker_pricing_plans.c.display_name,
- resource_tracker_pricing_plans.c.description,
- resource_tracker_pricing_plans.c.classification,
- resource_tracker_pricing_plans.c.is_active,
- resource_tracker_pricing_plans.c.created,
- resource_tracker_pricing_plans.c.pricing_plan_key,
- resource_tracker_pricing_plan_to_service.c.service_default_plan,
- )
- .select_from(
- resource_tracker_pricing_plan_to_service.join(
- resource_tracker_pricing_plans,
- (
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id
- == resource_tracker_pricing_plans.c.pricing_plan_id
- ),
- )
- )
- .where(
- (
- _version(
- resource_tracker_pricing_plan_to_service.c.service_version
- )
- == _version(latest_service_version)
- )
- & (
- resource_tracker_pricing_plan_to_service.c.service_key
- == latest_service_key
- )
- & (resource_tracker_pricing_plans.c.product_name == product_name)
- & (resource_tracker_pricing_plans.c.is_active.is_(True))
- )
- .order_by(
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc()
- )
- )
- result = await conn.execute(query)
-
- return [
- PricingPlansWithServiceDefaultPlanDB.model_validate(row)
- for row in result.fetchall()
- ]
-
- async def get_pricing_plan(
- self, product_name: ProductName, pricing_plan_id: PricingPlanId
- ) -> PricingPlansDB:
- async with self.db_engine.begin() as conn:
- select_stmt = sa.select(
- resource_tracker_pricing_plans.c.pricing_plan_id,
- resource_tracker_pricing_plans.c.display_name,
- resource_tracker_pricing_plans.c.description,
- resource_tracker_pricing_plans.c.classification,
- resource_tracker_pricing_plans.c.is_active,
- resource_tracker_pricing_plans.c.created,
- resource_tracker_pricing_plans.c.pricing_plan_key,
- ).where(
- (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id)
- & (resource_tracker_pricing_plans.c.product_name == product_name)
- )
- result = await conn.execute(select_stmt)
- row = result.first()
- if row is None:
- raise PricingPlanDoesNotExistsDBError(pricing_plan_id=pricing_plan_id)
- return PricingPlansDB.model_validate(row)
-
- async def list_pricing_plans_by_product(
- self, product_name: ProductName
- ) -> list[PricingPlansDB]:
- async with self.db_engine.begin() as conn:
- select_stmt = sa.select(
- resource_tracker_pricing_plans.c.pricing_plan_id,
- resource_tracker_pricing_plans.c.display_name,
- resource_tracker_pricing_plans.c.description,
- resource_tracker_pricing_plans.c.classification,
- resource_tracker_pricing_plans.c.is_active,
- resource_tracker_pricing_plans.c.created,
- resource_tracker_pricing_plans.c.pricing_plan_key,
- ).where(resource_tracker_pricing_plans.c.product_name == product_name)
- result = await conn.execute(select_stmt)
-
- return [PricingPlansDB.model_validate(row) for row in result.fetchall()]
-
- async def create_pricing_plan(self, data: PricingPlanCreate) -> PricingPlansDB:
- async with self.db_engine.begin() as conn:
- insert_stmt = (
- resource_tracker_pricing_plans.insert()
- .values(
- product_name=data.product_name,
- display_name=data.display_name,
- description=data.description,
- classification=data.classification,
- is_active=True,
- created=sa.func.now(),
- modified=sa.func.now(),
- pricing_plan_key=data.pricing_plan_key,
- )
- .returning(
- *[
- resource_tracker_pricing_plans.c.pricing_plan_id,
- resource_tracker_pricing_plans.c.display_name,
- resource_tracker_pricing_plans.c.description,
- resource_tracker_pricing_plans.c.classification,
- resource_tracker_pricing_plans.c.is_active,
- resource_tracker_pricing_plans.c.created,
- resource_tracker_pricing_plans.c.pricing_plan_key,
- ]
- )
- )
- result = await conn.execute(insert_stmt)
- row = result.first()
- if row is None:
- raise PricingPlanNotCreatedDBError(data=data)
- return PricingPlansDB.model_validate(row)
-
- async def update_pricing_plan(
- self, product_name: ProductName, data: PricingPlanUpdate
- ) -> PricingPlansDB | None:
- async with self.db_engine.begin() as conn:
- update_stmt = (
- resource_tracker_pricing_plans.update()
- .values(
- display_name=data.display_name,
- description=data.description,
- is_active=data.is_active,
- modified=sa.func.now(),
- )
- .where(
- (
- resource_tracker_pricing_plans.c.pricing_plan_id
- == data.pricing_plan_id
- )
- & (resource_tracker_pricing_plans.c.product_name == product_name)
- )
- .returning(
- *[
- resource_tracker_pricing_plans.c.pricing_plan_id,
- resource_tracker_pricing_plans.c.display_name,
- resource_tracker_pricing_plans.c.description,
- resource_tracker_pricing_plans.c.classification,
- resource_tracker_pricing_plans.c.is_active,
- resource_tracker_pricing_plans.c.created,
- resource_tracker_pricing_plans.c.pricing_plan_key,
- ]
- )
- )
- result = await conn.execute(update_stmt)
- row = result.first()
- if row is None:
- return None
- return PricingPlansDB.model_validate(row)
-
- #################################
- # Pricing plan to service
- #################################
-
- async def list_connected_services_to_pricing_plan_by_pricing_plan(
- self, product_name: ProductName, pricing_plan_id: PricingPlanId
- ) -> list[PricingPlanToServiceDB]:
- async with self.db_engine.begin() as conn:
- query = (
- sa.select(
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id,
- resource_tracker_pricing_plan_to_service.c.service_key,
- resource_tracker_pricing_plan_to_service.c.service_version,
- resource_tracker_pricing_plan_to_service.c.created,
- )
- .select_from(
- resource_tracker_pricing_plan_to_service.join(
- resource_tracker_pricing_plans,
- (
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id
- == resource_tracker_pricing_plans.c.pricing_plan_id
- ),
- )
- )
- .where(
- (resource_tracker_pricing_plans.c.product_name == product_name)
- & (
- resource_tracker_pricing_plans.c.pricing_plan_id
- == pricing_plan_id
- )
- )
- .order_by(
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc()
- )
- )
- result = await conn.execute(query)
-
- return [
- PricingPlanToServiceDB.model_validate(row) for row in result.fetchall()
- ]
-
- async def upsert_service_to_pricing_plan(
- self,
- product_name: ProductName,
- pricing_plan_id: PricingPlanId,
- service_key: ServiceKey,
- service_version: ServiceVersion,
- ) -> PricingPlanToServiceDB:
- async with self.db_engine.begin() as conn:
- query = (
- sa.select(
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id,
- resource_tracker_pricing_plan_to_service.c.service_key,
- resource_tracker_pricing_plan_to_service.c.service_version,
- resource_tracker_pricing_plan_to_service.c.created,
- )
- .select_from(
- resource_tracker_pricing_plan_to_service.join(
- resource_tracker_pricing_plans,
- (
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id
- == resource_tracker_pricing_plans.c.pricing_plan_id
- ),
- )
- )
- .where(
- (resource_tracker_pricing_plans.c.product_name == product_name)
- & (
- resource_tracker_pricing_plans.c.pricing_plan_id
- == pricing_plan_id
- )
- & (
- resource_tracker_pricing_plan_to_service.c.service_key
- == service_key
- )
- & (
- resource_tracker_pricing_plan_to_service.c.service_version
- == service_version
- )
- )
- )
- result = await conn.execute(query)
- row = result.first()
-
- if row is not None:
- delete_stmt = resource_tracker_pricing_plan_to_service.delete().where(
- (
- resource_tracker_pricing_plans.c.pricing_plan_id
- == pricing_plan_id
- )
- & (
- resource_tracker_pricing_plan_to_service.c.service_key
- == service_key
- )
- & (
- resource_tracker_pricing_plan_to_service.c.service_version
- == service_version
- )
- )
- await conn.execute(delete_stmt)
-
- insert_stmt = (
- resource_tracker_pricing_plan_to_service.insert()
- .values(
- pricing_plan_id=pricing_plan_id,
- service_key=service_key,
- service_version=service_version,
- created=sa.func.now(),
- modified=sa.func.now(),
- service_default_plan=True,
- )
- .returning(
- *[
- resource_tracker_pricing_plan_to_service.c.pricing_plan_id,
- resource_tracker_pricing_plan_to_service.c.service_key,
- resource_tracker_pricing_plan_to_service.c.service_version,
- resource_tracker_pricing_plan_to_service.c.created,
- ]
- )
- )
- result = await conn.execute(insert_stmt)
- row = result.first()
- if row is None:
- raise PricingPlanToServiceNotCreatedDBError(
- data=f"pricing_plan_id {pricing_plan_id}, service_key {service_key}, service_version {service_version}"
- )
- return PricingPlanToServiceDB.model_validate(row)
-
- #################################
- # Pricing units
- #################################
-
- @staticmethod
- def _pricing_units_select_stmt():
- return sa.select(
- resource_tracker_pricing_units.c.pricing_unit_id,
- resource_tracker_pricing_units.c.pricing_plan_id,
- resource_tracker_pricing_units.c.unit_name,
- resource_tracker_pricing_units.c.unit_extra_info,
- resource_tracker_pricing_units.c.default,
- resource_tracker_pricing_units.c.specific_info,
- resource_tracker_pricing_units.c.created,
- resource_tracker_pricing_units.c.modified,
- resource_tracker_pricing_unit_costs.c.cost_per_unit.label(
- "current_cost_per_unit"
- ),
- resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id.label(
- "current_cost_per_unit_id"
- ),
- )
-
- async def list_pricing_units_by_pricing_plan(
- self,
- pricing_plan_id: PricingPlanId,
- ) -> list[PricingUnitsDB]:
- async with self.db_engine.begin() as conn:
- query = (
- self._pricing_units_select_stmt()
- .select_from(
- resource_tracker_pricing_units.join(
- resource_tracker_pricing_unit_costs,
- (
- (
- resource_tracker_pricing_units.c.pricing_plan_id
- == resource_tracker_pricing_unit_costs.c.pricing_plan_id
- )
- & (
- resource_tracker_pricing_units.c.pricing_unit_id
- == resource_tracker_pricing_unit_costs.c.pricing_unit_id
- )
- ),
- )
- )
- .where(
- (
- resource_tracker_pricing_units.c.pricing_plan_id
- == pricing_plan_id
- )
- & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None))
- )
- .order_by(resource_tracker_pricing_unit_costs.c.cost_per_unit.asc())
- )
- result = await conn.execute(query)
-
- return [PricingUnitsDB.model_validate(row) for row in result.fetchall()]
-
- async def get_valid_pricing_unit(
- self,
- product_name: ProductName,
- pricing_plan_id: PricingPlanId,
- pricing_unit_id: PricingUnitId,
- ) -> PricingUnitsDB:
- async with self.db_engine.begin() as conn:
- query = (
- self._pricing_units_select_stmt()
- .select_from(
- resource_tracker_pricing_units.join(
- resource_tracker_pricing_unit_costs,
- (
- (
- resource_tracker_pricing_units.c.pricing_plan_id
- == resource_tracker_pricing_unit_costs.c.pricing_plan_id
- )
- & (
- resource_tracker_pricing_units.c.pricing_unit_id
- == resource_tracker_pricing_unit_costs.c.pricing_unit_id
- )
- ),
- ).join(
- resource_tracker_pricing_plans,
- (
- resource_tracker_pricing_plans.c.pricing_plan_id
- == resource_tracker_pricing_units.c.pricing_plan_id
- ),
- )
- )
- .where(
- (
- resource_tracker_pricing_units.c.pricing_plan_id
- == pricing_plan_id
- )
- & (
- resource_tracker_pricing_units.c.pricing_unit_id
- == pricing_unit_id
- )
- & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None))
- & (resource_tracker_pricing_plans.c.product_name == product_name)
- )
- )
- result = await conn.execute(query)
-
- row = result.first()
- if row is None:
- raise PricingPlanAndPricingUnitCombinationDoesNotExistsDBError(
- pricing_plan_id=pricing_plan_id,
- pricing_unit_id=pricing_unit_id,
- product_name=product_name,
- )
- return PricingUnitsDB.model_validate(row)
-
- async def create_pricing_unit_with_cost(
- self, data: PricingUnitWithCostCreate, pricing_plan_key: str
- ) -> tuple[PricingUnitId, PricingUnitCostId]:
- async with self.db_engine.begin() as conn:
- # pricing units table
- insert_stmt = (
- resource_tracker_pricing_units.insert()
- .values(
- pricing_plan_id=data.pricing_plan_id,
- unit_name=data.unit_name,
- unit_extra_info=data.unit_extra_info.model_dump(),
- default=data.default,
- specific_info=data.specific_info.model_dump(),
- created=sa.func.now(),
- modified=sa.func.now(),
- )
- .returning(resource_tracker_pricing_units.c.pricing_unit_id)
- )
- result = await conn.execute(insert_stmt)
- row = result.first()
- if row is None:
- raise PricingUnitNotCreatedDBError(data=data)
- _pricing_unit_id = row[0]
-
- # pricing unit cost table
- insert_stmt = (
- resource_tracker_pricing_unit_costs.insert()
- .values(
- pricing_plan_id=data.pricing_plan_id,
- pricing_plan_key=pricing_plan_key,
- pricing_unit_id=_pricing_unit_id,
- pricing_unit_name=data.unit_name,
- cost_per_unit=data.cost_per_unit,
- valid_from=sa.func.now(),
- valid_to=None,
- created=sa.func.now(),
- comment=data.comment,
- modified=sa.func.now(),
- )
- .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id)
- )
- result = await conn.execute(insert_stmt)
- row = result.first()
- if row is None:
- raise PricingUnitCostNotCreatedDBError(data=data)
- _pricing_unit_cost_id = row[0]
-
- return (_pricing_unit_id, _pricing_unit_cost_id)
-
- async def update_pricing_unit_with_cost(
- self, data: PricingUnitWithCostUpdate, pricing_plan_key: str
- ) -> None:
- async with self.db_engine.begin() as conn:
- # pricing units table
- update_stmt = (
- resource_tracker_pricing_units.update()
- .values(
- unit_name=data.unit_name,
- unit_extra_info=data.unit_extra_info.model_dump(),
- default=data.default,
- specific_info=data.specific_info.model_dump(),
- modified=sa.func.now(),
- )
- .where(
- resource_tracker_pricing_units.c.pricing_unit_id
- == data.pricing_unit_id
- )
- .returning(resource_tracker_pricing_units.c.pricing_unit_id)
- )
- await conn.execute(update_stmt)
-
- # If price change, then we update pricing unit cost table
- if data.pricing_unit_cost_update:
- # Firstly we close previous price
- update_stmt = (
- resource_tracker_pricing_unit_costs.update()
- .values(
- valid_to=sa.func.now(), # <-- Closing previous price
- modified=sa.func.now(),
- )
- .where(
- resource_tracker_pricing_unit_costs.c.pricing_unit_id
- == data.pricing_unit_id
- )
- .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_id)
- )
- result = await conn.execute(update_stmt)
-
- # Then we create a new price
- insert_stmt = (
- resource_tracker_pricing_unit_costs.insert()
- .values(
- pricing_plan_id=data.pricing_plan_id,
- pricing_plan_key=pricing_plan_key,
- pricing_unit_id=data.pricing_unit_id,
- pricing_unit_name=data.unit_name,
- cost_per_unit=data.pricing_unit_cost_update.cost_per_unit,
- valid_from=sa.func.now(),
- valid_to=None, # <-- New price is valid
- created=sa.func.now(),
- comment=data.pricing_unit_cost_update.comment,
- modified=sa.func.now(),
- )
- .returning(
- resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id
- )
- )
- result = await conn.execute(insert_stmt)
- row = result.first()
- if row is None:
- raise PricingUnitCostNotCreatedDBError(data=data)
-
- #################################
- # Pricing unit-costs
- #################################
-
- async def get_pricing_unit_cost_by_id(
- self, pricing_unit_cost_id: PricingUnitCostId
- ) -> PricingUnitCostsDB:
- async with self.db_engine.begin() as conn:
- query = sa.select(
- resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id,
- resource_tracker_pricing_unit_costs.c.pricing_plan_id,
- resource_tracker_pricing_unit_costs.c.pricing_plan_key,
- resource_tracker_pricing_unit_costs.c.pricing_unit_id,
- resource_tracker_pricing_unit_costs.c.pricing_unit_name,
- resource_tracker_pricing_unit_costs.c.cost_per_unit,
- resource_tracker_pricing_unit_costs.c.valid_from,
- resource_tracker_pricing_unit_costs.c.valid_to,
- resource_tracker_pricing_unit_costs.c.created,
- resource_tracker_pricing_unit_costs.c.comment,
- resource_tracker_pricing_unit_costs.c.modified,
- ).where(
- resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id
- == pricing_unit_cost_id
- )
- result = await conn.execute(query)
-
- row = result.first()
- if row is None:
- raise PricingUnitCostDoesNotExistsDBError(
- pricing_unit_cost_id=pricing_unit_cost_id
- )
- return PricingUnitCostsDB.model_validate(row)
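The repository methods deleted above are re-homed as module-level functions in the new modules/db/*_db.py files introduced below; instead of constructing a ResourceTrackerRepository, callers now pass the AsyncEngine (and optionally an open AsyncConnection) explicitly. A minimal calling sketch under that convention — the DSN and the run id are placeholders, and in the running service the engine comes from app.state.engine:

import asyncio

from sqlalchemy.ext.asyncio import create_async_engine

from simcore_service_resource_usage_tracker.services.modules.db import service_runs_db


async def _show_service_run(service_run_id: str) -> None:
    # Placeholder DSN; the service creates its engine at startup (app.state.engine).
    engine = create_async_engine("postgresql+asyncpg://user:pass@localhost/simcoredb")
    try:
        # No connection is passed, so the function opens its own transaction
        # via transaction_context(engine, None).
        run = await service_runs_db.get_service_run_by_id(
            engine, service_run_id=service_run_id
        )
        print(run.service_run_status if run else "not found")
    finally:
        await engine.dispose()


if __name__ == "__main__":
    asyncio.run(_show_service_run("some-service-run-id"))  # illustrative run id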
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py
new file mode 100644
index 00000000000..a4ea563803d
--- /dev/null
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py
@@ -0,0 +1,622 @@
+# pylint: disable=too-many-arguments
+import logging
+from datetime import datetime
+from typing import cast
+
+import sqlalchemy as sa
+from models_library.api_schemas_storage import S3BucketName
+from models_library.products import ProductName
+from models_library.resource_tracker import (
+ CreditClassification,
+ CreditTransactionStatus,
+ ServiceRunId,
+ ServiceRunStatus,
+)
+from models_library.rest_ordering import OrderBy, OrderDirection
+from models_library.users import UserID
+from models_library.wallets import WalletID
+from pydantic import PositiveInt
+from simcore_postgres_database.models.projects_tags import projects_tags
+from simcore_postgres_database.models.resource_tracker_credit_transactions import (
+ resource_tracker_credit_transactions,
+)
+from simcore_postgres_database.models.resource_tracker_service_runs import (
+ resource_tracker_service_runs,
+)
+from simcore_postgres_database.models.tags import tags
+from simcore_postgres_database.utils_repos import transaction_context
+from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine
+
+from ....exceptions.errors import ServiceRunNotCreatedDBError
+from ....models.service_runs import (
+ OsparcCreditsAggregatedByServiceKeyDB,
+ ServiceRunCreate,
+ ServiceRunDB,
+ ServiceRunForCheckDB,
+ ServiceRunLastHeartbeatUpdate,
+ ServiceRunStoppedAtUpdate,
+ ServiceRunWithCreditsDB,
+)
+
+_logger = logging.getLogger(__name__)
+
+
+async def create_service_run(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: ServiceRunCreate,
+) -> ServiceRunId:
+ async with transaction_context(engine, connection) as conn:
+ insert_stmt = (
+ resource_tracker_service_runs.insert()
+ .values(
+ product_name=data.product_name,
+ service_run_id=data.service_run_id,
+ wallet_id=data.wallet_id,
+ wallet_name=data.wallet_name,
+ pricing_plan_id=data.pricing_plan_id,
+ pricing_unit_id=data.pricing_unit_id,
+ pricing_unit_cost_id=data.pricing_unit_cost_id,
+ pricing_unit_cost=data.pricing_unit_cost,
+ simcore_user_agent=data.simcore_user_agent,
+ user_id=data.user_id,
+ user_email=data.user_email,
+ project_id=f"{data.project_id}",
+ project_name=data.project_name,
+ node_id=f"{data.node_id}",
+ node_name=data.node_name,
+ parent_project_id=f"{data.parent_project_id}",
+ root_parent_project_id=f"{data.root_parent_project_id}",
+ root_parent_project_name=data.root_parent_project_name,
+ parent_node_id=f"{data.parent_node_id}",
+ root_parent_node_id=f"{data.root_parent_node_id}",
+ service_key=data.service_key,
+ service_version=data.service_version,
+ service_type=data.service_type,
+ service_resources=data.service_resources,
+ service_additional_metadata=data.service_additional_metadata,
+ started_at=data.started_at,
+ stopped_at=None,
+ service_run_status=ServiceRunStatus.RUNNING,
+ modified=sa.func.now(),
+ last_heartbeat_at=data.last_heartbeat_at,
+ )
+ .returning(resource_tracker_service_runs.c.service_run_id)
+ )
+ result = await conn.execute(insert_stmt)
+ row = result.first()
+ if row is None:
+ raise ServiceRunNotCreatedDBError(data=data)
+ return cast(ServiceRunId, row[0])
+
+
+async def update_service_run_last_heartbeat(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: ServiceRunLastHeartbeatUpdate,
+) -> ServiceRunDB | None:
+ async with transaction_context(engine, connection) as conn:
+ update_stmt = (
+ resource_tracker_service_runs.update()
+ .values(
+ modified=sa.func.now(),
+ last_heartbeat_at=data.last_heartbeat_at,
+ missed_heartbeat_counter=0,
+ )
+ .where(
+ (resource_tracker_service_runs.c.service_run_id == data.service_run_id)
+ & (
+ resource_tracker_service_runs.c.service_run_status
+ == ServiceRunStatus.RUNNING
+ )
+ & (
+ resource_tracker_service_runs.c.last_heartbeat_at
+ <= data.last_heartbeat_at
+ )
+ )
+ .returning(sa.literal_column("*"))
+ )
+ result = await conn.execute(update_stmt)
+ row = result.first()
+ if row is None:
+ return None
+ return ServiceRunDB.model_validate(row)
+
+
+async def update_service_run_stopped_at(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ data: ServiceRunStoppedAtUpdate,
+) -> ServiceRunDB | None:
+ async with transaction_context(engine, connection) as conn:
+ update_stmt = (
+ resource_tracker_service_runs.update()
+ .values(
+ modified=sa.func.now(),
+ stopped_at=data.stopped_at,
+ service_run_status=data.service_run_status,
+ service_run_status_msg=data.service_run_status_msg,
+ )
+ .where(
+ (resource_tracker_service_runs.c.service_run_id == data.service_run_id)
+ & (
+ resource_tracker_service_runs.c.service_run_status
+ == ServiceRunStatus.RUNNING
+ )
+ )
+ .returning(sa.literal_column("*"))
+ )
+ result = await conn.execute(update_stmt)
+ row = result.first()
+ if row is None:
+ return None
+ return ServiceRunDB.model_validate(row)
+
+
+async def get_service_run_by_id(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ service_run_id: ServiceRunId,
+) -> ServiceRunDB | None:
+ async with transaction_context(engine, connection) as conn:
+ stmt = sa.select(resource_tracker_service_runs).where(
+ resource_tracker_service_runs.c.service_run_id == service_run_id
+ )
+ result = await conn.execute(stmt)
+ row = result.first()
+ if row is None:
+ return None
+ return ServiceRunDB.model_validate(row)
+
+
+_project_tags_subquery = (
+ sa.select(
+ projects_tags.c.project_uuid_for_rut,
+ sa.func.array_agg(tags.c.name).label("project_tags"),
+ )
+ .select_from(projects_tags.join(tags, projects_tags.c.tag_id == tags.c.id))
+ .group_by(projects_tags.c.project_uuid_for_rut)
+).subquery("project_tags_subquery")
+
+
+async def list_service_runs_by_product_and_user_and_wallet(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ user_id: UserID | None,
+ wallet_id: WalletID | None,
+ offset: int,
+ limit: int,
+ service_run_status: ServiceRunStatus | None = None,
+ started_from: datetime | None = None,
+ started_until: datetime | None = None,
+ order_by: OrderBy | None = None,
+) -> list[ServiceRunWithCreditsDB]:
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ sa.select(
+ resource_tracker_service_runs.c.product_name,
+ resource_tracker_service_runs.c.service_run_id,
+ resource_tracker_service_runs.c.wallet_id,
+ resource_tracker_service_runs.c.wallet_name,
+ resource_tracker_service_runs.c.pricing_plan_id,
+ resource_tracker_service_runs.c.pricing_unit_id,
+ resource_tracker_service_runs.c.pricing_unit_cost_id,
+ resource_tracker_service_runs.c.pricing_unit_cost,
+ resource_tracker_service_runs.c.user_id,
+ resource_tracker_service_runs.c.user_email,
+ resource_tracker_service_runs.c.project_id,
+ resource_tracker_service_runs.c.project_name,
+ resource_tracker_service_runs.c.node_id,
+ resource_tracker_service_runs.c.node_name,
+ resource_tracker_service_runs.c.parent_project_id,
+ resource_tracker_service_runs.c.root_parent_project_id,
+ resource_tracker_service_runs.c.root_parent_project_name,
+ resource_tracker_service_runs.c.parent_node_id,
+ resource_tracker_service_runs.c.root_parent_node_id,
+ resource_tracker_service_runs.c.service_key,
+ resource_tracker_service_runs.c.service_version,
+ resource_tracker_service_runs.c.service_type,
+ resource_tracker_service_runs.c.service_resources,
+ resource_tracker_service_runs.c.started_at,
+ resource_tracker_service_runs.c.stopped_at,
+ resource_tracker_service_runs.c.service_run_status,
+ resource_tracker_service_runs.c.modified,
+ resource_tracker_service_runs.c.last_heartbeat_at,
+ resource_tracker_service_runs.c.service_run_status_msg,
+ resource_tracker_service_runs.c.missed_heartbeat_counter,
+ resource_tracker_credit_transactions.c.osparc_credits,
+ resource_tracker_credit_transactions.c.transaction_status,
+ sa.func.coalesce(
+ _project_tags_subquery.c.project_tags,
+ sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)),
+ ).label("project_tags"),
+ )
+ .select_from(
+ resource_tracker_service_runs.join(
+ resource_tracker_credit_transactions,
+ (
+ resource_tracker_service_runs.c.product_name
+ == resource_tracker_credit_transactions.c.product_name
+ )
+ & (
+ resource_tracker_service_runs.c.service_run_id
+ == resource_tracker_credit_transactions.c.service_run_id
+ ),
+ isouter=True,
+ ).join(
+ _project_tags_subquery,
+ resource_tracker_service_runs.c.project_id
+ == _project_tags_subquery.c.project_uuid_for_rut,
+ isouter=True,
+ )
+ )
+ .where(resource_tracker_service_runs.c.product_name == product_name)
+ .offset(offset)
+ .limit(limit)
+ )
+
+ if user_id:
+ query = query.where(resource_tracker_service_runs.c.user_id == user_id)
+ if wallet_id:
+ query = query.where(resource_tracker_service_runs.c.wallet_id == wallet_id)
+ if service_run_status:
+ query = query.where(
+ resource_tracker_service_runs.c.service_run_status == service_run_status
+ )
+ if started_from:
+ query = query.where(
+ sa.func.DATE(resource_tracker_service_runs.c.started_at)
+ >= started_from.date()
+ )
+ if started_until:
+ query = query.where(
+ sa.func.DATE(resource_tracker_service_runs.c.started_at)
+ <= started_until.date()
+ )
+
+ if order_by:
+ if order_by.direction == OrderDirection.ASC:
+ query = query.order_by(sa.asc(order_by.field))
+ else:
+ query = query.order_by(sa.desc(order_by.field))
+ else:
+ # Default ordering
+ query = query.order_by(resource_tracker_service_runs.c.started_at.desc())
+
+ result = await conn.execute(query)
+
+ return [ServiceRunWithCreditsDB.model_validate(row) for row in result.fetchall()]
+
+
+async def get_osparc_credits_aggregated_by_service(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ user_id: UserID | None,
+ wallet_id: WalletID,
+ offset: int,
+ limit: int,
+ started_from: datetime | None = None,
+ started_until: datetime | None = None,
+) -> tuple[int, list[OsparcCreditsAggregatedByServiceKeyDB]]:
+ async with transaction_context(engine, connection) as conn:
+ base_query = (
+ sa.select(
+ resource_tracker_service_runs.c.service_key,
+ sa.func.SUM(
+ resource_tracker_credit_transactions.c.osparc_credits
+ ).label("osparc_credits"),
+ sa.func.SUM(
+ sa.func.round(
+ (
+ sa.func.extract(
+ "epoch",
+ resource_tracker_service_runs.c.stopped_at,
+ )
+ - sa.func.extract(
+ "epoch",
+ resource_tracker_service_runs.c.started_at,
+ )
+ )
+ / 3600,
+ 2,
+ )
+ ).label("running_time_in_hours"),
+ )
+ .select_from(
+ resource_tracker_service_runs.join(
+ resource_tracker_credit_transactions,
+ (
+ resource_tracker_service_runs.c.product_name
+ == resource_tracker_credit_transactions.c.product_name
+ )
+ & (
+ resource_tracker_service_runs.c.service_run_id
+ == resource_tracker_credit_transactions.c.service_run_id
+ ),
+ isouter=True,
+ )
+ )
+ .where(
+ (resource_tracker_service_runs.c.product_name == product_name)
+ & (
+ resource_tracker_credit_transactions.c.transaction_status
+ == CreditTransactionStatus.BILLED
+ )
+ & (
+ resource_tracker_credit_transactions.c.transaction_classification
+ == CreditClassification.DEDUCT_SERVICE_RUN
+ )
+ & (resource_tracker_credit_transactions.c.wallet_id == wallet_id)
+ )
+ .group_by(resource_tracker_service_runs.c.service_key)
+ )
+
+ if user_id:
+ base_query = base_query.where(
+ resource_tracker_service_runs.c.user_id == user_id
+ )
+ if started_from:
+ base_query = base_query.where(
+ sa.func.DATE(resource_tracker_service_runs.c.started_at)
+ >= started_from.date()
+ )
+ if started_until:
+ base_query = base_query.where(
+ sa.func.DATE(resource_tracker_service_runs.c.started_at)
+ <= started_until.date()
+ )
+
+ subquery = base_query.subquery()
+ count_query = sa.select(sa.func.count()).select_from(subquery)
+ count_result = await conn.execute(count_query)
+
+ # Default ordering and pagination
+ list_query = (
+ base_query.order_by(resource_tracker_service_runs.c.service_key.asc())
+ .offset(offset)
+ .limit(limit)
+ )
+ list_result = await conn.execute(list_query)
+
+ return (
+ cast(int, count_result.scalar()),
+ [
+ OsparcCreditsAggregatedByServiceKeyDB.model_validate(row)
+ for row in list_result.fetchall()
+ ],
+ )
+
+
+async def export_service_runs_table_to_s3(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ s3_bucket_name: S3BucketName,
+ s3_key: str,
+ s3_region: str,
+ user_id: UserID | None,
+ wallet_id: WalletID | None,
+ started_from: datetime | None = None,
+ started_until: datetime | None = None,
+ order_by: OrderBy | None = None,
+):
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ sa.select(
+ resource_tracker_service_runs.c.product_name,
+ resource_tracker_service_runs.c.service_run_id,
+ resource_tracker_service_runs.c.wallet_name,
+ resource_tracker_service_runs.c.user_email,
+ resource_tracker_service_runs.c.root_parent_project_name.label(
+ "project_name"
+ ),
+ resource_tracker_service_runs.c.node_name,
+ resource_tracker_service_runs.c.service_key,
+ resource_tracker_service_runs.c.service_version,
+ resource_tracker_service_runs.c.service_type,
+ resource_tracker_service_runs.c.started_at,
+ resource_tracker_service_runs.c.stopped_at,
+ resource_tracker_credit_transactions.c.osparc_credits,
+ resource_tracker_credit_transactions.c.transaction_status,
+ sa.func.coalesce(
+ _project_tags_subquery.c.project_tags,
+ sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)),
+ ).label("project_tags"),
+ )
+ .select_from(
+ resource_tracker_service_runs.join(
+ resource_tracker_credit_transactions,
+ resource_tracker_service_runs.c.service_run_id
+ == resource_tracker_credit_transactions.c.service_run_id,
+ isouter=True,
+ ).join(
+ _project_tags_subquery,
+ resource_tracker_service_runs.c.project_id
+ == _project_tags_subquery.c.project_uuid_for_rut,
+ isouter=True,
+ )
+ )
+ .where(resource_tracker_service_runs.c.product_name == product_name)
+ )
+
+ if user_id:
+ query = query.where(resource_tracker_service_runs.c.user_id == user_id)
+ if wallet_id:
+ query = query.where(resource_tracker_service_runs.c.wallet_id == wallet_id)
+ if started_from:
+ query = query.where(
+ sa.func.DATE(resource_tracker_service_runs.c.started_at)
+ >= started_from.date()
+ )
+ if started_until:
+ query = query.where(
+ sa.func.DATE(resource_tracker_service_runs.c.started_at)
+ <= started_until.date()
+ )
+
+ if order_by:
+ if order_by.direction == OrderDirection.ASC:
+ query = query.order_by(sa.asc(order_by.field))
+ else:
+ query = query.order_by(sa.desc(order_by.field))
+ else:
+ # Default ordering
+ query = query.order_by(resource_tracker_service_runs.c.started_at.desc())
+
+ compiled_query = (
+ str(query.compile(compile_kwargs={"literal_binds": True}))
+ .replace("\n", "")
+ .replace("'", "''")
+ )
+
+ result = await conn.execute(
+ sa.DDL(
+ f"""
+ SELECT * from aws_s3.query_export_to_s3('{compiled_query}',
+ aws_commons.create_s3_uri('{s3_bucket_name}', '{s3_key}', '{s3_region}'), 'format csv, HEADER true');
+ """ # noqa: S608
+ )
+ )
+ row = result.first()
+ assert row
+ _logger.info(
+ "Rows uploaded %s, Files uploaded %s, Bytes uploaded %s",
+ row[0],
+ row[1],
+ row[2],
+ )
+
+
+async def total_service_runs_by_product_and_user_and_wallet(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: ProductName,
+ user_id: UserID | None,
+ wallet_id: WalletID | None,
+ service_run_status: ServiceRunStatus | None = None,
+ started_from: datetime | None = None,
+ started_until: datetime | None = None,
+) -> PositiveInt:
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ sa.select(sa.func.count())
+ .select_from(resource_tracker_service_runs)
+ .where(resource_tracker_service_runs.c.product_name == product_name)
+ )
+
+ if user_id:
+ query = query.where(resource_tracker_service_runs.c.user_id == user_id)
+ if wallet_id:
+ query = query.where(resource_tracker_service_runs.c.wallet_id == wallet_id)
+ if started_from:
+ query = query.where(
+ sa.func.DATE(resource_tracker_service_runs.c.started_at)
+ >= started_from.date()
+ )
+ if started_until:
+ query = query.where(
+ sa.func.DATE(resource_tracker_service_runs.c.started_at)
+ <= started_until.date()
+ )
+ if service_run_status:
+ query = query.where(
+ resource_tracker_service_runs.c.service_run_status == service_run_status
+ )
+
+ result = await conn.execute(query)
+ row = result.first()
+ return cast(PositiveInt, row[0]) if row else 0
+
+
+### For background check purposes:
+
+
+async def list_service_runs_with_running_status_across_all_products(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ offset: int,
+ limit: int,
+) -> list[ServiceRunForCheckDB]:
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ sa.select(
+ resource_tracker_service_runs.c.service_run_id,
+ resource_tracker_service_runs.c.last_heartbeat_at,
+ resource_tracker_service_runs.c.missed_heartbeat_counter,
+ resource_tracker_service_runs.c.modified,
+ )
+ .where(
+ resource_tracker_service_runs.c.service_run_status
+ == ServiceRunStatus.RUNNING
+ )
+            .order_by(resource_tracker_service_runs.c.started_at.desc())
+ .offset(offset)
+ .limit(limit)
+ )
+ result = await conn.execute(query)
+
+ return [ServiceRunForCheckDB.model_validate(row) for row in result.fetchall()]
+
+
+async def total_service_runs_with_running_status_across_all_products(
+ engine: AsyncEngine, connection: AsyncConnection | None = None
+) -> PositiveInt:
+ async with transaction_context(engine, connection) as conn:
+ query = (
+ sa.select(sa.func.count())
+ .select_from(resource_tracker_service_runs)
+ .where(
+ resource_tracker_service_runs.c.service_run_status
+ == ServiceRunStatus.RUNNING
+ )
+ )
+ result = await conn.execute(query)
+ row = result.first()
+ return cast(PositiveInt, row[0]) if row else 0
+
+
+async def update_service_missed_heartbeat_counter(
+ engine: AsyncEngine,
+ connection: AsyncConnection | None = None,
+ *,
+ service_run_id: ServiceRunId,
+ last_heartbeat_at: datetime,
+ missed_heartbeat_counter: int,
+) -> ServiceRunDB | None:
+ async with transaction_context(engine, connection) as conn:
+ update_stmt = (
+ resource_tracker_service_runs.update()
+ .values(
+ modified=sa.func.now(),
+ missed_heartbeat_counter=missed_heartbeat_counter,
+ )
+ .where(
+ (resource_tracker_service_runs.c.service_run_id == service_run_id)
+ & (
+ resource_tracker_service_runs.c.service_run_status
+ == ServiceRunStatus.RUNNING
+ )
+ & (
+ resource_tracker_service_runs.c.last_heartbeat_at
+ == last_heartbeat_at
+ )
+ )
+ .returning(sa.literal_column("*"))
+ )
+
+ result = await conn.execute(update_stmt)
+ row = result.first()
+ if row is None:
+ return None
+ return ServiceRunDB.model_validate(row)
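Because every function above takes an optional AsyncConnection, several of these calls can be composed into a single transaction by opening it once and handing the same connection to each call; when the argument is omitted, each function runs in its own transaction. A sketch of that composition, assuming (as the optional connection parameter suggests) that transaction_context reuses a supplied connection; the wrapper name is illustrative:

from simcore_postgres_database.utils_repos import transaction_context
from sqlalchemy.ext.asyncio import AsyncEngine

from simcore_service_resource_usage_tracker.models.service_runs import (
    ServiceRunStoppedAtUpdate,
)
from simcore_service_resource_usage_tracker.services.modules.db import service_runs_db


async def _stop_and_reload(engine: AsyncEngine, update: ServiceRunStoppedAtUpdate):
    # Both statements share one connection, so they commit (or roll back) together.
    async with transaction_context(engine) as conn:
        stopped = await service_runs_db.update_service_run_stopped_at(
            engine, conn, data=update
        )
        reloaded = await service_runs_db.get_service_run_by_id(
            engine, conn, service_run_id=update.service_run_id
        )
    return stopped, reloaded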
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py
index 9c3dc38bef3..ed34c334187 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py
@@ -14,12 +14,13 @@
)
from models_library.services import ServiceKey, ServiceVersion
from pydantic import TypeAdapter
+from sqlalchemy.ext.asyncio import AsyncEngine
-from ..api.rest.dependencies import get_repository
+from ..api.rest.dependencies import get_resource_tracker_db_engine
from ..exceptions.errors import PricingPlanNotFoundForServiceError
from ..models.pricing_plans import PricingPlansDB, PricingPlanToServiceDB
from ..models.pricing_units import PricingUnitsDB
-from .modules.db.repositories.resource_tracker import ResourceTrackerRepository
+from .modules.db import pricing_plans_db
async def _create_pricing_plan_get(
@@ -52,12 +53,15 @@ async def get_service_default_pricing_plan(
product_name: ProductName,
service_key: ServiceKey,
service_version: ServiceVersion,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> PricingPlanGet:
- active_service_pricing_plans = await resource_tracker_repo.list_active_service_pricing_plans_by_product_and_service(
- product_name, service_key, service_version
+ active_service_pricing_plans = (
+ await pricing_plans_db.list_active_service_pricing_plans_by_product_and_service(
+ db_engine,
+ product_name=product_name,
+ service_key=service_key,
+ service_version=service_version,
+ )
)
default_pricing_plan = None
@@ -71,10 +75,8 @@ async def get_service_default_pricing_plan(
service_key=service_key, service_version=service_version
)
- pricing_plan_unit_db = (
- await resource_tracker_repo.list_pricing_units_by_pricing_plan(
- pricing_plan_id=default_pricing_plan.pricing_plan_id
- )
+ pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan(
+ db_engine, pricing_plan_id=default_pricing_plan.pricing_plan_id
)
return await _create_pricing_plan_get(default_pricing_plan, pricing_plan_unit_db)
@@ -83,14 +85,12 @@ async def get_service_default_pricing_plan(
async def list_connected_services_to_pricing_plan_by_pricing_plan(
product_name: ProductName,
pricing_plan_id: PricingPlanId,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
):
output_list: list[
PricingPlanToServiceDB
- ] = await resource_tracker_repo.list_connected_services_to_pricing_plan_by_pricing_plan(
- product_name=product_name, pricing_plan_id=pricing_plan_id
+ ] = await pricing_plans_db.list_connected_services_to_pricing_plan_by_pricing_plan(
+ db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id
)
return [
TypeAdapter(PricingPlanToServiceGet).validate_python(item.model_dump())
@@ -103,12 +103,11 @@ async def connect_service_to_pricing_plan(
pricing_plan_id: PricingPlanId,
service_key: ServiceKey,
service_version: ServiceVersion,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> PricingPlanToServiceGet:
output: PricingPlanToServiceDB = (
- await resource_tracker_repo.upsert_service_to_pricing_plan(
+ await pricing_plans_db.upsert_service_to_pricing_plan(
+ db_engine,
product_name=product_name,
pricing_plan_id=pricing_plan_id,
service_key=service_key,
@@ -120,14 +119,12 @@ async def connect_service_to_pricing_plan(
async def list_pricing_plans_by_product(
product_name: ProductName,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> list[PricingPlanGet]:
pricing_plans_list_db: list[
PricingPlansDB
- ] = await resource_tracker_repo.list_pricing_plans_by_product(
- product_name=product_name
+ ] = await pricing_plans_db.list_pricing_plans_by_product(
+ db_engine, product_name=product_name
)
return [
PricingPlanGet(
@@ -147,32 +144,24 @@ async def list_pricing_plans_by_product(
async def get_pricing_plan(
product_name: ProductName,
pricing_plan_id: PricingPlanId,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> PricingPlanGet:
- pricing_plan_db = await resource_tracker_repo.get_pricing_plan(
- product_name=product_name, pricing_plan_id=pricing_plan_id
+ pricing_plan_db = await pricing_plans_db.get_pricing_plan(
+ db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id
)
- pricing_plan_unit_db = (
- await resource_tracker_repo.list_pricing_units_by_pricing_plan(
- pricing_plan_id=pricing_plan_db.pricing_plan_id
- )
+ pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan(
+ db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id
)
return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db)
async def create_pricing_plan(
data: PricingPlanCreate,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> PricingPlanGet:
- pricing_plan_db = await resource_tracker_repo.create_pricing_plan(data=data)
- pricing_plan_unit_db = (
- await resource_tracker_repo.list_pricing_units_by_pricing_plan(
- pricing_plan_id=pricing_plan_db.pricing_plan_id
- )
+ pricing_plan_db = await pricing_plans_db.create_pricing_plan(db_engine, data=data)
+ pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan(
+ db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id
)
return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db)
@@ -180,24 +169,20 @@ async def create_pricing_plan(
async def update_pricing_plan(
product_name: ProductName,
data: PricingPlanUpdate,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> PricingPlanGet:
# Check whether pricing plan exists
- pricing_plan_db = await resource_tracker_repo.get_pricing_plan(
- product_name=product_name, pricing_plan_id=data.pricing_plan_id
+ pricing_plan_db = await pricing_plans_db.get_pricing_plan(
+ db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id
)
# Update pricing plan
- pricing_plan_updated_db = await resource_tracker_repo.update_pricing_plan(
- product_name=product_name, data=data
+ pricing_plan_updated_db = await pricing_plans_db.update_pricing_plan(
+ db_engine, product_name=product_name, data=data
)
if pricing_plan_updated_db:
pricing_plan_db = pricing_plan_updated_db
- pricing_plan_unit_db = (
- await resource_tracker_repo.list_pricing_units_by_pricing_plan(
- pricing_plan_id=pricing_plan_db.pricing_plan_id
- )
+ pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan(
+ db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id
)
return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db)
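With the repository dependency replaced by get_resource_tracker_db_engine, any new handler in this module follows the same shape: inject the AsyncEngine and pass it to the pricing_plans_db functions. A hypothetical helper written against that convention — its name is illustrative, and the PricingPlanId/ProductName import locations are assumed to mirror the rest of the service:

from typing import Annotated

from fastapi import Depends
from models_library.products import ProductName
from models_library.resource_tracker import PricingPlanId
from sqlalchemy.ext.asyncio import AsyncEngine

from ..api.rest.dependencies import get_resource_tracker_db_engine
from .modules.db import pricing_plans_db


async def count_pricing_units(
    product_name: ProductName,
    pricing_plan_id: PricingPlanId,
    db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> int:
    # Raises if the plan does not exist for this product (same guard as update_pricing_plan)
    await pricing_plans_db.get_pricing_plan(
        db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id
    )
    units = await pricing_plans_db.list_pricing_units_by_pricing_plan(
        db_engine, pricing_plan_id=pricing_plan_id
    )
    return len(units)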
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py
index f2aee53dd80..0a1e72cad65 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py
@@ -11,21 +11,23 @@
PricingUnitWithCostCreate,
PricingUnitWithCostUpdate,
)
+from sqlalchemy.ext.asyncio import AsyncEngine
-from ..api.rest.dependencies import get_repository
-from .modules.db.repositories.resource_tracker import ResourceTrackerRepository
+from ..api.rest.dependencies import get_resource_tracker_db_engine
+from .modules.db import pricing_plans_db
async def get_pricing_unit(
product_name: ProductName,
pricing_plan_id: PricingPlanId,
pricing_unit_id: PricingUnitId,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> PricingUnitGet:
- pricing_unit = await resource_tracker_repo.get_valid_pricing_unit(
- product_name, pricing_plan_id, pricing_unit_id
+ pricing_unit = await pricing_plans_db.get_valid_pricing_unit(
+ db_engine,
+ product_name=product_name,
+ pricing_plan_id=pricing_plan_id,
+ pricing_unit_id=pricing_unit_id,
)
return PricingUnitGet(
@@ -42,21 +44,22 @@ async def get_pricing_unit(
async def create_pricing_unit(
product_name: ProductName,
data: PricingUnitWithCostCreate,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> PricingUnitGet:
# Check whether pricing plan exists
- pricing_plan_db = await resource_tracker_repo.get_pricing_plan(
- product_name=product_name, pricing_plan_id=data.pricing_plan_id
+ pricing_plan_db = await pricing_plans_db.get_pricing_plan(
+ db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id
)
# Create new pricing unit
- pricing_unit_id, _ = await resource_tracker_repo.create_pricing_unit_with_cost(
- data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key
+ pricing_unit_id, _ = await pricing_plans_db.create_pricing_unit_with_cost(
+ db_engine, data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key
)
- pricing_unit = await resource_tracker_repo.get_valid_pricing_unit(
- product_name, data.pricing_plan_id, pricing_unit_id
+ pricing_unit = await pricing_plans_db.get_valid_pricing_unit(
+ db_engine,
+ product_name=product_name,
+ pricing_plan_id=data.pricing_plan_id,
+ pricing_unit_id=pricing_unit_id,
)
return PricingUnitGet(
pricing_unit_id=pricing_unit.pricing_unit_id,
@@ -72,26 +75,30 @@ async def create_pricing_unit(
async def update_pricing_unit(
product_name: ProductName,
data: PricingUnitWithCostUpdate,
- resource_tracker_repo: Annotated[
- ResourceTrackerRepository, Depends(get_repository(ResourceTrackerRepository))
- ],
+ db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)],
) -> PricingUnitGet:
# Check whether pricing unit exists
- await resource_tracker_repo.get_valid_pricing_unit(
- product_name, data.pricing_plan_id, data.pricing_unit_id
+ await pricing_plans_db.get_valid_pricing_unit(
+ db_engine,
+ product_name=product_name,
+ pricing_plan_id=data.pricing_plan_id,
+ pricing_unit_id=data.pricing_unit_id,
)
# Get pricing plan
- pricing_plan_db = await resource_tracker_repo.get_pricing_plan(
- product_name, data.pricing_plan_id
+ pricing_plan_db = await pricing_plans_db.get_pricing_plan(
+ db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id
)
# Update pricing unit and cost
- await resource_tracker_repo.update_pricing_unit_with_cost(
- data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key
+ await pricing_plans_db.update_pricing_unit_with_cost(
+ db_engine, data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key
)
- pricing_unit = await resource_tracker_repo.get_valid_pricing_unit(
- product_name, data.pricing_plan_id, data.pricing_unit_id
+ pricing_unit = await pricing_plans_db.get_valid_pricing_unit(
+ db_engine,
+ product_name=product_name,
+ pricing_plan_id=data.pricing_plan_id,
+ pricing_unit_id=data.pricing_unit_id,
)
return PricingUnitGet(
pricing_unit_id=pricing_unit.pricing_unit_id,
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py
index 4907c84ecb1..8300ede8283 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py
@@ -21,6 +21,7 @@
)
from models_library.services import ServiceType
from pydantic import TypeAdapter
+from sqlalchemy.ext.asyncio import AsyncEngine
from ..models.credit_transactions import (
CreditTransactionCreate,
@@ -32,7 +33,7 @@
ServiceRunLastHeartbeatUpdate,
ServiceRunStoppedAtUpdate,
)
-from .modules.db.repositories.resource_tracker import ResourceTrackerRepository
+from .modules.db import credit_transactions_db, pricing_plans_db, service_runs_db
from .modules.rabbitmq import RabbitMQClient, get_rabbitmq_client
from .utils import (
compute_service_run_credit_costs,
@@ -53,24 +54,22 @@ async def process_message(app: FastAPI, data: bytes) -> bool:
rabbit_message.message_type,
rabbit_message.service_run_id,
)
- resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository(
- db_engine=app.state.engine
- )
+ _db_engine = app.state.engine
rabbitmq_client = get_rabbitmq_client(app)
await RABBIT_MSG_TYPE_TO_PROCESS_HANDLER[rabbit_message.message_type](
- resource_tracker_repo, rabbit_message, rabbitmq_client
+ _db_engine, rabbit_message, rabbitmq_client
)
return True
async def _process_start_event(
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
msg: RabbitResourceTrackingStartedMessage,
rabbitmq_client: RabbitMQClient,
):
- service_run_db = await resource_tracker_repo.get_service_run_by_id(
- service_run_id=msg.service_run_id
+ service_run_db = await service_runs_db.get_service_run_by_id(
+ db_engine, service_run_id=msg.service_run_id
)
if service_run_db:
        # NOTE: After we find out why sometimes RUT receives multiple start events and fix it, we can change it to log level `error`
@@ -90,8 +89,8 @@ async def _process_start_event(
)
pricing_unit_cost = None
if msg.pricing_unit_cost_id:
- pricing_unit_cost_db = await resource_tracker_repo.get_pricing_unit_cost_by_id(
- pricing_unit_cost_id=msg.pricing_unit_cost_id
+ pricing_unit_cost_db = await pricing_plans_db.get_pricing_unit_cost_by_id(
+ db_engine, pricing_unit_cost_id=msg.pricing_unit_cost_id
)
pricing_unit_cost = pricing_unit_cost_db.cost_per_unit
@@ -125,7 +124,9 @@ async def _process_start_event(
service_run_status=ServiceRunStatus.RUNNING,
last_heartbeat_at=msg.created_at,
)
- service_run_id = await resource_tracker_repo.create_service_run(create_service_run)
+ service_run_id = await service_runs_db.create_service_run(
+ db_engine, data=create_service_run
+ )
if msg.wallet_id and msg.wallet_name:
transaction_create = CreditTransactionCreate(
@@ -145,21 +146,23 @@ async def _process_start_event(
created_at=msg.created_at,
last_heartbeat_at=msg.created_at,
)
- await resource_tracker_repo.create_credit_transaction(transaction_create)
+ await credit_transactions_db.create_credit_transaction(
+ db_engine, data=transaction_create
+ )
# Publish wallet total credits to RabbitMQ
await sum_credit_transactions_and_publish_to_rabbitmq(
- resource_tracker_repo, rabbitmq_client, msg.product_name, msg.wallet_id
+ db_engine, rabbitmq_client, msg.product_name, msg.wallet_id
)
async def _process_heartbeat_event(
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
msg: RabbitResourceTrackingHeartbeatMessage,
rabbitmq_client: RabbitMQClient,
):
- service_run_db = await resource_tracker_repo.get_service_run_by_id(
- service_run_id=msg.service_run_id
+ service_run_db = await service_runs_db.get_service_run_by_id(
+ db_engine, service_run_id=msg.service_run_id
)
if not service_run_db:
_logger.error(
@@ -181,8 +184,8 @@ async def _process_heartbeat_event(
update_service_run_last_heartbeat = ServiceRunLastHeartbeatUpdate(
service_run_id=msg.service_run_id, last_heartbeat_at=msg.created_at
)
- running_service = await resource_tracker_repo.update_service_run_last_heartbeat(
- update_service_run_last_heartbeat
+ running_service = await service_runs_db.update_service_run_last_heartbeat(
+ db_engine, data=update_service_run_last_heartbeat
)
if running_service is None:
_logger.info("Nothing to update: %s", msg)
@@ -201,19 +204,19 @@ async def _process_heartbeat_event(
osparc_credits=make_negative(computed_credits),
last_heartbeat_at=msg.created_at,
)
- await resource_tracker_repo.update_credit_transaction_credits(
- update_credit_transaction
+ await credit_transactions_db.update_credit_transaction_credits(
+ db_engine, data=update_credit_transaction
)
# Publish wallet total credits to RabbitMQ
wallet_total_credits = await sum_credit_transactions_and_publish_to_rabbitmq(
- resource_tracker_repo,
+ db_engine,
rabbitmq_client,
running_service.product_name,
running_service.wallet_id,
)
if wallet_total_credits.available_osparc_credits < CreditsLimit.OUT_OF_CREDITS:
await publish_to_rabbitmq_wallet_credits_limit_reached(
- resource_tracker_repo,
+ db_engine,
rabbitmq_client,
product_name=running_service.product_name,
wallet_id=running_service.wallet_id,
@@ -223,12 +226,12 @@ async def _process_heartbeat_event(
async def _process_stop_event(
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
msg: RabbitResourceTrackingStoppedMessage,
rabbitmq_client: RabbitMQClient,
):
- service_run_db = await resource_tracker_repo.get_service_run_by_id(
- service_run_id=msg.service_run_id
+ service_run_db = await service_runs_db.get_service_run_by_id(
+ db_engine, service_run_id=msg.service_run_id
)
if not service_run_db:
        # NOTE: ANE/MD discussed. When the RUT receives a stop event and has not previously received any start or heartbeat event, it probably means that
@@ -262,8 +265,8 @@ async def _process_stop_event(
service_run_status_msg=_run_status_msg,
)
- running_service = await resource_tracker_repo.update_service_run_stopped_at(
- update_service_run_stopped_at
+ running_service = await service_runs_db.update_service_run_stopped_at(
+ db_engine, data=update_service_run_stopped_at
)
if running_service is None:
@@ -287,12 +290,12 @@ async def _process_stop_event(
else CreditTransactionStatus.NOT_BILLED
),
)
- await resource_tracker_repo.update_credit_transaction_credits_and_status(
- update_credit_transaction
+ await credit_transactions_db.update_credit_transaction_credits_and_status(
+ db_engine, data=update_credit_transaction
)
# Publish wallet total credits to RabbitMQ
await sum_credit_transactions_and_publish_to_rabbitmq(
- resource_tracker_repo,
+ db_engine,
rabbitmq_client,
running_service.product_name,
running_service.wallet_id,
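For orientation, a sketch of the shape the module-level helpers called above (service_runs_db, credit_transactions_db, pricing_plans_db) now have: free async functions that take the shared AsyncEngine as first argument plus keyword-only parameters and open a connection per call. The table and columns here are illustrative stand-ins, not the project's actual schema, which lives in simcore_postgres_database.models.

    import sqlalchemy as sa
    from sqlalchemy.ext.asyncio import AsyncEngine

    _metadata = sa.MetaData()
    _service_runs = sa.Table(  # illustrative stand-in for the real table definition
        "resource_tracker_service_runs",
        _metadata,
        sa.Column("service_run_id", sa.String, primary_key=True),
        sa.Column("service_run_status", sa.String),
    )


    async def get_service_run_by_id(engine: AsyncEngine, *, service_run_id: str):
        # one short-lived connection per call, borrowed from the shared engine
        async with engine.connect() as conn:
            result = await conn.execute(
                sa.select(_service_runs).where(
                    _service_runs.c.service_run_id == service_run_id
                )
            )
            return result.one_or_none()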
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py
index fff896c8ec0..b4d9127733e 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py
@@ -19,9 +19,10 @@
from models_library.users import UserID
from models_library.wallets import WalletID
from pydantic import AnyUrl, PositiveInt, TypeAdapter
+from sqlalchemy.ext.asyncio import AsyncEngine
from ..models.service_runs import ServiceRunWithCreditsDB
-from .modules.db.repositories.resource_tracker import ResourceTrackerRepository
+from .modules.db import service_runs_db
_PRESIGNED_LINK_EXPIRATION_SEC = 7200
@@ -29,7 +30,7 @@
async def list_service_runs(
user_id: UserID,
product_name: ProductName,
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
limit: int = 20,
offset: int = 0,
wallet_id: WalletID | None = None,
@@ -45,17 +46,21 @@ async def list_service_runs(
    # Situation when we want to see all usage of a specific user (e.g. for a non-billable product)
if wallet_id is None and access_all_wallet_usage is False:
- total_service_runs: PositiveInt = await resource_tracker_repo.total_service_runs_by_product_and_user_and_wallet(
- product_name,
- user_id=user_id,
- wallet_id=None,
- started_from=started_from,
- started_until=started_until,
+ total_service_runs: PositiveInt = (
+ await service_runs_db.total_service_runs_by_product_and_user_and_wallet(
+ db_engine,
+ product_name=product_name,
+ user_id=user_id,
+ wallet_id=None,
+ started_from=started_from,
+ started_until=started_until,
+ )
)
service_runs_db_model: list[
ServiceRunWithCreditsDB
- ] = await resource_tracker_repo.list_service_runs_by_product_and_user_and_wallet(
- product_name,
+ ] = await service_runs_db.list_service_runs_by_product_and_user_and_wallet(
+ db_engine,
+ product_name=product_name,
user_id=user_id,
wallet_id=None,
offset=offset,
@@ -66,8 +71,9 @@ async def list_service_runs(
)
    # Situation when an accountant user can see all users' usage of the wallet
elif wallet_id and access_all_wallet_usage is True:
- total_service_runs: PositiveInt = await resource_tracker_repo.total_service_runs_by_product_and_user_and_wallet( # type: ignore[no-redef]
- product_name,
+ total_service_runs: PositiveInt = await service_runs_db.total_service_runs_by_product_and_user_and_wallet( # type: ignore[no-redef]
+ db_engine,
+ product_name=product_name,
user_id=None,
wallet_id=wallet_id,
started_from=started_from,
@@ -75,8 +81,9 @@ async def list_service_runs(
)
service_runs_db_model: list[ # type: ignore[no-redef]
ServiceRunWithCreditsDB
- ] = await resource_tracker_repo.list_service_runs_by_product_and_user_and_wallet(
- product_name,
+ ] = await service_runs_db.list_service_runs_by_product_and_user_and_wallet(
+ db_engine,
+ product_name=product_name,
user_id=None,
wallet_id=wallet_id,
offset=offset,
@@ -87,8 +94,9 @@ async def list_service_runs(
)
    # Situation when a regular user can see only their own usage of the wallet
elif wallet_id and access_all_wallet_usage is False:
- total_service_runs: PositiveInt = await resource_tracker_repo.total_service_runs_by_product_and_user_and_wallet( # type: ignore[no-redef]
- product_name,
+ total_service_runs: PositiveInt = await service_runs_db.total_service_runs_by_product_and_user_and_wallet( # type: ignore[no-redef]
+ db_engine,
+ product_name=product_name,
user_id=user_id,
wallet_id=wallet_id,
started_from=started_from,
@@ -96,8 +104,9 @@ async def list_service_runs(
)
service_runs_db_model: list[ # type: ignore[no-redef]
ServiceRunWithCreditsDB
- ] = await resource_tracker_repo.list_service_runs_by_product_and_user_and_wallet(
- product_name,
+ ] = await service_runs_db.list_service_runs_by_product_and_user_and_wallet(
+ db_engine,
+ product_name=product_name,
user_id=user_id,
wallet_id=wallet_id,
offset=offset,
@@ -147,7 +156,7 @@ async def export_service_runs(
s3_region: str,
user_id: UserID,
product_name: ProductName,
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
wallet_id: WalletID | None = None,
access_all_wallet_usage: bool = False,
order_by: OrderBy | None = None,
@@ -165,7 +174,8 @@ async def export_service_runs(
)
# Export CSV to S3
- await resource_tracker_repo.export_service_runs_table_to_s3(
+ await service_runs_db.export_service_runs_table_to_s3(
+ db_engine,
product_name=product_name,
s3_bucket_name=s3_bucket_name,
s3_key=s3_object_key,
@@ -188,7 +198,7 @@ async def export_service_runs(
async def get_osparc_credits_aggregated_usages_page(
user_id: UserID,
product_name: ProductName,
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
aggregated_by: ServicesAggregatedUsagesType,
time_period: ServicesAggregatedUsagesTimePeriod,
wallet_id: WalletID,
@@ -204,7 +214,8 @@ async def get_osparc_credits_aggregated_usages_page(
(
count_output_list_db,
output_list_db,
- ) = await resource_tracker_repo.get_osparc_credits_aggregated_by_service(
+ ) = await service_runs_db.get_osparc_credits_aggregated_by_service(
+ db_engine,
product_name=product_name,
user_id=user_id if access_all_wallet_usage is False else None,
wallet_id=wallet_id,
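The three branches above forward different (user_id, wallet_id) combinations to the same pair of service_runs_db calls. A condensed, purely illustrative view of that filter selection (not code from the patch):

    def _select_filters(user_id, wallet_id, access_all_wallet_usage):
        # all usage of a specific user, no wallet (e.g. non-billable products)
        if wallet_id is None and access_all_wallet_usage is False:
            return {"user_id": user_id, "wallet_id": None}
        # accountant: all users' usage of the wallet
        if wallet_id and access_all_wallet_usage is True:
            return {"user_id": None, "wallet_id": wallet_id}
        # regular user: only their own usage of the wallet
        if wallet_id and access_all_wallet_usage is False:
            return {"user_id": user_id, "wallet_id": wallet_id}
        msg = "unsupported filter combination"
        raise ValueError(msg)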
diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py
index 73aa7416244..6047ac2e904 100644
--- a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py
+++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py
@@ -19,8 +19,9 @@
from models_library.wallets import WalletID
from pydantic import PositiveInt
from servicelib.rabbitmq import RabbitMQClient
+from sqlalchemy.ext.asyncio import AsyncEngine
-from .modules.db.repositories.resource_tracker import ResourceTrackerRepository
+from .modules.db import credit_transactions_db, service_runs_db
_logger = logging.getLogger(__name__)
@@ -30,15 +31,16 @@ def make_negative(n):
async def sum_credit_transactions_and_publish_to_rabbitmq(
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
rabbitmq_client: RabbitMQClient,
product_name: ProductName,
wallet_id: WalletID,
) -> WalletTotalCredits:
wallet_total_credits = (
- await resource_tracker_repo.sum_credit_transactions_by_product_and_wallet(
- product_name,
- wallet_id,
+ await credit_transactions_db.sum_credit_transactions_by_product_and_wallet(
+ db_engine,
+ product_name=product_name,
+ wallet_id=wallet_id,
)
)
publish_message = WalletCreditsMessage.model_construct(
@@ -77,7 +79,7 @@ async def _publish_to_rabbitmq_wallet_credits_limit_reached(
async def publish_to_rabbitmq_wallet_credits_limit_reached(
- resource_tracker_repo: ResourceTrackerRepository,
+ db_engine: AsyncEngine,
rabbitmq_client: RabbitMQClient,
product_name: ProductName,
wallet_id: WalletID,
@@ -86,8 +88,9 @@ async def publish_to_rabbitmq_wallet_credits_limit_reached(
):
# Get all current running services for that wallet
total_count: PositiveInt = (
- await resource_tracker_repo.total_service_runs_by_product_and_user_and_wallet(
- product_name,
+ await service_runs_db.total_service_runs_by_product_and_user_and_wallet(
+ db_engine,
+ product_name=product_name,
user_id=None,
wallet_id=wallet_id,
service_run_status=ServiceRunStatus.RUNNING,
@@ -95,13 +98,16 @@ async def publish_to_rabbitmq_wallet_credits_limit_reached(
)
for offset in range(0, total_count, _BATCH_SIZE):
- batch_services = await resource_tracker_repo.list_service_runs_by_product_and_user_and_wallet(
- product_name,
- user_id=None,
- wallet_id=wallet_id,
- offset=offset,
- limit=_BATCH_SIZE,
- service_run_status=ServiceRunStatus.RUNNING,
+ batch_services = (
+ await service_runs_db.list_service_runs_by_product_and_user_and_wallet(
+ db_engine,
+ product_name=product_name,
+ user_id=None,
+ wallet_id=wallet_id,
+ offset=offset,
+ limit=_BATCH_SIZE,
+ service_run_status=ServiceRunStatus.RUNNING,
+ )
)
await asyncio.gather(
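Read without the diff markers, the loop above pages through the wallet's running service runs in fixed-size batches and handles each batch concurrently. A stripped-down sketch of that pattern; service_runs_db and ServiceRunStatus are the same imports this module already uses, _BATCH_SIZE is an assumed value, and _publish_one stands in for the per-run RabbitMQ publish.

    import asyncio

    _BATCH_SIZE = 20  # assumed value; the module defines its own constant


    async def _notify_running_services(db_engine, *, product_name, wallet_id):
        total = await service_runs_db.total_service_runs_by_product_and_user_and_wallet(
            db_engine,
            product_name=product_name,
            user_id=None,
            wallet_id=wallet_id,
            service_run_status=ServiceRunStatus.RUNNING,
        )
        for offset in range(0, total, _BATCH_SIZE):
            batch = await service_runs_db.list_service_runs_by_product_and_user_and_wallet(
                db_engine,
                product_name=product_name,
                user_id=None,
                wallet_id=wallet_id,
                offset=offset,
                limit=_BATCH_SIZE,
                service_run_status=ServiceRunStatus.RUNNING,
            )
            await asyncio.gather(*(_publish_one(run) for run in batch))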
diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_resource_tracker_service_runs__export.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_resource_tracker_service_runs__export.py
index 56c9c102df6..44a6ce56016 100644
--- a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_resource_tracker_service_runs__export.py
+++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_resource_tracker_service_runs__export.py
@@ -31,7 +31,7 @@
@pytest.fixture
async def mocked_export(mocker: MockerFixture) -> AsyncMock:
return mocker.patch(
- "simcore_service_resource_usage_tracker.services.service_runs.ResourceTrackerRepository.export_service_runs_table_to_s3",
+ "simcore_service_resource_usage_tracker.services.service_runs.service_runs_db.export_service_runs_table_to_s3",
autospec=True,
)
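Because the export helper is now a free function on the service_runs_db module, the patch target is that function attribute as it is reachable from services.service_runs (the usual rule of patching where the name is looked up). A minimal, assumed usage of the fixture:

    from unittest.mock import AsyncMock


    async def test_export_goes_through_service_runs_db(mocked_export: AsyncMock):
        # ... trigger the export endpoint or service function here (omitted) ...
        mocked_export.assert_called_once()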
diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py
index 35114a3cdf6..8ebe34bbd2d 100644
--- a/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py
+++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py
@@ -23,9 +23,6 @@
from simcore_service_resource_usage_tracker.services.background_task_periodic_heartbeat_check import (
periodic_check_of_running_services_task,
)
-from simcore_service_resource_usage_tracker.services.modules.db.repositories.resource_tracker import (
- ResourceTrackerRepository,
-)
pytest_simcore_core_services_selection = ["postgres", "rabbit"]
pytest_simcore_ops_services_selection = [
@@ -132,9 +129,6 @@ async def test_process_event_functions(
):
engine = initialized_app.state.engine
app_settings: ApplicationSettings = initialized_app.state.settings
- resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository(
- db_engine=engine
- )
for _ in range(app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL):
await periodic_check_of_running_services_task(initialized_app)
diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py
index da321f593f3..57eb9735e68 100644
--- a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py
+++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py
@@ -8,9 +8,6 @@
SimcorePlatformStatus,
)
from servicelib.rabbitmq import RabbitMQClient
-from simcore_service_resource_usage_tracker.services.modules.db.repositories.resource_tracker import (
- ResourceTrackerRepository,
-)
from simcore_service_resource_usage_tracker.services.process_message_running_service import (
_process_heartbeat_event,
_process_start_event,
@@ -43,10 +40,7 @@ async def test_process_event_functions(
pricing_unit_id=None,
pricing_unit_cost_id=None,
)
- resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository(
- db_engine=engine
- )
- await _process_start_event(resource_tracker_repo, msg, publisher)
+ await _process_start_event(engine, msg, publisher)
output = await assert_service_runs_db_row(postgres_db, msg.service_run_id)
assert output.stopped_at is None
assert output.service_run_status == "RUNNING"
@@ -55,7 +49,7 @@ async def test_process_event_functions(
heartbeat_msg = RabbitResourceTrackingHeartbeatMessage(
service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc)
)
- await _process_heartbeat_event(resource_tracker_repo, heartbeat_msg, publisher)
+ await _process_heartbeat_event(engine, heartbeat_msg, publisher)
output = await assert_service_runs_db_row(postgres_db, msg.service_run_id)
assert output.stopped_at is None
assert output.service_run_status == "RUNNING"
@@ -66,7 +60,7 @@ async def test_process_event_functions(
created_at=datetime.now(tz=timezone.utc),
simcore_platform_status=SimcorePlatformStatus.OK,
)
- await _process_stop_event(resource_tracker_repo, stopped_msg, publisher)
+ await _process_stop_event(engine, stopped_msg, publisher)
output = await assert_service_runs_db_row(postgres_db, msg.service_run_id)
assert output.stopped_at is not None
assert output.service_run_status == "SUCCESS"
diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py
index 637a2219f94..b29863f0b57 100644
--- a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py
+++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py
@@ -31,9 +31,6 @@
resource_tracker_pricing_units,
)
from simcore_postgres_database.models.services import services_meta_data
-from simcore_service_resource_usage_tracker.services.modules.db.repositories.resource_tracker import (
- ResourceTrackerRepository,
-)
from simcore_service_resource_usage_tracker.services.process_message_running_service import (
_process_heartbeat_event,
_process_start_event,
@@ -207,10 +204,8 @@ async def test_process_event_functions(
pricing_unit_id=1,
pricing_unit_cost_id=1,
)
- resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository(
- db_engine=engine
- )
- await _process_start_event(resource_tracker_repo, msg, publisher)
+
+ await _process_start_event(engine, msg, publisher)
output = await assert_credit_transactions_db_row(postgres_db, msg.service_run_id)
assert output.osparc_credits == 0.0
assert output.transaction_status == "PENDING"
@@ -222,7 +217,7 @@ async def test_process_event_functions(
heartbeat_msg = RabbitResourceTrackingHeartbeatMessage(
service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc)
)
- await _process_heartbeat_event(resource_tracker_repo, heartbeat_msg, publisher)
+ await _process_heartbeat_event(engine, heartbeat_msg, publisher)
output = await assert_credit_transactions_db_row(
postgres_db, msg.service_run_id, modified_at
)
@@ -240,7 +235,7 @@ async def test_process_event_functions(
created_at=datetime.now(tz=timezone.utc),
simcore_platform_status=SimcorePlatformStatus.OK,
)
- await _process_stop_event(resource_tracker_repo, stopped_msg, publisher)
+ await _process_stop_event(engine, stopped_msg, publisher)
output = await assert_credit_transactions_db_row(
postgres_db, msg.service_run_id, modified_at
)
diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py
index 5b903cf759d..ccffbc9f42e 100644
--- a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py
+++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py
@@ -31,9 +31,6 @@
resource_tracker_pricing_units,
)
from simcore_postgres_database.models.services import services_meta_data
-from simcore_service_resource_usage_tracker.services.modules.db.repositories.resource_tracker import (
- ResourceTrackerRepository,
-)
from simcore_service_resource_usage_tracker.services.process_message_running_service import (
_process_heartbeat_event,
_process_start_event,
@@ -149,10 +146,8 @@ async def test_process_event_functions(
pricing_unit_id=1,
pricing_unit_cost_id=1,
)
- resource_tracker_repo: ResourceTrackerRepository = ResourceTrackerRepository(
- db_engine=engine
- )
- await _process_start_event(resource_tracker_repo, msg, publisher)
+
+ await _process_start_event(engine, msg, publisher)
output = await assert_credit_transactions_db_row(postgres_db, msg.service_run_id)
assert output.osparc_credits == 0.0
assert output.transaction_status == "PENDING"
@@ -164,7 +159,7 @@ async def test_process_event_functions(
heartbeat_msg = RabbitResourceTrackingHeartbeatMessage(
service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc)
)
- await _process_heartbeat_event(resource_tracker_repo, heartbeat_msg, publisher)
+ await _process_heartbeat_event(engine, heartbeat_msg, publisher)
output = await assert_credit_transactions_db_row(
postgres_db, msg.service_run_id, modified_at
)
@@ -177,7 +172,7 @@ async def test_process_event_functions(
created_at=datetime.now(tz=timezone.utc),
simcore_platform_status=SimcorePlatformStatus.OK,
)
- await _process_stop_event(resource_tracker_repo, stopped_msg, publisher)
+ await _process_stop_event(engine, stopped_msg, publisher)
output = await assert_credit_transactions_db_row(
postgres_db, msg.service_run_id, modified_at
)
From 821f20cecee6e2c70659e385607eb1fc3fd24e7e Mon Sep 17 00:00:00 2001
From: Odei Maiz <33152403+odeimaiz@users.noreply.github.com>
Date: Tue, 3 Dec 2024 20:45:16 +0100
Subject: [PATCH 14/16] =?UTF-8?q?=F0=9F=92=A3=20[Frontend]=20Retire=20clus?=
=?UTF-8?q?ters=20(#6883)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../source/class/osparc/auth/Manager.js | 9 -
.../class/osparc/cluster/ClusterMiniView.js | 202 -------
.../class/osparc/cluster/ClusterWorkers.js | 140 -----
.../class/osparc/cluster/ClustersDetails.js | 109 ----
.../source/class/osparc/cluster/Utils.js | 136 -----
.../source/class/osparc/data/Permissions.js | 1 -
.../source/class/osparc/data/Resources.js | 45 --
.../class/osparc/desktop/StartStopButtons.js | 72 ---
.../class/osparc/desktop/StudyEditor.js | 8 -
.../osparc/desktop/preferences/Preferences.js | 22 -
.../desktop/preferences/pages/ClustersPage.js | 563 ------------------
.../class/osparc/editor/ClusterEditor.js | 262 --------
.../class/osparc/navigation/UserMenu.js | 14 -
.../source/class/osparc/product/Utils.js | 7 -
.../client/source/class/osparc/store/Store.js | 5 -
.../class/osparc/ui/list/ClusterListItem.js | 150 -----
.../class/osparc/utils/DisabledPlugins.js | 5 -
17 files changed, 1750 deletions(-)
delete mode 100644 services/static-webserver/client/source/class/osparc/cluster/ClusterMiniView.js
delete mode 100644 services/static-webserver/client/source/class/osparc/cluster/ClusterWorkers.js
delete mode 100644 services/static-webserver/client/source/class/osparc/cluster/ClustersDetails.js
delete mode 100644 services/static-webserver/client/source/class/osparc/cluster/Utils.js
delete mode 100644 services/static-webserver/client/source/class/osparc/desktop/preferences/pages/ClustersPage.js
delete mode 100644 services/static-webserver/client/source/class/osparc/editor/ClusterEditor.js
delete mode 100644 services/static-webserver/client/source/class/osparc/ui/list/ClusterListItem.js
diff --git a/services/static-webserver/client/source/class/osparc/auth/Manager.js b/services/static-webserver/client/source/class/osparc/auth/Manager.js
index 8f325a0644a..5b5efeef7d0 100644
--- a/services/static-webserver/client/source/class/osparc/auth/Manager.js
+++ b/services/static-webserver/client/source/class/osparc/auth/Manager.js
@@ -257,15 +257,6 @@ qx.Class.define("osparc.auth.Manager", {
this.updateProfile(profile);
const role = profile.role.toLowerCase();
osparc.data.Permissions.getInstance().setRole(role);
-
- this.__fetchStartUpResources();
- },
-
- __fetchStartUpResources: function() {
- const isDisabled = osparc.utils.DisabledPlugins.isClustersDisabled();
- if (isDisabled === false) {
- osparc.data.Resources.get("clusters");
- }
},
__logoutUser: function() {
diff --git a/services/static-webserver/client/source/class/osparc/cluster/ClusterMiniView.js b/services/static-webserver/client/source/class/osparc/cluster/ClusterMiniView.js
deleted file mode 100644
index 271ce645725..00000000000
--- a/services/static-webserver/client/source/class/osparc/cluster/ClusterMiniView.js
+++ /dev/null
@@ -1,202 +0,0 @@
-/* ************************************************************************
-
- osparc - the simcore frontend
-
- https://osparc.io
-
- Copyright:
- 2022 IT'IS Foundation, https://itis.swiss
-
- License:
- MIT: https://opensource.org/licenses/MIT
-
- Authors:
- * Odei Maiz (odeimaiz)
-
-************************************************************************ */
-
-qx.Class.define("osparc.cluster.ClusterMiniView", {
- extend: qx.ui.core.Widget,
-
- construct: function() {
- this.base(arguments);
-
- const grid = new qx.ui.layout.Grid(2, 2);
- this._setLayout(grid);
-
- this.__listenToClusterDetails();
-
- this.set({
- allowGrowY: false,
- cursor: "pointer"
- });
- this.addListener("tap", () => osparc.cluster.Utils.popUpClustersDetails(this.__clusterId), this);
-
- const hint = this.__hint = new osparc.ui.hint.Hint(this).set({
- active: false
- });
- const showHint = () => hint.show();
- const hideHint = () => hint.exclude();
- this.addListener("mouseover", showHint);
- [
- "mouseout",
- "tap"
- ].forEach(e => this.addListener(e, hideHint));
- },
-
- statics: {
- GRID_POS: {
- CPU: 0,
- RAM: 1,
- GPU: 2
- }
- },
-
- members: {
- __clusterId: null,
- __hint: null,
-
- setClusterId: function(clusterId) {
- const clusters = osparc.cluster.Utils.getInstance();
- if (this.__clusterId !== null) {
- clusters.stopFetchingDetails(this.__clusterId);
- }
- this.__clusterId = clusterId;
- if (clusterId !== null) {
- clusters.startFetchingDetails(clusterId);
- }
- },
-
- __listenToClusterDetails: function() {
- const clusters = osparc.cluster.Utils.getInstance();
- clusters.addListener("clusterDetailsReceived", e => {
- const data = e.getData();
- if (this.__clusterId === data.clusterId) {
- if ("error" in data) {
- this.__detailsCallFailed();
- } else {
- const clusterDetails = data.clusterDetails;
- this.__updateWorkersDetails(clusterDetails);
- }
- }
- });
- },
-
- __showBulb: function(failed) {
- this._removeAll();
-
- const clusterStatusImage = new qx.ui.basic.Image().set({
- source: "@FontAwesome5Solid/lightbulb/16",
- alignY: "middle",
- alignX: "center",
- paddingLeft: 3,
- textColor: failed ? "failed-red" : "ready-green"
- });
- this._add(clusterStatusImage, {
- row: 0,
- column: 0
- });
- },
-
- __detailsCallFailed: function() {
- this.__showBulb(true);
- this.__hint.setText(this.tr("Connection failed"));
- },
-
- __updateWorkersDetails: function(clusterDetails) {
- this._removeAll();
-
- const workers = clusterDetails.scheduler.workers;
- if (Object.keys(workers).length === 0) {
- this.__showBulb(false);
- this.__hint.setText(this.tr("No workers running at the moment in this cluster"));
- return;
- }
-
- const resources = {
- cpu: {
- metric: "cpu",
- usedResource: "CPU",
- resource: "CPU",
- icon: "@FontAwesome5Solid/microchip/10",
- available: 0,
- used: 0
- },
- ram: {
- metric: "memory",
- usedResource: "RAM",
- resource: "RAM",
- icon: "@MaterialIcons/memory/10",
- available: 0,
- used: 0
- },
- gpu: {
- metric: "gpu",
- usedResource: "GPU",
- resource: "GPU",
- icon: "@FontAwesome5Solid/server/10",
- available: 0,
- used: 0
- }
- };
- Object.keys(resources).forEach(resourceKey => {
- const resource = resources[resourceKey];
- osparc.cluster.Utils.accumulateWorkersResources(workers, resource);
- });
- this.__updateMiniView(resources);
- this.__updateHint(resources);
- },
-
- __updateMiniView: function(resources) {
- Object.keys(resources).forEach((resourceKey, idx) => {
- const resourceInfo = resources[resourceKey];
- if (resourceInfo.available === 0) {
- return;
- }
- const relativeUsage = resourceInfo.used / resourceInfo.available;
- const icon = new qx.ui.basic.Image(resourceInfo.icon).set({
- textColor: relativeUsage > 0.8 ? "busy-orange" : "text"
- });
- this._add(icon, {
- row: idx,
- column: 0
- });
- const progressBar = new qx.ui.indicator.ProgressBar(resourceInfo.used, resourceInfo.available).set({
- height: 10,
- width: 60
- });
- osparc.utils.Utils.hideBorder(progressBar);
- // orange > 80%
- progressBar.getChildControl("progress").set({
- backgroundColor: relativeUsage > 0.8 ? "busy-orange" : "ready-green"
- });
- this._add(progressBar, {
- row: idx,
- column: 1
- });
- });
- },
-
- __updateHint: function(resources) {
- let text = "";
- Object.keys(resources).forEach(resourceKey => {
- const resourceInfo = resources[resourceKey];
- if (resourceInfo.available === 0) {
- return;
- }
- text += resourceInfo.resource + ": ";
- if (resourceKey === "ram") {
- text += osparc.utils.Utils.bytesToGB(resourceInfo.used) + "GB / " + osparc.utils.Utils.bytesToGB(resourceInfo.available) + "GB";
- } else {
- text += resourceInfo.used + " / " + resourceInfo.available;
- }
- text += "
";
- });
- this.__hint.setText(text);
- }
- },
-
- destruct: function() {
- osparc.cluster.Utils.getInstance().stopFetchingDetails(this.__clusterId);
- }
-});
diff --git a/services/static-webserver/client/source/class/osparc/cluster/ClusterWorkers.js b/services/static-webserver/client/source/class/osparc/cluster/ClusterWorkers.js
deleted file mode 100644
index bcb2638439d..00000000000
--- a/services/static-webserver/client/source/class/osparc/cluster/ClusterWorkers.js
+++ /dev/null
@@ -1,140 +0,0 @@
-/* ************************************************************************
-
- osparc - the simcore frontend
-
- https://osparc.io
-
- Copyright:
- 2022 IT'IS Foundation, https://itis.swiss
-
- License:
- MIT: https://opensource.org/licenses/MIT
-
- Authors:
- * Odei Maiz (odeimaiz)
-
-************************************************************************ */
-
-qx.Class.define("osparc.cluster.ClusterWorkers", {
- extend: qx.ui.core.Widget,
-
- construct: function() {
- this.base(arguments);
-
- const grid = new qx.ui.layout.Grid(5, 8);
- for (let i=0; i {
- const worker = clusterDetails.scheduler.workers[workerUrl];
-
- const img = new qx.ui.basic.Image().set({
- source: "@FontAwesome5Solid/hdd/24",
- toolTipText: worker.name,
- textColor: "ready-green",
- paddingTop: 50
- });
- this._add(img, {
- row,
- column: this.self().GRID_POS.ICON
- });
-
- Object.keys(plots).forEach(plotKey => {
- const plotInfo = plots[plotKey];
- const gaugeDatas = osparc.wrapper.Plotly.getDefaultGaugeData();
- const gaugeData = gaugeDatas[0];
- gaugeData.title.text = plotInfo.label.toLocaleString();
- let used = osparc.cluster.Utils.getUsedResourcesAttribute(worker, plotInfo.usedResource);
- let available = osparc.cluster.Utils.getAvailableResourcesAttribute(worker, plotInfo.resource);
- if (plotKey === "ram") {
- used = osparc.utils.Utils.bytesToGB(used);
- available = osparc.utils.Utils.bytesToGB(available);
- }
- if (qx.lang.Type.isNumber(available)) {
- // orange > 80%
- gaugeData.gauge.steps = [{
- range: [0.8*available, available],
- color: qx.theme.manager.Color.getInstance().resolve("busy-orange"),
- thickness: 0.5
- }];
- }
- if (available === "-") {
- gaugeData.value = "-";
- } else {
- gaugeData.value = used;
- gaugeData.gauge.axis.range[1] = available;
- }
- const layout = osparc.wrapper.Plotly.getDefaultLayout();
- const plotId = "ClusterDetails_" + plotKey + "-" + row;
- const w = parseInt(gridW/Object.keys(plots).length);
- const h = parseInt(w*0.75);
- // hide plotly toolbar
- const config = {
- displayModeBar: false
- };
- const plot = new osparc.widget.PlotlyWidget(plotId, gaugeDatas, layout, config).set({
- width: w,
- height: h
- });
- this._add(plot, {
- row,
- column: plotInfo.column
- });
- });
- row++;
- });
- }
- }
-});
diff --git a/services/static-webserver/client/source/class/osparc/cluster/ClustersDetails.js b/services/static-webserver/client/source/class/osparc/cluster/ClustersDetails.js
deleted file mode 100644
index 033f8ab2466..00000000000
--- a/services/static-webserver/client/source/class/osparc/cluster/ClustersDetails.js
+++ /dev/null
@@ -1,109 +0,0 @@
-/* ************************************************************************
-
- osparc - the simcore frontend
-
- https://osparc.io
-
- Copyright:
- 2022 IT'IS Foundation, https://itis.swiss
-
- License:
- MIT: https://opensource.org/licenses/MIT
-
- Authors:
- * Odei Maiz (odeimaiz)
-
-************************************************************************ */
-
-qx.Class.define("osparc.cluster.ClustersDetails", {
- extend: qx.ui.core.Widget,
-
- construct: function(selectClusterId) {
- this.base(arguments);
-
- this._setLayout(new qx.ui.layout.VBox(20));
-
- if (selectClusterId === undefined) {
- selectClusterId = 0;
- }
- this.__clusterId = selectClusterId;
- this.__populateClustersLayout();
- this.__addClusterWorkersLayout();
- this.__startFetchingDetails();
- },
-
- members: {
- __clustersSelectBox: null,
- __clusterId: null,
- __clusterWorkers: null,
-
- __populateClustersLayout: function() {
- const clustersLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(10).set({
- alignY: "middle"
- }));
-
- const clustersLabel = new qx.ui.basic.Label(this.tr("Connected clusters"));
- clustersLayout.add(clustersLabel);
-
- const selectBox = this.__clustersSelectBox = new qx.ui.form.SelectBox().set({
- allowGrowX: false
- });
- osparc.cluster.Utils.populateClustersSelectBox(selectBox);
- selectBox.addListener("changeSelection", e => {
- const selection = e.getData();
- if (selection.length) {
- const clusterId = selection[0].id;
- this.__selectedClusterChanged(clusterId);
- }
- }, this);
- clustersLayout.add(selectBox);
-
- clustersLayout.add(new qx.ui.core.Spacer(10, null));
-
- const clusterStatusLabel = new qx.ui.basic.Label(this.tr("Status:"));
- clustersLayout.add(clusterStatusLabel);
-
- const clusterStatus = this.__clusterStatus = new qx.ui.basic.Image().set({
- source: "@FontAwesome5Solid/lightbulb/16"
- });
- clustersLayout.add(clusterStatus);
-
- this._add(clustersLayout);
-
- selectBox.getSelectables().forEach(selectable => {
- if (selectable.id === this.__clusterId) {
- selectBox.setSelection([selectable]);
- }
- });
- },
-
- __addClusterWorkersLayout: function() {
- const clusterWorkers = this.__clusterWorkers = new osparc.cluster.ClusterWorkers();
- this._add(clusterWorkers, {
- flex: 1
- });
- },
-
- __selectedClusterChanged: function(clusterId) {
- osparc.cluster.Utils.getInstance().stopFetchingDetails(this.__clusterId);
- this.__clusterId = clusterId;
- this.__startFetchingDetails();
- },
-
- __startFetchingDetails: function() {
- const clusters = osparc.cluster.Utils.getInstance();
- clusters.addListener("clusterDetailsReceived", e => {
- const data = e.getData();
- if (this.__clusterId === data.clusterId) {
- this.__clusterStatus.setTextColor("error" in data ? "failed-red" : "ready-green");
- this.__clusterWorkers.populateWorkersDetails("error" in data ? null : data.clusterDetails);
- }
- });
- clusters.startFetchingDetails(this.__clusterId);
- }
- },
-
- destruct: function() {
- osparc.cluster.Utils.getInstance().stopFetchingDetails(this.__clusterId);
- }
-});
diff --git a/services/static-webserver/client/source/class/osparc/cluster/Utils.js b/services/static-webserver/client/source/class/osparc/cluster/Utils.js
deleted file mode 100644
index 7140bf5cbf1..00000000000
--- a/services/static-webserver/client/source/class/osparc/cluster/Utils.js
+++ /dev/null
@@ -1,136 +0,0 @@
-/* ************************************************************************
-
- osparc - the simcore frontend
-
- https://osparc.io
-
- Copyright:
- 2022 IT'IS Foundation, https://itis.swiss
-
- License:
- MIT: https://opensource.org/licenses/MIT
-
- Authors:
- * Odei Maiz (odeimaiz)
-
-************************************************************************ */
-
-/**
- * Collection of methods for dealing with clusters.
- *
- * *Example*
- */
-
-qx.Class.define("osparc.cluster.Utils", {
- extend: qx.core.Object,
- type: "singleton",
-
- construct: function() {
- this.base(arguments);
-
- this.__clusterIds = [];
- },
-
- statics: {
- popUpClustersDetails: function(clusterId) {
- const clusters = new osparc.cluster.ClustersDetails(clusterId);
- osparc.ui.window.Window.popUpInWindow(clusters, qx.locale.Manager.tr("Clusters & Workers"), 650, 600);
- },
-
- getUsedResourcesAttribute: function(worker, attributeKey) {
- if (attributeKey in worker["used_resources"]) {
- return osparc.utils.Utils.toTwoDecimals(worker["used_resources"][attributeKey]);
- }
- return "-";
- },
-
- getAvailableResourcesAttribute: function(worker, attributeKey) {
- if (attributeKey in worker.resources) {
- return worker.resources[attributeKey];
- }
- return "-";
- },
-
- accumulateWorkersResources: function(workers, resource) {
- Object.keys(workers).forEach(workerUrl => {
- const worker = workers[workerUrl];
- const available = this.getAvailableResourcesAttribute(worker, resource.resource);
- if (available === "-") {
- return;
- }
- resource.available += available;
- const used = this.getUsedResourcesAttribute(worker, resource.usedResource);
- resource.used += used;
- });
- },
-
- populateClustersSelectBox: function(clustersSelectBox) {
- clustersSelectBox.removeAll();
-
- const store = osparc.store.Store.getInstance();
- const clusters = store.getClusters();
- if (clusters) {
- clusters.forEach(cluster => {
- const item = new qx.ui.form.ListItem().set({
- label: cluster["name"],
- toolTipText: cluster["type"] + "\n" + cluster["description"],
- allowGrowY: false
- });
- item.id = cluster["id"];
- clustersSelectBox.add(item);
- });
- }
- }
- },
-
- events: {
- "clusterDetailsReceived": "qx.event.type.Data"
- },
-
- members: {
- __clusterIds: null,
-
- __fetchDetails: function(cid) {
- const params = {
- url: {
- cid
- }
- };
- osparc.data.Resources.get("clusterDetails", params)
- .then(clusterDetails => {
- this.fireDataEvent("clusterDetailsReceived", {
- clusterId: cid,
- clusterDetails
- });
- })
- .catch(err => {
- console.error(err);
- this.fireDataEvent("clusterDetailsReceived", {
- clusterId: cid,
- error: err
- });
- })
- .finally(() => {
- if (this.__clusterIds.includes(cid)) {
- const interval = 10000;
- qx.event.Timer.once(() => this.__fetchDetails(cid), this, interval);
- }
- });
- },
-
- startFetchingDetails: function(clusterId) {
- const found = this.__clusterIds.includes(clusterId);
- this.__clusterIds.push(clusterId);
- if (!found) {
- this.__fetchDetails(clusterId);
- }
- },
-
- stopFetchingDetails: function(clusterId) {
- const idx = this.__clusterIds.indexOf(clusterId);
- if (idx > -1) {
- this.__clusterIds.splice(idx, 1);
- }
- }
- }
-});
diff --git a/services/static-webserver/client/source/class/osparc/data/Permissions.js b/services/static-webserver/client/source/class/osparc/data/Permissions.js
index 5c53b4da76d..0cc74fc9cc0 100644
--- a/services/static-webserver/client/source/class/osparc/data/Permissions.js
+++ b/services/static-webserver/client/source/class/osparc/data/Permissions.js
@@ -130,7 +130,6 @@ qx.Class.define("osparc.data.Permissions", {
"services.all.read",
"services.all.reupdate",
"services.filePicker.read.all",
- "user.clusters.create",
"user.wallets.create",
"study.everyone.share",
"study.snapshot.read",
diff --git a/services/static-webserver/client/source/class/osparc/data/Resources.js b/services/static-webserver/client/source/class/osparc/data/Resources.js
index f8b38797c58..4e54df325c3 100644
--- a/services/static-webserver/client/source/class/osparc/data/Resources.js
+++ b/services/static-webserver/client/source/class/osparc/data/Resources.js
@@ -1029,51 +1029,6 @@ qx.Class.define("osparc.data.Resources", {
}
}
},
- /*
- * CLUSTERS
- */
- "clusters": {
- useCache: true,
- endpoints: {
- get: {
- method: "GET",
- url: statics.API + "/clusters"
- },
- post: {
- method: "POST",
- url: statics.API + "/clusters"
- },
- pingWCredentials: {
- method: "POST",
- url: statics.API + "/clusters:ping"
- },
- getOne: {
- method: "GET",
- url: statics.API + "/clusters/{cid}"
- },
- delete: {
- method: "DELETE",
- url: statics.API + "/clusters/{cid}"
- },
- patch: {
- method: "PATCH",
- url: statics.API + "/clusters/{cid}"
- },
- ping: {
- method: "POST",
- url: statics.API + "/clusters/{cid}:ping"
- }
- }
- },
- "clusterDetails": {
- useCache: false,
- endpoints: {
- get: {
- method: "GET",
- url: statics.API + "/clusters/{cid}/details"
- }
- }
- },
/*
* CLASSIFIERS
* Gets the json object containing sample classifiers
diff --git a/services/static-webserver/client/source/class/osparc/desktop/StartStopButtons.js b/services/static-webserver/client/source/class/osparc/desktop/StartStopButtons.js
index 15cd77f5288..c1633ddda85 100644
--- a/services/static-webserver/client/source/class/osparc/desktop/StartStopButtons.js
+++ b/services/static-webserver/client/source/class/osparc/desktop/StartStopButtons.js
@@ -64,31 +64,6 @@ qx.Class.define("osparc.desktop.StartStopButtons", {
_createChildControlImpl: function(id) {
let control;
switch (id) {
- case "cluster-layout":
- control = new qx.ui.container.Composite(new qx.ui.layout.HBox(5).set({
- alignY: "middle"
- }));
- this._add(control);
- break;
- case "cluster-selector": {
- control = new qx.ui.form.SelectBox().set({
- maxHeight: 32
- });
- this.getChildControl("cluster-layout").add(control);
- const store = osparc.store.Store.getInstance();
- store.addListener("changeClusters", () => this.__populateClustersSelectBox(), this);
- break;
- }
- case "cluster-mini-view":
- control = new osparc.cluster.ClusterMiniView();
- this.getChildControl("cluster-layout").add(control);
- this.getChildControl("cluster-selector").addListener("changeSelection", e => {
- const selection = e.getData();
- if (selection.length) {
- control.setClusterId(selection[0].id);
- }
- }, this);
- break;
case "dynamics-layout":
control = new qx.ui.container.Composite(new qx.ui.layout.HBox(5).set({
alignY: "middle"
@@ -140,9 +115,6 @@ qx.Class.define("osparc.desktop.StartStopButtons", {
},
__buildLayout: function() {
- this.getChildControl("cluster-selector");
- this.getChildControl("cluster-mini-view");
-
this.getChildControl("start-service-button");
this.getChildControl("stop-service-button");
@@ -209,24 +181,9 @@ qx.Class.define("osparc.desktop.StartStopButtons", {
];
},
- __populateClustersSelectBox: function() {
- osparc.cluster.Utils.populateClustersSelectBox(this.getChildControl("cluster-selector"));
- const clusters = osparc.store.Store.getInstance().getClusters();
- this.getChildControl("cluster-layout").setVisibility(Object.keys(clusters).length ? "visible" : "excluded");
- },
-
- getClusterId: function() {
- if (this.getChildControl("cluster-layout").isVisible()) {
- return this.getChildControl("cluster-selector").getSelection()[0].id;
- }
- return null;
- },
-
__applyStudy: async function(study) {
study.getWorkbench().addListener("pipelineChanged", this.__checkButtonsVisible, this);
study.addListener("changePipelineRunning", this.__updateRunButtonsStatus, this);
- this.__populateClustersSelectBox();
- this.__getComputations();
this.__checkButtonsVisible();
this.__updateRunButtonsStatus();
},
@@ -250,34 +207,5 @@ qx.Class.define("osparc.desktop.StartStopButtons", {
this.__setRunning(study.isPipelineRunning());
}
},
-
- __getComputations: function() {
- const studyId = this.getStudy().getUuid();
- const url = "/computations/" + encodeURIComponent(studyId);
- const req = new osparc.io.request.ApiRequest(url, "GET");
- req.addListener("success", e => {
- const res = e.getTarget().getResponse();
- if (res && res.data && "cluster_id" in res.data) {
- const clusterId = res.data["cluster_id"];
- if (clusterId) {
- const clustersBox = this.getChildControl("cluster-selector");
- if (clustersBox.isVisible()) {
- clustersBox.getSelectables().forEach(selectable => {
- if (selectable.id === clusterId) {
- clustersBox.setSelection([selectable]);
- }
- });
- }
- }
- }
- }, this);
- req.addListener("fail", e => {
- const res = e.getTarget().getResponse();
- if (res && res.error) {
- console.error(res.error);
- }
- });
- req.send();
- }
}
});
diff --git a/services/static-webserver/client/source/class/osparc/desktop/StudyEditor.js b/services/static-webserver/client/source/class/osparc/desktop/StudyEditor.js
index 14474eabd7d..bfd02dc41eb 100644
--- a/services/static-webserver/client/source/class/osparc/desktop/StudyEditor.js
+++ b/services/static-webserver/client/source/class/osparc/desktop/StudyEditor.js
@@ -619,10 +619,6 @@ qx.Class.define("osparc.desktop.StudyEditor", {
"subgraph": partialPipeline,
"force_restart": forceRestart
};
- const startStopButtonsWB = this.__workbenchView.getStartStopButtons();
- if (startStopButtonsWB.getClusterId() !== null) {
- requestData["cluster_id"] = startStopButtonsWB.getClusterId();
- }
req.setRequestData(requestData);
req.send();
if (partialPipeline.length) {
@@ -911,10 +907,6 @@ qx.Class.define("osparc.desktop.StudyEditor", {
this.getStudy().stopStudy();
this.__closeStudy();
}
- const clusterMiniView = this.__workbenchView.getStartStopButtons().getChildControl("cluster-mini-view");
- if (clusterMiniView) {
- clusterMiniView.setClusterId(null);
- }
osparc.utils.Utils.closeHangingWindows();
},
diff --git a/services/static-webserver/client/source/class/osparc/desktop/preferences/Preferences.js b/services/static-webserver/client/source/class/osparc/desktop/preferences/Preferences.js
index d04b96ceeea..90616bfd3e4 100644
--- a/services/static-webserver/client/source/class/osparc/desktop/preferences/Preferences.js
+++ b/services/static-webserver/client/source/class/osparc/desktop/preferences/Preferences.js
@@ -29,9 +29,6 @@ qx.Class.define("osparc.desktop.preferences.Preferences", {
if (osparc.data.Permissions.getInstance().canDo("user.tag")) {
this.__addTagsPage();
}
- if (osparc.product.Utils.showClusters()) {
- this.__addClustersPage();
- }
},
members: {
@@ -63,24 +60,5 @@ qx.Class.define("osparc.desktop.preferences.Preferences", {
const page = this.addTab(title, iconSrc, tagsPage);
osparc.utils.Utils.setIdToWidget(page.getChildControl("button"), "preferencesTagsTabBtn");
},
-
- __addClustersPage: function() {
- const title = this.tr("Clusters");
- const iconSrc = "@FontAwesome5Solid/server/24";
- const clustersPage = new osparc.desktop.preferences.pages.ClustersPage();
- const page = this.addTab(title, iconSrc, clustersPage);
- const clustersBtn = page.getChildControl("button");
- clustersBtn.exclude();
- const isDisabled = osparc.utils.DisabledPlugins.isClustersDisabled();
- if (isDisabled === false) {
- osparc.data.Resources.get("clusters")
- .then(clusters => {
- if (clusters.length || osparc.data.Permissions.getInstance().canDo("user.clusters.create")) {
- clustersBtn.show();
- }
- })
- .catch(err => console.error(err));
- }
- },
}
});
diff --git a/services/static-webserver/client/source/class/osparc/desktop/preferences/pages/ClustersPage.js b/services/static-webserver/client/source/class/osparc/desktop/preferences/pages/ClustersPage.js
deleted file mode 100644
index d5d7e6ba6dc..00000000000
--- a/services/static-webserver/client/source/class/osparc/desktop/preferences/pages/ClustersPage.js
+++ /dev/null
@@ -1,563 +0,0 @@
-/* ************************************************************************
-
- osparc - the simcore frontend
-
- https://osparc.io
-
- Copyright:
- 2021 IT'IS Foundation, https://itis.swiss
-
- License:
- MIT: https://opensource.org/licenses/MIT
-
- Authors:
- * Odei Maiz (odeimaiz)
-
-************************************************************************ */
-
-/**
- * Clusters and members in preferences dialog
- *
- */
-
-qx.Class.define("osparc.desktop.preferences.pages.ClustersPage", {
- extend: qx.ui.core.Widget,
-
- construct: function() {
- this.base(arguments);
-
- this._setLayout(new qx.ui.layout.VBox(15));
-
- const buttonsLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(10).set({
- alignX: "center"
- }));
- if (osparc.data.Permissions.getInstance().canDo("user.clusters.create")) {
- buttonsLayout.add(this.__getCreateClusterButton());
- }
- buttonsLayout.add(this.__getShowClustersDetailsButton());
- this._add(buttonsLayout);
- this._add(this.__getClustersSection());
- this._add(this.__getOrgsAndMembersSection(), {
- flex: 1
- });
-
- this.__reloadClusters();
- },
-
- members: {
- __currentCluster: null,
- __clustersModel: null,
- __clustersList: null,
- __selectOrgMemberLayout: null,
- __organizationsAndMembers: null,
- __membersArrayModel: null,
-
- __getCreateClusterButton: function() {
- const createClusterBtn = new qx.ui.form.Button().set({
- appearance: "strong-button",
- label: this.tr("New Cluster"),
- icon: "@FontAwesome5Solid/plus/14",
- allowGrowX: false
- });
- createClusterBtn.addListener("execute", function() {
- const newCluster = true;
- const clusterEditor = new osparc.editor.ClusterEditor(newCluster);
- const title = this.tr("Cluster Details Editor");
- const win = osparc.ui.window.Window.popUpInWindow(clusterEditor, title, 400, 260);
- clusterEditor.addListener("createCluster", () => {
- this.__createCluster(win, clusterEditor.getChildControl("create"), clusterEditor);
- });
- clusterEditor.addListener("cancel", () => win.close());
- }, this);
- return createClusterBtn;
- },
-
- __getShowClustersDetailsButton: function() {
- const createClusterBtn = new qx.ui.form.Button().set({
- label: this.tr("Show Resources"),
- icon: "@FontAwesome5Solid/info/14",
- allowGrowX: false
- });
- createClusterBtn.addListener("execute", () => osparc.cluster.Utils.popUpClustersDetails(), this);
- return createClusterBtn;
- },
-
- __getClustersSection: function() {
- const box = osparc.ui.window.TabbedView.createSectionBox(this.tr("Clusters"));
- box.add(this.__getClustersList());
- box.setContentPadding(0);
- return box;
- },
-
- __getClustersList: function() {
- const clustersList = this.__clustersList = new qx.ui.form.List().set({
- decorator: "no-border",
- spacing: 3,
- height: 150,
- width: 150
- });
- clustersList.addListener("changeSelection", e => {
- this.__clusterSelected(e.getData());
- }, this);
-
- const clustersModel = this.__clustersModel = new qx.data.Array();
- const clustersCtrl = new qx.data.controller.List(clustersModel, clustersList, "name");
- clustersCtrl.setDelegate({
- createItem: () => new osparc.ui.list.ClusterListItem(),
- bindItem: (ctrl, item, id) => {
- ctrl.bindProperty("id", "model", null, item, id);
- ctrl.bindProperty("id", "key", null, item, id);
- ctrl.bindProperty("thumbnail", "thumbnail", null, item, id);
- ctrl.bindProperty("name", "title", null, item, id);
- ctrl.bindProperty("endpoint", "endpoint", null, item, id);
- ctrl.bindProperty("description", "subtitle", null, item, id);
- ctrl.bindProperty("accessRights", "members", null, item, id);
- },
- configureItem: item => {
- item.addListener("openEditCluster", e => {
- const clusterId = e.getData();
- this.__openEditCluster(clusterId);
- });
-
- item.addListener("deleteCluster", e => {
- const clusterId = e.getData();
- this.__deleteCluster(clusterId);
- });
- }
- });
-
- return clustersList;
- },
-
- __getOrgsAndMembersSection: function() {
- const box = osparc.ui.window.TabbedView.createSectionBox(this.tr("Organization and Members"));
- box.add(this.__getOrgMembersFilter());
- box.add(this.__getMembersList(), {
- flex: 1
- });
- box.setContentPadding(0);
- return box;
- },
-
- __getOrgMembersFilter: function() {
- const vBox = this.__selectOrgMemberLayout = new qx.ui.container.Composite(new qx.ui.layout.VBox());
- vBox.exclude();
-
- const label = new qx.ui.basic.Label(this.tr("Select from the following list")).set({
- paddingLeft: 5
- });
- vBox.add(label);
-
- const hBox = new qx.ui.container.Composite(new qx.ui.layout.HBox(5).set({
- alignY: "middle"
- }));
- vBox.add(hBox);
-
- const organizationsAndMembers = this.__organizationsAndMembers = new osparc.filter.OrganizationsAndMembers("orgAndMembClusters");
- hBox.add(organizationsAndMembers, {
- flex: 1
- });
-
- const addCollaboratorBtn = new qx.ui.form.Button(this.tr("Add")).set({
- appearance: "strong-button",
- allowGrowY: false,
- enabled: false
- });
- addCollaboratorBtn.addListener("execute", () => {
- this.__addMembers(this.__organizationsAndMembers.getSelectedGIDs());
- }, this);
- qx.event.message.Bus.getInstance().subscribe("OrgAndMembClustersFilter", () => {
- const anySelected = Boolean(this.__organizationsAndMembers.getSelectedGIDs().length);
- addCollaboratorBtn.setEnabled(anySelected);
- }, this);
-
- hBox.add(addCollaboratorBtn);
-
- return vBox;
- },
-
- __getMembersList: function() {
- const membersUIList = new qx.ui.form.List().set({
- decorator: "no-border",
- spacing: 3,
- width: 150,
- backgroundColor: "background-main-2"
- });
-
- const membersArrayModel = this.__membersArrayModel = new qx.data.Array();
- const membersCtrl = new qx.data.controller.List(membersArrayModel, membersUIList, "name");
- membersCtrl.setDelegate({
- createItem: () => new osparc.ui.list.MemberListItem(),
- bindItem: (ctrl, item, id) => {
- ctrl.bindProperty("id", "model", null, item, id);
- ctrl.bindProperty("id", "key", null, item, id);
- ctrl.bindProperty("thumbnail", "thumbnail", null, item, id);
- ctrl.bindProperty("name", "title", null, item, id);
- ctrl.bindProperty("login", "subtitleMD", null, item, id);
- ctrl.bindProperty("accessRights", "accessRights", null, item, id);
- ctrl.bindProperty("showOptions", "showOptions", null, item, id);
- },
- configureItem: item => {
- item.getChildControl("thumbnail").getContentElement()
- .setStyles({
- "border-radius": "16px"
- });
- item.addListener("promoteToManager", e => {
- const clusterMember = e.getData();
- this.__promoteToManager(clusterMember);
- });
- item.addListener("removeMember", e => {
- const clusterMember = e.getData();
- this.__deleteMember(clusterMember);
- });
- }
- });
-
- return membersUIList;
- },
-
- __clusterSelected: function(data) {
- this.__selectOrgMemberLayout.exclude();
- if (data && data.length) {
- this.__currentCluster = data[0];
- } else {
- this.__currentCluster = null;
- }
- this.__reloadClusterMembers();
- },
-
- __reloadClusters: function(reloadMembers = false) {
- let reloadClusterKey = null;
- if (reloadMembers) {
- reloadClusterKey = this.__currentCluster.getKey();
- }
-
- const clustersModel = this.__clustersModel;
- clustersModel.removeAll();
-
- const isDisabled = osparc.utils.DisabledPlugins.isClustersDisabled();
- if (isDisabled === false) {
- osparc.data.Resources.get("clusters")
- .then(clusters => {
- clusters.forEach(cluster => clustersModel.append(qx.data.marshal.Json.createModel(cluster)));
- if (reloadClusterKey) {
- const selectables = this.__clustersList.getSelectables();
- selectables.forEach(selectable => {
- if (selectable.getKey() === reloadClusterKey) {
- this.__currentCluster = selectable;
- this.__reloadClusterMembers();
- }
- });
- }
- })
- .catch(err => console.error(err));
- }
- },
-
- __reloadClusterMembers: function() {
- const membersArrayModel = this.__membersArrayModel;
- membersArrayModel.removeAll();
-
- const clusterModel = this.__currentCluster;
- if (clusterModel === null) {
- return;
- }
-
- const clusterMembers = clusterModel.getMembersList();
-
- const groupsStore = osparc.store.Groups.getInstance();
- const myGid = groupsStore.getMyGroupId();
- const membersModel = clusterModel.getMembers();
- const getter = "get"+String(myGid);
- const canWrite = membersModel[getter] ? membersModel[getter]().getWrite() : false;
- if (canWrite) {
- this.__selectOrgMemberLayout.show();
- const memberKeys = [];
- clusterMembers.forEach(clusterMember => memberKeys.push(clusterMember["gid"]));
- this.__organizationsAndMembers.reloadVisibleCollaborators(memberKeys);
- }
-
- const potentialCollaborators = osparc.store.Groups.getInstance().getPotentialCollaborators();
- clusterMembers.forEach(clusterMember => {
- const gid = clusterMember.getGroupId();
- if (gid in potentialCollaborators) {
- const collaborator = potentialCollaborators[gid];
- const collabObj = {};
- if (collaborator["collabType"] === 1) {
- // group
- collabObj["thumbnail"] = collaborator.getThumbnail() || "@FontAwesome5Solid/users/24";
- collabObj["login"] = collaborator.getDescription();
- } else if (collaborator["collabType"] === 2) {
- // user
- collabObj["thumbnail"] = collaborator.getThumbnail() || "@FontAwesome5Solid/user/24";
- collabObj["login"] = collaborator.getLogin();
- }
- if (Object.keys(collabObj).length) {
- collabObj["id"] = collaborator.getGroupId();
- collabObj["name"] = collaborator.getLabel();
- collabObj["accessRights"] = clusterMember.getAccessRights();
- collabObj["showOptions"] = canWrite;
- membersArrayModel.append(qx.data.marshal.Json.createModel(collabObj));
- }
- }
- });
- },
-
- __openEditCluster: function(clusterId) {
- let cluster = null;
- this.__clustersModel.forEach(clusterModel => {
- if (clusterModel.getId() === parseInt(clusterId)) {
- cluster = clusterModel;
- }
- });
- if (cluster === null) {
- return;
- }
-
- const newCluster = false;
- const clusterEditor = new osparc.editor.ClusterEditor(newCluster);
- cluster.bind("id", clusterEditor, "cid");
- cluster.bind("name", clusterEditor, "label");
- cluster.bind("endpoint", clusterEditor, "endpoint");
- clusterEditor.setSimpleAuthenticationUsername(cluster.getAuthentication().getUsername());
- clusterEditor.setSimpleAuthenticationPassword(cluster.getAuthentication().getPassword());
- cluster.bind("description", clusterEditor, "description");
- const title = this.tr("Cluster Details Editor");
- const win = osparc.ui.window.Window.popUpInWindow(clusterEditor, title, 400, 260);
- clusterEditor.addListener("updateCluster", () => {
- this.__updateCluster(win, clusterEditor.getChildControl("save"), clusterEditor);
- });
- clusterEditor.addListener("cancel", () => win.close());
- },
-
- __deleteCluster: function(clusterId) {
- let cluster = null;
- this.__clustersModel.forEach(clusterModel => {
- if (clusterModel.getId() === parseInt(clusterId)) {
- cluster = clusterModel;
- }
- });
- if (cluster === null) {
- return;
- }
-
- const name = cluster.getName();
- const msg = this.tr("Are you sure you want to delete ") + name + "?";
- const win = new osparc.ui.window.Confirmation(msg).set({
- caption: this.tr("Delete Cluster"),
- confirmText: this.tr("Delete"),
- confirmAction: "delete"
- });
- win.center();
- win.open();
- win.addListener("close", () => {
- if (win.getConfirmed()) {
- const params = {
- url: {
- "cid": clusterId
- }
- };
- osparc.data.Resources.fetch("clusters", "delete", params)
- .then(() => {
- osparc.store.Store.getInstance().reset("clusters");
- this.__reloadClusters();
- })
- .catch(err => {
- osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong deleting ") + name, "ERROR");
- console.error(err);
- })
- .finally(() => {
- win.close();
- });
- }
- }, this);
- },
-
- __createCluster: function(win, button, clusterEditor) {
- const clusterKey = clusterEditor.getCid();
- const name = clusterEditor.getLabel();
- const endpoint = clusterEditor.getEndpoint();
- const simpleAuthenticationUsername = clusterEditor.getSimpleAuthenticationUsername();
- const simpleAuthenticationPassword = clusterEditor.getSimpleAuthenticationPassword();
- const description = clusterEditor.getDescription();
- const params = {
- url: {
- "cid": clusterKey
- },
- data: {
- "name": name,
- "endpoint": endpoint,
- "authentication": {
- "type": "simple",
- "username": simpleAuthenticationUsername,
- "password": simpleAuthenticationPassword
- },
- "description": description,
- "type": "AWS"
- }
- };
- osparc.data.Resources.fetch("clusters", "post", params)
- .then(() => {
- osparc.FlashMessenger.getInstance().logAs(name + this.tr(" successfully created"));
- button.setFetching(false);
- osparc.store.Store.getInstance().reset("clusters");
- this.__reloadClusters();
- })
- .catch(err => {
- osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong creating ") + name, "ERROR");
- button.setFetching(false);
- console.error(err);
- })
- .finally(() => {
- win.close();
- });
- },
-
- __updateCluster: function(win, button, clusterEditor) {
- const clusterId = clusterEditor.getCid();
- const name = clusterEditor.getLabel();
- const endpoint = clusterEditor.getEndpoint();
- const authenticationType = "simple";
- const simpleAuthenticationUsername = clusterEditor.getSimpleAuthenticationUsername();
- const simpleAuthenticationPassword = clusterEditor.getSimpleAuthenticationPassword();
- const description = clusterEditor.getDescription();
- const params = {
- url: {
- "cid": clusterId
- },
- data: {
- "name": name,
- "endpoint": endpoint,
- "authentication": {
- "type": authenticationType,
- "username": simpleAuthenticationUsername,
- "password": simpleAuthenticationPassword
- },
- "description": description,
- "type": "AWS"
- }
- };
- osparc.data.Resources.fetch("clusters", "patch", params)
- .then(() => {
- osparc.FlashMessenger.getInstance().logAs(name + this.tr(" successfully edited"));
- button.setFetching(false);
- win.close();
- osparc.store.Store.getInstance().reset("clusters");
- this.__reloadClusters();
- })
- .catch(err => {
- osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong editing ") + name, "ERROR");
- button.setFetching(false);
- console.error(err);
- });
- },
-
- __addMembers: function(gids) {
- if (this.__currentCluster === null) {
- return;
- }
-
- const accessRights = JSON.parse(qx.util.Serializer.toJson(this.__currentCluster.getMembers()));
- gids.forEach(gid => {
- if (gid in accessRights) {
- return;
- }
-
- accessRights[gid] = {
- "read": true,
- "write": false,
- "delete": false
- };
- });
-
- const params = {
- url: {
- "cid": this.__currentCluster.getKey()
- },
- data: {
- "accessRights": accessRights
- }
- };
- osparc.data.Resources.fetch("clusters", "patch", params)
- .then(() => {
- osparc.FlashMessenger.getInstance().logAs(this.tr("Cluster successfully shared"));
- osparc.store.Store.getInstance().reset("clusters");
- this.__reloadClusters(true);
- })
- .catch(err => {
- osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong sharing the Cluster"), "ERROR");
- console.error(err);
- });
- },
-
- __promoteToManager: function(clusterMember) {
- if (this.__currentCluster === null) {
- return;
- }
-
- const accessRights = JSON.parse(qx.util.Serializer.toJson(this.__currentCluster.getMembers()));
- if (!(clusterMember["key"] in accessRights)) {
- return;
- }
-
- accessRights[clusterMember["key"]] = {
- "read": true,
- "write": true,
- "delete": false
- };
- const params = {
- url: {
- "cid": this.__currentCluster.getKey()
- },
- data: {
- "accessRights": accessRights
- }
- };
- osparc.data.Resources.fetch("clusters", "patch", params)
- .then(() => {
- osparc.FlashMessenger.getInstance().logAs(clusterMember["name"] + this.tr(" successfully promoted"));
- osparc.store.Store.getInstance().reset("clusters");
- this.__reloadClusters(true);
- })
- .catch(err => {
- osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong promoting ") + clusterMember["name"], "ERROR");
- console.error(err);
- });
- },
-
- __deleteMember: function(clusterMember) {
- if (this.__currentCluster === null) {
- return;
- }
-
- const accessRights = JSON.parse(qx.util.Serializer.toJson(this.__currentCluster.getMembers()));
- if (!(clusterMember["key"] in accessRights)) {
- return;
- }
-
- accessRights[clusterMember["key"]] = {
- "read": false,
- "write": false,
- "delete": false
- };
- const params = {
- url: {
- "cid": this.__currentCluster.getKey()
- },
- data: {
- "accessRights": accessRights
- }
- };
- osparc.data.Resources.fetch("clusters", "patch", params)
- .then(() => {
- osparc.FlashMessenger.getInstance().logAs(clusterMember["name"] + this.tr(" successfully removed"));
- osparc.store.Store.getInstance().reset("clusters");
- this.__reloadClusters(true);
- })
- .catch(err => {
- osparc.FlashMessenger.getInstance().logAs(this.tr("Something went wrong removing ") + clusterMember["name"], "ERROR");
- console.error(err);
- });
- }
- }
-});
diff --git a/services/static-webserver/client/source/class/osparc/editor/ClusterEditor.js b/services/static-webserver/client/source/class/osparc/editor/ClusterEditor.js
deleted file mode 100644
index 8418d2494d9..00000000000
--- a/services/static-webserver/client/source/class/osparc/editor/ClusterEditor.js
+++ /dev/null
@@ -1,262 +0,0 @@
-/* ************************************************************************
-
- osparc - the simcore frontend
-
- https://osparc.io
-
- Copyright:
- 2021 IT'IS Foundation, https://itis.swiss
-
- License:
- MIT: https://opensource.org/licenses/MIT
-
- Authors:
- * Odei Maiz (odeimaiz)
-
-************************************************************************ */
-
-qx.Class.define("osparc.editor.ClusterEditor", {
- extend: qx.ui.core.Widget,
-
- construct: function(newCluster = true) {
- this.base(arguments);
- this._setLayout(new qx.ui.layout.VBox(8));
-
- this.__newCluster = newCluster;
-
- const manager = this.__validator = new qx.ui.form.validation.Manager();
- const title = this.getChildControl("title");
- title.setRequired(true);
- manager.add(title);
- const endpoint = this.getChildControl("endpoint");
- endpoint.setRequired(true);
- manager.add(endpoint);
- const username = this.getChildControl("simpleAuthenticationUsername");
- username.setRequired(true);
- manager.add(username);
- const pass = this.getChildControl("simpleAuthenticationPassword");
- pass.setRequired(true);
- manager.add(pass);
- this._createChildControlImpl("description");
- this._createChildControlImpl("test-layout");
- newCluster ? this._createChildControlImpl("create") : this._createChildControlImpl("save");
- },
-
- properties: {
- cid: {
- check: "Number",
- init: 0,
- nullable: false,
- event: "changeCid"
- },
-
- label: {
- check: "String",
- init: "",
- nullable: false,
- event: "changeLabel"
- },
-
- endpoint: {
- check: "String",
- init: "",
- nullable: false,
- event: "changeEndpoint"
- },
-
- simpleAuthenticationUsername: {
- check: "String",
- init: "",
- nullable: false,
- event: "changeSimpleAuthenticationUsername"
- },
-
- simpleAuthenticationPassword: {
- check: "String",
- init: "",
- nullable: false,
- event: "changeSimpleAuthenticationPassword"
- },
-
- description: {
- check: "String",
- init: "",
- nullable: false,
- event: "changeDescription"
- }
- },
-
- events: {
- "createCluster": "qx.event.type.Event",
- "updateCluster": "qx.event.type.Event",
- "cancel": "qx.event.type.Event"
- },
-
- members: {
- __validator: null,
- __newCluster: null,
-
- _createChildControlImpl: function(id) {
- let control;
- switch (id) {
- case "title":
- control = new qx.ui.form.TextField().set({
- font: "text-14",
- backgroundColor: "background-main",
- placeholder: this.tr("Title")
- });
- this.bind("label", control, "value");
- control.bind("value", this, "label");
- this._add(control);
- break;
- case "endpointLayout":
- control = new qx.ui.container.Composite(new qx.ui.layout.HBox(5));
- this._add(control);
- break;
- case "endpoint": {
- const endpointLayout = this.getChildControl("endpointLayout");
- control = new qx.ui.form.TextField().set({
- font: "text-14",
- backgroundColor: "background-main",
- placeholder: this.tr("Endpoint")
- });
- this.bind("endpoint", control, "value");
- control.bind("value", this, "endpoint");
- control.setRequired(true);
- endpointLayout.add(control, {
- flex: 1
- });
- break;
- }
- case "simpleAuthenticationUsername": {
- const endpointLayout = this.getChildControl("endpointLayout");
- control = new qx.ui.form.TextField().set({
- font: "text-14",
- backgroundColor: "background-main",
- placeholder: this.tr("Username")
- });
- control.getContentElement().setAttribute("autocomplete", "off");
- this.bind("simpleAuthenticationUsername", control, "value");
- control.bind("value", this, "simpleAuthenticationUsername");
- control.setRequired(true);
- endpointLayout.add(control);
- break;
- }
- case "simpleAuthenticationPassword": {
- const endpointLayout = this.getChildControl("endpointLayout");
- control = new osparc.ui.form.PasswordField().set({
- font: "text-14",
- backgroundColor: "background-main",
- placeholder: this.tr("Password")
- });
- control.getContentElement().setAttribute("autocomplete", "off");
- this.bind("simpleAuthenticationPassword", control, "value");
- control.bind("value", this, "simpleAuthenticationPassword");
- control.setRequired(true);
- endpointLayout.add(control);
- break;
- }
- case "description":
- control = new qx.ui.form.TextArea().set({
- font: "text-14",
- placeholder: this.tr("Description"),
- autoSize: true,
- minHeight: 70,
- maxHeight: 140
- });
- this.bind("description", control, "value");
- control.bind("value", this, "description");
- this._add(control);
- break;
- case "test-layout": {
- control = this.__getTestLayout();
- this._add(control);
- break;
- }
- case "buttonsLayout": {
- control = new qx.ui.container.Composite(new qx.ui.layout.HBox(8).set({
- alignX: "right"
- }));
- const cancelButton = new qx.ui.form.Button(this.tr("Cancel")).set({
- appearance: "form-button-text"
- });
- cancelButton.addListener("execute", () => this.fireEvent("cancel"), this);
- control.add(cancelButton);
- this._add(control);
- break;
- }
- case "create": {
- const buttons = this.getChildControl("buttonsLayout");
- control = new osparc.ui.form.FetchButton(this.tr("Create")).set({
- appearance: "form-button"
- });
- control.addListener("execute", () => {
- if (this.__validator.validate()) {
- control.setFetching(true);
- this.fireEvent("createCluster");
- }
- }, this);
- buttons.add(control);
- break;
- }
- case "save": {
- const buttons = this.getChildControl("buttonsLayout");
- control = new osparc.ui.form.FetchButton(this.tr("Save")).set({
- appearance: "form-button"
- });
- control.addListener("execute", () => {
- if (this.__validator.validate()) {
- control.setFetching(true);
- this.fireEvent("updateCluster");
- }
- }, this);
- buttons.add(control);
- break;
- }
- }
-
- return control || this.base(arguments, id);
- },
-
- __getTestLayout: function() {
- const testLayout = new qx.ui.container.Composite(new qx.ui.layout.HBox(8));
- const testButton = new osparc.ui.form.FetchButton(this.tr("Test"));
- testLayout.add(testButton);
-
- const testResult = new qx.ui.basic.Image("@FontAwesome5Solid/lightbulb/16");
- testLayout.add(testResult);
-
- testButton.addListener("execute", () => {
- if (this.__validator.validate()) {
- testButton.setFetching(true);
- const endpoint = this.__newCluster ? "pingWCredentials" : "ping";
- const params = {};
- if (this.__newCluster) {
- params["data"] = {
- "endpoint": this.getEndpoint(),
- "authentication": {
- "type": "simple",
- "username": this.getSimpleAuthenticationUsername(),
- "password": this.getSimpleAuthenticationPassword()
- }
- };
- } else {
- params["url"] = {
- cid: this.getCid()
- };
- }
- osparc.data.Resources.fetch("clusters", endpoint, params)
- .then(() => testResult.setTextColor("ready-green"))
- .catch(err => {
- testResult.setTextColor("failed-red");
- const msg = err.message || this.tr("Test failed");
- osparc.FlashMessenger.getInstance().logAs(msg, "Error");
- })
- .finally(() => testButton.setFetching(false));
- }
- }, this);
-
- return testLayout;
- }
- }
-});
diff --git a/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js b/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
index be4a1c8f4a8..160ab65ae29 100644
--- a/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
+++ b/services/static-webserver/client/source/class/osparc/navigation/UserMenu.js
@@ -91,18 +91,6 @@ qx.Class.define("osparc.navigation.UserMenu", {
control.addListener("execute", () => osparc.desktop.organizations.OrganizationsWindow.openWindow(), this);
this.add(control);
break;
- case "clusters":
- control = new qx.ui.menu.Button(this.tr("Clusters"));
- control.exclude();
- if (osparc.product.Utils.showClusters()) {
- const isDisabled = osparc.utils.DisabledPlugins.isClustersDisabled();
- if (isDisabled === false) {
- control.show();
- }
- }
- control.addListener("execute", () => osparc.cluster.Utils.popUpClustersDetails(), this);
- this.add(control);
- break;
case "market":
control = new qx.ui.menu.Button(this.tr("Market"));
control.addListener("execute", () => osparc.vipMarket.MarketWindow.openWindow());
@@ -175,7 +163,6 @@ qx.Class.define("osparc.navigation.UserMenu", {
}
this.getChildControl("preferences");
this.getChildControl("organizations");
- this.getChildControl("clusters");
}

this.addSeparator();
@@ -231,7 +218,6 @@ qx.Class.define("osparc.navigation.UserMenu", {
}
this.getChildControl("preferences");
this.getChildControl("organizations");
- this.getChildControl("clusters");
}

this.addSeparator();
diff --git a/services/static-webserver/client/source/class/osparc/product/Utils.js b/services/static-webserver/client/source/class/osparc/product/Utils.js
index 4535f1ca8b6..31501afeb34 100644
--- a/services/static-webserver/client/source/class/osparc/product/Utils.js
+++ b/services/static-webserver/client/source/class/osparc/product/Utils.js
@@ -247,13 +247,6 @@ qx.Class.define("osparc.product.Utils", {
return true;
},

- showClusters: function() {
- if (this.isProduct("s4llite") || this.isProduct("tis") || this.isProduct("tiplite")) {
- return false;
- }
- return true;
- },
-
showDisableServiceAutoStart: function() {
if (this.isProduct("s4llite")) {
return false;
diff --git a/services/static-webserver/client/source/class/osparc/store/Store.js b/services/static-webserver/client/source/class/osparc/store/Store.js
index 7b94b336852..cd82975131d 100644
--- a/services/static-webserver/client/source/class/osparc/store/Store.js
+++ b/services/static-webserver/client/source/class/osparc/store/Store.js
@@ -174,11 +174,6 @@ qx.Class.define("osparc.store.Store", {
check: "Array",
init: []
},
- clusters: {
- check: "Array",
- init: [],
- event: "changeClusters"
- },
services: {
check: "Array",
init: []
diff --git a/services/static-webserver/client/source/class/osparc/ui/list/ClusterListItem.js b/services/static-webserver/client/source/class/osparc/ui/list/ClusterListItem.js
deleted file mode 100644
index ff24bb6f6b0..00000000000
--- a/services/static-webserver/client/source/class/osparc/ui/list/ClusterListItem.js
+++ /dev/null
@@ -1,150 +0,0 @@
-/* ************************************************************************
-
- osparc - the simcore frontend
-
- https://osparc.io
-
- Copyright:
- 2021 IT'IS Foundation, https://itis.swiss
-
- License:
- MIT: https://opensource.org/licenses/MIT
-
- Authors:
- * Odei Maiz (odeimaiz)
-
-************************************************************************ */
-
-qx.Class.define("osparc.ui.list.ClusterListItem", {
- extend: osparc.ui.list.ListItem,
-
- construct: function() {
- this.base(arguments);
- },
-
- properties: {
- members: {
- check: "Object",
- nullable: false,
- apply: "__applyMembers",
- event: "changeMembers"
- },
-
- accessRights: {
- check: "Object",
- nullable: false,
- apply: "__applyAccessRights",
- event: "changeAccessRights"
- },
-
- endpoint: {
- check: "String",
- nullable: false,
- event: "changeEndpoint"
- },
-
- simpleAuthenticationUsername: {
- check: "String",
- init: "",
- nullable: false,
- event: "changeSimpleAuthenticationUsername"
- },
-
- simpleAuthenticationPassword: {
- check: "String",
- init: "",
- nullable: false,
- event: "changeSimpleAuthenticationPassword"
- }
- },
-
- events: {
- "openEditCluster": "qx.event.type.Data",
- "deleteCluster": "qx.event.type.Data"
- },
-
- members: {
- _createChildControlImpl: function(id) {
- let control;
- switch (id) {
- case "options": {
- const iconSize = 25;
- control = new qx.ui.form.MenuButton().set({
- maxWidth: iconSize,
- maxHeight: iconSize,
- alignX: "center",
- alignY: "middle",
- icon: "@FontAwesome5Solid/ellipsis-v/"+(iconSize-11),
- focusable: false
- });
- this._add(control, {
- row: 0,
- column: 3,
- rowSpan: 2
- });
- break;
- }
- }
-
- return control || this.base(arguments, id);
- },
-
- __applyMembers: function(members) {
- if (members === null) {
- return;
- }
-
- const nMembers = this.getMembersList().length + this.tr(" members");
- this.setContact(nMembers);
-
- const myGid = osparc.auth.Data.getInstance().getGroupId();
- if ("get"+myGid in members) {
- this.setAccessRights(members.get(myGid));
- }
- },
-
- getMembersList: function() {
- const membersList = [];
- const members = this.getMembers();
- const memberGids = members.basename.split("|");
- memberGids.forEach(memberGid => {
- const member = members.get(memberGid);
- member.gid = memberGid;
- membersList.push(member);
- });
- return membersList;
- },
-
- __applyAccessRights: function(accessRights) {
- if (accessRights === null) {
- return;
- }
-
- if (accessRights.getDelete()) {
- const optionsMenu = this.getChildControl("options");
- const menu = this.__getOptionsMenu();
- optionsMenu.setMenu(menu);
- }
- },
-
- __getOptionsMenu: function() {
- const menu = new qx.ui.menu.Menu().set({
- position: "bottom-right"
- });
-
- const editClusterButton = new qx.ui.menu.Button(this.tr("Edit details"));
- editClusterButton.addListener("execute", () => {
- this.fireDataEvent("openEditCluster", this.getKey());
- });
- menu.add(editClusterButton);
-
- const deleteClusterButton = new qx.ui.menu.Button(this.tr("Delete"));
- deleteClusterButton.addListener("execute", () => {
- this.fireDataEvent("deleteCluster", this.getKey());
- });
- menu.add(deleteClusterButton);
-
- return menu;
- }
- }
-});
diff --git a/services/static-webserver/client/source/class/osparc/utils/DisabledPlugins.js b/services/static-webserver/client/source/class/osparc/utils/DisabledPlugins.js
index e9e955b1ba9..2b5d9a65995 100644
--- a/services/static-webserver/client/source/class/osparc/utils/DisabledPlugins.js
+++ b/services/static-webserver/client/source/class/osparc/utils/DisabledPlugins.js
@@ -28,7 +28,6 @@ qx.Class.define("osparc.utils.DisabledPlugins", {
SCICRUNCH: "WEBSERVER_SCICRUNCH",
VERSION_CONTROL: "WEBSERVER_VERSION_CONTROL",
META_MODELING: "WEBSERVER_META_MODELING",
- CLUSTERS: "WEBSERVER_CLUSTERS",
FOLDERS: "WEBSERVER_FOLDERS",

isFoldersEnabled: function() {
@@ -53,10 +52,6 @@ qx.Class.define("osparc.utils.DisabledPlugins", {
return this.__isPluginDisabled(this.META_MODELING);
},

- isClustersDisabled: function() {
- return this.__isPluginDisabled(this.CLUSTERS);
- },
-
__isPluginDisabled: function(key) {
const statics = osparc.store.Store.getInstance().get("statics");
if (statics) {
From 7d1ac3e2020fc7423b9928cfb24553ca03396b9d Mon Sep 17 00:00:00 2001
From: Odei Maiz <33152403+odeimaiz@users.noreply.github.com>
Date: Tue, 3 Dec 2024 20:45:28 +0100
Subject: [PATCH 15/16] =?UTF-8?q?=F0=9F=92=A3=20[Frontend]=20Retire=20clus?=
=?UTF-8?q?ters=20(#6883)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
From b1fe5989dbbaed62c26fc1d91bb985ae9b91cac4 Mon Sep 17 00:00:00 2001
From: Odei Maiz <33152403+odeimaiz@users.noreply.github.com>
Date: Tue, 3 Dec 2024 20:45:44 +0100
Subject: [PATCH 16/16] =?UTF-8?q?=F0=9F=92=A3=20[Frontend]=20Retire=20clus?=
=?UTF-8?q?ters=20(#6883)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit