Skip to content

Commit

Permalink
Add Modelmesh, Ray and TrustyAI operands
Browse files Browse the repository at this point in the history
Signed-off-by: Jakub Stejskal <[email protected]>
  • Loading branch information
Frawless committed Jan 29, 2024
1 parent 450b67c commit 0f5b234
Show file tree
Hide file tree
Showing 10 changed files with 275 additions and 8 deletions.
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -38,5 +38,5 @@ build/

## Suite specific files
**/*.kubeconfig
env
*.env
config.yaml
2 changes: 2 additions & 0 deletions src/main/java/io/odh/test/OdhAnnotationsLabels.java
Original file line number Diff line number Diff line change
Expand Up @@ -14,5 +14,7 @@ public class OdhAnnotationsLabels {

public static final String ANNO_SERVICE_MESH = ODH_DOMAIN + "service-mesh";
public static final String ANNO_NTB_INJECT_OAUTH = "notebooks." + ODH_DOMAIN + "inject-oauth";
public static final String APP_LABEL_KEY = "app";
public static final String APP_LABEL_VALUE = "odh-e2e";

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
/*
* Copyright Skodjob authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.odh.test.framework.manager.requirements;

import io.fabric8.openshift.api.model.operatorhub.v1alpha1.Subscription;
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.SubscriptionBuilder;
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.framework.manager.ResourceItem;
import io.odh.test.framework.manager.ResourceManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.Collections;

public class PipelinesOperator {
    private static final Logger LOGGER = LoggerFactory.getLogger(PipelinesOperator.class);

    /**
     * Deploys the OpenShift Pipelines operator by creating an OLM {@link Subscription}
     * in the cluster-wide "openshift-operators" namespace (which already has an
     * OperatorGroup, so none is created here) and registers the subscription for
     * deletion on the ResourceManager cleanup stack.
     */
    public static void deployOperator() {
        Subscription subscription = new SubscriptionBuilder()
            .editOrNewMetadata()
                .withName("openshift-pipelines-operator")
                .withNamespace("openshift-operators")
                .withLabels(Collections.singletonMap(OdhAnnotationsLabels.APP_LABEL_KEY, OdhAnnotationsLabels.APP_LABEL_VALUE))
            .endMetadata()
            .editOrNewSpec()
                .withName("openshift-pipelines-operator-rh")
                .withChannel("latest")
                .withSource("redhat-operators")
                .withSourceNamespace("openshift-marketplace")
                .withInstallPlanApproval("Automatic")
                .editOrNewConfig()
                .endConfig()
            .endSpec()
            .build();

        LOGGER.info("Deploying Subscription {}/{}", subscription.getMetadata().getNamespace(), subscription.getMetadata().getName());
        ResourceManager.getInstance().createResourceWithWait(subscription);
        // Register cleanup so the subscription is removed when the resource stack unwinds.
        ResourceManager.getInstance().pushToStack(new ResourceItem(() -> deleteOperator(subscription), null));
    }

    /**
     * Deletes the operator subscription created by {@link #deployOperator()}.
     *
     * @param subscription the subscription resource to delete
     */
    public static void deleteOperator(Subscription subscription) {
        // Single-element immutable list is enough for the client's bulk delete API.
        ResourceManager.getClient().delete(Collections.singletonList(subscription));
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
/*
* Copyright Skodjob authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.odh.test.framework.manager.requirements;

import io.fabric8.kubernetes.api.model.Namespace;
import io.fabric8.kubernetes.api.model.NamespaceBuilder;
import io.fabric8.openshift.api.model.operatorhub.v1.OperatorGroupBuilder;
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.Subscription;
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.SubscriptionBuilder;
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.framework.manager.ResourceItem;
import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.framework.manager.resources.OperatorGroupResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.Collections;

public class ServerlessOperator {
    private static final Logger LOGGER = LoggerFactory.getLogger(ServerlessOperator.class);

    /**
     * Deploys the OpenShift Serverless operator: creates its dedicated
     * "openshift-serverless" namespace, an OperatorGroup (if none exists there yet)
     * and an OLM {@link Subscription}. Namespace deletion is registered on the
     * ResourceManager cleanup stack; deleting the namespace tears down the
     * contained OperatorGroup and Subscription with it.
     */
    public static void deployOperator() {
        String operatorNs = "openshift-serverless";
        // Create the namespace that hosts the operator.
        Namespace ns = new NamespaceBuilder()
            .withNewMetadata()
                .withName(operatorNs)
                .withLabels(Collections.singletonMap(OdhAnnotationsLabels.APP_LABEL_KEY, OdhAnnotationsLabels.APP_LABEL_VALUE))
            .endMetadata()
            .build();
        ResourceManager.getInstance().createResourceWithoutWait(ns);

        // OLM requires exactly one OperatorGroup per namespace; only create it if missing.
        if (OperatorGroupResource.operatorGroupClient().inNamespace(operatorNs).list().getItems().isEmpty()) {
            OperatorGroupBuilder operatorGroup = new OperatorGroupBuilder()
                .editOrNewMetadata()
                    .withName("odh-group")
                    .withNamespace(operatorNs)
                    .withLabels(Collections.singletonMap(OdhAnnotationsLabels.APP_LABEL_KEY, OdhAnnotationsLabels.APP_LABEL_VALUE))
                .endMetadata();

            ResourceManager.getInstance().createResourceWithWait(operatorGroup.build());
        } else {
            LOGGER.info("OperatorGroup already exists.");
        }

        // Create the subscription that installs the operator.
        Subscription subscription = new SubscriptionBuilder()
            .editOrNewMetadata()
                .withName("serverless-operator")
                .withNamespace(operatorNs)
                .withLabels(Collections.singletonMap(OdhAnnotationsLabels.APP_LABEL_KEY, OdhAnnotationsLabels.APP_LABEL_VALUE))
            .endMetadata()
            .editOrNewSpec()
                .withName("serverless-operator")
                .withChannel("stable")
                .withSource("redhat-operators")
                .withSourceNamespace("openshift-marketplace")
                .withInstallPlanApproval("Automatic")
                .editOrNewConfig()
                .endConfig()
            .endSpec()
            .build();

        ResourceManager.getInstance().createResourceWithWait(subscription);
        // Deleting the namespace removes everything installed inside it.
        ResourceManager.getInstance().pushToStack(new ResourceItem(() -> deleteOperator(ns), null));
    }

    /**
     * Deletes the operator namespace (and transitively all resources in it).
     *
     * @param namespace the namespace created by {@link #deployOperator()}
     */
    public static void deleteOperator(Namespace namespace) {
        // Single-element immutable list is enough for the client's bulk delete API.
        ResourceManager.getClient().delete(Collections.singletonList(namespace));
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
/*
* Copyright Skodjob authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.odh.test.framework.manager.requirements;

import io.fabric8.openshift.api.model.operatorhub.v1alpha1.Subscription;
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.SubscriptionBuilder;
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.framework.manager.ResourceItem;
import io.odh.test.framework.manager.ResourceManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.Collections;

public class ServiceMeshOperator {
    private static final Logger LOGGER = LoggerFactory.getLogger(ServiceMeshOperator.class);

    /**
     * Deploys the Red Hat OpenShift Service Mesh operator by creating an OLM
     * {@link Subscription} in the cluster-wide "openshift-operators" namespace
     * (which already has an OperatorGroup, so none is created here) and registers
     * the subscription for deletion on the ResourceManager cleanup stack.
     */
    public static void deployOperator() {
        Subscription subscription = new SubscriptionBuilder()
            .editOrNewMetadata()
                .withName("servicemeshoperator")
                .withNamespace("openshift-operators")
                .withLabels(Collections.singletonMap(OdhAnnotationsLabels.APP_LABEL_KEY, OdhAnnotationsLabels.APP_LABEL_VALUE))
            .endMetadata()
            .editOrNewSpec()
                .withName("servicemeshoperator")
                .withChannel("stable")
                .withSource("redhat-operators")
                .withSourceNamespace("openshift-marketplace")
                .withInstallPlanApproval("Automatic")
                .editOrNewConfig()
                .endConfig()
            .endSpec()
            .build();

        LOGGER.info("Deploying Subscription {}/{}", subscription.getMetadata().getNamespace(), subscription.getMetadata().getName());
        ResourceManager.getInstance().createResourceWithWait(subscription);
        // Register cleanup so the subscription is removed when the resource stack unwinds.
        ResourceManager.getInstance().pushToStack(new ResourceItem(() -> deleteOperator(subscription), null));
    }

    /**
     * Deletes the operator subscription created by {@link #deployOperator()}.
     *
     * @param subscription the subscription resource to delete
     */
    public static void deleteOperator(Subscription subscription) {
        // Single-element immutable list is enough for the client's bulk delete API.
        ResourceManager.getClient().delete(Collections.singletonList(subscription));
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,12 @@
import io.odh.test.platform.KubeUtils;
import io.odh.test.utils.PodUtils;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Codeflare;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Datasciencepipelines;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Kserve;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Modelmeshserving;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Ray;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Trustyai;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand Down Expand Up @@ -55,13 +61,55 @@ public boolean waitForReadiness(DataScienceCluster resource) {
DataScienceCluster dsc = dataScienceCLusterClient().withName(resource.getMetadata().getName()).get();

String dashboardStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "dashboardReady").getStatus();
LOGGER.debug("DataScienceCluster {} dashboard status: {}", resource.getMetadata().getName(), dashboardStatus);
LOGGER.debug("DataScienceCluster {} Dashboard status: {}", resource.getMetadata().getName(), dashboardStatus);
dscReady = dashboardStatus.equals("True");

String workbenchesStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "workbenchesReady").getStatus();
LOGGER.debug("DataScienceCluster {} workbenches status: {}", resource.getMetadata().getName(), workbenchesStatus);
LOGGER.debug("DataScienceCluster {} Workbenches status: {}", resource.getMetadata().getName(), workbenchesStatus);
dscReady = dscReady && workbenchesStatus.equals("True");

// Wait for CodeFlare
if (resource.getSpec().getComponents().getCodeflare().getManagementState().equals(Codeflare.ManagementState.MANAGED)) {
String codeflareStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "codeflareReady").getStatus();
LOGGER.debug("DataScienceCluster {} CodeFlare status: {}", resource.getMetadata().getName(), codeflareStatus);
dscReady = dscReady && codeflareStatus.equals("True");
}

// Wait for ModelMesh
if (resource.getSpec().getComponents().getModelmeshserving().getManagementState().equals(Modelmeshserving.ManagementState.MANAGED)) {
String modemeshStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "model-meshReady").getStatus();
LOGGER.debug("DataScienceCluster {} ModelMesh status: {}", resource.getMetadata().getName(), modemeshStatus);
dscReady = dscReady && modemeshStatus.equals("True");
}

// Wait for Ray
if (resource.getSpec().getComponents().getRay().getManagementState().equals(Ray.ManagementState.MANAGED)) {
String rayStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "rayReady").getStatus();
LOGGER.debug("DataScienceCluster {} Ray status: {}", resource.getMetadata().getName(), rayStatus);
dscReady = dscReady && rayStatus.equals("True");
}

// Wait for TrustyAI
if (resource.getSpec().getComponents().getTrustyai().getManagementState().equals(Trustyai.ManagementState.MANAGED)) {
String trustyAiStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "trustyaiReady").getStatus();
LOGGER.debug("DataScienceCluster {} TrustyAI status: {}", resource.getMetadata().getName(), trustyAiStatus);
dscReady = dscReady && trustyAiStatus.equals("True");
}

// Wait for KServe
if (resource.getSpec().getComponents().getKserve().getManagementState().equals(Kserve.ManagementState.MANAGED)) {
String kserveStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "kserveReady").getStatus();
LOGGER.debug("DataScienceCluster {} KServe status: {}", resource.getMetadata().getName(), kserveStatus);
dscReady = dscReady && kserveStatus.equals("True");
}

// Wait for PipelinesOperator
if (resource.getSpec().getComponents().getDatasciencepipelines().getManagementState().equals(Datasciencepipelines.ManagementState.MANAGED)) {
String pipelinesStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "data-science-pipelines-operatorReady").getStatus();
LOGGER.debug("DataScienceCluster {} DataSciencePipelines status: {}", resource.getMetadata().getName(), pipelinesStatus);
dscReady = dscReady && pipelinesStatus.equals("True");
}

return dscReady;
}, () -> { });

Expand Down
11 changes: 11 additions & 0 deletions src/test/java/io/odh/test/e2e/Abstract.java
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@
import io.odh.test.framework.listeners.TestVisualSeparator;
import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.framework.listeners.TestExceptionCallbackListener;
import io.odh.test.framework.manager.requirements.PipelinesOperator;
import io.odh.test.framework.manager.requirements.ServerlessOperator;
import io.odh.test.framework.manager.requirements.ServiceMeshOperator;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.extension.ExtendWith;

Expand All @@ -19,4 +23,11 @@ public abstract class Abstract implements TestVisualSeparator {
static {
ResourceManager.getInstance();
}

// Installs cluster-level operator dependencies before any test in the class runs.
// NOTE(review): a non-static @BeforeAll is only valid with
// @TestInstance(Lifecycle.PER_CLASS) on the test class — confirm that
// annotation is present on Abstract or a subclass.
@BeforeAll
void setupDependencies() {
// Order: Pipelines and Service Mesh first, then Serverless.
// NOTE(review): presumably these back DSC components (data-science-pipelines,
// service mesh, KServe serving) — verify against the DataScienceCluster specs.
PipelinesOperator.deployOperator();
ServiceMeshOperator.deployOperator();
ServerlessOperator.deployOperator();
}
}
21 changes: 19 additions & 2 deletions src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,12 @@
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.DatasciencepipelinesBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Kserve;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.KserveBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Modelmeshserving;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.ModelmeshservingBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Ray;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.RayBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Trustyai;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.TrustyaiBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Workbenches;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.WorkbenchesBuilder;
import io.opendatahub.dscinitialization.v1.DSCInitialization;
Expand Down Expand Up @@ -49,7 +55,7 @@ void createDataScienceCluster() {
.withNamespace(OdhConstants.MONITORING_NAMESPACE)
.endMonitoring()
.withNewServiceMesh()
.withManagementState(ServiceMesh.ManagementState.REMOVED)
.withManagementState(ServiceMesh.ManagementState.MANAGED)
.withNewControlPlane()
.withName("data-science-smcp")
.withNamespace("istio-system")
Expand Down Expand Up @@ -81,11 +87,19 @@ void createDataScienceCluster() {
.withDatasciencepipelines(
new DatasciencepipelinesBuilder().withManagementState(Datasciencepipelines.ManagementState.MANAGED).build()
)
.withModelmeshserving(
new ModelmeshservingBuilder().withManagementState(Modelmeshserving.ManagementState.MANAGED).build()
)
.withRay(
new RayBuilder().withManagementState(Ray.ManagementState.MANAGED).build()
)
.withTrustyai(
new TrustyaiBuilder().withManagementState(Trustyai.ManagementState.MANAGED).build()
)
.build())
.endSpec()
.build();


ResourceManager.getInstance().createResourceWithWait(dsci);
ResourceManager.getInstance().createResourceWithWait(c);

Expand All @@ -96,5 +110,8 @@ void createDataScienceCluster() {
assertEquals(Dashboard.ManagementState.MANAGED, cluster.getSpec().getComponents().getDashboard().getManagementState());
assertEquals(Datasciencepipelines.ManagementState.MANAGED, cluster.getSpec().getComponents().getDatasciencepipelines().getManagementState());
assertEquals(Workbenches.ManagementState.MANAGED, cluster.getSpec().getComponents().getWorkbenches().getManagementState());
assertEquals(Modelmeshserving.ManagementState.MANAGED, cluster.getSpec().getComponents().getModelmeshserving().getManagementState());
assertEquals(Ray.ManagementState.MANAGED, cluster.getSpec().getComponents().getRay().getManagementState());
assertEquals(Trustyai.ManagementState.MANAGED, cluster.getSpec().getComponents().getTrustyai().getManagementState());
}
}
17 changes: 16 additions & 1 deletion src/test/java/io/odh/test/e2e/standard/NotebookST.java
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,12 @@
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.DatasciencepipelinesBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Kserve;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.KserveBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Modelmeshserving;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.ModelmeshservingBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Ray;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.RayBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Trustyai;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.TrustyaiBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Workbenches;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.WorkbenchesBuilder;
import io.opendatahub.dscinitialization.v1.DSCInitialization;
Expand Down Expand Up @@ -101,7 +107,7 @@ void deployDataScienceCluster() {
.withNamespace(OdhConstants.MONITORING_NAMESPACE)
.endMonitoring()
.withNewServiceMesh()
.withManagementState(ServiceMesh.ManagementState.REMOVED)
.withManagementState(ServiceMesh.ManagementState.MANAGED)
.withNewControlPlane()
.withName("data-science-smcp")
.withNamespace("istio-system")
Expand Down Expand Up @@ -134,6 +140,15 @@ void deployDataScienceCluster() {
.withDatasciencepipelines(
new DatasciencepipelinesBuilder().withManagementState(Datasciencepipelines.ManagementState.REMOVED).build()
)
.withModelmeshserving(
new ModelmeshservingBuilder().withManagementState(Modelmeshserving.ManagementState.MANAGED).build()
)
.withRay(
new RayBuilder().withManagementState(Ray.ManagementState.MANAGED).build()
)
.withTrustyai(
new TrustyaiBuilder().withManagementState(Trustyai.ManagementState.MANAGED).build()
)
.build())
.endSpec()
.build();
Expand Down
Loading

0 comments on commit 0f5b234

Please sign in to comment.