diff --git a/docs/io.odh.test.e2e.standard.DataScienceClusterST.md b/docs/io.odh.test.e2e.standard.DataScienceClusterST.md
new file mode 100644
index 00000000..ceaabede
--- /dev/null
+++ b/docs/io.odh.test.e2e.standard.DataScienceClusterST.md
@@ -0,0 +1,43 @@
+# DataScienceClusterST
+
+**Description:** Verifies simple setup of ODH by spinning up the operator, setting up DSCI, and setting up DSC.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+| 4. | Install ODH operator | Operator is up and running and is able to serve its operands |
+
+**After tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Delete ODH operator and all created resources | Operator is removed and all other resources as well |
+
+**Labels:**
+
+* `smoke` (description file doesn't exist)
+
+
+
+## createDataScienceCluster
+
+**Description:** Creates default DSCI and DSC and verifies that the operator configures everything properly. Checks that the operator sets the status of the resources properly.
+
+**Contact:** `David Kornel `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create default DSCI | DSCI is created and ready |
+| 2. | Create default DSC | DSC is created and ready |
+| 3. | Check that DSC has expected states for all components | DSC status is set properly based on configuration |
+
+**Labels:**
+
+* `smoke` (description file doesn't exist)
+
diff --git a/docs/io.odh.test.e2e.standard.DistributedST.md b/docs/io.odh.test.e2e.standard.DistributedST.md
new file mode 100644
index 00000000..fd631ed6
--- /dev/null
+++ b/docs/io.odh.test.e2e.standard.DistributedST.md
@@ -0,0 +1,58 @@
+# DistributedST
+
+**Description:** Verifies simple setup of ODH for distributed workloads by spinning up the operator, setting up DSCI, and setting up DSC.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+| 4. | Install ODH operator | Operator is up and running and is able to serve its operands |
+| 5. | Deploy DSCI | DSCI is created and ready |
+| 6. | Deploy DSC | DSC is created and ready |
+
+**After tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Delete ODH operator and all created resources | Operator is removed and all other resources as well |
+
+
+
+## testDistributedWorkloadWithAppWrapper
+
+**Description:** Check that user can create, run and delete a RayCluster through Codeflare AppWrapper from a DataScience project
+
+**Contact:** `Jiri Danek `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create namespace for AppWrapper with proper name, labels and annotations | Namespace is created |
+| 2. | Create AppWrapper for RayCluster using Codeflare-generated yaml | AppWrapper instance has been created |
+| 3. | Wait for Ray dashboard endpoint to come up | Ray dashboard service is backed by running pods |
+| 4. | Deploy workload through the route | The workload execution has been successful |
+| 5. | Delete the AppWrapper | The AppWrapper has been deleted |
+
+
+## testDistributedWorkloadWithKueue
+
+**Description:** Check that user can create, run and delete a RayCluster through Codeflare RayCluster backed by Kueue from a DataScience project
+
+**Contact:** `Jiri Danek `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create OAuth token | OAuth token has been created |
+| 2. | Create namespace for RayCluster with proper name, labels and annotations | Namespace is created |
+| 3. | Create required Kueue custom resource instances | Kueue queues have been created |
+| 4. | Create RayCluster using Codeflare-generated yaml | RayCluster instance has been created |
+| 5. | Wait for Ray dashboard endpoint to come up | Ray dashboard service is backed by running pods |
+| 6. | Deploy workload through the route | The workload execution has been successful |
+| 7. | Delete the RayCluster | The RayCluster has been deleted |
+
diff --git a/docs/io.odh.test.e2e.standard.ModelServingST.md b/docs/io.odh.test.e2e.standard.ModelServingST.md
new file mode 100644
index 00000000..12448dfe
--- /dev/null
+++ b/docs/io.odh.test.e2e.standard.ModelServingST.md
@@ -0,0 +1,42 @@
+# ModelServingST
+
+**Description:** Verifies simple setup of ODH for model serving by spinning up the operator, setting up DSCI, and setting up DSC.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+| 4. | Install ODH operator | Operator is up and running and is able to serve its operands |
+| 5. | Deploy DSCI | DSCI is created and ready |
+| 6. | Deploy DSC | DSC is created and ready |
+
+**After tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Delete ODH operator and all created resources | Operator is removed and all other resources as well |
+
+
+
+## testMultiModelServerInference
+
+**Description:** Check that user can create, run inference and delete MultiModelServing server from a DataScience project
+
+**Contact:** `Jiri Danek `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create namespace for ServingRuntime application with proper name, labels and annotations | Namespace is created |
+| 2. | Create a serving runtime using the processModelServerTemplate method | Serving runtime instance has been created |
+| 3. | Create a secret that exists, even though it contains no useful information | Secret has been created |
+| 4. | Create an inference service | Inference service has been created |
+| 5. | Perform model inference through the route | The model inference execution has been successful |
+| 6. | Delete the Inference Service | The Inference service has been deleted |
+| 7. | Delete the secret | The secret has been deleted |
+| 8. | Delete the serving runtime | The serving runtime has been deleted |
+
diff --git a/docs/io.odh.test.e2e.standard.NotebookST.md b/docs/io.odh.test.e2e.standard.NotebookST.md
new file mode 100644
index 00000000..b1150073
--- /dev/null
+++ b/docs/io.odh.test.e2e.standard.NotebookST.md
@@ -0,0 +1,38 @@
+# NotebookST
+
+**Description:** Verifies deployments of Notebooks via GitOps approach
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+| 4. | Install ODH operator | Operator is up and running and is able to serve its operands |
+| 5. | Deploy DSCI | DSCI is created and ready |
+| 6. | Deploy DSC | DSC is created and ready |
+
+**After tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Delete ODH operator and all created resources | Operator is removed and all other resources as well |
+
+
+
+## testCreateSimpleNotebook
+
+**Description:** Create simple Notebook with all needed resources and see if Operator creates it properly
+
+**Contact:** `Jakub Stejskal `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create namespace for Notebook resources with proper name, labels and annotations | Namespace is created |
+| 2. | Create PVC with proper labels and data for Notebook | PVC is created |
+| 3. | Create Notebook resource with Jupyter Minimal image in pre-defined namespace | Notebook resource is created |
+| 4. | Wait for Notebook pods readiness | Notebook pods are up and running, Notebook is in ready state |
+
diff --git a/docs/io.odh.test.e2e.standard.PipelineServerST.md b/docs/io.odh.test.e2e.standard.PipelineServerST.md
new file mode 100644
index 00000000..5f4855ff
--- /dev/null
+++ b/docs/io.odh.test.e2e.standard.PipelineServerST.md
@@ -0,0 +1,44 @@
+# PipelineServerST
+
+**Description:** Verifies simple setup of ODH by spinning up the operator, setting up DSCI, and setting up DSC.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+| 4. | Install ODH operator | Operator is up and running and is able to serve its operands |
+| 5. | Deploy DSCI | DSCI is created and ready |
+| 6. | Deploy DSC | DSC is created and ready |
+
+**After tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Delete ODH operator and all created resources | Operator is removed and all other resources as well |
+
+
+
+## testUserCanCreateRunAndDeleteADSPipelineFromDSProject
+
+**Description:** Check that user can create, run and delete a DataSciencePipeline from a DataScience project
+
+**Contact:** `Jiri Danek `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create namespace for DataSciencePipelines application with proper name, labels and annotations | Namespace is created |
+| 2. | Create Minio secret with proper data for accessing S3 | Secret is created |
+| 3. | Create DataSciencePipelinesApplication with configuration for new Minio instance and new MariaDB instance | DataSciencePipelinesApplication resource is created |
+| 4. | Wait for DataSciencePipelines server readiness | DSP API endpoint is available and it returns proper data |
+| 5. | Import pipeline to a pipeline server via API | Pipeline is imported |
+| 6. | List imported pipeline via API | Server returns a list with imported pipeline info |
+| 7. | Trigger pipeline run for imported pipeline | Pipeline is triggered |
+| 8. | Wait for pipeline success | Pipeline succeeded |
+| 9. | Delete pipeline run | Pipeline run is deleted |
+| 10. | Delete pipeline | Pipeline is deleted |
+
diff --git a/docs/io.odh.test.e2e.standard.PipelineV2ServerST.md b/docs/io.odh.test.e2e.standard.PipelineV2ServerST.md
new file mode 100644
index 00000000..03314137
--- /dev/null
+++ b/docs/io.odh.test.e2e.standard.PipelineV2ServerST.md
@@ -0,0 +1,44 @@
+# PipelineV2ServerST
+
+**Description:** Verifies simple setup of ODH by spinning up the operator, setting up DSCI, and setting up DSC.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+| 4. | Install ODH operator | Operator is up and running and is able to serve its operands |
+| 5. | Deploy DSCI | DSCI is created and ready |
+| 6. | Deploy DSC | DSC is created and ready |
+
+**After tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Delete ODH operator and all created resources | Operator is removed and all other resources as well |
+
+
+
+## testUserCanOperateDSv2PipelineFromDSProject
+
+**Description:** Check that user can create, run and delete a DataSciencePipeline from a DataScience project
+
+**Contact:** `Jiri Danek `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create namespace for DataSciencePipelines application with proper name, labels and annotations | Namespace is created |
+| 2. | Create Minio secret with proper data for accessing S3 | Secret is created |
+| 3. | Create DataSciencePipelinesApplication with configuration for new Minio instance and new MariaDB instance | DataSciencePipelinesApplication resource is created |
+| 4. | Wait for DataSciencePipelines server readiness | DSP API endpoint is available and it returns proper data |
+| 5. | Import pipeline to a pipeline server via API | Pipeline is imported |
+| 6. | List imported pipeline via API | Server returns a list with imported pipeline info |
+| 7. | Trigger pipeline run for imported pipeline | Pipeline is triggered |
+| 8. | Wait for pipeline success | Pipeline succeeded |
+| 9. | Delete pipeline run | Pipeline run is deleted |
+| 10. | Delete pipeline | Pipeline is deleted |
+
diff --git a/docs/io.odh.test.e2e.standard.UninstallST.md b/docs/io.odh.test.e2e.standard.UninstallST.md
new file mode 100644
index 00000000..d0207c34
--- /dev/null
+++ b/docs/io.odh.test.e2e.standard.UninstallST.md
@@ -0,0 +1,33 @@
+# UninstallST
+
+**Description:** Verifies that uninstall process removes all resources created by ODH installation
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+| 4. | Install ODH operator | Operator is up and running and is able to serve its operands |
+| 5. | Deploy DSCI | DSCI is created and ready |
+| 6. | Deploy DSC | DSC is created and ready |
+
+
+
+## testUninstallSimpleScenario
+
+**Description:** Check that the uninstall process removes all resources created by ODH installation
+
+**Contact:** `Jan Stourac `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Create uninstall configmap | ConfigMap exists |
+| 2. | Wait for controllers namespace deletion | Controllers namespace is deleted |
+| 3. | Check that relevant resources are deleted (Subscription, InstallPlan, CSV) | All relevant resources are deleted |
+| 4. | Check that all related namespaces are deleted (monitoring, notebooks, controllers) | All related namespaces are deleted |
+| 5. | Remove Operator namespace | Operator namespace is deleted |
+
diff --git a/docs/io.odh.test.e2e.upgrade.BundleUpgradeST.md b/docs/io.odh.test.e2e.upgrade.BundleUpgradeST.md
new file mode 100644
index 00000000..71d20182
--- /dev/null
+++ b/docs/io.odh.test.e2e.upgrade.BundleUpgradeST.md
@@ -0,0 +1,47 @@
+# BundleUpgradeST
+
+**Description:** Verifies upgrade path from previously released version to latest available build. Operator installation and upgrade is done via bundle of yaml files.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+
+**After tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Delete all ODH related resources in the cluster | All ODH related resources are gone |
+
+**Labels:**
+
+* `bundle-upgrade` (description file doesn't exist)
+
+
+
+## testUpgradeBundle
+
+**Description:** Creates default DSCI and DSC and verifies that the operator configures everything properly. Checks that the operator sets the status of the resources properly.
+
+**Contact:** `David Kornel `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Install operator via bundle of yaml files with specific version | Operator is up and running |
+| 2. | Deploy DSC (see UpgradeAbstract for more info) | DSC is created and ready |
+| 3. | Deploy Notebook to namespace test-odh-notebook-upgrade | All related pods are up and running. Notebook is in ready state. |
+| 4. | Apply latest yaml files with latest Operator version | Yaml file is applied |
+| 5. | Wait for RollingUpdate of Operator pod to a new version | Operator update is finished and pod is up and running |
+| 6. | Verify that Dashboard pods are stable for 2 minutes | Dashboard pods are stable for 2 minutes after upgrade |
+| 7. | Verify that Notebook pods are stable for 2 minutes | Notebook pods are stable for 2 minutes after upgrade |
+| 8. | Check that ODH operator doesn't contain any error logs | ODH operator log is error free |
+
+**Labels:**
+
+* `bundle-upgrade` (description file doesn't exist)
+
diff --git a/docs/io.odh.test.e2e.upgrade.OlmUpgradeST.md b/docs/io.odh.test.e2e.upgrade.OlmUpgradeST.md
new file mode 100644
index 00000000..6aef5788
--- /dev/null
+++ b/docs/io.odh.test.e2e.upgrade.OlmUpgradeST.md
@@ -0,0 +1,41 @@
+# OlmUpgradeST
+
+**Description:** Verifies upgrade path from previously released version to latest available build. Operator installation and upgrade is done via OLM.
+
+**Before tests execution steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Deploy Pipelines Operator | Pipelines operator is available on the cluster |
+| 2. | Deploy ServiceMesh Operator | ServiceMesh operator is available on the cluster |
+| 3. | Deploy Serverless Operator | Serverless operator is available on the cluster |
+
+**Labels:**
+
+* `olm-upgrade` (description file doesn't exist)
+
+
+
+## testUpgradeOlm
+
+**Description:** Creates default DSCI and DSC and verifies that the operator configures everything properly. Checks that the operator sets the status of the resources properly.
+
+**Contact:** `Jakub Stejskal `
+
+**Steps:**
+
+| Step | Action | Result |
+| - | - | - |
+| 1. | Install operator via OLM with manual approval and specific version | Operator is up and running |
+| 2. | Deploy DSC (see UpgradeAbstract for more info) | DSC is created and ready |
+| 3. | Deploy Notebook to namespace test-odh-notebook-upgrade | All related pods are up and running. Notebook is in ready state. |
+| 4. | Approve install plan for new version | Install plan is approved |
+| 5. | Wait for RollingUpdate of Operator pod to a new version | Operator update is finished and pod is up and running |
+| 6. | Verify that Dashboard pods are stable for 2 minutes | Dashboard pods are stable for 2 minutes after upgrade |
+| 7. | Verify that Notebook pods are stable for 2 minutes | Notebook pods are stable for 2 minutes after upgrade |
+| 8. | Check that ODH operator doesn't contain any error logs | ODH operator log is error free |
+
+**Labels:**
+
+* `olm-upgrade` (description file doesn't exist)
+
diff --git a/pom.xml b/pom.xml
index bfe3d367..23ef1cca 100644
--- a/pom.xml
+++ b/pom.xml
@@ -60,7 +60,7 @@
1.7.1
2.16.1
3.0.2
- 0.1.0
+ 0.2.0
2.17.1
1.9.21.2
2.27.0
diff --git a/src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java b/src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java
index 3f45ed3b..5b1fa38c 100644
--- a/src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java
+++ b/src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java
@@ -25,7 +25,7 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
-import io.skodjob.annotations.TestTag;
+import io.skodjob.annotations.Label;
import io.skodjob.testframe.resources.KubeResourceManager;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
@@ -47,8 +47,8 @@
afterTestSteps = {
@Step(value = "Delete ODH operator and all created resources", expected = "Operator is removed and all other resources as well")
},
- tags = {
- @TestTag(value = TestSuite.SMOKE)
+ labels = {
+ @Label(value = TestSuite.SMOKE)
}
)
@Tag(TestSuite.SMOKE)
@@ -68,8 +68,8 @@ public class DataScienceClusterST extends StandardAbstract {
@Step(value = "Create default DSC", expected = "DSC is created and ready"),
@Step(value = "Check that DSC has expected states for all components", expected = "DSC status is set properly based on configuration")
},
- tags = {
- @TestTag(value = TestSuite.SMOKE)
+ labels = {
+ @Label(value = TestSuite.SMOKE)
}
)
@Test
diff --git a/src/test/java/io/odh/test/e2e/upgrade/BundleUpgradeST.java b/src/test/java/io/odh/test/e2e/upgrade/BundleUpgradeST.java
index be14d854..c76b0d87 100644
--- a/src/test/java/io/odh/test/e2e/upgrade/BundleUpgradeST.java
+++ b/src/test/java/io/odh/test/e2e/upgrade/BundleUpgradeST.java
@@ -17,7 +17,7 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
-import io.skodjob.annotations.TestTag;
+import io.skodjob.annotations.Label;
import io.skodjob.testframe.resources.KubeResourceManager;
import io.skodjob.testframe.utils.PodUtils;
import org.junit.jupiter.api.AfterAll;
@@ -40,8 +40,8 @@
afterTestSteps = {
@Step(value = "Delete all ODH related resources in the cluster", expected = "All ODH related resources are gone")
},
- tags = {
- @TestTag(value = TestSuite.BUNDLE_UPGRADE)
+ labels = {
+ @Label(value = TestSuite.BUNDLE_UPGRADE)
}
)
@Tag(TestSuite.BUNDLE_UPGRADE)
@@ -72,8 +72,8 @@ void clean() {
@Step(value = "Verify that Notebook pods are stable for 2 minutes", expected = "Notebook pods are stable por 2 minutes after upgrade"),
@Step(value = "Check that ODH operator doesn't contain any error logs", expected = "ODH operator log is error free")
},
- tags = {
- @TestTag(value = TestSuite.BUNDLE_UPGRADE)
+ labels = {
+ @Label(value = TestSuite.BUNDLE_UPGRADE)
}
)
@Test
diff --git a/src/test/java/io/odh/test/e2e/upgrade/OlmUpgradeST.java b/src/test/java/io/odh/test/e2e/upgrade/OlmUpgradeST.java
index d8b5cf59..d7326c4b 100644
--- a/src/test/java/io/odh/test/e2e/upgrade/OlmUpgradeST.java
+++ b/src/test/java/io/odh/test/e2e/upgrade/OlmUpgradeST.java
@@ -19,7 +19,7 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
-import io.skodjob.annotations.TestTag;
+import io.skodjob.annotations.Label;
import io.skodjob.testframe.resources.KubeResourceManager;
import io.skodjob.testframe.utils.KubeUtils;
import io.skodjob.testframe.utils.PodUtils;
@@ -39,8 +39,8 @@
@Step(value = "Deploy ServiceMesh Operator", expected = "ServiceMesh operator is available on the cluster"),
@Step(value = "Deploy Serverless Operator", expected = "Serverless operator is available on the cluster")
},
- tags = {
- @TestTag(value = TestSuite.OLM_UPGRADE)
+ labels = {
+ @Label(value = TestSuite.OLM_UPGRADE)
}
)
@Tag(TestSuite.OLM_UPGRADE)
@@ -64,8 +64,8 @@ public class OlmUpgradeST extends UpgradeAbstract {
@Step(value = "Verify that Notebook pods are stable for 2 minutes", expected = "Notebook pods are stable por 2 minutes after upgrade"),
@Step(value = "Check that ODH operator doesn't contain any error logs", expected = "ODH operator log is error free")
},
- tags = {
- @TestTag(value = TestSuite.OLM_UPGRADE)
+ labels = {
+ @Label(value = TestSuite.OLM_UPGRADE)
}
)
@Test