Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor odh-e2e to use test-frame and avoid duplication of code #136

Merged
merged 4 commits into from
May 9, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 27 additions & 2 deletions pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@
<junit.jupiter.version>5.10.2</junit.jupiter.version>
<junit.platform.version>1.10.2</junit.platform.version>
<maven.surefire.version>3.2.2</maven.surefire.version>
<logback.version>1.4.12</logback.version>
<logback.version>1.5.6</logback.version>
<slf4j.version>2.0.9</slf4j.version>
<hamcrest.version>2.2</hamcrest.version>
<opedatahub-crds.version>1.0.44-SNAPSHOT</opedatahub-crds.version>
Expand All @@ -63,6 +63,7 @@
<aspectj.version>1.9.21.2</aspectj.version>
<allure.version>2.25.0</allure.version>
<allure.maven.version>2.12.0</allure.maven.version>
<test-frame.version>0.1.0-SNAPSHOT</test-frame.version>
kornys marked this conversation as resolved.
Show resolved Hide resolved
</properties>

<repositories>
Expand All @@ -71,6 +72,12 @@
<name>GitHub Apache Maven Packages</name>
<url>https://maven.pkg.github.com/skodjob/opendatahub-crds</url>
</repository>
<!-- Used for test-frame SNAPSHOT version if needed (debug only) -->
<repository>
<id>test-frame</id>
<name>GitHub Apache Maven Packages</name>
<url>https://maven.pkg.github.com/skodjob/test-frame</url>
</repository>
</repositories>
<pluginRepositories>
<pluginRepository>
Expand All @@ -93,6 +100,21 @@
</dependencyManagement>

<dependencies>
<dependency>
<groupId>io.skodjob</groupId>
<artifactId>test-frame-common</artifactId>
<version>${test-frame.version}</version>
</dependency>
<dependency>
<groupId>io.skodjob</groupId>
<artifactId>test-frame-kubernetes</artifactId>
<version>${test-frame.version}</version>
</dependency>
<dependency>
<groupId>io.skodjob</groupId>
<artifactId>test-frame-openshift</artifactId>
<version>${test-frame.version}</version>
</dependency>
<dependency>
<groupId>io.fabric8</groupId>
<artifactId>openshift-client</artifactId>
Expand Down Expand Up @@ -240,7 +262,7 @@
</executions>
<configuration>
<skipITs>${it.skip}</skipITs>
<forkCount>0</forkCount>
<forkCount>1</forkCount>
jiridanek marked this conversation as resolved.
Show resolved Hide resolved
<forkNode implementation="org.apache.maven.plugin.surefire.extensions.SurefireForkNodeFactory"/>
<includes>
<include>**/IT*.java</include>
Expand All @@ -253,6 +275,9 @@
junit.jupiter.extensions.autodetection.enabled = true
</configurationParameters>
</properties>
<environmentVariables>
<CLIENT_TYPE>oc</CLIENT_TYPE>
</environmentVariables>
</configuration>
</plugin>
<plugin>
Expand Down
5 changes: 5 additions & 0 deletions settings.xml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,11 @@
<username>x-access-token</username>
<password>${env.GITHUB_TOKEN}</password>
</server>
<server>
<id>test-frame</id>
<username>x-access-token</username>
<password>${env.GITHUB_TOKEN}</password>
</server>
<server>
<id>test-metadata-generator</id>
<username>x-access-token</username>
Expand Down
5 changes: 0 additions & 5 deletions src/main/java/io/odh/test/Environment.java
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,6 @@ public class Environment {
public static final String USER_PATH = System.getProperty("user.dir");

private static final String CONFIG_FILE_PATH_ENV = "ENV_FILE";
private static final String TOKEN_ENV = "KUBE_TOKEN";
private static final String URL_ENV = "KUBE_URL";
private static final String PRODUCT_ENV = "PRODUCT";
private static final String LOG_DIR_ENV = "LOG_DIR";

Expand Down Expand Up @@ -74,9 +72,6 @@ public class Environment {
* Set values
*/
public static final String PRODUCT = getOrDefault(PRODUCT_ENV, PRODUCT_ODH);
public static final String RUN_USER = getOrDefault("USER", null);
public static final String KUBE_TOKEN = getOrDefault(TOKEN_ENV, null);
public static final String KUBE_URL = getOrDefault(URL_ENV, null);

//Install
public static final boolean SKIP_INSTALL_OPERATOR_DEPS = getOrDefault(SKIP_INSTALL_OPERATOR_DEPS_ENV, Boolean::valueOf, false);
Expand Down
197 changes: 80 additions & 117 deletions src/main/java/io/odh/test/TestUtils.java
Original file line number Diff line number Diff line change
Expand Up @@ -7,32 +7,36 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.exc.InvalidFormatException;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import io.odh.test.framework.WaitException;
import io.fabric8.kubernetes.api.model.EndpointSubset;
import io.fabric8.kubernetes.api.model.Endpoints;
import io.fabric8.kubernetes.client.KubernetesClientException;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.InstallPlan;
import io.opendatahub.datasciencecluster.v1.datascienceclusterstatus.Conditions;
import io.skodjob.testframe.resources.KubeResourceManager;
import io.skodjob.testframe.utils.KubeUtils;
import io.skodjob.testframe.wait.Wait;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

import static io.odh.test.TestConstants.GLOBAL_POLL_INTERVAL_SHORT;
import static io.odh.test.TestConstants.GLOBAL_TIMEOUT;
Expand All @@ -56,79 +60,6 @@ private TestUtils() {
// All static methods
}

/**
* Poll the given {@code ready} function every {@code pollIntervalMs} milliseconds until it returns true,
* or throw a WaitException if it doesn't return true within {@code timeoutMs} milliseconds.
*
* @return The remaining time left until timeout occurs
* (helpful if you have several calls which need to share a common timeout),
*/
    public static long waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
        // Delegate to the full overload with a no-op timeout callback.
        return waitFor(description, pollIntervalMs, timeoutMs, ready, () -> { });
    }

    /**
     * Poll the given {@code ready} function every {@code pollIntervalMs} milliseconds until it returns true,
     * running {@code onTimeout} and then throwing a WaitException if it doesn't return true
     * within {@code timeoutMs} milliseconds.
     *
     * @param description     human-readable description of the awaited condition, used in log/error messages
     * @param pollIntervalMs  delay between successive {@code ready} checks, in milliseconds
     * @param timeoutMs       total time budget, in milliseconds
     * @param ready           condition to poll; exceptions it throws are treated as "not ready"
     * @param onTimeout       callback invoked once, just before the timeout exception is thrown
     * @return the remaining time left until timeout occurs
     *         (helpful if you have several calls which need to share a common timeout)
     * @throws WaitException if the condition does not become true within {@code timeoutMs}
     */
    public static long waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready, Runnable onTimeout) {
        LOGGER.debug("Waiting for {}", description);
        long deadline = System.currentTimeMillis() + timeoutMs;

        // Track the last two distinct exception messages so repeated failures are
        // logged once with a stacktrace instead of flooding the log on every poll.
        String exceptionMessage = null;
        String previousExceptionMessage = null;

        // in case we are polling every 1s, we want to print exception after x tries, not on the first try
        // for minutes poll interval will 2 be enough
        int exceptionAppearanceCount = Duration.ofMillis(pollIntervalMs).toMinutes() > 0 ? 2 : Math.max((int) (timeoutMs / pollIntervalMs) / 4, 2);
        int exceptionCount = 0;
        int newExceptionAppearance = 0;

        StringWriter stackTraceError = new StringWriter();

        while (true) {
            boolean result;
            try {
                result = ready.getAsBoolean();
            } catch (Exception e) {
                exceptionMessage = e.getMessage();

                // Only capture the stacktrace after the same message has repeated
                // enough times; a fresh, different message resets the bookkeeping.
                if (++exceptionCount == exceptionAppearanceCount && exceptionMessage != null && exceptionMessage.equals(previousExceptionMessage)) {
                    LOGGER.error("While waiting for {} exception occurred: {}", description, exceptionMessage);
                    // log the stacktrace
                    e.printStackTrace(new PrintWriter(stackTraceError));
                } else if (exceptionMessage != null && !exceptionMessage.equals(previousExceptionMessage) && ++newExceptionAppearance == 2) {
                    previousExceptionMessage = exceptionMessage;
                }

                // A throwing ready-check counts as "not ready"; keep polling.
                result = false;
            }
            long timeLeft = deadline - System.currentTimeMillis();
            if (result) {
                return timeLeft;
            }
            if (timeLeft <= 0) {
                // Deadline reached: surface the most recent failure context before throwing.
                if (exceptionCount > 1) {
                    LOGGER.error("Exception waiting for {}, {}", description, exceptionMessage);

                    if (!stackTraceError.toString().isEmpty()) {
                        // printing handled stacktrace
                        LOGGER.error(stackTraceError.toString());
                    }
                }
                onTimeout.run();
                WaitException waitException = new WaitException("Timeout after " + timeoutMs + " ms waiting for " + description);
                waitException.printStackTrace();
                throw waitException;
            }
            // Never sleep past the deadline.
            long sleepTime = Math.min(pollIntervalMs, timeLeft);
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("{} not ready, will try again in {} ms ({}ms till timeout)", description, sleepTime, timeLeft);
            }
            try {
                Thread.sleep(sleepTime);
            } catch (InterruptedException e) {
                // NOTE(review): interrupt returns the remaining time without restoring the
                // thread's interrupt status — callers cannot observe the interruption. Confirm intended.
                return deadline - System.currentTimeMillis();
            }
        }
    }

/**
* Polls the given HTTP {@code url} until it gives != 503 status code
*/
Expand All @@ -141,7 +72,7 @@ public static void waitForServiceNotUnavailable(String url) {
}

public static void waitForServiceNotUnavailable(HttpClient httpClient, String url) {
TestUtils.waitFor("service to be not unavailable", GLOBAL_POLL_INTERVAL_SHORT, GLOBAL_TIMEOUT, () -> {
Wait.until("service to be not unavailable", GLOBAL_POLL_INTERVAL_SHORT, GLOBAL_TIMEOUT, () -> {
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
Expand All @@ -167,42 +98,6 @@ public Thread newThread(Runnable r) {
}
});

    /**
     * Non-blocking variant of {@code waitFor}: polls {@code ready} on the shared EXECUTOR every
     * {@code pollIntervalMs} milliseconds and completes the returned future when the condition
     * holds, or completes it exceptionally on error or after {@code timeoutMs} milliseconds.
     *
     * @param description    human-readable description of the awaited condition, used in log messages
     * @param pollIntervalMs delay between successive {@code ready} checks, in milliseconds
     * @param timeoutMs      total time budget, in milliseconds
     * @param ready          condition to poll; the first exception it throws fails the future immediately
     * @return a future completed normally once {@code ready} returns true, or exceptionally
     *         with the thrown exception or a {@link TimeoutException}
     */
    public static CompletableFuture<Void> asyncWaitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
        LOGGER.info("Waiting for {}", description);
        long deadline = System.currentTimeMillis() + timeoutMs;
        CompletableFuture<Void> future = new CompletableFuture<>();
        // Re-schedules the poll after pollIntervalMs on the shared daemon-thread executor.
        Executor delayed = CompletableFuture.delayedExecutor(pollIntervalMs, TimeUnit.MILLISECONDS, EXECUTOR);
        Runnable r = new Runnable() {
            @Override
            public void run() {
                boolean result;
                try {
                    result = ready.getAsBoolean();
                } catch (Exception e) {
                    // Unlike the blocking waitFor, a throwing ready-check aborts the wait at once.
                    future.completeExceptionally(e);
                    return;
                }
                long timeLeft = deadline - System.currentTimeMillis();
                // Guard against completing a future that was cancelled/completed elsewhere.
                if (!future.isDone()) {
                    if (!result) {
                        if (timeLeft >= 0) {
                            if (LOGGER.isTraceEnabled()) {
                                LOGGER.trace("{} not ready, will try again ({}ms till timeout)", description, timeLeft);
                            }
                            // Not ready and time remains: schedule ourselves again after the poll interval.
                            delayed.execute(this);
                        } else {
                            future.completeExceptionally(new TimeoutException(String.format("Waiting for %s timeout %s exceeded", description, timeoutMs)));
                        }
                    } else {
                        future.complete(null);
                    }
                }
            }
        };
        // First check runs synchronously on the caller's thread; subsequent checks are async.
        r.run();
        return future;
    }

public static InputStream getFileFromResourceAsStream(String fileName) {

// The class loader that loaded the class
Expand Down Expand Up @@ -272,4 +167,72 @@ public static <T> T runUntilPass(int retry, Callable<T> fn) {
}
throw new IllegalStateException(String.format("Command wasn't pass in %s attempts", retry));
}

public static io.opendatahub.datasciencecluster.v1.datascienceclusterstatus.Conditions getDscConditionByType(List<Conditions> conditions, String type) {
return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElseGet(null);
}

public static org.kubeflow.v1.notebookstatus.Conditions getNotebookConditionByType(List<org.kubeflow.v1.notebookstatus.Conditions> conditions, String type) {
return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElseGet(null);
}

public static io.kserve.serving.v1beta1.inferenceservicestatus.Conditions getInferenceServiceConditionByType(List<io.kserve.serving.v1beta1.inferenceservicestatus.Conditions> conditions, String type) {
return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElseGet(null);
}

public static void clearOdhRemainingResources() {
KubeResourceManager.getKubeClient().getClient().apiextensions().v1().customResourceDefinitions().list().getItems()
.stream().filter(crd -> crd.getMetadata().getName().contains("opendatahub.io")).toList()
.forEach(crd -> {
LOGGER.info("Deleting CRD {}", crd.getMetadata().getName());
KubeResourceManager.getKubeClient().getClient().resource(crd).delete();
});
KubeResourceManager.getKubeClient().getClient().namespaces().withName("opendatahub").delete();
}

/**
* TODO - this should be removed when https://github.com/opendatahub-io/opendatahub-operator/issues/765 will be resolved
*/
public static void deleteDefaultDSCI() {
LOGGER.info("Clearing DSCI ...");
KubeResourceManager.getKubeCmdClient().exec(false, true, Long.valueOf(GLOBAL_TIMEOUT).intValue(), "delete", "dsci", "--all");
}

    /**
     * Waits until a non-approved InstallPlan referencing the given CSV name appears
     * in the namespace, polling via {@code KubeUtils.getNonApprovedInstallPlan}.
     *
     * @param namespace namespace to watch for the InstallPlan
     * @param csvName   ClusterServiceVersion name the InstallPlan must reference
     */
    public static void waitForInstallPlan(String namespace, String csvName) {
        Wait.until(String.format("Install plan with new version: %s:%s", namespace, csvName),
            GLOBAL_POLL_INTERVAL_SHORT, GLOBAL_TIMEOUT, () -> {
                try {
                    InstallPlan ip = KubeUtils.getNonApprovedInstallPlan(namespace, csvName);
                    LOGGER.debug("Found InstallPlan {} - {}", ip.getMetadata().getName(), ip.getSpec().getClusterServiceVersionNames());
                    return true;
                } catch (NoSuchElementException ex) {
                    // No matching InstallPlan yet — keep polling until the global timeout.
                    LOGGER.debug("No new install plan available. Checking again ...");
                    return false;
                }
            }, () -> { });
    }

public static void waitForEndpoints(String name, Resource<Endpoints> endpoints) {
Wait.until("%s service endpoints to come up".formatted(name), GLOBAL_POLL_INTERVAL_SHORT, GLOBAL_TIMEOUT, () -> {
try {
Endpoints endpointset = endpoints.get();
if (endpointset == null) {
return false;
}
List<EndpointSubset> subsets = endpointset.getSubsets();
if (subsets.isEmpty()) {
return false;
}
for (EndpointSubset subset : subsets) {
return !subset.getAddresses().isEmpty();
}
} catch (KubernetesClientException e) {
if (e.getCode() == 404) {
return false;
}
throw e;
}
return false;
});
}
}
15 changes: 0 additions & 15 deletions src/main/java/io/odh/test/framework/WaitException.java

This file was deleted.

This file was deleted.

Loading
Loading