diff --git a/micrometer/src/test/java/org/jboss/eap/qe/micrometer/MicrometerOtelIntegrationTestCase.java b/micrometer/src/test/java/org/jboss/eap/qe/micrometer/MicrometerOtelIntegrationTestCase.java
index 88838a2d..e80b1438 100644
--- a/micrometer/src/test/java/org/jboss/eap/qe/micrometer/MicrometerOtelIntegrationTestCase.java
+++ b/micrometer/src/test/java/org/jboss/eap/qe/micrometer/MicrometerOtelIntegrationTestCase.java
@@ -21,6 +21,7 @@
import org.jboss.eap.qe.microprofile.tooling.server.configuration.deployment.ConfigurationUtil;
import org.jboss.eap.qe.observability.containers.OpenTelemetryCollectorContainer;
import org.jboss.eap.qe.observability.prometheus.model.PrometheusMetric;
+import org.jboss.eap.qe.observability.server.configuration.micrometer.MicrometerServerConfiguration;
import org.jboss.eap.qe.ts.common.docker.Docker;
import org.jboss.eap.qe.ts.common.docker.junit.DockerRequiredTests;
import org.jboss.shrinkwrap.api.Archive;
@@ -40,7 +41,7 @@
* Tests that metrics can be pushed to the OpenTelemetry collector by Micrometer, and then exported to Jaeger.
* This class is based on the similar one in WildFly, although it uses a different {@code @ServerSetup} task class,
* i.e. {@link MicrometerServerSetup}, which provides the logic for executing the required configuration
- * (see {@link org.jboss.eap.qe.micrometer.util.MicrometerServerConfiguration}) within the Arquillian container.
+ * (see {@link MicrometerServerConfiguration}) within the Arquillian container.
*/
@RunWith(Arquillian.class)
@ServerSetup(MicrometerServerSetup.class) // Enables/Disables Micrometer extension/subsystem for Arquillian in-container tests
diff --git a/micrometer/src/test/java/org/jboss/eap/qe/micrometer/util/MicrometerServerSetup.java b/micrometer/src/test/java/org/jboss/eap/qe/micrometer/util/MicrometerServerSetup.java
index fc1aebab..cbc78aab 100644
--- a/micrometer/src/test/java/org/jboss/eap/qe/micrometer/util/MicrometerServerSetup.java
+++ b/micrometer/src/test/java/org/jboss/eap/qe/micrometer/util/MicrometerServerSetup.java
@@ -2,6 +2,7 @@
import org.jboss.eap.qe.microprofile.tooling.server.configuration.arquillian.MicroProfileServerSetupTask;
import org.jboss.eap.qe.observability.containers.OpenTelemetryCollectorContainer;
+import org.jboss.eap.qe.observability.server.configuration.micrometer.MicrometerServerConfiguration;
import org.jboss.eap.qe.ts.common.docker.Docker;
/**
@@ -20,6 +21,7 @@ public void setup() throws Exception {
}
// start the OTel collector container
otelCollector = OpenTelemetryCollectorContainer.getInstance();
+ otelCollector.start();
// and pass Micrometer the OTel collector endopint URL
MicrometerServerConfiguration.enableMicrometer(otelCollector.getOtlpHttpEndpoint());
}
@@ -29,5 +31,6 @@ public void tearDown() throws Exception {
MicrometerServerConfiguration.disableMicrometer();
// stop the OTel collector container
otelCollector.stop();
+ OpenTelemetryCollectorContainer.dispose();
}
}
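
Taken together, the two hunks above change the setup/teardown ordering of the Micrometer server setup task: the collector is now started explicitly and the singleton is disposed after it is stopped. A minimal sketch of the resulting class, assuming the usual `MicroProfileServerSetupTask` shape and omitting the Docker availability check that precedes the visible context:

```java
import org.jboss.eap.qe.microprofile.tooling.server.configuration.arquillian.MicroProfileServerSetupTask;
import org.jboss.eap.qe.observability.containers.OpenTelemetryCollectorContainer;
import org.jboss.eap.qe.observability.server.configuration.micrometer.MicrometerServerConfiguration;

// Sketch only: field and visibility details outside the hunks above are assumptions.
public class MicrometerServerSetup implements MicroProfileServerSetupTask {

    private OpenTelemetryCollectorContainer otelCollector;

    public void setup() throws Exception {
        // (Docker availability check omitted)
        // start the OTel collector container explicitly; getInstance() no longer starts it
        otelCollector = OpenTelemetryCollectorContainer.getInstance();
        otelCollector.start();
        // and pass Micrometer the OTel collector endpoint URL
        MicrometerServerConfiguration.enableMicrometer(otelCollector.getOtlpHttpEndpoint());
    }

    public void tearDown() throws Exception {
        MicrometerServerConfiguration.disableMicrometer();
        // stop the OTel collector container, then drop the singleton so later tests get a fresh one
        otelCollector.stop();
        OpenTelemetryCollectorContainer.dispose();
    }
}
```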
diff --git a/microprofile-fault-tolerance/pom.xml b/microprofile-fault-tolerance/pom.xml
index b9608b4a..21877661 100644
--- a/microprofile-fault-tolerance/pom.xml
+++ b/microprofile-fault-tolerance/pom.xml
@@ -98,6 +98,11 @@
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ <scope>test</scope>
+ </dependency>
diff --git a/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/UndeployDeployTest.java b/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/UndeployDeployTest.java
index 121b983a..7cb9d4b9 100644
--- a/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/UndeployDeployTest.java
+++ b/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/UndeployDeployTest.java
@@ -16,13 +16,14 @@
import org.jboss.arquillian.test.api.ArquillianResource;
import org.jboss.eap.qe.microprofile.fault.tolerance.deployments.v10.HelloService;
import org.jboss.eap.qe.microprofile.fault.tolerance.util.MicroProfileFaultToleranceServerConfiguration;
-import org.jboss.eap.qe.microprofile.fault.tolerance.util.MicroProfileTelemetryServerSetup;
+import org.jboss.eap.qe.observability.server.configuration.microprofile.telemetry.MicroProfileTelemetryServerConfiguration;
import org.jboss.eap.qe.microprofile.tooling.server.configuration.creaper.ManagementClientProvider;
import org.jboss.eap.qe.microprofile.tooling.server.configuration.deployment.ConfigurationUtil;
import org.jboss.eap.qe.microprofile.tooling.server.log.LogChecker;
import org.jboss.eap.qe.microprofile.tooling.server.log.ModelNodeLogChecker;
import org.jboss.eap.qe.observability.containers.OpenTelemetryCollectorContainer;
import org.jboss.eap.qe.observability.prometheus.model.PrometheusMetric;
+import org.jboss.eap.qe.observability.server.configuration.micrometer.MicrometerServerConfiguration;
import org.jboss.eap.qe.ts.common.docker.Docker;
import org.jboss.eap.qe.ts.common.docker.junit.DockerRequiredTests;
import org.jboss.shrinkwrap.api.Archive;
@@ -88,6 +89,9 @@ public static Archive<?> createNonMPFTDeployment() {
public static void setup() throws Exception {
// Enable FT
MicroProfileFaultToleranceServerConfiguration.enableFaultTolerance();
+ // And disable Micrometer for good measure, since we're going to test MicroProfile Fault Tolerance integration
+ // with MP Telemetry 2.0 metrics too.
+ MicrometerServerConfiguration.disableMicrometer();
}
/**
@@ -134,73 +138,90 @@ public void testFaultToleranceMetricsAreTracedWithSameDeployments(
}
// start the OTel collector container
otelCollector = OpenTelemetryCollectorContainer.getInstance();
- // Enable MP Telemetry based metrics, which rely on OpenTelemetry subsystem
- MicroProfileTelemetryServerSetup.enableOpenTelemetry();
- MicroProfileTelemetryServerSetup.addOpenTelemetryCollectorConfiguration(otelCollector.getOtlpGrpcEndpoint());
- MicroProfileTelemetryServerSetup.enableMicroProfileTelemetry();
- // manually deploy our deployments
- deployer.deploy(FIRST_DEPLOYMENT);
- deployer.deploy(SECOND_DEPLOYMENT);
- get(firstDeploymentUlr + "?operation=timeout&context=foobar&fail=true").then()
- .assertThat()
- .body(containsString("Fallback Hello, context = foobar"));
- // timeout is not working because 2nd deployment has disabled it
- get(secondDeploymentUlr + "?operation=timeout&context=foobar&fail=true").then()
- .assertThat()
- .body(containsString("Hello from @Timeout method, context = foobar"));
- // fetch the collected metrics in prometheus format
- List<String> metricsToTest = Arrays.asList(
- "ft_timeout_calls_total",
- "ft_invocations_total");
- // give it some time to actually be able and report some metrics via the Pmetheus URL
- Thread.sleep(5_000);
- List<PrometheusMetric> metrics = OpenTelemetryCollectorContainer.getInstance().fetchMetrics("");
- // assert
- metricsToTest.forEach(n -> Assert.assertTrue("Missing metric: " + n,
- metrics.stream().anyMatch(m -> m.getKey().startsWith(n))));
+ try {
+ otelCollector.start();
+ try {
+ // Enable MP Telemetry based metrics, which rely on OpenTelemetry subsystem
+ MicroProfileTelemetryServerConfiguration.enableOpenTelemetry();
+ MicroProfileTelemetryServerConfiguration
+ .addOpenTelemetryCollectorConfiguration(otelCollector.getOtlpGrpcEndpoint());
+ MicroProfileTelemetryServerConfiguration.enableMicroProfileTelemetry();
+ try {
+ // manually deploy our deployments
+ deployer.deploy(FIRST_DEPLOYMENT);
+ deployer.deploy(SECOND_DEPLOYMENT);
+ try {
+ get(firstDeploymentUlr + "?operation=timeout&context=foobar&fail=true").then()
+ .assertThat()
+ .body(containsString("Fallback Hello, context = foobar"));
+ // timeout is not working because 2nd deployment has disabled it
+ get(secondDeploymentUlr + "?operation=timeout&context=foobar&fail=true").then()
+ .assertThat()
+ .body(containsString("Hello from @Timeout method, context = foobar"));
+ // fetch the collected metrics in prometheus format
+ List<String> metricsToTest = Arrays.asList(
+ "ft_timeout_calls_total",
+ "ft_invocations_total");
+ // give it some time to report some metrics via the Prometheus URL
+ Thread.sleep(5_000);
+ List<PrometheusMetric> metrics = OpenTelemetryCollectorContainer.getInstance().fetchMetrics("");
+ // assert
+ metricsToTest.forEach(n -> Assert.assertTrue("Missing metric: " + n,
+ metrics.stream().anyMatch(m -> m.getKey().startsWith(n))));
- Assert.assertTrue("\"ft_timeout_calls_total\" not found or not expected",
- metrics.stream()
- .filter(m -> "ft_timeout_calls_total".equals(m.getKey()))
- .filter(m -> m.getTags().entrySet().stream().anyMatch(
- t -> "method".equals(t.getKey())
- && "org.jboss.eap.qe.microprofile.fault.tolerance.deployments.v10.HelloService.timeout"
- .equals(t.getValue()))
- && m.getTags().entrySet().stream().anyMatch(
- t -> "timedOut".equals(t.getKey()) && "true".equals(t.getValue())))
- .anyMatch(m -> "1".equals(m.getValue())));
- Assert.assertTrue("\"ft_invocations_total\" (fallback applied) not found or not expected",
- metrics.stream()
- .filter(m -> "ft_invocations_total".equals(m.getKey()))
- .filter(m -> m.getTags().entrySet().stream().anyMatch(
- t -> "fallback".equals(t.getKey()) && "applied".equals(t.getValue()))
- && m.getTags().entrySet().stream().anyMatch(
- t -> "method".equals(t.getKey())
- && "org.jboss.eap.qe.microprofile.fault.tolerance.deployments.v10.HelloService.timeout"
- .equals(t.getValue()))
- && m.getTags().entrySet().stream().anyMatch(
- t -> "result".equals(t.getKey()) && "valueReturned".equals(t.getValue())))
- .anyMatch(m -> "1".equals(m.getValue())));
- Assert.assertTrue("\"ft_invocations_total\" (fallback not applied) not found or not expected",
- metrics.stream()
- .filter(m -> "ft_invocations_total".equals(m.getKey()))
- .filter(m -> m.getTags().entrySet().stream().anyMatch(
- t -> "fallback".equals(t.getKey()) && "notApplied".equals(t.getValue()))
- && m.getTags().entrySet().stream().anyMatch(
- t -> "method".equals(t.getKey())
- && "org.jboss.eap.qe.microprofile.fault.tolerance.deployments.v10.HelloService.timeout"
- .equals(t.getValue()))
- && m.getTags().entrySet().stream().anyMatch(
- t -> "result".equals(t.getKey()) && "valueReturned".equals(t.getValue())))
- .anyMatch(m -> "1".equals(m.getValue())));
- // disable MP Telemetry based metrics
- MicroProfileTelemetryServerSetup.disableMicroProfileTelemetry();
- MicroProfileTelemetryServerSetup.disableOpenTelemetry();
- // stop the OTel collector container
- otelCollector.stop();
- // undeploy
- deployer.undeploy(FIRST_DEPLOYMENT);
- deployer.undeploy(SECOND_DEPLOYMENT);
+ Assert.assertTrue("\"ft_timeout_calls_total\" not found or not expected",
+ metrics.stream()
+ .filter(m -> "ft_timeout_calls_total".equals(m.getKey()))
+ .filter(m -> m.getTags().entrySet().stream().anyMatch(
+ t -> "method".equals(t.getKey())
+ && "org.jboss.eap.qe.microprofile.fault.tolerance.deployments.v10.HelloService.timeout"
+ .equals(t.getValue()))
+ && m.getTags().entrySet().stream().anyMatch(
+ t -> "timedOut".equals(t.getKey()) && "true".equals(t.getValue())))
+ .anyMatch(m -> "1".equals(m.getValue())));
+ Assert.assertTrue("\"ft_invocations_total\" (fallback applied) not found or not expected",
+ metrics.stream()
+ .filter(m -> "ft_invocations_total".equals(m.getKey()))
+ .filter(m -> m.getTags().entrySet().stream().anyMatch(
+ t -> "fallback".equals(t.getKey()) && "applied".equals(t.getValue()))
+ && m.getTags().entrySet().stream().anyMatch(
+ t -> "method".equals(t.getKey())
+ && "org.jboss.eap.qe.microprofile.fault.tolerance.deployments.v10.HelloService.timeout"
+ .equals(t.getValue()))
+ && m.getTags().entrySet().stream().anyMatch(
+ t -> "result".equals(t.getKey())
+ && "valueReturned".equals(t.getValue())))
+ .anyMatch(m -> "1".equals(m.getValue())));
+ Assert.assertTrue("\"ft_invocations_total\" (fallback not applied) not found or not expected",
+ metrics.stream()
+ .filter(m -> "ft_invocations_total".equals(m.getKey()))
+ .filter(m -> m.getTags().entrySet().stream().anyMatch(
+ t -> "fallback".equals(t.getKey()) && "notApplied".equals(t.getValue()))
+ && m.getTags().entrySet().stream().anyMatch(
+ t -> "method".equals(t.getKey())
+ && "org.jboss.eap.qe.microprofile.fault.tolerance.deployments.v10.HelloService.timeout"
+ .equals(t.getValue()))
+ && m.getTags().entrySet().stream().anyMatch(
+ t -> "result".equals(t.getKey())
+ && "valueReturned".equals(t.getValue())))
+ .anyMatch(m -> "1".equals(m.getValue())));
+ } finally {
+ // undeploy
+ deployer.undeploy(FIRST_DEPLOYMENT);
+ deployer.undeploy(SECOND_DEPLOYMENT);
+ }
+ } finally {
+ // disable MP Telemetry based metrics
+ MicroProfileTelemetryServerConfiguration.disableMicroProfileTelemetry();
+ MicroProfileTelemetryServerConfiguration.disableOpenTelemetry();
+ }
+ } finally {
+ // stop the OTel collector container
+ otelCollector.stop();
+ }
+ } finally {
+ OpenTelemetryCollectorContainer.dispose();
+ }
}
/**
diff --git a/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/integration/metrics/MultipleMetricsExtensionTest.java b/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/integration/metrics/MultipleMetricsExtensionTest.java
new file mode 100644
index 00000000..e41e7da5
--- /dev/null
+++ b/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/integration/metrics/MultipleMetricsExtensionTest.java
@@ -0,0 +1,402 @@
+package org.jboss.eap.qe.microprofile.fault.tolerance.integration.metrics;
+
+import static io.restassured.RestAssured.get;
+import static org.hamcrest.Matchers.containsString;
+
+import java.net.URL;
+import java.util.List;
+
+import org.apache.http.HttpStatus;
+import org.jboss.arquillian.container.test.api.Deployer;
+import org.jboss.arquillian.container.test.api.Deployment;
+import org.jboss.arquillian.container.test.api.OperateOnDeployment;
+import org.jboss.arquillian.container.test.api.RunAsClient;
+import org.jboss.arquillian.junit.Arquillian;
+import org.jboss.arquillian.junit.InSequence;
+import org.jboss.arquillian.test.api.ArquillianResource;
+import org.jboss.eap.qe.microprofile.fault.tolerance.deployments.v10.HelloService;
+import org.jboss.eap.qe.microprofile.fault.tolerance.util.MicroProfileFaultToleranceServerConfiguration;
+import org.jboss.eap.qe.observability.server.configuration.microprofile.telemetry.MicroProfileTelemetryServerConfiguration;
+import org.jboss.eap.qe.microprofile.tooling.server.configuration.deployment.ConfigurationUtil;
+import org.jboss.eap.qe.observability.containers.OpenTelemetryCollectorContainer;
+import org.jboss.eap.qe.observability.prometheus.model.PrometheusMetric;
+import org.jboss.eap.qe.observability.server.configuration.micrometer.MicrometerServerConfiguration;
+import org.jboss.eap.qe.ts.common.docker.Docker;
+import org.jboss.eap.qe.ts.common.docker.junit.DockerRequiredTests;
+import org.jboss.shrinkwrap.api.Archive;
+import org.jboss.shrinkwrap.api.ShrinkWrap;
+import org.jboss.shrinkwrap.api.asset.StringAsset;
+import org.jboss.shrinkwrap.api.spec.WebArchive;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+/**
+ * Verify the server behavior from the end user PoV, based on whether various extensions and subsystems that
+ * handle metrics generation and collection are available and enabled, or not.
+ *
+ * Uses a workflow that is already implemented in {@link org.jboss.eap.qe.microprofile.fault.tolerance.UndeployDeployTest},
+ * i.e. several deployments are defined and deployed manually.
+ * In order to be able to inject the deployment URLs, though, Arquillian needs the deployments to be deployed
+ * initially, which is the reason for the first test in the sequence. Deployments are undeployed after each test.
+ */
+@RunAsClient
+@RunWith(Arquillian.class)
+@Category(DockerRequiredTests.class)
+public class MultipleMetricsExtensionTest {
+
+ private static final String FAULT_TOLERANCE_DEPLOYMENT = "FTDeployment";
+ private static final String FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY = "FTDeploymentWithMPTelemetryEnabled";
+ private static final String FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER = "FTDeploymentWithMPTelemetryEnabledButDisablingMicrometer";
+ private static final String TESTED_FT_METRIC = "ft_timeout_";
+ private static final int REQUEST_COUNT = 5;
+
+ @ArquillianResource
+ private Deployer deployer;
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ // we need a Docker container for the OTel collector here, so throw an exception if a Docker service is not available
+ try {
+ Docker.checkDockerPresent();
+ } catch (Exception e) {
+ throw new IllegalStateException("Cannot verify Docker availability: " + e.getMessage());
+ }
+ // Enable FT
+ MicroProfileFaultToleranceServerConfiguration.enableFaultTolerance();
+ }
+
+ /**
+ * A deployment containing MP Fault Tolerance operations that generate metrics
+ *
+ * @return A ShrinkWrap {@link Archive} containing a deployment that contains MP Fault Tolerance operations
+ */
+ @Deployment(name = FAULT_TOLERANCE_DEPLOYMENT, managed = false)
+ public static Archive<?> deployment() {
+ String mpConfig = "Timeout/enabled=true";
+ return ShrinkWrap.create(WebArchive.class, FAULT_TOLERANCE_DEPLOYMENT + ".war")
+ .addPackages(true, HelloService.class.getPackage())
+ .addAsManifestResource(ConfigurationUtil.BEANS_XML_FILE_LOCATION, "beans.xml")
+ .addAsManifestResource(new StringAsset(mpConfig), "microprofile-config.properties");
+ }
+
+ /**
+ * A deployment that enables MP Telemetry via the {@code otel.sdk.disabled=false} MP Config property.
+ *
+ * @return A ShrinkWrap {@link Archive} containing a deployment that enables MP Telemetry
+ */
+ @Deployment(name = FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY, managed = false)
+ public static Archive<?> deploymentWithMPTelemetryEnabled() {
+ String mpConfig = "otel.service.name=" + FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY
+ + "\notel.sdk.disabled=false\nTimeout/enabled=true";
+ return ShrinkWrap.create(WebArchive.class, FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY + ".war")
+ .addPackages(true, HelloService.class.getPackage())
+ .addAsManifestResource(ConfigurationUtil.BEANS_XML_FILE_LOCATION, "beans.xml")
+ .addAsManifestResource(new StringAsset(mpConfig), "microprofile-config.properties");
+ }
+
+ /**
+ * A deployment that enables MP Telemetry via the {@code otel.sdk.disabled=false} MP Config property, and
+ * disables MP Fault Tolerance metrics collection through Micrometer via the
+ * {@code smallrye.faulttolerance.micrometer.disabled} MP Config property.
+ *
+ * @return A ShrinkWrap {@link Archive} containing a deployment that enables MP Telemetry and disables
+ * Micrometer based Fault Tolerance metrics collection
+ */
+ @Deployment(name = FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER, managed = false)
+ public static Archive<?> deploymentWithMPTelemetryEnabledDisablingMicrometer() {
+ String mpConfig = "otel.service.name=" + FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER
+ + "\notel.sdk.disabled=false\nTimeout/enabled=true\nsmallrye.faulttolerance.micrometer.disabled=true";
+ return ShrinkWrap.create(WebArchive.class, FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER + ".war")
+ .addPackages(true, HelloService.class.getPackage())
+ .addAsManifestResource(ConfigurationUtil.BEANS_XML_FILE_LOCATION, "beans.xml")
+ .addAsManifestResource(new StringAsset(mpConfig), "microprofile-config.properties");
+ }
+
+ /**
+ * Deploy all deployments, so it's possible to get their URLs in other tests. This works around a limitation of
+ * Arquillian: when deployments are deployed manually (in tests), their URLs are not known at the time a test
+ * starts. The only way to work around this is to deploy all deployments in the first test, which then allows the
+ * deployment URLs to be injected as parameters in the other test methods.
+ */
+ @Test
+ @InSequence(0)
+ public void deployAll() {
+ deployer.deploy(FAULT_TOLERANCE_DEPLOYMENT);
+ deployer.deploy(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY);
+ deployer.deploy(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER);
+
+ deployer.undeploy(FAULT_TOLERANCE_DEPLOYMENT);
+ deployer.undeploy(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY);
+ deployer.undeploy(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER);
+ }
+
+ /**
+ * Test to verify that no MicroProfile Fault Tolerance related metrics are visible via the OTel collector Prometheus
+ * endpoint when neither the Micrometer nor the MicroProfile Telemetry extension is available.
+ *
+ * @param deploymentUrl Base {@link URL} of the app deployment
+ * @throws Exception When something fails during server configuration
+ */
+ @Test
+ @InSequence(10)
+ public void noMetricsAreCollectedWhenMetricsExtensionsAreNotAvailable(
+ @ArquillianResource @OperateOnDeployment(FAULT_TOLERANCE_DEPLOYMENT) URL deploymentUrl) throws Exception {
+ // Remove the MP Telemetry extension
+ MicroProfileTelemetryServerConfiguration.disableMicroProfileTelemetry();
+ // And be sure Micrometer is not available as well
+ MicrometerServerConfiguration.disableMicrometer();
+ // start the OTel collector container
+ OpenTelemetryCollectorContainer otelCollector = OpenTelemetryCollectorContainer.getNewInstance();
+ otelCollector.start();
+ try {
+ // deploy
+ deployer.deploy(FAULT_TOLERANCE_DEPLOYMENT);
+ try {
+ // call the app operation a few times
+ for (int i = 0; i < REQUEST_COUNT; i++) {
+ get(deploymentUrl + "?operation=timeout&context=foobar&fail=true").then()
+ .assertThat()
+ .statusCode(HttpStatus.SC_OK)
+ .body(containsString("Fallback Hello, context = foobar"));
+ }
+ // give it some time to report some metrics via the Prometheus URL
+ Thread.sleep(5_000);
+ // fetch the collected metrics in prometheus format
+ List<PrometheusMetric> metrics = otelCollector.fetchMetrics(TESTED_FT_METRIC);
+ Assert.assertTrue(
+ TESTED_FT_METRIC + " metric found, which is not expected when both MP Telemetry and Micrometer "
+ +
+ "extensions are not available.",
+ metrics.stream().noneMatch(m -> m.getKey().startsWith(TESTED_FT_METRIC)));
+ } finally {
+ deployer.undeploy(FAULT_TOLERANCE_DEPLOYMENT);
+ }
+ } finally {
+ otelCollector.stop();
+ }
+ }
+
+ /**
+ * Test to verify that MP Fault Tolerance metrics are collected when Micrometer alone is available and configured
+ *
+ * @param deploymentUrl Base {@link URL} of the app deployment
+ * @throws Exception When something fails during server configuration
+ */
+ @Test
+ @InSequence(10)
+ public void metricsAreCollectedWhenOnlyMicrometerExtensionIsAvailable(
+ @ArquillianResource @OperateOnDeployment(FAULT_TOLERANCE_DEPLOYMENT) URL deploymentUrl) throws Exception {
+ // Remove the MP Telemetry extension
+ MicroProfileTelemetryServerConfiguration.disableMicroProfileTelemetry();
+ // start the OTel collector container
+ OpenTelemetryCollectorContainer otelCollector = OpenTelemetryCollectorContainer.getNewInstance();
+ otelCollector.start();
+ try {
+ // And be sure Micrometer is available instead
+ MicrometerServerConfiguration.enableMicrometer(otelCollector.getOtlpHttpEndpoint());
+ try {
+ // deploy
+ deployer.deploy(FAULT_TOLERANCE_DEPLOYMENT);
+ try {
+ // call the app operation a few times
+ for (int i = 0; i < REQUEST_COUNT; i++) {
+ get(deploymentUrl + "?operation=timeout&context=foobar&fail=true").then()
+ .assertThat()
+ .statusCode(HttpStatus.SC_OK)
+ .body(containsString("Fallback Hello, context = foobar"));
+ }
+ // give it some time to report some metrics via the Prometheus URL
+ Thread.sleep(5_000);
+ // fetch the collected metrics in prometheus format
+ List<PrometheusMetric> metrics = otelCollector.fetchMetrics(TESTED_FT_METRIC);
+ Assert.assertFalse(
+ TESTED_FT_METRIC
+ + " metrics not found, which is not expected when the Micrometer extension is available, "
+ +
+ "i.e. FT metrics should be collected.",
+ metrics.isEmpty());
+ } finally {
+ deployer.undeploy(FAULT_TOLERANCE_DEPLOYMENT);
+ }
+ } finally {
+ MicrometerServerConfiguration.disableMicrometer();
+ }
+ } finally {
+ otelCollector.stop();
+ }
+ }
+
+ /**
+ * Test to verify that MP Fault Tolerance metrics are NOT collected when MicroProfile Telemetry alone is available
+ * but not enabled at the application level, i.e. the {@code otel.sdk.disabled=false} MP Config property is not set
+ *
+ * @param deploymentUrl Base {@link URL} of the app deployment
+ * @throws Exception When something fails during server configuration
+ */
+ @Test
+ @InSequence(10)
+ public void noMetricsAreCollectedWhenOnlyMPTelemetryIsAvailableButNotEnabled(
+ @ArquillianResource @OperateOnDeployment(FAULT_TOLERANCE_DEPLOYMENT) URL deploymentUrl) throws Exception {
+ // Remove the Micrometer extension...
+ MicrometerServerConfiguration.disableMicrometer();
+ // ... but make the MicroProfile Telemetry extension available
+ MicroProfileTelemetryServerConfiguration.enableMicroProfileTelemetry();
+ try {
+ // start the OTel collector container
+ OpenTelemetryCollectorContainer otelCollector = OpenTelemetryCollectorContainer.getNewInstance();
+ otelCollector.start();
+ try {
+ // deploy an app that DOES NOT enable MP Telemetry
+ deployer.deploy(FAULT_TOLERANCE_DEPLOYMENT);
+ try {
+ // call the app operation a few times
+ for (int i = 0; i < REQUEST_COUNT; i++) {
+ get(deploymentUrl + "?operation=timeout&context=foobar&fail=true").then()
+ .assertThat()
+ .statusCode(HttpStatus.SC_OK)
+ .body(containsString("Fallback Hello, context = foobar"));
+ }
+ // give it some time to report some metrics via the Prometheus URL
+ Thread.sleep(5_000);
+ // fetch the collected metrics in prometheus format
+ List<PrometheusMetric> metrics = otelCollector.fetchMetrics(TESTED_FT_METRIC);
+ Assert.assertTrue(
+ TESTED_FT_METRIC + " metrics found, which is not expected when only the MP Telemetry extension "
+ +
+ "is available but NOT enabled at application level.",
+ metrics.stream().noneMatch(m -> m.getKey().startsWith(TESTED_FT_METRIC)));
+ } finally {
+ deployer.undeploy(FAULT_TOLERANCE_DEPLOYMENT);
+ }
+ } finally {
+ otelCollector.stop();
+ }
+ } finally {
+ MicroProfileTelemetryServerConfiguration.disableMicroProfileTelemetry();
+ }
+ }
+
+ /**
+ * Test to verify that MP Fault Tolerance metrics are collected when MicroProfile Telemetry alone is available
+ * AND enabled, i.e. via the {@code otel.sdk.disabled=false} MP Config property
+ *
+ * @param deploymentUrl Base {@link URL} of the app deployment
+ * @throws Exception When something fails during server configuration
+ */
+ @Test
+ @InSequence(10)
+ public void metricsAreCollectedWhenOnlyMPTelemetryExtensionIsAvailableAndEnabled(
+ @ArquillianResource @OperateOnDeployment(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY) URL deploymentUrl)
+ throws Exception {
+ // Remove the Micrometer extension
+ MicrometerServerConfiguration.disableMicrometer();
+ // Make the MP Telemetry extension available
+ MicroProfileTelemetryServerConfiguration.enableMicroProfileTelemetry();
+ try {
+ // start the OTel collector container
+ OpenTelemetryCollectorContainer otelCollector = OpenTelemetryCollectorContainer.getNewInstance();
+ otelCollector.start();
+ try {
+ // deploy an app that enables MP Telemetry
+ deployer.deploy(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY);
+ try {
+ // call the app operation a few times
+ for (int i = 0; i < REQUEST_COUNT; i++) {
+ get(deploymentUrl + "?operation=timeout&context=foobar&fail=true").then()
+ .assertThat()
+ .statusCode(HttpStatus.SC_OK)
+ .body(containsString("Fallback Hello, context = foobar"));
+ }
+ // give it some time to report some metrics via the Prometheus URL
+ Thread.sleep(5_000);
+ // fetch the collected metrics in prometheus format
+ List<PrometheusMetric> metrics = otelCollector.fetchMetrics(TESTED_FT_METRIC);
+ Assert.assertFalse(
+ TESTED_FT_METRIC
+ + " metrics not found, which is not expected when the MP Telemetry extension is available "
+ +
+ "and enabled at the application level, i.e. FT metrics should be collected.",
+ metrics.isEmpty());
+ } finally {
+ deployer.undeploy(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY);
+ }
+ } finally {
+ otelCollector.stop();
+ }
+ } finally {
+ MicroProfileTelemetryServerConfiguration.disableMicroProfileTelemetry();
+ }
+ }
+
+ /**
+ * Test to verify that MP Fault Tolerance metrics are collected only once when both the MicroProfile Telemetry
+ * and Micrometer subsystems are available, but only MP Telemetry is enabled, i.e. via the
+ * {@code otel.sdk.disabled=false} MP Config property, while Micrometer metrics collection is disabled (via the
+ * {@code smallrye.faulttolerance.micrometer.disabled=true} MP Config property).
+ *
+ * @param deploymentUrl Base {@link URL} of the app deployment
+ * @throws Exception When something fails during server configuration
+ */
+ @Test
+ @InSequence(10)
+ public void metricsAreCollectedWhenBothExtensionsAreAvailableAndOnlyMPTelIsEnabled(
+ @ArquillianResource @OperateOnDeployment(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER) URL deploymentUrl)
+ throws Exception {
+ // Make the MP Telemetry extension available
+ MicroProfileTelemetryServerConfiguration.enableMicroProfileTelemetry();
+ try {
+ // start the OTel collector container
+ OpenTelemetryCollectorContainer otelCollector = OpenTelemetryCollectorContainer.getNewInstance();
+ otelCollector.start();
+ try {
+ // And be sure Micrometer is available too
+ MicrometerServerConfiguration.enableMicrometer(otelCollector.getOtlpHttpEndpoint());
+ try {
+ // deploy an app that enables MP Telemetry, and disables Micrometer Fault Tolerance metrics collection
+ // instead, see the FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER deployment above
+ deployer.deploy(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER);
+ try {
+ // call the app operation a few times
+ for (int i = 0; i < REQUEST_COUNT; i++) {
+ get(deploymentUrl + "?operation=timeout&context=foobar&fail=true").then()
+ .assertThat()
+ .statusCode(HttpStatus.SC_OK)
+ .body(containsString("Fallback Hello, context = foobar"));
+ }
+ // give it some time to report some metrics via the Prometheus URL
+ Thread.sleep(5_000);
+ // fetch the collected metrics in prometheus format
+ List<PrometheusMetric> metrics = otelCollector.fetchMetrics(TESTED_FT_METRIC);
+ Assert.assertFalse(
+ TESTED_FT_METRIC
+ + " metrics not found, which is not expected when the Micrometer extension is available, i.e. FT metrics should be collected.",
+ metrics.isEmpty());
+ Assert.assertEquals(
+ "Duplicate metrics were found, which is not expected when both the Micrometer and MP Telemetry extension "
+ +
+ "are available but the deployment is explicitly enabling MP Telemetry while disabling Micrometer metrics collection instead.",
+ metrics.size(), metrics.stream().distinct().count());
+ } finally {
+ deployer.undeploy(FAULT_TOLERANCE_DEPOYMENT_WITH_MP_TELEMETRY_DISABLING_MICROMETER);
+ }
+ } finally {
+ MicrometerServerConfiguration.disableMicrometer();
+ }
+ } finally {
+ otelCollector.stop();
+ }
+ } finally {
+ MicroProfileTelemetryServerConfiguration.disableMicroProfileTelemetry();
+ }
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ // disable FT
+ MicroProfileFaultToleranceServerConfiguration.disableFaultTolerance();
+ }
+}
diff --git a/pom.xml b/pom.xml
index 172981bf..083d8e6f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -92,6 +92,8 @@
+ <version.slf4j>2.0.16</version.slf4j>
+
--add-exports=java.desktop/sun.awt=ALL-UNNAMED --add-exports=java.naming/com.sun.jndi.ldap=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.lang.invoke=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.security=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.management/javax.management=ALL-UNNAMED --add-opens=java.naming/javax.naming=ALL-UNNAMED
--add-exports=java.desktop/sun.awt=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.lang.invoke=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.security=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.management/javax.management=ALL-UNNAMED --add-opens=java.naming/javax.naming=ALL-UNNAMED
@@ -315,6 +317,16 @@
<version>${version.org.jboss.threads.jboss-threads}</version>
<scope>test</scope>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>${version.slf4j}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ <version>${version.slf4j}</version>
+ </dependency>
diff --git a/tooling-observability/pom.xml b/tooling-observability/pom.xml
index 8b3909ee..5ed8dc8e 100644
--- a/tooling-observability/pom.xml
+++ b/tooling-observability/pom.xml
@@ -21,6 +21,14 @@
<groupId>org.jboss.eap.qe</groupId>
<artifactId>tooling-docker</artifactId>
+ <dependency>
+ <groupId>org.wildfly.extras.creaper</groupId>
+ <artifactId>creaper-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.jboss.eap.qe</groupId>
+ <artifactId>tooling-server-configuration</artifactId>
+ </dependency>
\ No newline at end of file
diff --git a/tooling-observability/src/main/java/org/jboss/eap/qe/observability/containers/OpenTelemetryCollectorContainer.java b/tooling-observability/src/main/java/org/jboss/eap/qe/observability/containers/OpenTelemetryCollectorContainer.java
index 26ba86ee..18dba34c 100644
--- a/tooling-observability/src/main/java/org/jboss/eap/qe/observability/containers/OpenTelemetryCollectorContainer.java
+++ b/tooling-observability/src/main/java/org/jboss/eap/qe/observability/containers/OpenTelemetryCollectorContainer.java
@@ -27,7 +27,14 @@
/**
* Inspired by the similar class in Wildfly, this is an implementation of an OTel collector which uses the TS Docker
- * APIs, instead ogf the Testcontainers based tooling which is available in WildFly
+ * APIs, instead of the Testcontainers based tooling which is available in WildFly.
+ *
+ * Either a singleton or a new, caller-managed instance can be obtained via the
+ * {@link OpenTelemetryCollectorContainer#getInstance()} or {@link OpenTelemetryCollectorContainer#getNewInstance()}
+ * methods respectively, and the lifecycle methods (e.g. {@link OpenTelemetryCollectorContainer#start()},
+ * {@link OpenTelemetryCollectorContainer#stop()} and {@link OpenTelemetryCollectorContainer#dispose()})
+ * must be used accordingly.
+ *
+ * Instances cannot be used simultaneously, since they share ports and general configuration.
*/
public class OpenTelemetryCollectorContainer {
private static OpenTelemetryCollectorContainer INSTANCE = null;
@@ -53,7 +60,7 @@ public class OpenTelemetryCollectorContainer {
private String otlpGrpcEndpoint;
private String otlpHttpEndpoint;
private String prometheusUrl;
- private final Docker otelCollector;
+ private final Docker otelCollectorContainer;
private String getLocalOtelCollectorConfigYamlAbsolutePath() {
File tempFile = null;
@@ -79,7 +86,7 @@ private String getLocalOtelCollectorConfigYamlAbsolutePath() {
}
private OpenTelemetryCollectorContainer() {
- otelCollector = new Docker.Builder("otel-collector",
+ otelCollectorContainer = new Docker.Builder("otel-collector",
"ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.103.1")
.setContainerReadyCondition(() -> {
try {
@@ -103,16 +110,38 @@ private OpenTelemetryCollectorContainer() {
public static synchronized OpenTelemetryCollectorContainer getInstance() {
if (INSTANCE == null) {
- jaegerContainer = JaegerContainer.getInstance();
INSTANCE = new OpenTelemetryCollectorContainer();
- INSTANCE.start();
}
return INSTANCE;
}
+ public static synchronized OpenTelemetryCollectorContainer getInstance(JaegerContainer jaegerBackendContainer) {
+ if (INSTANCE == null) {
+ jaegerContainer = jaegerBackendContainer;
+ INSTANCE = new OpenTelemetryCollectorContainer();
+ }
+ return INSTANCE;
+ }
+
+ public static synchronized OpenTelemetryCollectorContainer getNewInstance() {
+ return new OpenTelemetryCollectorContainer();
+ }
+
+ public static synchronized OpenTelemetryCollectorContainer getNewInstance(JaegerContainer jaegerBackendContainer) {
+ OpenTelemetryCollectorContainer newInstance = new OpenTelemetryCollectorContainer();
+ jaegerContainer = jaegerBackendContainer;
+ return newInstance;
+ }
+
+ public static synchronized void dispose() {
+ if (INSTANCE != null) {
+ INSTANCE = null;
+ }
+ }
+
public void start() {
try {
- otelCollector.start();
+ otelCollectorContainer.start();
} catch (Exception e) {
throw new IllegalStateException("Starting the OTel container failed: " + e);
}
@@ -122,13 +151,8 @@ public void start() {
}
public synchronized void stop() {
- if (jaegerContainer != null) {
- jaegerContainer.stop();
- jaegerContainer = null;
- }
- INSTANCE = null;
try {
- otelCollector.stop();
+ otelCollectorContainer.stop();
} catch (Exception e) {
throw new IllegalStateException("Stopping the OTel container failed: " + e);
}
@@ -149,23 +173,23 @@ public String getPrometheusUrl() {
public List<PrometheusMetric> fetchMetrics(String nameToMonitor) throws InterruptedException {
String body = "";
try (Client client = ClientBuilder.newClient()) {
- WebTarget target = client.target(OpenTelemetryCollectorContainer.getInstance().getPrometheusUrl());
+ WebTarget target = client.target(this.getPrometheusUrl());
int attemptCount = 0;
boolean found = false;
// Request counts can vary. Setting high to help ensure test stability
while (!found && attemptCount < 30) {
- // Wait to give Micrometer time to export
+ // Wait to give metrics systems time to export
Thread.sleep(1000);
body = target.request().get().readEntity(String.class);
- found = body.contains(nameToMonitor);
+ found = body.contains("\n" + nameToMonitor);
attemptCount++;
}
}
- return buildPrometheusMetrics(body);
+ return body.isEmpty() ? List.of() : buildPrometheusMetrics(body);
}
public List getTraces(String serviceName) throws InterruptedException {
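
The revised container lifecycle is easiest to see from the caller's side. A minimal usage sketch, mirroring how the tests above drive it; the wrapper class name, the try/finally shape and the metric prefix are illustrative, not part of this class:

```java
import java.util.List;

import org.jboss.eap.qe.observability.containers.OpenTelemetryCollectorContainer;
import org.jboss.eap.qe.observability.prometheus.model.PrometheusMetric;

// Hypothetical helper, for illustration only.
public class OtelCollectorUsageSketch {

    // A throwaway, caller-managed instance: start and stop it yourself.
    static List<PrometheusMetric> scrapeOnce() throws Exception {
        OpenTelemetryCollectorContainer otelCollector = OpenTelemetryCollectorContainer.getNewInstance();
        otelCollector.start();
        try {
            // ... exercise the server so it exports metrics to otelCollector.getOtlpHttpEndpoint() ...
            return otelCollector.fetchMetrics("ft_timeout_");
        } finally {
            otelCollector.stop();
        }
    }

    // The shared singleton must additionally be disposed, so a later getInstance() builds a fresh container.
    static void sharedLifecycle() throws Exception {
        OpenTelemetryCollectorContainer shared = OpenTelemetryCollectorContainer.getInstance();
        shared.start();
        try {
            // ... point exporters at shared.getOtlpGrpcEndpoint() / shared.getOtlpHttpEndpoint() ...
        } finally {
            shared.stop();
            OpenTelemetryCollectorContainer.dispose();
        }
    }
}
```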
diff --git a/tooling-observability/src/main/java/org/jboss/eap/qe/observability/server/configuration/micrometer/MicrometerServerConfiguration.java b/tooling-observability/src/main/java/org/jboss/eap/qe/observability/server/configuration/micrometer/MicrometerServerConfiguration.java
index 644a00d2..61966167 100644
--- a/tooling-observability/src/main/java/org/jboss/eap/qe/observability/server/configuration/micrometer/MicrometerServerConfiguration.java
+++ b/tooling-observability/src/main/java/org/jboss/eap/qe/observability/server/configuration/micrometer/MicrometerServerConfiguration.java
@@ -1,4 +1,4 @@
-package org.jboss.eap.qe.micrometer.util;
+package org.jboss.eap.qe.observability.server.configuration.micrometer;
import org.jboss.eap.qe.microprofile.tooling.server.configuration.creaper.ManagementClientProvider;
import org.wildfly.extras.creaper.core.online.OnlineManagementClient;
diff --git a/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/util/MicroProfileTelemetryServerSetup.java b/tooling-observability/src/main/java/org/jboss/eap/qe/observability/server/configuration/microprofile/telemetry/MicroProfileTelemetryServerConfiguration.java
similarity index 97%
rename from microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/util/MicroProfileTelemetryServerSetup.java
rename to tooling-observability/src/main/java/org/jboss/eap/qe/observability/server/configuration/microprofile/telemetry/MicroProfileTelemetryServerConfiguration.java
index fcee3693..ec6cf167 100644
--- a/microprofile-fault-tolerance/src/test/java/org/jboss/eap/qe/microprofile/fault/tolerance/util/MicroProfileTelemetryServerSetup.java
+++ b/tooling-observability/src/main/java/org/jboss/eap/qe/observability/server/configuration/microprofile/telemetry/MicroProfileTelemetryServerConfiguration.java
@@ -1,4 +1,4 @@
-package org.jboss.eap.qe.microprofile.fault.tolerance.util;
+package org.jboss.eap.qe.observability.server.configuration.microprofile.telemetry;
import org.jboss.eap.qe.microprofile.tooling.server.configuration.creaper.ManagementClientProvider;
import org.wildfly.extras.creaper.core.online.OnlineManagementClient;
@@ -9,7 +9,7 @@
/**
* Operations required to set up the server for MicroProfile Telemetry
*/
-public class MicroProfileTelemetryServerSetup {
+public class MicroProfileTelemetryServerConfiguration {
private static final Address OPENTELEMETRY_EXTENSION_ADDRESS = Address
.extension("org.wildfly.extension.opentelemetry");
private static final Address OPENTELEMETRY_SUBSYSTEM_ADDRESS = Address
@@ -163,10 +163,10 @@ public static void enableMicroProfileTelemetry() throws Exception {
*/
public static void enableMicroProfileTelemetry(OnlineManagementClient client) throws Exception {
Operations operations = new Operations(client);
- if (!openTelemetryExtensionExists(operations)) {
+ if (!microProfileTelemetryExtensionExists(operations)) {
operations.add(MICROPROFILE_TELEMETRY_EXTENSION_ADDRESS);
}
- if (!openTelemetrySubsystemExists(operations)) {
+ if (!microProfileTelemetrySubsystemExists(operations)) {
operations.add(MICROPROFILE_TELEMETRY_SUBSYSTEM_ADDRESS);
}
new Administration(client).reloadIfRequired();
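
After the move and rename, callers outside the fault-tolerance module wire the class up as in the sketch below, which mirrors the enable/disable sequence used in UndeployDeployTest above (the wrapper class and method names are hypothetical; only the MicroProfileTelemetryServerConfiguration and OpenTelemetryCollectorContainer calls come from this patch):

```java
import org.jboss.eap.qe.observability.containers.OpenTelemetryCollectorContainer;
import org.jboss.eap.qe.observability.server.configuration.microprofile.telemetry.MicroProfileTelemetryServerConfiguration;

// Hypothetical helper showing the relocated configuration class in use.
public class MPTelemetrySetupSketch {

    public static void enableFor(OpenTelemetryCollectorContainer otelCollector) throws Exception {
        // OpenTelemetry subsystem first, then point it at the collector, then MP Telemetry on top of it
        MicroProfileTelemetryServerConfiguration.enableOpenTelemetry();
        MicroProfileTelemetryServerConfiguration
                .addOpenTelemetryCollectorConfiguration(otelCollector.getOtlpGrpcEndpoint());
        MicroProfileTelemetryServerConfiguration.enableMicroProfileTelemetry();
    }

    public static void disable() throws Exception {
        // tear down in reverse order
        MicroProfileTelemetryServerConfiguration.disableMicroProfileTelemetry();
        MicroProfileTelemetryServerConfiguration.disableOpenTelemetry();
    }
}
```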