diff --git a/README.md b/README.md index 55eeb8e..ed99998 100644 --- a/README.md +++ b/README.md @@ -102,11 +102,24 @@ Because the library contains Java and React script, to deploy it, the machine mu CI=false, else any warning will stop the construction -Docker image is then availabe in the package +The Docker image is then available in the package `https://github.com/camunda-community-hub/zeebe-cherry-runtime/pkgs/container/zeebe-cherry-runtime` +# Build +The project is configured to automatically publish the JAR file to Maven Central, and to publish a Docker image to the GitHub package registry +## Maven Central repository + +See .github/workflows/mvn-release.yml + + +Visit +https://github.com/camunda-community-hub/community-action-maven-release/tree/main + + +Known error: +`Error: Resource not accessible by integration` diff --git a/docker-cherry/.env b/docker-cherry/.env new file mode 100644 index 0000000..2c52531 --- /dev/null +++ b/docker-cherry/.env @@ -0,0 +1,21 @@ +## Image versions ## +CAMUNDA_CONNECTORS_VERSION=0.19.1 +CAMUNDA_OPTIMIZE_VERSION=3.10.0 +CAMUNDA_PLATFORM_VERSION=8.2.4 +CAMUNDA_WEB_MODELER_VERSION=8.2.2 +ELASTIC_VERSION=7.17.9 +KEYCLOAK_SERVER_VERSION=19.0.3 +MAILPIT_VERSION=v1.5.4 +POSTGRES_VERSION=14.5-alpine +HOST=localhost + +## Configuration ## +# By default the zeebe api is public; when setting this to `identity`, a valid zeebe client token is required +ZEEBE_AUTHENTICATION_MODE=none +ZEEBE_CLIENT_ID=zeebe +ZEEBE_CLIENT_SECRET=zecret + +# Set to 'true' to enable resource based authorizations for users and groups +# This can be used to limit access for users or groups to view/update specific +# processes and decisions in Operate and Tasklist +RESOURCE_AUTHORIZATIONS_ENABLED=false diff --git a/docker-cherry/README.md b/docker-cherry/README.md new file mode 100644 index 0000000..857c042 --- /dev/null +++ b/docker-cherry/README.md @@ -0,0 +1,36 @@ +# Docker compose + +This section shows different docker compose examples.
Feel free to copy and adapt them to your use case. + +There are two flavors: +* the default one, using an H2 database +* the Postgres one + +## H2 database + +Start a complete Cherry plus Zeebe platform + +```shell +docker-compose -f docker-compose-core.yaml -f docker-compose-cherry.yaml up -d +``` + +To remove the different containers created (use `stop` instead of `down` if you just want to stop the containers without removing them) + +```shell +docker-compose -f docker-compose-core.yaml -f docker-compose-cherry.yaml down -v +``` + + +## Postgres + +This example uses a PostgreSQL database to store information (statistics, uploaded connectors) + +```shell +docker-compose -f docker-compose-core.yaml -f docker-compose-cherry-postgres.yaml up -d +``` + +To remove the different containers created (use `stop` instead of `down` if you just want to stop the containers without removing them) + +```shell +docker-compose -f docker-compose-core.yaml -f docker-compose-cherry-postgres.yaml down -v +``` diff --git a/docker-cherry/connector-secrets.txt b/docker-cherry/connector-secrets.txt new file mode 100644 index 0000000..5b761a3 --- /dev/null +++ b/docker-cherry/connector-secrets.txt @@ -0,0 +1,2 @@ +# add secrets per line in the format NAME=VALUE +# WARNING: ensure not to commit changes to this file diff --git a/docker-cherry/docker-compose-alone.yml b/docker-cherry/docker-compose-alone.yml deleted file mode 100644 index 38c03df..0000000 --- a/docker-cherry/docker-compose-alone.yml +++ /dev/null @@ -1,20 +0,0 @@ -version: "3" -services: - - # Cherry got an error Caused by: org.springframework.beans.BeanInstantiationException: Failed to instantiate [io.camunda.zeebe.client.ZeebeClient]: - # Factory method 'zeebeClient' threw exception; nested exception is java.lang.NullPointerException: target - cherryruntime: - image: ghcr.io/camunda-community-hub/zeebe-cherry-runtime:latest - container_name: cherryruntime - ports: - - "9081:9081" - environment: - - ZEEBE_CLIENT_BROKER_GATEWAY_ADDRESS=host.docker.internal:26500 - - ZEEBE_CLIENT_SECURITY_PLAINTEXT=true - - ZEEBE_CLIENT_CLOUD_REGION= - - ZEEBE_CLIENT_CLOUD_CLUSTERID= - - ZEEBE_CLIENT_CLOUD_CLIENTID= - - ZEEBE_CLIENT_CLOUD_CLIENTSECRET= - - LOGGING_LEVEL_ROOT=INFO - - diff --git a/docker-cherry/docker-compose-postgres.yml b/docker-cherry/docker-compose-cherry-postgres.yaml similarity index 58% rename from docker-cherry/docker-compose-postgres.yml rename to docker-cherry/docker-compose-cherry-postgres.yaml index 5b92301..a8dd801 100644 --- a/docker-cherry/docker-compose-postgres.yml +++ b/docker-cherry/docker-compose-cherry-postgres.yaml @@ -1,27 +1,28 @@ version: "3" services: -# Attention, this configuration is not validated cherry-runtime: - image: docker pull ghcr.io/camunda-community-hub/zeebe-cherry-runtime:latest + image: ghcr.io/camunda-community-hub/zeebe-cherry-runtime:latest container_name: cherryruntime ports: - "9081:9081" environment: - - ZEEBE_CLIENT_BROKER_GATEWAY_ADDRESS=host.docker.internal:26500 + - ZEEBE_CLIENT_BROKER_GATEWAY_ADDRESS=zeebe:26500 - ZEEBE_CLIENT_SECURITY_PLAINTEXT=true - - ZEEBE_CLIENT_CLOUD_REGION= - - ZEEBE_CLIENT_CLOUD_CLUSTERID= - - ZEEBE_CLIENT_CLOUD_CLIENTID= - - ZEEBE_CLIENT_CLOUD_CLIENTSECRET= +# - ZEEBE_CLIENT_CLOUD_REGION= +# - ZEEBE_CLIENT_CLOUD_CLUSTERID= +# - ZEEBE_CLIENT_CLOUD_CLIENTID= +# - ZEEBE_CLIENT_CLOUD_CLIENTSECRET= - LOGGING_LEVEL_ROOT=INFO + - spring.profiles.active=postgres - spring.datasource.url=jdbc:postgresql://postgres/cherrydb - spring.datasource.username=camunda - spring.datasource.password=camundapassword - - 
spring.datasource.driver-class-name=org.postgresql.Driver - - spring.datasource.spring.jpa.database-platform=org.hibernate.dialect.PostgreSQL94Dialect - - spring.datasource.org.hibernate.dialect=org.hibernate.dialect.PostgreSQL94Dialect + + networks: + - camunda-platform depends_on: + - zeebe - postgres postgres: # https://hub.docker.com/_/postgres @@ -40,3 +41,8 @@ services: interval: 10s timeout: 5s retries: 5 + networks: + - camunda-platform + +networks: + camunda-platform: diff --git a/docker-cherry/docker-compose-cherry-zeebe.yml b/docker-cherry/docker-compose-cherry-zeebe.yml deleted file mode 100644 index 8b8dfff..0000000 --- a/docker-cherry/docker-compose-cherry-zeebe.yml +++ /dev/null @@ -1,83 +0,0 @@ -version: "3" -services: - - # Cherry got an error Caused by: org.springframework.beans.BeanInstantiationException: Failed to instantiate [io.camunda.zeebe.client.ZeebeClient]: - # Factory method 'zeebeClient' threw exception; nested exception is java.lang.NullPointerException: target - cherryruntime: - image: ghcr.io/camunda-community-hub/zeebe-cherry-runtime:latest - container_name: cherryruntime - ports: - - "9081:9081" - environment: - - ZEEBE_CLIENT_BROKER_GATEWAY_ADDRESS=zeebe:26500 - - ZEEBE_CLIENT_SECURITY_PLAINTEXT=true - - ZEEBE_CLIENT_CLOUD_REGION= - - ZEEBE_CLIENT_CLOUD_CLUSTERID= - - ZEEBE_CLIENT_CLOUD_CLIENTID= - - ZEEBE_CLIENT_CLOUD_CLIENTSECRET= - - LOGGING_LEVEL_ROOT=INFO - depends_on: - - zeebecherry - - zeebecherry: - image: camunda/zeebe:${CAMUNDA_CLOUD_VERSION:-8.2.4} - container_name: zeebecherry - environment: - - ZEEBE_LOG_LEVEL=debug - - ZEEBE_BROKER_CLUSTER_NODEID=0 - - ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT=2 - - ZEEBE_BROKER_CLUSTER_REPLICATIONFACTOR=3 - - ZEEBE_BROKER_CLUSTER_CLUSTERSIZE=3 - - ZEEBE_BROKER_CLUSTER_INITIALCONTACTPOINTS=zeebe:26502 - - ZEEBE_CLIENT_ID=clientId - - ZEEBE_CLIENT_SECRET=clientSecret - - ZEEBE_CLUSTER_ID=clusterId - - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_CLASSNAME=io.camunda.zeebe.exporter.ElasticsearchExporter - - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_ARGS_URL=http://elasticsearch:9200 - - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_ARGS_BULK_SIZE=1 - ports: - - 26500:26500 - depends_on: - - elasticsearchcherry - - operatecherry: - image: camunda/operate:${CAMUNDA_CLOUD_VERSION:-8.2.4} - container_name: operatecherry - environment: - - CAMUNDA_OPERATE_ZEEBE_GATEWAYADDRESS=zeebe:26500 - - CAMUNDA_OPERATE_ELASTICSEARCH_URL=http://elasticsearch:9200 - - CAMUNDA_OPERATE_ZEEBEELASTICSEARCH_URL=http://elasticsearch:9200 - ports: - - 8081:8080 - depends_on: - - elasticsearchcherry - - tasklistcherry: - image: camunda/tasklist:${CAMUNDA_CLOUD_VERSION:-8.2.4} - container_name: tasklistcherry - environment: - - CAMUNDA_TASKLIST_ZEEBE_GATEWAYADDRESS=zeebe:26500 - - CAMUNDA_TASKLIST_ELASTICSEARCH_URL=http://elasticsearch:9200 - - CAMUNDA_TASKLIST_ZEEBEELASTICSEARCH_URL=http://elasticsearch:9200 - ports: - - 8082:8080 - depends_on: - - elasticsearchcherry - - elasticsearchcherry: - image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION:-7.16.2} - container_name: elasticsearchcherry - environment: - - cluster.name=elasticsearch - - discovery.type=single-node - - bootstrap.memory_lock=true - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - ulimits: - memlock: - soft: -1 - hard: -1 - - ports: - - 9200:9200 - - diff --git a/docker-cherry/docker-compose-cherry.yaml b/docker-cherry/docker-compose-cherry.yaml new file mode 100644 index 0000000..12c24dd --- /dev/null +++ b/docker-cherry/docker-compose-cherry.yaml @@ -0,0 +1,23 @@ +version: "3" 
+services: + + cherryruntime: + image: ghcr.io/camunda-community-hub/zeebe-cherry-runtime:latest + container_name: cherryruntime + ports: + - "9081:9081" + environment: + - ZEEBE_CLIENT_BROKER_GATEWAY_ADDRESS=zeebe:26500 + - ZEEBE_CLIENT_SECURITY_PLAINTEXT=true +# - ZEEBE_CLIENT_CLOUD_REGION= +# - ZEEBE_CLIENT_CLOUD_CLUSTERID= +# - ZEEBE_CLIENT_CLOUD_CLIENTID= +# - ZEEBE_CLIENT_CLOUD_CLIENTSECRET= + - LOGGING_LEVEL_ROOT=INFO + networks: + - camunda-platform + depends_on: + - zeebe + +networks: + camunda-platform: diff --git a/docker-cherry/docker-compose-core.yaml b/docker-cherry/docker-compose-core.yaml new file mode 100644 index 0000000..3e59821 --- /dev/null +++ b/docker-cherry/docker-compose-core.yaml @@ -0,0 +1,164 @@ +# While the Docker images themselves are supported for production usage, +# this docker-compose.yaml is designed to be used by developers to run +# an environment locally. It is not designed to be used in production. +# We recommend to use Kubernetes in production with our Helm Charts: +# https://docs.camunda.io/docs/self-managed/platform-deployment/kubernetes-helm/ +# For local development, we recommend using KIND instead of `docker-compose`: +# https://docs.camunda.io/docs/self-managed/platform-deployment/helm-kubernetes/guides/local-kubernetes-cluster/ + +# This is a lightweight configuration with Zeebe, Operate, Tasklist, and Elasticsearch +# See docker-compose.yml for a configuration that also includes Optimize, Identity, and Keycloak. + +services: + + zeebe: # https://docs.camunda.io/docs/self-managed/platform-deployment/docker/#zeebe + image: camunda/zeebe:${CAMUNDA_PLATFORM_VERSION} + container_name: zeebe + ports: + - "26500:26500" + - "9600:9600" + environment: # https://docs.camunda.io/docs/self-managed/zeebe-deployment/configuration/environment-variables/ + - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_CLASSNAME=io.camunda.zeebe.exporter.ElasticsearchExporter + - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_ARGS_URL=http://elasticsearch:9200 + # default is 1000, see here: https://github.com/camunda/zeebe/blob/main/exporters/elasticsearch-exporter/src/main/java/io/camunda/zeebe/exporter/ElasticsearchExporterConfiguration.java#L259 + - ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_ARGS_BULK_SIZE=1 + # allow running with low disk space + - ZEEBE_BROKER_DATA_DISKUSAGECOMMANDWATERMARK=0.998 + - ZEEBE_BROKER_DATA_DISKUSAGEREPLICATIONWATERMARK=0.999 + - "JAVA_TOOL_OPTIONS=-Xms512m -Xmx512m" + restart: always + healthcheck: + test: [ "CMD-SHELL", "curl -f http://localhost:9600/ready" ] + interval: 30s + timeout: 5s + retries: 5 + start_period: 30s + volumes: + - zeebe:/usr/local/zeebe/data + networks: + - camunda-platform + depends_on: + - elasticsearch + + operate: # https://docs.camunda.io/docs/self-managed/platform-deployment/docker/#operate + image: camunda/operate:${CAMUNDA_PLATFORM_VERSION} + container_name: operate + ports: + - "8081:8080" + environment: # https://docs.camunda.io/docs/self-managed/operate-deployment/configuration/ + - CAMUNDA_OPERATE_ZEEBE_GATEWAYADDRESS=zeebe:26500 + - CAMUNDA_OPERATE_ELASTICSEARCH_URL=http://elasticsearch:9200 + - CAMUNDA_OPERATE_ZEEBEELASTICSEARCH_URL=http://elasticsearch:9200 + - management.endpoints.web.exposure.include=health + - management.endpoint.health.probes.enabled=true + healthcheck: + test: [ "CMD-SHELL", "curl -f http://localhost:8080/actuator/health/readiness" ] + interval: 30s + timeout: 1s + retries: 5 + start_period: 30s + networks: + - camunda-platform + depends_on: + - zeebe + - elasticsearch + + tasklist: # 
https://docs.camunda.io/docs/self-managed/platform-deployment/docker/#tasklist + image: camunda/tasklist:${CAMUNDA_PLATFORM_VERSION} + container_name: tasklist + ports: + - "8082:8080" + environment: # https://docs.camunda.io/docs/self-managed/tasklist-deployment/configuration/ + - CAMUNDA_TASKLIST_ZEEBE_GATEWAYADDRESS=zeebe:26500 + - CAMUNDA_TASKLIST_ELASTICSEARCH_URL=http://elasticsearch:9200 + - CAMUNDA_TASKLIST_ZEEBEELASTICSEARCH_URL=http://elasticsearch:9200 + - management.endpoints.web.exposure.include=health + - management.endpoint.health.probes.enabled=true + healthcheck: + test: [ "CMD-SHELL", "curl -f http://localhost:8080/actuator/health/readiness" ] + interval: 30s + timeout: 1s + retries: 5 + start_period: 30s + networks: + - camunda-platform + depends_on: + - zeebe + - elasticsearch + + connectors: # https://docs.camunda.io/docs/components/integration-framework/connectors/out-of-the-box-connectors/available-connectors-overview/ + image: camunda/connectors-bundle:${CAMUNDA_CONNECTORS_VERSION} + container_name: connectors + ports: + - "8085:8080" + environment: + - ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS=zeebe:26500 + - ZEEBE_CLIENT_SECURITY_PLAINTEXT=true + - OPERATE_CLIENT_ENABLED=true + - CAMUNDA_OPERATE_CLIENT_URL=http://operate:8080 + - CAMUNDA_OPERATE_CLIENT_USERNAME=demo + - CAMUNDA_OPERATE_CLIENT_PASSWORD=demo + - management.endpoints.web.exposure.include=health + - management.endpoint.health.probes.enabled=true + healthcheck: + test: [ "CMD-SHELL", "curl -f http://localhost:8080/actuator/health/readiness" ] + interval: 30s + timeout: 1s + retries: 5 + start_period: 30s + env_file: connector-secrets.txt + networks: + - camunda-platform + depends_on: + - zeebe + - operate + + elasticsearch: # https://hub.docker.com/_/elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION} + container_name: elasticsearch + ports: + - "9200:9200" + - "9300:9300" + environment: + - bootstrap.memory_lock=true + - discovery.type=single-node + - xpack.security.enabled=false + # allow running with low disk space + - cluster.routing.allocation.disk.threshold_enabled=false + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + ulimits: + memlock: + soft: -1 + hard: -1 + restart: always + healthcheck: + test: [ "CMD-SHELL", "curl -f http://localhost:9200/_cat/health | grep -q green" ] + interval: 30s + timeout: 5s + retries: 3 + volumes: + - elastic:/usr/share/elasticsearch/data + networks: + - camunda-platform + + kibana: + image: docker.elastic.co/kibana/kibana:${ELASTIC_VERSION} + container_name: kibana + ports: + - 5601:5601 + volumes: + - kibana:/usr/share/kibana/data + networks: + - camunda-platform + depends_on: + - elasticsearch + profiles: + - kibana + +volumes: + zeebe: + elastic: + kibana: + +networks: + camunda-platform: diff --git a/src/main/frontend/src/dashboard/Dashboard.jsx b/src/main/frontend/src/dashboard/Dashboard.jsx index b64155e..5abf71c 100644 --- a/src/main/frontend/src/dashboard/Dashboard.jsx +++ b/src/main/frontend/src/dashboard/Dashboard.jsx @@ -31,8 +31,8 @@ class Dashboard extends React.Component { loading: true, orderBy: "nameAsc", period: "ONEDAY", - showActif: true, - showInactif: true, + showActive: true, + showInactive: true, showWorker: true, showConnector: true, showOnlyError: false, @@ -65,17 +65,17 @@ class Dashboard extends React.Component {
- -
+ Peak {this.state.runner.performance.peakTimeInMs} ms
diff --git a/src/main/frontend/src/parameter/Parameters.jsx b/src/main/frontend/src/parameter/Parameters.jsx index faa8a36..233694f 100644 --- a/src/main/frontend/src/parameter/Parameters.jsx +++ b/src/main/frontend/src/parameter/Parameters.jsx @@ -45,6 +45,7 @@ class Parameters extends React.Component { +
@@ -52,112 +53,172 @@ class Parameters extends React.Component {
-

Zeebe Connection

+
+
-
-
- -
-
+
+
Zeebe connection
+
-
-
-
- this.setParameterProperty("gatewayaddress", event.target.value)}/> -
-
-
-
- -
-
-
+
+
+ +
+
-
-
-
- this.setParameterProperty("cloudregion", event.target.value)}/> -
-
-
-
- this.setParameterProperty("cloudclusterid", event.target.value)}/> -
-
-
-
- this.setParameterProperty("cloudclientid", event.target.value)}/> +
+
+
+ this.setParameterProperty("gatewayaddress", event.target.value)}/> +
+
+
+
+ +
+
+
+ + +
+
+
+ this.setParameterProperty("cloudregion", event.target.value)}/> +
+
+
+
+ this.setParameterProperty("cloudclusterid", event.target.value)}/> +
+
+
+
+ this.setParameterProperty("cloudclientid", event.target.value)}/> +
+
+
+
+ this.setParameterProperty("cloudclientsecret", event.target.value)}/> +
+
+
+ +
-
-
- this.setParameterProperty("cloudclientsecret", event.target.value)}/> + + + +
+ +
+
Database
+
+
+
+ +
+
+
+
+ this.setParameterProperty("datasourceurl", event.target.value)}/> +
+
+
+
+ this.setParameterProperty("datasourceusername", event.target.value)}/> +
+
+
-

Workers

-
-
- this.setParameterProperty("maxjobsactive", event.target.value)}/> -
-
-
-
- this.setParameterProperty("nbthreads", event.target.value)}/> + + + +
+
+
+
Worker
+
+
+
+ this.setParameterProperty("maxjobsactive", event.target.value)}/> +
+
+
+
+ this.setParameterProperty("nbthreads", event.target.value)}/> +
+
+
+
+
) diff --git a/src/main/frontend/src/store/Store.jsx b/src/main/frontend/src/store/Store.jsx index 41cfa3d..4d4b4d3 100644 --- a/src/main/frontend/src/store/Store.jsx +++ b/src/main/frontend/src/store/Store.jsx @@ -58,7 +58,6 @@ class Store extends React.Component { Connector Release - Status Operation @@ -80,8 +79,7 @@ class Store extends React.Component { {(connectorStore.status === "NEW" || connectorStore.status === "OLD") && diff --git a/src/main/java/io/camunda/cherry/admin/AdminRestController.java b/src/main/java/io/camunda/cherry/admin/AdminRestController.java index 4a27588..82c6e3e 100644 --- a/src/main/java/io/camunda/cherry/admin/AdminRestController.java +++ b/src/main/java/io/camunda/cherry/admin/AdminRestController.java @@ -8,6 +8,7 @@ /* ******************************************************************** */ package io.camunda.cherry.admin; +import io.camunda.cherry.db.entity.JarStorageEntity; import io.camunda.cherry.definition.AbstractRunner; import io.camunda.cherry.runner.JobRunnerFactory; import io.camunda.cherry.runtime.HistoryFactory; @@ -21,6 +22,11 @@ import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; +import javax.sql.DataSource; +import java.sql.Connection; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneOffset; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -31,21 +37,31 @@ public class AdminRestController { Logger logger = LoggerFactory.getLogger(AdminRestController.class.getName()); - @Autowired - JobRunnerFactory cherryJobRunnerFactory; + private final JobRunnerFactory jobRunnerFactory; - @Autowired - HistoryFactory historyFactory; + private final HistoryFactory historyFactory; - @Autowired - ZeebeConfiguration zeebeConfiguration; + private final ZeebeConfiguration zeebeConfiguration; /** * Spring populate the list of all workers */ - @Autowired - private List listRunner; - + private final List listRunner; + + private final DataSource dataSource; + + AdminRestController( JobRunnerFactory jobRunnerFactory, + HistoryFactory historyFactory, + ZeebeConfiguration zeebeConfiguration, +List listRunner, + DataSource dataSource +) { +this.jobRunnerFactory =jobRunnerFactory; +this.historyFactory=historyFactory; + this.zeebeConfiguration=zeebeConfiguration; + this.listRunner = listRunner; + this.dataSource = dataSource; + } @GetMapping(value = "/api/ping", produces = "application/json") public Map ping() { Map parameters = new HashMap<>(); @@ -58,7 +74,8 @@ public Map getParameters() { Map parameters = new HashMap<>(); parameters.put("zeebekindconnection", zeebeConfiguration.isCloudConfiguration() ? "SAAS" : "GATEWAY"); parameters.put("gatewayaddress", zeebeConfiguration.getGatewayAddress()); - parameters.put("plaintext", zeebeConfiguration.isPlaintext()==null? null : zeebeConfiguration.isPlaintext().toString()); + parameters.put("plaintext", + zeebeConfiguration.isPlaintext() == null ? 
null : zeebeConfiguration.isPlaintext().toString()); parameters.put("cloudregion", zeebeConfiguration.getRegion()); parameters.put("cloudclusterid", zeebeConfiguration.getClusterId()); @@ -66,19 +83,26 @@ public Map getParameters() { parameters.put("cloudclientsecret", ""); // never send the client Secret // we don't want the configuration here, but the running information - parameters.put("maxjobsactive", cherryJobRunnerFactory.getMaxJobActive()); - parameters.put("nbthreads", cherryJobRunnerFactory.getNumberOfThreads()); + parameters.put("maxjobsactive", jobRunnerFactory.getMaxJobActive()); + parameters.put("nbthreads", jobRunnerFactory.getNumberOfThreads()); + + try(Connection con = dataSource.getConnection()) { + parameters.put("datasourceproductname",con.getMetaData().getDatabaseProductName()); + parameters.put("datasourceurl",con.getMetaData().getURL()); + parameters.put("datasourceusername",con.getMetaData().getUserName()); + + } catch(Exception e){} return parameters; } @GetMapping(value = "/api/runtime/threads", produces = "application/json") public Integer getNumberOfThreads() { - return cherryJobRunnerFactory.getNumberOfThreads(); + return jobRunnerFactory.getNumberOfThreads(); } @PutMapping(value = "/api/runtime/setthreads", produces = "application/json") public void setNumberOfThread(@RequestParam(name = "threads") Integer numberOfThreads) { - cherryJobRunnerFactory.setNumberOfThreads(numberOfThreads); + jobRunnerFactory.setNumberOfThreads(numberOfThreads); } } diff --git a/src/main/java/io/camunda/cherry/db/entity/OperationEntity.java b/src/main/java/io/camunda/cherry/db/entity/OperationEntity.java index d854522..4db481e 100644 --- a/src/main/java/io/camunda/cherry/db/entity/OperationEntity.java +++ b/src/main/java/io/camunda/cherry/db/entity/OperationEntity.java @@ -42,6 +42,6 @@ public class OperationEntity { private Long id; public enum Operation { - HOSTNAME, STARTRUNNER, STOPRUNNER, SETTHRESOLD, STOPRUNTIME, STARTRUNTIME, SERVERINFO, ERROR + HOSTNAME, STARTRUNNER, STOPRUNNER, SETTHRESOLD, STOPRUNTIME, STARTRUNTIME, SERVERINFO, ERROR, REMOVE } } diff --git a/src/main/java/io/camunda/cherry/db/entity/RunnerDefinitionEntity.java b/src/main/java/io/camunda/cherry/db/entity/RunnerDefinitionEntity.java index 703df24..3ae3c51 100644 --- a/src/main/java/io/camunda/cherry/db/entity/RunnerDefinitionEntity.java +++ b/src/main/java/io/camunda/cherry/db/entity/RunnerDefinitionEntity.java @@ -20,13 +20,13 @@ public class RunnerDefinitionEntity { @Column(name = "name", length = 300) public String name; - @Column(name = "classname", length = 1000, unique = true) + @Column(name = "classname", length = 1000) public String classname; @Column(name = "collection", length = 300) public String collectionName; - @Column(name = "type", length = 1000) + @Column(name = "type", length = 1000, unique = true) public String type; @Column(name = "origin", length = 1000) diff --git a/src/main/java/io/camunda/cherry/db/entity/RunnerExecutionEntity.java b/src/main/java/io/camunda/cherry/db/entity/RunnerExecutionEntity.java index 9935526..37bd086 100644 --- a/src/main/java/io/camunda/cherry/db/entity/RunnerExecutionEntity.java +++ b/src/main/java/io/camunda/cherry/db/entity/RunnerExecutionEntity.java @@ -21,7 +21,7 @@ public class RunnerExecutionEntity { @Enumerated(EnumType.STRING) public TypeExecutor typeExecutor; - @Column(name = "runner_type", length = 100) + @Column(name = "runner_type", length = 1000) public String runnerType; /** diff --git 
a/src/main/java/io/camunda/cherry/db/repository/RunnerDefinitionRepository.java b/src/main/java/io/camunda/cherry/db/repository/RunnerDefinitionRepository.java index 3aacd15..19735d7 100644 --- a/src/main/java/io/camunda/cherry/db/repository/RunnerDefinitionRepository.java +++ b/src/main/java/io/camunda/cherry/db/repository/RunnerDefinitionRepository.java @@ -13,6 +13,14 @@ public interface RunnerDefinitionRepository extends JpaRepository selectNotInType(@Param("listTypes") List listTypes); + @Query("select runnerDefinition from RunnerDefinitionEntity runnerDefinition" + " where runnerDefinition.jar is not null") List selectAllByJarNotNull(); diff --git a/src/main/java/io/camunda/cherry/db/repository/RunnerExecutionRepository.java b/src/main/java/io/camunda/cherry/db/repository/RunnerExecutionRepository.java index 2360df0..b40b987 100644 --- a/src/main/java/io/camunda/cherry/db/repository/RunnerExecutionRepository.java +++ b/src/main/java/io/camunda/cherry/db/repository/RunnerExecutionRepository.java @@ -4,14 +4,17 @@ import io.camunda.cherry.definition.AbstractRunner; import org.springframework.data.domain.Pageable; import org.springframework.data.jpa.repository.JpaRepository; +import org.springframework.data.jpa.repository.Modifying; import org.springframework.data.jpa.repository.Query; import org.springframework.data.repository.query.Param; +import org.springframework.transaction.annotation.Transactional; import java.time.Instant; import java.time.LocalDateTime; import java.util.List; import java.util.Map; +@Transactional public interface RunnerExecutionRepository extends JpaRepository { @Query("select runnerexecution from RunnerExecutionEntity runnerexecution" @@ -39,4 +42,10 @@ List selectRunnerRecordsByStates(@Param("runnerType") Str @Param("dateToSearch") LocalDateTime dateToSearch, @Param("listStates") List listStates, Pageable pageable); + + @Modifying + @Query(value = "delete from RunnerExecutionEntity runnerexecution" + + " where runnerexecution.runnerType = :runnerType") + void deleteFromEntityType(@Param("runnerType") String runnerType); + } diff --git a/src/main/java/io/camunda/cherry/definition/AbstractWorker.java b/src/main/java/io/camunda/cherry/definition/AbstractWorker.java index b01757a..e27635e 100644 --- a/src/main/java/io/camunda/cherry/definition/AbstractWorker.java +++ b/src/main/java/io/camunda/cherry/definition/AbstractWorker.java @@ -101,8 +101,11 @@ public void handle(final JobClient jobClient, final ActivatedJob activatedJob) { errorCode = "Exception"; errorMessage = e.getMessage(); } - // save the output in the process instance - jobClient.newCompleteCommand(activatedJob.getKey()).variables(contextExecution.outVariablesValue).send().join(); + if(ExecutionStatusEnum.FAIL.equals(status) || ExecutionStatusEnum.BPMNERROR.equals(status)) + jobClient.newThrowErrorCommand(activatedJob.getKey()).errorCode(errorCode).errorMessage(errorMessage).send().join(); + else + // save the output in the process instance + jobClient.newCompleteCommand(activatedJob.getKey()).variables(contextExecution.outVariablesValue).send().join(); contextExecution.endExecution = System.currentTimeMillis(); if (isLog()) diff --git a/src/main/java/io/camunda/cherry/embeddedrunner/ping/connector/PingConnector.java b/src/main/java/io/camunda/cherry/embeddedrunner/ping/connector/PingConnector.java index 0503ec8..dbf6c78 100644 --- a/src/main/java/io/camunda/cherry/embeddedrunner/ping/connector/PingConnector.java +++ b/src/main/java/io/camunda/cherry/embeddedrunner/ping/connector/PingConnector.java 
@@ -24,11 +24,9 @@ /* ------------------------------------------------------------------- */ @Component -@OutboundConnector(name = PingConnector.TYPE_PINGCONNECTOR, - inputVariables = { PingConnectorInput.INPUT_MESSAGE, +@OutboundConnector(name = PingConnector.TYPE_PINGCONNECTOR, inputVariables = { PingConnectorInput.INPUT_MESSAGE, PingConnectorInput.INPUT_DELAY, - PingConnectorInput.INPUT_THROWERRORPLEASE }, - type = PingConnector.TYPE_PINGCONNECTOR) + PingConnectorInput.INPUT_THROWERRORPLEASE }, type = PingConnector.TYPE_PINGCONNECTOR) public class PingConnector extends AbstractConnector implements IntFrameworkRunner, OutboundConnectorFunction { public static final String ERROR_BAD_WEATHER = "BAD_WEATHER"; diff --git a/src/main/java/io/camunda/cherry/runner/JobRunnerFactory.java b/src/main/java/io/camunda/cherry/runner/JobRunnerFactory.java index 1c4b84f..e430160 100644 --- a/src/main/java/io/camunda/cherry/runner/JobRunnerFactory.java +++ b/src/main/java/io/camunda/cherry/runner/JobRunnerFactory.java @@ -10,7 +10,6 @@ import io.camunda.cherry.definition.AbstractConnector; import io.camunda.cherry.definition.AbstractRunner; import io.camunda.cherry.definition.AbstractWorker; - import io.camunda.cherry.definition.CherryConnectorJobHandler; import io.camunda.cherry.definition.SdkRunnerConnector; import io.camunda.cherry.exception.OperationAlreadyStartedException; @@ -79,7 +78,7 @@ public void startAll() { // now start the Zeebe Client try { zeebeContainer.startZeebeeClient(); - } catch(TechnicalException e) { + } catch (TechnicalException e) { logger.error("ZeebeClient is not started, can't start runner"); return; } @@ -189,11 +188,13 @@ public boolean isRunnerActive(String runnerType) throws OperationException { /** * We ask the container what is the number of job active configured + * * @return number of job active */ public int getMaxJobActive() { return zeebeContainer.getMaxJobsActive(); } + public int getNumberOfThreads() { return zeebeContainer.getNumberOfThreads(); } @@ -203,7 +204,6 @@ public void setNumberOfThreads(int numberOfThreadsRequired) throws TechnicalExce zeebeContainer.stopZeebeeClient(); zeebeContainer.startZeebeeClient(); - // stop all running and restart them for (Running running : mapRunning.values()) { closeJobWorker(running.containerJobWorker.getJobWorker()); diff --git a/src/main/java/io/camunda/cherry/runner/LogOperation.java b/src/main/java/io/camunda/cherry/runner/LogOperation.java index e10dac9..a0ac77b 100644 --- a/src/main/java/io/camunda/cherry/runner/LogOperation.java +++ b/src/main/java/io/camunda/cherry/runner/LogOperation.java @@ -34,7 +34,7 @@ public class LogOperation { * @param message message */ public void log(OperationEntity.Operation operation, String message) { - logger.info("Operation {} [{}]",operation.toString(),message); + logger.info("Operation {} [{}]", operation.toString(), message); OperationEntity operationEntity = new OperationEntity(); operationEntity.operation = operation; operationEntity.executionTime = LocalDateTime.ofInstant(Instant.now(), ZoneOffset.UTC); @@ -108,7 +108,7 @@ public void logException(String runnerType, String message, Exception ex) { * @param e exception */ public void logError(String message, Exception e) { - logger.error("Exception {} {}", message, e.getMessage()); + logger.error("Exception {} {}", message, e); OperationEntity operationEntity = new OperationEntity(); operationEntity.executionTime = LocalDateTime.ofInstant(Instant.now(), ZoneOffset.UTC); operationEntity.operation = 
OperationEntity.Operation.ERROR; @@ -116,13 +116,14 @@ public void logError(String message, Exception e) { operationEntity.message = message + ": " + e.getMessage(); saveOperationEntity(operationEntity); } + /** * OperationLog an error * * @param message contextual message (what operation was performed) */ public void logError(String message) { - logger.error("Error {} {}", message); + logger.error("Error {}", message); OperationEntity operationEntity = new OperationEntity(); operationEntity.executionTime = LocalDateTime.ofInstant(Instant.now(), ZoneOffset.UTC); operationEntity.operation = OperationEntity.Operation.ERROR; @@ -131,16 +132,15 @@ public void logError(String message) { saveOperationEntity(operationEntity); } - private String getServerIdentification() { return getHostName(); } private String getHostName() { try { - InetAddress IP = InetAddress.getLocalHost(); + InetAddress ipAddress = InetAddress.getLocalHost(); - return IP.getHostName(); + return ipAddress.getHostName(); } catch (Exception e) { return "CherryHostName"; } @@ -150,7 +150,7 @@ private void saveOperationEntity(OperationEntity operationEntity) { try { operationRepository.save(operationEntity); } catch (Exception e) { - logger.error("Can't save OperationEntity " + operationEntity); + logger.error("Can't save OperationEntity [{}]", operationEntity); } } } diff --git a/src/main/java/io/camunda/cherry/runner/RunnerEmbeddedFactory.java b/src/main/java/io/camunda/cherry/runner/RunnerEmbeddedFactory.java index 974ee36..cde9f0d 100644 --- a/src/main/java/io/camunda/cherry/runner/RunnerEmbeddedFactory.java +++ b/src/main/java/io/camunda/cherry/runner/RunnerEmbeddedFactory.java @@ -8,12 +8,13 @@ package io.camunda.cherry.runner; +import io.camunda.cherry.db.entity.OperationEntity; +import io.camunda.cherry.db.entity.RunnerDefinitionEntity; import io.camunda.cherry.definition.AbstractConnector; import io.camunda.cherry.definition.AbstractRunner; import io.camunda.cherry.definition.AbstractWorker; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.List; @@ -24,14 +25,22 @@ public class RunnerEmbeddedFactory { Logger logger = LoggerFactory.getLogger(RunnerEmbeddedFactory.class.getName()); - @Autowired List listAbstractConnector; - @Autowired List listAbstractWorker; - @Autowired StorageRunner storageRunner; + LogOperation logOperation; + + RunnerEmbeddedFactory(List listAbstractConnector, + List listAbstractWorker, + StorageRunner storageRunner, + LogOperation logOperation) { + this.listAbstractConnector = listAbstractConnector; + this.listAbstractWorker = listAbstractWorker; + this.storageRunner = storageRunner; + this.logOperation = logOperation; + } public void registerInternalRunner() { List listRunners = Stream.concat(listAbstractConnector.stream(), listAbstractWorker.stream()) @@ -47,14 +56,24 @@ public void registerInternalRunner() { try { storageRunner.saveEmbeddedRunner(runner); } catch (Exception e) { - logger.error("RunnerEmbeddedFactory: CAN'T SAVE [" + runner.getType() + (runner.getName() != null ? - " (" + runner.getName() + ")" : - "") + "] error " + e.getMessage()); - continue; + logOperation.log(OperationEntity.Operation.ERROR, "RunnerEmbeddedFactory: CAN'T SAVE [" + runner.getType() + ( + runner.getName() != null ? 
+ " (" + runner.getName() + ")" : + "") + "] error " + e.getMessage()); } } } + public List getAllRunners() { + return Stream.concat(listAbstractConnector.stream(), listAbstractWorker.stream()).map(t -> { + RunnerLightDefinition light = new RunnerLightDefinition(); + light.name = t.getName(); + light.type = t.getType(); + light.origin = RunnerDefinitionEntity.Origin.EMBEDDED; + return light; + }).toList(); + } + /** * Return the runner by its name, if it exists * @@ -67,4 +86,17 @@ public AbstractRunner getByName(String name) { .toList(); return listRunners.isEmpty() ? null : listRunners.get(0); } + + /** + * Return the runner by its name, if it exists + * + * @param type type of the runner + * @return null if not exist, else the runner + */ + public AbstractRunner getByType(String type) { + List listRunners = Stream.concat(listAbstractConnector.stream(), listAbstractWorker.stream()) + .filter(t -> t.getType().equals(type)) + .toList(); + return listRunners.isEmpty() ? null : listRunners.get(0); + } } diff --git a/src/main/java/io/camunda/cherry/runner/RunnerFactory.java b/src/main/java/io/camunda/cherry/runner/RunnerFactory.java index 95ed314..aa38bd1 100644 --- a/src/main/java/io/camunda/cherry/runner/RunnerFactory.java +++ b/src/main/java/io/camunda/cherry/runner/RunnerFactory.java @@ -11,14 +11,18 @@ /* ******************************************************************** */ package io.camunda.cherry.runner; +import io.camunda.cherry.db.entity.OperationEntity; import io.camunda.cherry.db.entity.RunnerDefinitionEntity; +import io.camunda.cherry.db.repository.RunnerExecutionRepository; import io.camunda.cherry.definition.AbstractRunner; import io.camunda.cherry.definition.SdkRunnerConnector; import io.camunda.connector.api.annotation.OutboundConnector; import io.camunda.connector.api.outbound.OutboundConnectorFunction; +import org.hibernate.Session; +import org.hibernate.SessionFactory; +import org.hibernate.Transaction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.io.File; @@ -26,22 +30,37 @@ import java.net.URLClassLoader; import java.util.ArrayList; import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; @Service public class RunnerFactory { - @Autowired - RunnerEmbeddedFactory runnerEmbeddedFactory; - @Autowired - RunnerUploadFactory runnerUploadFactory; - @Autowired - StorageRunner storageRunner; - - @Autowired - LogOperation logOperation; + private final RunnerEmbeddedFactory runnerEmbeddedFactory; + private final RunnerUploadFactory runnerUploadFactory; + private final StorageRunner storageRunner; + private final RunnerExecutionRepository runnerExecutionRepository; + private final LogOperation logOperation; + private final SessionFactory sessionFactory; Logger logger = LoggerFactory.getLogger(RunnerFactory.class.getName()); + RunnerFactory(RunnerEmbeddedFactory runnerEmbeddedFactory, + RunnerUploadFactory runnerUploadFactory, + StorageRunner storageRunner, + RunnerExecutionRepository runnerExecutionRepository, + LogOperation logOperation, + SessionFactory sessionFactory) { + this.runnerEmbeddedFactory = runnerEmbeddedFactory; + this.runnerUploadFactory = runnerUploadFactory; + this.storageRunner = storageRunner; + this.runnerExecutionRepository = runnerExecutionRepository; + this.logOperation = logOperation; + this.sessionFactory = sessionFactory; + } + 
public void init() { logger.info("----- RunnerFactory.1 Load all embedded runner"); @@ -56,6 +75,40 @@ public void init() { runnerUploadFactory.loadJavaFromStorage(); } + /** + * Must be call after the initialisation + * all runners are loaded amd identified. The storageRunner are checked, and all runner in the database + * which are not loaded are purged. + */ + public void synchronize() { + Map mapExistingRunners = Stream.concat( + runnerEmbeddedFactory.getAllRunners().stream(), runnerUploadFactory.getAllRunners().stream()) + .collect(Collectors.toMap(RunnerLightDefinition::getType, Function.identity())); + + // get the list of entities + List listRunnersEntity = storageRunner.getRunners(new StorageRunner.Filter()); + // identify entity which does not exist + List listEntityToRemove = listRunnersEntity.stream() + .filter(t -> !mapExistingRunners.containsKey(t.type)) + .toList(); + + for (RunnerDefinitionEntity entityToRemove : listEntityToRemove) { + logOperation.log(OperationEntity.Operation.REMOVE, + "Entity type[" + entityToRemove.type + "] name[" + entityToRemove.name + "]"); + + try (Session session = sessionFactory.openSession()) { + Transaction txn = session.beginTransaction(); + runnerExecutionRepository.deleteFromEntityType(entityToRemove.type); + + storageRunner.removeEntity(entityToRemove); + txn.commit(); + } catch (Exception e) { + logOperation.logError("Can't delete [" + entityToRemove.type + "]", e); + } + } + + } + /** * Get All runners * @@ -96,7 +149,7 @@ private AbstractRunner getRunnerFromEntity(RunnerDefinitionEntity runnerDefiniti ClassLoader loader; try { // if this class is embedded? - AbstractRunner embeddedRunner = runnerEmbeddedFactory.getByName(runnerDefinitionEntity.name); + AbstractRunner embeddedRunner = runnerEmbeddedFactory.getByType(runnerDefinitionEntity.type); if (embeddedRunner != null) { return embeddedRunner; } @@ -111,7 +164,7 @@ private AbstractRunner getRunnerFromEntity(RunnerDefinitionEntity runnerDefiniti Object objectRunner = clazz.getDeclaredConstructor().newInstance(); if (AbstractRunner.class.isAssignableFrom(objectRunner.getClass())) { - // if (objectRunner instanceof AbstractRunner runner) { + // if (objectRunner instanceof AbstractRunner runner) { return (AbstractRunner) objectRunner; } else if (objectRunner instanceof OutboundConnectorFunction outboundConnector) { SdkRunnerConnector runner = new SdkRunnerConnector(outboundConnector); @@ -121,9 +174,9 @@ private AbstractRunner getRunnerFromEntity(RunnerDefinitionEntity runnerDefiniti return runner; } logger.error("No method to get a runner from [" + runnerDefinitionEntity.name + "]"); - logOperation.logError("Class ["+runnerDefinitionEntity.classname+"] in jar["+jarFileName+"] not a Runner or OutboundConnectorFunction"); - } - else { + logOperation.logError("Class [" + runnerDefinitionEntity.classname + "] in jar[" + jarFileName + + "] not a Runner or OutboundConnectorFunction"); + } else { logOperation.logError("No Jar file, not an embedded runner for [" + runnerDefinitionEntity.name + "]"); } return null; diff --git a/src/main/java/io/camunda/cherry/runner/RunnerLightDefinition.java b/src/main/java/io/camunda/cherry/runner/RunnerLightDefinition.java new file mode 100644 index 0000000..b31a53a --- /dev/null +++ b/src/main/java/io/camunda/cherry/runner/RunnerLightDefinition.java @@ -0,0 +1,30 @@ +/* ******************************************************************** */ +/* */ +/* RunnerLightDefinition */ +/* */ +/* To carry information on different Runner */ +/* */ +/* 
******************************************************************** */ +package io.camunda.cherry.runner; + +import io.camunda.cherry.db.entity.RunnerDefinitionEntity; + +public class RunnerLightDefinition { + + public String type; + + public String name; + public RunnerDefinitionEntity.Origin origin; + + public String getType() { + return type; + } + + public String getName() { + return name; + } + + public RunnerDefinitionEntity.Origin getOrigin() { + return origin; + } +} diff --git a/src/main/java/io/camunda/cherry/runner/RunnerUploadFactory.java b/src/main/java/io/camunda/cherry/runner/RunnerUploadFactory.java index 4a3ff53..e350199 100644 --- a/src/main/java/io/camunda/cherry/runner/RunnerUploadFactory.java +++ b/src/main/java/io/camunda/cherry/runner/RunnerUploadFactory.java @@ -2,12 +2,12 @@ import io.camunda.cherry.db.entity.JarStorageEntity; import io.camunda.cherry.db.entity.OperationEntity; +import io.camunda.cherry.db.entity.RunnerDefinitionEntity; import io.camunda.cherry.definition.AbstractRunner; import io.camunda.connector.api.annotation.OutboundConnector; import org.hibernate.SessionFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.context.annotation.Configuration; import org.springframework.stereotype.Service; @@ -16,7 +16,9 @@ import java.io.FileOutputStream; import java.net.URL; import java.net.URLClassLoader; +import java.util.ArrayList; import java.util.Enumeration; +import java.util.List; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; @@ -24,24 +26,31 @@ @Configuration public class RunnerUploadFactory { + private final StorageRunner storageRunner; + private final LogOperation logOperation; + private final SessionFactory sessionFactory; Logger logger = LoggerFactory.getLogger(RunnerUploadFactory.class.getName()); - @Autowired - StorageRunner storageRunner; - - @Autowired - LogOperation logOperation; - + private final List listLightRunners = new ArrayList<>(); @Value("${cherry.connectorslib.uploadpath:@null}") private String uploadPath; - @Value("${cherry.connectorslib.classloaderpath:@null}") private String classLoaderPath; - @Value("${cherry.connectorslib.forcerefresh:false}") private Boolean forceRefresh; - @Autowired - private SessionFactory sessionFactory; + public RunnerUploadFactory(StorageRunner storageRunner, LogOperation logOperation, SessionFactory sessionFactory) { + this.storageRunner = storageRunner; + this.logOperation = logOperation; + this.sessionFactory = sessionFactory; + } + + private static RunnerLightDefinition getLightFromRunnerDefinitionEntity(RunnerDefinitionEntity entityRunner) { + RunnerLightDefinition runnerLightDefinition = new RunnerLightDefinition(); + runnerLightDefinition.name = entityRunner.name; + runnerLightDefinition.type = entityRunner.type; + runnerLightDefinition.origin = RunnerDefinitionEntity.Origin.JARFILE; + return runnerLightDefinition; + } public void loadConnectorsFromClassLoaderPath() { // No special operation to do @@ -102,8 +111,17 @@ public void loadStorageFromUploadPath() { jarStorageEntity = storageRunner.getJarStorageByName(jarFile.getName()); if (jarStorageEntity != null && !Boolean.TRUE.equals(forceRefresh)) { + // we don't reload the JAR file, so we believe what we have in the database + if (jarStorageEntity != null) { + List runners = storageRunner.getRunners( + new StorageRunner.Filter().jarFileName(jarStorageEntity.name)); + 
listLightRunners.addAll( + runners.stream().map(RunnerUploadFactory::getLightFromRunnerDefinitionEntity).toList()); + } + continue; } + if (jarStorageEntity == null) { // save it jarStorageEntity = storageRunner.saveJarRunner(jarFile); @@ -139,6 +157,7 @@ public void loadStorageFromUploadPath() { // this is a AbstractConnector AbstractRunner runner = (AbstractRunner) instanceClass; storageRunner.saveUploadRunner(runner, jarStorageEntity); + listLightRunners.add(getLightFromRunner(runner)); logLoadJar.append("RunnerDectection["); logLoadJar.append(runner.getName()); @@ -146,14 +165,16 @@ public void loadStorageFromUploadPath() { logLoadJar.append(runner.getType()); logLoadJar.append("]; "); logOperation.log(OperationEntity.Operation.SERVERINFO, - "Load Jar[" + jarFile.getName() + "] Runner[" + runner.getName() + "] type[" - + runner.getType() + "]"); + "Load Jar[" + jarFile.getName() + "] Runner[" + runner.getName() + "] type[" + runner.getType() + + "]"); nbRunners++; } else if (connectorAnnotation != null) { // this is a Outbound connector storageRunner.saveUploadRunner(connectorAnnotation.name(), connectorAnnotation.type(), clazz, jarStorageEntity); + listLightRunners.add(getLightFromConnectorAnnotation(connectorAnnotation)); + logLoadJar.append("ConnectorDetection["); logLoadJar.append(connectorAnnotation.name()); logLoadJar.append("], type["); @@ -193,8 +214,8 @@ public void loadStorageFromUploadPath() { jarStorageEntity.loadLog = logLoadJar.toString(); storageRunner.updateJarStorage(jarStorageEntity); logOperation.log(OperationEntity.Operation.SERVERINFO, - "Load [" + jarFile.getPath() + "] connectors: " + nbConnectors + " runners: " + nbRunners + " in " - + (endOperation - beginOperation) + " ms "); + "Load [" + jarFile.getPath() + "] connectors: " + nbConnectors + " runners: " + nbRunners + " in " + ( + endOperation - beginOperation) + " ms "); } catch (Exception e) { logOperation.log(OperationEntity.Operation.ERROR, @@ -203,4 +224,24 @@ public void loadStorageFromUploadPath() { } } + + public List getAllRunners() { + return listLightRunners; + } + + private RunnerLightDefinition getLightFromRunner(AbstractRunner runner) { + RunnerLightDefinition runnerLightDefinition = new RunnerLightDefinition(); + runnerLightDefinition.name = runner.getName(); + runnerLightDefinition.type = runner.getType(); + runnerLightDefinition.origin = RunnerDefinitionEntity.Origin.JARFILE; + return runnerLightDefinition; + } + + private RunnerLightDefinition getLightFromConnectorAnnotation(OutboundConnector connectorAnnotation) { + RunnerLightDefinition runnerLightDefinition = new RunnerLightDefinition(); + runnerLightDefinition.name = connectorAnnotation.name(); + runnerLightDefinition.type = connectorAnnotation.type(); + runnerLightDefinition.origin = RunnerDefinitionEntity.Origin.JARFILE; + return runnerLightDefinition; + } } diff --git a/src/main/java/io/camunda/cherry/runner/StorageRunner.java b/src/main/java/io/camunda/cherry/runner/StorageRunner.java index 38ca43f..c9b69d3 100644 --- a/src/main/java/io/camunda/cherry/runner/StorageRunner.java +++ b/src/main/java/io/camunda/cherry/runner/StorageRunner.java @@ -70,6 +70,7 @@ public class StorageRunner { */ public JarStorageEntity saveJarRunner(File jarFile) throws TechnicalException { String connectorName = jarFile.getName(); + logger.info("StorageRunner.saveJarRunner: file[{}] connectorName[{}]", jarFile.getPath(), connectorName); JarStorageEntity jarStorageEntity = jarDefinitionRepository.findByName(connectorName); if (jarStorageEntity != null) @@ 
-177,12 +178,10 @@ public RunnerDefinitionEntity saveUploadRunner(String name, * @return a RunnerDefinitionEntity, saved. */ public RunnerDefinitionEntity saveUploadRunner(AbstractRunner runner, JarStorageEntity jarDefinition) { - RunnerDefinitionEntity runnerDefinition = runnerDefinitionRepository.selectByName(runner.getName()); - if (runnerDefinition != null) - return runnerDefinition; - - runnerDefinition = new RunnerDefinitionEntity(); - + RunnerDefinitionEntity runnerDefinition = runnerDefinitionRepository.selectByType(runner.getType()); + if (runnerDefinition == null) { + runnerDefinition = new RunnerDefinitionEntity(); + } runnerDefinition.name = runner.getName(); runnerDefinition.classname = runner.getClass().getCanonicalName(); runnerDefinition.jar = jarDefinition; @@ -241,20 +240,18 @@ public void writeJarBlob(Session session, JarStorageEntity jarStorageEntity, Inp * @throws IOException in case of error during the operation */ public RunnerDefinitionEntity saveEmbeddedRunner(AbstractRunner runner) throws IOException { - RunnerDefinitionEntity runnerDefinition = runnerDefinitionRepository.selectByName(runner.getName()); - if (runnerDefinition != null) - return runnerDefinition; - - runnerDefinition = new RunnerDefinitionEntity(); - + RunnerDefinitionEntity runnerDefinition = runnerDefinitionRepository.selectByType(runner.getType()); + if (runnerDefinition == null) { + runnerDefinition = new RunnerDefinitionEntity(); + // start it by default + runnerDefinition.activeRunner = true; + } runnerDefinition.name = runner.getName(); runnerDefinition.classname = runner.getClass().getCanonicalName(); runnerDefinition.type = runner.getType(); runnerDefinition.collectionName = runner.getCollectionName(); runnerDefinition.origin = RunnerDefinitionEntity.Origin.EMBEDDED; - // start it by default - runnerDefinition.activeRunner = true; return runnerDefinitionRepository.save(runnerDefinition); } @@ -288,13 +285,42 @@ public List getRunners(Filter filter) { if (filter.filterType == null) return true; return t.type.equals(filter.filterType); + }).filter(t -> { + if (filter.jarFileName == null) { + return true; + } else { + return t.jar != null && filter.jarFileName.equals(t.jar.name); + } }).toList(); } - public boolean existRunner(String runnerName) { - return runnerDefinitionRepository.selectByName(runnerName) != null; + /** + * existRunner by type + * + * @param runnerType type of runner + * @return true if the runner exists + */ + public boolean existRunnerByType(String runnerType) { + return runnerDefinitionRepository.selectByType(runnerType) != null; + } + + /** + * Remove an entity - does not remove the history of execution + * + * @param entity entity to remove + */ + + public void removeEntity(RunnerDefinitionEntity entity) { + runnerDefinitionRepository.delete(entity); } + /* ******************************************************************** */ + /* */ + /* Remove entity */ + /* */ + /* Remove the entity */ + /* ******************************************************************** */ + public static class Filter { /** * Null: all runners, else True or False @@ -308,6 +334,11 @@ public static class Filter { */ Boolean storeOnly; + /** + * Only jar runner inside a specific JarFile + */ + String jarFileName; + public Filter isActive(boolean activeOnly) { this.activeOnly = activeOnly; return this; @@ -327,5 +358,10 @@ public Filter type(String type) { this.filterType = type; return this; } + + public Filter jarFileName(String jarFileName) { + this.jarFileName = jarFileName; + return this; + 
} } } diff --git a/src/main/java/io/camunda/cherry/runtime/CherryMain.java b/src/main/java/io/camunda/cherry/runtime/CherryMain.java index 4ced233..e788fe8 100644 --- a/src/main/java/io/camunda/cherry/runtime/CherryMain.java +++ b/src/main/java/io/camunda/cherry/runtime/CherryMain.java @@ -25,7 +25,7 @@ public class CherryMain { RunnerFactory runnerFactory; @Autowired - JobRunnerFactory cherryJobRunnerFactory; + JobRunnerFactory jobRunnerFactory; @PostConstruct public void init() { @@ -33,16 +33,19 @@ public void init() { logger.info("----- CherryMain.1 Load all embedded runner"); runnerFactory.init(); + logger.info("----- CherryMain.2 purge non existing anymore runner"); + runnerFactory.synchronize(); + // at this point, the table is up-to-date, class loader is correct : let's start all runners - logger.info("----- CherryMain.4 Start all runners"); - cherryJobRunnerFactory.startAll(); + logger.info("----- CherryMain.3 Start all runners"); + jobRunnerFactory.startAll(); } @PreDestroy public void end() { logger.info("----- End is called"); - cherryJobRunnerFactory.stopAll(); + jobRunnerFactory.stopAll(); } diff --git a/src/main/java/io/camunda/cherry/runtime/HistoryPerformance.java b/src/main/java/io/camunda/cherry/runtime/HistoryPerformance.java index 6acd238..5361a74 100644 --- a/src/main/java/io/camunda/cherry/runtime/HistoryPerformance.java +++ b/src/main/java/io/camunda/cherry/runtime/HistoryPerformance.java @@ -89,8 +89,8 @@ public Performance getPerformance(String runnerType, LocalDateTime dateNow, Peri case FAIL -> interval.executionsFailed++; case BPMNERROR -> interval.executionsBpmnErrors++; } - if (runnerExecutionEntity.executionMs > interval.picTimeInMs) - interval.picTimeInMs = runnerExecutionEntity.executionMs; + if (runnerExecutionEntity.executionMs > interval.peakTimeInMs) + interval.peakTimeInMs = runnerExecutionEntity.executionMs; } // build the list and calculate average @@ -103,8 +103,8 @@ public Performance getPerformance(String runnerType, LocalDateTime dateNow, Peri sumTotalExecutionTimeInMs += interval.sumOfExecutionTime; sumTotalExecutions += interval.executions; - if (interval.picTimeInMs > performance.picTimeInMs) - performance.picTimeInMs = interval.picTimeInMs; + if (interval.peakTimeInMs > performance.peakTimeInMs) + performance.peakTimeInMs = interval.peakTimeInMs; } // global values @@ -174,7 +174,7 @@ public enum PeriodStatistic { } public static class Performance { - public long picTimeInMs; + public long peakTimeInMs; public long executions; public long averageTimeInMs; public List listIntervals = new ArrayList<>(); @@ -192,7 +192,7 @@ public static class Interval { public long executionsSucceeded = 0; public long executionsFailed = 0; public long executionsBpmnErrors = 0; - public long picTimeInMs = 0; + public long peakTimeInMs = 0; public long averageTimeInMs = 0; public Interval(String slot, LocalDateTime slotTime) { diff --git a/src/main/java/io/camunda/cherry/store/StoreRestController.java b/src/main/java/io/camunda/cherry/store/StoreRestController.java index 01ec435..34147f6 100644 --- a/src/main/java/io/camunda/cherry/store/StoreRestController.java +++ b/src/main/java/io/camunda/cherry/store/StoreRestController.java @@ -49,6 +49,7 @@ public List> listConnectorInStore() { try { String lastRelease = storeService.getLatestRelease(); Map connectors = storeService.listConnectors(lastRelease); + List listRunnersEntity = runnerFactory.getAllRunnersEntity( new StorageRunner.Filter().isStore(true)); Map mapRunners = listRunnersEntity.stream() diff --git 
a/src/main/java/io/camunda/cherry/store/StoreService.java b/src/main/java/io/camunda/cherry/store/StoreService.java index d481b4f..63a59fd 100644 --- a/src/main/java/io/camunda/cherry/store/StoreService.java +++ b/src/main/java/io/camunda/cherry/store/StoreService.java @@ -202,10 +202,9 @@ private JsonNode getElementTemplate(String name, String release) { private String getMavenCentralUrl(String release, String name) { String groupId = ""; String artifactId = ""; + String url="https://raw.githubusercontent.com/" + REPO + "/" + release + "/connectors/" + name + "/pom.xml"; try { - String pom = restTemplate.getForObject( - "https://raw.githubusercontent.com/" + REPO + "/" + release + "/connectors/" + name + "/pom.xml", - String.class); + String pom = restTemplate.getForObject(url,String.class); DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); DocumentBuilder db = dbf.newDocumentBuilder(); Document doc = db.parse(new ByteArrayInputStream(pom.getBytes())); @@ -223,7 +222,7 @@ private String getMavenCentralUrl(String release, String name) { } catch (ParserConfigurationException | SAXException | IOException | XPathExpressionException e) { throw new TechnicalException("ControllerPage building the maven url from the pom", e); } catch (Exception ex) { - throw new TechnicalException("Can't access the repository", ex); + throw new TechnicalException("Can't access the repository ["+url+"]", ex); } } diff --git a/src/main/java/io/camunda/cherry/zeebe/ZeebeConfiguration.java b/src/main/java/io/camunda/cherry/zeebe/ZeebeConfiguration.java index f2573e9..753fb30 100644 --- a/src/main/java/io/camunda/cherry/zeebe/ZeebeConfiguration.java +++ b/src/main/java/io/camunda/cherry/zeebe/ZeebeConfiguration.java @@ -14,7 +14,6 @@ public class ZeebeConfiguration { @Nullable private String gateway; - @Value("${zeebe.client.security.plaintext:true}") @Nullable private Boolean plaintext; @@ -64,7 +63,6 @@ public String getGatewayAddress() { return gateway; } - @Nullable public String getGateway() { return gateway; diff --git a/src/main/java/io/camunda/cherry/zeebe/ZeebeContainer.java b/src/main/java/io/camunda/cherry/zeebe/ZeebeContainer.java index dfe5418..53994fe 100644 --- a/src/main/java/io/camunda/cherry/zeebe/ZeebeContainer.java +++ b/src/main/java/io/camunda/cherry/zeebe/ZeebeContainer.java @@ -29,16 +29,13 @@ public class ZeebeContainer { @Autowired ZeebeConfiguration zeebeConfiguration; - - private ZeebeClient zeebeClient; - @Autowired LogOperation logOperation; + private ZeebeClient zeebeClient; /** * Number of thread currently used at the Zeebe Client */ - /** * Start the ZeebeClient */ @@ -47,12 +44,11 @@ public void startZeebeeClient() throws TechnicalException { String validation = zeebeConfiguration.checkValidation(); if (validation != null) { logger.error("Incorrect configuration: " + validation); - logOperation.logError("Incorrect Zeebe configuration "+validation); + logOperation.logError("Incorrect Zeebe configuration " + validation); return; } - - logger.info("ZeebeContainer.startZeebe {} ",zeebeConfiguration.getLogConfiguration()); + logger.info("ZeebeContainer.startZeebe {} ", zeebeConfiguration.getLogConfiguration()); ZeebeClientBuilder zeebeClientBuilder; if (zeebeConfiguration.isCloudConfiguration()) { zeebeClientBuilder = ZeebeClient.newCloudClientBuilder() @@ -62,17 +58,16 @@ public void startZeebeeClient() throws TechnicalException { .withRegion(zeebeConfiguration.getRegion()); } else { - zeebeClientBuilder = ZeebeClient.newClientBuilder() - 
.gatewayAddress(zeebeConfiguration.getGatewayAddress()); + zeebeClientBuilder = ZeebeClient.newClientBuilder().gatewayAddress(zeebeConfiguration.getGatewayAddress()); if (zeebeConfiguration.isPlaintext()) zeebeClientBuilder = zeebeClientBuilder.usePlaintext(); } try { zeebeClient = zeebeClientBuilder.numJobWorkerExecutionThreads(zeebeConfiguration.getNumberOfThreads()).build(); - } catch(Exception e) { - logOperation.logError("Can't start ZeebeClient ",e); - throw new TechnicalException("Can't start ZeebeClient",e); + } catch (Exception e) { + logOperation.logError("Can't start ZeebeClient ", e); + throw new TechnicalException("Can't start ZeebeClient", e); } pingZeebeClient(); @@ -85,7 +80,7 @@ public void startZeebeeClient() throws TechnicalException { * @return true if the zeebe server is alive, else false */ public boolean pingZeebeClient() { - if (zeebeClient==null) + if (zeebeClient == null) return false; try { @@ -102,7 +97,7 @@ public boolean pingZeebeClient() { * Stop the zeebeClient */ public void stopZeebeeClient() { - if (zeebeClient==null) + if (zeebeClient == null) return; zeebeClient.close(); zeebeClient = null; @@ -124,6 +119,7 @@ public boolean isOk() { /** * get the number of jobs in the + * * @return the number of threads used when the ZeebeClient is started */ public int getNumberOfThreads() { @@ -131,12 +127,10 @@ public int getNumberOfThreads() { } /** - * * @return the number of threads used when the ZeebeClient is started */ public int getMaxJobsActive() { return zeebeClient.getConfiguration().getDefaultJobWorkerMaxJobsActive(); } - } diff --git a/src/main/resources/application-postgres.yaml b/src/main/resources/application-postgres.yaml index 1284c84..a6c65e8 100644 --- a/src/main/resources/application-postgres.yaml +++ b/src/main/resources/application-postgres.yaml @@ -48,7 +48,7 @@ spring.datasource: username: "camunda" password: "camunda" driver-class-name: "org.postgresql.Driver" -spring.jpa.database-platform: "org.hibernate.dialect.PostgreSQL94Dialect" +spring.jpa.database-platform: org.hibernate.dialect.PostgreSQL94Dialect org.hibernate.dialect: org.hibernate.dialect.PostgreSQL94Dialect # hibernate.jdbc.use_streams_for_binary: true
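
For reference, here is a minimal sketch of running the Cherry runtime locally against the Postgres profile configured above. The profile name and datasource properties come from `docker-compose-cherry-postgres.yaml` and `application-postgres.yaml`; the JAR path, the `localhost` hosts, and the exact Zeebe gateway property are assumptions to adapt to your environment:

```shell
# Hypothetical local run of the Cherry runtime with the postgres Spring profile.
# Adjust the JAR path, database host/credentials, and Zeebe gateway address.
java -jar target/zeebe-cherry-runtime.jar \
  --spring.profiles.active=postgres \
  --spring.datasource.url=jdbc:postgresql://localhost:5432/cherrydb \
  --spring.datasource.username=camunda \
  --spring.datasource.password=camundapassword \
  --zeebe.client.broker.gateway-address=localhost:26500 \
  --zeebe.client.security.plaintext=true
```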