diff --git a/scenarios/distributed-kafka/docker-compose.yml b/scenarios/distributed-kafka/docker-compose.yml new file mode 100644 index 00000000..b4155788 --- /dev/null +++ b/scenarios/distributed-kafka/docker-compose.yml @@ -0,0 +1,65 @@ +# Use this only in dev environments. It's not intended for production usage. +version: '3.9' +services: + zookeeper: + image: confluentinc/cp-zookeeper:latest + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + ports: + - '22181:2181' + + kafka: + image: confluentinc/cp-kafka:latest + depends_on: + - zookeeper + ports: + - '9092:9092' + - '29092:29092' + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + + init-kafka: + image: confluentinc/cp-kafka:latest + depends_on: + - kafka + entrypoint: [ '/bin/bash', '-c' ] + command: | + " + # blocks until kafka is reachable + echo -e 'Currently available topics:' + kafka-topics --bootstrap-server kafka:9092 --list + + echo -e 'Creating kafka topics...' + kafka-topics --bootstrap-server kafka:9092 --create --if-not-exists --topic polyflow-task --replication-factor 1 --partitions 1 + kafka-topics --bootstrap-server kafka:9092 --create --if-not-exists --topic polyflow-data --replication-factor 1 --partitions 1 + + echo -e 'Resulting topics:' + kafka-topics --bootstrap-server kafka:9092 --list + " + + postgres-engine: + image: postgres:13.2 + container_name: postgres-engine + environment: + POSTGRES_USER: polyflow_user + POSTGRES_PASSWORD: S3Cr3T! + POSTGRES_DB: enginedb + ports: + - '25433:5432' + + postgres-tasklist: + image: postgres:13.2 + container_name: postgres-tasklist + environment: + POSTGRES_USER: polyflow_user + POSTGRES_PASSWORD: S3Cr3T! 
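+ # dev-only credentials; these must match the spring.datasource settings of the example applications (see the application.yml later in this patch)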
+ POSTGRES_DB: tasklistdb + ports: + - '25432:5432' + diff --git a/scenarios/distributed-kafka/pom.xml b/scenarios/distributed-kafka/pom.xml index cabd9a43..337a7265 100755 --- a/scenarios/distributed-kafka/pom.xml +++ b/scenarios/distributed-kafka/pom.xml @@ -24,11 +24,11 @@ + <dependency> - <groupId>org.axonframework</groupId> - <artifactId>axon-server-connector</artifactId> - <version>4.6.7</version> - <scope>runtime</scope> + <groupId>org.axonframework.extensions.kafka</groupId> + <artifactId>axon-kafka-spring-boot-starter</artifactId> + <version>4.6.0</version> diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/pom.xml b/scenarios/distributed-kafka/process-application-local-polyflow/pom.xml index a5810514..62928597 100755 --- a/scenarios/distributed-kafka/process-application-local-polyflow/pom.xml +++ b/scenarios/distributed-kafka/process-application-local-polyflow/pom.xml @@ -30,12 +30,26 @@ <groupId>io.holunda.polyflow</groupId> <artifactId>polyflow-datapool-core</artifactId> + <dependency> + <groupId>io.holunda.polyflow</groupId> + <artifactId>polyflow-bus-jackson</artifactId> + </dependency> <groupId>io.holunda.polyflow</groupId> <artifactId>polyflow-camunda-bpm-taskpool-job-sender</artifactId> + + <dependency> + <groupId>org.axonframework.extensions.kafka</groupId> + <artifactId>axon-kafka-spring-boot-starter</artifactId> + </dependency> + <dependency> + <groupId>org.apache.kafka</groupId> + <artifactId>kafka-clients</artifactId> + </dependency> + <groupId>org.postgresql</groupId> diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/ExampleProcessApplicationLocalPolyflowDistributedWithKafka.kt b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/ExampleProcessApplicationLocalPolyflowDistributedWithKafka.kt similarity index 55% rename from scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/ExampleProcessApplicationLocalPolyflowDistributedWithKafka.kt rename to scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/ExampleProcessApplicationLocalPolyflowDistributedWithKafka.kt index b39c76d7..0348e1b5 100755 --- a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/ExampleProcessApplicationLocalPolyflowDistributedWithKafka.kt +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/ExampleProcessApplicationLocalPolyflowDistributedWithKafka.kt @@ -1,30 +1,26 @@ package io.holunda.polyflow.example.process.approval +import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.SerializationFeature import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper -import com.thoughtworks.xstream.XStream -import com.thoughtworks.xstream.security.AnyTypePermission -import io.holunda.polyflow.bus.jackson.config.FallbackPayloadObjectMapperAutoConfiguration.Companion.PAYLOAD_OBJECT_MAPPER +import com.fasterxml.jackson.module.kotlin.registerKotlinModule +import io.holunda.polyflow.bus.jackson.ObjectMapperConfigurationHelper +import io.holunda.polyflow.bus.jackson.config.FallbackPayloadObjectMapperAutoConfiguration +import io.holunda.polyflow.bus.jackson.configurePolyflowJacksonObjectMapper import io.holunda.polyflow.datapool.core.EnablePolyflowDataPool +import io.holunda.polyflow.example.process.approval.RequestApprovalProcessConfiguration import io.holunda.polyflow.taskpool.core.EnablePolyflowTaskPool -import org.axonframework.commandhandling.CommandBus -import org.axonframework.commandhandling.gateway.CommandGateway -import org.axonframework.commandhandling.gateway.DefaultCommandGateway import org.axonframework.eventhandling.deadletter.jpa.DeadLetterEventEntry import
org.axonframework.eventhandling.tokenstore.jpa.TokenEntry +import org.axonframework.eventsourcing.eventstore.jpa.DomainEventEntry +import org.axonframework.eventsourcing.eventstore.jpa.SnapshotEventEntry import org.axonframework.modelling.saga.repository.jpa.SagaEntry -import org.axonframework.serialization.xml.CompactDriver -import org.axonframework.springboot.util.ConditionalOnMissingQualifiedBean -import org.axonframework.springboot.util.XStreamSecurityTypeUtility import org.springframework.beans.factory.annotation.Qualifier import org.springframework.boot.SpringApplication import org.springframework.boot.autoconfigure.SpringBootApplication -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean import org.springframework.boot.autoconfigure.domain.EntityScan -import org.springframework.context.ApplicationContext import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Import import org.springframework.context.annotation.Primary @@ -51,35 +47,28 @@ fun main(args: Array<String>) { basePackageClasses = [ TokenEntry::class, SagaEntry::class, - DeadLetterEventEntry::class + DeadLetterEventEntry::class, + DomainEventEntry::class, + SnapshotEventEntry::class ] ) class ExampleProcessApplicationLocalPolyflowDistributedWithKafka { + @Qualifier(FallbackPayloadObjectMapperAutoConfiguration.PAYLOAD_OBJECT_MAPPER) @Bean - fun objectMapper(): ObjectMapper { - return jacksonObjectMapper() + @Primary + fun objectMapper(): ObjectMapper = + jacksonObjectMapper() .registerModule(JavaTimeModule()) .configurePolyflowJacksonObjectMapper() .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) - } - - @Bean("defaultAxonXStream") - @ConditionalOnMissingBean - fun defaultAxonXStream(applicationContext: ApplicationContext): XStream { - val xStream = XStream(CompactDriver()) - xStream.allowTypesByWildcard(XStreamSecurityTypeUtility.autoConfigBasePackages(applicationContext)) - // This configures XStream to permit any class to be deserialized. - // FIXME: We might want to make this more restrictive to improve security - xStream.addPermission(AnyTypePermission.ANY) - return xStream - } - /* - @Bean - @Primary - @ConditionalOnMissingQualifiedBean(beanClass = CommandGateway::class, qualifier = "unqualified") - fun defaultCommandGateway(bus: CommandBus): CommandGateway = DefaultCommandGateway.builder().commandBus(bus).build() - */ + @Bean("defaultAxonObjectMapper") + @Qualifier("defaultAxonObjectMapper") + fun defaultAxonObjectMapper(): ObjectMapper = + jacksonObjectMapper() + .registerModule(JavaTimeModule()) + .configurePolyflowJacksonObjectMapper() + .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) } diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/KafkaTopicRouter.kt b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/KafkaTopicRouter.kt new file mode 100644 index 00000000..86761922 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/KafkaTopicRouter.kt @@ -0,0 +1,16 @@ +package io.holunda.polyflow.example.process.approval.kafka + +import javax.validation.constraints.NotNull + +/** + * Router that decides to which Kafka topic an event is published. + */ +fun interface KafkaTopicRouter { + /** + * Retrieves the topic name for a given payload type. + * + * @param payloadType the payload type.
+ * @return the topic, or null if the event should be dropped. + */ + fun topicForPayloadType(payloadType: @NotNull Class<*>): String? +} diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/PolyflowAxonKafkaConfiguration.kt b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/PolyflowAxonKafkaConfiguration.kt new file mode 100644 index 00000000..cc1c93cb --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/PolyflowAxonKafkaConfiguration.kt @@ -0,0 +1,133 @@ +package io.holunda.polyflow.example.process.approval.kafka + +import org.apache.kafka.clients.consumer.ConsumerRecord +import org.apache.kafka.clients.producer.ProducerRecord +import org.axonframework.common.AxonConfigurationException +import org.axonframework.config.EventProcessingConfigurer +import org.axonframework.eventhandling.EventMessage +import org.axonframework.eventhandling.PropagatingErrorHandler +import org.axonframework.extensions.kafka.KafkaProperties +import org.axonframework.extensions.kafka.autoconfig.KafkaAutoConfiguration +import org.axonframework.extensions.kafka.eventhandling.DefaultKafkaMessageConverter +import org.axonframework.extensions.kafka.eventhandling.KafkaMessageConverter +import org.axonframework.extensions.kafka.eventhandling.producer.KafkaEventPublisher +import org.axonframework.extensions.kafka.eventhandling.producer.KafkaPublisher +import org.axonframework.extensions.kafka.eventhandling.producer.ProducerFactory +import org.axonframework.serialization.Serializer +import org.springframework.beans.factory.annotation.Qualifier +import org.springframework.boot.autoconfigure.AutoConfigureBefore +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean +import org.springframework.boot.context.properties.EnableConfigurationProperties +import org.springframework.context.annotation.Bean +import org.springframework.context.annotation.Configuration +import org.springframework.context.annotation.Primary +import java.util.* + +/** + * Configures publishing of Polyflow events to Kafka, as long as Kafka is enabled (i.e. not disabled).
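+ * + * Payload types are matched against the mappings in [PolyflowAxonKafkaProperties.topics]; events whose payload type has no mapped topic are dropped by the [RoutingKafkaEventPublisher] instead of being published.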
+ */ +@Configuration +@AutoConfigureBefore( + KafkaAutoConfiguration::class +) // we should run before the Axon Kafka autoconfiguration +@EnableConfigurationProperties(PolyflowAxonKafkaProperties::class) +class PolyflowAxonKafkaConfiguration { + @ConditionalOnMissingBean + @Bean + fun kafkaTopicRouter(properties: PolyflowAxonKafkaProperties): KafkaTopicRouter { + return KafkaTopicRouter { payloadType -> + properties.topics.firstOrNull { it.payloadType.isAssignableFrom(payloadType) }?.topic + } + } + + @Bean + @Primary + fun routingKafkaMessageConverter( + @Qualifier("eventSerializer") eventSerializer: Serializer, + kafkaTopicRouter: KafkaTopicRouter + ): KafkaMessageConverter<String, ByteArray> { + val defaultConverter: KafkaMessageConverter<String, ByteArray> = + DefaultKafkaMessageConverter.builder().serializer(eventSerializer).build() + return object : KafkaMessageConverter<String, ByteArray> { + override fun createKafkaMessage( + eventMessage: EventMessage<*>, + topic: String + ): ProducerRecord<String, ByteArray> { + val topicOverride = kafkaTopicRouter.topicForPayloadType(eventMessage.payloadType) + return defaultConverter.createKafkaMessage(eventMessage, topicOverride ?: topic) + } + + override fun readKafkaMessage(consumerRecord: ConsumerRecord<String, ByteArray>): Optional<EventMessage<*>> { + return defaultConverter.readKafkaMessage(consumerRecord) + } + } + } + + /** + * Configures a KafkaEventPublisher that sends events to Kafka only if the KafkaTopicRouter routes them to a topic. + * + * @see KafkaAutoConfiguration.kafkaEventPublisher + */ + @Bean + fun routingKafkaEventPublisher( + kafkaPublisher: KafkaPublisher<String, ByteArray>, + kafkaProperties: KafkaProperties, + eventProcessingConfigurer: EventProcessingConfigurer, + kafkaTopicRouter: KafkaTopicRouter + ): KafkaEventPublisher<String, ByteArray> { + val kafkaEventPublisher: KafkaEventPublisher<String, ByteArray> = + RoutingKafkaEventPublisher.builder<String, ByteArray>() + .kafkaPublisher(kafkaPublisher) + .kafkaTopicRouter(kafkaTopicRouter) + .build() + + /* + * Register an invocation error handler which re-throws any exception. + * This ensures that a TrackingEventProcessor enters error mode and retries, and that a + * SubscribingEventProcessor bubbles the exception up to the caller. For more information see + * https://docs.axoniq.io/reference-guide/configuring-infrastructure-components/event-processing/event-processors#error-handling + */ + // TODO: Check if this still works. Our publisher is no longer in the default processing group, I think.
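+ // assignHandlerTypesMatching below pins the publisher's handler type to the Kafka default processing group, so it is not assigned to a package-derived processing group.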
+ eventProcessingConfigurer.registerEventHandler { kafkaEventPublisher } + .registerListenerInvocationErrorHandler( + KafkaEventPublisher.DEFAULT_PROCESSING_GROUP + ) { PropagatingErrorHandler.instance() } + .assignHandlerTypesMatching( + KafkaEventPublisher.DEFAULT_PROCESSING_GROUP + ) { clazz: Class<*> -> + clazz.isAssignableFrom( + KafkaEventPublisher::class.java + ) + } + when (val processorMode: KafkaProperties.EventProcessorMode = kafkaProperties.producer.eventProcessorMode) { + KafkaProperties.EventProcessorMode.SUBSCRIBING -> eventProcessingConfigurer.registerSubscribingEventProcessor(KafkaEventPublisher.DEFAULT_PROCESSING_GROUP) + KafkaProperties.EventProcessorMode.TRACKING -> eventProcessingConfigurer.registerTrackingEventProcessor(KafkaEventPublisher.DEFAULT_PROCESSING_GROUP) + KafkaProperties.EventProcessorMode.POOLED_STREAMING -> eventProcessingConfigurer.registerPooledStreamingEventProcessor(KafkaEventPublisher.DEFAULT_PROCESSING_GROUP) + else -> throw AxonConfigurationException("Unknown Event Processor Mode [$processorMode] detected") + } + + return kafkaEventPublisher + } + + // We need to duplicate the bean factory from KafkaAutoConfiguration because there is no way to set `publisherAckTimeout` via configuration properties + @Bean(destroyMethod = "shutDown") + fun kafkaAcknowledgingPublisher( + kafkaProducerFactory: ProducerFactory<String, ByteArray>, + kafkaMessageConverter: KafkaMessageConverter<String, ByteArray>, + configuration: org.axonframework.config.Configuration, + properties: KafkaProperties, + serializer: Serializer + ): KafkaPublisher<String, ByteArray> { + return KafkaPublisher + .builder<String, ByteArray>() + .producerFactory(kafkaProducerFactory) + .messageConverter(kafkaMessageConverter) + .messageMonitor(configuration.messageMonitor(KafkaPublisher::class.java, "kafkaPublisher")) + .topicResolver { Optional.of(properties.defaultTopic) } + .serializer(serializer) + .publisherAckTimeout( + properties.producer.properties.getOrDefault("delivery.timeout.ms", "30000").toLong() + 1000 + ) + .build() + } +} diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/PolyflowAxonKafkaProperties.kt b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/PolyflowAxonKafkaProperties.kt new file mode 100644 index 00000000..6f132f51 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/PolyflowAxonKafkaProperties.kt @@ -0,0 +1,21 @@ +package io.holunda.polyflow.example.process.approval.kafka + +import org.springframework.boot.context.properties.ConfigurationProperties +import org.springframework.boot.context.properties.ConstructorBinding +import org.springframework.boot.context.properties.NestedConfigurationProperty + +@ConfigurationProperties(prefix = "polyflow.axon.kafka") +@ConstructorBinding +data class PolyflowAxonKafkaProperties( + /** + * List of mappings from payload class to the Kafka topic that payloads of this class are routed to.
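+ * + * A minimal example, mirroring the application.yml of this scenario: + * ``` + * polyflow.axon.kafka.topics: + * - payloadType: io.holunda.camunda.taskpool.api.task.TaskEvent + * topic: polyflow-task + * ```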
+ */ + @NestedConfigurationProperty + val topics: List<PayloadTypeToTopic> +) { + @ConstructorBinding + data class PayloadTypeToTopic( + val payloadType: Class<*>, + val topic: String + ) +} diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/RoutingKafkaEventPublisher.java b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/RoutingKafkaEventPublisher.java new file mode 100644 index 00000000..97fba636 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/kotlin/io/holunda/polyflow/example/process/approval/kafka/RoutingKafkaEventPublisher.java @@ -0,0 +1,82 @@ +package io.holunda.polyflow.example.process.approval.kafka; + +import org.axonframework.eventhandling.EventMessage; +import org.axonframework.extensions.kafka.eventhandling.producer.KafkaEventPublisher; +import org.axonframework.extensions.kafka.eventhandling.producer.KafkaPublisher; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.axonframework.common.BuilderUtils.assertNonNull; + +/** + * A {@link KafkaEventPublisher} that routes incoming events based on payload type. + * + * @param <K> key type. + * @param <V> value type. + */ +public class RoutingKafkaEventPublisher<K, V> extends KafkaEventPublisher<K, V> { + + private static final Logger logger = LoggerFactory.getLogger(RoutingKafkaEventPublisher.class); + + private final KafkaTopicRouter kafkaTopicRouter; + + protected RoutingKafkaEventPublisher(Builder<K, V> builder) { + super(builder); + this.kafkaTopicRouter = builder.kafkaTopicRouter; + } + + @Override + public Object handle(EventMessage<?> event) { + if (kafkaTopicRouter.topicForPayloadType(event.getPayloadType()) != null) { + super.handle(event); + } else if (logger.isTraceEnabled()) { + logger.trace("Message will not be published to Kafka because its type is not configured to go to any topic: {}", event.getPayload()); + } + return null; + } + + /** + * Instantiate a Builder to be able to create a {@link RoutingKafkaEventPublisher}. + * <p>
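+ * Typical usage, as in PolyflowAxonKafkaConfiguration: {@code RoutingKafkaEventPublisher.<String, byte[]>builder().kafkaPublisher(kafkaPublisher).kafkaTopicRouter(kafkaTopicRouter).build()}. + * <p>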
+ * The {@link KafkaPublisher} and {@link KafkaTopicRouter} are hard requirements and as such should be provided. + * + * @param <K> a generic type for the key of the {@link KafkaPublisher} + * @param <V> a generic type for the value of the {@link KafkaPublisher} + * @return a Builder to be able to create a {@link RoutingKafkaEventPublisher} + */ + public static <K, V> Builder<K, V> builder() { + return new Builder<>(); + } + + public static class Builder<K, V> extends KafkaEventPublisher.Builder<K, V> { + private KafkaTopicRouter kafkaTopicRouter; + + /** + * Sets the {@link KafkaTopicRouter} to be used by this {@link KafkaEventPublisher} to determine which events to publish. + * + * @param kafkaTopicRouter the {@link KafkaTopicRouter} to be used by this {@link KafkaEventPublisher} to determine which events to publish + * @return the current Builder instance, for fluent interfacing + */ + public Builder<K, V> kafkaTopicRouter(KafkaTopicRouter kafkaTopicRouter) { + this.kafkaTopicRouter = kafkaTopicRouter; + return this; + } + + @Override + public Builder<K, V> kafkaPublisher(KafkaPublisher<K, V> kafkaPublisher) { + super.kafkaPublisher(kafkaPublisher); + return this; + } + + @Override + public RoutingKafkaEventPublisher<K, V> build() { + return new RoutingKafkaEventPublisher<>(this); + } + + @Override + protected void validate() { + super.validate(); + assertNonNull(kafkaTopicRouter, "The KafkaTopicRouter is a hard requirement and must be provided"); + } + } +} diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/application.yml b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/application.yml new file mode 100644 index 00000000..73de12e4 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/application.yml @@ -0,0 +1,134 @@ +server: + port: 8080 +spring: + application: + name: example-process-approval + datasource: + url: jdbc:postgresql://localhost:25433/enginedb + username: polyflow_user + password: S3Cr3T! + jpa: + generate-ddl: false + hibernate.ddl-auto: validate + show-sql: false + open-in-view: false + database-platform: io.holunda.polyflow.example.infrastructure.jpa.NoToastPostgresSQLDialect + flyway: + enabled: true + locations: "classpath:db/migrations" + groovy: + template: + check-template-location: false + +springdoc: + swagger-ui: + try-it-out-enabled: true + display-request-duration: true + +#axon.serializer: +# general: jackson +# events: jackson +# messages: jackson + +camunda: + bpm: + login: + enabled: true + user-id: admin + admin-user: + email: admin@local + id: admin + first-name: Adminus + last-name: Administratius + password: admin + default-serialization-format: application/json + authorization: + enabled: false + history-level: full + id-generator: strong + database: + schema-update: false + type: postgres + webapp: + index-redirect-enabled: false + eventing: + task: false + +axon: + serializer: + events: jackson + messages: jackson + general: jackson + axonserver: + enabled: false + kafka: + clientid: ${STAGE:local}-${APPLICATION_NAME:engine}-${HOSTNAME:localhost} + # we intentionally provide no default values from here on to have a fail-fast behaviour + # ($ is not a legal character for a kafka topic name or a server URL) + defaulttopic: polyflow-task + publisher: + confirmation-mode: wait_for_ack + producer: + retries: 0 # If we enabled retries, there would be a chance of producing duplicate messages out of order.
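+ # Combined with confirmation-mode: wait_for_ack above, the publisher itself waits for the broker acknowledgement instead of relying on client-side retries.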
+ bootstrap-servers: localhost:29092 + event-processor-mode: tracking + properties: + max.in.flight.requests.per.connection: "1" # Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled). Even though we disabled retries, we keep this configuration here because if we ever enable retries again, we don't want to forget it. + delivery.timeout.ms: "30000" # Even with no retries, Kafka can buffer messages and send them later if there is no connection to the broker. The publisherAckTimeout of the KafkaPublisher would throw an error after 1 second and cause an axon-side retry, which might happen on a different pod and cause the same out-of-order duplicate problem as the retry. So we reduce the delivery timeout to the minimum possible (must be at least as high as request.timeout.ms) and increase the publisherAckTimeout to a little more than this. + properties: + security.protocol: PLAINTEXT + +polyflow: + axon: + kafka: + topics: + - payloadType: io.holunda.camunda.taskpool.api.task.TaskEvent + topic: polyflow-task + - payloadType: io.holunda.camunda.taskpool.api.business.DataEntryCreatedEvent + topic: polyflow-data + - payloadType: io.holunda.camunda.taskpool.api.business.DataEntryUpdatedEvent + topic: polyflow-data + - payloadType: io.holunda.camunda.taskpool.api.business.DataEntryDeletedEvent + topic: polyflow-data + integration: + collector: + camunda: + application-name: ${spring.application.name} # default + process-instance: + enabled: true + process-definition: + enabled: true + task: + enabled: true + enricher: + type: processVariables + importer: + enabled: true + task-filter-type: eventstore + sender: + enabled: true + data-entry: + enabled: true + type: simple + application-name: ${spring.application.name} # default + process-definition: + enabled: true + process-instance: + enabled: true + task: + enabled: true + type: tx + send-within-transaction: true + tasklist: + tasklist-url: http://localhost:8081/polyflow/tasks + +logging.level: + io.holunda.polyflow: + taskpool: + gateway: DEBUG + sender: INFO + core: WARN + view.simple: INFO + datapool: + sender: INFO + org.hibernate.engine.jdbc.env.internal.LobCreatorBuilderImpl: ERROR diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_10__axon_dlq.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_10__axon_dlq.sql new file mode 100644 index 00000000..7c937294 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_10__axon_dlq.sql @@ -0,0 +1,31 @@ +CREATE TABLE dead_letter_entry ( + dead_letter_id VARCHAR(255) NOT NULL, + cause_message VARCHAR(255), + cause_type VARCHAR(255), + diagnostics BYTEA, + enqueued_at TIMESTAMP NOT NULL, + last_touched TIMESTAMP, + aggregate_identifier VARCHAR(255), + event_identifier VARCHAR(255) NOT NULL, + message_type VARCHAR(255) NOT NULL, + meta_data BYTEA, + payload BYTEA NOT NULL, + payload_revision VARCHAR(255), + payload_type VARCHAR(255) NOT NULL, + sequence_number INT8, + time_stamp VARCHAR(255) NOT NULL, + token BYTEA, + token_type VARCHAR(255), + type VARCHAR(255), + processing_group VARCHAR(255) NOT NULL, + processing_started TIMESTAMP, + sequence_identifier VARCHAR(255) NOT NULL, + sequence_index INT8 NOT NULL, + PRIMARY KEY (dead_letter_id) +); + +create index IDX_dead_letter_entry_pg on dead_letter_entry 
(processing_group); +create index IDX_dead_letter_entry_pgsi on dead_letter_entry (processing_group, sequence_identifier); + +alter table dead_letter_entry + add constraint UC_dead_letter_entry_pgsisi unique (processing_group, sequence_identifier, sequence_index); diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_11__postgres_engine_7.16_to_7.17.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_11__postgres_engine_7.16_to_7.17.sql new file mode 100644 index 00000000..dfdc0c9a --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_11__postgres_engine_7.16_to_7.17.sql @@ -0,0 +1,29 @@ +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +insert into ACT_GE_SCHEMA_LOG +values ('600', CURRENT_TIMESTAMP, '7.17.0'); + +-- https://jira.camunda.com/browse/CAM-14006 -- +ALTER TABLE ACT_RU_JOB + ADD COLUMN LAST_FAILURE_LOG_ID_ varchar(64); + +ALTER TABLE ACT_RU_EXT_TASK + ADD COLUMN LAST_FAILURE_LOG_ID_ varchar(64); + +create index ACT_IDX_HI_VARINST_NAME on ACT_HI_VARINST(NAME_); +create index ACT_IDX_HI_VARINST_ACT_INST_ID on ACT_HI_VARINST(ACT_INST_ID_); diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_12__postgres_engine_7.17_to_7.18.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_12__postgres_engine_7.17_to_7.18.sql new file mode 100644 index 00000000..4fa86840 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_12__postgres_engine_7.17_to_7.18.sql @@ -0,0 +1,34 @@ +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +insert into ACT_GE_SCHEMA_LOG +values ('700', CURRENT_TIMESTAMP, '7.18.0'); + +-- https://jira.camunda.com/browse/CAM-14303 -- +ALTER TABLE ACT_RU_TASK + ADD COLUMN LAST_UPDATED_ timestamp; +create index ACT_IDX_TASK_LAST_UPDATED on ACT_RU_TASK(LAST_UPDATED_); + +-- https://jira.camunda.com/browse/CAM-14721 +ALTER TABLE ACT_RU_BATCH + ADD COLUMN START_TIME_ timestamp; + +-- https://jira.camunda.com/browse/CAM-14722 +ALTER TABLE ACT_RU_BATCH + ADD COLUMN EXEC_START_TIME_ timestamp; +ALTER TABLE ACT_HI_BATCH + ADD COLUMN EXEC_START_TIME_ timestamp; \ No newline at end of file diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_1__postgres_engine_7.14.0.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_1__postgres_engine_7.14.0.sql new file mode 100644 index 00000000..c21c0b3a --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_1__postgres_engine_7.14.0.sql @@ -0,0 +1,1378 @@ +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +create table ACT_GE_PROPERTY ( + NAME_ varchar(64), + VALUE_ varchar(300), + REV_ integer, + primary key (NAME_) +); + +insert into ACT_GE_PROPERTY +values ('schema.version', 'fox', 1); + +insert into ACT_GE_PROPERTY +values ('schema.history', 'create(fox)', 1); + +insert into ACT_GE_PROPERTY +values ('next.dbid', '1', 1); + +insert into ACT_GE_PROPERTY +values ('deployment.lock', '0', 1); + +insert into ACT_GE_PROPERTY +values ('history.cleanup.job.lock', '0', 1); + +insert into ACT_GE_PROPERTY +values ('startup.lock', '0', 1); + +insert into ACT_GE_PROPERTY +values ('telemetry.lock', '0', 1); + +insert into ACT_GE_PROPERTY +values ('installationId.lock', '0', 1); + +create table ACT_GE_BYTEARRAY ( + ID_ varchar(64), + REV_ integer, + NAME_ varchar(255), + DEPLOYMENT_ID_ varchar(64), + BYTES_ bytea, + GENERATED_ boolean, + TENANT_ID_ varchar(64), + TYPE_ integer, + CREATE_TIME_ timestamp, + ROOT_PROC_INST_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_GE_SCHEMA_LOG ( + ID_ varchar(64), + TIMESTAMP_ timestamp, + VERSION_ varchar(255), + primary key (ID_) +); + +insert into ACT_GE_SCHEMA_LOG +values ('0', CURRENT_TIMESTAMP, '7.14.0'); + +create table ACT_RE_DEPLOYMENT ( + ID_ varchar(64), + NAME_ varchar(255), + DEPLOY_TIME_ timestamp, + SOURCE_ varchar(255), + TENANT_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RU_EXECUTION ( + ID_ varchar(64), + REV_ integer, + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + BUSINESS_KEY_ varchar(255), + PARENT_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + SUPER_EXEC_ varchar(64), + SUPER_CASE_EXEC_ varchar(64), + CASE_INST_ID_ varchar(64), + ACT_ID_ varchar(255), + ACT_INST_ID_ varchar(64), + IS_ACTIVE_ boolean, + IS_CONCURRENT_ boolean, + IS_SCOPE_ boolean, + IS_EVENT_SCOPE_ boolean, + SUSPENSION_STATE_ integer, + CACHED_ENT_STATE_ integer, + SEQUENCE_COUNTER_ bigint, + TENANT_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RU_JOB ( + ID_ varchar(64) NOT NULL, + REV_ integer, + TYPE_ varchar(255) NOT NULL, + LOCK_EXP_TIME_ timestamp, + LOCK_OWNER_ varchar(255), + EXCLUSIVE_ boolean, + EXECUTION_ID_ varchar(64), + PROCESS_INSTANCE_ID_ varchar(64), + PROCESS_DEF_ID_ varchar(64), + PROCESS_DEF_KEY_ varchar(255), + RETRIES_ integer, + EXCEPTION_STACK_ID_ varchar(64), + EXCEPTION_MSG_ varchar(4000), + FAILED_ACT_ID_ varchar(255), + DUEDATE_ timestamp, + REPEAT_ varchar(255), + REPEAT_OFFSET_ bigint DEFAULT 0, + HANDLER_TYPE_ varchar(255), + HANDLER_CFG_ varchar(4000), + DEPLOYMENT_ID_ varchar(64), + SUSPENSION_STATE_ integer NOT NULL DEFAULT 1, + JOB_DEF_ID_ varchar(64), + PRIORITY_ bigint NOT NULL DEFAULT 0, + SEQUENCE_COUNTER_ bigint, + TENANT_ID_ varchar(64), + CREATE_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_RU_JOBDEF ( + ID_ varchar(64) NOT NULL, + REV_ integer, + PROC_DEF_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + ACT_ID_ varchar(255), + JOB_TYPE_ varchar(255) NOT NULL, + JOB_CONFIGURATION_ varchar(255), + SUSPENSION_STATE_ integer, + JOB_PRIORITY_ bigint, + TENANT_ID_ varchar(64), + DEPLOYMENT_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RE_PROCDEF ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CATEGORY_ varchar(255), + NAME_ varchar(255), + KEY_ varchar(255) NOT NULL, + VERSION_ integer NOT NULL, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + DGRM_RESOURCE_NAME_ varchar(4000), + HAS_START_FORM_KEY_ boolean, + SUSPENSION_STATE_ integer, + TENANT_ID_ varchar(64), + VERSION_TAG_ varchar(64), + 
HISTORY_TTL_ integer, + STARTABLE_ boolean NOT NULL default TRUE, + primary key (ID_) +); + +create table ACT_RU_TASK ( + ID_ varchar(64), + REV_ integer, + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_DEF_ID_ varchar(64), + NAME_ varchar(255), + PARENT_TASK_ID_ varchar(64), + DESCRIPTION_ varchar(4000), + TASK_DEF_KEY_ varchar(255), + OWNER_ varchar(255), + ASSIGNEE_ varchar(255), + DELEGATION_ varchar(64), + PRIORITY_ integer, + CREATE_TIME_ timestamp, + DUE_DATE_ timestamp, + FOLLOW_UP_DATE_ timestamp, + SUSPENSION_STATE_ integer, + TENANT_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RU_IDENTITYLINK ( + ID_ varchar(64), + REV_ integer, + GROUP_ID_ varchar(255), + TYPE_ varchar(255), + USER_ID_ varchar(255), + TASK_ID_ varchar(64), + PROC_DEF_ID_ varchar (64), + TENANT_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RU_VARIABLE ( + ID_ varchar(64) not null, + REV_ integer, + TYPE_ varchar(255) not null, + NAME_ varchar(255) not null, + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + TASK_ID_ varchar(64), + BATCH_ID_ varchar(64), + BYTEARRAY_ID_ varchar(64), + DOUBLE_ double precision, + LONG_ bigint, + TEXT_ varchar(4000), + TEXT2_ varchar(4000), + VAR_SCOPE_ varchar(64), + SEQUENCE_COUNTER_ bigint, + IS_CONCURRENT_LOCAL_ boolean, + TENANT_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RU_EVENT_SUBSCR ( + ID_ varchar(64) not null, + REV_ integer, + EVENT_TYPE_ varchar(255) not null, + EVENT_NAME_ varchar(255), + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + ACTIVITY_ID_ varchar(255), + CONFIGURATION_ varchar(255), + CREATED_ timestamp not null, + TENANT_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RU_INCIDENT ( + ID_ varchar(64) not null, + REV_ integer not null, + INCIDENT_TIMESTAMP_ timestamp not null, + INCIDENT_MSG_ varchar(4000), + INCIDENT_TYPE_ varchar(255) not null, + EXECUTION_ID_ varchar(64), + ACTIVITY_ID_ varchar(255), + FAILED_ACTIVITY_ID_ varchar(255), + PROC_INST_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + CAUSE_INCIDENT_ID_ varchar(64), + ROOT_CAUSE_INCIDENT_ID_ varchar(64), + CONFIGURATION_ varchar(255), + TENANT_ID_ varchar(64), + JOB_DEF_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RU_AUTHORIZATION ( + ID_ varchar(64) not null, + REV_ integer not null, + TYPE_ integer not null, + GROUP_ID_ varchar(255), + USER_ID_ varchar(255), + RESOURCE_TYPE_ integer not null, + RESOURCE_ID_ varchar(255), + PERMS_ integer, + REMOVAL_TIME_ timestamp, + ROOT_PROC_INST_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_RU_FILTER ( + ID_ varchar(64) not null, + REV_ integer not null, + RESOURCE_TYPE_ varchar(255) not null, + NAME_ varchar(255) not null, + OWNER_ varchar(255), + QUERY_ TEXT not null, + PROPERTIES_ TEXT, + primary key (ID_) +); + +create table ACT_RU_METER_LOG ( + ID_ varchar(64) not null, + NAME_ varchar(64) not null, + REPORTER_ varchar(255), + VALUE_ bigint, + TIMESTAMP_ timestamp, + MILLISECONDS_ bigint DEFAULT 0, + primary key (ID_) +); + +create table ACT_RU_EXT_TASK ( + ID_ varchar(64) not null, + REV_ integer not null, + WORKER_ID_ varchar(255), + TOPIC_NAME_ varchar(255), + RETRIES_ integer, + ERROR_MSG_ varchar(4000), + ERROR_DETAILS_ID_ varchar(64), + LOCK_EXP_TIME_ timestamp, + SUSPENSION_STATE_ integer, + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ 
varchar(64), + PROC_DEF_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + ACT_ID_ varchar(255), + ACT_INST_ID_ varchar(64), + TENANT_ID_ varchar(64), + PRIORITY_ bigint NOT NULL DEFAULT 0, + primary key (ID_) +); + +create table ACT_RU_BATCH ( + ID_ varchar(64) not null, + REV_ integer not null, + TYPE_ varchar(255), + TOTAL_JOBS_ integer, + JOBS_CREATED_ integer, + JOBS_PER_SEED_ integer, + INVOCATIONS_PER_JOB_ integer, + SEED_JOB_DEF_ID_ varchar(64), + BATCH_JOB_DEF_ID_ varchar(64), + MONITOR_JOB_DEF_ID_ varchar(64), + SUSPENSION_STATE_ integer, + CONFIGURATION_ varchar(255), + TENANT_ID_ varchar(64), + CREATE_USER_ID_ varchar(255), + primary key (ID_) +); + +create index ACT_IDX_EXE_ROOT_PI on ACT_RU_EXECUTION(ROOT_PROC_INST_ID_); +create index ACT_IDX_EXEC_BUSKEY on ACT_RU_EXECUTION(BUSINESS_KEY_); +create index ACT_IDX_EXEC_TENANT_ID on ACT_RU_EXECUTION(TENANT_ID_); +create index ACT_IDX_TASK_CREATE on ACT_RU_TASK(CREATE_TIME_); +create index ACT_IDX_TASK_ASSIGNEE on ACT_RU_TASK(ASSIGNEE_); +create index ACT_IDX_TASK_OWNER on ACT_RU_TASK(OWNER_); +create index ACT_IDX_TASK_TENANT_ID on ACT_RU_TASK(TENANT_ID_); +create index ACT_IDX_IDENT_LNK_USER on ACT_RU_IDENTITYLINK(USER_ID_); +create index ACT_IDX_IDENT_LNK_GROUP on ACT_RU_IDENTITYLINK(GROUP_ID_); +create index ACT_IDX_EVENT_SUBSCR_CONFIG_ on ACT_RU_EVENT_SUBSCR(CONFIGURATION_); +create index ACT_IDX_EVENT_SUBSCR_TENANT_ID on ACT_RU_EVENT_SUBSCR(TENANT_ID_); + +create index ACT_IDX_VARIABLE_TASK_ID on ACT_RU_VARIABLE(TASK_ID_); +create index ACT_IDX_VARIABLE_TENANT_ID on ACT_RU_VARIABLE(TENANT_ID_); +create index ACT_IDX_VARIABLE_TASK_NAME_TYPE on ACT_RU_VARIABLE(TASK_ID_, NAME_, TYPE_); + +create index ACT_IDX_INC_CONFIGURATION on ACT_RU_INCIDENT(CONFIGURATION_); +create index ACT_IDX_INC_TENANT_ID on ACT_RU_INCIDENT(TENANT_ID_); +-- CAM-5914 +create index ACT_IDX_JOB_EXECUTION_ID on ACT_RU_JOB(EXECUTION_ID_); +create index ACT_IDX_JOB_HANDLER on ACT_RU_JOB(HANDLER_TYPE_,HANDLER_CFG_); +create index ACT_IDX_JOB_PROCINST on ACT_RU_JOB(PROCESS_INSTANCE_ID_); +create index ACT_IDX_JOB_TENANT_ID on ACT_RU_JOB(TENANT_ID_); +create index ACT_IDX_JOBDEF_TENANT_ID on ACT_RU_JOBDEF(TENANT_ID_); + +-- new metric milliseconds column +CREATE INDEX ACT_IDX_METER_LOG_MS ON ACT_RU_METER_LOG(MILLISECONDS_); +CREATE INDEX ACT_IDX_METER_LOG_NAME_MS ON ACT_RU_METER_LOG(NAME_, MILLISECONDS_); +CREATE INDEX ACT_IDX_METER_LOG_REPORT ON ACT_RU_METER_LOG(NAME_, REPORTER_, MILLISECONDS_); + +-- old metric timestamp column +CREATE INDEX ACT_IDX_METER_LOG_TIME ON ACT_RU_METER_LOG(TIMESTAMP_); +CREATE INDEX ACT_IDX_METER_LOG ON ACT_RU_METER_LOG(NAME_, TIMESTAMP_); + +create index ACT_IDX_EXT_TASK_TOPIC on ACT_RU_EXT_TASK(TOPIC_NAME_); +create index ACT_IDX_EXT_TASK_TENANT_ID on ACT_RU_EXT_TASK(TENANT_ID_); +create index ACT_IDX_EXT_TASK_PRIORITY ON ACT_RU_EXT_TASK(PRIORITY_); +create index ACT_IDX_EXT_TASK_ERR_DETAILS ON ACT_RU_EXT_TASK(ERROR_DETAILS_ID_); +create index ACT_IDX_AUTH_GROUP_ID on ACT_RU_AUTHORIZATION(GROUP_ID_); +create index ACT_IDX_JOB_JOB_DEF_ID on ACT_RU_JOB(JOB_DEF_ID_); + +create index ACT_IDX_BYTEAR_DEPL on ACT_GE_BYTEARRAY(DEPLOYMENT_ID_); +alter table ACT_GE_BYTEARRAY + add constraint ACT_FK_BYTEARR_DEPL + foreign key (DEPLOYMENT_ID_) + references ACT_RE_DEPLOYMENT (ID_); + +create index ACT_IDX_EXE_PROCINST on ACT_RU_EXECUTION(PROC_INST_ID_); +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_EXE_PARENT on 
ACT_RU_EXECUTION(PARENT_ID_); +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PARENT + foreign key (PARENT_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_EXE_SUPER on ACT_RU_EXECUTION(SUPER_EXEC_); +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_SUPER + foreign key (SUPER_EXEC_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_EXE_PROCDEF on ACT_RU_EXECUTION(PROC_DEF_ID_); +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + + +create index ACT_IDX_TSKASS_TASK on ACT_RU_IDENTITYLINK(TASK_ID_); +alter table ACT_RU_IDENTITYLINK + add constraint ACT_FK_TSKASS_TASK + foreign key (TASK_ID_) + references ACT_RU_TASK (ID_); + +create index ACT_IDX_ATHRZ_PROCEDEF on ACT_RU_IDENTITYLINK(PROC_DEF_ID_); +alter table ACT_RU_IDENTITYLINK + add constraint ACT_FK_ATHRZ_PROCEDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +create index ACT_IDX_TASK_EXEC on ACT_RU_TASK(EXECUTION_ID_); +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_TASK_PROCINST on ACT_RU_TASK(PROC_INST_ID_); +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_TASK_PROCDEF on ACT_RU_TASK(PROC_DEF_ID_); +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +create index ACT_IDX_VAR_EXE on ACT_RU_VARIABLE(EXECUTION_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_VAR_PROCINST on ACT_RU_VARIABLE(PROC_INST_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION(ID_); + +create index ACT_IDX_VAR_BYTEARRAY on ACT_RU_VARIABLE(BYTEARRAY_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_BYTEARRAY + foreign key (BYTEARRAY_ID_) + references ACT_GE_BYTEARRAY (ID_); + +create index ACT_IDX_JOB_EXCEPTION on ACT_RU_JOB(EXCEPTION_STACK_ID_); +alter table ACT_RU_JOB + add constraint ACT_FK_JOB_EXCEPTION + foreign key (EXCEPTION_STACK_ID_) + references ACT_GE_BYTEARRAY (ID_); + +create index ACT_IDX_EVENT_SUBSCR on ACT_RU_EVENT_SUBSCR(EXECUTION_ID_); +alter table ACT_RU_EVENT_SUBSCR + add constraint ACT_FK_EVENT_EXEC + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION(ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_CAUSE + foreign key (CAUSE_INCIDENT_ID_) + references ACT_RU_INCIDENT (ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_RCAUSE + foreign key (ROOT_CAUSE_INCIDENT_ID_) + references ACT_RU_INCIDENT (ID_); + +create index ACT_IDX_INC_JOB_DEF on ACT_RU_INCIDENT(JOB_DEF_ID_); +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_JOB_DEF + foreign key (JOB_DEF_ID_) + references ACT_RU_JOBDEF (ID_); + +alter table ACT_RU_AUTHORIZATION + add constraint ACT_UNIQ_AUTH_USER + unique 
(TYPE_,USER_ID_,RESOURCE_TYPE_,RESOURCE_ID_); + +alter table ACT_RU_AUTHORIZATION + add constraint ACT_UNIQ_AUTH_GROUP + unique (TYPE_,GROUP_ID_,RESOURCE_TYPE_,RESOURCE_ID_); + +alter table ACT_RU_VARIABLE + add constraint ACT_UNIQ_VARIABLE + unique (VAR_SCOPE_, NAME_); + +alter table ACT_RU_EXT_TASK + add constraint ACT_FK_EXT_TASK_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_BATCH_SEED_JOB_DEF ON ACT_RU_BATCH(SEED_JOB_DEF_ID_); +alter table ACT_RU_BATCH + add constraint ACT_FK_BATCH_SEED_JOB_DEF + foreign key (SEED_JOB_DEF_ID_) + references ACT_RU_JOBDEF (ID_); + +create index ACT_IDX_BATCH_MONITOR_JOB_DEF ON ACT_RU_BATCH(MONITOR_JOB_DEF_ID_); +alter table ACT_RU_BATCH + add constraint ACT_FK_BATCH_MONITOR_JOB_DEF + foreign key (MONITOR_JOB_DEF_ID_) + references ACT_RU_JOBDEF (ID_); + +create index ACT_IDX_BATCH_JOB_DEF ON ACT_RU_BATCH(BATCH_JOB_DEF_ID_); +alter table ACT_RU_BATCH + add constraint ACT_FK_BATCH_JOB_DEF + foreign key (BATCH_JOB_DEF_ID_) + references ACT_RU_JOBDEF (ID_); + +alter table ACT_RU_EXT_TASK + add constraint ACT_FK_EXT_TASK_ERROR_DETAILS + foreign key (ERROR_DETAILS_ID_) + references ACT_GE_BYTEARRAY (ID_); + +create index ACT_IDX_BATCH_ID ON ACT_RU_VARIABLE(BATCH_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_BATCH + foreign key (BATCH_ID_) + references ACT_RU_BATCH (ID_); + +-- indexes for deadlock problems - https://app.camunda.com/jira/browse/CAM-2567 -- +create index ACT_IDX_INC_CAUSEINCID on ACT_RU_INCIDENT(CAUSE_INCIDENT_ID_); +create index ACT_IDX_INC_EXID on ACT_RU_INCIDENT(EXECUTION_ID_); +create index ACT_IDX_INC_PROCDEFID on ACT_RU_INCIDENT(PROC_DEF_ID_); +create index ACT_IDX_INC_PROCINSTID on ACT_RU_INCIDENT(PROC_INST_ID_); +create index ACT_IDX_INC_ROOTCAUSEINCID on ACT_RU_INCIDENT(ROOT_CAUSE_INCIDENT_ID_); +-- index for deadlock problem - https://app.camunda.com/jira/browse/CAM-4440 -- +create index ACT_IDX_AUTH_RESOURCE_ID on ACT_RU_AUTHORIZATION(RESOURCE_ID_); +-- index to prevent deadlock on fk constraint - https://app.camunda.com/jira/browse/CAM-5440 -- +create index ACT_IDX_EXT_TASK_EXEC on ACT_RU_EXT_TASK(EXECUTION_ID_); + +-- indexes to improve deployment +create index ACT_IDX_BYTEARRAY_ROOT_PI on ACT_GE_BYTEARRAY(ROOT_PROC_INST_ID_); +create index ACT_IDX_BYTEARRAY_RM_TIME on ACT_GE_BYTEARRAY(REMOVAL_TIME_); +create index ACT_IDX_BYTEARRAY_NAME on ACT_GE_BYTEARRAY(NAME_); +create index ACT_IDX_DEPLOYMENT_NAME on ACT_RE_DEPLOYMENT(NAME_); +create index ACT_IDX_DEPLOYMENT_TENANT_ID on ACT_RE_DEPLOYMENT(TENANT_ID_); +create index ACT_IDX_JOBDEF_PROC_DEF_ID ON ACT_RU_JOBDEF(PROC_DEF_ID_); +create index ACT_IDX_JOB_HANDLER_TYPE ON ACT_RU_JOB(HANDLER_TYPE_); +create index ACT_IDX_EVENT_SUBSCR_EVT_NAME ON ACT_RU_EVENT_SUBSCR(EVENT_NAME_); +create index ACT_IDX_PROCDEF_DEPLOYMENT_ID ON ACT_RE_PROCDEF(DEPLOYMENT_ID_); +create index ACT_IDX_PROCDEF_TENANT_ID ON ACT_RE_PROCDEF(TENANT_ID_); +create index ACT_IDX_PROCDEF_VER_TAG ON ACT_RE_PROCDEF(VERSION_TAG_); + +-- indices for history cleanup: https://jira.camunda.com/browse/CAM-11616 +create index ACT_IDX_AUTH_ROOT_PI on ACT_RU_AUTHORIZATION(ROOT_PROC_INST_ID_); +create index ACT_IDX_AUTH_RM_TIME on ACT_RU_AUTHORIZATION(REMOVAL_TIME_); +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. 
Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +-- create case definition table -- + +create table ACT_RE_CASE_DEF ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CATEGORY_ varchar(255), + NAME_ varchar(255), + KEY_ varchar(255) NOT NULL, + VERSION_ integer NOT NULL, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + DGRM_RESOURCE_NAME_ varchar(4000), + TENANT_ID_ varchar(64), + HISTORY_TTL_ integer, + primary key (ID_) +); + +-- create case execution table -- + +create table ACT_RU_CASE_EXECUTION ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CASE_INST_ID_ varchar(64), + SUPER_CASE_EXEC_ varchar(64), + SUPER_EXEC_ varchar(64), + BUSINESS_KEY_ varchar(255), + PARENT_ID_ varchar(64), + CASE_DEF_ID_ varchar(64), + ACT_ID_ varchar(255), + PREV_STATE_ integer, + CURRENT_STATE_ integer, + REQUIRED_ boolean, + TENANT_ID_ varchar(64), + primary key (ID_) +); + +-- create case sentry part table -- + +create table ACT_RU_CASE_SENTRY_PART ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CASE_INST_ID_ varchar(64), + CASE_EXEC_ID_ varchar(64), + SENTRY_ID_ varchar(255), + TYPE_ varchar(255), + SOURCE_CASE_EXEC_ID_ varchar(64), + STANDARD_EVENT_ varchar(255), + SOURCE_ varchar(255), + VARIABLE_EVENT_ varchar(255), + VARIABLE_NAME_ varchar(255), + SATISFIED_ boolean, + TENANT_ID_ varchar(64), + primary key (ID_) +); + +-- create index on business key -- +create index ACT_IDX_CASE_EXEC_BUSKEY on ACT_RU_CASE_EXECUTION(BUSINESS_KEY_); + +-- create foreign key constraints on ACT_RU_CASE_EXECUTION -- +create index ACT_IDX_CASE_EXE_CASE_INST on ACT_RU_CASE_EXECUTION(CASE_INST_ID_); +alter table ACT_RU_CASE_EXECUTION + add constraint ACT_FK_CASE_EXE_CASE_INST + foreign key (CASE_INST_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +create index ACT_IDX_CASE_EXE_PARENT on ACT_RU_CASE_EXECUTION(PARENT_ID_); +alter table ACT_RU_CASE_EXECUTION + add constraint ACT_FK_CASE_EXE_PARENT + foreign key (PARENT_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +create index ACT_IDX_CASE_EXE_CASE_DEF on ACT_RU_CASE_EXECUTION(CASE_DEF_ID_); +alter table ACT_RU_CASE_EXECUTION + add constraint ACT_FK_CASE_EXE_CASE_DEF + foreign key (CASE_DEF_ID_) + references ACT_RE_CASE_DEF(ID_); + +-- create foreign key constraints on ACT_RU_VARIABLE -- +create index ACT_IDX_VAR_CASE_EXE on ACT_RU_VARIABLE(CASE_EXECUTION_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_CASE_EXE + foreign key (CASE_EXECUTION_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +create index ACT_IDX_VAR_CASE_INST_ID on ACT_RU_VARIABLE(CASE_INST_ID_); +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_CASE_INST + foreign key (CASE_INST_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +-- create foreign key constraints on ACT_RU_TASK -- +create index ACT_IDX_TASK_CASE_EXEC on ACT_RU_TASK(CASE_EXECUTION_ID_); +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_CASE_EXE + foreign key (CASE_EXECUTION_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +create index ACT_IDX_TASK_CASE_DEF_ID on ACT_RU_TASK(CASE_DEF_ID_); 
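+-- as in the statements above, the referencing column is indexed before its FK constraint is added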
+alter table ACT_RU_TASK + add constraint ACT_FK_TASK_CASE_DEF + foreign key (CASE_DEF_ID_) + references ACT_RE_CASE_DEF(ID_); + +-- create foreign key constraints on ACT_RU_CASE_SENTRY_PART -- +create index ACT_IDX_CASE_SENTRY_CASE_INST on ACT_RU_CASE_SENTRY_PART(CASE_INST_ID_); +alter table ACT_RU_CASE_SENTRY_PART + add constraint ACT_FK_CASE_SENTRY_CASE_INST + foreign key (CASE_INST_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +create index ACT_IDX_CASE_SENTRY_CASE_EXEC on ACT_RU_CASE_SENTRY_PART(CASE_EXEC_ID_); +alter table ACT_RU_CASE_SENTRY_PART + add constraint ACT_FK_CASE_SENTRY_CASE_EXEC + foreign key (CASE_EXEC_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +create index ACT_IDX_CASE_DEF_TENANT_ID on ACT_RE_CASE_DEF(TENANT_ID_); +create index ACT_IDX_CASE_EXEC_TENANT_ID on ACT_RU_CASE_EXECUTION(TENANT_ID_); +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +-- create decision definition table -- +create table ACT_RE_DECISION_DEF ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CATEGORY_ varchar(255), + NAME_ varchar(255), + KEY_ varchar(255) NOT NULL, + VERSION_ integer NOT NULL, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + DGRM_RESOURCE_NAME_ varchar(4000), + DEC_REQ_ID_ varchar(64), + DEC_REQ_KEY_ varchar(255), + TENANT_ID_ varchar(64), + HISTORY_TTL_ integer, + VERSION_TAG_ varchar(64), + primary key (ID_) +); + +-- create decision requirements definition table -- +create table ACT_RE_DECISION_REQ_DEF ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CATEGORY_ varchar(255), + NAME_ varchar(255), + KEY_ varchar(255) NOT NULL, + VERSION_ integer NOT NULL, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + DGRM_RESOURCE_NAME_ varchar(4000), + TENANT_ID_ varchar(64), + primary key (ID_) +); + +alter table ACT_RE_DECISION_DEF + add constraint ACT_FK_DEC_REQ + foreign key (DEC_REQ_ID_) + references ACT_RE_DECISION_REQ_DEF(ID_); + +create index ACT_IDX_DEC_DEF_TENANT_ID on ACT_RE_DECISION_DEF(TENANT_ID_); +create index ACT_IDX_DEC_DEF_REQ_ID on ACT_RE_DECISION_DEF(DEC_REQ_ID_); +create index ACT_IDX_DEC_REQ_DEF_TENANT_ID on ACT_RE_DECISION_REQ_DEF(TENANT_ID_); +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. 
+-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +create table ACT_HI_PROCINST ( + ID_ varchar(64) not null, + PROC_INST_ID_ varchar(64) not null, + BUSINESS_KEY_ varchar(255), + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64) not null, + START_TIME_ timestamp not null, + END_TIME_ timestamp, + REMOVAL_TIME_ timestamp, + DURATION_ bigint, + START_USER_ID_ varchar(255), + START_ACT_ID_ varchar(255), + END_ACT_ID_ varchar(255), + SUPER_PROCESS_INSTANCE_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + SUPER_CASE_INSTANCE_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + DELETE_REASON_ varchar(4000), + TENANT_ID_ varchar(64), + STATE_ varchar(255), + primary key (ID_), + unique (PROC_INST_ID_) +); + +create table ACT_HI_ACTINST ( + ID_ varchar(64) not null, + PARENT_ACT_INST_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64) not null, + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64) not null, + EXECUTION_ID_ varchar(64) not null, + ACT_ID_ varchar(255) not null, + TASK_ID_ varchar(64), + CALL_PROC_INST_ID_ varchar(64), + CALL_CASE_INST_ID_ varchar(64), + ACT_NAME_ varchar(255), + ACT_TYPE_ varchar(255) not null, + ASSIGNEE_ varchar(255), + START_TIME_ timestamp not null, + END_TIME_ timestamp, + DURATION_ bigint, + ACT_INST_STATE_ integer, + SEQUENCE_COUNTER_ bigint, + TENANT_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_TASKINST ( + ID_ varchar(64) not null, + TASK_DEF_KEY_ varchar(255), + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + CASE_DEF_KEY_ varchar(255), + CASE_DEF_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + ACT_INST_ID_ varchar(64), + NAME_ varchar(255), + PARENT_TASK_ID_ varchar(64), + DESCRIPTION_ varchar(4000), + OWNER_ varchar(255), + ASSIGNEE_ varchar(255), + START_TIME_ timestamp not null, + END_TIME_ timestamp, + DURATION_ bigint, + DELETE_REASON_ varchar(4000), + PRIORITY_ integer, + DUE_DATE_ timestamp, + FOLLOW_UP_DATE_ timestamp, + TENANT_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_VARINST ( + ID_ varchar(64) not null, + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + ACT_INST_ID_ varchar(64), + CASE_DEF_KEY_ varchar(255), + CASE_DEF_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + TASK_ID_ varchar(64), + NAME_ varchar(255) not null, + VAR_TYPE_ varchar(100), + CREATE_TIME_ timestamp, + REV_ integer, + BYTEARRAY_ID_ varchar(64), + DOUBLE_ double precision, + LONG_ bigint, + TEXT_ varchar(4000), + TEXT2_ varchar(4000), + TENANT_ID_ varchar(64), + STATE_ varchar(20), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_DETAIL ( + ID_ varchar(64) not null, + TYPE_ varchar(255) not null, + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + CASE_DEF_KEY_ varchar(255), + 
CASE_DEF_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + TASK_ID_ varchar(64), + ACT_INST_ID_ varchar(64), + VAR_INST_ID_ varchar(64), + NAME_ varchar(255) not null, + VAR_TYPE_ varchar(64), + REV_ integer, + TIME_ timestamp not null, + BYTEARRAY_ID_ varchar(64), + DOUBLE_ double precision, + LONG_ bigint, + TEXT_ varchar(4000), + TEXT2_ varchar(4000), + SEQUENCE_COUNTER_ bigint, + TENANT_ID_ varchar(64), + OPERATION_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + INITIAL_ boolean, + primary key (ID_) +); + +create table ACT_HI_IDENTITYLINK ( + ID_ varchar(64) not null, + TIMESTAMP_ timestamp not null, + TYPE_ varchar(255), + USER_ID_ varchar(255), + GROUP_ID_ varchar(255), + TASK_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + OPERATION_TYPE_ varchar(64), + ASSIGNER_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + TENANT_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_COMMENT ( + ID_ varchar(64) not null, + TYPE_ varchar(255), + TIME_ timestamp not null, + USER_ID_ varchar(255), + TASK_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + ACTION_ varchar(255), + MESSAGE_ varchar(4000), + FULL_MSG_ bytea, + TENANT_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_ATTACHMENT ( + ID_ varchar(64) not null, + REV_ integer, + USER_ID_ varchar(255), + NAME_ varchar(255), + DESCRIPTION_ varchar(4000), + TYPE_ varchar(255), + TASK_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + URL_ varchar(4000), + CONTENT_ID_ varchar(64), + TENANT_ID_ varchar(64), + CREATE_TIME_ timestamp, + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_OP_LOG ( + ID_ varchar(64) not null, + DEPLOYMENT_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + CASE_DEF_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + TASK_ID_ varchar(64), + JOB_ID_ varchar(64), + JOB_DEF_ID_ varchar(64), + BATCH_ID_ varchar(64), + USER_ID_ varchar(255), + TIMESTAMP_ timestamp not null, + OPERATION_TYPE_ varchar(64), + OPERATION_ID_ varchar(64), + ENTITY_TYPE_ varchar(30), + PROPERTY_ varchar(64), + ORG_VALUE_ varchar(4000), + NEW_VALUE_ varchar(4000), + TENANT_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + CATEGORY_ varchar(64), + EXTERNAL_TASK_ID_ varchar(64), + ANNOTATION_ varchar(4000), + primary key (ID_) +); + +create table ACT_HI_INCIDENT ( + ID_ varchar(64) not null, + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + CREATE_TIME_ timestamp not null, + END_TIME_ timestamp, + INCIDENT_MSG_ varchar(4000), + INCIDENT_TYPE_ varchar(255) not null, + ACTIVITY_ID_ varchar(255), + FAILED_ACTIVITY_ID_ varchar(255), + CAUSE_INCIDENT_ID_ varchar(64), + ROOT_CAUSE_INCIDENT_ID_ varchar(64), + CONFIGURATION_ varchar(255), + HISTORY_CONFIGURATION_ varchar(255), + INCIDENT_STATE_ integer, + TENANT_ID_ varchar(64), + JOB_DEF_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_JOB_LOG ( + ID_ varchar(64) not null, + TIMESTAMP_ timestamp not null, + JOB_ID_ varchar(64) not null, + JOB_DUEDATE_ timestamp, + JOB_RETRIES_ integer, + JOB_PRIORITY_ bigint NOT NULL DEFAULT 0, + JOB_EXCEPTION_MSG_ varchar(4000), + JOB_EXCEPTION_STACK_ID_ varchar(64), + 
JOB_STATE_ integer, + JOB_DEF_ID_ varchar(64), + JOB_DEF_TYPE_ varchar(255), + JOB_DEF_CONFIGURATION_ varchar(255), + ACT_ID_ varchar(255), + FAILED_ACT_ID_ varchar(255), + EXECUTION_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROCESS_INSTANCE_ID_ varchar(64), + PROCESS_DEF_ID_ varchar(64), + PROCESS_DEF_KEY_ varchar(255), + DEPLOYMENT_ID_ varchar(64), + SEQUENCE_COUNTER_ bigint, + TENANT_ID_ varchar(64), + HOSTNAME_ varchar(255), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_BATCH ( + ID_ varchar(64) not null, + TYPE_ varchar(255), + TOTAL_JOBS_ integer, + JOBS_PER_SEED_ integer, + INVOCATIONS_PER_JOB_ integer, + SEED_JOB_DEF_ID_ varchar(64), + MONITOR_JOB_DEF_ID_ varchar(64), + BATCH_JOB_DEF_ID_ varchar(64), + TENANT_ID_ varchar(64), + CREATE_USER_ID_ varchar(255), + START_TIME_ timestamp not null, + END_TIME_ timestamp, + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create table ACT_HI_EXT_TASK_LOG ( + ID_ varchar(64) not null, + TIMESTAMP_ timestamp not null, + EXT_TASK_ID_ varchar(64) not null, + RETRIES_ integer, + TOPIC_NAME_ varchar(255), + WORKER_ID_ varchar(255), + PRIORITY_ bigint not null default 0, + ERROR_MSG_ varchar(4000), + ERROR_DETAILS_ID_ varchar(64), + ACT_ID_ varchar(255), + ACT_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + TENANT_ID_ varchar(64), + STATE_ integer, + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +create index ACT_IDX_HI_PRO_INST_END on ACT_HI_PROCINST(END_TIME_); +create index ACT_IDX_HI_PRO_I_BUSKEY on ACT_HI_PROCINST(BUSINESS_KEY_); +create index ACT_IDX_HI_PRO_INST_TENANT_ID on ACT_HI_PROCINST(TENANT_ID_); +create index ACT_IDX_HI_PRO_INST_PROC_DEF_KEY on ACT_HI_PROCINST(PROC_DEF_KEY_); +create index ACT_IDX_HI_PRO_INST_PROC_TIME on ACT_HI_PROCINST(START_TIME_, END_TIME_); +create index ACT_IDX_HI_PI_PDEFID_END_TIME on ACT_HI_PROCINST(PROC_DEF_ID_, END_TIME_); +create index ACT_IDX_HI_PRO_INST_ROOT_PI on ACT_HI_PROCINST(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_PRO_INST_RM_TIME on ACT_HI_PROCINST(REMOVAL_TIME_); + +create index ACT_IDX_HI_ACTINST_ROOT_PI on ACT_HI_ACTINST(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_ACT_INST_START_END on ACT_HI_ACTINST(START_TIME_, END_TIME_); +create index ACT_IDX_HI_ACT_INST_END on ACT_HI_ACTINST(END_TIME_); +create index ACT_IDX_HI_ACT_INST_PROCINST on ACT_HI_ACTINST(PROC_INST_ID_, ACT_ID_); +create index ACT_IDX_HI_ACT_INST_COMP on ACT_HI_ACTINST(EXECUTION_ID_, ACT_ID_, END_TIME_, ID_); +create index ACT_IDX_HI_ACT_INST_STATS on ACT_HI_ACTINST(PROC_DEF_ID_, PROC_INST_ID_, ACT_ID_, END_TIME_, ACT_INST_STATE_); +create index ACT_IDX_HI_ACT_INST_TENANT_ID on ACT_HI_ACTINST(TENANT_ID_); +create index ACT_IDX_HI_ACT_INST_PROC_DEF_KEY on ACT_HI_ACTINST(PROC_DEF_KEY_); +create index ACT_IDX_HI_AI_PDEFID_END_TIME on ACT_HI_ACTINST(PROC_DEF_ID_, END_TIME_); +create index ACT_IDX_HI_ACT_INST_RM_TIME on ACT_HI_ACTINST(REMOVAL_TIME_); + +create index ACT_IDX_HI_TASKINST_ROOT_PI on ACT_HI_TASKINST(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_TASK_INST_TENANT_ID on ACT_HI_TASKINST(TENANT_ID_); +create index ACT_IDX_HI_TASK_INST_PROC_DEF_KEY on ACT_HI_TASKINST(PROC_DEF_KEY_); +create index ACT_IDX_HI_TASKINST_PROCINST on ACT_HI_TASKINST(PROC_INST_ID_); +create index ACT_IDX_HI_TASKINSTID_PROCINST on ACT_HI_TASKINST(ID_,PROC_INST_ID_); +create index ACT_IDX_HI_TASK_INST_RM_TIME on ACT_HI_TASKINST(REMOVAL_TIME_); +create index 
ACT_IDX_HI_TASK_INST_START on ACT_HI_TASKINST(START_TIME_); +create index ACT_IDX_HI_TASK_INST_END on ACT_HI_TASKINST(END_TIME_); + +create index ACT_IDX_HI_DETAIL_ROOT_PI on ACT_HI_DETAIL(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_DETAIL_PROC_INST on ACT_HI_DETAIL(PROC_INST_ID_); +create index ACT_IDX_HI_DETAIL_ACT_INST on ACT_HI_DETAIL(ACT_INST_ID_); +create index ACT_IDX_HI_DETAIL_CASE_INST on ACT_HI_DETAIL(CASE_INST_ID_); +create index ACT_IDX_HI_DETAIL_CASE_EXEC on ACT_HI_DETAIL(CASE_EXECUTION_ID_); +create index ACT_IDX_HI_DETAIL_TIME on ACT_HI_DETAIL(TIME_); +create index ACT_IDX_HI_DETAIL_NAME on ACT_HI_DETAIL(NAME_); +create index ACT_IDX_HI_DETAIL_TASK_ID on ACT_HI_DETAIL(TASK_ID_); +create index ACT_IDX_HI_DETAIL_TENANT_ID on ACT_HI_DETAIL(TENANT_ID_); +create index ACT_IDX_HI_DETAIL_PROC_DEF_KEY on ACT_HI_DETAIL(PROC_DEF_KEY_); +create index ACT_IDX_HI_DETAIL_BYTEAR on ACT_HI_DETAIL(BYTEARRAY_ID_); +create index ACT_IDX_HI_DETAIL_RM_TIME on ACT_HI_DETAIL(REMOVAL_TIME_); +create index ACT_IDX_HI_DETAIL_TASK_BYTEAR on ACT_HI_DETAIL(BYTEARRAY_ID_, TASK_ID_); +create index ACT_IDX_HI_DETAIL_VAR_INST_ID on ACT_HI_DETAIL(VAR_INST_ID_); + +create index ACT_IDX_HI_IDENT_LNK_ROOT_PI on ACT_HI_IDENTITYLINK(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_IDENT_LNK_USER on ACT_HI_IDENTITYLINK(USER_ID_); +create index ACT_IDX_HI_IDENT_LNK_GROUP on ACT_HI_IDENTITYLINK(GROUP_ID_); +create index ACT_IDX_HI_IDENT_LNK_TENANT_ID on ACT_HI_IDENTITYLINK(TENANT_ID_); +create index ACT_IDX_HI_IDENT_LNK_PROC_DEF_KEY on ACT_HI_IDENTITYLINK(PROC_DEF_KEY_); +create index ACT_IDX_HI_IDENT_LINK_TASK on ACT_HI_IDENTITYLINK(TASK_ID_); +create index ACT_IDX_HI_IDENT_LINK_RM_TIME on ACT_HI_IDENTITYLINK(REMOVAL_TIME_); +create index ACT_IDX_HI_IDENT_LNK_TIMESTAMP on ACT_HI_IDENTITYLINK(TIMESTAMP_); + +create index ACT_IDX_HI_VARINST_ROOT_PI on ACT_HI_VARINST(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_PROCVAR_PROC_INST on ACT_HI_VARINST(PROC_INST_ID_); +create index ACT_IDX_HI_PROCVAR_NAME_TYPE on ACT_HI_VARINST(NAME_, VAR_TYPE_); +create index ACT_IDX_HI_CASEVAR_CASE_INST on ACT_HI_VARINST(CASE_INST_ID_); +create index ACT_IDX_HI_VAR_INST_TENANT_ID on ACT_HI_VARINST(TENANT_ID_); +create index ACT_IDX_HI_VAR_INST_PROC_DEF_KEY on ACT_HI_VARINST(PROC_DEF_KEY_); +create index ACT_IDX_HI_VARINST_BYTEAR on ACT_HI_VARINST(BYTEARRAY_ID_); +create index ACT_IDX_HI_VARINST_RM_TIME on ACT_HI_VARINST(REMOVAL_TIME_); +create index ACT_IDX_HI_VAR_PI_NAME_TYPE on ACT_HI_VARINST(PROC_INST_ID_, NAME_, VAR_TYPE_); + +create index ACT_IDX_HI_INCIDENT_TENANT_ID on ACT_HI_INCIDENT(TENANT_ID_); +create index ACT_IDX_HI_INCIDENT_PROC_DEF_KEY on ACT_HI_INCIDENT(PROC_DEF_KEY_); +create index ACT_IDX_HI_INCIDENT_ROOT_PI on ACT_HI_INCIDENT(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_INCIDENT_PROCINST on ACT_HI_INCIDENT(PROC_INST_ID_); +create index ACT_IDX_HI_INCIDENT_RM_TIME on ACT_HI_INCIDENT(REMOVAL_TIME_); +create index ACT_IDX_HI_INCIDENT_CREATE_TIME on ACT_HI_INCIDENT(CREATE_TIME_); +create index ACT_IDX_HI_INCIDENT_END_TIME on ACT_HI_INCIDENT(END_TIME_); + +create index ACT_IDX_HI_JOB_LOG_ROOT_PI on ACT_HI_JOB_LOG(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_JOB_LOG_PROCINST on ACT_HI_JOB_LOG(PROCESS_INSTANCE_ID_); +create index ACT_IDX_HI_JOB_LOG_PROCDEF on ACT_HI_JOB_LOG(PROCESS_DEF_ID_); +create index ACT_IDX_HI_JOB_LOG_TENANT_ID on ACT_HI_JOB_LOG(TENANT_ID_); +create index ACT_IDX_HI_JOB_LOG_JOB_DEF_ID on ACT_HI_JOB_LOG(JOB_DEF_ID_); +create index ACT_IDX_HI_JOB_LOG_PROC_DEF_KEY on ACT_HI_JOB_LOG(PROCESS_DEF_KEY_); 
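+-- Note: the *_RM_TIME indexes created throughout this file exist so that Camunda's history cleanup can efficiently select historic rows whose REMOVAL_TIME_ has passed.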
+create index ACT_IDX_HI_JOB_LOG_EX_STACK on ACT_HI_JOB_LOG(JOB_EXCEPTION_STACK_ID_); +create index ACT_IDX_HI_JOB_LOG_RM_TIME on ACT_HI_JOB_LOG(REMOVAL_TIME_); +create index ACT_IDX_HI_JOB_LOG_JOB_CONF on ACT_HI_JOB_LOG(JOB_DEF_CONFIGURATION_); + +create index ACT_HI_BAT_RM_TIME on ACT_HI_BATCH(REMOVAL_TIME_); + +create index ACT_HI_EXT_TASK_LOG_ROOT_PI on ACT_HI_EXT_TASK_LOG(ROOT_PROC_INST_ID_); +create index ACT_HI_EXT_TASK_LOG_PROCINST on ACT_HI_EXT_TASK_LOG(PROC_INST_ID_); +create index ACT_HI_EXT_TASK_LOG_PROCDEF on ACT_HI_EXT_TASK_LOG(PROC_DEF_ID_); +create index ACT_HI_EXT_TASK_LOG_PROC_DEF_KEY on ACT_HI_EXT_TASK_LOG(PROC_DEF_KEY_); +create index ACT_HI_EXT_TASK_LOG_TENANT_ID on ACT_HI_EXT_TASK_LOG(TENANT_ID_); +create index ACT_IDX_HI_EXTTASKLOG_ERRORDET on ACT_HI_EXT_TASK_LOG(ERROR_DETAILS_ID_); +create index ACT_HI_EXT_TASK_LOG_RM_TIME on ACT_HI_EXT_TASK_LOG(REMOVAL_TIME_); + +create index ACT_IDX_HI_OP_LOG_ROOT_PI on ACT_HI_OP_LOG(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_OP_LOG_PROCINST on ACT_HI_OP_LOG(PROC_INST_ID_); +create index ACT_IDX_HI_OP_LOG_PROCDEF on ACT_HI_OP_LOG(PROC_DEF_ID_); +create index ACT_IDX_HI_OP_LOG_TASK on ACT_HI_OP_LOG(TASK_ID_); +create index ACT_IDX_HI_OP_LOG_RM_TIME on ACT_HI_OP_LOG(REMOVAL_TIME_); +create index ACT_IDX_HI_OP_LOG_TIMESTAMP on ACT_HI_OP_LOG(TIMESTAMP_); +create index ACT_IDX_HI_OP_LOG_USER_ID on ACT_HI_OP_LOG(USER_ID_); +create index ACT_IDX_HI_OP_LOG_OP_TYPE on ACT_HI_OP_LOG(OPERATION_TYPE_); +create index ACT_IDX_HI_OP_LOG_ENTITY_TYPE on ACT_HI_OP_LOG(ENTITY_TYPE_); + +create index ACT_IDX_HI_ATTACHMENT_CONTENT on ACT_HI_ATTACHMENT(CONTENT_ID_); +create index ACT_IDX_HI_ATTACHMENT_ROOT_PI on ACT_HI_ATTACHMENT(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_ATTACHMENT_PROCINST on ACT_HI_ATTACHMENT(PROC_INST_ID_); +create index ACT_IDX_HI_ATTACHMENT_TASK on ACT_HI_ATTACHMENT(TASK_ID_); +create index ACT_IDX_HI_ATTACHMENT_RM_TIME on ACT_HI_ATTACHMENT(REMOVAL_TIME_); + +create index ACT_IDX_HI_COMMENT_TASK on ACT_HI_COMMENT(TASK_ID_); +create index ACT_IDX_HI_COMMENT_ROOT_PI on ACT_HI_COMMENT(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_COMMENT_PROCINST on ACT_HI_COMMENT(PROC_INST_ID_); +create index ACT_IDX_HI_COMMENT_RM_TIME on ACT_HI_COMMENT(REMOVAL_TIME_); +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +create table ACT_HI_CASEINST ( + ID_ varchar(64) not null, + CASE_INST_ID_ varchar(64) not null, + BUSINESS_KEY_ varchar(255), + CASE_DEF_ID_ varchar(64) not null, + CREATE_TIME_ timestamp not null, + CLOSE_TIME_ timestamp, + DURATION_ bigint, + STATE_ integer, + CREATE_USER_ID_ varchar(255), + SUPER_CASE_INSTANCE_ID_ varchar(64), + SUPER_PROCESS_INSTANCE_ID_ varchar(64), + TENANT_ID_ varchar(64), + primary key (ID_), + unique (CASE_INST_ID_) +); + +create table ACT_HI_CASEACTINST ( + ID_ varchar(64) not null, + PARENT_ACT_INST_ID_ varchar(64), + CASE_DEF_ID_ varchar(64) not null, + CASE_INST_ID_ varchar(64) not null, + CASE_ACT_ID_ varchar(255) not null, + TASK_ID_ varchar(64), + CALL_PROC_INST_ID_ varchar(64), + CALL_CASE_INST_ID_ varchar(64), + CASE_ACT_NAME_ varchar(255), + CASE_ACT_TYPE_ varchar(255), + CREATE_TIME_ timestamp not null, + END_TIME_ timestamp, + DURATION_ bigint, + STATE_ integer, + REQUIRED_ boolean, + TENANT_ID_ varchar(64), + primary key (ID_) +); + +create index ACT_IDX_HI_CAS_I_CLOSE on ACT_HI_CASEINST(CLOSE_TIME_); +create index ACT_IDX_HI_CAS_I_BUSKEY on ACT_HI_CASEINST(BUSINESS_KEY_); +create index ACT_IDX_HI_CAS_I_TENANT_ID on ACT_HI_CASEINST(TENANT_ID_); +create index ACT_IDX_HI_CAS_A_I_CREATE on ACT_HI_CASEACTINST(CREATE_TIME_); +create index ACT_IDX_HI_CAS_A_I_END on ACT_HI_CASEACTINST(END_TIME_); +create index ACT_IDX_HI_CAS_A_I_COMP on ACT_HI_CASEACTINST(CASE_ACT_ID_, END_TIME_, ID_); +create index ACT_IDX_HI_CAS_A_I_TENANT_ID on ACT_HI_CASEACTINST(TENANT_ID_); +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +-- create history decision instance table -- +create table ACT_HI_DECINST ( + ID_ varchar(64) NOT NULL, + DEC_DEF_ID_ varchar(64) NOT NULL, + DEC_DEF_KEY_ varchar(255) NOT NULL, + DEC_DEF_NAME_ varchar(255), + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + CASE_DEF_KEY_ varchar(255), + CASE_DEF_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + ACT_INST_ID_ varchar(64), + ACT_ID_ varchar(255), + EVAL_TIME_ timestamp not null, + REMOVAL_TIME_ timestamp, + COLLECT_VALUE_ double precision, + USER_ID_ varchar(255), + ROOT_DEC_INST_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + DEC_REQ_ID_ varchar(64), + DEC_REQ_KEY_ varchar(255), + TENANT_ID_ varchar(64), + primary key (ID_) +); + +-- create history decision input table -- +create table ACT_HI_DEC_IN ( + ID_ varchar(64) NOT NULL, + DEC_INST_ID_ varchar(64) NOT NULL, + CLAUSE_ID_ varchar(64), + CLAUSE_NAME_ varchar(255), + VAR_TYPE_ varchar(100), + BYTEARRAY_ID_ varchar(64), + DOUBLE_ double precision, + LONG_ bigint, + TEXT_ varchar(4000), + TEXT2_ varchar(4000), + TENANT_ID_ varchar(64), + CREATE_TIME_ timestamp, + ROOT_PROC_INST_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + +-- create history decision output table -- +create table ACT_HI_DEC_OUT ( + ID_ varchar(64) NOT NULL, + DEC_INST_ID_ varchar(64) NOT NULL, + CLAUSE_ID_ varchar(64), + CLAUSE_NAME_ varchar(255), + RULE_ID_ varchar(64), + RULE_ORDER_ integer, + VAR_NAME_ varchar(255), + VAR_TYPE_ varchar(100), + BYTEARRAY_ID_ varchar(64), + DOUBLE_ double precision, + LONG_ bigint, + TEXT_ varchar(4000), + TEXT2_ varchar(4000), + TENANT_ID_ varchar(64), + CREATE_TIME_ timestamp, + ROOT_PROC_INST_ID_ varchar(64), + REMOVAL_TIME_ timestamp, + primary key (ID_) +); + + +create index ACT_IDX_HI_DEC_INST_ID on ACT_HI_DECINST(DEC_DEF_ID_); +create index ACT_IDX_HI_DEC_INST_KEY on ACT_HI_DECINST(DEC_DEF_KEY_); +create index ACT_IDX_HI_DEC_INST_PI on ACT_HI_DECINST(PROC_INST_ID_); +create index ACT_IDX_HI_DEC_INST_CI on ACT_HI_DECINST(CASE_INST_ID_); +create index ACT_IDX_HI_DEC_INST_ACT on ACT_HI_DECINST(ACT_ID_); +create index ACT_IDX_HI_DEC_INST_ACT_INST on ACT_HI_DECINST(ACT_INST_ID_); +create index ACT_IDX_HI_DEC_INST_TIME on ACT_HI_DECINST(EVAL_TIME_); +create index ACT_IDX_HI_DEC_INST_TENANT_ID on ACT_HI_DECINST(TENANT_ID_); +create index ACT_IDX_HI_DEC_INST_ROOT_ID on ACT_HI_DECINST(ROOT_DEC_INST_ID_); +create index ACT_IDX_HI_DEC_INST_REQ_ID on ACT_HI_DECINST(DEC_REQ_ID_); +create index ACT_IDX_HI_DEC_INST_REQ_KEY on ACT_HI_DECINST(DEC_REQ_KEY_); +create index ACT_IDX_HI_DEC_INST_ROOT_PI on ACT_HI_DECINST(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_DEC_INST_RM_TIME on ACT_HI_DECINST(REMOVAL_TIME_); + +create index ACT_IDX_HI_DEC_IN_INST on ACT_HI_DEC_IN(DEC_INST_ID_); +create index ACT_IDX_HI_DEC_IN_CLAUSE on ACT_HI_DEC_IN(DEC_INST_ID_, CLAUSE_ID_); +create index ACT_IDX_HI_DEC_IN_ROOT_PI on ACT_HI_DEC_IN(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_DEC_IN_RM_TIME on ACT_HI_DEC_IN(REMOVAL_TIME_); + +create index ACT_IDX_HI_DEC_OUT_INST on ACT_HI_DEC_OUT(DEC_INST_ID_); +create index ACT_IDX_HI_DEC_OUT_RULE on ACT_HI_DEC_OUT(RULE_ORDER_, CLAUSE_ID_); +create index ACT_IDX_HI_DEC_OUT_ROOT_PI on ACT_HI_DEC_OUT(ROOT_PROC_INST_ID_); +create index ACT_IDX_HI_DEC_OUT_RM_TIME on ACT_HI_DEC_OUT(REMOVAL_TIME_); diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_2__postgres_identity_7.14.0.sql 
b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_2__postgres_identity_7.14.0.sql new file mode 100644 index 00000000..5b63794f --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_2__postgres_identity_7.14.0.sql @@ -0,0 +1,109 @@ +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +create table ACT_ID_GROUP ( + ID_ varchar(64), + REV_ integer, + NAME_ varchar(255), + TYPE_ varchar(255), + primary key (ID_) +); + +create table ACT_ID_MEMBERSHIP ( + USER_ID_ varchar(64), + GROUP_ID_ varchar(64), + primary key (USER_ID_, GROUP_ID_) +); + +create table ACT_ID_USER ( + ID_ varchar(64), + REV_ integer, + FIRST_ varchar(255), + LAST_ varchar(255), + EMAIL_ varchar(255), + PWD_ varchar(255), + SALT_ varchar(255), + LOCK_EXP_TIME_ timestamp, + ATTEMPTS_ integer, + PICTURE_ID_ varchar(64), + primary key (ID_) +); + +create table ACT_ID_INFO ( + ID_ varchar(64), + REV_ integer, + USER_ID_ varchar(64), + TYPE_ varchar(64), + KEY_ varchar(255), + VALUE_ varchar(255), + PASSWORD_ bytea, + PARENT_ID_ varchar(255), + primary key (ID_) +); + +create table ACT_ID_TENANT ( + ID_ varchar(64), + REV_ integer, + NAME_ varchar(255), + primary key (ID_) +); + +create table ACT_ID_TENANT_MEMBER ( + ID_ varchar(64) not null, + TENANT_ID_ varchar(64) not null, + USER_ID_ varchar(64), + GROUP_ID_ varchar(64), + primary key (ID_) +); + +create index ACT_IDX_MEMB_GROUP on ACT_ID_MEMBERSHIP(GROUP_ID_); +alter table ACT_ID_MEMBERSHIP + add constraint ACT_FK_MEMB_GROUP + foreign key (GROUP_ID_) + references ACT_ID_GROUP (ID_); + +create index ACT_IDX_MEMB_USER on ACT_ID_MEMBERSHIP(USER_ID_); +alter table ACT_ID_MEMBERSHIP + add constraint ACT_FK_MEMB_USER + foreign key (USER_ID_) + references ACT_ID_USER (ID_); + +alter table ACT_ID_TENANT_MEMBER + add constraint ACT_UNIQ_TENANT_MEMB_USER + unique (TENANT_ID_, USER_ID_); + +alter table ACT_ID_TENANT_MEMBER + add constraint ACT_UNIQ_TENANT_MEMB_GROUP + unique (TENANT_ID_, GROUP_ID_); + +create index ACT_IDX_TENANT_MEMB on ACT_ID_TENANT_MEMBER(TENANT_ID_); +alter table ACT_ID_TENANT_MEMBER + add constraint ACT_FK_TENANT_MEMB + foreign key (TENANT_ID_) + references ACT_ID_TENANT (ID_); + +create index ACT_IDX_TENANT_MEMB_USER on ACT_ID_TENANT_MEMBER(USER_ID_); +alter table ACT_ID_TENANT_MEMBER + add constraint ACT_FK_TENANT_MEMB_USER + foreign key (USER_ID_) + references ACT_ID_USER (ID_); + +create index ACT_IDX_TENANT_MEMB_GROUP on ACT_ID_TENANT_MEMBER(GROUP_ID_); +alter table ACT_ID_TENANT_MEMBER + add constraint ACT_FK_TENANT_MEMB_GROUP + foreign key (GROUP_ID_) + references ACT_ID_GROUP (ID_); diff --git 
a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_3__admin.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_3__admin.sql new file mode 100644 index 00000000..57e037cb --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_3__admin.sql @@ -0,0 +1,46 @@ +-- Create admin user (password: admin) +INSERT INTO ACT_ID_USER (ID_, REV_, FIRST_, LAST_, EMAIL_, PWD_, PICTURE_ID_) +VALUES ('admin', 1, 'Admin', 'Administratus', 'bpm@holunda.io', '{SHA}0DPiKuNIrrVmD8IUCuw1hQxNqZc=', null); + +-- Create admin group +INSERT INTO ACT_ID_GROUP (ID_, REV_, NAME_, TYPE_) +VALUES ('camunda-admin', 1, 'Camunda BPM Administrators', 'SYSTEM'); + +-- Add admin user to admin group +INSERT into ACT_ID_MEMBERSHIP (USER_ID_, GROUP_ID_) +VALUES ('admin', 'camunda-admin'); + +-- Add authorizations +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A1', 1, 1, null, 'admin', 1, 'admin', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A2', 1, 1, 'camunda-admin', null, 0, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A3', 1, 1, 'camunda-admin', null, 1, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A4', 1, 1, 'camunda-admin', null, 2, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A5', 1, 1, 'camunda-admin', null, 3, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A6', 1, 1, 'camunda-admin', null, 4, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A7', 1, 1, 'camunda-admin', null, 5, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A8', 1, 1, 'camunda-admin', null, 6, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A9', 1, 1, 'camunda-admin', null, 7, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A10', 1, 1, 'camunda-admin', null, 8, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A11', 1, 1, 'camunda-admin', null, 9, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A12', 1, 1, 'camunda-admin', null, 10, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A13', 1, 1, 'camunda-admin', null, 11, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A14', 1, 1, 'camunda-admin', null, 12, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) 
+VALUES ('A15', 1, 1, 'camunda-admin', null, 13, '*', 2147483647); +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A16', 1, 1, 'camunda-admin', null, 14, '*', 2147483647); + diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_4__tasklist filters.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_4__tasklist filters.sql new file mode 100644 index 00000000..c73b5803 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_4__tasklist filters.sql @@ -0,0 +1,7 @@ +-- 'All Tasks' filter, for admin use only +INSERT INTO ACT_RU_FILTER (ID_, REV_, RESOURCE_TYPE_, NAME_, OWNER_, QUERY_, PROPERTIES_) +VALUES ('F000', 1, 'Task', 'All Tasks', 'admin', '{}', + '{"showUndefinedVariable":false,"description":"All tasks (for admin use only!)","refresh":true,"priority":0}'); + +INSERT INTO ACT_RU_AUTHORIZATION (ID_, REV_, TYPE_, GROUP_ID_, USER_ID_, RESOURCE_TYPE_, RESOURCE_ID_, PERMS_) +VALUES ('A20', 1, 1, null, 'admin', 5, 'F000', 2147483647); diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_5__axon.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_5__axon.sql new file mode 100644 index 00000000..1aa3c66f --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_5__axon.sql @@ -0,0 +1,73 @@ +create sequence hibernate_sequence start with 1 increment by 1; + +create table association_value_entry +( + id int8 not null, + association_key varchar(255) not null, + association_value varchar(255), + saga_id varchar(255) not null, + saga_type varchar(255), + primary key (id) +); + +create table saga_entry +( + saga_id varchar(255) not null, + revision varchar(255), + saga_type varchar(255), + serialized_saga bytea, + primary key (saga_id) +); + +create table snapshot_event_entry +( + aggregate_identifier varchar(255) not null, + sequence_number bigint not null, + type varchar(255) not null, + event_identifier varchar(255) not null, + meta_data bytea, + payload bytea not null, + payload_revision varchar(255), + payload_type varchar(255) not null, + time_stamp varchar(255) not null, + primary key (aggregate_identifier, sequence_number, type) +); + +create table token_entry +( + processor_name varchar(255) not null, + segment int4 not null, + owner varchar(255), + timestamp varchar(255) not null, + token bytea, + token_type varchar(255), + primary key (processor_name, segment) +); + +create table domain_event_entry +( + global_index INT8 not null, + event_identifier varchar(255) not null, + meta_data BYTEA, + payload BYTEA not null, + payload_revision varchar(255), + payload_type varchar(255) not null, + time_stamp varchar(255) not null, + aggregate_identifier varchar(255) not null, + sequence_number INT8 not null, + type varchar(255), + primary key (global_index) +); + + +create index IDX_association_value_entry_stakav on association_value_entry (saga_type, association_key, association_value); +create index IDX_association_value_entry_sist on association_value_entry (saga_id, saga_type); + +alter table domain_event_entry + add constraint UC_domain_event_entry_aisn unique (aggregate_identifier, sequence_number); + +alter table domain_event_entry + add constraint UC_domain_event_entry_ei
unique (event_identifier); + +alter table snapshot_event_entry + add constraint UC_snapshot_event_entry_ei unique (event_identifier); diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_6__request.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_6__request.sql new file mode 100644 index 00000000..d4aee7e2 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_6__request.sql @@ -0,0 +1,9 @@ +create table APP_APPROVAL_REQUEST +( + id varchar(255) not null, + amount decimal(10, 2), + applicant varchar(255), + currency varchar(255), + subject varchar(255), + primary key (id) +); diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_7__postgres_engine_7.14_to_7.15.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_7__postgres_engine_7.14_to_7.15.sql new file mode 100644 index 00000000..259227b6 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_7__postgres_engine_7.14_to_7.15.sql @@ -0,0 +1,37 @@ +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +insert into ACT_GE_SCHEMA_LOG +values ('400', CURRENT_TIMESTAMP, '7.15.0'); + +-- https://jira.camunda.com/browse/CAM-13013 + +create table ACT_RU_TASK_METER_LOG ( + ID_ varchar(64) not null, + ASSIGNEE_HASH_ bigint, + TIMESTAMP_ timestamp, + primary key (ID_) +); + +create index ACT_IDX_TASK_METER_LOG_TIME on ACT_RU_TASK_METER_LOG(TIMESTAMP_); + +-- https://jira.camunda.com/browse/CAM-13060 +ALTER TABLE ACT_RU_INCIDENT + ADD ANNOTATION_ varchar(4000); + +ALTER TABLE ACT_HI_INCIDENT + ADD ANNOTATION_ varchar(4000); diff --git a/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_9__postgres_engine_7.15_to_7.16.sql b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_9__postgres_engine_7.15_to_7.16.sql new file mode 100644 index 00000000..ec0c9e80 --- /dev/null +++ b/scenarios/distributed-kafka/process-application-local-polyflow/src/main/resources/db/migrations/V0_0_9__postgres_engine_7.15_to_7.16.sql @@ -0,0 +1,30 @@ +-- +-- Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH +-- under one or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information regarding copyright +-- ownership. Camunda licenses this file to you under the Apache License, +-- Version 2.0; you may not use this file except in compliance with the License. 
+-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +insert into ACT_GE_SCHEMA_LOG +values ('500', CURRENT_TIMESTAMP, '7.16.0'); + +create table ACT_RE_CAMFORMDEF ( + ID_ varchar(64) NOT NULL, + REV_ integer, + KEY_ varchar(255) NOT NULL, + VERSION_ integer NOT NULL, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + TENANT_ID_ varchar(64), + primary key (ID_) +); \ No newline at end of file diff --git a/scenarios/distributed-kafka/process-platform-view-only/pom.xml b/scenarios/distributed-kafka/process-platform-view-only/pom.xml index eab367a5..b54db8f7 100644 --- a/scenarios/distributed-kafka/process-platform-view-only/pom.xml +++ b/scenarios/distributed-kafka/process-platform-view-only/pom.xml @@ -30,6 +30,16 @@ polyflow-view-jpa + + + org.axonframework.extensions.kafka + axon-kafka-spring-boot-starter + + + org.apache.kafka + kafka-clients + + org.flywaydb flyway-core diff --git a/scenarios/distributed-kafka/process-platform-view-only/src/main/kotlin/ExamplePlatformApplicationDistributedWithKafka.kt b/scenarios/distributed-kafka/process-platform-view-only/src/main/kotlin/io/holunda/polyflow/example/process/platform/ExamplePlatformApplicationDistributedWithKafka.kt similarity index 78% rename from scenarios/distributed-kafka/process-platform-view-only/src/main/kotlin/ExamplePlatformApplicationDistributedWithKafka.kt rename to scenarios/distributed-kafka/process-platform-view-only/src/main/kotlin/io/holunda/polyflow/example/process/platform/ExamplePlatformApplicationDistributedWithKafka.kt index fec5932b..9a7cbe24 100755 --- a/scenarios/distributed-kafka/process-platform-view-only/src/main/kotlin/ExamplePlatformApplicationDistributedWithKafka.kt +++ b/scenarios/distributed-kafka/process-platform-view-only/src/main/kotlin/io/holunda/polyflow/example/process/platform/ExamplePlatformApplicationDistributedWithKafka.kt @@ -53,24 +53,19 @@ class ExamplePlatformApplicationDistributedWithKafka { @Qualifier(PAYLOAD_OBJECT_MAPPER) @Bean @Primary - fun payloadObjectMapper(): ObjectMapper { - return jacksonObjectMapper() + fun objectMapper(): ObjectMapper = + jacksonObjectMapper() .registerModule(JavaTimeModule()) .configurePolyflowJacksonObjectMapper() - .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) // let the dates be strings and not nanoseconds - .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) // be nice to properties we don't understand - } + .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) - @Bean("defaultAxonXStream") - @ConditionalOnMissingBean - fun defaultAxonXStream(applicationContext: ApplicationContext): XStream { - val xStream = XStream(CompactDriver()) - xStream.allowTypesByWildcard(XStreamSecurityTypeUtility.autoConfigBasePackages(applicationContext)) - // This configures XStream to permit any class to be deserialized. 
- // FIXME: We might want to make this more restrictive to improve security - xStream.addPermission(AnyTypePermission.ANY) - return xStream - } + @Bean("defaultAxonObjectMapper") + @Qualifier("defaultAxonObjectMapper") + fun defaultAxonObjectMapper(): ObjectMapper = + jacksonObjectMapper() + .registerModule(JavaTimeModule()) + .configurePolyflowJacksonObjectMapper() + .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS) /** * Factory function creating correlation data provider for revision information. diff --git a/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/application.yml b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/application.yml new file mode 100755 index 00000000..2283a29a --- /dev/null +++ b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/application.yml @@ -0,0 +1,87 @@ +server: + port: 8081 + +spring: + application: + name: taskpool + datasource: + url: jdbc:postgresql://localhost:25432/tasklistdb + username: polyflow_user + password: S3Cr3T! + jpa: + generate-ddl: false + hibernate.ddl-auto: validate + show-sql: false + open-in-view: false + database-platform: io.holunda.polyflow.example.infrastructure.NoToastPostgresSQLDialect + flyway: + enabled: true + locations: "classpath:db/migrations" + +springdoc: + packagesToScan: io.holunda.polyflow.example.tasklist.rest + auto-tag-classes: false + swagger-ui: + try-it-out-enabled: true + +axon: + serializer: + events: jackson + messages: jackson + general: jackson + axonserver: + enabled: false # disable axon server connector + eventhandling: + processors: + "[io.holunda.polyflow.view.jpa.service.task]": + source: kafkaMessageSourcePolyflowTask + mode: TRACKING + threadCount: 1 + batchSize: 1 + "[io.holunda.polyflow.view.jpa.service.data]": + source: kafkaMessageSourcePolyflowData + mode: TRACKING + threadCount: 1 + batchSize: 1 + kafka: + defaulttopic: not_used_but_must_be_set_to_some_value + client-id: will_be_set_in_properties_manually + consumer: + bootstrap-servers: localhost:29092 + event-processor-mode: TRACKING + auto-offset-reset: earliest + properties: + security.protocol: PLAINTEXT + +polyflow: + axon: + kafka: + enabled: true + topic-tasks: polyflow-task + topic-data-entries: polyflow-data + integration: + form-url-resolver: + default-task-template: "tasks/${formKey}/${id}?userId=%userId%" + default-process-template: "${formKey}?userId=%userId%" + default-data-entry-template: "${entryType}/${entryId}?userId=%userId%" + default-application-template: "http://localhost:${server.port}/${applicationName}" + applications: + example-process-approval: + url: "http://localhost:8080/example-process-approval" + processes: + process_approve_request: "start?userId=%userId%" + data-entries: + io.holunda.camunda.taskpool.example.ApprovalRequest: "approval-request/${entryId}?userId=%userId%" + view: + jpa: + stored-items: TASK, DATA_ENTRY, PROCESS_DEFINITION + +logging.level: + io.holunda.polyflow: + core.taskpool: WARN + view.simple.service.DataEntryService : DEBUG + view.simple.service.TaskPoolService : INFO + view.jpa.JpaPolyflowViewProcessDefinitionService : DEBUG + example.tasklist: WARN +# org.springframework.web.bind: DEBUG +# org.springframework.web: DEBUG diff --git a/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V01__axon.sql b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V01__axon.sql new file mode 100644 index 00000000..78ab2fc8 --- /dev/null +++ 
b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V01__axon.sql @@ -0,0 +1,34 @@ +create sequence hibernate_sequence start with 1 increment by 1; + +create table association_value_entry +( + id int8 not null, + association_key varchar(255) not null, + association_value varchar(255), + saga_id varchar(255) not null, + saga_type varchar(255), + primary key (id) +); + +create table saga_entry +( + saga_id varchar(255) not null, + revision varchar(255), + saga_type varchar(255), + serialized_saga bytea, + primary key (saga_id) +); + +create table token_entry +( + processor_name varchar(255) not null, + segment int4 not null, + owner varchar(255), + timestamp varchar(255) not null, + token bytea, + token_type varchar(255), + primary key (processor_name, segment) +); + +create index IDX_association_value_entry_stakav on association_value_entry (saga_type, association_key, association_value); +create index IDX_association_value_entry_sist on association_value_entry (saga_id, saga_type); diff --git a/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V02__jpa_view.sql b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V02__jpa_view.sql new file mode 100644 index 00000000..d0ab8832 --- /dev/null +++ b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V02__jpa_view.sql @@ -0,0 +1,163 @@ +CREATE TABLE plf_data_entry ( + entry_id VARCHAR(255) NOT NULL, + entry_type VARCHAR(255) NOT NULL, + application_name VARCHAR(255) NOT NULL, + date_created TIMESTAMP NOT NULL, + description VARCHAR(2048), + form_key VARCHAR(255), + date_last_modified TIMESTAMP NOT NULL, + name VARCHAR(255) NOT NULL, + payload OID, + revision INT8, + processing_type VARCHAR(255) NOT NULL, + state VARCHAR(255) NOT NULL, + type VARCHAR(255) NOT NULL, + PRIMARY KEY (entry_id, entry_type) +); + +CREATE TABLE plf_data_entry_authorizations ( + entry_id VARCHAR(255) NOT NULL, + entry_type VARCHAR(255) NOT NULL, + authorized_principal VARCHAR(255) NOT NULL, + PRIMARY KEY (entry_id, entry_type, authorized_principal) +); + +CREATE TABLE plf_data_entry_payload_attributes ( + entry_id VARCHAR(255) NOT NULL, + entry_type VARCHAR(255) NOT NULL, + path VARCHAR(255) NOT NULL, + value VARCHAR(255) NOT NULL, + PRIMARY KEY (entry_id, entry_type, path, value) +); + +CREATE TABLE plf_data_entry_protocol ( + id VARCHAR(255) NOT NULL, + log_details VARCHAR(255), + log_message VARCHAR(255), + processing_type VARCHAR(255) NOT NULL, + state VARCHAR(255) NOT NULL, + time TIMESTAMP NOT NULL, + username VARCHAR(255), + entry_id VARCHAR(255) NOT NULL, + entry_type VARCHAR(255) NOT NULL, + PRIMARY KEY (id) +); + +CREATE TABLE plf_proc_def ( + proc_def_id VARCHAR(255) NOT NULL, + application_name VARCHAR(255) NOT NULL, + description VARCHAR(2048), + name VARCHAR(255) NOT NULL, + proc_def_key VARCHAR(255) NOT NULL, + proc_def_version INT4 NOT NULL, + start_form_key VARCHAR(255), + startable_from_tasklist BOOLEAN, + version_tag VARCHAR(255), + PRIMARY KEY (proc_def_id) +); + +CREATE TABLE plf_proc_def_authorizations ( + proc_def_id VARCHAR(255) NOT NULL, + authorized_starter_principal VARCHAR(255) NOT NULL, + PRIMARY KEY (proc_def_id, authorized_starter_principal) +); + +CREATE TABLE plf_proc_instance ( + instance_id VARCHAR(255) NOT NULL, + business_key VARCHAR(255), + delete_reason VARCHAR(255), + end_activity_id VARCHAR(255), + application_name VARCHAR(255) NOT NULL, + source_def_id VARCHAR(255) NOT NULL, + 
source_def_key VARCHAR(255) NOT NULL, + source_execution_id VARCHAR(255) NOT NULL, + source_instance_id VARCHAR(255) NOT NULL, + source_name VARCHAR(255) NOT NULL, + source_type VARCHAR(255) NOT NULL, + source_tenant_id VARCHAR(255), + start_activity_id VARCHAR(255), + start_user_id VARCHAR(255), + run_state VARCHAR(255) NOT NULL, + super_instance_id VARCHAR(255), + PRIMARY KEY (instance_id) +); + +CREATE TABLE plf_task ( + task_id VARCHAR(255) NOT NULL, + assignee_id VARCHAR(255), + business_key VARCHAR(255), + date_created TIMESTAMP NOT NULL, + description VARCHAR(2048), + date_due TIMESTAMP, + date_follow_up TIMESTAMP, + form_key VARCHAR(255), + name VARCHAR(255) NOT NULL, + owner_id VARCHAR(255), + payload OID, + priority INT4, + application_name VARCHAR(255) NOT NULL, + source_def_id VARCHAR(255) NOT NULL, + source_def_key VARCHAR(255) NOT NULL, + source_execution_id VARCHAR(255) NOT NULL, + source_instance_id VARCHAR(255) NOT NULL, + source_name VARCHAR(255) NOT NULL, + source_type VARCHAR(255) NOT NULL, + source_tenant_id VARCHAR(255), + task_def_key VARCHAR(255) NOT NULL, + PRIMARY KEY (task_id) +); + +CREATE TABLE plf_task_authorizations ( + task_id VARCHAR(255) NOT NULL, + authorized_principal VARCHAR(255) NOT NULL, + PRIMARY KEY (task_id, authorized_principal) +); + +CREATE TABLE plf_task_correlations ( + task_id VARCHAR(255) NOT NULL, + entry_id VARCHAR(255) NOT NULL, + entry_type VARCHAR(255) NOT NULL, + PRIMARY KEY (task_id, entry_id, entry_type) +); + +CREATE TABLE plf_task_payload_attributes ( + task_id VARCHAR(255) NOT NULL, + path VARCHAR(255) NOT NULL, + value VARCHAR(255) NOT NULL, + PRIMARY KEY (task_id, path, value) +); + +ALTER TABLE plf_data_entry_authorizations + ADD CONSTRAINT FK_authorizations_have_data_entry + FOREIGN KEY (entry_id, entry_type) + REFERENCES plf_data_entry; + +ALTER TABLE plf_data_entry_payload_attributes + ADD CONSTRAINT FK_payload_attributes_have_data_entry + FOREIGN KEY (entry_id, entry_type) + REFERENCES plf_data_entry; + +ALTER TABLE plf_data_entry_protocol + ADD CONSTRAINT FK_protocol_have_data_entry + FOREIGN KEY (entry_id, entry_type) + REFERENCES plf_data_entry; + +ALTER TABLE plf_proc_def_authorizations + ADD CONSTRAINT FK_authorizations_have_proc_def + FOREIGN KEY (proc_def_id) + REFERENCES plf_proc_def; + +ALTER TABLE plf_task_authorizations + ADD CONSTRAINT FK_authorizations_have_task + FOREIGN KEY (task_id) + REFERENCES plf_task; + +ALTER TABLE plf_task_correlations + ADD CONSTRAINT FK_correlation_have_task + FOREIGN KEY (task_id) + REFERENCES plf_task; + +ALTER TABLE plf_task_payload_attributes + ADD CONSTRAINT FK_payload_attributes_have_task + FOREIGN KEY (task_id) + REFERENCES plf_task; diff --git a/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V03__axon_dlq.sql b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V03__axon_dlq.sql new file mode 100644 index 00000000..e26da824 --- /dev/null +++ b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V03__axon_dlq.sql @@ -0,0 +1,25 @@ +CREATE TABLE dead_letter_entry ( + dead_letter_id VARCHAR(255) NOT NULL, + cause_message VARCHAR(255), + cause_type VARCHAR(255), + diagnostics BYTEA, + enqueued_at TIMESTAMP NOT NULL, + last_touched TIMESTAMP, + aggregate_identifier VARCHAR(255), + event_identifier VARCHAR(255) NOT NULL, + message_type VARCHAR(255) NOT NULL, + meta_data BYTEA, + payload BYTEA NOT NULL, + payload_revision VARCHAR(255), + payload_type VARCHAR(255) NOT 
NULL, + sequence_number INT8, + time_stamp VARCHAR(255) NOT NULL, + token BYTEA, + token_type VARCHAR(255), + type VARCHAR(255), + processing_group VARCHAR(255) NOT NULL, + processing_started TIMESTAMP, + sequence_identifier VARCHAR(255) NOT NULL, + sequence_index INT8 NOT NULL, + PRIMARY KEY (dead_letter_id) +); diff --git a/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V04__polyflow_deleted.sql b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V04__polyflow_deleted.sql new file mode 100644 index 00000000..bf3e32aa --- /dev/null +++ b/scenarios/distributed-kafka/process-platform-view-only/src/main/resources/db/migrations/V04__polyflow_deleted.sql @@ -0,0 +1,2 @@ +ALTER TABLE plf_data_entry + ADD date_deleted timestamp;
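As a quick smoke test of the wiring in this change set, the projected view state can be inspected directly in the tasklist database. This is only a sketch, not part of the diff: the connection details are taken from docker-compose.yml and application.yml above, and the queries assume the V01 to V04 migrations have been applied.

-- connect to jdbc:postgresql://localhost:25432/tasklistdb as polyflow_user

-- the Kafka-fed tracking processors register their tokens here once they start:
select processor_name, segment, owner from token_entry;

-- tasks projected from the polyflow-task topic into the JPA view:
select task_id, name, assignee_id, date_created from plf_task order by date_created desc;

-- data entries marked as deleted via the date_deleted column added in V04:
select entry_type, entry_id, date_deleted from plf_data_entry where date_deleted is not null;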