From bbb1102c3b891d00af1ae472f048aace43b05cdf Mon Sep 17 00:00:00 2001
From: Priyanka K U
Date: Wed, 7 Aug 2024 13:57:12 +0530
Subject: [PATCH] MQ V2 OSS release (#136)

* Only once (#23) squash and merge of exactly once work, history is available in the onlyOnce-history branch
* feat: Update mq image to latest (#45) Contributes to: event-integration/qp-planning#10883 Signed-off-by: Joel Hanson
* fix: Include appropriate license in packaging (#46)
* fix: Include appropriate license in packaging Include appropriate license in packaging Contributes to: event-integration/eventstreams-planning#11171 Signed-off-by: neeraj-laad Signed-off-by: neeraj-laad
* fix: Move Licenses folder under META-INF Move Licenses folder under META-INF Contributes to: event-integration/eventstreams-planning#11171 Signed-off-by: neeraj-laad
* fix: update license in README update license in README Contributes to: event-integration/eventstreams-planning#11171 Signed-off-by: neeraj-laad
* fix: fix the no-auth MQ integration tests (#48) We use the latest tag MQ image in our integration tests. In December, MQ's container image removed the no-auth svrconn channel so this breaks the connector tests that try to make connections to MQ without credentials. This commit adds a custom MQSC script to configure the queue manager to restore the previous behaviour. The MQSourceTaskAuthIT tests still test the ability to connect to a queue manager with auth credentials, so this commit means we still test connecting with and without credentials. Contributes-to: event-integration/eventstreams-planning#12427 Signed-off-by: Dale Lane
* refactor: delegate config validation to Kafka Connect (#50)
* feat: pull in changes from origin Contributes to: event-integration/eventstreams-planning#12467 Signed-off-by: Joel Hanson
* feat: Fix suggested review comments Contributes to: event-integration/eventstreams-planning#12467 Signed-off-by: Joel Hanson
--------- Signed-off-by: Joel Hanson
* fix: avoid dead-locking the SourceTask if all commits aren't received (#51)
* chore: comment updates - no functional changes Fixed some weird line-wrapping in the existing comments, edited comments that were technically inaccurate, and added some extra clarification to comments ahead of changes in the next commit. Signed-off-by: Dale Lane
* test: new unit tests that illustrate desired behaviour poll() should quickly return an empty list if we are still waiting for a previous batch to complete, but throw an exception if this happens too many times. Signed-off-by: Dale Lane
* fix: avoid deadlocking poll calls when commitRecord isn't called Signed-off-by: Dale Lane
* chore: prepare new release I also removed an invalid config option that was included in the pom.xml at some point.
Signed-off-by: Dale Lane * feat: Address review comments Contributes to: event-integration/eventstreams-planning#0 Signed-off-by: Joel Hanson * fix: Update kafka connect-api and connect-json version Contributes to: event-integration/eventstreams-planning#0 Signed-off-by: Joel Hanson --------- Signed-off-by: Dale Lane Signed-off-by: Joel Hanson Co-authored-by: Joel Hanson * feat: Automate mq source release (#52) Contributes to: event-integration/eventstreams-planning#0 Signed-off-by: Joel Hanson * fix: travis release (#53) Contributes to: event-integration/eventstreams-planning#0 Signed-off-by: Joel Hanson * feat: Addressing review comments Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com feat: updated versions in pom.xml Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com feat: Addressing review comments Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com feat: Addressing review comments Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com Updated BUG Report Template Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com feat: updated copyright year 2024 Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com feat: addressed the review comments and updated the version to 2.1.0 Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com feat: updated the License folder Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com feat: Updated License and Travis for MQ 2.0.2 Contributes to: event-integration/eventstreams-planning#12988 Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com --------- Signed-off-by: Joel Hanson Signed-off-by: neeraj-laad Signed-off-by: Dale Lane Signed-off-by: Priyanka.K.U Priyanka.k.u@ibm.com Co-authored-by: Joseph Vullo Co-authored-by: Joel Hanson Co-authored-by: Neeraj Laad Co-authored-by: Dale Lane Co-authored-by: Priyanka K U --- .github/ISSUE_TEMPLATE/BUG-REPORT.yml | 3 +- .gitignore | 6 +- Dockerfile | 4 +- LICENSE | 2 +- README.md | 122 ++-- config/mq-source-exactly-once.json | 19 + config/mq-source.properties | 8 +- pom.xml | 79 ++- src/assembly/package.xml | 3 +- .../mqsource/AbstractJMSContextIT.java | 195 ++---- .../connect/mqsource/JMSWorkerIT.java | 110 ++++ .../connect/mqsource/MQSourceTaskAuthIT.java | 49 +- .../MQSourceTaskExceptionHandlingIT.java | 356 +++++++++++ .../connect/mqsource/MQSourceTaskIT.java | 353 +++++++---- .../mqsource/MQSourceTaskObjectMother.java | 52 ++ .../mqsource/MQSourceTaskOnlyOnceIT.java | 398 ++++++++++++ .../MQSourceTaskOnlyOnceStartBehaviourIT.java | 573 +++++++++++++++++ .../mqsource/SequenceStateClientIT.java | 336 ++++++++++ .../builders/DefaultRecordBuilderIT.java | 40 +- .../builders/JsonRecordBuilderIT.java | 5 +- .../connect/mqsource/utils/JsonRestApi.java | 48 +- .../utils/MQConnectionRollbackHelper.java | 69 ++ .../mqsource/utils/MQQueueManagerAttrs.java | 102 ++- .../connect/mqsource/utils/MQTestUtil.java | 248 ++++++++ .../mqsource/utils/MessagesObjectMother.java | 53 ++ .../utils/SourceTaskContextObjectMother.java | 68 ++ .../mqsource/utils/SourceTaskStopper.java | 10 +- .../connect/mqsource/JMSReader.java | 512 --------------- .../connect/mqsource/JMSWorker.java | 
395 ++++++++++++ .../JMSWorkerConnectionException.java | 28 + .../connect/mqsource/MQSourceConnector.java | 424 ++++++++----- .../connect/mqsource/MQSourceTask.java | 596 ++++++++++++++---- .../mqsource/MQSourceTaskStartUpAction.java | 22 + .../connect/mqsource/SSLContextBuilder.java | 85 +++ .../mqsource/builders/BaseRecordBuilder.java | 69 +- .../builders/DefaultRecordBuilder.java | 5 +- .../mqsource/builders/JsonRecordBuilder.java | 5 +- .../mqsource/builders/RecordBuilder.java | 11 +- .../builders/RecordBuilderException.java | 31 + .../builders/RecordBuilderFactory.java | 50 ++ .../processor/JmsToKafkaHeaderConverter.java | 2 +- .../mqsource/sequencestate/SequenceState.java | 104 +++ .../sequencestate/SequenceStateClient.java | 141 +++++ .../sequencestate/SequenceStateException.java | 28 + .../mqsource/util/ExceptionProcessor.java | 91 +++ .../connect/mqsource/util/LogMessages.java | 55 ++ .../connect/mqsource/util/QueueConfig.java | 62 ++ .../JmsToKafkaHeaderConverterTest.java | 2 +- .../mqsource/MQSourceConnectorTest.java | 50 +- .../connect/mqsource/MQSourceTaskTest.java | 275 ++++++++ .../builders/RecordBuilderFactoryTest.java | 49 ++ .../SequenceStateClientTest.java | 142 +++++ .../sequencestate/SequenceStateTest.java | 165 +++++ .../mqsource/util/ExceptionProcessorTest.java | 83 +++ .../mqsource/util/QueueConfigTest.java | 77 +++ src/test/resources/log4j.properties | 2 +- src/test/resources/no-auth-qmgr.mqsc | 3 + 57 files changed, 5628 insertions(+), 1247 deletions(-) create mode 100644 config/mq-source-exactly-once.json create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/JMSWorkerIT.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskExceptionHandlingIT.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskObjectMother.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskOnlyOnceIT.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskOnlyOnceStartBehaviourIT.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/SequenceStateClientIT.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQConnectionRollbackHelper.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQTestUtil.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MessagesObjectMother.java create mode 100644 src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/SourceTaskContextObjectMother.java delete mode 100755 src/main/java/com/ibm/eventstreams/connect/mqsource/JMSReader.java create mode 100755 src/main/java/com/ibm/eventstreams/connect/mqsource/JMSWorker.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/JMSWorkerConnectionException.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskStartUpAction.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/SSLContextBuilder.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderException.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderFactory.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceState.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateClient.java 
create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateException.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/util/ExceptionProcessor.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/util/LogMessages.java create mode 100644 src/main/java/com/ibm/eventstreams/connect/mqsource/util/QueueConfig.java create mode 100644 src/test/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskTest.java create mode 100644 src/test/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderFactoryTest.java create mode 100644 src/test/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateClientTest.java create mode 100644 src/test/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateTest.java create mode 100644 src/test/java/com/ibm/eventstreams/connect/mqsource/util/ExceptionProcessorTest.java create mode 100644 src/test/java/com/ibm/eventstreams/connect/mqsource/util/QueueConfigTest.java create mode 100644 src/test/resources/no-auth-qmgr.mqsc diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml index 4bc9718..e835b69 100644 --- a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml @@ -57,7 +57,8 @@ body: label: Version description: What version of our software are you running? options: - - 1.3.5 (Default) + - 2.1.0 (Default) + - 1.3.5 - older (<1.3.5) validations: required: true diff --git a/.gitignore b/.gitignore index 96cd907..a86c91c 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,8 @@ target/ *.iws # Visual Studio Code -.vscode/ \ No newline at end of file +.vscode/ + +mqjms.log.* +/src/main/resources/simplelogger.properties +.envrc diff --git a/Dockerfile b/Dockerfile index 81b598b..07f998f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ FROM alpine as builder RUN apk update RUN apk --no-cache add curl -RUN curl -L "https://downloads.apache.org/kafka/3.4.1/kafka_2.12-3.4.1.tgz" -o kafka.tgz +RUN curl -L "https://downloads.apache.org/kafka/3.6.2/kafka_2.12-3.6.2.tgz" -o kafka.tgz RUN mkdir /opt/kafka \ && tar -xf kafka.tgz -C /opt/kafka --strip-components=1 @@ -27,4 +27,4 @@ EXPOSE 8083 USER esuser -ENTRYPOINT ["./bin/connect-distributed.sh", "config/connect-distributed.properties"] +ENTRYPOINT ["./bin/connect-distributed.sh", "config/connect-distributed.properties"] \ No newline at end of file diff --git a/LICENSE b/LICENSE index 0a24bd8..9df4e14 100644 --- a/LICENSE +++ b/LICENSE @@ -175,7 +175,7 @@ Apache License END OF TERMS AND CONDITIONS - Copyright 2017 IBM Corporation + Copyright 2017, 2024 IBM Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 1e895a6..542e7e0 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,7 @@ The connector is supplied as source code which you can easily build into a JAR f - [Data formats](#data-formats) - [Security](#security) - [Configuration](#configuration) +- [Exactly-once message delivery semantics](#exactly-once-message-delivery-semantics) - [Troubleshooting](#troubleshooting) - [Support](#support) - [Issues and contributions](#issues-and-contributions) @@ -46,7 +47,9 @@ Build the connector using Maven: mvn clean package ``` -Once built, the output is a single JAR called `target/kafka-connect-mq-source--jar-with-dependencies.jar` which contains all of the required dependencies. 
+Once built, the output is a single JAR called `target/kafka-connect-mq-source--jar-with-dependencies.jar` which contains all of the required dependencies. + +**NOTE:** With the 2.0.0 release the base Kafka Connect library has been updated from 2.6.0 to 3.4.0 to enable the implementation of the exactly-once delivery. ## Running the connector @@ -59,8 +62,8 @@ To run the connector, you must have: - The JAR from building the connector - A properties file containing the configuration for the connector -- Apache Kafka 2.6.2 or later, either standalone or included as part of an offering such as IBM Event Streams -- IBM MQ v9 or later, or the IBM MQ on Cloud service +- Apache Kafka 2.0.0 or later, either standalone or included as part of an offering such as IBM Event Streams (Apache Kafka 3.3.0 or later is required for exactly-once delivery). +- IBM MQ v8 or later, or the IBM MQ on Cloud service The connector can be run in a Kafka Connect worker in either standalone (single process) or distributed mode. It's a good idea to start in standalone mode. @@ -92,15 +95,13 @@ curl -X POST -H "Content-Type: application/json" http://localhost:8083/connector This repository includes an example Dockerfile to run Kafka Connect in distributed mode. It also adds in the MQ source connector as an available connector plugin. It uses the default `connect-distributed.properties` and `connect-log4j.properties` files. 1. `mvn clean package` -1. `docker build -t kafkaconnect-with-mq-source: .` -1. `docker run -p 8083:8083 kafkaconnect-with-mq-source:` - -Substitute `` with the version of the connector or `latest` to use the latest version. +1. `docker build -t kafkaconnect-with-mq-source:2.0.0 .` +1. `docker run -p 8083:8083 kafkaconnect-with-mq-source:2.0.0` **NOTE:** To provide custom properties files create a folder called `config` containing the `connect-distributed.properties` and `connect-log4j.properties` files and use a Docker volume to make them available when running the container like this: ``` shell -docker run -v $(pwd)/config:/opt/kafka/config -p 8083:8083 kafkaconnect-with-mq-source: +docker run -v $(pwd)/config:/opt/kafka/config -p 8083:8083 kafkaconnect-with-mq-source:2.0.0 ``` To start the MQ connector, you can use `config/mq-source.json` in this repository after replacing all placeholders and use a command like this: @@ -276,31 +277,33 @@ For troubleshooting, or to better understand the handshake performed by the IBM The configuration options for the Kafka Connect source connector for IBM MQ are as follows: -| Name | Description | Type | Default | Valid values | -| --------------------------------------- | ---------------------------------------------------------------------- | ------- | -------------- | ------------------------------------------------------- | -| topic | The name of the target Kafka topic | string | | Topic name | -| mq.queue.manager | The name of the MQ queue manager | string | | MQ queue manager name | -| mq.connection.mode | The connection mode - bindings or client | string | client | client, bindings | -| mq.connection.name.list | List of connection names for queue manager | string | | host(port)[,host(port),...] 
| -| mq.channel.name | The name of the server-connection channel | string | | MQ channel name | -| mq.queue | The name of the source MQ queue | string | | MQ queue name | -| mq.user.name | The user name for authenticating with the queue manager | string | | User name | -| mq.password | The password for authenticating with the queue manager | string | | Password | -| mq.user.authentication.mqcsp | Whether to use MQ connection security parameters (MQCSP) | boolean | true | | -| mq.ccdt.url | The URL for the CCDT file containing MQ connection details | string | | URL for obtaining a CCDT file | -| mq.record.builder | The class used to build the Kafka Connect record | string | | Class implementing RecordBuilder | -| mq.message.body.jms | Whether to interpret the message body as a JMS message type | boolean | false | | -| mq.record.builder.key.header | The JMS message header to use as the Kafka record key | string | | JMSMessageID, JMSCorrelationID, JMSCorrelationIDAsBytes, JMSDestination | -| mq.jms.properties.copy.to.kafka.headers | Whether to copy JMS message properties to Kafka headers | boolean | false | | -| mq.ssl.cipher.suite | The name of the cipher suite for TLS (SSL) connection | string | | Blank or valid cipher suite | -| mq.ssl.peer.name | The distinguished name pattern of the TLS (SSL) peer | string | | Blank or DN pattern | -| mq.ssl.keystore.location | The path to the JKS keystore to use for SSL (TLS) connections | string | JVM keystore | Local path to a JKS file | -| mq.ssl.keystore.password | The password of the JKS keystore to use for SSL (TLS) connections | string | | | -| mq.ssl.truststore.location | The path to the JKS truststore to use for SSL (TLS) connections | string | JVM truststore | Local path to a JKS file | -| mq.ssl.truststore.password | The password of the JKS truststore to use for SSL (TLS) connections | string | | | -| mq.ssl.use.ibm.cipher.mappings | Whether to set system property to control use of IBM cipher mappings | boolean | | | -| mq.batch.size | The maximum number of messages in a batch (unit of work) | integer | 250 | 1 or greater | -| mq.message.mqmd.read | Whether to enable reading of all MQMD fields | boolean | false | | +| Name | Description | Type | Default | Valid values | +| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ------- | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| topic | The name of the target Kafka topic | string | | Topic name | +| mq.queue.manager | The name of the MQ queue manager | string | | MQ queue manager name | +| mq.connection.mode | The connection mode - bindings or client | string | client | client, bindings | +| mq.connection.name.list | List of connection names for queue manager | string | | host(port)[,host(port),...] 
| +| mq.channel.name | The name of the server-connection channel | string | | MQ channel name | +| mq.queue | The name of the source MQ queue | string | | MQ queue name | +| mq.exactly.once.state.queue | The name of the MQ queue used to store state when running with exactly-once semantics | string | | MQ state queue name | +| mq.user.name | The user name for authenticating with the queue manager | string | | User name | +| mq.password | The password for authenticating with the queue manager | string | | Password | +| mq.user.authentication.mqcsp | Whether to use MQ connection security parameters (MQCSP) | boolean | true | | +| mq.ccdt.url | The URL for the CCDT file containing MQ connection details | string | | URL for obtaining a CCDT file | +| mq.record.builder | The class used to build the Kafka Connect record | string | | Class implementing RecordBuilder | +| mq.message.body.jms | Whether to interpret the message body as a JMS message type | boolean | false | | +| mq.record.builder.key.header | The JMS message header to use as the Kafka record key | string | | JMSMessageID, JMSCorrelationID, JMSCorrelationIDAsBytes, JMSDestination | +| mq.jms.properties.copy.to.kafka.headers | Whether to copy JMS message properties to Kafka headers | boolean | false | | +| mq.ssl.cipher.suite | The name of the cipher suite for TLS (SSL) connection | string | | Blank or valid cipher suite | +| mq.ssl.peer.name | The distinguished name pattern of the TLS (SSL) peer | string | | Blank or DN pattern | +| mq.ssl.keystore.location | The path to the JKS keystore to use for SSL (TLS) connections | string | JVM keystore | Local path to a JKS file | +| mq.ssl.keystore.password | The password of the JKS keystore to use for SSL (TLS) connections | string | | | +| mq.ssl.truststore.location | The path to the JKS truststore to use for SSL (TLS) connections | string | JVM truststore | Local path to a JKS file | +| mq.ssl.truststore.password | The password of the JKS truststore to use for SSL (TLS) connections | string | | | +| mq.ssl.use.ibm.cipher.mappings | Whether to set system property to control use of IBM cipher mappings | boolean | | | +| mq.batch.size | The maximum number of messages in a batch (unit of work) | integer | 250 | 1 or greater | +| mq.message.mqmd.read | Whether to enable reading of all MQMD fields | boolean | false | | +| mq.max.poll.blocked.time.ms | How long the connector will wait for the previous batch of messages to be delivered to Kafka before starting a new poll | integer | 2000 | It is important that this is less than the time defined for `task.shutdown.graceful.timeout.ms` as that is how long connect will wait for the task to perform lifecycle operations. | ### Using a CCDT file @@ -337,8 +340,55 @@ mq.password=${file:mq-secret.properties:secret-key} To use a file for the `mq.password` in Kubernetes, you create a Secret using the file as described in [the Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). +## Exactly-once message delivery semantics + +The MQ source connector provides at-least-once message delivery by default. This means that each MQ message will be delivered to Kafka, but in failure scenarios it is possible to have duplicated messages delivered to Kafka. + +Version 2.0.0 of the MQ source connector introduced exactly-once message delivery semantics. An additional MQ queue is used to store the state of message deliveries. 
When exactly-once delivery is enabled all MQ messages are delivered to Kafka with no duplicated messages.
+
+### Exactly-once delivery Kafka Connect worker configuration
+
+To enable exactly-once delivery, the MQ source connector must be run on Kafka Connect version 3.3.0 or later with the `exactly.once.source.support` property set to `enabled` in the Kafka Connect worker configuration. See the [Kafka documentation](https://kafka.apache.org/documentation/#connect_exactlyoncesource) for more details about this setting, and the ACL requirements for the worker nodes. A minimal sketch showing where this worker property is set follows the MQ requirements section below.
+
+**Note**: Exactly-once support for source connectors is only available in [distributed mode](#running-in-distributed-mode); standalone Connect workers cannot provide exactly-once delivery semantics. Kafka Connect is in distributed mode when [running the connector with Docker](#running-with-docker) and when [deploying the connector to Kubernetes](#deploying-to-kubernetes).
+
+### Exactly-once delivery MQ source connector configuration
+
+To enable exactly-once delivery, the MQ source connector must be configured with the `mq.exactly.once.state.queue` property set to the name of a pre-configured MQ queue on the same queue manager as the source MQ queue. The MQ source connector only permits a [`transaction.boundary`](https://kafka.apache.org/documentation/#sourceconnectorconfigs_transaction.boundary) property value of `poll` (the default value), as exactly-once delivery requires the Kafka producer transactions to be started and committed for each batch of records that the MQ source connector provides to Kafka.
+
+Exactly-once delivery requires that only a single connector task can run in the Kafka Connect instance, hence the `tasks.max` property must be set to `1` to ensure that failure scenarios do not cause duplicated messages to be delivered.
+
+Exactly-once delivery also requires that the MQ source connector principal has a specific set of ACLs to be able to write transactionally to Kafka.
+See the [Kafka documentation](https://kafka.apache.org/documentation/#connect_exactlyoncesource) for the ACL requirements.
+
+To start the MQ source connector with exactly-once delivery, the `config/mq-source-exactly-once.json` file in this repository can be used as a connector configuration template.
+
+**Note**: Exactly-once delivery requires an empty state queue on start-up, otherwise the connector will behave as if it is recovering from a failure state and will attempt to redeliver the undelivered messages recorded in the out-of-date state message. Therefore, ensure that the state queue is empty each time exactly-once delivery is enabled (especially if re-enabling the exactly-once feature).
+
+### Exactly-once delivery MQ requirements
+
+The following MQ settings are recommended to facilitate exactly-once behaviour:
+
+- On the channel used for Kafka Connect, `HBINT` should be set to 30 seconds to allow MQ transaction rollbacks to occur more quickly in failure scenarios.
+- On the state queue, `DEFSOPT` should be set to `EXCL` to ensure the state queue share option is exclusive.
+
+Exactly-once delivery requires that messages are set to not expire and that all messages on both the source and state queues are persistent (this is to ensure correct behaviour around queue manager restarts).
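As a reference for the recommendations above, here is a minimal sketch of the MQSC commands an MQ administrator might run; the queue manager name (`QM1`), channel name (`KAFKA.SVRCONN`) and state queue name (`MQ.SOURCE.STATE.QUEUE`) are illustrative placeholders, not names defined by this repository:

``` shell
# Define a state queue that is persistent by default and has an exclusive share option
echo "DEFINE QLOCAL('MQ.SOURCE.STATE.QUEUE') DEFPSIST(YES) DEFSOPT(EXCL)" | runmqsc QM1

# Set a 30 second heartbeat interval on the server-connection channel used by Kafka Connect
echo "ALTER CHANNEL('KAFKA.SVRCONN') CHLTYPE(SVRCONN) HBINT(30)" | runmqsc QM1
```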
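For the `exactly.once.source.support` worker-level setting described at the start of this section, a minimal sketch of enabling it on a distributed worker, assuming the default `config/connect-distributed.properties` file used elsewhere in this README:

``` shell
# exactly.once.source.support is a Kafka Connect worker property (not a connector property)
# and requires the worker to be running Kafka Connect 3.3.0 or later
echo "exactly.once.source.support=enabled" >> config/connect-distributed.properties
```

The worker must be restarted for the change to take effect.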
+
+### Exactly-once delivery downstream Kafka consumer requirements
+
+Once the MQ source connector has delivered messages to Kafka with exactly-once semantics, downstream consumers of the Kafka topic must set the [`isolation.level`](https://kafka.apache.org/documentation/#consumerconfigs_isolation.level) configuration property to `read_committed` to ensure they are only consuming transactionally committed messages.
+
+### Exactly-once failure scenarios
+
+The MQ source connector is designed to fail on start-up in certain cases to ensure that exactly-once delivery is not compromised.
+In some of these failure scenarios, it will be necessary for an MQ administrator to remove messages from the exactly-once state queue before the MQ source connector can start up and begin to deliver messages from the source queue again. In these cases, the MQ source connector will have the `FAILED` status and the Kafka Connect logs will describe any required administrative action.
+
## Troubleshooting
+### Connector in a `FAILED` state
+
+If the connector experiences a non-retriable error then a ConnectException will cause the connector to go into a `FAILED` state. This will require a manual restart of the connector using the Kafka Connect REST API; a sketch of the restart call is included at the end of this README section.
+
### Unable to connect to Kafka
You may receive an `org.apache.kafka.common.errors.SslAuthenticationException: SSL handshake failed` error when trying to run the MQ source connector using SSL to connect to your Kafka cluster. In the case that the error is caused by the following exception: `Caused by: java.security.cert.CertificateException: No subject alternative DNS name matching XXXXX found.`, Java may be replacing the IP address of your cluster with the corresponding hostname in your `/etc/hosts` file. For example, to push Docker images to a custom Docker repository, you may add an entry in this file which corresponds to the IP of your repository e.g. `123.456.78.90 mycluster.icp`. To fix this, you can comment out this line in your `/etc/hosts` file. @@ -349,7 +399,7 @@ When configuring TLS connection to MQ, you may find that the queue manager rejec ## Support
-Commercial support for this connector is available for customers with a support entitlement for [IBM Event Automation](https://www.ibm.com/products/event-automation) or [IBM Cloud Pak for Integration](https://www.ibm.com/cloud/cloud-pak-for-integration).
+A commercially supported version of this connector is available for customers with a support entitlement for [IBM Event Streams](https://www.ibm.com/cloud/event-streams) or [IBM Cloud Pak for Integration](https://www.ibm.com/cloud/cloud-pak-for-integration).
## Issues and contributions @@ -357,7 +407,7 @@ For issues relating specifically to this connector, please use the [GitHub issue ## License
-Copyright 2017, 2020, 2023 IBM Corporation
+Copyright 2017, 2020, 2023, 2024 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -369,4 +419,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and
-limitations under the License.The project is licensed under the Apache 2 license.
+limitations under the License.The project is licensed under the Apache 2 license.
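As a reference for the downstream consumer requirement and the `FAILED`-state troubleshooting note above, a minimal sketch of both commands; the topic name, connector name, bootstrap address and Connect REST endpoint (`localhost:8083`, as in the earlier examples) are placeholders:

``` shell
# Consume only transactionally committed records from the target topic
./bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
  --topic mytopic --from-beginning \
  --consumer-property isolation.level=read_committed

# Restart a connector that has entered the FAILED state via the Kafka Connect REST API
curl -X POST http://localhost:8083/connectors/mq-source/restart
```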
\ No newline at end of file diff --git a/config/mq-source-exactly-once.json b/config/mq-source-exactly-once.json new file mode 100644 index 0000000..10d51c0 --- /dev/null +++ b/config/mq-source-exactly-once.json @@ -0,0 +1,19 @@ +{ + "name": "mq-source-exactly-once", + "config": + { + "connector.class": "com.ibm.eventstreams.connect.mqsource.MQSourceConnector", + "tasks.max": "1", + "topic": "", + + "key.converter": "org.apache.kafka.connect.storage.StringConverter", + "value.converter": "org.apache.kafka.connect.storage.StringConverter", + + "mq.queue.manager": "", + "mq.connection.name.list": "", + "mq.channel.name": "", + "mq.queue": "", + "mq.record.builder": "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder", + "mq.exactly.once.state.queue": "" + } +} diff --git a/config/mq-source.properties b/config/mq-source.properties index 5628f39..84cb262 100644 --- a/config/mq-source.properties +++ b/config/mq-source.properties @@ -1,4 +1,4 @@ -# Copyright 2017, 2020 IBM Corporation +# Copyright 2017, 2020, 2023, 2024 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ name=mq-source connector.class=com.ibm.eventstreams.connect.mqsource.MQSourceConnector # You can increase this for higher throughput, but message ordering will be lost +# Exactly-once message delivery requires tasks.max to be 1 +# (see README.md for more details). tasks.max=1 # The name of the target Kafka topic - required @@ -37,6 +39,10 @@ mq.channel.name= # The name of the source MQ queue - required mq.queue= +# This is required for exactly-once delivery. If not supplied, message delivery will be at-least-once. +# (see README.md for more details). +# mq.exactly.once.state.queue= + # The user name for authenticating with the queue manager - optional # mq.user.name= diff --git a/pom.xml b/pom.xml index fa80e70..a293abf 100644 --- a/pom.xml +++ b/pom.xml @@ -1,6 +1,6 @@ - + --> + 4.0.0 com.ibm.eventstreams.connect kafka-connect-mq-source jar - 1.3.5 + 2.1.0 kafka-connect-mq-source IBM Corporation @@ -46,13 +45,13 @@ org.apache.kafka connect-api - 3.4.0 + 3.6.2 provided org.apache.kafka connect-json - 3.4.0 + 3.6.2 provided @@ -81,14 +80,14 @@ org.slf4j - slf4j-log4j12 + slf4j-api 2.0.7 - test org.slf4j - slf4j-api + slf4j-simple 2.0.7 + test @@ -98,10 +97,34 @@ 1.17.6 test + + + org.assertj + assertj-core + 3.24.2 + test + + + + maven-resources-plugin + 3.3.1 + + ${project.build.outputDirectory}/META-INF + + + ${project.basedir} + + LICENSE + + + + + + maven-compiler-plugin 3.1 @@ -121,6 +144,9 @@ ${project.version} + + src/test/resources + @@ -133,6 +159,9 @@ ${project.version} + + src/test/resources + @@ -187,7 +216,6 @@ - org.jacoco @@ -200,8 +228,7 @@ prepare-agent - - ${project.build.directory}/jacoco-output/jacoco-unit-tests.exec + ${project.build.directory}/jacoco-output/jacoco-unit-tests.exec surefire.jacoco.args @@ -212,10 +239,9 @@ report - - ${project.build.directory}/jacoco-output/jacoco-unit-tests.exec - - ${project.reporting.outputDirectory}/jacoco-unit-test-coverage-report + ${project.build.directory}/jacoco-output/jacoco-unit-tests.exec + ${project.reporting.outputDirectory}/jacoco-unit-test-coverage-report + @@ -225,8 +251,7 @@ prepare-agent - - ${project.build.directory}/jacoco-output/jacoco-integration-tests.exec + ${project.build.directory}/jacoco-output/jacoco-integration-tests.exec failsafe.jacoco.args @@ -237,10 +262,10 @@ report - - 
${project.build.directory}/jacoco-output/jacoco-integration-tests.exec + ${project.build.directory}/jacoco-output/jacoco-integration-tests.exec - ${project.reporting.outputDirectory}/jacoco-integration-test-coverage-report + ${project.reporting.outputDirectory}/jacoco-integration-test-coverage-report + @@ -269,18 +294,18 @@ ${project.build.directory}/jacoco-output/merged.exec - - ${project.reporting.outputDirectory}/jacoco-merged-test-coverage-report + ${project.reporting.outputDirectory}/jacoco-merged-test-coverage-report + + org.apache.maven.plugins maven-checkstyle-plugin 3.2.0 - UTF-8 true true warning @@ -301,8 +326,6 @@ - - src/integration/resources diff --git a/src/assembly/package.xml b/src/assembly/package.xml index b28d245..68dab45 100644 --- a/src/assembly/package.xml +++ b/src/assembly/package.xml @@ -1,6 +1,6 @@ failure + .when(spyJMSWorker).receive(anyString(),any(QueueConfig.class), anyBoolean()); + + pollCommitAndAssert(connectTask, 5, 10); + + // Do a poll - this will fail as we've stopped the queue and throws a retriable exception. + List sourceRecords = Collections.EMPTY_LIST; + Exception exc = null; + try{ + sourceRecords = connectTask.poll(); + } catch (Exception e) { + exc = e; + } + assertThat(exc).isNotNull(); + assertThat(exc).isInstanceOf(RetriableException.class); + for (SourceRecord record : sourceRecords) { + connectTask.commitRecord(record); + } + + assertThat(sourceRecords.size()).isEqualTo(0); + + // fix our queue so we can continue... + startChannel(QMGR_NAME, REST_API_HOST_PORT, ADMIN_PASSWORD); + + assertThat(getMessageCount(DEFAULT_SOURCE_QUEUE)).isEqualTo(10); + + pollCommitAndAssert(connectTask, 5, 5); + + pollCommitAndAssert(connectTask, 5, 0); + + pollCommitAndAssert(connectTask, 0, 0); + + assertThat(getMessageCount(DEFAULT_SOURCE_QUEUE)).isEqualTo(0); + } + + @Test + public void testNoExceptionThrownWhenJMSExceptionThrownByBuilder_AndRecordsAreRollbackAndThenProcessedNextPoll() throws Exception { + JMSWorker sharedJMSWorker = new JMSWorker(); + sharedJMSWorker.configure(getPropertiesConfig(getConnectorProps())); + + JMSWorker dedicated = new JMSWorker(); + dedicated.configure(getPropertiesConfig(getConnectorProps())); + + DefaultRecordBuilder spyRecordBuilder = Mockito.spy(new DefaultRecordBuilder()); + + MQSourceTask connectTask = getSourceTaskWithEmptyKafkaOffset(); + + final Map connectorConfigProps = getConnectorProps(); + connectorConfigProps.put("mq.batch.size", "5"); + + connectTask.start( + connectorConfigProps, + sharedJMSWorker, + dedicated, + createSequenceStateClient(sharedJMSWorker, dedicated, connectorConfigProps) + ); + + final List messages = createAListOfMessages(getJmsContext(), 15, "Builder exception message: "); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, messages); + + assertThat(getMessageCount(DEFAULT_SOURCE_QUEUE)).isEqualTo(15); + + doCallRealMethod() + .doCallRealMethod() + .doCallRealMethod() + .doCallRealMethod() + .doCallRealMethod() // 5 + .doCallRealMethod() + .doCallRealMethod() + .doThrow(new JMSException("This is a JMSException caused by a spy!!")) // 8 + .doCallRealMethod() + // Be careful with these, any() does not cover null hence null is used + .when(spyRecordBuilder).toSourceRecord(any(JMSContext.class), eq(TOPIC_NAME), anyBoolean(), any(Message.class), any(), any()); + + // Needs to be done here, after the doCallRealMethods have been setup. 
+ sharedJMSWorker.setRecordBuilder(spyRecordBuilder); + + // Poll no issues, round 1 + pollCommitAndAssert(connectTask, 5, 10); + + // Do a poll - this will fail as we've stopped the queue and an exception is thrown. + pollCommitAndAssert(connectTask, 0, 10); + + // If batch complete signal is not null, program will hang forever. + assertThat(connectTask.getBatchCompleteSignal()).isNull(); + + pollCommitAndAssert(connectTask, 5, 5); + + pollCommitAndAssert(connectTask, 5, 0); + + pollCommitAndAssert(connectTask, 0, 0); + } + + @Test + public void verifyMessageBatchRollback() throws Exception { + connectTask = getSourceTaskWithEmptyKafkaOffset(); + + final Map connectorConfigProps = getConnectorProps(); + connectorConfigProps.put("mq.batch.size", "10"); + + connectTask.start(connectorConfigProps); + + final List messages = listOfMessagesButOneIsMalformed(getJmsContext()); + + Thread.sleep(5000L); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, messages); + + final List kafkaMessages; + + // first batch should successfully retrieve messages 01-10 + kafkaMessages = connectTask.poll(); + assertEquals(10, kafkaMessages.size()); + connectTask.commit(); + connectTask.commit(); + + // second batch (11-20) should fail because of message 16 + final ConnectException exc = assertThrows(ConnectException.class, () -> { + connectTask.poll(); + }); + + assertEquals("com.ibm.eventstreams.connect.mqsource.builders.RecordBuilderException: Unsupported JMS message type", exc.getMessage()); + + // there should be 20 messages left on the MQ queue (messages 11-30) + connectTask.stop(); + final List remainingMQMessages = getAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + assertEquals(20, remainingMQMessages.size()); + } + + + private static void pollCommitAndAssert(MQSourceTask connectTask, int recordsProcessed, int recordsLeft) throws Exception { + + List sourceRecords = new ArrayList<>(); + + try { + sourceRecords = connectTask.poll(); + for (SourceRecord record : sourceRecords) { + connectTask.commitRecord(record); + } + + } catch(Exception e) { + log.info("exception caught and thrown away during test: " + e); + } + + assertThat(sourceRecords.size()).isEqualTo(recordsProcessed); + + assertThat(getMessageCount(DEFAULT_SOURCE_QUEUE)).isEqualTo(recordsLeft); + } + +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskIT.java index 9c82bec..dba1908 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,75 +15,104 @@ */ package com.ibm.eventstreams.connect.mqsource; +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskObjectMother.getSourceTaskWithEmptyKafkaOffset; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.browseAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.getAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.putAllMessagesToQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MessagesObjectMother.createAListOfMessages; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatNoException; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; -import javax.jms.MapMessage; +import javax.jms.JMSException; import javax.jms.Message; import javax.jms.TextMessage; import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.source.SourceRecord; import org.junit.After; +import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateClient; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateException; +import com.ibm.eventstreams.connect.mqsource.util.QueueConfig; +import com.ibm.eventstreams.connect.mqsource.utils.JsonRestApi; +import com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil; import com.ibm.eventstreams.connect.mqsource.utils.SourceTaskStopper; + public class MQSourceTaskIT extends AbstractJMSContextIT { private MQSourceTask connectTask = null; @After - public void cleanup() throws InterruptedException { + public void after() throws InterruptedException { final SourceTaskStopper stopper = new SourceTaskStopper(connectTask); stopper.run(); } - private static final String MQ_QUEUE = "DEV.QUEUE.1"; + @Before + public void before() throws JMSException { + MQTestUtil.removeAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + MQTestUtil.removeAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + } private Map createDefaultConnectorProperties() { final Map props = new HashMap<>(); - props.put("mq.queue.manager", getQmgrName()); + props.put("mq.queue.manager", QMGR_NAME); props.put("mq.connection.mode", "client"); - props.put("mq.connection.name.list", getConnectionName()); - props.put("mq.channel.name", getChannelName()); - props.put("mq.queue", MQ_QUEUE); + props.put("mq.connection.name.list", DEFAULT_CONNECTION_NAME); + props.put("mq.channel.name", CHANNEL_NAME); + props.put("mq.queue", DEFAULT_SOURCE_QUEUE); props.put("mq.user.authentication.mqcsp", "false"); props.put("topic", "mytopic"); return props; } + private Map createExactlyOnceConnectorProperties() { + final Map props = createDefaultConnectorProperties(); + 
props.put("mq.exactly.once.state.queue", DEFAULT_STATE_QUEUE); + props.put("tasks.max", "1"); + return props; + } + @Test public void verifyJmsTextMessages() throws Exception { - connectTask = new MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); - connectorConfigProps.put("mq.record.builder", - "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); connectTask.start(connectorConfigProps); - + assertThat(connectTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.NORMAL_OPERATION); final TextMessage message1 = getJmsContext().createTextMessage("hello"); final TextMessage message2 = getJmsContext().createTextMessage("world"); - putAllMessagesToQueue(MQ_QUEUE, Arrays.asList(message1, message2)); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, Arrays.asList(message1, message2)); final List kafkaMessages = connectTask.poll(); assertEquals(2, kafkaMessages.size()); for (final SourceRecord kafkaMessage : kafkaMessages) { - assertEquals("mytopic", kafkaMessage.topic()); assertNull(kafkaMessage.key()); assertNull(kafkaMessage.valueSchema()); @@ -96,12 +125,11 @@ public void verifyJmsTextMessages() throws Exception { @Test public void verifyJmsJsonMessages() throws Exception { - connectTask = new MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); - connectorConfigProps.put("mq.record.builder", - "com.ibm.eventstreams.connect.mqsource.builders.JsonRecordBuilder"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.JsonRecordBuilder"); connectTask.start(connectorConfigProps); @@ -112,13 +140,12 @@ public void verifyJmsJsonMessages() throws Exception { "\"i\" : " + i + "}")); } - putAllMessagesToQueue(MQ_QUEUE, messages); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, messages); final List kafkaMessages = connectTask.poll(); assertEquals(5, kafkaMessages.size()); for (int i = 0; i < 5; i++) { final SourceRecord kafkaMessage = kafkaMessages.get(i); - assertEquals("mytopic", kafkaMessage.topic()); assertNull(kafkaMessage.key()); assertNull(kafkaMessage.valueSchema()); @@ -129,14 +156,39 @@ public void verifyJmsJsonMessages() throws Exception { } } + @Test + public void verifyMQMessage() throws Exception { + connectTask = getSourceTaskWithEmptyKafkaOffset(); + + final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.body.jms", "false"); //this could also be absent but if set to true the test should fail + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + + connectTask.start(connectorConfigProps); + + final String sent = "Hello World"; + final String url = "https://localhost:" + REST_API_HOST_PORT + "/ibmmq/rest/v1/messaging/qmgr/" + QMGR_NAME + "/queue/DEV.QUEUE.1/message"; + JsonRestApi.postString(url, "app", ADMIN_PASSWORD, sent); + + final List kafkaMessages = connectTask.poll(); // get all the SRs (1) + SourceRecord firstMsg = kafkaMessages.get(0); + Object received = firstMsg.value(); + + assertNotEquals(received.getClass(), String.class); //jms messages are retrieved as Strings + assertEquals(received.getClass(), 
byte[].class); + assertEquals(new String((byte[]) received, StandardCharsets.UTF_8), sent); + + connectTask.commitRecord(firstMsg); + connectTask.poll(); + } + @Test public void verifyJmsMessageHeaders() throws Exception { - connectTask = new MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); - connectorConfigProps.put("mq.record.builder", - "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); connectorConfigProps.put("mq.jms.properties.copy.to.kafka.headers", "true"); connectTask.start(connectorConfigProps); @@ -146,12 +198,11 @@ public void verifyJmsMessageHeaders() throws Exception { message.setIntProperty("volume", 11); message.setDoubleProperty("decimalmeaning", 42.0); - putAllMessagesToQueue(MQ_QUEUE, Arrays.asList(message)); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, Arrays.asList(message)); final List kafkaMessages = connectTask.poll(); assertEquals(1, kafkaMessages.size()); final SourceRecord kafkaMessage = kafkaMessages.get(0); - assertEquals("mytopic", kafkaMessage.topic()); assertNull(kafkaMessage.key()); assertNull(kafkaMessage.valueSchema()); @@ -166,21 +217,18 @@ public void verifyJmsMessageHeaders() throws Exception { @Test public void verifyMessageBatchIndividualCommits() throws Exception { - connectTask = new MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.body.jms", "true"); - connectorConfigProps.put("mq.record.builder", - "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); connectorConfigProps.put("mq.batch.size", "10"); connectTask.start(connectorConfigProps); - final List messages = new ArrayList<>(); - for (int i = 1; i <= 35; i++) { - messages.add(getJmsContext().createTextMessage("batch message " + i)); - } - putAllMessagesToQueue(MQ_QUEUE, messages); + final List messages = createAListOfMessages(getJmsContext(), 35, "batch message "); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, messages); int nextExpectedMessage = 1; @@ -217,21 +265,17 @@ public void verifyMessageBatchIndividualCommits() throws Exception { @Test public void verifyMessageBatchGroupCommits() throws Exception { - connectTask = new MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); - connectorConfigProps.put("mq.record.builder", - "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); connectorConfigProps.put("mq.batch.size", "10"); connectTask.start(connectorConfigProps); - final List messages = new ArrayList<>(); - for (int i = 1; i <= 35; i++) { - messages.add(getJmsContext().createTextMessage("message " + i)); - } - putAllMessagesToQueue(MQ_QUEUE, messages); + final List messages = createAListOfMessages(getJmsContext(), 35, "message "); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, messages); List kafkaMessages; @@ -260,59 +304,9 @@ public void 
verifyMessageBatchGroupCommits() throws Exception { } } - @Test - public void verifyMessageBatchRollback() throws Exception { - connectTask = new MQSourceTask(); - - final Map connectorConfigProps = createDefaultConnectorProperties(); - connectorConfigProps.put("mq.message.body.jms", "true"); - connectorConfigProps.put("mq.record.builder", - "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); - connectorConfigProps.put("mq.batch.size", "10"); - - connectTask.start(connectorConfigProps); - - // Test overview: - // - // messages 01-15 - valid messages - // message 16 - a message that the builder can't process - // messages 17-30 - valid messages - - final List messages = new ArrayList<>(); - for (int i = 1; i <= 15; i++) { - messages.add(getJmsContext().createTextMessage("message " + i)); - } - final MapMessage invalidMessage = getJmsContext().createMapMessage(); - invalidMessage.setString("test", "builder cannot convert this"); - messages.add(invalidMessage); - for (int i = 17; i <= 30; i++) { - messages.add(getJmsContext().createTextMessage("message " + i)); - } - putAllMessagesToQueue(MQ_QUEUE, messages); - - final List kafkaMessages; - - // first batch should successfully retrieve messages 01-10 - kafkaMessages = connectTask.poll(); - assertEquals(10, kafkaMessages.size()); - connectTask.commit(); - connectTask.commit(); - - // second batch (11-20) should fail because of message 16 - final ConnectException exc = assertThrows(ConnectException.class, () -> { - connectTask.poll(); - }); - assertTrue(exc.getMessage().equals("Unsupported JMS message type")); - - // there should be 20 messages left on the MQ queue (messages 11-30) - connectTask.stop(); - final List remainingMQMessages = getAllMessagesFromQueue(MQ_QUEUE); - assertEquals(20, remainingMQMessages.size()); - } - @Test public void verifyMessageIdAsKey() throws Exception { - connectTask = new MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); @@ -323,7 +317,7 @@ public void verifyMessageIdAsKey() throws Exception { connectTask.start(connectorConfigProps); final TextMessage message = getJmsContext().createTextMessage("testmessage"); - putAllMessagesToQueue(MQ_QUEUE, Arrays.asList(message)); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, Arrays.asList(message)); final List kafkaMessages = connectTask.poll(); assertEquals(1, kafkaMessages.size()); @@ -340,7 +334,7 @@ public void verifyMessageIdAsKey() throws Exception { @Test public void verifyCorrelationIdAsKey() throws Exception { - connectTask = new MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); @@ -354,7 +348,7 @@ public void verifyCorrelationIdAsKey() throws Exception { message1.setJMSCorrelationID("verifycorrel"); final TextMessage message2 = getJmsContext().createTextMessage("second message"); message2.setJMSCorrelationID("ID:5fb4a18030154fe4b09a1dfe8075bc101dfe8075bc104fe4"); - putAllMessagesToQueue(MQ_QUEUE, Arrays.asList(message1, message2)); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, Arrays.asList(message1, message2)); final List kafkaMessages = connectTask.poll(); assertEquals(2, kafkaMessages.size()); @@ -374,7 +368,7 @@ public void verifyCorrelationIdAsKey() throws Exception { @Test public void verifyCorrelationIdBytesAsKey() throws Exception { - connectTask = new 
MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); @@ -386,7 +380,7 @@ public void verifyCorrelationIdBytesAsKey() throws Exception { final TextMessage message = getJmsContext().createTextMessage("testmessagewithcorrelbytes"); message.setJMSCorrelationID("verifycorrelbytes"); - putAllMessagesToQueue(MQ_QUEUE, Arrays.asList(message)); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, Arrays.asList(message)); final List kafkaMessages = connectTask.poll(); assertEquals(1, kafkaMessages.size()); @@ -402,7 +396,7 @@ public void verifyCorrelationIdBytesAsKey() throws Exception { @Test public void verifyDestinationAsKey() throws Exception { - connectTask = new MQSourceTask(); + connectTask = getSourceTaskWithEmptyKafkaOffset(); final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); @@ -413,13 +407,13 @@ public void verifyDestinationAsKey() throws Exception { connectTask.start(connectorConfigProps); final TextMessage message = getJmsContext().createTextMessage("testmessagewithdest"); - putAllMessagesToQueue(MQ_QUEUE, Arrays.asList(message)); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, Arrays.asList(message)); final List kafkaMessages = connectTask.poll(); assertEquals(1, kafkaMessages.size()); final SourceRecord kafkaMessage = kafkaMessages.get(0); - assertEquals("queue:///" + MQ_QUEUE, kafkaMessage.key()); + assertEquals("queue:///" + DEFAULT_SOURCE_QUEUE, kafkaMessage.key()); assertEquals(Schema.OPTIONAL_STRING_SCHEMA, kafkaMessage.keySchema()); assertEquals("testmessagewithdest", kafkaMessage.value()); @@ -427,51 +421,160 @@ public void verifyDestinationAsKey() throws Exception { connectTask.commitRecord(kafkaMessage); } + @Test + public void testSequenceStateMsgReadUnderMQTx() throws Exception { + JMSWorker spyJMSWorker = Mockito.spy(new JMSWorker()); + + connectTask = getSourceTaskWithEmptyKafkaOffset(); + + final Map connectorConfigProps = createExactlyOnceConnectorProperties(); + connectorConfigProps.put("mq.message.body.jms", "true"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + + spyJMSWorker.configure(getPropertiesConfig(connectorConfigProps)); + JMSWorker dedicated = new JMSWorker(); + dedicated.configure(getPropertiesConfig(connectorConfigProps)); + SequenceStateClient sequenceStateClient = Mockito.spy(new SequenceStateClient(DEFAULT_STATE_QUEUE, spyJMSWorker, dedicated)); + + connectTask.start(connectorConfigProps, spyJMSWorker, dedicated, sequenceStateClient); + + final List messages = createAListOfMessages(getJmsContext(), 2, "message "); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, messages); + + List kafkaMessages; + kafkaMessages = connectTask.poll(); + + List stateMsgs1 = browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(stateMsgs1.size()).isEqualTo(1); + + for (final SourceRecord m : kafkaMessages) { + connectTask.commitRecord(m); + } + + /// make commit do rollback when poll is called + doAnswer((Void) -> { + spyJMSWorker.getContext().rollback(); + throw new Exception("such an exception"); + + }).when(spyJMSWorker).commit(); + + try { + connectTask.poll(); + } catch (Exception e) { + System.out.println("exception caught"); + } + + + /// expect statequeue to not be empty + List stateMsgs2 = getAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(stateMsgs2.size()).isEqualTo(1); 
+ + List sourceMsgs = getAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + assertThat(sourceMsgs.size()).isEqualTo(2); + + + } @Test - public void verifyEmptyMessage() throws Exception { - connectTask = new MQSourceTask(); + public void testSequenceStateMsgWrittenIndependentFromGetSource() throws Exception { + // setup test condition: put messages on source queue, poll once to read them + connectTask = getSourceTaskWithEmptyKafkaOffset(); + + final Map connectorConfigProps = createExactlyOnceConnectorProperties(); + connectorConfigProps.put("mq.message.body.jms", "true"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + + JMSWorker shared = new JMSWorker(); + shared.configure(getPropertiesConfig(connectorConfigProps)); + JMSWorker dedicated = new JMSWorker(); + dedicated.configure(getPropertiesConfig(connectorConfigProps)); + SequenceStateClient sequenceStateClient = new SequenceStateClient(DEFAULT_STATE_QUEUE, shared, dedicated); + + connectTask.start(connectorConfigProps, shared, dedicated, sequenceStateClient); + + final List messages = createAListOfMessages(getJmsContext(), 2, "message "); + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, messages); + + connectTask.poll(); + + List stateMsgs1 = browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(stateMsgs1.size()).isEqualTo(1); + shared.attemptRollback(); + assertThat(stateMsgs1.size()).isEqualTo(1); //state message is still there even though source message were rolled back + + } + + @Test + public void testRemoveDeliveredMessagesFromSourceQueueThrowsException() throws Exception { final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); - connectorConfigProps.put("mq.record.builder", - "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); - connectTask.start(connectorConfigProps); + JMSWorker spyJMSWorker = Mockito.spy(new JMSWorker()); + JMSWorker spyDedicated = Mockito.spy(new JMSWorker()); + JMSWorker spyShared = Mockito.spy(new JMSWorker()); - Message emptyMessage = getJmsContext().createMessage(); - putAllMessagesToQueue(MQ_QUEUE, Arrays.asList(emptyMessage)); + spyJMSWorker.configure(getPropertiesConfig(connectorConfigProps)); + spyDedicated.configure(getPropertiesConfig(connectorConfigProps)); + spyShared.configure(getPropertiesConfig(connectorConfigProps)); - final List kafkaMessages = connectTask.poll(); - assertEquals(1, kafkaMessages.size()); + Message messageSpy = Mockito.spy(getJmsContext().createTextMessage("Spy Injected Message")); - final SourceRecord kafkaMessage = kafkaMessages.get(0); - assertNull(kafkaMessage.value()); + doReturn("6") + .when(messageSpy) + .getJMSMessageID(); - connectTask.commitRecord(kafkaMessage); + doReturn(messageSpy) + .when(spyJMSWorker).receive( + anyString(), + any(QueueConfig.class), + anyBoolean()); + + connectTask = getSourceTaskWithEmptyKafkaOffset(); + connectTask.start(connectorConfigProps, spyJMSWorker, spyDedicated, new SequenceStateClient(DEFAULT_STATE_QUEUE, spyShared, spyJMSWorker)); + + String[] msgIds = new String[] {"1", "2"}; + + assertThrows(SequenceStateException.class, + () -> connectTask.removeDeliveredMessagesFromSourceQueue(Arrays.asList(msgIds)) + ); } @Test - public void verifyEmptyTextMessage() throws Exception { - connectTask = new MQSourceTask(); + public void 
testRemoveDeliveredMessagesFromSourceQueueDoesNotThrowException() throws Exception { final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.body.jms", "true"); - connectorConfigProps.put("mq.record.builder", - "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); - connectTask.start(connectorConfigProps); + JMSWorker spyJMSWorker = Mockito.spy(new JMSWorker()); + JMSWorker spyDedicated = Mockito.spy(new JMSWorker()); + JMSWorker spyShared = Mockito.spy(new JMSWorker()); - TextMessage emptyMessage = getJmsContext().createTextMessage(); - putAllMessagesToQueue(MQ_QUEUE, Arrays.asList(emptyMessage)); + spyJMSWorker.configure(getPropertiesConfig(connectorConfigProps)); + spyDedicated.configure(getPropertiesConfig(connectorConfigProps)); + spyShared.configure(getPropertiesConfig(connectorConfigProps)); - final List kafkaMessages = connectTask.poll(); - assertEquals(1, kafkaMessages.size()); + Message messageSpy = Mockito.spy(getJmsContext().createTextMessage("Spy Injected Message")); - final SourceRecord kafkaMessage = kafkaMessages.get(0); - assertNull(kafkaMessage.value()); + doReturn("1") + .when(messageSpy) + .getJMSMessageID(); - connectTask.commitRecord(kafkaMessage); + doReturn(messageSpy) + .when(spyJMSWorker).receive( + anyString(), + any(QueueConfig.class), + anyBoolean()); + + connectTask = getSourceTaskWithEmptyKafkaOffset(); + connectTask.start(connectorConfigProps, spyJMSWorker, spyDedicated, new SequenceStateClient(DEFAULT_STATE_QUEUE, spyShared, spyJMSWorker)); + + String[] msgIds = new String[] {"1", "2"}; + + assertThatNoException() + .isThrownBy(() -> connectTask.removeDeliveredMessagesFromSourceQueue(Arrays.asList(msgIds))); } } diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskObjectMother.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskObjectMother.java new file mode 100644 index 0000000..d327af7 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskObjectMother.java @@ -0,0 +1,52 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource; + +import static com.ibm.eventstreams.connect.mqsource.utils.SourceTaskContextObjectMother.emptyKafkaOffsetContext; +import static com.ibm.eventstreams.connect.mqsource.utils.SourceTaskContextObjectMother.kafkaContextWithOffsetGivenAs5; +import static com.ibm.eventstreams.connect.mqsource.utils.SourceTaskContextObjectMother.sourceTaskContextWithOffsetId; + +import org.apache.kafka.connect.source.SourceTaskContext; + +public class MQSourceTaskObjectMother { + + public static MQSourceTask getSourceTaskWithEmptyKafkaOffset() { + SourceTaskContext contextMock = emptyKafkaOffsetContext(); + MQSourceTask connectTask = new MQSourceTask(); + connectTask.initialize(contextMock); + return connectTask; + } + + public static MQSourceTask getSourceTaskWithKafkaOffset() { + SourceTaskContext contextMock = kafkaContextWithOffsetGivenAs5(); + MQSourceTask connectTask = new MQSourceTask(); + connectTask.initialize(contextMock); + return connectTask; + } + + public static MQSourceTask getSourceTaskWithKafkaOffset(long id) { + SourceTaskContext contextMock = sourceTaskContextWithOffsetId(id); + MQSourceTask connectTask = new MQSourceTask(); + connectTask.initialize(contextMock); + return connectTask; + } + + public static MQSourceTask getSourceTaskWithContext(SourceTaskContext context) { + MQSourceTask connectTask = new MQSourceTask(); + connectTask.initialize(context); + return connectTask; + } +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskOnlyOnceIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskOnlyOnceIT.java new file mode 100644 index 0000000..3630242 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskOnlyOnceIT.java @@ -0,0 +1,398 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceState; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateClient; +import com.ibm.eventstreams.connect.mqsource.util.LogMessages; +import com.ibm.eventstreams.connect.mqsource.utils.SourceTaskStopper; + +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.source.SourceRecord; +import org.jetbrains.annotations.NotNull; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import javax.jms.JMSException; +import javax.jms.Message; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskObjectMother.getSourceTaskWithEmptyKafkaOffset; +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskObjectMother.getSourceTaskWithKafkaOffset; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.browseAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.removeAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.getAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.getIDsOfMessagesCurrentlyOnQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.putAllMessagesToQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MessagesObjectMother.createAListOfMessages; +import static org.assertj.core.api.Assertions.assertThat; + +public class MQSourceTaskOnlyOnceIT extends AbstractJMSContextIT { + + private MQSourceTask connectTask; + private SequenceStateClient sequenceStateClient; + + @Before + public void startup() throws Exception { + connectTask = getSourceTaskWithEmptyKafkaOffset(); + removeAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + removeAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + final Map connectorConfigProps = connectionProperties(); + + final JMSWorker shared = new JMSWorker(); + shared.configure(getPropertiesConfig(connectorConfigProps)); + final JMSWorker dedicated = new JMSWorker(); + dedicated.configure(getPropertiesConfig(connectorConfigProps)); + sequenceStateClient = new SequenceStateClient(DEFAULT_STATE_QUEUE, shared, dedicated); + + connectTask.start(connectorConfigProps, shared, dedicated, sequenceStateClient); + } + + @After + public void cleanup() throws InterruptedException { + final SourceTaskStopper stopper = new SourceTaskStopper(connectTask); + stopper.run(); + + sequenceStateClient.closeClientConnections(); + } + + private Map connectionProperties() { + final Map props = new HashMap<>(); + props.put("mq.queue.manager", QMGR_NAME); + props.put("mq.connection.mode", "client"); + props.put("mq.connection.name.list", DEFAULT_CONNECTION_NAME); + props.put("mq.channel.name", CHANNEL_NAME); + props.put("mq.queue", DEFAULT_SOURCE_QUEUE); + props.put("mq.user.authentication.mqcsp", "false"); + props.put("mq.message.body.jms", "true"); + props.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + props.put("mq.exactly.once.state.queue", DEFAULT_STATE_QUEUE); + props.put("tasks.max", "1"); + props.put("topic", "mytopic"); + return props; + } + + private Map connectionPropertiesWithOnlyOnceDisabled() { + final Map props = connectionProperties(); + 
props.remove("mq.exactly.once.state.queue"); + return props; + } + + @NotNull + private List aListOfSomeMessages() throws Exception { + return Arrays.asList( + getJmsContext().createTextMessage("hello"), + getJmsContext().createTextMessage("world") + ); + } + + @Test + public void testPollGetSequenceId_GivenSequenceStateIsPresentOnQueue() throws Exception { + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + List allMessageIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE); + SequenceState deliveryState = new SequenceState( + 1, + allMessageIds, + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(deliveryState); + + connectTask = getSourceTaskWithEmptyKafkaOffset(); + + connectTask.start(connectionProperties()); + + final List kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::value) + .containsExactlyInAnyOrder("hello", "world"); + + assertThat(connectTask.getSequenceId().get()).isEqualTo(1L); + } + + @Test + public void testPollGetsSequenceId_GivenSequenceStateIsNotPresentOnQueue() throws Exception { + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + + final List kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::value) + .containsExactlyInAnyOrder("hello", "world"); + + assertThat(connectTask.getSequenceId().get()).isEqualTo(1L); + } + + @Test + public void testPollEndsWithPutSequenceStateOnQueue_GivenMessagesHaveBeenReceivedFromQueue() throws Exception { + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + + List allMessageIDsOnQueue = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE, 2); + + final List kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(getSequenceStateAndAssertNotEmpty()).isEqualTo(new SequenceState( + 1L, + allMessageIDsOnQueue, + SequenceState.LastKnownState.IN_FLIGHT + )); + + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::value) + .containsExactlyInAnyOrder("hello", "world"); + } + + @Test + public void testSequenceIDIncrementsBy1InLineWithBatchBehaviour_WhenPollIsCalled() throws Exception { + connectTask = getSourceTaskWithEmptyKafkaOffset(); + + final Map connectorConfigProps = connectionProperties(); + connectorConfigProps.put("mq.batch.size", "10"); + + connectTask.start(connectorConfigProps); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, createAListOfMessages(getJmsContext(), 35, "message ")); + + pollAndAssert(10, 1L); + + pollAndAssert(10, 2L); + + pollAndAssert(10, 3L); + + pollAndAssert(5, 4L); + } + + private void pollAndAssert(int expectedBatchSize, long sequenceId) throws Exception { + + List messageIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE, expectedBatchSize); + + List kafkaMessages = connectTask.poll(); + + assertThat(kafkaMessages) + .hasSize(expectedBatchSize) + .extracting(SourceRecord::sourceOffset) + .isNotNull() + .extracting((sourceOffset) -> (Long) sourceOffset.get("sequence-id")) + .contains(sequenceId); + + for (final SourceRecord m : kafkaMessages) { + connectTask.commitRecord(m); + } + + assertThat(getSequenceStateAndAssertNotEmpty()).isEqualTo(new SequenceState( + 
sequenceId, + messageIds, + SequenceState.LastKnownState.IN_FLIGHT + )); + } + + private SequenceState getSequenceStateAndAssertNotEmpty() { + SequenceState sequenceState; + try { + List stateMsgs = getAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(stateMsgs.size()).isEqualTo(1); + ObjectMapper mapper = new ObjectMapper(); + sequenceState = mapper.readValue(stateMsgs.get(0).getBody(String.class), SequenceState.class); + } catch (JMSException | IOException e) { + throw new RuntimeException(e); + }; + + return sequenceState; + } + + @Test + public void testSourceOffset() throws Exception { + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + + final List sourceRecords = connectTask.poll(); + + for (final SourceRecord sourceRecord : sourceRecords) { + connectTask.commitRecord(sourceRecord); + } + + assertThat(sourceRecords) + .hasSize(2) + .extracting(SourceRecord::sourceOffset) + .isNotNull() + .extracting((sourceOffset) -> (Long) sourceOffset.get("sequence-id")) + .containsExactlyInAnyOrder(1L, 1L); + } + + @Test + public void test_IfSequenceStateIsOnStateQueue_ThenActionIsREDELIVER_UNSENT_BATCH_SequenceIDIsValueFromStateQueue() throws Exception { + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + List allMessageIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE); + SequenceState deliveryState = new SequenceState( + 2, + allMessageIds, + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(deliveryState); + + MQSourceTask connectTask = getSourceTaskWithEmptyKafkaOffset(); + connectTask.start(connectionProperties()); + + final List kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::value) + .containsExactlyInAnyOrder("hello", "world"); + + assertThat(connectTask.getSequenceId().get()).isEqualTo(2L); + } + + @Test + public void testConnectorFirstTimeRunStart_ActionIsStandardGet_AndStateNotInKafka() throws Exception { + + // i.e State is not in Kafka + MQSourceTask connectTask = getSourceTaskWithEmptyKafkaOffset(); + connectTask.start(connectionProperties()); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + + final List kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::value) + .containsExactlyInAnyOrder("hello", "world"); + + assertThat(connectTask.getSequenceId().get()).isEqualTo(1L); + } + + @Test + public void testOnlyOnceDisabled_NoStateSaved_AndSequenceIdIncrements() throws Exception { + + // i.e State is not in Kafka + MQSourceTask connectTask = getSourceTaskWithKafkaOffset(10L); + connectTask.start(connectionPropertiesWithOnlyOnceDisabled()); + + // Assert that nothing put here after SourceTask start + assertThat(browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE)).isEmpty(); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + + + List kafkaMessages; + + // --- POLL 1 + + kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::sourceOffset) + .containsOnlyNulls(); + + assertThat(connectTask.getSequenceId().get()).isEqualTo(0L); + + 
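The getSequenceStateAndAssertNotEmpty helper above maps the state message body back into an object with Jackson. A standalone sketch of that mapping follows; the StateRecord POJO is hypothetical and only mirrors the fields these tests assert (sequence id, message ids, last known state).

    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.List;

    public class StateJsonSketch {
        // Minimal POJO matching the JSON layout asserted elsewhere in these tests.
        public static class StateRecord {
            public long sequenceId;
            public List<String> messageIds;
            public String lastKnownState;
        }

        public static void main(String[] args) throws Exception {
            String body = "{\"sequenceId\":1,"
                    + "\"messageIds\":[\"414d51example0010\"],"
                    + "\"lastKnownState\":\"IN_FLIGHT\"}";

            // Jackson binds the public fields directly; no annotations needed here.
            StateRecord state = new ObjectMapper().readValue(body, StateRecord.class);
            System.out.println(state.sequenceId + " " + state.lastKnownState);
        }
    }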
assertThat(browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE)).isEmpty(); + + + // --- POLL 2 + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + + kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::sourceOffset) + .containsOnlyNulls(); + + assertThat(connectTask.getSequenceId().get()).isEqualTo(0L); + + assertThat(browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE)).isEmpty(); + } + + @Test + public void testPoll_WhenMessagesAreNotRolledBack_TimeOutConnectExceptionIsThrown() throws Exception { + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfSomeMessages()); + List allMessageIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE); + List oldMessageIds = new ArrayList(); + oldMessageIds.add("0"); + oldMessageIds.add("1"); + assertThat(allMessageIds).doesNotContain("0"); + assertThat(allMessageIds).doesNotContain("1"); + SequenceState deliveryState = new SequenceState( + 2, + oldMessageIds, + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(deliveryState); + + MQSourceTask connectTask = getSourceTaskWithEmptyKafkaOffset(); + connectTask.start(connectionProperties()); + for(int i = 0 ; i < 300 ; i ++) { + connectTask.poll(); + } + + Exception exc = null; + try{ + connectTask.poll(); + } catch (Exception e) { + exc = e; + } + assertThat(exc).isNotNull(); + assertThat(exc).isInstanceOf(ConnectException.class); + assertThat(exc.getMessage()).isEqualTo(LogMessages.rollbackTimeout(oldMessageIds)); + } +} \ No newline at end of file diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskOnlyOnceStartBehaviourIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskOnlyOnceStartBehaviourIT.java new file mode 100644 index 0000000..992ea75 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskOnlyOnceStartBehaviourIT.java @@ -0,0 +1,573 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource; + +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceState; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateClient; +import com.ibm.eventstreams.connect.mqsource.utils.MQConnectionRollbackHelper; +import com.ibm.eventstreams.connect.mqsource.utils.SourceTaskStopper; +import org.apache.kafka.connect.connector.ConnectRecord; +import org.apache.kafka.connect.source.SourceRecord; +import org.jetbrains.annotations.NotNull; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import javax.jms.JMSException; +import javax.jms.Message; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskObjectMother.getSourceTaskWithContext; +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskObjectMother.getSourceTaskWithEmptyKafkaOffset; +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskObjectMother.getSourceTaskWithKafkaOffset; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.browseAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.removeAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.getIDsOfMessagesCurrentlyOnQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.putAllMessagesToQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.SourceTaskContextObjectMother.SourceTaskContextWithOffsetId; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertThrows; + +public class MQSourceTaskOnlyOnceStartBehaviourIT extends AbstractJMSContextIT { + + private MQSourceTask connectTask; + private SequenceStateClient sequenceStateClient; + + @Before + public void startup() throws Exception { + + JMSWorker shared = new JMSWorker(); + shared.configure(getPropertiesConfig(connectionProperties())); + shared.connect(); + JMSWorker dedicated = new JMSWorker(); + dedicated.configure(getPropertiesConfig(connectionProperties())); + dedicated.connect(); + sequenceStateClient = new SequenceStateClient(DEFAULT_STATE_QUEUE, shared, dedicated); + + removeAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + removeAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + } + + @After + public void cleanup() throws InterruptedException { + final SourceTaskStopper stopper = new SourceTaskStopper(connectTask); + stopper.run(); + + sequenceStateClient.closeClientConnections(); + } + + private Map connectionProperties() { + final Map props = new HashMap<>(); + props.put("mq.queue.manager", QMGR_NAME); + props.put("mq.connection.mode", "client"); + props.put("mq.connection.name.list", DEFAULT_CONNECTION_NAME); + props.put("mq.channel.name", CHANNEL_NAME); + props.put("mq.queue", DEFAULT_SOURCE_QUEUE); + props.put("mq.user.authentication.mqcsp", "false"); + props.put("mq.message.body.jms", "true"); + props.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + props.put("mq.exactly.once.state.queue", DEFAULT_STATE_QUEUE); + props.put("tasks.max", "1"); + props.put("topic", "mytopic"); + return props; + } + + @NotNull + private List aListOfTwoMessages() throws Exception { + return Arrays.asList( + getJmsContext().createTextMessage("hello"), + getJmsContext().createTextMessage("world") + ); + } + + @NotNull + private List 
aListOfEightMessages() throws Exception { + return Arrays.asList( + getJmsContext().createTextMessage("hello"), + getJmsContext().createTextMessage("world"), + getJmsContext().createTextMessage("this"), + getJmsContext().createTextMessage("is"), + getJmsContext().createTextMessage("a"), + getJmsContext().createTextMessage("longer"), + getJmsContext().createTextMessage("test"), + getJmsContext().createTextMessage("!") + ); + } + + @Test + public void testOnlyOnceStartBehaviour_GivenNoSequenceStateIsPresentOnQueueOrKafka() throws Exception { + + connectTask = getSourceTaskWithEmptyKafkaOffset(); // Kafka has no state + + connectTask.start(connectionProperties()); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfTwoMessages()); + List msgIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE); + + List messages = browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(messages).isEmpty(); // Check that MQ has no state + + final List kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + // Check that MQ has been read from + List mqMessages = browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + assertThat(mqMessages).isEmpty(); + + // Check that the messages have been created as source records correctly + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::sourceOffset) + .isNotNull() + .extracting((sourceOffset) -> (Long) sourceOffset.get("sequence-id")) + .containsExactlyInAnyOrder(1L, 1L); + + // Check that the internal sequence id has been set correctly + assertThat(connectTask.getSequenceId().get()).isEqualTo(1L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceState = sequenceStateClient.browse(); + assertThat(mqSequenceState) + .isNotEmpty() + .get() + .isEqualTo(new SequenceState(1L, msgIds, SequenceState.LastKnownState.IN_FLIGHT)); + } + + @Test + public void testOnlyOnceStartBehaviour_GivenNoSequenceStateIsPresentOnQueueButOffsetIsPresentInKafka() throws Exception { + + connectTask = getSourceTaskWithKafkaOffset(); // Kafka has state + + connectTask.start(connectionProperties()); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfTwoMessages()); + + List messages = browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(messages).isEmpty(); // Check that MQ has no state + + final List kafkaMessages = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessages) { + connectTask.commitRecord(kafkaMessage); + } + + // Check that MQ has been read from + List mqMessages = browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + assertThat(mqMessages).isEmpty(); + + // Check that the messages have been created as source records correctly + assertThat(kafkaMessages) + .hasSize(2) + .extracting(SourceRecord::sourceOffset) + .isNotNull() + .extracting((sourceOffset) -> (Long) sourceOffset.get("sequence-id")) + .containsExactlyInAnyOrder(6L, 6L); + + // Check that the internal sequence id has been set correctly + assertThat(connectTask.getSequenceId().get()).isEqualTo(6L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceState = sequenceStateClient.browse(); + assertThat(mqSequenceState).isNotEmpty(); + assertThat(mqSequenceState.get().getSequenceId()).isEqualTo(6L); + } + + @Test // this one needs to take in to account the message ids that should be re deilvered from MQ Also need assertion on state defined inthe start command + public void 
testOnlyOnceStartBehaviour_GivenSequenceStateIsPresentOnQueue_AndStateIsInFlight_AndStoredOffsetNotInKafka() throws Exception { + + connectTask = getSourceTaskWithEmptyKafkaOffset(); // Kafka has no state + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfEightMessages()); //adding 8 messages to source q + + List allMessageIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE); // getting all message ids for use in setting up state q entry and assertions + List firstBatchOfMessageIds = allMessageIds.subList(0, 5); + SequenceState deliveryState = new SequenceState( + 23, + firstBatchOfMessageIds, + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(deliveryState); + MQConnectionRollbackHelper rollbackTestHelper = new MQConnectionRollbackHelper(); + rollbackTestHelper.readNoCommit(DEFAULT_SOURCE_QUEUE, 5); + + Map props = connectionProperties(); + props.put("mq.batch.size", "5"); + + connectTask.start(props); // this is a start after a crash + + assertThat(connectTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.REDELIVER_UNSENT_BATCH); + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + assertThat(connectTask.getMsgIds()).isEqualTo(firstBatchOfMessageIds); + + final List kafkaMessagesInitialPoll = connectTask.poll(); + + // Check that no record have been returned since the inflight records transaction has ent been rolled back yet + assertThat(kafkaMessagesInitialPoll.size()).isEqualTo(0); + + // Check that the internal sequence id has not been incremented by poll since this poll call should return an empty list + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceState = sequenceStateClient.browse(); + assertThat(mqSequenceState).isNotEmpty(); + assertThat(mqSequenceState.get().getSequenceId()).isEqualTo(23L); + + rollbackTestHelper.rollback(); + + final List kafkaMessagesSecondPoll = connectTask.poll(); + + // Check that the messages have been created as source records correctly + assertThat(kafkaMessagesSecondPoll) + .hasSize(5) + .extracting(SourceRecord::sourceOffset) + .isNotNull() + .extracting((sourceOffset) -> (Long) sourceOffset.get("sequence-id")) + .containsExactlyInAnyOrder(23L, 23L, 23L, 23L, 23L); + + // Check that MQ has been read from + List mqMessages = browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + assertThat(mqMessages.size()).isEqualTo(3); + + // Check that the internal sequence id has been set correctly + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceStateSecondPoll = sequenceStateClient.browse(); + assertThat(mqSequenceStateSecondPoll).isNotEmpty(); + assertThat(mqSequenceStateSecondPoll.get().getSequenceId()).isEqualTo(23L); + } + + @Test + public void testOnlyOnceStartBehaviour_GivenSequenceStateIsPresentOnQueue_AndStateIsInFlight_AndStoredOffsetInKafka_AndMqSequenceDoesNotMatch() throws Exception { + connectTask = getSourceTaskWithKafkaOffset(22L); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfEightMessages()); //adding 8 messages to source q + + List allMessageIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE); // getting all message ids for use in setting up state q entry and assertions + List firstBatchOfMessageIds = allMessageIds.subList(0, 5); + + SequenceState deliveryState = new SequenceState( + 23, + firstBatchOfMessageIds, + SequenceState.LastKnownState.IN_FLIGHT + ); + + 
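The start-behaviour tests in this file cover the combinations of "state message on the MQ state queue" versus "offset stored in Kafka". A rough decision sketch, inferred only from what these tests assert and not taken from the connector's actual implementation (enum and method names here are illustrative), may help when reading them:

    public class StartUpDecisionSketch {
        enum Action { NORMAL_OPERATION, REDELIVER_UNSENT_BATCH, REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE }

        static Action decide(Long mqSequenceId, Long kafkaSequenceId, boolean stateIsDelivered) {
            if (mqSequenceId == null) {
                return Action.NORMAL_OPERATION; // no crash state left behind on the state queue
            }
            if (stateIsDelivered || mqSequenceId.equals(kafkaSequenceId)) {
                // Kafka already has this batch: clear it from the source queue instead of re-sending.
                return Action.REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE;
            }
            // Kafka never committed this batch: redeliver it under the same sequence id.
            return Action.REDELIVER_UNSENT_BATCH;
        }

        public static void main(String[] args) {
            System.out.println(decide(23L, null, false)); // REDELIVER_UNSENT_BATCH
            System.out.println(decide(23L, 22L, false));  // REDELIVER_UNSENT_BATCH
            System.out.println(decide(23L, 23L, false));  // REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE
            System.out.println(decide(23L, 22L, true));   // REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE
        }
    }

The four calls in main correspond to the scenarios exercised by the tests in this class.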
sequenceStateClient.write(deliveryState); + MQConnectionRollbackHelper rollbackTestHelper = new MQConnectionRollbackHelper(); + rollbackTestHelper.readNoCommit(DEFAULT_SOURCE_QUEUE, 5); + + Map props = connectionProperties(); + props.put("mq.batch.size", "5"); + + connectTask.start(props); // this is a start after a crash + + assertThat(connectTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.REDELIVER_UNSENT_BATCH); + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + assertThat(connectTask.getMsgIds()).isEqualTo(firstBatchOfMessageIds); + + final List kafkaMessagesInitialPoll = connectTask.poll(); + + // Check that no record have been returned since the inflight records transaction has ent been rolled back yet + assertThat(kafkaMessagesInitialPoll.size()).isEqualTo(0); + + // Check that the internal sequence id has not been incremented by poll since this poll call should return an empty list + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceState = sequenceStateClient.browse(); + assertThat(mqSequenceState).isNotEmpty(); + assertThat(mqSequenceState.get().getSequenceId()).isEqualTo(23L); + + rollbackTestHelper.rollback(); + + final List kafkaMessagesSecondPoll = connectTask.poll(); + + // Check that the messages have been created as source records correctly + assertThat(kafkaMessagesSecondPoll) + .hasSize(5) + .extracting(SourceRecord::sourceOffset) + .isNotNull() + .extracting((sourceOffset) -> (Long) sourceOffset.get("sequence-id")) + .containsExactlyInAnyOrder(23L, 23L, 23L, 23L, 23L); + + // Check that MQ has been read from + List mqMessages = browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + assertThat(mqMessages.size()).isEqualTo(3); + + // Check that the internal sequence id has been set correctly + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceStateSecondPoll = sequenceStateClient.browse(); + assertThat(mqSequenceStateSecondPoll).isNotEmpty(); + assertThat(mqSequenceStateSecondPoll.get().getSequenceId()).isEqualTo(23L); + } + + + + + @Test + public void testOnlyOnceStartBehaviour_GivenSequenceStateIsPresentOnQueue_AndStateIsInFlight_AndStoredOffsetInKafka_AndMqSequenceDoMatch() throws Exception { + connectTask = getSourceTaskWithKafkaOffset(23L); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfEightMessages()); //adding 8 messages to source q + + List allMessageIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE); // getting all message ids for use in setting up state q entry and assertions + List firstBatchOfMessageIds = allMessageIds.subList(0, 5); + + SequenceState deliveryState = new SequenceState( + 23, + firstBatchOfMessageIds, + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(deliveryState); + + // The next two lines hold the first 5 messages on the source queue in a tx so poll won't find them yet. 
rollbackTestHelper will roll back later to make them available + MQConnectionRollbackHelper rollbackTestHelper = new MQConnectionRollbackHelper(); + rollbackTestHelper.readNoCommit(DEFAULT_SOURCE_QUEUE, 5); + + Map props = connectionProperties(); + props.put("mq.batch.size", "5"); + + connectTask.start(props); // this is a start after a crash + + assertThat(connectTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE); + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + assertThat(connectTask.getMsgIds()).isEqualTo(firstBatchOfMessageIds); + + Optional mqSequenceStateInitialAfterStart = sequenceStateClient.browse(); + + assertThat(mqSequenceStateInitialAfterStart).isNotEmpty(); + assertThat(mqSequenceStateInitialAfterStart.get().getSequenceId()).isEqualTo(23L); + assertThat(mqSequenceStateInitialAfterStart.get().getMessageIds()).isEqualTo(firstBatchOfMessageIds); + assertThat(mqSequenceStateInitialAfterStart.get().getLastKnownState()).isEqualTo(SequenceState.LastKnownState.DELIVERED); + + final List kafkaMessagesInitialPoll = connectTask.poll(); + + // Check that no record have been returned since the inflight records transaction has ent been rolled back yet + assertThat(kafkaMessagesInitialPoll.size()).isEqualTo(0); + + // Check that the internal sequence id has not been incremented by poll since this poll call should return an empty list + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceState = sequenceStateClient.browse(); + assertThat(mqSequenceState).isNotEmpty(); + assertThat(mqSequenceState.get().getSequenceId()).isEqualTo(23L); + + rollbackTestHelper.rollback(); + + final List kafkaMessagesSecondPoll = connectTask.poll(); + + // Check that the messages have not been passed to Kafka again + assertThat(kafkaMessagesSecondPoll).hasSize(0); + + // Check that we're now back to normal operation + assertThat(connectTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.NORMAL_OPERATION); + + // Check that MQ has been read from + List mqMessages = browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + assertThat(mqMessages.size()).isEqualTo(3); // only the DELIVERED batch should have been removed + + // Check that MQ sequence state has been resolved/cleared correctly + Optional mqSequenceStateSecondPoll = sequenceStateClient.browse(); + assertThat(mqSequenceStateSecondPoll).isEmpty(); + + final List kafkaMessagesThirdPoll = connectTask.poll(); + Optional mqSequenceStateThirdPoll = sequenceStateClient.browse(); + + assertThat(kafkaMessagesThirdPoll).hasSize(3); + SequenceState sequenceStateThirdPoll = mqSequenceStateThirdPoll.get(); + assertThat(sequenceStateThirdPoll.getSequenceId()).isEqualTo(24L); //new state with increased sequence-id should be there + assertThat(sequenceStateThirdPoll.getMessageIds().size()).isEqualTo(3); //there should be three msgIds; + assertThat(sequenceStateThirdPoll.isInFlight()).isTrue(); //sequence should be in-flight + + } + + @Test + public void testOnlyOnceStartBehaviour_GivenSequenceStateIsPresentOnQueue_AndStateIsIsDelivered() throws Exception { + connectTask = getSourceTaskWithKafkaOffset(22L); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, aListOfEightMessages()); //adding 8 messages to source q + + List allMessageIds = getIDsOfMessagesCurrentlyOnQueue(DEFAULT_SOURCE_QUEUE); // getting all message ids for use in setting up state q entry and assertions + List firstBatchOfMessageIds = 
allMessageIds.subList(0, 5); + + SequenceState deliveryState = new SequenceState( + 23, + firstBatchOfMessageIds, + SequenceState.LastKnownState.DELIVERED + ); + + sequenceStateClient.write(deliveryState); + MQConnectionRollbackHelper rollbackTestHelper = new MQConnectionRollbackHelper(); + rollbackTestHelper.readNoCommit(DEFAULT_SOURCE_QUEUE, 5); + + Map props = connectionProperties(); + props.put("mq.batch.size", "5"); + + connectTask.start(props); // this is a start after a crash + + assertThat(connectTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE); + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + assertThat(connectTask.getMsgIds()).isEqualTo(firstBatchOfMessageIds); + + final List kafkaMessagesInitialPoll = connectTask.poll(); + + assertThat(kafkaMessagesInitialPoll.size()).isEqualTo(0); + + // Check that the internal sequence id has not been incremented by poll since this poll call should return an empty list + assertThat(connectTask.getSequenceId().get()).isEqualTo(23L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceState = sequenceStateClient.browse(); + assertThat(mqSequenceState).isNotEmpty(); + assertThat(mqSequenceState.get().getSequenceId()).isEqualTo(23L); + + rollbackTestHelper.rollback(); + + final List kafkaMessagesSecondPoll = connectTask.poll(); + assertThat(kafkaMessagesSecondPoll).hasSize(0); + assertThat(connectTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.NORMAL_OPERATION); + + final List kafkaMessagesThirdPoll = connectTask.poll(); + // Check that the messages have been created as source records correctly + assertThat(kafkaMessagesThirdPoll) + .hasSize(3) + .extracting(SourceRecord::sourceOffset) + .isNotNull() + .extracting((sourceOffset) -> (Long) sourceOffset.get("sequence-id")) + .containsOnly(24L); + + // Check that MQ has been read from + List mqMessages = browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE); + assertThat(mqMessages.size()).isEqualTo(0); + + // Check that the internal sequence id has been set correctly + assertThat(connectTask.getSequenceId().get()).isEqualTo(24L); + + // Check that MQ sequence state has been set correctly + Optional mqSequenceStateSecondPoll = sequenceStateClient.browse(); + assertThat(mqSequenceStateSecondPoll).isNotEmpty(); + assertThat(mqSequenceStateSecondPoll.get().getSequenceId()).isEqualTo(24L); + } + + @Test + public void testOnlyOnceStartBehaviour_CrashAfterKafkaCommitBeforeMQCommit() throws Exception { + connectTask = getSourceTaskWithEmptyKafkaOffset(); + + final Map connectorConfigProps = connectionProperties(); + connectorConfigProps.put("mq.message.body.jms", "true"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + connectorConfigProps.put("mq.batch.size", "2"); + + connectTask.start(connectorConfigProps); + + putAllMessagesToQueue(DEFAULT_SOURCE_QUEUE, + Arrays.asList( + getJmsContext().createTextMessage("hello"), + getJmsContext().createTextMessage("world"), + getJmsContext().createTextMessage("more"), + getJmsContext().createTextMessage("messages") + ) + ); + + final List kafkaMessagesRoundOne = connectTask.poll(); + + assertThat(kafkaMessagesRoundOne.size()).isEqualTo(2); + + for (final SourceRecord kafkaMessage : kafkaMessagesRoundOne) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(kafkaMessagesRoundOne) + .extracting(ConnectRecord::value) + .containsExactly( + "hello", "world" + ); + + 
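Several of these tests start the task against a faked Kafka offset (getSourceTaskWithKafkaOffset, SourceTaskContextWithOffsetId). A minimal sketch of how such a context can be mocked with Mockito is shown below; the "sequence-id" offset key follows what these tests assert, while the helper name is illustrative.

    import static org.mockito.ArgumentMatchers.anyMap;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.Collections;
    import java.util.Map;

    import org.apache.kafka.connect.source.SourceTaskContext;
    import org.apache.kafka.connect.storage.OffsetStorageReader;

    public class OffsetContextSketch {
        // Returns a SourceTaskContext whose offset reader reports the given sequence id
        // for any partition it is asked about.
        public static SourceTaskContext contextWithSequenceId(long sequenceId) {
            OffsetStorageReader reader = mock(OffsetStorageReader.class);
            Map<String, Object> offset = Collections.singletonMap("sequence-id", sequenceId);
            when(reader.offset(anyMap())).thenReturn(offset);

            SourceTaskContext context = mock(SourceTaskContext.class);
            when(context.offsetStorageReader()).thenReturn(reader);
            return context;
        }
    }

A task initialized with such a context behaves as if a previous run had already committed that sequence id to Kafka, which is what the crash/restart simulation in the next test relies on.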
assertThat(browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE).size()).isEqualTo(2); + + //// --- POLL 2 + + final List kafkaMessagesRoundTwo = connectTask.poll(); + + assertThat(kafkaMessagesRoundTwo.size()).isEqualTo(2); + + for (final SourceRecord kafkaMessage : kafkaMessagesRoundTwo) { + connectTask.commitRecord(kafkaMessage); + } + + assertThat(kafkaMessagesRoundTwo) + .extracting(ConnectRecord::value) + .containsExactly( + "more", "messages" + ); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE).size()).isEqualTo(0); + + long offsetId = connectTask.getSequenceId().get(); + + // To simulate crash + restart ---> Happening before mq commit has occurred + connectTask.stop(); + MQSourceTask connectTask = getSourceTaskWithContext(SourceTaskContextWithOffsetId(offsetId)); + connectTask.start(connectorConfigProps); + + /// ---- POLL 3 + final List kafkaMessagesRoundThree = connectTask.poll(); + + for (final SourceRecord kafkaMessage : kafkaMessagesRoundThree) { + connectTask.commitRecord(kafkaMessage); + } + + // These messages would have been returned if onlyOnce wasn't implemented. + assertThat(kafkaMessagesRoundThree) + .extracting(ConnectRecord::value) + .doesNotContain( + "more", "messages" + ); + + assertThat(kafkaMessagesRoundThree).isEmpty(); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SOURCE_QUEUE).size()).isEqualTo(0); + } + + @Test + public void testOnceOnceStart_ThrowErrorWhenThereAreMultipleStateMsgs() throws JMSException { + + connectTask = getSourceTaskWithEmptyKafkaOffset(); + final Map connectorConfigProps = connectionProperties(); + connectorConfigProps.put("mq.message.body.jms", "true"); + connectorConfigProps.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + connectorConfigProps.put("mq.batch.size", "2"); + + final SequenceState sampleState = new SequenceState(1, new ArrayList<>(Arrays.asList("414d51204d59514d475220202020202033056b6401d50010")),SequenceState.LastKnownState.IN_FLIGHT); + sequenceStateClient.write(sampleState); + sequenceStateClient.write(sampleState); + + assertThrows(Exception.class, ()-> connectTask.start(connectorConfigProps)); + } +} \ No newline at end of file diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/SequenceStateClientIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/SequenceStateClientIT.java new file mode 100644 index 0000000..95dd9a2 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/SequenceStateClientIT.java @@ -0,0 +1,336 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource; + +import static com.ibm.eventstreams.connect.mqsource.utils.MQQueueManagerAttrs.enableQueuePUT; +import static com.ibm.eventstreams.connect.mqsource.utils.MQQueueManagerAttrs.inhibitQueuePUT; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.browseAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.removeAllMessagesFromQueue; +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.getAllMessagesFromQueue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertThrows; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import javax.jms.JMSException; +import javax.jms.Message; +import javax.jms.TextMessage; + +import org.jetbrains.annotations.NotNull; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceState; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateClient; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateException; +import com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil; + +public class SequenceStateClientIT extends AbstractJMSContextIT { + + private SequenceStateClient sequenceStateClient; + private JMSWorker shared; + + @Before + public void createSequenceStateClient() { + Map props = getDefaultConnectorProperties(); + props.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + shared = new JMSWorker(); + shared.configure(getPropertiesConfig(props)); + JMSWorker dedicated = new JMSWorker(); + dedicated.configure(getPropertiesConfig(props)); + sequenceStateClient = new SequenceStateClient(DEFAULT_STATE_QUEUE, shared, dedicated); + } + + @After + public void closeConnectionsAndClearStateQueue() throws JMSException, IOException, NoSuchAlgorithmException, KeyManagementException { + sequenceStateClient.closeClientConnections(); + enableQueuePUT(QMGR_NAME, REST_API_HOST_PORT, ADMIN_PASSWORD, DEFAULT_STATE_QUEUE); + removeAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + } + + @Test + public void testWriteState() throws Exception { + SequenceState sequenceState = new SequenceState( + 314, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d51010", + "414d51204d59514d475220202020202033056b6401d51011", + "414d51204d59514d475220202020202033056b6401d51012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(sequenceState); + TextMessage firstMessage = (TextMessage) getAllMessagesFromQueue(DEFAULT_STATE_QUEUE).get(0); + + String expectedResult = + "{" + + "\"sequenceId\":314," + + "\"messageIds\":[" + + "\"414d51204d59514d475220202020202033056b6401d51010\"," + + "\"414d51204d59514d475220202020202033056b6401d51011\"," + + "\"414d51204d59514d475220202020202033056b6401d51012\"" + + "]," + + "\"lastKnownState\":\"IN_FLIGHT\"" + + "}"; + + assertThat(firstMessage.getText()).isEqualTo(expectedResult); + } + + @Test + public void testWriteState_HasCommitted() throws JMSException { + + SequenceState sequenceState1 = new SequenceState( + 314, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d51010", + 
"414d51204d59514d475220202020202033056b6401d51011", + "414d51204d59514d475220202020202033056b6401d51012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(sequenceState1); + sequenceStateClient.closeClientConnections(); + + TextMessage stateMessage = (TextMessage) getAllMessagesFromQueue(DEFAULT_STATE_QUEUE).get(0); + assertThat(stateMessage.getText().contains("FLIGHT")).isTrue(); + } + + @Test + public void testRetrieveStateInSharedTx_ReturnsCorrectState() throws Exception { + SequenceState sequenceState1 = new SequenceState( + 314, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(sequenceState1); + + Optional sequenceState = sequenceStateClient.retrieveStateInSharedTx(); + this.shared.commit(); + assertThat(sequenceState).isNotEmpty(); + assertThat(sequenceState.get()).isEqualTo(sequenceState1); + } + + @Test + public void testRetrieveStateInSharedTx_IsRolledBackWithSharedJMSWorker() throws Exception { + // setup test + SequenceState sequenceState1 = new SequenceState( + 314, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + sequenceStateClient.write(sequenceState1); + assertThat(browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE).size()).isEqualTo(1); + + // retrieve state Message + Optional sequenceState = sequenceStateClient.retrieveStateInSharedTx(); + assertThat(sequenceState).isNotEmpty(); + assertThat(browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE).size()).isEqualTo(0); + + // roll back + shared.attemptRollback(); + assertThat(browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE).size()).isEqualTo(1); + + } + + @Test + public void testRetrieveStateInSharedTx_GivenNoPreviousStateStoredOnQueue() throws Exception { + assertThat(sequenceStateClient.retrieveStateInSharedTx()).isEmpty(); + } + + @Test + public void test_replaceState_happyPath() throws JMSException { + //setup state before call + int sequenceId = 314; + ArrayList localMsgIds = new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )); + SequenceState origState = new SequenceState( + sequenceId, + localMsgIds, + SequenceState.LastKnownState.IN_FLIGHT + ); + sequenceStateClient.write(origState); + final List stateMsgs = browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(stateMsgs.size()).isEqualTo(1); + + // call replace + SequenceState newState = new SequenceState(sequenceId, localMsgIds, SequenceState.LastKnownState.DELIVERED); + sequenceStateClient.replaceState(newState); + + // assert that the state is now delivered on the queue + final List stateMsgs_afterReplace = browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(stateMsgs_afterReplace.size()).isEqualTo(1); + assertThat(sequenceStateClient.browse().get()).isEqualTo(newState); + } + + @Test + public void test_replaceState_isRolledBackIfWriteFails() throws JMSException, IOException, NoSuchAlgorithmException, KeyManagementException { + //setup state before call + int sequenceId = 314; + ArrayList localMsgIds = new ArrayList<>(Arrays.asList( + 
"414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )); + SequenceState origState = new SequenceState( + sequenceId, + localMsgIds, + SequenceState.LastKnownState.IN_FLIGHT + ); + sequenceStateClient.write(origState); + final List stateMsgs = browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(stateMsgs.size()).isEqualTo(1); + + // disable queue so that put/write will fail + inhibitQueuePUT(QMGR_NAME, REST_API_HOST_PORT, ADMIN_PASSWORD, DEFAULT_STATE_QUEUE); + + // call replace + SequenceState newState = new SequenceState(sequenceId, localMsgIds, SequenceState.LastKnownState.DELIVERED); + + assertThrows(Exception.class, () -> sequenceStateClient.replaceState(newState)); + + List stateMsgs_After = getAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThat(stateMsgs_After.size()).isEqualTo(1); + String payload = stateMsgs_After.get(0).getBody(String.class); + assertThat(payload.contains("DELIVERED")).isFalse(); + assertThat(payload.contains("FLIGHT")).isTrue(); + } + + @Test + public void test_MessageCanBeConvertedToSequenceState() throws Exception{ + + SequenceStateClient sequenceStateClient = new SequenceStateClient( + DEFAULT_STATE_QUEUE, + Mockito.mock(JMSWorker.class), + Mockito.mock(JMSWorker.class) + ); + + SequenceState sequenceState = new SequenceState( + 314, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d51010", + "414d51204d59514d475220202020202033056b6401d51011", + "414d51204d59514d475220202020202033056b6401d51012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + MQTestUtil.putAllMessagesToQueue(DEFAULT_STATE_QUEUE, aListOfOneSequenceStateTextMessage()); + List messages = MQTestUtil.browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + SequenceState state = sequenceStateClient.messageToStateObject(messages.get(0)); + assertThat(state).isEqualTo(sequenceState); + } + + @Test + public void test_MessageCanNotBeCastToTextMessage() throws Exception{ + + SequenceStateClient sequenceStateClient = new SequenceStateClient( + DEFAULT_STATE_QUEUE, + Mockito.mock(JMSWorker.class), + Mockito.mock(JMSWorker.class) + ); + + SequenceState sequenceState = new SequenceState( + 314, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d51010", + "414d51204d59514d475220202020202033056b6401d51011", + "414d51204d59514d475220202020202033056b6401d51012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + MQTestUtil.putAllMessagesToQueue(DEFAULT_STATE_QUEUE, aListOfOneBytesMessage()); + List messages = MQTestUtil.browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + assertThrows(SequenceStateException.class, () -> sequenceStateClient.messageToStateObject(messages.get(0))); + } + + @Test + public void test_MessageCanNotBeConvertedToSequenceState() throws Exception{ + + SequenceStateClient sequenceStateClient = new SequenceStateClient( + DEFAULT_STATE_QUEUE, + Mockito.mock(JMSWorker.class), + Mockito.mock(JMSWorker.class) + ); + + SequenceState sequenceState = new SequenceState( + 314, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d51010", + "414d51204d59514d475220202020202033056b6401d51011", + "414d51204d59514d475220202020202033056b6401d51012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + MQTestUtil.putAllMessagesToQueue(DEFAULT_STATE_QUEUE, aListOfOneStringMessage()); + List messages = MQTestUtil.browseAllMessagesFromQueue(DEFAULT_STATE_QUEUE); + 
assertThrows(SequenceStateException.class, () -> sequenceStateClient.messageToStateObject(messages.get(0))); + } + + @NotNull + private List aListOfOneBytesMessage() throws Exception { + return Arrays.asList( + getJmsContext().createBytesMessage() + ); + } + + @NotNull + private List aListOfOneSequenceStateTextMessage() throws Exception { + String stateAsText = + "{" + + "\"sequenceId\":314," + + "\"messageIds\":[" + + "\"414d51204d59514d475220202020202033056b6401d51010\"," + + "\"414d51204d59514d475220202020202033056b6401d51011\"," + + "\"414d51204d59514d475220202020202033056b6401d51012\"" + + "]," + + "\"lastKnownState\":\"IN_FLIGHT\"" + + "}"; + return Arrays.asList(getJmsContext().createTextMessage(stateAsText)); + } + + @NotNull + private List aListOfOneStringMessage() throws Exception { + String text = "GenericTextMessage"; + return Arrays.asList(getJmsContext().createTextMessage(text)); + } + +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/builders/DefaultRecordBuilderIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/builders/DefaultRecordBuilderIT.java index 8062ba8..41dad6f 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsource/builders/DefaultRecordBuilderIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/builders/DefaultRecordBuilderIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,19 +15,22 @@ */ package com.ibm.eventstreams.connect.mqsource.builders; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; +import java.util.HashMap; +import java.util.Map; + import javax.jms.BytesMessage; import javax.jms.MapMessage; import javax.jms.MessageFormatException; import javax.jms.TextMessage; import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.source.SourceRecord; import org.junit.Test; @@ -49,7 +52,7 @@ public void buildFromJmsMapMessage() throws Exception { // use the builder to convert it to a Kafka record final DefaultRecordBuilder builder = new DefaultRecordBuilder(); - final ConnectException exc = assertThrows(ConnectException.class, () -> { + final RecordBuilderException exc = assertThrows(RecordBuilderException.class, () -> { builder.toSourceRecord(getJmsContext(), TOPIC, isJMS, message); }); @@ -135,4 +138,35 @@ public void buildFromBytesMessage() throws Exception { assertArrayEquals(messageContents, (byte[]) record.value()); assertEquals(Schema.OPTIONAL_BYTES_SCHEMA, record.valueSchema()); } + + @Test + public void testBuildWithOffset() throws Exception { + final String messageContents = "This is the JMS message contents"; + final boolean isJMS = true; + + // create MQ message + final TextMessage message = getJmsContext().createTextMessage(messageContents); + + // use the builder to convert it to a Kafka record + final DefaultRecordBuilder builder = new DefaultRecordBuilder(); + + Map sourceOffset = new HashMap<>(); + sourceOffset.put("sequence-id", 0L); + + Map sourcePartition = new HashMap<>(); + sourcePartition.put("source", "myqmgr/myq"); + + + final SourceRecord record = 
builder.toSourceRecord(getJmsContext(), TOPIC, isJMS, message, sourceOffset, sourcePartition); + + assertThat(record).isNotNull(); + assertThat(record.sourceOffset()).isEqualTo(sourceOffset); + assertThat(record.sourcePartition()).isEqualTo(sourcePartition); + + // verify the Kafka record + assertNull(record.key()); + assertEquals(messageContents, record.value()); + assertNull(record.valueSchema()); + } + } diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/builders/JsonRecordBuilderIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/builders/JsonRecordBuilderIT.java index df87893..fd8f055 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsource/builders/JsonRecordBuilderIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/builders/JsonRecordBuilderIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,7 +27,6 @@ import javax.jms.MapMessage; import javax.jms.TextMessage; -import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.source.SourceRecord; import org.junit.Test; @@ -96,7 +95,7 @@ public void buildFromJmsMapMessage() throws Exception { // use the builder to convert it to a Kafka record final JsonRecordBuilder builder = new JsonRecordBuilder(); - final ConnectException exc = assertThrows(ConnectException.class, () -> { + final RecordBuilderException exc = assertThrows(RecordBuilderException.class, () -> { builder.toSourceRecord(getJmsContext(), topic, isJMS, message); }); diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/JsonRestApi.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/JsonRestApi.java index 86246dc..b871dca 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/JsonRestApi.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/JsonRestApi.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,6 +15,16 @@ */ package com.ibm.eventstreams.connect.mqsource.utils; +import org.json.JSONException; +import org.json.JSONObject; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509TrustManager; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -28,17 +38,6 @@ import java.security.cert.X509Certificate; import java.util.Base64; -import javax.net.ssl.HostnameVerifier; -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSession; -import javax.net.ssl.SSLSocketFactory; -import javax.net.ssl.TrustManager; -import javax.net.ssl.X509TrustManager; - -import org.json.JSONException; -import org.json.JSONObject; - public class JsonRestApi { public static JSONObject jsonPost(final String url, final String username, final String password, @@ -63,6 +62,31 @@ public static JSONObject jsonPost(final String url, final String username, final } } + public static void postString(final String url, final String username, final String password, + final String payload) throws IOException, KeyManagementException, NoSuchAlgorithmException, JSONException { + final URL urlObj = new URL(url); + final HttpsURLConnection urlConnection = (HttpsURLConnection) urlObj.openConnection(); + urlConnection.setHostnameVerifier(new IgnoreCertVerifier()); + urlConnection.setSSLSocketFactory(getTrustAllCertsFactory()); + urlConnection.setRequestProperty("Authorization", getAuthHeader(username, password)); + urlConnection.setRequestProperty("Content-Type", "text/plain"); + urlConnection.setRequestProperty("ibm-mq-rest-csrf-token", "junit"); + urlConnection.setDoOutput(true); + + try (OutputStream os = urlConnection.getOutputStream()) { + final byte[] input = payload.getBytes("utf-8"); + os.write(input, 0, input.length); + } + + try (InputStream input = urlConnection.getInputStream()) { + final BufferedReader re = new BufferedReader(new InputStreamReader(input, Charset.forName("utf-8"))); + String output; + while ((output = re.readLine()) != null) { + System.out.println(output); + } + } + } + private static String read(final Reader re) throws IOException { final StringBuilder str = new StringBuilder(); int ch; diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQConnectionRollbackHelper.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQConnectionRollbackHelper.java new file mode 100644 index 0000000..ed3c127 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQConnectionRollbackHelper.java @@ -0,0 +1,69 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource.utils; + +import static com.ibm.eventstreams.connect.mqsource.utils.MQTestUtil.getJmsConnectionFactory; + +import java.util.ArrayList; +import java.util.List; + +import javax.jms.Connection; +import javax.jms.Destination; +import javax.jms.JMSException; +import javax.jms.Message; +import javax.jms.MessageConsumer; +import javax.jms.Session; + +import com.ibm.msg.client.jms.JmsConnectionFactory; + +public class MQConnectionRollbackHelper { + + private Connection connection; + private Session session; + private Destination destination; + private MessageConsumer consumer; + + public List readNoCommit(final String queueName, final int batchSize) throws JMSException { + + final JmsConnectionFactory cf = getJmsConnectionFactory(); + + connection = cf.createConnection(); + session = connection.createSession(true, Session.AUTO_ACKNOWLEDGE); + + destination = session.createQueue(queueName); + consumer = session.createConsumer(destination); + + connection.start(); + + final List messages = new ArrayList<>(); + int messageCount = 0; + Message message; + do { + message = consumer.receiveNoWait(); + if (message != null) { + messages.add(message); + messageCount++; + } + } + while (message != null && messageCount < batchSize); + + return messages; + } + + public void rollback() throws JMSException { + session.rollback(); + } +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQQueueManagerAttrs.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQQueueManagerAttrs.java index b358b85..dfc64e3 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQQueueManagerAttrs.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQQueueManagerAttrs.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,15 +15,36 @@ */ package com.ibm.eventstreams.connect.mqsource.utils; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.json.JSONException; +import org.json.JSONObject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.IOException; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; -import org.json.JSONException; -import org.json.JSONObject; - public class MQQueueManagerAttrs { + private static final Logger log = LoggerFactory.getLogger(MQQueueManagerAttrs.class); + + public static String generateGetPutMqscCommand(final String queueName, final boolean enable, final boolean get) throws JsonProcessingException { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode rootNode = mapper.createObjectNode(); + rootNode.put("type", "runCommand"); + + ObjectNode parametersNode = mapper.createObjectNode(); + String operation = get ? "GET" : "PUT"; + String property = enable ? 
"ENABLED" : "DISABLED"; + parametersNode.put("command", "ALTER QLOCAL(\'" + queueName + "\') "+operation+"("+property+")" ); + + rootNode.put("parameters", parametersNode); + return mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode); + }; + private static final String REQ_GET_SVRCONNS = "{" + " \"type\": \"runCommand\"," + " \"parameters\": {" @@ -31,18 +52,83 @@ public class MQQueueManagerAttrs { + " }" + "}"; + private static final String REQ_STOP_QUEUE = "{" + + " \"type\": \"runCommand\"," + + " \"parameters\": {" + + " \"command\": \"STOP CHANNEL('DEV.APP.SVRCONN') MODE(QUIESCE)\"" + + " }" + + "}"; + + private static final String REQ_START_QUEUE = "{" + + " \"type\": \"runCommand\"," + + " \"parameters\": {" + + " \"command\": \"START CHANNEL('DEV.APP.SVRCONN')\"" + + " }" + + "}"; + + private static final String INHIBIT_GET_ON_QUEUE = "{" + + " \"type\": \"runCommand\"," + + " \"parameters\": {" + + " \"command\": \"ALTER QLOCAL('DEV.QUEUE.1') GET(DISABLED)\"" + + " }" + + "}"; + + private static final String ENABLE_QUEUE = "{" + + " \"type\": \"runCommand\"," + + " \"parameters\": {" + + " \"command\": \"ALTER QLOCAL('DEV.QUEUE.1') GET(ENABLED)\"" + + " }" + + "}"; + + + public static int inhibitQueueGET(final String qmgrname, final int portnum, final String password) + throws IOException, NoSuchAlgorithmException, KeyManagementException { + return sendCommand(qmgrname, portnum, password, INHIBIT_GET_ON_QUEUE); + } + + public static int inhibitQueuePUT(final String qmgrname, final int portnum, final String password, final String queueName) + throws IOException, NoSuchAlgorithmException, KeyManagementException { + return sendCommand(qmgrname, portnum, password, generateGetPutMqscCommand(queueName, false, false)); + } + + public static int enableQueueGET(final String qmgrname, final int portnum, final String password) + throws IOException, NoSuchAlgorithmException, KeyManagementException { + return sendCommand(qmgrname, portnum, password, ENABLE_QUEUE); + } + + public static int enableQueuePUT(final String qmgrname, final int portnum, final String password, final String queueName) + throws IOException, NoSuchAlgorithmException, KeyManagementException { + return sendCommand(qmgrname, portnum, password, generateGetPutMqscCommand(queueName, true, false)); + } + + public static int startChannel(final String qmgrname, final int portnum, final String password) + throws IOException, NoSuchAlgorithmException, KeyManagementException { + return sendCommand(qmgrname, portnum, password, REQ_START_QUEUE); + } + + public static int stopChannel(final String qmgrname, final int portnum, final String password) + throws IOException, NoSuchAlgorithmException, KeyManagementException { + return sendCommand(qmgrname, portnum, password, REQ_STOP_QUEUE); + } + public static int getNumConnections(final String qmgrname, final int portnum, final String password) throws KeyManagementException, NoSuchAlgorithmException, IOException, JSONException { + return sendCommand(qmgrname, portnum, password, REQ_GET_SVRCONNS); + } + + private static int sendCommand(String qmgrname, int portnum, String password, String request) throws IOException, KeyManagementException, NoSuchAlgorithmException { final String url = "https://localhost:" + portnum + "/ibmmq/rest/v2/admin/action/qmgr/" + qmgrname + "/mqsc"; - final JSONObject connectionInfo = JsonRestApi.jsonPost(url, "admin", password, REQ_GET_SVRCONNS); + final JSONObject commandResult = JsonRestApi.jsonPost(url, "admin", password, request); + + 
log.debug("result = " + commandResult); - final int completionCode = connectionInfo.getInt("overallCompletionCode"); - final int reasonCode = connectionInfo.getInt("overallReasonCode"); + final int completionCode = commandResult.getInt("overallCompletionCode"); + final int reasonCode = commandResult.getInt("overallReasonCode"); if (completionCode == 2 && reasonCode == 3008) { return 0; } else if (completionCode == 0 && reasonCode == 0) { - return connectionInfo.getJSONArray("commandResponse").length(); + return commandResult.getJSONArray("commandResponse").length(); } else { return -1; } diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQTestUtil.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQTestUtil.java new file mode 100644 index 0000000..a1910a3 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MQTestUtil.java @@ -0,0 +1,248 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource.utils; + +import com.ibm.msg.client.jms.JmsConnectionFactory; +import com.ibm.msg.client.jms.JmsFactoryFactory; +import com.ibm.msg.client.wmq.WMQConstants; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.jms.Connection; +import javax.jms.Destination; +import javax.jms.JMSException; +import javax.jms.Message; +import javax.jms.MessageConsumer; +import javax.jms.MessageProducer; +import javax.jms.Queue; +import javax.jms.QueueBrowser; +import javax.jms.Session; +import javax.jms.TextMessage; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.List; +import java.util.stream.Collectors; + +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.CHANNEL_NAME; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.QMGR_NAME; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.TCP_MQ_HOST_PORT; + +public class MQTestUtil { + + private static final Logger log = LoggerFactory.getLogger(MQTestUtil.class); + + public static final String mqContainer = "icr.io/ibm-messaging/mq:latest"; + + /** + * Puts all messages to the specified MQ queue. Used in tests to + * give the Connector something to get. 
+ */ + public static void putAllMessagesToQueue(final String queueName, final List messages) throws JMSException { + Connection connection = null; + Session session = null; + Destination destination = null; + MessageProducer producer = null; + + final JmsConnectionFactory cf = getJmsConnectionFactory(); + + connection = cf.createConnection(); + session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); + + destination = session.createQueue(queueName); + producer = session.createProducer(destination); + + connection.start(); + + for (final Message message : messages) { + message.setJMSDestination(destination); + producer.send(message); + } + + connection.close(); + } + + @NotNull + public static JmsConnectionFactory getJmsConnectionFactory() throws JMSException { + final JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.WMQ_PROVIDER); + + final JmsConnectionFactory cf = ff.createConnectionFactory(); + cf.setStringProperty(WMQConstants.WMQ_HOST_NAME, "localhost"); + cf.setIntProperty(WMQConstants.WMQ_PORT, TCP_MQ_HOST_PORT); + cf.setStringProperty(WMQConstants.WMQ_CHANNEL, CHANNEL_NAME); + cf.setIntProperty(WMQConstants.WMQ_CONNECTION_MODE, WMQConstants.WMQ_CM_CLIENT); + cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, QMGR_NAME); + cf.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, false); + return cf; + } + + public static void removeAllMessagesFromQueue(String queueName) throws JMSException { + log.info("Starting to remove messages..."); + getAllMessagesFromQueue(queueName); + log.info("Done removing messages..."); + } + + /** + * Gets all messages from the specified MQ queue. Used in tests to + * verify what is left on the test queue + */ + public static List getAllMessagesFromQueue(final String queueName) throws JMSException { + Connection connection = null; + Session session = null; + Destination destination = null; + MessageConsumer consumer = null; + + final JmsConnectionFactory cf = getJmsConnectionFactory(); + + connection = cf.createConnection(); + session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); + + destination = session.createQueue(queueName); + consumer = session.createConsumer(destination); + + connection.start(); + + final List messages = new ArrayList<>(); + Message message; + do { + message = consumer.receiveNoWait(); + if (message != null) { + messages.add(message); + } + } + while (message != null); + + connection.close(); + + return messages; + } + + public static List getSingleBatchOfMessagesFromQueue(final String queueName, final int batchSize) throws JMSException { + Connection connection = null; + Session session = null; + Destination destination = null; + MessageConsumer consumer = null; + + final JmsConnectionFactory cf = getJmsConnectionFactory(); + + connection = cf.createConnection(); + session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); + + destination = session.createQueue(queueName); + consumer = session.createConsumer(destination); + + connection.start(); + + final List messages = new ArrayList<>(); + int messageCount = 0; + Message message; + do { + message = consumer.receiveNoWait(); + if (message != null) { + messages.add(message); + messageCount++; + } + } + while (message != null && messageCount < batchSize); + + session.rollback(); + + connection.close(); + + return messages; + } + + public static List getIDsOfMessagesCurrentlyOnQueue(String queueName, int batchSize) throws JMSException { + List messages = browseMessagesInBatch(queueName, batchSize); + + return 
messages.stream().map(m -> { + try { + return m.getJMSMessageID(); + } catch (JMSException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toList()); + } + + public static List getIDsOfMessagesCurrentlyOnQueue(String queueName) throws JMSException { + List messages = browseAllMessagesFromQueue(queueName); + + return messages.stream().map(m -> { + try { + return m.getJMSMessageID(); + } catch (JMSException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toList()); + } + + public static int getMessageCount(final String queueName) throws JMSException { + return browseAllMessagesFromQueue(queueName).size(); + } + + public static List browseAllMessagesFromQueue(final String queueName) throws JMSException { + Connection connection = null; + Session session = null; + + final JmsConnectionFactory cf = getJmsConnectionFactory(); + + connection = cf.createConnection(); + session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); + + Queue queue = session.createQueue(queueName); + QueueBrowser browser = session.createBrowser(queue); + + connection.start(); + + final List messages = new ArrayList<>(); + + Enumeration e = browser.getEnumeration(); + while (e.hasMoreElements()) { + messages.add((Message) e.nextElement()); + } + + connection.close(); + + return messages; + } + + public static List browseMessagesInBatch(final String queueName, int batchSize) throws JMSException { + Connection connection = null; + Session session = null; + + final JmsConnectionFactory cf = getJmsConnectionFactory(); + + connection = cf.createConnection(); + session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); + + Queue queue = session.createQueue(queueName); + QueueBrowser browser = session.createBrowser(queue); + + connection.start(); + + final List messages = new ArrayList<>(); + + Enumeration e = browser.getEnumeration(); + while (messages.size() < batchSize) { + messages.add((TextMessage) e.nextElement()); + } + + connection.close(); + + return messages; + } + +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MessagesObjectMother.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MessagesObjectMother.java new file mode 100644 index 0000000..289fb49 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/MessagesObjectMother.java @@ -0,0 +1,53 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource.utils; + +import org.jetbrains.annotations.NotNull; + +import javax.jms.JMSContext; +import javax.jms.MapMessage; +import javax.jms.Message; +import java.util.ArrayList; +import java.util.List; + +public class MessagesObjectMother { + + @NotNull + public static List createAListOfMessages(JMSContext jmsContext, int numberOfMessages, String messageContent) throws Exception { + final List messages = new ArrayList<>(); + for (int i = 1; i <= numberOfMessages; i++) { + messages.add(jmsContext.createTextMessage(messageContent + i)); + } + return messages; + } + + /** + * messages 01-15 - valid messages + * message 16 - a message that the builder can't process + * messages 17-30 - valid messages + */ + @NotNull + public static List listOfMessagesButOneIsMalformed(JMSContext jmsContext) throws Exception { + final List messages = createAListOfMessages(jmsContext, 15, "message "); + final MapMessage invalidMessage = jmsContext.createMapMessage(); + invalidMessage.setString("test", "builder cannot convert this"); + messages.add(invalidMessage); + for (int i = 17; i <= 30; i++) { + messages.add(jmsContext.createTextMessage("message " + i)); + } + return messages; + } +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/SourceTaskContextObjectMother.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/SourceTaskContextObjectMother.java new file mode 100644 index 0000000..a7a450b --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/SourceTaskContextObjectMother.java @@ -0,0 +1,68 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource.utils; + +import org.apache.kafka.connect.source.SourceTaskContext; +import org.apache.kafka.connect.storage.OffsetStorageReader; +import org.jetbrains.annotations.NotNull; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.mockito.ArgumentMatchers.any; + +public class SourceTaskContextObjectMother { + + @NotNull + public static SourceTaskContext emptyKafkaOffsetContext() { + return createContext(Collections.emptyMap()); + } + + @NotNull + public static SourceTaskContext kafkaContextWithOffsetGivenAs5() { + Map map = new HashMap<>(); + map.put("sequence-id", 5L); + return createContext(map); + } + + @NotNull + public static SourceTaskContext SourceTaskContextWithOffsetId(long id) { + Map map = new HashMap<>(); + map.put("sequence-id", id); + return createContext(map); + } + + @NotNull + public static SourceTaskContext sourceTaskContextWithOffsetId(long id) { + Map map = new HashMap<>(); + map.put("sequence-id", id); + return createContext(map); + } + + @NotNull + private static SourceTaskContext createContext(Map map) { + SourceTaskContext contextMock = Mockito.mock(SourceTaskContext.class); + OffsetStorageReader offsetStorageReaderMock = Mockito.mock(OffsetStorageReader.class); + + Mockito.when(offsetStorageReaderMock.offset(any())).thenReturn(map); + Mockito.when(contextMock.offsetStorageReader()).thenReturn(offsetStorageReaderMock); + + return contextMock; + } + +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/SourceTaskStopper.java b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/SourceTaskStopper.java index 5e6ed75..4d1245c 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/SourceTaskStopper.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsource/utils/SourceTaskStopper.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,11 +15,14 @@ */ package com.ibm.eventstreams.connect.mqsource.utils; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateClient; +import org.apache.kafka.connect.source.SourceTask; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import org.apache.kafka.connect.source.SourceTask; - /** * Stops an instance of the MQSourceTask in a way that will ensure @@ -29,6 +32,7 @@ * each other. */ public class SourceTaskStopper { + private static final Logger log = LoggerFactory.getLogger(SequenceStateClient.class); private static ExecutorService executor = Executors.newCachedThreadPool(); diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSReader.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSReader.java deleted file mode 100755 index 71b5db0..0000000 --- a/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSReader.java +++ /dev/null @@ -1,512 +0,0 @@ -/** - * Copyright 2017, 2020 IBM Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.ibm.eventstreams.connect.mqsource; - -import com.ibm.eventstreams.connect.mqsource.builders.RecordBuilder; - -import com.ibm.mq.MQException; -import com.ibm.mq.constants.MQConstants; -import com.ibm.mq.jms.MQConnectionFactory; -import com.ibm.mq.jms.MQQueue; -import com.ibm.msg.client.wmq.WMQConstants; - -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.MalformedURLException; -import java.net.URL; -import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.SecureRandom; -import java.util.concurrent.atomic.AtomicBoolean; - -import javax.jms.JMSConsumer; -import javax.jms.JMSContext; -import javax.jms.JMSException; -import javax.jms.JMSRuntimeException; -import javax.jms.Message; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManager; -import javax.net.ssl.TrustManagerFactory; - -import org.apache.kafka.common.config.AbstractConfig; -import org.apache.kafka.common.config.types.Password; -import org.apache.kafka.connect.errors.ConnectException; -import org.apache.kafka.connect.errors.RetriableException; -import org.apache.kafka.connect.source.SourceRecord; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Reads messages from MQ using JMS. Uses a transacted session, adding messages to the current - * transaction until told to commit. Automatically reconnects as needed. - */ -public class JMSReader { - private static final Logger log = LoggerFactory.getLogger(JMSReader.class); - - // Configs - private String userName; - private Password password; - private String topic; - private boolean messageBodyJms; - - // JMS factory and context - private MQConnectionFactory mqConnFactory; - private JMSContext jmsCtxt; - private JMSConsumer jmsCons; - private MQQueue queue; - - private RecordBuilder builder; - - private boolean connected = false; // Whether connected to MQ - private boolean inflight = false; // Whether messages in-flight in current transaction - private boolean inperil = false; // Whether current transaction must be forced to roll back - private AtomicBoolean closeNow = new AtomicBoolean(); // Whether close has been requested - private long reconnectDelayMillis = reconnectDelayMillisMin; // Delay between repeated reconnect attempts - - private static long receiveTimeout = 2000L; - private static long reconnectDelayMillisMin = 64L; - private static long reconnectDelayMillisMax = 8192L; - - public JMSReader() { - } - - /** - * Configure this class. - * - * @param props initial configuration - * - * @throws ConnectException Operation failed and connector should stop. 
- */ - public void configure(final AbstractConfig config) { - log.trace("[{}] Entry {}.configure, props={}", Thread.currentThread().getId(), this.getClass().getName(), - config); - - System.setProperty("com.ibm.mq.cfg.useIBMCipherMappings", - config.getBoolean(MQSourceConnector.CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS).toString()); - - final int transportType = - config.getString(MQSourceConnector.CONFIG_NAME_MQ_CONNECTION_MODE) - .equals(MQSourceConnector.CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT) ? - WMQConstants.WMQ_CM_CLIENT : - WMQConstants.WMQ_CM_BINDINGS; - - try { - mqConnFactory = new MQConnectionFactory(); - mqConnFactory.setTransportType(transportType); - mqConnFactory.setQueueManager(config.getString(MQSourceConnector.CONFIG_NAME_MQ_QUEUE_MANAGER)); - mqConnFactory.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, - config.getBoolean(MQSourceConnector.CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP)); - - if (transportType == WMQConstants.WMQ_CM_CLIENT) { - final String ccdtUrl = config.getString(MQSourceConnector.CONFIG_NAME_MQ_CCDT_URL); - - if (ccdtUrl != null) { - mqConnFactory.setCCDTURL(new URL(ccdtUrl)); - } else { - mqConnFactory.setConnectionNameList(config.getString(MQSourceConnector.CONFIG_NAME_MQ_CONNECTION_NAME_LIST)); - mqConnFactory.setChannel(config.getString(MQSourceConnector.CONFIG_NAME_MQ_CHANNEL_NAME)); - } - - mqConnFactory.setSSLCipherSuite(config.getString(MQSourceConnector.CONFIG_NAME_MQ_SSL_CIPHER_SUITE)); - mqConnFactory.setSSLPeerName(config.getString(MQSourceConnector.CONFIG_NAME_MQ_SSL_PEER_NAME)); - - - final String sslKeystoreLocation = config.getString(MQSourceConnector.CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION); - final Password sslKeystorePassword = config.getPassword(MQSourceConnector.CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD); - final String sslTruststoreLocation = config.getString(MQSourceConnector.CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION); - final Password sslTruststorePassword = config.getPassword(MQSourceConnector.CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD); - if (sslKeystoreLocation != null || sslTruststoreLocation != null) { - final SSLContext sslContext = buildSslContext(sslKeystoreLocation, sslKeystorePassword, - sslTruststoreLocation, sslTruststorePassword); - mqConnFactory.setSSLSocketFactory(sslContext.getSocketFactory()); - } - } - - queue = new MQQueue(config.getString(MQSourceConnector.CONFIG_NAME_MQ_QUEUE)); - - userName = config.getString(MQSourceConnector.CONFIG_NAME_MQ_USER_NAME); - password = config.getPassword(MQSourceConnector.CONFIG_NAME_MQ_PASSWORD); - - messageBodyJms = config.getBoolean(MQSourceConnector.CONFIG_NAME_MQ_MESSAGE_BODY_JMS); - queue.setMessageBodyStyle(messageBodyJms ? 
- WMQConstants.WMQ_MESSAGE_BODY_JMS : - WMQConstants.WMQ_MESSAGE_BODY_MQ); - - queue.setBooleanProperty(WMQConstants.WMQ_MQMD_READ_ENABLED, - config.getBoolean(MQSourceConnector.CONFIG_NAME_MQ_MESSAGE_MQMD_READ)); - - topic = config.getString(MQSourceConnector.CONFIG_NAME_TOPIC); - - } catch (JMSException | JMSRuntimeException jmse) { - log.error("JMS exception {}", jmse); - throw new ConnectException(jmse); - } catch (final MalformedURLException e) { - log.error("MalformedURLException exception {}", e); - throw new ConnectException("CCDT file url invalid", e); - } - - - final String builderClass = config.getString(MQSourceConnector.CONFIG_NAME_MQ_RECORD_BUILDER); - try { - final Class c = Class.forName(builderClass).asSubclass(RecordBuilder.class); - builder = c.newInstance(); - builder.configure(config.originalsStrings()); - } catch (ClassNotFoundException | ClassCastException | IllegalAccessException | InstantiationException - | NullPointerException exc) { - log.error("Could not instantiate message builder {}", builderClass); - throw new ConnectException("Could not instantiate message builder", exc); - } - - log.trace("[{}] Exit {}.configure", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Connects to MQ. - */ - public void connect() { - log.trace("[{}] Entry {}.connect", Thread.currentThread().getId(), this.getClass().getName()); - - try { - if (userName != null) { - jmsCtxt = mqConnFactory.createContext(userName, password.value(), JMSContext.SESSION_TRANSACTED); - } else { - jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED); - } - - jmsCons = jmsCtxt.createConsumer(queue); - connected = true; - - log.info("Connection to MQ established"); - } catch (final JMSRuntimeException jmse) { - log.info("Connection to MQ could not be established"); - log.error("JMS exception {}", jmse); - handleException(jmse); - } - - log.trace("[{}] Exit {}.connect", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Receives a message from MQ. Adds the message to the current transaction. - * Reconnects to MQ if required. - * - * @param wait Whether to wait indefinitely for a message - * - * @return The SourceRecord representing the message - */ - public SourceRecord receive(final boolean wait) { - log.trace("[{}] Entry {}.receive", Thread.currentThread().getId(), this.getClass().getName()); - - if (!connectInternal()) { - log.trace("[{}] Exit {}.receive, retval=null", Thread.currentThread().getId(), this.getClass().getName()); - return null; - } - - Message m = null; - SourceRecord sr = null; - try { - if (wait) { - log.debug("Waiting {} ms for message", receiveTimeout); - m = jmsCons.receive(receiveTimeout); - - if (m == null) { - log.debug("No message received"); - } - } else { - m = jmsCons.receiveNoWait(); - } - - if (m != null) { - inflight = true; - - // We've received a message in a transacted session so we must only permit the - // transaction - // to commit once we've passed it on to Kafka. Temporarily mark the transaction - // as "in-peril" - // so that any exception thrown will result in the transaction rolling back - // instead of committing. 
- inperil = true; - - sr = builder.toSourceRecord(jmsCtxt, topic, messageBodyJms, m); - inperil = false; - } - } catch (JMSException | JMSRuntimeException exc) { - log.error("JMS exception {}", exc); - handleException(exc); - } catch (final ConnectException exc) { - log.error("Connect exception {}", exc); - attemptRollback(); - throw exc; - } - - log.trace("[{}] Exit {}.receive, retval={}", Thread.currentThread().getId(), this.getClass().getName(), sr); - return sr; - } - - /** - * Returns messages got from the MQ queue. Called if the builder has failed to - * transform the - * messages and return them to Connect for producing to Kafka. - */ - private void attemptRollback() { - log.trace("[{}] Entry {}.attemptRollback", Thread.currentThread().getId(), this.getClass().getName()); - try { - jmsCtxt.rollback(); - } catch (final JMSRuntimeException jmsExc) { - log.error("rollback failed {}", jmsExc); - } - log.trace("[{}] Exit {}.attemptRollback", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Commits the current transaction. If the current transaction contains a - * message that could not - * be processed, the transaction is "in peril" and is rolled back instead to - * avoid data loss. - */ - public void commit() { - log.trace("[{}] Entry {}.commit", Thread.currentThread().getId(), this.getClass().getName()); - - if (!connectInternal()) { - return; - } - - try { - if (inflight) { - inflight = false; - - if (inperil) { - inperil = false; - log.debug("Rolling back in-flight transaction"); - jmsCtxt.rollback(); - } else { - jmsCtxt.commit(); - } - } - } catch (final JMSRuntimeException jmse) { - log.error("JMS exception {}", jmse); - handleException(jmse); - } - - log.trace("[{}] Exit {}.commit", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Closes the connection. - */ - public void close() { - log.trace("[{}] Entry {}.close", Thread.currentThread().getId(), this.getClass().getName()); - - closeNow.set(true); - closeInternal(); - - log.trace("[{}] Exit {}.close", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Internal method to connect to MQ. 
- * - * @return true if connection can be used, false otherwise - */ - private boolean connectInternal() { - if (connected) { - return true; - } - - if (closeNow.get()) { - log.debug("Closing connection now"); - return false; - } - - log.trace("[{}] Entry {}.connectInternal", Thread.currentThread().getId(), this.getClass().getName()); - try { - if (userName != null) { - jmsCtxt = mqConnFactory.createContext(userName, password.value(), JMSContext.SESSION_TRANSACTED); - } else { - jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED); - } - - jmsCons = jmsCtxt.createConsumer(queue); - reconnectDelayMillis = reconnectDelayMillisMin; - connected = true; - - log.info("Connection to MQ established"); - } catch (final JMSRuntimeException jmse) { - // Delay slightly so that repeated reconnect loops don't run too fast - try { - Thread.sleep(reconnectDelayMillis); - } catch (final InterruptedException ie) { - } - - if (reconnectDelayMillis < reconnectDelayMillisMax) { - reconnectDelayMillis = reconnectDelayMillis * 2; - } - - log.error("JMS exception {}", jmse); - handleException(jmse); - log.trace("[{}] Exit {}.connectInternal, retval=false", Thread.currentThread().getId(), - this.getClass().getName()); - return false; - } - - log.trace("[{}] Exit {}.connectInternal, retval=true", Thread.currentThread().getId(), - this.getClass().getName()); - return true; - } - - /** - * Internal method to close the connection. - */ - private void closeInternal() { - log.trace("[{}] Entry {}.closeInternal", Thread.currentThread().getId(), this.getClass().getName()); - - try { - inflight = false; - inperil = false; - connected = false; - - if (jmsCtxt != null) { - jmsCtxt.close(); - } - } catch (final JMSRuntimeException jmse) { - } finally { - jmsCtxt = null; - log.debug("Connection to MQ closed"); - } - - log.trace("[{}] Exit {}.closeInternal", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Handles exceptions from MQ. Some JMS exceptions are treated as retriable - * meaning that the - * connector can keep running and just trying again is likely to fix things. 
- */ - private ConnectException handleException(final Throwable exc) { - boolean isRetriable = false; - boolean mustClose = true; - int reason = -1; - - // Try to extract the MQ reason code to see if it's a retriable exception - Throwable t = exc.getCause(); - while (t != null) { - if (t instanceof MQException) { - final MQException mqe = (MQException) t; - log.error("MQ error: CompCode {}, Reason {} {}", mqe.getCompCode(), mqe.getReason(), - MQConstants.lookupReasonCode(mqe.getReason())); - reason = mqe.getReason(); - break; - } else if (t instanceof JMSException) { - final JMSException jmse = (JMSException) t; - log.error("JMS exception: error code {}", jmse.getErrorCode()); - } - - t = t.getCause(); - } - - switch (reason) { - // These reason codes indicate that the connection needs to be closed, but just - // retrying later - // will probably recover - case MQConstants.MQRC_BACKED_OUT: - case MQConstants.MQRC_CHANNEL_NOT_AVAILABLE: - case MQConstants.MQRC_CONNECTION_BROKEN: - case MQConstants.MQRC_HOST_NOT_AVAILABLE: - case MQConstants.MQRC_NOT_AUTHORIZED: - case MQConstants.MQRC_Q_MGR_NOT_AVAILABLE: - case MQConstants.MQRC_Q_MGR_QUIESCING: - case MQConstants.MQRC_Q_MGR_STOPPING: - case MQConstants.MQRC_UNEXPECTED_ERROR: - isRetriable = true; - break; - - // These reason codes indicate that the connection is still OK, but just - // retrying later - // will probably recover - possibly with administrative action on the queue - // manager - case MQConstants.MQRC_GET_INHIBITED: - isRetriable = true; - mustClose = false; - break; - } - - if (mustClose) { - // Delay so that repeated reconnect loops don't run too fast - try { - Thread.sleep(reconnectDelayMillisMax); - } catch (final InterruptedException ie) { - } - closeInternal(); - } - - if (isRetriable) { - return new RetriableException(exc); - } - - return new ConnectException(exc); - } - - private SSLContext buildSslContext(final String sslKeystoreLocation, final Password sslKeystorePassword, - final String sslTruststoreLocation, final Password sslTruststorePassword) { - log.trace("[{}] Entry {}.buildSslContext", Thread.currentThread().getId(), this.getClass().getName()); - - try { - KeyManager[] keyManagers = null; - TrustManager[] trustManagers = null; - - if (sslKeystoreLocation != null) { - final KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(loadKeyStore(sslKeystoreLocation, sslKeystorePassword), sslKeystorePassword.value().toCharArray()); - keyManagers = kmf.getKeyManagers(); - } - - if (sslTruststoreLocation != null) { - final TrustManagerFactory tmf = TrustManagerFactory - .getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(loadKeyStore(sslTruststoreLocation, sslTruststorePassword)); - trustManagers = tmf.getTrustManagers(); - } - - final SSLContext sslContext = SSLContext.getInstance("TLS"); - sslContext.init(keyManagers, trustManagers, new SecureRandom()); - - log.trace("[{}] Exit {}.buildSslContext, retval={}", Thread.currentThread().getId(), - this.getClass().getName(), sslContext); - return sslContext; - } catch (final GeneralSecurityException e) { - throw new ConnectException("Error creating SSLContext", e); - } - } - - private KeyStore loadKeyStore(final String location, final Password password) throws GeneralSecurityException { - log.trace("[{}] Entry {}.loadKeyStore", Thread.currentThread().getId(), this.getClass().getName()); - - try (final InputStream ksStr = new FileInputStream(location)) { - final KeyStore ks = KeyStore.getInstance("JKS"); - 
ks.load(ksStr, password.value().toCharArray()); - - log.trace("[{}] Exit {}.loadKeyStore, retval={}", Thread.currentThread().getId(), - this.getClass().getName(), ks); - return ks; - } catch (final IOException e) { - throw new ConnectException("Error reading keystore " + location, e); - } - } -} \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSWorker.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSWorker.java new file mode 100755 index 0000000..b4f00a5 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSWorker.java @@ -0,0 +1,395 @@ +/** + * Copyright 2017, 2020, 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource; + +import com.ibm.eventstreams.connect.mqsource.builders.RecordBuilder; +import com.ibm.eventstreams.connect.mqsource.builders.RecordBuilderFactory; +import com.ibm.eventstreams.connect.mqsource.builders.RecordBuilderException; +import com.ibm.eventstreams.connect.mqsource.util.QueueConfig; +import com.ibm.mq.jms.MQConnectionFactory; +import com.ibm.mq.jms.MQQueue; +import com.ibm.msg.client.wmq.WMQConstants; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.types.Password; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.source.SourceRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.jms.JMSConsumer; +import javax.jms.JMSContext; +import javax.jms.JMSException; +import javax.jms.JMSProducer; +import javax.jms.JMSRuntimeException; +import javax.jms.Message; +import javax.jms.QueueBrowser; +import javax.jms.TextMessage; +import javax.net.ssl.SSLContext; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Reads messages from MQ using JMS. Uses a transacted session, adding messages + * to the current + * transaction until told to commit. Automatically reconnects as needed. 
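+ * A single worker maintains one JMSConsumer per queue name, and also offers browse and putTextMessage operations (used, for example, with the sequence state queue).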
+ */ +public class JMSWorker { + private static final Logger log = LoggerFactory.getLogger(JMSWorker.class); + + // Configs + private String userName; + private Password password; + private String topic; + + // JMS factory and context + private MQConnectionFactory mqConnFactory; + private JMSContext jmsCtxt; + + final private HashMap jmsConsumers = new HashMap<>(); + private RecordBuilder recordBuilder; + + private boolean connected = false; // Whether connected to MQ + private AtomicBoolean closeNow; // Whether close has been requested + private long reconnectDelayMillis = RECONNECT_DELAY_MILLIS_MIN; // Delay between repeated reconnect attempts + + private static final long RECEIVE_TIMEOUT = 2000L; + private static final long RECONNECT_DELAY_MILLIS_MIN = 64L; + private static final long RECONNECT_DELAY_MILLIS_MAX = 8192L; + + /** + * Configure this class. + * + * @param config initial configuration + * @throws JMSWorkerConnectionException + */ + public void configure(final AbstractConfig config) { + + log.trace("[{}] Entry {}.configure, props={}", Thread.currentThread().getId(), this.getClass().getName(), + config); + + System.setProperty("com.ibm.mq.cfg.useIBMCipherMappings", + config.getBoolean(MQSourceConnector.CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS).toString()); + + final int transportType = + config.getString(MQSourceConnector.CONFIG_NAME_MQ_CONNECTION_MODE) + .equals(MQSourceConnector.CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT) ? + WMQConstants.WMQ_CM_CLIENT : + WMQConstants.WMQ_CM_BINDINGS; + + try { + mqConnFactory = new MQConnectionFactory(); + mqConnFactory.setTransportType(transportType); + mqConnFactory.setQueueManager(config.getString(MQSourceConnector.CONFIG_NAME_MQ_QUEUE_MANAGER)); + mqConnFactory.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, + config.getBoolean(MQSourceConnector.CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP)); + + if (transportType == WMQConstants.WMQ_CM_CLIENT) { + final String ccdtUrl = config.getString(MQSourceConnector.CONFIG_NAME_MQ_CCDT_URL); + + if (ccdtUrl != null) { + mqConnFactory.setCCDTURL(new URL(ccdtUrl)); + } else { + mqConnFactory.setConnectionNameList(config.getString(MQSourceConnector.CONFIG_NAME_MQ_CONNECTION_NAME_LIST)); + mqConnFactory.setChannel(config.getString(MQSourceConnector.CONFIG_NAME_MQ_CHANNEL_NAME)); + } + + mqConnFactory.setSSLCipherSuite(config.getString(MQSourceConnector.CONFIG_NAME_MQ_SSL_CIPHER_SUITE)); + mqConnFactory.setSSLPeerName(config.getString(MQSourceConnector.CONFIG_NAME_MQ_SSL_PEER_NAME)); + + + final String sslKeystoreLocation = config.getString(MQSourceConnector.CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION); + final Password sslKeystorePassword = config.getPassword(MQSourceConnector.CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD); + final String sslTruststoreLocation = config.getString(MQSourceConnector.CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION); + final Password sslTruststorePassword = config.getPassword(MQSourceConnector.CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD); + if (sslKeystoreLocation != null || sslTruststoreLocation != null) { + final SSLContext sslContext = new SSLContextBuilder().buildSslContext(sslKeystoreLocation, sslKeystorePassword, + sslTruststoreLocation, sslTruststorePassword); + mqConnFactory.setSSLSocketFactory(sslContext.getSocketFactory()); + } + } + + userName = config.getString(MQSourceConnector.CONFIG_NAME_MQ_USER_NAME); + password = config.getPassword(MQSourceConnector.CONFIG_NAME_MQ_PASSWORD); + topic = config.getString(MQSourceConnector.CONFIG_NAME_TOPIC); + } catch (JMSException | 
JMSRuntimeException jmse) { + log.error("JMS exception {}", jmse); + throw new JMSWorkerConnectionException("JMS connection failed", jmse); + } catch (final MalformedURLException e) { + log.error("MalformedURLException exception {}", e); + throw new ConnectException("CCDT file url invalid", e); + } + closeNow = new AtomicBoolean(); + closeNow.set(false); + this.recordBuilder = RecordBuilderFactory.getRecordBuilder(config.originalsStrings()); + + log.trace("[{}] Exit {}.configure", Thread.currentThread().getId(), this.getClass().getName()); + } + + + /** + * Used for tests. + */ + protected void setRecordBuilder(final RecordBuilder recordBuilder) { + this.recordBuilder = recordBuilder; + } + + protected JMSContext getContext() { // used to enable testing + if (jmsCtxt == null) maybeReconnect(); + return jmsCtxt; + } + + /** + * Connects to MQ. + */ + public void connect() { + log.trace("[{}] Entry {}.connect", Thread.currentThread().getId(), this.getClass().getName()); + if (userName != null) { + this.jmsCtxt = mqConnFactory.createContext(userName, password.value(), JMSContext.SESSION_TRANSACTED); + } else { + this.jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED); + } + + connected = true; + + log.info("Connection to MQ established"); + log.trace("[{}] Exit {}.connect", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** + * Receives a message from MQ. Adds the message to the current transaction. + * Reconnects to MQ if required. + * + * @param queueName The name of the queue to get messages from + * @param queueConfig Any particular queue configuration that should be applied + * @param wait Whether to wait indefinitely for a message + * @return The Message retrieved from MQ + */ + public Message receive(final String queueName, final QueueConfig queueConfig, final boolean wait) throws JMSRuntimeException, JMSException { + log.trace("[{}] Entry {}.receive", Thread.currentThread().getId(), this.getClass().getName()); + + if (!maybeReconnect()) { + log.trace("[{}] Exit {}.receive, retval=null", Thread.currentThread().getId(), this.getClass().getName()); + return null; + } + + final JMSConsumer internalConsumer; + if (jmsConsumers.containsKey(queueName)) { + internalConsumer = jmsConsumers.get(queueName); + } else { + MQQueue queue = new MQQueue(queueName); + queue = queueConfig.applyToQueue(queue); + internalConsumer = jmsCtxt.createConsumer(queue); + jmsConsumers.put(queueName, internalConsumer); + } + + Message message = null; + if (wait) { + log.debug("Waiting {} ms for message", RECEIVE_TIMEOUT); + + message = internalConsumer.receive(RECEIVE_TIMEOUT); + + if (message == null) { + log.debug("No message received"); + } + } else { + message = internalConsumer.receiveNoWait(); + } + + log.trace("[{}] Exit {}.receive, retval={}", Thread.currentThread().getId(), this.getClass().getName(), message); + + return message; + } + + public Optional browse(final String queueName) throws JMSRuntimeException, JMSException { + final QueueBrowser internalBrowser = jmsCtxt.createBrowser(new MQQueue(queueName)); + final Message message; + final Enumeration e = internalBrowser.getEnumeration(); + if (e.hasMoreElements()) { + message = (Message) e.nextElement(); // two messages (true) or one message (false) + } else { + message = null; // no message + } + internalBrowser.close(); + return Optional.ofNullable(message); + } + + /** + * Browses the queue and returns true if there are at least two messages and false if there is zero or one message. 
+ * The method does not read or return any of the messages. + * + * @param queueName String. Name of the queue to be browsed + * @return boolean + * @throws JMSException + */ + public boolean queueHoldsMoreThanOneMessage(final String queueName) throws JMSException { + final QueueBrowser internalBrowser; + final boolean moreThanOneMessageOnQueue; + internalBrowser = jmsCtxt.createBrowser(new MQQueue(queueName)); + final Enumeration e = internalBrowser.getEnumeration(); + if (e.hasMoreElements()) { + e.nextElement(); //get first + moreThanOneMessageOnQueue = e.hasMoreElements(); // two messages (true) or one message (false) + } else { + moreThanOneMessageOnQueue = false; // no message + } + internalBrowser.close(); + return moreThanOneMessageOnQueue; + } + + /** + * Writes a message to a queue + * @throws JMSException + */ + public void putTextMessage(final String payload, final String queueName) throws JMSRuntimeException, JMSException { + + if (!maybeReconnect()) { + log.trace("[{}] Exit {}.receive, retval=null", Thread.currentThread().getId(), this.getClass().getName()); + return; + } + + final TextMessage message = jmsCtxt.createTextMessage(payload); + final JMSProducer localProducer = jmsCtxt.createProducer(); + localProducer.send(new MQQueue(queueName), message); + } + + /** + * Returns messages got from the MQ queue. Called if the builder has failed to + * transform the + * messages and return them to Connect for producing to Kafka. + */ + public void attemptRollback() { + + if (!maybeReconnect()) { + log.warn("[{}] Exit {}.attemptRollback, retval=null, connection failed", Thread.currentThread().getId(), this.getClass().getName()); + return; + } + + log.trace("[{}] Entry {}.attemptRollback", Thread.currentThread().getId(), this.getClass().getName()); + try { + jmsCtxt.rollback(); + } catch (final JMSRuntimeException jmsExc) { + log.error("rollback failed {0}", jmsExc); + } + log.trace("[{}] Exit {}.attemptRollback", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** + * Commits the current transaction. + */ + public void commit() { + log.trace("[{}] Entry {}.commit", Thread.currentThread().getId(), this.getClass().getName()); + + if (!maybeReconnect()) { + return; + } + + jmsCtxt.commit(); + + log.trace("[{}] Exit {}.commit", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** + * Closes the connection. + */ + public void stop() { + log.trace("[{}] Entry {}.close", Thread.currentThread().getId(), this.getClass().getName()); + + closeNow.set(true); + close(); + + log.trace("[{}] Exit {}.close", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** + * Internal method to reconnect to MQ. 
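+ * Failed attempts back off, doubling the delay from RECONNECT_DELAY_MILLIS_MIN up to RECONNECT_DELAY_MILLIS_MAX, before the JMSRuntimeException is rethrown.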
+ * + * @return true if connection can be used, false otherwise + */ + private boolean maybeReconnect() throws JMSRuntimeException { + if (connected) { + return true; + } + + if (closeNow.get()) { + log.debug("Closing connection now"); + return false; + } + + log.trace("[{}] Entry {}.maybeReconnect", Thread.currentThread().getId(), this.getClass().getName()); + try { + connect(); + reconnectDelayMillis = RECONNECT_DELAY_MILLIS_MIN; + log.info("Connection to MQ established"); + } catch (final JMSRuntimeException jmse) { + // Delay slightly so that repeated reconnect loops don't run too fast + try { + Thread.sleep(reconnectDelayMillis); + } catch (final InterruptedException ie) { + } + + if (reconnectDelayMillis < RECONNECT_DELAY_MILLIS_MAX) { + reconnectDelayMillis = reconnectDelayMillis * 2; + } + + log.error("JMS exception {}", jmse); + log.trace("[{}] Exit {}.maybeReconnect, retval=JMSRuntimeException", Thread.currentThread().getId(), + this.getClass().getName()); + throw jmse; + } + + log.trace("[{}] Exit {}.maybeReconnect, retval=true", Thread.currentThread().getId(), + this.getClass().getName()); + return true; + } + + /** + * Internal method to close the connection. + */ + public void close() { + log.trace("[{}] Entry {}.close", Thread.currentThread().getId(), this.getClass().getName()); + + try { + connected = false; + + jmsConsumers.clear(); + + if (jmsCtxt != null) { + jmsCtxt.close(); + } + } catch (final JMSRuntimeException jmse) { + log.error("", jmse); + } finally { + jmsCtxt = null; + log.debug("Connection to MQ closed"); + } + + log.trace("[{}] Exit {}.close", Thread.currentThread().getId(), this.getClass().getName()); + } + + public SourceRecord toSourceRecord(final Message message, final boolean messageBodyJms, final Map sourceOffset, final Map sourcePartition) { + try { + return recordBuilder.toSourceRecord(jmsCtxt, topic, messageBodyJms, message, sourceOffset, sourcePartition); + } catch (final JMSException e) { + throw new RecordBuilderException(e); + } + } +} \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSWorkerConnectionException.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSWorkerConnectionException.java new file mode 100644 index 0000000..3c8fd23 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/JMSWorkerConnectionException.java @@ -0,0 +1,28 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource; + +public class JMSWorkerConnectionException extends RuntimeException { + + public JMSWorkerConnectionException(final String message) { + super(message); + } + + public JMSWorkerConnectionException(final String message, final Throwable exc) { + super(message, exc); + } + +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceConnector.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceConnector.java index 02f8e62..e399c1d 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceConnector.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceConnector.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2020 IBM Corporation + * Copyright 2017, 2020, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -30,6 +30,8 @@ import org.apache.kafka.common.config.ConfigDef.Width; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.source.ConnectorTransactionBoundaries; +import org.apache.kafka.connect.source.ExactlyOnceSupport; import org.apache.kafka.connect.source.SourceConnector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,6 +65,10 @@ public class MQSourceConnector extends SourceConnector { public static final String CONFIG_DOCUMENTATION_MQ_QUEUE = "The name of the source MQ queue."; public static final String CONFIG_DISPLAY_MQ_QUEUE = "Source queue"; + public static final String CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE = "mq.exactly.once.state.queue"; + public static final String CONFIG_DOCUMENTATION_MQ_EXACTLY_ONCE_STATE_QUEUE = "The name of the MQ queue used to store state. 
Required to run with exactly-once processing."; + public static final String CONFIG_DISPLAY_MQ_EXACTLY_ONCE_STATE_QUEUE = "Exactly-once state queue"; + public static final String CONFIG_NAME_MQ_USER_NAME = "mq.user.name"; public static final String CONFIG_DOCUMENTATION_MQ_USER_NAME = "The user name for authenticating with the queue manager."; public static final String CONFIG_DISPLAY_MQ_USER_NAME = "User name"; @@ -141,7 +147,12 @@ public class MQSourceConnector extends SourceConnector { public static final String CONFIG_DOCUMENTATION_TOPIC = "The name of the target Kafka topic."; public static final String CONFIG_DISPLAY_TOPIC = "Target Kafka topic"; - public static String version = "1.3.5"; + public static final String CONFIG_MAX_POLL_BLOCKED_TIME_MS = "mq.max.poll.blocked.time.ms"; + public static final String CONFIG_DOCUMENTATION_MAX_POLL_BLOCKED_TIME_MS = "How long the SourceTask will wait for a " + + "previous batch of messages to be delivered to Kafka before starting a new poll."; + public static final String CONFIG_DISPLAY_MAX_POLL_BLOCKED_TIME_MS = "Max poll blocked time ms"; + + public static String version = "2.1.0"; private Map configProps; @@ -233,217 +244,238 @@ public ConfigDef config() { CONFIGDEF = new ConfigDef(); CONFIGDEF.define(CONFIG_NAME_MQ_QUEUE_MANAGER, - Type.STRING, - // user must specify the queue manager name - ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), - Importance.HIGH, - CONFIG_DOCUMENTATION_MQ_QUEUE_MANAGER, - CONFIG_GROUP_MQ, 1, Width.MEDIUM, - CONFIG_DISPLAY_MQ_QUEUE_MANAGER); + Type.STRING, + // user must specify the queue manager name + ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), + Importance.HIGH, + CONFIG_DOCUMENTATION_MQ_QUEUE_MANAGER, + CONFIG_GROUP_MQ, 1, Width.MEDIUM, + CONFIG_DISPLAY_MQ_QUEUE_MANAGER); CONFIGDEF.define(CONFIG_NAME_MQ_CONNECTION_MODE, - Type.STRING, - // required value - two valid options - CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT, - ConfigDef.ValidString.in(CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT, - CONFIG_VALUE_MQ_CONNECTION_MODE_BINDINGS), - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_CONNECTION_MODE, - CONFIG_GROUP_MQ, 2, Width.SHORT, - CONFIG_DISPLAY_MQ_CONNECTION_MODE); + Type.STRING, + // required value - two valid options + CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT, + ConfigDef.ValidString.in(CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT, + CONFIG_VALUE_MQ_CONNECTION_MODE_BINDINGS), + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_CONNECTION_MODE, + CONFIG_GROUP_MQ, 2, Width.SHORT, + CONFIG_DISPLAY_MQ_CONNECTION_MODE); CONFIGDEF.define(CONFIG_NAME_MQ_CONNECTION_NAME_LIST, - Type.STRING, - // can be null, for example when using bindings mode or a CCDT - null, ANY, - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_CONNNECTION_NAME_LIST, - CONFIG_GROUP_MQ, 3, Width.LONG, - CONFIG_DISPLAY_MQ_CONNECTION_NAME_LIST); + Type.STRING, + // can be null, for example when using bindings mode or a CCDT + null, ANY, + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_CONNNECTION_NAME_LIST, + CONFIG_GROUP_MQ, 3, Width.LONG, + CONFIG_DISPLAY_MQ_CONNECTION_NAME_LIST); CONFIGDEF.define(CONFIG_NAME_MQ_CHANNEL_NAME, - Type.STRING, - // can be null, for example when using bindings mode - null, ANY, - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_CHANNEL_NAME, - CONFIG_GROUP_MQ, 4, Width.MEDIUM, - CONFIG_DISPLAY_MQ_CHANNEL_NAME); + Type.STRING, + // can be null, for example when using bindings mode + null, ANY, + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_CHANNEL_NAME, + CONFIG_GROUP_MQ, 4, 
Width.MEDIUM, + CONFIG_DISPLAY_MQ_CHANNEL_NAME); CONFIGDEF.define(CONFIG_NAME_MQ_CCDT_URL, - Type.STRING, - // can be null, for example when using bindings mode or a conname list - null, new ValidURL(), - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_CCDT_URL, - CONFIG_GROUP_MQ, 5, Width.MEDIUM, - CONFIG_DISPLAY_MQ_CCDT_URL); + Type.STRING, + // can be null, for example when using bindings mode or a conname list + null, new ValidURL(), + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_CCDT_URL, + CONFIG_GROUP_MQ, 5, Width.MEDIUM, + CONFIG_DISPLAY_MQ_CCDT_URL); CONFIGDEF.define(CONFIG_NAME_MQ_QUEUE, - Type.STRING, - // user must specify the queue name - ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), - Importance.HIGH, - CONFIG_DOCUMENTATION_MQ_QUEUE, - CONFIG_GROUP_MQ, 6, Width.LONG, - CONFIG_DISPLAY_MQ_QUEUE); + Type.STRING, + // user must specify the queue name + ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), + Importance.HIGH, + CONFIG_DOCUMENTATION_MQ_QUEUE, + CONFIG_GROUP_MQ, 6, Width.LONG, + CONFIG_DISPLAY_MQ_QUEUE); CONFIGDEF.define(CONFIG_NAME_MQ_USER_NAME, - Type.STRING, - // can be null, when auth not required - null, ANY, - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_USER_NAME, - CONFIG_GROUP_MQ, 7, Width.MEDIUM, - CONFIG_DISPLAY_MQ_USER_NAME); + Type.STRING, + // can be null, when auth not required + null, ANY, + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_USER_NAME, + CONFIG_GROUP_MQ, 7, Width.MEDIUM, + CONFIG_DISPLAY_MQ_USER_NAME); CONFIGDEF.define(CONFIG_NAME_MQ_PASSWORD, - Type.PASSWORD, - // can be null, when auth not required - null, ANY, - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_PASSWORD, - CONFIG_GROUP_MQ, 8, Width.MEDIUM, - CONFIG_DISPLAY_MQ_PASSWORD); + Type.PASSWORD, + // can be null, when auth not required + null, ANY, + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_PASSWORD, + CONFIG_GROUP_MQ, 8, Width.MEDIUM, + CONFIG_DISPLAY_MQ_PASSWORD); CONFIGDEF.define(CONFIG_NAME_MQ_RECORD_BUILDER, - Type.STRING, - // user must specify a record builder class - ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), - Importance.HIGH, - CONFIG_DOCUMENTATION_MQ_RECORD_BUILDER, - CONFIG_GROUP_MQ, 9, Width.LONG, - CONFIG_DISPLAY_MQ_RECORD_BUILDER); + Type.STRING, + // user must specify a record builder class + ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), + Importance.HIGH, + CONFIG_DOCUMENTATION_MQ_RECORD_BUILDER, + CONFIG_GROUP_MQ, 9, Width.LONG, + CONFIG_DISPLAY_MQ_RECORD_BUILDER); CONFIGDEF.define(CONFIG_NAME_MQ_MESSAGE_BODY_JMS, - Type.BOOLEAN, - // must be a non-null boolean - assume false if not provided - Boolean.FALSE, new ConfigDef.NonNullValidator(), - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_MESSAGE_BODY_JMS, - CONFIG_GROUP_MQ, 10, Width.SHORT, - CONFIG_DISPLAY_MQ_MESSAGE_BODY_JMS); + Type.BOOLEAN, + // must be a non-null boolean - assume false if not provided + Boolean.FALSE, new ConfigDef.NonNullValidator(), + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_MESSAGE_BODY_JMS, + CONFIG_GROUP_MQ, 10, Width.SHORT, + CONFIG_DISPLAY_MQ_MESSAGE_BODY_JMS); CONFIGDEF.define(CONFIG_NAME_MQ_RECORD_BUILDER_KEY_HEADER, - Type.STRING, - // optional value - four valid values - null, ConfigDef.ValidString.in(null, - CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSMESSAGEID, - CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSCORRELATIONID, - CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSCORRELATIONIDASBYTES, - CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSDESTINATION), - 
Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_RECORD_BUILDER_KEY_HEADER, - CONFIG_GROUP_MQ, 11, Width.MEDIUM, - CONFIG_DISPLAY_MQ_RECORD_BUILDER_KEY_HEADER); + Type.STRING, + // optional value - four valid values + null, ConfigDef.ValidString.in(null, + CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSMESSAGEID, + CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSCORRELATIONID, + CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSCORRELATIONIDASBYTES, + CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSDESTINATION), + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_RECORD_BUILDER_KEY_HEADER, + CONFIG_GROUP_MQ, 11, Width.MEDIUM, + CONFIG_DISPLAY_MQ_RECORD_BUILDER_KEY_HEADER); CONFIGDEF.define(CONFIG_NAME_MQ_SSL_CIPHER_SUITE, - Type.STRING, - // optional - not needed if not using SSL - SSL cipher suites change - // too frequently so we won't maintain a valid list here - null, ANY, - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_CIPHER_SUITE, - CONFIG_GROUP_MQ, 12, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_CIPHER_SUITE); + Type.STRING, + // optional - not needed if not using SSL - SSL cipher suites change + // too frequently so we won't maintain a valid list here + null, ANY, + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_SSL_CIPHER_SUITE, + CONFIG_GROUP_MQ, 12, Width.MEDIUM, + CONFIG_DISPLAY_MQ_SSL_CIPHER_SUITE); CONFIGDEF.define(CONFIG_NAME_MQ_SSL_PEER_NAME, - Type.STRING, - // optional - not needed if not using SSL - null, ANY, - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_PEER_NAME, - CONFIG_GROUP_MQ, 13, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_PEER_NAME); + Type.STRING, + // optional - not needed if not using SSL + null, ANY, + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_SSL_PEER_NAME, + CONFIG_GROUP_MQ, 13, Width.MEDIUM, + CONFIG_DISPLAY_MQ_SSL_PEER_NAME); CONFIGDEF.define(CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION, - Type.STRING, - // optional - if provided should be the location of a readable file - null, new ReadableFile(), - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_LOCATION, - CONFIG_GROUP_MQ, 14, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_KEYSTORE_LOCATION); + Type.STRING, + // optional - if provided should be the location of a readable file + null, new ReadableFile(), + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_LOCATION, + CONFIG_GROUP_MQ, 14, Width.MEDIUM, + CONFIG_DISPLAY_MQ_SSL_KEYSTORE_LOCATION); CONFIGDEF.define(CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD, - Type.PASSWORD, - // optional - not needed if SSL keystore isn't provided - null, ANY, - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_PASSWORD, - CONFIG_GROUP_MQ, 15, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_KEYSTORE_PASSWORD); + Type.PASSWORD, + // optional - not needed if SSL keystore isn't provided + null, ANY, + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_PASSWORD, + CONFIG_GROUP_MQ, 15, Width.MEDIUM, + CONFIG_DISPLAY_MQ_SSL_KEYSTORE_PASSWORD); CONFIGDEF.define(CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION, - Type.STRING, - // optional - if provided should be the location of a readable file - null, new ReadableFile(), - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_LOCATION, - CONFIG_GROUP_MQ, 16, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_LOCATION); + Type.STRING, + // optional - if provided should be the location of a readable file + null, new ReadableFile(), + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_LOCATION, + CONFIG_GROUP_MQ, 16, Width.MEDIUM, + CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_LOCATION); CONFIGDEF.define(CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD, - Type.PASSWORD, - 
// optional - not needed if SSL truststore isn't provided - null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_PASSWORD, - CONFIG_GROUP_MQ, 17, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_PASSWORD); + Type.PASSWORD, + // optional - not needed if SSL truststore isn't provided + null, Importance.MEDIUM, + CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_PASSWORD, + CONFIG_GROUP_MQ, 17, Width.MEDIUM, + CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_PASSWORD); CONFIGDEF.define(CONFIG_NAME_MQ_BATCH_SIZE, - Type.INT, - // must be an int greater than min - CONFIG_VALUE_MQ_BATCH_SIZE_DEFAULT, ConfigDef.Range.atLeast(CONFIG_VALUE_MQ_BATCH_SIZE_MINIMUM), - Importance.LOW, - CONFIG_DOCUMENTATION_MQ_BATCH_SIZE, - CONFIG_GROUP_MQ, 18, Width.MEDIUM, - CONFIG_DISPLAY_MQ_BATCH_SIZE); + Type.INT, + // must be an int greater than min + CONFIG_VALUE_MQ_BATCH_SIZE_DEFAULT, ConfigDef.Range.atLeast(CONFIG_VALUE_MQ_BATCH_SIZE_MINIMUM), + Importance.LOW, + CONFIG_DOCUMENTATION_MQ_BATCH_SIZE, + CONFIG_GROUP_MQ, 18, Width.MEDIUM, + CONFIG_DISPLAY_MQ_BATCH_SIZE); CONFIGDEF.define(CONFIG_NAME_MQ_MESSAGE_MQMD_READ, - Type.BOOLEAN, - // must be a non-null boolean - assume false if not provided - Boolean.FALSE, new ConfigDef.NonNullValidator(), - Importance.LOW, - CONFIG_DOCUMENTATION_MQ_MESSAGE_MQMD_READ, - CONFIG_GROUP_MQ, 19, Width.SHORT, - CONFIG_DISPLAY_MQ_MESSAGE_MQMD_READ); + Type.BOOLEAN, + // must be a non-null boolean - assume false if not provided + Boolean.FALSE, new ConfigDef.NonNullValidator(), + Importance.LOW, + CONFIG_DOCUMENTATION_MQ_MESSAGE_MQMD_READ, + CONFIG_GROUP_MQ, 19, Width.SHORT, + CONFIG_DISPLAY_MQ_MESSAGE_MQMD_READ); CONFIGDEF.define(CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP, - Type.BOOLEAN, - // must be a non-null boolean - assume true if not provided - Boolean.TRUE, new ConfigDef.NonNullValidator(), - Importance.LOW, - CONFIG_DOCUMENTATION_MQ_USER_AUTHENTICATION_MQCSP, - CONFIG_GROUP_MQ, 20, Width.SHORT, - CONFIG_DISPLAY_MQ_USER_AUTHENTICATION_MQCSP); + Type.BOOLEAN, + // must be a non-null boolean - assume true if not provided + Boolean.TRUE, new ConfigDef.NonNullValidator(), + Importance.LOW, + CONFIG_DOCUMENTATION_MQ_USER_AUTHENTICATION_MQCSP, + CONFIG_GROUP_MQ, 20, Width.SHORT, + CONFIG_DISPLAY_MQ_USER_AUTHENTICATION_MQCSP); CONFIGDEF.define(CONFIG_NAME_MQ_JMS_PROPERTY_COPY_TO_KAFKA_HEADER, - Type.BOOLEAN, - // must be a non-null boolean - assume false if not provided - Boolean.FALSE, new ConfigDef.NonNullValidator(), - Importance.LOW, - CONFIG_DOCUMENTATION_MQ_JMS_PROPERTY_COPY_TO_KAFKA_HEADER, - CONFIG_GROUP_MQ, 21, Width.MEDIUM, - CONFIG_DISPLAY_MQ_JMS_PROPERTY_COPY_TO_KAFKA_HEADER); + Type.BOOLEAN, + // must be a non-null boolean - assume false if not provided + Boolean.FALSE, new ConfigDef.NonNullValidator(), + Importance.LOW, + CONFIG_DOCUMENTATION_MQ_JMS_PROPERTY_COPY_TO_KAFKA_HEADER, + CONFIG_GROUP_MQ, 21, Width.MEDIUM, + CONFIG_DISPLAY_MQ_JMS_PROPERTY_COPY_TO_KAFKA_HEADER); CONFIGDEF.define(CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS, - Type.BOOLEAN, - // must be a non-null boolean - assume true if not provided - Boolean.TRUE, new ConfigDef.NonNullValidator(), - Importance.LOW, - CONFIG_DOCUMENTATION_MQ_SSL_USE_IBM_CIPHER_MAPPINGS, - CONFIG_GROUP_MQ, 22, Width.SHORT, - CONFIG_DISPLAY_MQ_SSL_USE_IBM_CIPHER_MAPPINGS); + Type.BOOLEAN, + // must be a non-null boolean - assume true if not provided + Boolean.TRUE, new ConfigDef.NonNullValidator(), + Importance.LOW, + CONFIG_DOCUMENTATION_MQ_SSL_USE_IBM_CIPHER_MAPPINGS, + CONFIG_GROUP_MQ, 22, Width.SHORT, + 
CONFIG_DISPLAY_MQ_SSL_USE_IBM_CIPHER_MAPPINGS); + + CONFIGDEF.define(CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE, + Type.STRING, + null, ANY, + Importance.LOW, + CONFIG_DOCUMENTATION_MQ_EXACTLY_ONCE_STATE_QUEUE, + CONFIG_GROUP_MQ, 23, Width.LONG, + CONFIG_DISPLAY_MQ_EXACTLY_ONCE_STATE_QUEUE); + + // How long the SourceTask will wait for a previous batch of messages to + // be delivered to Kafka before starting a new poll. + // It is important that this is less than the time defined for + // task.shutdown.graceful.timeout.ms as that is how long Connect will + // wait for the task to perform lifecycle operations. + CONFIGDEF.define(CONFIG_MAX_POLL_BLOCKED_TIME_MS, + Type.INT, + 2000, ConfigDef.Range.atLeast(0), + Importance.MEDIUM, + CONFIG_DOCUMENTATION_MAX_POLL_BLOCKED_TIME_MS, + null, 24, Width.MEDIUM, + CONFIG_DISPLAY_MAX_POLL_BLOCKED_TIME_MS); CONFIGDEF.define(CONFIG_NAME_TOPIC, - Type.STRING, - // user must specify the topic name - ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), - Importance.HIGH, - CONFIG_DOCUMENTATION_TOPIC, - null, 0, Width.MEDIUM, - CONFIG_DISPLAY_TOPIC); + Type.STRING, + // user must specify the topic name + ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), + Importance.HIGH, + CONFIG_DOCUMENTATION_TOPIC, + null, 0, Width.MEDIUM, + CONFIG_DISPLAY_TOPIC); } @@ -456,14 +488,15 @@ public void ensureValid(final String name, final Object value) { return; } + final File file; try { - final File file = new File((String) value); - if (!file.isFile() || !file.canRead()) { - throw new ConfigException(name, value, "Value must be the location of a readable file"); - } + file = new File((String) value); } catch (final Exception exc) { throw new ConfigException(name, value, "Value must be a valid file location"); } + if (!file.isFile() || !file.canRead()) { + throw new ConfigException(name, value, "Value must be the location of a readable file"); + } } } @@ -483,4 +516,49 @@ public void ensureValid(final String name, final Object value) { } } } + + /** + * Signals that this connector is not capable of defining other transaction boundaries. + * A new transaction will be started and committed for every batch of records returned by {@link MQSourceTask#poll()}. + * + * @param connectorConfig the configuration that will be used for the connector + * @return {@link ConnectorTransactionBoundaries#UNSUPPORTED} + */ + @Override + public ConnectorTransactionBoundaries canDefineTransactionBoundaries(final Map connectorConfig) { + // The connector only supports Kafka transaction boundaries on the poll() method + return ConnectorTransactionBoundaries.UNSUPPORTED; + } + + /** + * Signals whether this connector supports exactly-once semantics with the supplied configuration. + * + * @param connectorConfig the configuration that will be used for the connector. + * 'mq.exactly.once.state.queue' must be supplied in the configuration to enable exactly-once semantics. + * + * @return {@link ExactlyOnceSupport#SUPPORTED} if the configuration supports exactly-once semantics, + * {@link ExactlyOnceSupport#UNSUPPORTED} otherwise. + */ + @Override + public ExactlyOnceSupport exactlyOnceSupport(final Map connectorConfig) { + if (configSupportsExactlyOnce(connectorConfig)) { + return ExactlyOnceSupport.SUPPORTED; + } + return ExactlyOnceSupport.UNSUPPORTED; + } + + /** + * Returns true if the supplied connector configuration supports exactly-once semantics. 
+ * Checks that 'mq.exactly.once.state.queue' property is supplied and is not empty and + * that 'tasks.max' is 1. + * + * @param connectorConfig the connector config + * @return true if 'mq.exactly.once.state.queue' property is supplied and is not empty and 'tasks.max' is 1. + */ + public static final boolean configSupportsExactlyOnce(final Map connectorConfig) { + // If there is a state queue configured and tasks.max is 1 we can do exactly-once semantics + final String exactlyOnceStateQueue = connectorConfig.get(CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE); + final String tasksMax = connectorConfig.get("tasks.max"); + return exactlyOnceStateQueue != null && !exactlyOnceStateQueue.isEmpty() && (tasksMax == null || "1".equals(tasksMax)); + } } \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceTask.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceTask.java index f635e2d..8126e48 100755 --- a/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceTask.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceTask.java @@ -1,194 +1,479 @@ /** - * Copyright 2017, 2018, 2019 IBM Corporation + * Copyright 2017, 2018, 2019, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
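For illustration, a minimal sketch of the configSupportsExactlyOnce check above; the state queue name is an example value and the map types are java.util.Map / java.util.HashMap:

final Map<String, String> props = new HashMap<>();
props.put(MQSourceConnector.CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE, "MQ.EXACTLY.ONCE.STATE"); // example queue name
props.put("tasks.max", "1");
MQSourceConnector.configSupportsExactlyOnce(props);   // true: state queue named and a single task
props.put("tasks.max", "2");
MQSourceConnector.configSupportsExactlyOnce(props);   // false: exactly-once requires tasks.max of 1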
*/ package com.ibm.eventstreams.connect.mqsource; +import com.ibm.eventstreams.connect.mqsource.builders.RecordBuilderException; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceState; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateClient; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateException; +import com.ibm.eventstreams.connect.mqsource.util.LogMessages; +import com.ibm.eventstreams.connect.mqsource.util.ExceptionProcessor; +import com.ibm.eventstreams.connect.mqsource.util.QueueConfig; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.source.SourceRecord; +import org.apache.kafka.connect.source.SourceTask; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.jms.JMSException; +import javax.jms.JMSRuntimeException; +import javax.jms.Message; + import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; -import java.util.Map.Entry; +import java.util.Optional; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.kafka.common.config.AbstractConfig; -import org.apache.kafka.connect.source.SourceRecord; -import org.apache.kafka.connect.source.SourceTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +import static com.ibm.eventstreams.connect.mqsource.MQSourceConnector.CONFIG_MAX_POLL_BLOCKED_TIME_MS; +import static com.ibm.eventstreams.connect.mqsource.MQSourceConnector.CONFIG_NAME_MQ_BATCH_SIZE; +import static com.ibm.eventstreams.connect.mqsource.MQSourceConnector.CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE; +import static com.ibm.eventstreams.connect.mqsource.MQSourceConnector.CONFIG_NAME_MQ_QUEUE; +import static com.ibm.eventstreams.connect.mqsource.MQSourceConnector.CONFIG_NAME_MQ_QUEUE_MANAGER; +import static com.ibm.eventstreams.connect.mqsource.MQSourceConnector.CONFIG_VALUE_MQ_BATCH_SIZE_DEFAULT; + +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskStartUpAction.REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE; +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskStartUpAction.NORMAL_OPERATION; +import static com.ibm.eventstreams.connect.mqsource.MQSourceTaskStartUpAction.REDELIVER_UNSENT_BATCH; +import static com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceState.LastKnownState.DELIVERED; +import static com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceState.LastKnownState.IN_FLIGHT; public class MQSourceTask extends SourceTask { private static final Logger log = LoggerFactory.getLogger(MQSourceTask.class); // The maximum number of records returned per call to poll() - private int batchSize = MQSourceConnector.CONFIG_VALUE_MQ_BATCH_SIZE_DEFAULT; - private CountDownLatch batchCompleteSignal = null; // Used to signal completion of a batch - private AtomicInteger pollCycle = new AtomicInteger(1); // Incremented each time poll() is called - private int lastCommitPollCycle = 0; // The value of pollCycle the last time commit() was called - private AtomicBoolean stopNow = new AtomicBoolean(); // Whether stop has been requested - - private JMSReader reader; - - public MQSourceTask() { 
+ private int batchSize = CONFIG_VALUE_MQ_BATCH_SIZE_DEFAULT; + + // Used to signal completion of a batch + // After returning a batch of messages to Connect, the SourceTask waits + // for an acknowledgement that each message has been successfully + // delivered to Kafka. + // + // The count maintained by the latch is a count of how many MQ messages + // the task is still waiting for this confirmation for. + // + // There is only one active batch at a time - a new batch cannot be + // started until this countdown has reached zero. + private CountDownLatch batchCompleteSignal = null; + + // The number of times a new poll was blocked because the current batch + // is not yet complete. (A batch is complete once all messages have + // been delivered to Kafka, as confirmed by callbacks to #commitRecord + // and #commit). + private int blockedPollsCount = 0; + + // The maximum number of times the SourceTask will tolerate new polls + // being blocked before reporting an error to the Connect framework. + private final static int MAX_BLOCKED_POLLS = 50; + + // Incremented each time poll() is called successfully + private AtomicLong pollCycle = new AtomicLong(1); + + // The value of pollCycle the last time commit() was called + private long lastCommitPollCycle = 0; + + private AtomicLong sequenceStateId = new AtomicLong(0); + private List msgIds = new ArrayList(); + private AtomicBoolean stopNow = new AtomicBoolean(); // Whether stop has been requested + private boolean isExactlyOnceMode; + private String sourceQueue; + private QueueConfig sourceQueueConfig; + private JMSWorker reader; + private JMSWorker dedicated; + private SequenceStateClient sequenceStateClient; + private Map sourceQueuePartition; + private int getMaxPollBlockedTimeMs; + + private int startActionPollLimit = 300; // This is a 5 minute time out on the initial start procedure + private AtomicInteger startActionPollCount = new AtomicInteger(0); + + private final static String OFFSET_IDENTIFIER = "sequence-id"; + private final static String SOURCE_PARTITION_IDENTIFIER = "source"; + + public MQSourceTaskStartUpAction startUpAction; + + protected CountDownLatch getBatchCompleteSignal() { + return batchCompleteSignal; } + private void resetBatchCompleteSignal() { + batchCompleteSignal = null; + blockedPollsCount = 0; + } + /** - * Get the version of this task. Usually this should be the same as the corresponding {@link Connector} class's version. + * Get the version of this task. This should be the same as the + * {@link MQSourceConnector} class's version. * * @return the version, formatted as a String */ - @Override public String version() { + @Override + public String version() { return MQSourceConnector.version; } /** - * Start the Task. This should handle any configuration parsing and one-time setup of the task. + * Start the Task. This handles configuration parsing and preparing + * the JMS clients that will be used by the task. 
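A rough worked bound implied by the two limits above, assuming the default value of mq.max.poll.blocked.time.ms defined earlier in this patch; the real elapsed time also includes whatever delay Connect adds between poll() calls:

// Sketch: how long an incomplete batch is tolerated before the task reports
// "Missing commits for message batch".
final int maxPollBlockedTimeMs = 2000;       // default of mq.max.poll.blocked.time.ms
final int maxBlockedPolls = 50;              // MAX_BLOCKED_POLLS above
final long worstCaseWaitMs = (long) maxPollBlockedTimeMs * maxBlockedPolls;   // 100,000 ms, about 100 seconds

This is also why the property documentation notes that the blocked time should stay below task.shutdown.graceful.timeout.ms.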
+ * * @param props initial configuration */ - @Override public void start(final Map props) { + @Override + public void start(final Map props) { log.trace("[{}] Entry {}.start, props={}", Thread.currentThread().getId(), this.getClass().getName(), props); - - for (final Entry entry : props.entrySet()) { - final String value; - if (entry.getKey().toLowerCase(Locale.ENGLISH).contains("password")) { - value = "[hidden]"; - } else { - value = entry.getValue(); - } - log.debug("Task props entry {} : {}", entry.getKey(), value); + final JMSWorker reader = new JMSWorker(); + JMSWorker dedicated = null; + SequenceStateClient client = null; + if (MQSourceConnector.configSupportsExactlyOnce(props)) { + dedicated = new JMSWorker(); + client = new SequenceStateClient(props.get(CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE), reader, dedicated); } + start(props, reader, dedicated, client); + log.trace("[{}] Exit {}.start, props={}", Thread.currentThread().getId(), this.getClass().getName(), props); + } + + protected void start(final Map props, final JMSWorker reader, final JMSWorker dedicated, final SequenceStateClient sequenceStateClient) { + log.trace("[{}] Entry {}.start, props={}", Thread.currentThread().getId(), this.getClass().getName(), props); + final AbstractConfig config = new AbstractConfig(MQSourceConnector.CONFIGDEF, props, true); - final AbstractConfig config = new AbstractConfig(MQSourceConnector.CONFIGDEF, props); + this.reader = reader; + this.dedicated = dedicated; + this.sequenceStateClient = sequenceStateClient; + this.isExactlyOnceMode = MQSourceConnector.configSupportsExactlyOnce(props); + this.sourceQueueConfig = new QueueConfig(props); + this.getMaxPollBlockedTimeMs = config.getInt(CONFIG_MAX_POLL_BLOCKED_TIME_MS); - batchSize = config.getInt(MQSourceConnector.CONFIG_NAME_MQ_BATCH_SIZE); + this.sourceQueuePartition = Collections.singletonMap( + SOURCE_PARTITION_IDENTIFIER, + props.get(CONFIG_NAME_MQ_QUEUE_MANAGER) + "/" + props.get(CONFIG_NAME_MQ_QUEUE) + ); - // Construct a reader to interface with MQ - reader = new JMSReader(); - reader.configure(config); + startUpAction = NORMAL_OPERATION; - // Make a connection as an initial test of the configuration - reader.connect(); + batchSize = config.getInt(CONFIG_NAME_MQ_BATCH_SIZE); + try { + reader.configure(config); + reader.connect(); + + if (isExactlyOnceMode) { + log.debug(" Deciding startup behaviour from state provided in the state queue and Kafka offsets for exactly once processing."); + dedicated.configure(config); + dedicated.connect(); + + sequenceStateClient.validateStateQueue(); + final Optional mqSequenceState = sequenceStateClient.browse(); + final Optional kafkaSequenceState = sequenceStateClient.getSequenceFromKafkaOffset(context, OFFSET_IDENTIFIER, sourceQueuePartition); + + startUpAction = determineStartupAction(mqSequenceState, kafkaSequenceState); + sequenceStateId.set(mqSequenceState.map(SequenceState::getSequenceId).orElseGet(() -> kafkaSequenceState.orElse(SequenceState.DEFAULT_SEQUENCE_ID))); // get sequenceId from MQ state or Kafka or Default + mqSequenceState.ifPresent(sequenceState -> msgIds.addAll(sequenceState.getMessageIds())); // if there is an MQ state take the msgIds from there; + + if (startUpAction == REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE && mqSequenceState.get().isInFlight()) { + sequenceStateClient.replaceState(new SequenceState(sequenceStateId.get(), msgIds, DELIVERED)); // change deliveryState from InFlight to Delivered + } + } + + } catch (JMSRuntimeException | JMSException | 
JMSWorkerConnectionException e) { + log.error("MQ Connection Exception: ", e); + closeAllWorkers(); + throw new ConnectException(e); + } catch (final SequenceStateException e) { + log.error(LogMessages.UNEXPECTED_MESSAGE_ON_STATE_QUEUE, e); + closeAllWorkers(); + throw new ConnectException(e); + } catch (final ConnectException e) { + log.error("Unexpected connect exception: ", e); + closeAllWorkers(); + throw e; + } catch (final RuntimeException e) { + log.error(LogMessages.UNEXPECTED_EXCEPTION, e); + closeAllWorkers(); + throw e; + } + + sourceQueue = props.get(CONFIG_NAME_MQ_QUEUE); log.trace("[{}] Exit {}.start", Thread.currentThread().getId(), this.getClass().getName()); } + private MQSourceTaskStartUpAction determineStartupAction(final Optional mqSequenceState, final Optional kafkaSequenceState) { + + if (mqSequenceState.isPresent()) { + if (mqSequenceState.get().isDelivered()) { + log.debug(" There are messages on MQ that have been delivered to the topic already. Removing delivered messages from the source queue."); + return REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE; + } else if (mqSequenceState.get().isInFlight() && mqSequenceMatchesKafkaSequence(mqSequenceState.get(), kafkaSequenceState)) { + log.debug(" There are messages on MQ that have been delivered to the topic already. Removing delivered messages from the source queue."); + return REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE; + } else if (mqSequenceState.get().isInFlight() && !kafkaSequenceState.isPresent() || + mqSequenceState.get().isInFlight() && !mqSequenceMatchesKafkaSequence(mqSequenceState.get(), kafkaSequenceState)) { + log.debug(" There are messages on MQ that need to be redelivered to Kafka as the previous attempt failed."); + return REDELIVER_UNSENT_BATCH; + } + } + log.debug(" The state queue is empty. Proceeding to normal operation."); + return NORMAL_OPERATION; + } + + private static boolean mqSequenceMatchesKafkaSequence(final SequenceState mqSequenceState, final Optional kafkaSequenceState) { + return mqSequenceState.getSequenceId() == kafkaSequenceState.orElse(-1L); + } + /** - * Poll this SourceTask for new records. This method should block if no data is - * currently - * available. + * Poll this SourceTask for new records. This method briefly blocks + * if no data is currently available to wait for new messages, however + * needs to promptly return control to Connect to allow the thread to + * be used for task lifecycle management. 
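A condensed restatement of the startup decision above, as a sketch only; the real determineStartupAction works on Optional sequence state from MQ and the Kafka-held sequence id rather than these flattened booleans:

static MQSourceTaskStartUpAction decide(final boolean mqStatePresent,
                                        final boolean mqStateDelivered,
                                        final boolean kafkaSequenceMatches) {
    if (!mqStatePresent) {
        return MQSourceTaskStartUpAction.NORMAL_OPERATION;                             // state queue empty
    }
    if (mqStateDelivered || kafkaSequenceMatches) {
        return MQSourceTaskStartUpAction.REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE;  // batch already on the topic
    }
    return MQSourceTaskStartUpAction.REDELIVER_UNSENT_BATCH;                           // in-flight batch never reached Kafka
}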
* * @return a list of source records */ @Override public List poll() throws InterruptedException { log.trace("[{}] Entry {}.poll", Thread.currentThread().getId(), this.getClass().getName()); + try { + return internalPoll(); + } catch (JMSRuntimeException | JMSException e) { + log.error("JMS Exception: ", e); + maybeCloseAllWorkers(e); + throw ExceptionProcessor.handleException(e); + } catch (final RecordBuilderException e) { + resetBatchCompleteSignal(); + maybeCloseAllWorkers(e); + throw new ConnectException(e); + } catch (final ConnectException e) { + log.error("Unexpected connect exception: ", e); + maybeCloseAllWorkers(e); + throw e; + } catch (final RuntimeException e) { + log.error(LogMessages.UNEXPECTED_EXCEPTION, e); + maybeCloseAllWorkers(e); + throw e; + } + } - final List msgs = new ArrayList<>(); - int messageCount = 0; + private List internalPoll() throws InterruptedException, JMSRuntimeException, JMSException { + final List messageList; // Resolve any in-flight transaction, committing unless there has been an error - // between - // receiving the message from MQ and converting it + // between receiving the message from MQ and converting it if (batchCompleteSignal != null) { - log.debug("Awaiting batch completion signal"); - batchCompleteSignal.await(); + final boolean batchIsComplete = waitForKafkaThenCommitMQ(); + if (batchIsComplete) { + blockedPollsCount = 0; + } else { + // we cannot proceed with this poll because the previous + // batch has not yet been delivered to Kafka - log.debug("Committing records"); - reader.commit(); + blockedPollsCount += 1; + + if (blockedPollsCount > MAX_BLOCKED_POLLS) { + // we have been blocked for too long and need to + // report that the task cannot proceed + throw new ConnectException("Missing commits for message batch"); + } else { + log.debug("skipping poll cycle until previous batch completes"); + return null; + } + } } // Increment the counter for the number of times poll is called so we can ensure // we don't get stuck waiting for // commitRecord callbacks to trigger the batch complete signal - final int currentPollCycle = pollCycle.incrementAndGet(); - log.debug("Starting poll cycle {}", currentPollCycle); + log.debug("Starting poll cycle {}", pollCycle.incrementAndGet()); + + log.debug(" {}.internalPoll: acting on startup action {}", this.getClass().getName(), startUpAction); + switch (startUpAction) { + case REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE: + if (isFirstMsgOnSourceQueueARequiredMsg(msgIds)) { + removeDeliveredMessagesFromSourceQueue(msgIds); + startUpAction = NORMAL_OPERATION; + log.debug(" Delivered message have been removed from the source queue and will not be forwarded to Kafka."); + } else { // The messages could still be locked in a tx on mq. + log.debug(" Delivered message have not been rolled back to the source queue."); + maybeFailWithTimeoutIfNotWaitAndIncrement(msgIds); + } + return Collections.emptyList(); + + case REDELIVER_UNSENT_BATCH: + if (isFirstMsgOnSourceQueueARequiredMsg(msgIds)) { + messageList = new ArrayList<>(pollSourceQueue(msgIds.size())); + log.debug(" The task has retrieved undelivered messages from the source queue."); + } else { // The messages could still be locked in a tx on mq. 
+ log.debug(" Delivered message have not been rolled back to the source queue."); + maybeFailWithTimeoutIfNotWaitAndIncrement(msgIds); + return Collections.emptyList(); + } + break; + + case NORMAL_OPERATION: + messageList = new ArrayList<>(pollSourceQueue(batchSize)); + startActionPollCount.set(0); + if (messageList.size() == 0) { + // There were no messages + log.debug(" There were no messages."); + initOrResetBatchCompleteSignal(false, messageList); + return Collections.emptyList(); + } + if (isExactlyOnceMode) { + sequenceStateId.incrementAndGet(); + } + break; - try { - if (!stopNow.get()) { - log.debug("Polling for records"); - SourceRecord src; - do { - // For the first message in the batch, wait a while if no message - src = reader.receive(messageCount == 0); - if (src != null) { - msgs.add(src); - messageCount++; - } - } while (src != null && messageCount < batchSize && !stopNow.get()); - } else { - log.info("Stopping polling for records"); - } - } finally { + default: + log.warn(" {}.internalPoll: Entered default case. Start has failed to set the startup action for the connector.", this.getClass().getName()); + return Collections.emptyList(); } + // if we're here then there were messages on the queue + initOrResetBatchCompleteSignal(messageList.size() > 0, messageList); + + final HashMap sourceOffset; + if (isExactlyOnceMode) { + log.debug(" Adding the sequence id as the offset within the source records."); + sourceOffset = new HashMap<>(); + sourceOffset.put(OFFSET_IDENTIFIER, sequenceStateId.get()); + } else { + sourceOffset = null; + } + + final ArrayList msgIds = new ArrayList<>(); + final List sourceRecordList = messageList.stream() + .peek(saveMessageID(msgIds)) + .map(message -> reader.toSourceRecord(message, sourceQueueConfig.isMqMessageBodyJms(), sourceOffset, sourceQueuePartition)) + .collect(Collectors.toList()); + + // In RE-DELIVER we already have a state on the queue + if (isExactlyOnceMode && startUpAction == NORMAL_OPERATION) { + sequenceStateClient.write( + new SequenceState( + sequenceStateId.get(), + msgIds, + IN_FLIGHT) + ); + } + + log.debug("Poll returning {} records", messageList.size()); + log.trace("[{}] Exit {}.poll, retval={}", Thread.currentThread().getId(), this.getClass().getName(), messageList.size()); + + return sourceRecordList; + } + + private static Consumer saveMessageID(final ArrayList msgIds) { + return message -> { + try { + msgIds.add(message.getJMSMessageID()); + } catch (final JMSException e) { + throw new RecordBuilderException(e); + } + }; + } + + private void initOrResetBatchCompleteSignal(final boolean predicate, final List messageList) { synchronized (this) { - if (messageCount > 0) { + if (predicate) { if (!stopNow.get()) { - batchCompleteSignal = new CountDownLatch(messageCount); + // start waiting for confirmations for every + // message in the list + batchCompleteSignal = new CountDownLatch(messageList.size()); + blockedPollsCount = 0; } else { - // Discard this batch - we've rolled back when the connection to MQ was closed - // in stop() - log.debug("Discarding a batch of {} records as task is stopping", messageCount); - msgs.clear(); - batchCompleteSignal = null; + // Discard this batch - we've rolled back when + // the connection to MQ was closed in stop() + log.debug("Discarding a batch of {} records as task is stopping", messageList.size()); + messageList.clear(); + resetBatchCompleteSignal(); } } else { - batchCompleteSignal = null; + resetBatchCompleteSignal(); } } + } + + private List pollSourceQueue(final int 
numberOfMessagesToBePolled) throws JMSException { + final List localList = new ArrayList<>(); + + if (!stopNow.get()) { + log.debug("Polling for records"); + Message message; + do { + message = reader.receive(sourceQueue, sourceQueueConfig, localList.size() == 0); + if (message != null) { + localList.add(message); + } + } while (message != null && localList.size() < numberOfMessagesToBePolled && !stopNow.get()); + } else { + log.info("Stopping polling for records"); + } + return localList; + } + + private boolean isFirstMsgOnSourceQueueARequiredMsg(final List msgIds) throws JMSException { + final Message message = reader.browse(sourceQueue).get(); + return msgIds.contains(message.getJMSMessageID()); + } + + private boolean waitForKafkaThenCommitMQ() throws InterruptedException, JMSRuntimeException, JMSException { + log.debug("Awaiting batch completion signal"); + final boolean batchIsComplete = batchCompleteSignal.await(getMaxPollBlockedTimeMs, TimeUnit.MILLISECONDS); + + if (batchIsComplete) { + if (isExactlyOnceMode) { + sequenceStateClient.retrieveStateInSharedTx(); + } - log.debug("Poll returning {} records", messageCount); + log.debug("Committing records"); + reader.commit(); + startUpAction = NORMAL_OPERATION; + } else { + log.debug("{} messages from previous batch still not committed", batchCompleteSignal.getCount()); + } - log.trace("[{}] Exit {}.poll, retval={}", Thread.currentThread().getId(), this.getClass().getName(), - messageCount); - return msgs; + return batchIsComplete; } /** - *

- * Commit the offsets, up to the offsets that have been returned by - * {@link #poll()}. This - * method should block until the commit is complete. - * </p> - * <p> - * SourceTasks are not required to implement this functionality; Kafka Connect - * will record offsets - * automatically. This hook is provided for systems that also need to store - * offsets internally - * in their own system. - * </p>
+ * Indicates that Connect believes all records in the previous batch + * have been committed. */ public void commit() throws InterruptedException { log.trace("[{}] Entry {}.commit", Thread.currentThread().getId(), this.getClass().getName()); - // This callback is simply used to ensure that the mechanism to use commitRecord - // callbacks - // to check that all messages in a batch are complete is not getting stuck. If - // this callback - // is being called, it means that Kafka Connect believes that all outstanding - // messages have - // been completed. That should mean that commitRecord has been called for all of - // them too. - // However, if too few calls to commitRecord are received, the connector could - // wait indefinitely. - // If this commit callback is called twice without the poll cycle increasing, - // trigger the - // batch complete signal directly. - final int currentPollCycle = pollCycle.get(); + // This callback is simply used to ensure that the mechanism to use + // commitRecord callbacks to check that all messages in a batch are + // complete is not getting stuck. If this callback is being called, + // it means that Kafka Connect believes that all outstanding messages + // have been completed. That should mean that commitRecord has been + // called for all of them too. + // + // However, if too few calls to commitRecord are received, the + // connector could wait indefinitely. + // + // If this commit callback is called twice without the poll cycle + // increasing, trigger the batch complete signal directly. + final long currentPollCycle = pollCycle.get(); log.debug("Commit starting in poll cycle {}", currentPollCycle); if (lastCommitPollCycle == currentPollCycle) { @@ -211,20 +496,10 @@ public void commit() throws InterruptedException { } /** - * Signal this SourceTask to stop. In SourceTasks, this method only needs to - * signal to the task that it should stop - * trying to poll for new data and interrupt any outstanding poll() requests. It - * is not required that the task has - * fully stopped. Note that this method necessarily may be invoked from a - * different thread than {@link #poll()} and - * {@link #commit()}. - * - * For example, if a task uses a {@link java.nio.channels.Selector} to receive - * data over the network, this method - * could set a flag that will force {@link #poll()} to exit immediately and - * invoke - * {@link java.nio.channels.Selector#wakeup() wakeup()} to interrupt any ongoing - * requests. + * Signal this SourceTask to stop. In SourceTasks, this method only needs + * to signal to the task that it should stop trying to poll for new data. + * Note that this method is invoked from the same thread as {@link #poll()} + * however a different thread than {@link #commit()}. */ @Override public void stop() { @@ -233,9 +508,12 @@ public void stop() { stopNow.set(true); synchronized (this) { - // Close the connection to MQ to clean up + // Close the connections to MQ to clean up if (reader != null) { - reader.close(); + reader.stop(); + } + if (dedicated != null) { + dedicated.stop(); } } @@ -243,25 +521,20 @@ public void stop() { } /** - *

* Commit an individual {@link SourceRecord} when the callback from the producer * client is received, or if a record is filtered by a transformation. - * </p> - * <p> - * SourceTasks are not required to implement this functionality; Kafka Connect - * will record offsets - * automatically. This hook is provided for systems that also need to store - * offsets internally - * in their own system. - * </p>
* - * @param record {@link SourceRecord} that was successfully sent via the - * producer. + * This is used to know when all messages in an MQ batch have been successfully + * delivered to Kafka. The SourceTask will not proceed to get new messages from + * MQ until this has completed. + * + * @param record {@link SourceRecord} that was successfully sent to Kafka. * @throws InterruptedException */ @Override public void commitRecord(final SourceRecord record) throws InterruptedException { - log.trace("[{}] Entry {}.commitRecord, record={}", Thread.currentThread().getId(), this.getClass().getName(), record); + log.trace("[{}] Entry {}.commitRecord, record={}", Thread.currentThread().getId(), this.getClass().getName(), + record); synchronized (this) { batchCompleteSignal.countDown(); @@ -269,4 +542,47 @@ public void commitRecord(final SourceRecord record) throws InterruptedException log.trace("[{}] Exit {}.commitRecord", Thread.currentThread().getId(), this.getClass().getName()); } + + protected void removeDeliveredMessagesFromSourceQueue(final List msgIds) throws JMSException { + log.debug("Polling for records"); + Message message; + for (final String string : msgIds) { + message = reader.receive(sourceQueue, sourceQueueConfig, false); + final String msgId = message.getJMSMessageID(); + if (!msgIds.contains(msgId)) throw new SequenceStateException("Sequence state is in an unexpected state. Please ask an MQ admin to review"); + } + sequenceStateClient.retrieveStateInSharedTx(); + reader.commit(); + } + + protected AtomicLong getSequenceId() { + return this.sequenceStateId; + } + + protected List getMsgIds() { + return this.msgIds; + } + + private void maybeFailWithTimeoutIfNotWaitAndIncrement(final List msgIds) throws InterruptedException { + if (startActionPollCount.get() >= startActionPollLimit) { + throw new ConnectException(LogMessages.rollbackTimeout(msgIds)); // ?? sequence state exception + } + Thread.sleep(1000); + startActionPollCount.incrementAndGet(); + } + + private void maybeCloseAllWorkers(final Throwable exc) { + log.debug(" Checking to see if the failed connection should be closed."); + if (ExceptionProcessor.isClosable(exc)) { + closeAllWorkers(); + } + } + + private void closeAllWorkers() { + log.debug(" Closing connection to MQ."); + reader.close(); + if (isExactlyOnceMode) { + dedicated.close(); + } + } } \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskStartUpAction.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskStartUpAction.java new file mode 100644 index 0000000..34bc91d --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskStartUpAction.java @@ -0,0 +1,22 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource; + +public enum MQSourceTaskStartUpAction { + REDELIVER_UNSENT_BATCH, + REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE, + NORMAL_OPERATION +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/SSLContextBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/SSLContextBuilder.java new file mode 100644 index 0000000..89f5c08 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/SSLContextBuilder.java @@ -0,0 +1,85 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource; + +import org.apache.kafka.connect.errors.ConnectException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.kafka.common.config.types.Password; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.SecureRandom; + +public class SSLContextBuilder { + + private static final Logger log = LoggerFactory.getLogger(SSLContextBuilder.class); + + public SSLContext buildSslContext(final String sslKeystoreLocation, final Password sslKeystorePassword, + final String sslTruststoreLocation, final Password sslTruststorePassword) { + log.trace("[{}] Entry {}.buildSslContext", Thread.currentThread().getId(), this.getClass().getName()); + + try { + KeyManager[] keyManagers = null; + TrustManager[] trustManagers = null; + + if (sslKeystoreLocation != null) { + final KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(loadKeyStore(sslKeystoreLocation, sslKeystorePassword.value()), sslKeystorePassword.value().toCharArray()); + keyManagers = kmf.getKeyManagers(); + } + + if (sslTruststoreLocation != null) { + final TrustManagerFactory tmf = TrustManagerFactory + .getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(loadKeyStore(sslTruststoreLocation, sslTruststorePassword.value())); + trustManagers = tmf.getTrustManagers(); + } + + final SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init(keyManagers, trustManagers, new SecureRandom()); + + log.trace("[{}] Exit {}.buildSslContext, retval={}", Thread.currentThread().getId(), + this.getClass().getName(), sslContext); + return sslContext; + } catch (final GeneralSecurityException e) { + throw new ConnectException("Error creating SSLContext", e); + } + } + + private KeyStore loadKeyStore(final String location, final String password) throws GeneralSecurityException { + log.trace("[{}] Entry {}.loadKeyStore", Thread.currentThread().getId(), this.getClass().getName()); + + try (final InputStream ksStr = new FileInputStream(location)) { + final KeyStore ks = 
KeyStore.getInstance("JKS"); + ks.load(ksStr, password.toCharArray()); + + log.trace("[{}] Exit {}.loadKeyStore, retval={}", Thread.currentThread().getId(), + this.getClass().getName(), ks); + return ks; + } catch (final IOException e) { + throw new ConnectException("Error reading keystore " + location, e); + } + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/BaseRecordBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/BaseRecordBuilder.java index fd4c0b6..1dfd0d8 100755 --- a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/BaseRecordBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/BaseRecordBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2018, 2019 IBM Corporation + * Copyright 2018, 2019, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,11 +17,8 @@ import com.ibm.eventstreams.connect.mqsource.MQSourceConnector; import com.ibm.eventstreams.connect.mqsource.processor.JmsToKafkaHeaderConverter; - -import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaAndValue; -import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.source.SourceRecord; import org.slf4j.Logger; @@ -31,6 +28,7 @@ import javax.jms.JMSException; import javax.jms.Message; import java.util.Map; +import java.util.Optional; /** * Builds Kafka Connect SourceRecords from messages. @@ -50,33 +48,34 @@ public enum KeyHeader { NONE, MESSAGE_ID, CORRELATION_ID, CORRELATION_ID_AS_BYTE * * @param props initial configuration * - * @throws ConnectException Operation failed and connector should stop. + * @throws RecordBuilderException Operation failed and connector should stop. 
*/ @Override public void configure(final Map props) { log.trace("[{}] Entry {}.configure, props={}", Thread.currentThread().getId(), this.getClass().getName(), props); - final AbstractConfig config = new AbstractConfig(MQSourceConnector.CONFIGDEF, props); - switch (String.valueOf(config.getString(MQSourceConnector.CONFIG_NAME_MQ_RECORD_BUILDER_KEY_HEADER))) { - case MQSourceConnector.CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSMESSAGEID: + final String kh = props.get(MQSourceConnector.CONFIG_NAME_MQ_RECORD_BUILDER_KEY_HEADER); + if (kh != null) { + if (kh.equals(MQSourceConnector.CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSMESSAGEID)) { keyheader = KeyHeader.MESSAGE_ID; log.debug("Setting Kafka record key from JMSMessageID header field"); - break; - case MQSourceConnector.CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSCORRELATIONID: + } else if (kh.equals(MQSourceConnector.CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSCORRELATIONID)) { keyheader = KeyHeader.CORRELATION_ID; log.debug("Setting Kafka record key from JMSCorrelationID header field"); - break; - case MQSourceConnector.CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSCORRELATIONIDASBYTES: + } else if (kh.equals(MQSourceConnector.CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSCORRELATIONIDASBYTES)) { keyheader = KeyHeader.CORRELATION_ID_AS_BYTES; log.debug("Setting Kafka record key from JMSCorrelationIDAsBytes header field"); - break; - case MQSourceConnector.CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSDESTINATION: + } else if (kh.equals(MQSourceConnector.CONFIG_VALUE_MQ_RECORD_BUILDER_KEY_HEADER_JMSDESTINATION)) { keyheader = KeyHeader.DESTINATION; log.debug("Setting Kafka record key from JMSDestination header field"); - break; + } else { + log.error("Unsupported MQ record builder key header value {}", kh); + throw new RecordBuilderException("Unsupported MQ record builder key header value"); + } } - copyJmsPropertiesFlag = config.getBoolean(MQSourceConnector.CONFIG_NAME_MQ_JMS_PROPERTY_COPY_TO_KAFKA_HEADER); + final String str = props.get(MQSourceConnector.CONFIG_NAME_MQ_JMS_PROPERTY_COPY_TO_KAFKA_HEADER); + copyJmsPropertiesFlag = Boolean.parseBoolean(Optional.ofNullable(str).orElse("false")); jmsToKafkaHeaderConverter = new JmsToKafkaHeaderConverter(); log.trace("[{}] Exit {}.configure", Thread.currentThread().getId(), this.getClass().getName()); @@ -161,16 +160,38 @@ public abstract SchemaAndValue getValue(JMSContext context, String topic, boolea * @throws JMSException Message could not be converted */ @Override - public SourceRecord toSourceRecord(final JMSContext context, final String topic, final boolean messageBodyJms, - final Message message) throws JMSException { + public SourceRecord toSourceRecord(final JMSContext context, final String topic, final boolean messageBodyJms, final Message message) throws JMSException { + return toSourceRecord(context, topic, messageBodyJms, message, null, null); + } + + @Override + public SourceRecord toSourceRecord(final JMSContext context, final String topic, final boolean messageBodyJms, final Message message, final Map sourceOffset, final Map sourceQueuePartition) throws JMSException { final SchemaAndValue key = this.getKey(context, topic, message); final SchemaAndValue value = this.getValue(context, topic, messageBodyJms, message); - if (copyJmsPropertiesFlag && messageBodyJms) - return new SourceRecord(null, null, topic, (Integer) null, key.schema(), key.value(), value.schema(), - value.value(), message.getJMSTimestamp(), - 
jmsToKafkaHeaderConverter.convertJmsPropertiesToKafkaHeaders(message)); - else - return new SourceRecord(null, null, topic, key.schema(), key.value(), value.schema(), value.value()); + if (copyJmsPropertiesFlag && messageBodyJms) { + return new SourceRecord( + sourceQueuePartition, + sourceOffset, + topic, + null, + key.schema(), + key.value(), + value.schema(), + value.value(), + message.getJMSTimestamp(), + jmsToKafkaHeaderConverter.convertJmsPropertiesToKafkaHeaders(message) + ); + } else { + return new SourceRecord( + sourceQueuePartition, + sourceOffset, + topic, + key.schema(), + key.value(), + value.schema(), + value.value() + ); + } } } \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/DefaultRecordBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/DefaultRecordBuilder.java index 76ff810..a570ba9 100755 --- a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/DefaultRecordBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/DefaultRecordBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2018, 2019 IBM Corporation + * Copyright 2017, 2018, 2019, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,7 +23,6 @@ import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaAndValue; -import org.apache.kafka.connect.errors.ConnectException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,7 +74,7 @@ public DefaultRecordBuilder() { value = null; } else { log.error("Unsupported JMS message type {}", message.getClass()); - throw new ConnectException("Unsupported JMS message type"); + throw new RecordBuilderException("Unsupported JMS message type"); } } else { // Not interpreting the body as a JMS message type, all messages come through as diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/JsonRecordBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/JsonRecordBuilder.java index ba3e389..5ff72ac 100755 --- a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/JsonRecordBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/JsonRecordBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2018, 2019 IBM Corporation + * Copyright 2017, 2018, 2019, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -25,7 +25,6 @@ import javax.jms.TextMessage; import org.apache.kafka.connect.data.SchemaAndValue; -import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.json.JsonConverter; import org.slf4j.Logger; @@ -76,7 +75,7 @@ public SchemaAndValue getValue(final JMSContext context, final String topic, fin payload = s.getBytes(UTF_8); } else { log.error("Unsupported JMS message type {}", message.getClass()); - throw new ConnectException("Unsupported JMS message type"); + throw new RecordBuilderException("Unsupported JMS message type"); } return converter.toConnectData(topic, payload); diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilder.java index e33d8bd..68932bc 100755 --- a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2018 IBM Corporation + * Copyright 2017, 2018, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,14 +15,12 @@ */ package com.ibm.eventstreams.connect.mqsource.builders; -import java.util.Map; +import org.apache.kafka.connect.source.SourceRecord; import javax.jms.JMSContext; import javax.jms.JMSException; import javax.jms.Message; - -import org.apache.kafka.connect.errors.ConnectException; -import org.apache.kafka.connect.source.SourceRecord; +import java.util.Map; /** * Builds Kafka Connect SourceRecords from messages. @@ -33,7 +31,6 @@ public interface RecordBuilder { * * @param props initial configuration * - * @throws ConnectException Operation failed and connector should stop. */ default void configure(Map props) {} @@ -50,4 +47,6 @@ default void configure(Map props) {} * @throws JMSException Message could not be converted */ SourceRecord toSourceRecord(JMSContext context, String topic, boolean messageBodyJms, Message message) throws JMSException; + + SourceRecord toSourceRecord(JMSContext context, String topic, boolean messageBodyJms, Message message, Map sourceOffset, Map sourcePartition) throws JMSException; } \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderException.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderException.java new file mode 100644 index 0000000..ca64787 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderException.java @@ -0,0 +1,31 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource.builders; + +public class RecordBuilderException extends RuntimeException { + + public RecordBuilderException(final String s) { + super(s); + } + + public RecordBuilderException(final String s, final Throwable throwable) { + super(s, throwable); + } + + public RecordBuilderException(final Throwable throwable) { + super(throwable); + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderFactory.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderFactory.java new file mode 100644 index 0000000..a7a10a8 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderFactory.java @@ -0,0 +1,50 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource.builders; + +import com.ibm.eventstreams.connect.mqsource.MQSourceConnector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; + +public class RecordBuilderFactory { + + private static final Logger log = LoggerFactory.getLogger(RecordBuilderFactory.class); + + public static RecordBuilder getRecordBuilder(final Map props) { + return getRecordBuilder( + props.get(MQSourceConnector.CONFIG_NAME_MQ_RECORD_BUILDER), + props + ); + } + + protected static RecordBuilder getRecordBuilder(final String builderClass, final Map props) { + + final RecordBuilder builder; + + try { + final Class c = Class.forName(builderClass).asSubclass(RecordBuilder.class); + builder = c.newInstance(); + builder.configure(props); + } catch (ClassNotFoundException | ClassCastException | IllegalAccessException | InstantiationException | NullPointerException exc) { + log.error("Could not instantiate message builder {}", builderClass); + throw new RecordBuilderException("Could not instantiate message builder", exc); + } + + return builder; + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/processor/JmsToKafkaHeaderConverter.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/processor/JmsToKafkaHeaderConverter.java index 52357ee..977fe4e 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsource/processor/JmsToKafkaHeaderConverter.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/processor/JmsToKafkaHeaderConverter.java @@ -1,5 +1,5 @@ /** - * Copyright 2019 IBM Corporation + * Copyright 2019, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceState.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceState.java new file mode 100644 index 0000000..9661a17 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceState.java @@ -0,0 +1,104 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource.sequencestate; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.List; +import java.util.Objects; + + +public class SequenceState { + + private long sequenceId; + private List messageIds; + private LastKnownState lastKnownState; + + public SequenceState(final long sequenceId, final List messageIds, final LastKnownState lastKnownState) { + this.sequenceId = sequenceId; + this.messageIds = messageIds; + this.lastKnownState = lastKnownState; + } + + public SequenceState() { + } + + public long getSequenceId() { + return this.sequenceId; + } + + public List getMessageIds() { + return this.messageIds; + } + + public LastKnownState getLastKnownState() { + return this.lastKnownState; + } + + public boolean equals(final Object o) { + if (o == this) return true; + if (!(o instanceof SequenceState)) return false; + final SequenceState other = (SequenceState) o; + if (!other.canEqual((Object) this)) return false; + if (this.getSequenceId() != other.getSequenceId()) return false; + final Object thismessageIds = this.getMessageIds(); + final Object othermessageIds = other.getMessageIds(); + if (!Objects.equals(thismessageIds, othermessageIds)) + return false; + final Object thislastKnownState = this.getLastKnownState(); + final Object otherlastKnownState = other.getLastKnownState(); + if (!Objects.equals(thislastKnownState, otherlastKnownState)) + return false; + return true; + } + + protected boolean canEqual(final Object other) { + return other instanceof SequenceState; + } + + public int hashCode() { + final int prime = 59; + int result = 1; + final long sequenceId = this.getSequenceId(); + result = result * prime + (int) (sequenceId >>> 32 ^ sequenceId); + final Object messageIds = this.getMessageIds(); + result = result * prime + (messageIds == null ? 43 : messageIds.hashCode()); + final Object lastKnownState = this.getLastKnownState(); + result = result * prime + (lastKnownState == null ? 
43 : lastKnownState.hashCode()); + return result; + } + + public String toString() { + return "SequenceState(sequenceId=" + this.getSequenceId() + ", messageIds=" + this.getMessageIds() + ", lastKnownState=" + this.getLastKnownState() + ")"; + } + + public enum LastKnownState { + IN_FLIGHT, DELIVERED; + } + + public final static long DEFAULT_SEQUENCE_ID = 0; + + @JsonIgnore + public final boolean isDelivered() { + return this.lastKnownState == LastKnownState.DELIVERED; + } + + @JsonIgnore + public final boolean isInFlight() { + return this.lastKnownState == LastKnownState.IN_FLIGHT; + } + +} + diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateClient.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateClient.java new file mode 100644 index 0000000..cad0cc3 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateClient.java @@ -0,0 +1,141 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource.sequencestate; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.eventstreams.connect.mqsource.JMSWorker; +import com.ibm.eventstreams.connect.mqsource.util.LogMessages; +import com.ibm.eventstreams.connect.mqsource.util.QueueConfig; +import org.apache.kafka.connect.source.SourceTaskContext; +import org.apache.kafka.connect.storage.OffsetStorageReader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.jms.JMSException; +import javax.jms.JMSRuntimeException; +import javax.jms.Message; +import javax.jms.TextMessage; +import java.text.MessageFormat; +import java.util.Map; +import java.util.Optional; + +public class SequenceStateClient { + private static final Logger log = LoggerFactory.getLogger(SequenceStateClient.class); + + private final String stateQueueName; + private final QueueConfig stateQueueConfig; + private final ObjectMapper mapper; + private final JMSWorker sharedJMSWorker; + private final JMSWorker dedicatedJMSWorker; + + public SequenceStateClient(final String stateQueue, final JMSWorker sharedJMSWorker, final JMSWorker dedicatedJMSWorker) { + this.stateQueueName = stateQueue; + this.sharedJMSWorker = sharedJMSWorker; + this.dedicatedJMSWorker = dedicatedJMSWorker; + stateQueueConfig = new QueueConfig(true, false); + mapper = new ObjectMapper(); + } + + public Optional browse() throws JMSRuntimeException, JMSException { + final Optional message = dedicatedJMSWorker.browse(stateQueueName); + if (message.isPresent()) { + final SequenceState sequenceState = messageToStateObject(message.get()); + log.debug(MessageFormat.format("State message read (non-destructive GET) from queue: {0}", stateQueueName)); + return Optional.of(sequenceState); + } else { + return Optional.empty(); + } + } + + public Optional retrieveStateInSharedTx() throws JMSRuntimeException, JMSException { + 
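+ // Destructive get of the state message using the shared JMS worker; the get is deliberately not committed here, so removing the state message remains part of the shared worker's in-flight transaction.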
final Message message = sharedJMSWorker.receive(stateQueueName, stateQueueConfig, false); + if (message == null) { + return Optional.empty(); + } + final SequenceState sequenceState = messageToStateObject(message); + log.debug(MessageFormat.format("State message read (destructive GET) from queue: {0}", stateQueueName)); + return Optional.of(sequenceState); + } + + public SequenceState write(final SequenceState sequenceState) throws JMSException { + final String json; + try { + json = mapper.writeValueAsString(sequenceState); + } catch (final JsonProcessingException e) { + throw convertToSequenceStateExceptionWithErrorMessage(e, LogMessages.JSON_PARSING_ERROR); + } + dedicatedJMSWorker.putTextMessage(json, stateQueueName); + dedicatedJMSWorker.commit(); + log.debug(MessageFormat.format("State message written to queue: {0}", stateQueueName)); + return sequenceState; + } + + public SequenceState replaceState(final SequenceState newState) throws JMSException { + dedicatedJMSWorker.receive(stateQueueName, stateQueueConfig, false); + log.debug(MessageFormat.format("State message read (destructive GET) from queue: {0}", stateQueueName)); + try { + dedicatedJMSWorker.putTextMessage(mapper.writeValueAsString(newState), stateQueueName); + log.debug(MessageFormat.format("State message written to queue: {0}", stateQueueName)); + } catch (final JsonProcessingException e) { + throw convertToSequenceStateExceptionWithErrorMessage(e, LogMessages.JSON_PARSING_ERROR); + } + dedicatedJMSWorker.commit(); + return newState; + } + + /** + * Validates that the state queue has either zero or one message. Throws error if + * a) the queue contains more than one message + * b) the queue can not be browsed + * @throws JMSException + */ + public void validateStateQueue() throws JMSException { + final boolean invalid; + invalid = dedicatedJMSWorker.queueHoldsMoreThanOneMessage(stateQueueName); + if (invalid) throw new SequenceStateException("State Queue holds more than one message."); + } + + public void closeClientConnections() { + dedicatedJMSWorker.stop(); + sharedJMSWorker.stop(); + } + + public Optional getSequenceFromKafkaOffset(final SourceTaskContext context, final String offsetIdentifier, final Map sourceQueuePartition) { + final OffsetStorageReader offsetStorageReader = context.offsetStorageReader(); + final Map kafkaConnectOffset = offsetStorageReader.offset(sourceQueuePartition); + return Optional.ofNullable(kafkaConnectOffset).map(offSetMap -> (Long) offSetMap.get(offsetIdentifier)); + } + + public SequenceState messageToStateObject(final Message message) throws JMSException { + final TextMessage textMessage; + final SequenceState sequenceState; + try { + textMessage = (TextMessage) message; + sequenceState = mapper.readValue(textMessage.getText(), SequenceState.class); + } catch (final ClassCastException e) { + throw convertToSequenceStateExceptionWithErrorMessage(e, LogMessages.CASTING_MQ_SEQ_STATE_TO_TEXTMSG_ERROR); + } catch (final JsonProcessingException e) { + throw convertToSequenceStateExceptionWithErrorMessage(e, LogMessages.JSON_PARSING_ERROR); + } + return sequenceState; + } + + private SequenceStateException convertToSequenceStateExceptionWithErrorMessage(final Exception e, final String message) { + log.error(message, e); + return new SequenceStateException(message, e); + } +} \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateException.java 
b/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateException.java new file mode 100644 index 0000000..64e4265 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateException.java @@ -0,0 +1,28 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource.sequencestate; + +public class SequenceStateException extends RuntimeException { + + public SequenceStateException(final String message, final Throwable exc) { + super(message, exc); + } + + public SequenceStateException(final String message) { + super(message); + } + +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/util/ExceptionProcessor.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/util/ExceptionProcessor.java new file mode 100644 index 0000000..d03ddd3 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/util/ExceptionProcessor.java @@ -0,0 +1,91 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource.util; + +import javax.jms.JMSException; + +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.errors.RetriableException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.ibm.mq.MQException; +import com.ibm.mq.constants.MQConstants; + +public class ExceptionProcessor { + + private static final Logger log = LoggerFactory.getLogger(ExceptionProcessor.class); + + + protected static int getReason(final Throwable exc) { + int reason = -1; + + // Try to extract the MQ reason code to see if it's a retriable exception + Throwable t = exc.getCause(); + while (t != null) { + if (t instanceof MQException) { + final MQException mqe = (MQException) t; + log.error("MQ error: CompCode {}, Reason {} {}", mqe.getCompCode(), mqe.getReason(), + MQConstants.lookupReasonCode(mqe.getReason())); + reason = mqe.getReason(); + break; + } else if (t instanceof JMSException) { + final JMSException jmse = (JMSException) t; + log.error("JMS exception: error code {}", jmse.getErrorCode()); + } + + t = t.getCause(); // Moves t up the stack trace until it is null. 
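+ // i.e. walk the exception's cause chain; reason stays -1 if no MQException is found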
+ } + return reason; + } + + public static boolean isClosable(final Throwable exc) { + if (getReason(exc) == MQConstants.MQRC_GET_INHIBITED) { + log.info("A queue has the GET operation intentionally inhibited, wait for next poll."); + return false; + } + log.info("All MQ connections will be closed."); + return true; + } + + public static boolean isRetriable(final Throwable exc) { + final int reason = getReason(exc); + switch (reason) { + // These reason codes indicate a temporary problem; retrying the connection later will probably recover + case MQConstants.MQRC_BACKED_OUT: + case MQConstants.MQRC_CHANNEL_NOT_AVAILABLE: + case MQConstants.MQRC_CONNECTION_BROKEN: + case MQConstants.MQRC_HOST_NOT_AVAILABLE: + case MQConstants.MQRC_NOT_AUTHORIZED: + case MQConstants.MQRC_Q_MGR_NOT_AVAILABLE: + case MQConstants.MQRC_Q_MGR_QUIESCING: + case MQConstants.MQRC_Q_MGR_STOPPING: + case MQConstants.MQRC_UNEXPECTED_ERROR: + case MQConstants.MQRC_GET_INHIBITED: + log.info("JMS exception is retriable, wait for next poll."); + return true; + } + log.info("JMS exception is not retriable, the connector is in a failed state."); + return false; + } + + public static ConnectException handleException(final Throwable exc) { + if (isRetriable(exc)) { + return new RetriableException(exc); + } + return new ConnectException(exc); + } +} \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/util/LogMessages.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/util/LogMessages.java new file mode 100644 index 0000000..0cc3ea8 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/util/LogMessages.java @@ -0,0 +1,55 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource.util; + +import java.text.MessageFormat; +import java.util.List; + +public class LogMessages { + + // Error messages + + public final static String JSON_PARSING_ERROR = "The sequence state stored on the MQ state queue failed to parse as a sequence state locally. " + + "The state queue should contain at most one message containing a valid JSON representation of a sequence state, " + + "e.g.\n" + + "{\n" + + " \"sequenceId\":314,\n" + + " \"messageIds\":[\n" + + " \"414d51204d59514d475220202020202033056b6401d51010\",\n" + + " \"414d51204d59514d475220202020202033056b6401d51011\",\n" + + " \"414d51204d59514d475220202020202033056b6401d51012\"\n" + + " ],\n" + + " \"lastKnownState\":\"IN_FLIGHT\"\n" + + "}\n" + + "An admin needs to inspect the state queue before the connector is restarted."; + + public final static String CASTING_MQ_SEQ_STATE_TO_TEXTMSG_ERROR = "The sequence state stored on the MQ state queue failed to cast to a TextMessage." + + " The state queue should only contain messages of the TextMessage type."
+ + " An admin needs to inspect the state queue before the connector is restarted."; + + public final static String UNEXPECTED_MESSAGE_ON_STATE_QUEUE = "Unexpected State on the MQ state queue, please review the messages on the state queue and refer to the readme for further guidance: "; + + public final static String UNEXPECTED_EXCEPTION = "An unexpected exception has been thrown, please raise a support case or github issue including the connector logs and configuration."; + + public final static String rollbackTimeout(final List msgIds) { + return MessageFormat.format( + "The connector has tried to get the messages with the following ids: \n{0}\n " + + "for 5 minutes and these messages are not available on the source queue. To adhere to the exactly once delivery the " + + "connector has been put in a failed state to allow the cause to be investigated. An admin needs to inspect the state " + + "queue and source queue before the connector is restarted.", + msgIds); + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsource/util/QueueConfig.java b/src/main/java/com/ibm/eventstreams/connect/mqsource/util/QueueConfig.java new file mode 100644 index 0000000..1d1d712 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsource/util/QueueConfig.java @@ -0,0 +1,62 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource.util; + +import com.ibm.mq.jms.MQQueue; +import com.ibm.msg.client.wmq.WMQConstants; + +import javax.jms.JMSException; +import java.util.Map; + +import static com.ibm.eventstreams.connect.mqsource.MQSourceConnector.CONFIG_NAME_MQ_MESSAGE_BODY_JMS; +import static com.ibm.eventstreams.connect.mqsource.MQSourceConnector.CONFIG_NAME_MQ_MESSAGE_MQMD_READ; + + +public class QueueConfig { + final private boolean mqMessageBodyJms; + final private boolean mqMessageMqmdRead; + + public QueueConfig(final Map props) { + this( + Boolean.parseBoolean(props.get(CONFIG_NAME_MQ_MESSAGE_BODY_JMS)), + Boolean.parseBoolean(props.get(CONFIG_NAME_MQ_MESSAGE_MQMD_READ)) + ); + } + + public QueueConfig(final Boolean mqMessageBodyJms, final Boolean mqMessageMqmdRead) { + this.mqMessageBodyJms = mqMessageBodyJms; + this.mqMessageMqmdRead = mqMessageMqmdRead; + } + + public MQQueue applyToQueue(final MQQueue queue) throws JMSException { + if (mqMessageBodyJms) { + queue.setMessageBodyStyle(WMQConstants.WMQ_MESSAGE_BODY_JMS); + } else { + queue.setMessageBodyStyle(WMQConstants.WMQ_MESSAGE_BODY_MQ); + } + + if (mqMessageMqmdRead) queue.setBooleanProperty(WMQConstants.WMQ_MQMD_READ_ENABLED, true); + return queue; + } + + public boolean isMqMessageBodyJms() { + return this.mqMessageBodyJms; + } + + public boolean isMqMessageMqmdRead() { + return this.mqMessageMqmdRead; + } +} diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsource/JmsToKafkaHeaderConverterTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsource/JmsToKafkaHeaderConverterTest.java index 0598a9b..057b29e 100644 --- a/src/test/java/com/ibm/eventstreams/connect/mqsource/JmsToKafkaHeaderConverterTest.java +++ b/src/test/java/com/ibm/eventstreams/connect/mqsource/JmsToKafkaHeaderConverterTest.java @@ -1,5 +1,5 @@ /** - * Copyright 2019 IBM Corporation + * Copyright 2019, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsource/MQSourceConnectorTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsource/MQSourceConnectorTest.java index 9ea8431..5ded06f 100644 --- a/src/test/java/com/ibm/eventstreams/connect/mqsource/MQSourceConnectorTest.java +++ b/src/test/java/com/ibm/eventstreams/connect/mqsource/MQSourceConnectorTest.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2018, 2019 IBM Corporation + * Copyright 2017, 2018, 2019, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,12 +16,20 @@ package com.ibm.eventstreams.connect.mqsource; import org.apache.kafka.connect.connector.Connector; +import org.apache.kafka.connect.source.ConnectorTransactionBoundaries; +import org.apache.kafka.connect.source.ExactlyOnceSupport; import org.apache.kafka.connect.source.SourceConnector; import org.junit.Test; +import java.util.Collections; + import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.HashMap; +import java.util.Map; + public class MQSourceConnectorTest { @Test public void testVersion() { @@ -35,4 +43,44 @@ public void testConnectorType() { final Connector connector = new MQSourceConnector(); assertTrue(SourceConnector.class.isAssignableFrom(connector.getClass())); } + + @Test + public void testConnectorCanDefineTransactionBoundaries() { + final SourceConnector connector = new MQSourceConnector(); + // Not supported + assertEquals(ConnectorTransactionBoundaries.UNSUPPORTED, connector.canDefineTransactionBoundaries(Collections.emptyMap())); + assertEquals(ConnectorTransactionBoundaries.UNSUPPORTED, connector.canDefineTransactionBoundaries(Collections.singletonMap("mq.exactly.once.state.queue", "DEV.QUEUE.2"))); + } + + @Test + public void testConnectorExactlyOnceSupport() { + final SourceConnector connector = new MQSourceConnector(); + // Only supported if mq.exactly.once.state.queue is supplied in the config and 'tasks.max' is 1 + assertEquals(ExactlyOnceSupport.UNSUPPORTED, connector.exactlyOnceSupport(Collections.emptyMap())); + assertEquals(ExactlyOnceSupport.UNSUPPORTED, connector.exactlyOnceSupport(Collections.singletonMap("mq.exactly.once.state.queue", ""))); + assertEquals(ExactlyOnceSupport.UNSUPPORTED, connector.exactlyOnceSupport(Collections.singletonMap("mq.exactly.once.state.queue", null))); + assertEquals(ExactlyOnceSupport.UNSUPPORTED, connector.exactlyOnceSupport(Collections.singletonMap("tasks.max", "1"))); + + final Map configProps = new HashMap(); + configProps.put("mq.exactly.once.state.queue", "DEV.QUEUE.2"); + configProps.put("tasks.max", "1"); + assertEquals(ExactlyOnceSupport.SUPPORTED, connector.exactlyOnceSupport(configProps)); + assertEquals(ExactlyOnceSupport.SUPPORTED, connector.exactlyOnceSupport(Collections.singletonMap("mq.exactly.once.state.queue", "DEV.QUEUE.2"))); + } + + @Test + public void testConnectorConfigSupportsExactlyOnce() { + // True if an mq.exactly.once.state.queue value is supplied in the config and 'tasks.max' is 1 + final Map configProps = new HashMap(); + configProps.put("mq.exactly.once.state.queue", "DEV.QUEUE.2"); + configProps.put("tasks.max", "1"); + assertTrue(MQSourceConnector.configSupportsExactlyOnce(configProps)); + assertTrue(MQSourceConnector.configSupportsExactlyOnce(Collections.singletonMap("mq.exactly.once.state.queue", "DEV.QUEUE.2"))); + // False otherwise + assertFalse(MQSourceConnector.configSupportsExactlyOnce(Collections.singletonMap("tasks.max", "1"))); + assertFalse(MQSourceConnector.configSupportsExactlyOnce(Collections.emptyMap())); + assertFalse(MQSourceConnector.configSupportsExactlyOnce(Collections.singletonMap("mq.exactly.once.state.queue", ""))); + assertFalse(MQSourceConnector.configSupportsExactlyOnce(Collections.singletonMap("mq.exactly.once.state.queue", null))); + } + } \ No newline at end of file diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskTest.java new file mode 100644 
index 0000000..378acbb --- /dev/null +++ b/src/test/java/com/ibm/eventstreams/connect/mqsource/MQSourceTaskTest.java @@ -0,0 +1,275 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsource; + +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceState; +import com.ibm.eventstreams.connect.mqsource.sequencestate.SequenceStateClient; +import com.ibm.eventstreams.connect.mqsource.util.QueueConfig; + +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.source.SourceRecord; +import org.apache.kafka.connect.source.SourceTaskContext; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import javax.jms.JMSException; +import javax.jms.JMSRuntimeException; +import javax.jms.TextMessage; + +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.CHANNEL_NAME; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.DEFAULT_CONNECTION_NAME; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.DEFAULT_SOURCE_QUEUE; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.DEFAULT_STATE_QUEUE; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.QMGR_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anyString; + +@RunWith(MockitoJUnitRunner.class) +public class MQSourceTaskTest { + + @Mock private TextMessage jmsMessage; + @Mock private JMSWorker jmsWorker; + @Mock private JMSWorker dedicatedWorker; + @Mock private SequenceStateClient sequenceStateClient; + @Mock private SourceTaskContext sourceTaskContext; + + private final static int MQ_BATCH_SIZE = 8; + private final static int MAX_POLL_BLOCKED_TIME_MS = 100; + + private Map createDefaultConnectorProperties() { + final Map props = new HashMap<>(); + props.put("mq.queue.manager", QMGR_NAME); + props.put("mq.connection.mode", "client"); + props.put("mq.connection.name.list", DEFAULT_CONNECTION_NAME); + props.put("mq.channel.name", CHANNEL_NAME); + props.put("mq.queue", DEFAULT_SOURCE_QUEUE); + props.put("mq.user.authentication.mqcsp", "false"); + props.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + props.put("topic", "mytopic"); + props.put("mq.batch.size", Integer.toString(MQ_BATCH_SIZE)); + props.put("mq.max.poll.blocked.time.ms", Integer.toString(MAX_POLL_BLOCKED_TIME_MS)); + return props; + } + + private Map createExactlyOnceConnectorProperties() { + final Map props 
= createDefaultConnectorProperties(); + props.put("mq.exactly.once.state.queue", DEFAULT_STATE_QUEUE); + props.put("tasks.max", "1"); + return props; + } + + @Test + public void testSequenceStateClientBrowseHasBeenCalledInStart() throws JMSRuntimeException, JMSException { + + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); + mqSourceTask.start(createExactlyOnceConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + Mockito.verify(sequenceStateClient, Mockito.times(1)).browse(); + } + + @Test + public void testStartWhenAtLeastOnceDeliveryConfig() throws JMSRuntimeException, JMSException { + + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); + mqSourceTask.start(createDefaultConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + // Check that sequenceStateClient is not called and that the startup action is normal + Mockito.verify(sequenceStateClient, Mockito.times(0)).browse(); + assertThat(mqSourceTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.NORMAL_OPERATION); + assertThat(mqSourceTask.getSequenceId().get()).isEqualTo(0L); + } + + @Test + public void testStartWhenNoPrepareMessageInMQAndNoKafkaOffset() { + + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); + Mockito.when(sequenceStateClient.getSequenceFromKafkaOffset(any(SourceTaskContext.class), anyString(), anyMap())).thenReturn(Optional.of(5L)); + mqSourceTask.start(createExactlyOnceConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + assertThat(mqSourceTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.NORMAL_OPERATION); + + assertThat(mqSourceTask.getSequenceId().get()).isEqualTo(5L); + } + + @Test + public void testStartWithKafkaOffsetButNoMQOffset() { + + MQSourceTask mqSourceTask = new MQSourceTask(); + // setUpEmptyKafkaOffsetMock(); + mqSourceTask.initialize(sourceTaskContext); + mqSourceTask.start(createExactlyOnceConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + assertThat(mqSourceTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.NORMAL_OPERATION); + + assertThat(mqSourceTask.getSequenceId().get()).isEqualTo(0L); + } + + @Test + public void testStartWhenPrepareMessageInMQAndSequenceStateIsDELIVERED() throws JMSRuntimeException, JMSException { + List messageIds = Arrays.asList("1", "2", "3", "4", "5"); + + Mockito.when(sequenceStateClient.browse()).thenReturn( + Optional.of(new SequenceState( + 5, + messageIds, + SequenceState.LastKnownState.DELIVERED)) + ); + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); + mqSourceTask.start(createExactlyOnceConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + assertThat(mqSourceTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE); + + assertThat(mqSourceTask.getSequenceId().get()).isEqualTo(5L); + assertThat(mqSourceTask.getMsgIds()).isEqualTo(messageIds); + } + + @Test + public void testStartWhenPrepareMessageInMQAndSequenceStateIsIN_FLIGHTNoKafkaState() throws JMSRuntimeException, JMSException { + List messageIds = Arrays.asList("1", "2", "3", "4", "5"); + + Mockito.when(sequenceStateClient.browse()).thenReturn( + Optional.of(new SequenceState( + 6, + messageIds, + SequenceState.LastKnownState.IN_FLIGHT)) + ); + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); +
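+ // No sequence id is recorded in the Kafka offset topic, so an IN_FLIGHT state message means the batch may never have reached Kafka and must be redelivered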
Mockito.when(sequenceStateClient.getSequenceFromKafkaOffset(any(SourceTaskContext.class), anyString(), anyMap())).thenReturn(Optional.empty()); + mqSourceTask.start(createExactlyOnceConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + assertThat(mqSourceTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.REDELIVER_UNSENT_BATCH); + + assertThat(mqSourceTask.getSequenceId().get()).isEqualTo(6L); + assertThat(mqSourceTask.getMsgIds()).isEqualTo(messageIds); + } + + @Test + public void testStartWhenPrepareMessageInMQAndSequenceStateIsIN_FLIGHTWithKafkaStateUnmatched() throws JMSRuntimeException, JMSException { + List messageIds = Arrays.asList("1", "2", "3", "4", "5"); + + Mockito.when(sequenceStateClient.browse()).thenReturn( + Optional.of(new SequenceState( + 2L, + messageIds, + SequenceState.LastKnownState.IN_FLIGHT)) + ); + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); + Mockito.when(sequenceStateClient.getSequenceFromKafkaOffset(any(SourceTaskContext.class), anyString(), anyMap())).thenReturn(Optional.of(1L)); + mqSourceTask.start(createExactlyOnceConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + assertThat(mqSourceTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.REDELIVER_UNSENT_BATCH); + + assertThat(mqSourceTask.getSequenceId().get()).isEqualTo(2L); + assertThat(mqSourceTask.getMsgIds()).isEqualTo(messageIds); + } + + @Test + public void testStart_WhenPrepareMessageInMQ_AndSequenceStateIsIN_FLIGHTWithKafkaStateMatched() throws JMSRuntimeException, JMSException { + List messageIds = Arrays.asList("1", "2", "3", "4", "5"); + + Mockito.when(sequenceStateClient.browse()).thenReturn( + Optional.of(new SequenceState( + 7, + messageIds, + SequenceState.LastKnownState.IN_FLIGHT)) + ); + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); + Mockito.when(sequenceStateClient.getSequenceFromKafkaOffset(any(SourceTaskContext.class), anyString(), anyMap())).thenReturn(Optional.of(7L)); + mqSourceTask.start(createExactlyOnceConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + assertThat(mqSourceTask.startUpAction).isEqualTo(MQSourceTaskStartUpAction.REMOVE_DELIVERED_MESSAGES_FROM_SOURCE_QUEUE); + assertThat(mqSourceTask.getSequenceId().get()).isEqualTo(7L); + assertThat(mqSourceTask.getMsgIds()).isEqualTo(messageIds); + + Mockito.verify(sequenceStateClient, Mockito.times(1)).replaceState( + new SequenceState( + 7, + messageIds, + SequenceState.LastKnownState.DELIVERED) + ); + } + + @Test + public void testPollsBlockUntilBatchComplete() throws JMSRuntimeException, JMSException, InterruptedException { + Mockito.when(jmsWorker.receive(anyString(), any(QueueConfig.class), anyBoolean())).thenReturn(jmsMessage); + + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); + mqSourceTask.start(createDefaultConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + List firstConnectMessagesBatch = mqSourceTask.poll(); + assertThat(firstConnectMessagesBatch.size()).isEqualTo(MQ_BATCH_SIZE); + + for (int i = 0; i < firstConnectMessagesBatch.size(); i++) { + if (i < 2 || i > (MQ_BATCH_SIZE - 2)) { + // do a few polls while messages are being committed, but + // keep under the limit that will cause an exception + List pollDuringCommits = mqSourceTask.poll(); + assertThat(pollDuringCommits).isNull(); + } + + mqSourceTask.commitRecord(firstConnectMessagesBatch.get(i)); + } + + // now all 
messages are committed, a poll should return messages + List secondConnectMessagesBatch = mqSourceTask.poll(); + assertThat(secondConnectMessagesBatch.size()).isEqualTo(MQ_BATCH_SIZE); + } + + @Test + public void testRepeatedPollsFailWhileMessagesInFlight() throws JMSRuntimeException, JMSException, InterruptedException { + Mockito.when(jmsWorker.receive(anyString(), any(QueueConfig.class), anyBoolean())).thenReturn(jmsMessage); + + MQSourceTask mqSourceTask = new MQSourceTask(); + mqSourceTask.initialize(sourceTaskContext); + mqSourceTask.start(createDefaultConnectorProperties(), jmsWorker, dedicatedWorker, sequenceStateClient); + + List firstConnectMessagesBatch = mqSourceTask.poll(); + assertThat(firstConnectMessagesBatch.size()).isEqualTo(MQ_BATCH_SIZE); + + final int BLOCKED_POLLS_LIMIT = 50; + for (int i = 0; i < BLOCKED_POLLS_LIMIT; i++) { + // up to the limit, polls before the batch is committed are + // allowed, but return null to indicate that the poll + // cycle was skipped + List pollDuringCommits = mqSourceTask.poll(); + assertThat(pollDuringCommits).isNull(); + } + + // additional polls throw an exception to indicate that the + // task has been blocked for too long + assertThrows(ConnectException.class, () -> mqSourceTask.poll()); + } +} diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderFactoryTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderFactoryTest.java new file mode 100644 index 0000000..c79bc5f --- /dev/null +++ b/src/test/java/com/ibm/eventstreams/connect/mqsource/builders/RecordBuilderFactoryTest.java @@ -0,0 +1,49 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource.builders; + +import org.assertj.core.api.Assertions; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +public class RecordBuilderFactoryTest { + + final Map emptyProps = new HashMap<>(); + + @Test + public void testGetRecordBuilder_ForJsonRecordBuilder() { + RecordBuilder recordBuilder = RecordBuilderFactory.getRecordBuilder("com.ibm.eventstreams.connect.mqsource.builders.JsonRecordBuilder", emptyProps); + Assertions.assertThat(recordBuilder).isInstanceOf(JsonRecordBuilder.class); + } + + @Test + public void testGetRecordBuilder_ForDefaultRecordBuilder() { + RecordBuilder recordBuilder = RecordBuilderFactory.getRecordBuilder("com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder", emptyProps); + Assertions.assertThat(recordBuilder).isInstanceOf(DefaultRecordBuilder.class); + } + + @Test(expected = RecordBuilderException.class) + public void testGetRecordBuilder_JunkClass() { + RecordBuilderFactory.getRecordBuilder("casjsajhasdhusdo;iasd", emptyProps); + } + + @Test(expected = RecordBuilderException.class) + public void testGetRecordBuilder_NullProps() { + RecordBuilderFactory.getRecordBuilder("casjsajhasdhusdo;iasd", null); + } +} \ No newline at end of file diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateClientTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateClientTest.java new file mode 100644 index 0000000..0f0d525 --- /dev/null +++ b/src/test/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateClientTest.java @@ -0,0 +1,142 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource.sequencestate; + +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.CHANNEL_NAME; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.DEFAULT_CONNECTION_NAME; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.DEFAULT_SOURCE_QUEUE; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.DEFAULT_STATE_QUEUE; +import static com.ibm.eventstreams.connect.mqsource.AbstractJMSContextIT.QMGR_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +import javax.jms.JMSException; + +import org.apache.kafka.connect.source.SourceTaskContext; +import org.apache.kafka.connect.storage.OffsetStorageReader; +import org.junit.Test; +import org.mockito.Mockito; + +import com.ibm.eventstreams.connect.mqsource.JMSWorker; + +public class SequenceStateClientTest { + + private Map getDefaultConnectorProperties() { + final Map props = new HashMap<>(); + props.put("mq.queue.manager", QMGR_NAME); + props.put("mq.connection.mode", "client"); + props.put("mq.connection.name.list", DEFAULT_CONNECTION_NAME); + props.put("mq.channel.name", CHANNEL_NAME); + props.put("mq.queue", DEFAULT_SOURCE_QUEUE); + props.put("mq.user.authentication.mqcsp", "false"); + return props; + } + + @Test + public void test_emptyResponseFromKafkaOffsetTopic_ThenOptionalEmpty() { + + Map props = getDefaultConnectorProperties(); + props.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + + SequenceStateClient sequenceStateClient = new SequenceStateClient( + DEFAULT_STATE_QUEUE, + Mockito.mock(JMSWorker.class), + Mockito.mock(JMSWorker.class) + ); + + SourceTaskContext contextMock = Mockito.mock(SourceTaskContext.class); + OffsetStorageReader offsetStorageReaderMock = Mockito.mock(OffsetStorageReader.class); + Mockito.when(offsetStorageReaderMock.offset(any())).thenReturn(Collections.emptyMap()); + Mockito.when(contextMock.offsetStorageReader()).thenReturn(offsetStorageReaderMock); + + Map sourcePartition = new HashMap<>(); + sourcePartition.put("source", "myqmgr/myq"); + + Optional sequenceFromKafkaOffset = sequenceStateClient.getSequenceFromKafkaOffset(contextMock, "test-offset", sourcePartition); + + assertThat(sequenceFromKafkaOffset).isEmpty(); + } + + @Test + public void test_NullResponseFromKafkaOffsetTopic_ThenOptionalEmpty() { + + Map props = getDefaultConnectorProperties(); + props.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + + SequenceStateClient sequenceStateClient = new SequenceStateClient( + DEFAULT_STATE_QUEUE, + Mockito.mock(JMSWorker.class), + Mockito.mock(JMSWorker.class) + ); + + SourceTaskContext contextMock = Mockito.mock(SourceTaskContext.class); + OffsetStorageReader offsetStorageReaderMock = Mockito.mock(OffsetStorageReader.class); + Mockito.when(offsetStorageReaderMock.offset(any())).thenReturn(null); + Mockito.when(contextMock.offsetStorageReader()).thenReturn(offsetStorageReaderMock); + + Map sourcePartition = new HashMap<>(); + sourcePartition.put("source", "myqmgr/myq"); + + Optional sequenceFromKafkaOffset = sequenceStateClient.getSequenceFromKafkaOffset(contextMock, 
"test-offset", sourcePartition); + + assertThat(sequenceFromKafkaOffset).isEmpty(); + } + + @Test(expected = Test.None.class /* no exception expected */) + public void test_validateStateQueue_OK() throws JMSException { + Map props = getDefaultConnectorProperties(); + props.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + + JMSWorker dedicatedMock = Mockito.mock(JMSWorker.class); + Mockito.when(dedicatedMock.queueHoldsMoreThanOneMessage(anyString())).thenReturn(false); + + SequenceStateClient sequenceStateClient = new SequenceStateClient( + DEFAULT_STATE_QUEUE, + Mockito.mock(JMSWorker.class), + dedicatedMock + ); + + sequenceStateClient.validateStateQueue(); + } + + @Test + public void test_validateStateQueue_NOT_OK() throws JMSException { + Map props = getDefaultConnectorProperties(); + props.put("mq.record.builder", "com.ibm.eventstreams.connect.mqsource.builders.DefaultRecordBuilder"); + + JMSWorker dedicatedMock = Mockito.mock(JMSWorker.class); + Mockito.when(dedicatedMock.queueHoldsMoreThanOneMessage(anyString())).thenReturn(true); + + SequenceStateClient sequenceStateClient = new SequenceStateClient( + DEFAULT_STATE_QUEUE, + Mockito.mock(JMSWorker.class), + dedicatedMock + ); + + Exception exception = assertThrows(Exception.class, sequenceStateClient::validateStateQueue); + String actualMessage = exception.getMessage(); + + assertTrue(actualMessage.contains("more than one message")); + } +} \ No newline at end of file diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateTest.java new file mode 100644 index 0000000..51e6939 --- /dev/null +++ b/src/test/java/com/ibm/eventstreams/connect/mqsource/sequencestate/SequenceStateTest.java @@ -0,0 +1,165 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsource.sequencestate; + +import org.junit.Test; +import org.testcontainers.shaded.com.fasterxml.jackson.databind.ObjectMapper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SequenceStateTest { + + private final ObjectMapper mapper = new ObjectMapper(); + + @Test + public void testSequenceStateEquals() { + + SequenceState sequenceState1 = new SequenceState( + 1, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + SequenceState sequenceState2 = new SequenceState( + 1, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + assertThat(sequenceState1).isEqualTo(sequenceState2); + } + + @Test + public void testSequenceStateNotEquals() { + + SequenceState sequenceState1 = new SequenceState( + 1, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + SequenceState sequenceState2 = new SequenceState( + 2, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + assertThat(sequenceState1).isNotEqualTo(sequenceState2); + } + + + @Test + public void testMapperReadValue() throws IOException { + + String input = "{" + + "\"sequenceId\":1," + + "\"messageIds\":[" + + "\"414d51204d59514d475220202020202033056b6401d50010\"," + + "\"414d51204d59514d475220202020202033056b6401d50011\"," + + "\"414d51204d59514d475220202020202033056b6401d50012\"" + + "]," + + "\"lastKnownState\":\"IN_FLIGHT\"" + + "}"; + + SequenceState sequenceState = mapper.readValue(input, SequenceState.class); + + assertThat(sequenceState).isEqualTo( + new SequenceState( + 1, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ) + ); + } + + @Test + public void testMapperWriteValueAsString() throws IOException { + + SequenceState sequenceState = new SequenceState( + 1, + new ArrayList<>(Arrays.asList( + "414d51204d59514d475220202020202033056b6401d50010", + "414d51204d59514d475220202020202033056b6401d50011", + "414d51204d59514d475220202020202033056b6401d50012" + )), + SequenceState.LastKnownState.IN_FLIGHT + ); + + + assertThat(mapper.writeValueAsString(sequenceState)).isEqualTo( + "{" + + "\"sequenceId\":1," + + "\"messageIds\":[" + + "\"414d51204d59514d475220202020202033056b6401d50010\"," + + "\"414d51204d59514d475220202020202033056b6401d50011\"," + + "\"414d51204d59514d475220202020202033056b6401d50012\"" + + "]," + + "\"lastKnownState\":\"IN_FLIGHT\"" + + "}" + ); + } + + @Test + public void isDeliveredAndInFlightShouldBeFalseOnEmptyConstructor() { + SequenceState sequenceState = new SequenceState(); + 
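+ // lastKnownState is left null by the no-arg constructor, so neither isDelivered() nor isInFlight() should report true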
+        assertThat(sequenceState.isDelivered()).isFalse();
+        assertThat(sequenceState.isInFlight()).isFalse();
+    }
+
+    @Test
+    public void isDeliveredWorks() {
+        SequenceState sequenceState = new SequenceState(1,
+                new ArrayList<>(),
+                SequenceState.LastKnownState.DELIVERED);
+
+        assertThat(sequenceState.isDelivered()).isTrue();
+        assertThat(sequenceState.isInFlight()).isFalse();
+    }
+
+    @Test
+    public void isInFlightWorks() {
+        SequenceState sequenceState = new SequenceState(1,
+                new ArrayList<>(),
+                SequenceState.LastKnownState.IN_FLIGHT);
+
+        assertThat(sequenceState.isInFlight()).isTrue();
+        assertThat(sequenceState.isDelivered()).isFalse();
+    }
+
+}
\ No newline at end of file
diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsource/util/ExceptionProcessorTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsource/util/ExceptionProcessorTest.java
new file mode 100644
index 0000000..7129d77
--- /dev/null
+++ b/src/test/java/com/ibm/eventstreams/connect/mqsource/util/ExceptionProcessorTest.java
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2023, 2024 IBM Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.ibm.eventstreams.connect.mqsource.util;
+
+import org.apache.kafka.connect.errors.ConnectException;
+
+import com.ibm.mq.MQException;
+
+import junit.framework.TestCase;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class ExceptionProcessorTest extends TestCase {
+
+    public void test_getReasonWithNonMQException() {
+        ConnectException exp = new ConnectException("test text");
+        int reason = ExceptionProcessor.getReason(exp);
+        assertThat(reason).isEqualTo(-1);
+    }
+
+    public void test_getReasonWithMQException() {
+        MQException exp = new MQException(1, 1, getClass());
+        MQException wrapper_exp = new MQException(1, 1, exp, exp);
+        int reason = ExceptionProcessor.getReason(wrapper_exp);
+        assertThat(reason).isGreaterThan(-1);
+    }
+
+    public void test_isClosableWithMQExceptionErrorNotClosable() {
+        MQException exp = new MQException(1, 1, getClass());
+        MQException wrapper_exp = new MQException(1, 1, exp, exp);
+        boolean isClosable = ExceptionProcessor.isClosable(wrapper_exp);
+        assertThat(isClosable).isTrue();
+    }
+
+    public void test_isClosableWithMQExceptionErrorIsClosable() {
+        MQException exp = new MQException(1, 2016, getClass());
+        MQException wrapper_exp = new MQException(1, 1, exp, exp);
+        boolean isClosable = ExceptionProcessor.isClosable(wrapper_exp);
+        assertThat(isClosable).isFalse();
+    }
+
+    public void test_isRetriableWithMQExceptionErrorsAreRetriable() {
+        final List<Integer> reasonsRetriable = new ArrayList<>();
+        reasonsRetriable.add(2003);
+        reasonsRetriable.add(2537);
+        reasonsRetriable.add(2009);
+        reasonsRetriable.add(2538);
+        reasonsRetriable.add(2035);
+        reasonsRetriable.add(2059);
+        reasonsRetriable.add(2161);
+        reasonsRetriable.add(2162);
+        reasonsRetriable.add(2195);
+        reasonsRetriable.add(2016);
+        for (int reason : reasonsRetriable) {
+            createAndProcessExceptionThrough_isRetriable_andAssert(reason, true);
+        }
+    }
+
+    public void test_isRetriableWithMQExceptionErrorsAreNotRetriable() {
+        createAndProcessExceptionThrough_isRetriable_andAssert(1, false);
+    }
+
+    private void createAndProcessExceptionThrough_isRetriable_andAssert(int reason, Boolean expectedResult) {
+        MQException exp = new MQException(1, reason, getClass());
+        MQException wrapper_exp = new MQException(1, 1, exp, exp);
+        assertThat(ExceptionProcessor.isRetriable(wrapper_exp)).isEqualTo(expectedResult);
+    }
+}
diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsource/util/QueueConfigTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsource/util/QueueConfigTest.java
new file mode 100644
index 0000000..12a02e5
--- /dev/null
+++ b/src/test/java/com/ibm/eventstreams/connect/mqsource/util/QueueConfigTest.java
@@ -0,0 +1,77 @@
+/**
+ * Copyright 2023, 2024 IBM Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.ibm.eventstreams.connect.mqsource.util;
+
+import com.ibm.mq.constants.MQConstants;
+import com.ibm.mq.jms.MQQueue;
+import com.ibm.msg.client.wmq.WMQConstants;
+import junit.framework.TestCase;
+import org.mockito.Mockito;
+import javax.jms.JMSException;
+import static org.assertj.core.api.Assertions.assertThat;
+
+
+public class QueueConfigTest extends TestCase {
+
+    public void testApplyToQueue_JMSBody() throws JMSException {
+        QueueConfig jmsBodyConfig = new QueueConfig(true, false);
+        MQQueue mockQueue = Mockito.mock(MQQueue.class);
+
+        jmsBodyConfig.applyToQueue(mockQueue);
+
+        Mockito.verify(mockQueue).setMessageBodyStyle(MQConstants.WMQ_MESSAGE_BODY_JMS);
+    }
+
+    public void testApplyToQueue_MQBody() throws JMSException {
+        QueueConfig mqBodyConfig = new QueueConfig(false, false);
+        MQQueue mockQueue = Mockito.mock(MQQueue.class);
+
+        mqBodyConfig.applyToQueue(mockQueue);
+
+        Mockito.verify(mockQueue).setMessageBodyStyle(MQConstants.WMQ_MESSAGE_BODY_MQ);
+    }
+
+    public void testApplyToQueue_MQMDRead() throws JMSException {
+        QueueConfig mqmdReadBodyConfig = new QueueConfig(false, true);
+        MQQueue mockQueue = Mockito.mock(MQQueue.class);
+        mqmdReadBodyConfig.applyToQueue(mockQueue);
+
+        Mockito.verify(mockQueue).setBooleanProperty(WMQConstants.WMQ_MQMD_READ_ENABLED, true);
+
+        QueueConfig mqmdNoReadBodyConfig = new QueueConfig(false, false);
+        MQQueue mockQueue2 = Mockito.mock(MQQueue.class);
+        mqmdNoReadBodyConfig.applyToQueue(mockQueue2);
+
+        Mockito.verify(mockQueue2, Mockito.never()).setBooleanProperty(WMQConstants.WMQ_MQMD_READ_ENABLED, true);
+    }
+
+    public void testIsMqMessageBodyJms() {
+        QueueConfig jmsBodyConfig = new QueueConfig(true, false);
+        assertThat(jmsBodyConfig.isMqMessageBodyJms()).isTrue();
+
+        QueueConfig mqBodyConfig = new QueueConfig(false, false);
+        assertThat(mqBodyConfig.isMqMessageBodyJms()).isFalse();
+    }
+
+    public void testIsMqMessageMqmdRead() {
+        QueueConfig mqmdBodyConfig = new QueueConfig(false, true);
+        assertThat(mqmdBodyConfig.isMqMessageMqmdRead()).isTrue();
+
+        QueueConfig nomqmdBodyConfig = new QueueConfig(false, false);
+        assertThat(nomqmdBodyConfig.isMqMessageMqmdRead()).isFalse();
+
+    }
+}
\ No newline at end of file
diff --git a/src/test/resources/log4j.properties b/src/test/resources/log4j.properties
index 514f678..2af0c08 100644
--- a/src/test/resources/log4j.properties
+++ b/src/test/resources/log4j.properties
@@ -1,5 +1,5 @@
 #
-# Copyright 2017, 2018, 2019 IBM Corporation
+# Copyright 2017, 2018, 2019, 2024 IBM Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/src/test/resources/no-auth-qmgr.mqsc b/src/test/resources/no-auth-qmgr.mqsc
new file mode 100644
index 0000000..a05d40e
--- /dev/null
+++ b/src/test/resources/no-auth-qmgr.mqsc
@@ -0,0 +1,3 @@
+ALTER QMGR CHLAUTH(DISABLED)
+ALTER QMGR CONNAUTH(' ')
+REFRESH SECURITY TYPE(CONNAUTH)